content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
from functools import lru_cache
import requests
from six import u
from unidecode import unidecode
# SPARQL template: the first placeholder receives the numeric WordNet 3.0
# synset id, the second the label language code. P2888 ("exact match")
# links Wikidata items to the Princeton WordNet RDF URIs.
_synset_sparql_query = """
SELECT ?item ?itemLabel WHERE {{
?item wdt:P2888 <http://wordnet-rdf.princeton.edu/wn30/{}-n>
SERVICE wikibase:label {{ bd:serviceParam wikibase:language "{}". }}
}}
"""
# Public Wikidata SPARQL endpoint.
_wikidata_url = 'https://query.wikidata.org/sparql'
@lru_cache(maxsize=1000)
def synset_to_label(synset, language='en'):
    """
    Look up the human-readable label of a WordNet synset via Wikidata.

    :param synset: WordNet synset id such as 'n03207941'; the leading
        part-of-speech character is stripped before querying.
    :param language: two-letter language code for the label service.
    :return: the first label returned by Wikidata, or "???" when none.
    """
    # Substitute synset id and language into the SPARQL template.
    sparql = _synset_sparql_query.format(synset[1:], language)
    payload = {'query': sparql, 'format': 'json'}
    # Run the query against the public Wikidata endpoint and decode JSON.
    data = requests.get(_wikidata_url, params=payload).json()
    bindings = data['results']['bindings']
    labels = [binding['itemLabel']['value'] for binding in bindings]
    # Fall back to a placeholder when Wikidata knows no label.
    return labels[0] if labels else "???"
def unicode_to_ascii(text):
    """
    Transliterate selected Nordic/German characters, then ASCII-fold the rest.

    The explicit table is applied before unidecode because unidecode would
    otherwise fold e.g. u'\xe5' to 'a' instead of the desired 'aa'.

    :param text: unicode string to convert.
    :return: ASCII-only string.
    """
    # Fix: builds the intermediate string with ''.join instead of the
    # previous quadratic character-by-character += loop, and replaces the
    # repetitive if/elif chain with a lookup table (same mappings).
    special = {
        u('\xe5'): 'aa',  # å
        u('\xe6'): 'ae',  # æ
        u('\xf8'): 'oe',  # ø
        u('\xf6'): 'oe',  # ö
        u('\xe4'): 'ae',  # ä
        # NOTE(review): ü maps to 'u' here while ä maps to 'ae' -- 'ue'
        # would be the usual German fold; kept as-is to preserve behavior.
        u('\xfc'): 'u',
    }
    encoded = ''.join(special.get(character, character) for character in text)
    return unidecode(encoded)
if __name__ == "__main__":
    languages = ["en", "da", "fr", "de", "nl"]
    # Mimics classifier output: a ranked list of (synset, label, confidence).
    decoded = [[('n03207941', 'dishwasher', 0.25054157),
                ('n04442312', 'toaster', 0.240155),
                ('n04070727', 'refrigerator', 0.099175394),
                ('n04554684', 'washer', 0.065704145),
                ('n04004767', 'printer', 0.063971408)]]
    top_prediction = decoded[0][0]
    # Print the top synset's label in every test language.
    for code in languages:
        label = unicode_to_ascii(synset_to_label(top_prediction[0], language=code))
        print(code + ":", label)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# encoding: utf-8
# Minimal static-file HTTP server that serves everything with an explicit
# UTF-8 charset. Runs on both Python 2 and Python 3.
import sys
if sys.version_info.major > 2:
    import http.server as http_server
    import socketserver
else:
    import SimpleHTTPServer as http_server
    import SocketServer as socketserver
Handler = http_server.SimpleHTTPRequestHandler
# Original one-liner this script replaces:
# python -c "import SimpleHTTPServer; m = SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map; m[''] = 'text/plain'; m.update(dict([(k, v + ';charset=UTF-8') for k, v in m.items()])); SimpleHTTPServer.test();"
Handler.extensions_map = {
    '.manifest': 'text/cache-manifest',
    '.html': 'text/html',
    # NOTE(review): .txt deliberately(?) served as text/html so plain files
    # render in the browser -- confirm this is intended.
    '.txt': 'text/html',
    '.png': 'image/png',
    '.jpg': 'image/jpeg',  # fixed: 'image/jpg' is not a registered MIME type
    '.svg': 'image/svg+xml',
    '.css': 'text/css',
    '.js': 'application/x-javascript',
    '.md': 'text/x-markdown',
    '.markdown': 'text/x-markdown',
    '': 'application/octet-stream',  # Default
}
m = Handler.extensions_map
# Advertise UTF-8 for every type so browsers decode text correctly
# (dict comprehension replaces the dict([...]) wrapper; valid on 2.7+).
m.update({k: v + ';charset=UTF-8' for k, v in m.items()})
PORT = 8081
httpd = socketserver.TCPServer(("0.0.0.0", PORT), Handler)
print('serving at port: {}'.format(PORT))
try:
    httpd.serve_forever()
except KeyboardInterrupt:
    print('\nserver shutdown!')
    httpd.server_close()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
Contact: t.hessels@un-ihe.org
Repository: https://github.com/TimHessels/SEBAL
Module: SEBAL
Description:
This module contains a compilation of scripts and functions to run pySEBAL
"""
from SEBAL import pySEBAL
__all__ = ['pySEBAL']
__version__ = '0.1'
|
nilq/baby-python
|
python
|
import unittest
import smartphone
from parameterized import parameterized, parameterized_class
TEST_PHONE_NUMBER = '123'
class SmartPhoneTest(unittest.TestCase):
    """State-machine tests for smartphone.SmartPhone call handling.

    Each parameterized case supplies: a case name, the starting CallState,
    the message the operation is expected to return, the expected resulting
    state, and (where relevant) the in-call / incoming call-number fixtures.
    """

    def setUp(self):
        super().setUp()
        # Fresh phone per test; state/fixtures are overwritten in each case.
        self.phone = smartphone.SmartPhone()

    @parameterized.expand([
        ('idle', smartphone.CallState.IDLE, f'Has call from {TEST_PHONE_NUMBER}', smartphone.CallState.RING),
        ('ring', smartphone.CallState.RING, f'Busy with call from {TEST_PHONE_NUMBER}', smartphone.CallState.RING),
        ('incall', smartphone.CallState.INCALL, f'Alerting user of incoming call from {TEST_PHONE_NUMBER}', smartphone.CallState.INCALL),
    ])
    def test_in_call(self, state_name, state_obj, expected_msg, expected_state):
        # An incoming call: message and resulting state depend on the
        # state the phone is currently in.
        self.phone.state = state_obj
        self.assertEqual(
            self.phone.in_call(TEST_PHONE_NUMBER),
            expected_msg)
        self.assertEqual(self.phone.state, expected_state)

    @parameterized.expand([
        ('idle', smartphone.CallState.IDLE, 'No incoming call', smartphone.CallState.IDLE, None),
        ('ring', smartphone.CallState.RING, f'Pickup call from {TEST_PHONE_NUMBER}', smartphone.CallState.INCALL, TEST_PHONE_NUMBER),
        ('incall', smartphone.CallState.INCALL, f'Switch call to {TEST_PHONE_NUMBER}', smartphone.CallState.INCALL, TEST_PHONE_NUMBER),
        ('incall_no_incoming', smartphone.CallState.INCALL, 'No incoming call', smartphone.CallState.INCALL, None),
    ])
    def test_answer_call(self, state_name, state_obj, expected_msg, expected_state, incoming_call_number):
        # Answering: last fixture is the number currently ringing (or None).
        self.phone.state = state_obj
        self.phone.incoming_call_number = incoming_call_number
        self.assertEqual(
            self.phone.answer_call(),
            expected_msg)
        self.assertEqual(self.phone.state, expected_state)

    @parameterized.expand([
        ('idle', smartphone.CallState.IDLE, 'No call', smartphone.CallState.IDLE, None, None),
        ('ring', smartphone.CallState.RING, f'Can not end call in RING', smartphone.CallState.RING, None, TEST_PHONE_NUMBER),
        ('incall', smartphone.CallState.INCALL, f'End call from {TEST_PHONE_NUMBER}', smartphone.CallState.IDLE, TEST_PHONE_NUMBER, None),
        ('incall_with_incoming', smartphone.CallState.INCALL, f'End call from {TEST_PHONE_NUMBER}', smartphone.CallState.RING, TEST_PHONE_NUMBER, '456'),
    ])
    def test_end_call(self, state_name, state_obj, expected_msg, expected_state, in_call_number, incoming_call_number):
        # Ending a call: fixtures are (active call number, waiting number).
        # Note the last case: ending with a call waiting returns to RING.
        print(f'test case: {state_name}')
        self.phone.state = state_obj
        self.phone.incoming_call_number = incoming_call_number
        self.phone.in_call_number = in_call_number
        self.assertEqual(
            self.phone.end_call(),
            expected_msg)
        self.assertEqual(self.phone.state, expected_state)

    @parameterized.expand([
        ('idle', smartphone.CallState.IDLE, 'No call', smartphone.CallState.IDLE, None, None),
        ('ring', smartphone.CallState.RING, f'Reject call from {TEST_PHONE_NUMBER}', smartphone.CallState.IDLE, None, TEST_PHONE_NUMBER),
        ('incall', smartphone.CallState.INCALL, 'Can not reject call in INCALL', smartphone.CallState.INCALL, TEST_PHONE_NUMBER, None),
        ('incall_with_incoming', smartphone.CallState.INCALL, 'Can not reject call in INCALL', smartphone.CallState.INCALL, TEST_PHONE_NUMBER, '456'),
    ])
    def test_reject_call(self, state_name, state_obj, expected_msg, expected_state, in_call_number, incoming_call_number):
        # Rejecting: only legal while ringing; INCALL rejections are refused.
        print(f'test case: {state_name}')
        self.phone.state = state_obj
        self.phone.incoming_call_number = incoming_call_number
        self.phone.in_call_number = in_call_number
        self.assertEqual(
            self.phone.reject_call(),
            expected_msg)
        self.assertEqual(self.phone.state, expected_state)
# Allow running this test module directly: python -m / python file.py.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import Bio; from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
import urllib2
import sys
import StringIO
import os
base = os.path.expanduser('~')
prot_folder = base + '/biotools/uniprot_proteomes/'
fasta_records = []

# Collect the accessions to process: from the command line, or by prompting
# when the script is run without arguments.
# BUG FIX: previously a prompted accession was never fetched, because the
# download loop iterated over sys.argv[1:] only (empty in that case).
if len(sys.argv) == 1:
    # NOTE(review): this script targets Python 2 (urllib2/StringIO), where
    # raw_input() would be the safe prompt -- input() evaluates the text
    # as an expression there. Confirm the intended interpreter.
    accession = input('Enter UNIPROT proteome accession number: ')
    accessions = [accession]
    filename = accession
else:
    accessions = sys.argv[1:]
    filename = '_'.join(sys.argv[1:])
# (filename is unused in this part of the script; kept for parity.)

for accession in accessions:
    # Use the local cache when this proteome was downloaded before.
    cached_name = '{}.fasta'.format(accession)
    if cached_name in os.listdir(prot_folder):
        fasta = list(SeqIO.parse(prot_folder + cached_name, 'fasta'))
        fasta_records.append(fasta)
    else:
        # Download the proteome from UniProt and cache it as FASTA.
        url = 'http://www.uniprot.org/uniprot/?query=proteome:{}&format=fasta'.format(accession)
        f = urllib2.urlopen(url)
        page = StringIO.StringIO(f.read())
        f.close()
        prot = list(SeqIO.parse(page, 'fasta'))
        SeqIO.write(prot, '{}/{}.fasta'.format(prot_folder, accession), 'fasta')
        fasta_records.append(prot)

# Concatenate all record lists and emit one FASTA stream on stdout.
final = []
for records in fasta_records:
    final += records
SeqIO.write(final, sys.stdout, 'fasta')
|
nilq/baby-python
|
python
|
import os
import math
import cereal.messaging as messaging
import cereal.messaging_arne as messaging_arne
from selfdrive.swaglog import cloudlog
from common.realtime import sec_since_boot
from selfdrive.controls.lib.radar_helpers import _LEAD_ACCEL_TAU
from selfdrive.controls.lib.longitudinal_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import MPC_COST_LONG
from common.op_params import opParams
from common.numpy_fast import interp, clip
from common.travis_checker import travis
LOG_MPC = os.environ.get('LOG_MPC', False)
class LongitudinalMpc():
    """Wrapper around the longitudinal MPC solver (one instance per lead).

    Owns the cffi solver state (``cur_state``/``mpc_solution``), feeds it
    the current ego and lead state each frame via :meth:`update`, and
    exposes the solved targets through ``v_mpc``/``a_mpc``/``v_mpc_future``.
    The desired time gap (TR) is computed by the dynamic-follow logic.
    """

    def __init__(self, mpc_id):
        self.mpc_id = mpc_id
        self.op_params = opParams()
        self.setup_mpc()
        # Solver outputs consumed by the planner.
        self.v_mpc = 0.0
        self.v_mpc_future = 0.0
        self.a_mpc = 0.0
        self.v_cruise = 0.0
        # Lead-tracking bookkeeping, used to decide when to re-seed the solver.
        self.prev_lead_status = False
        self.prev_lead_x = 0.0
        self.new_lead = False
        self.TR_Mod = 0
        self.last_cloudlog_t = 0.0
        # Only mpc 1 publishes the dynamic-follow TR, and never on CI (travis).
        if not travis and mpc_id == 1:
            self.pm = messaging_arne.PubMaster(['smiskolData'])
        else:
            self.pm = None
        self.last_cost = 0.0
        self.df_profile = self.op_params.get('dynamic_follow', 'relaxed').strip().lower()
        self.sng = False

    def send_mpc_solution(self, pm, qp_iterations, calculation_time):
        """Publish the raw solver solution for logging/debugging."""
        qp_iterations = max(0, qp_iterations)
        dat = messaging.new_message('liveLongitudinalMpc')
        dat.liveLongitudinalMpc.xEgo = list(self.mpc_solution[0].x_ego)
        dat.liveLongitudinalMpc.vEgo = list(self.mpc_solution[0].v_ego)
        dat.liveLongitudinalMpc.aEgo = list(self.mpc_solution[0].a_ego)
        dat.liveLongitudinalMpc.xLead = list(self.mpc_solution[0].x_l)
        dat.liveLongitudinalMpc.vLead = list(self.mpc_solution[0].v_l)
        dat.liveLongitudinalMpc.cost = self.mpc_solution[0].cost
        dat.liveLongitudinalMpc.aLeadTau = self.a_lead_tau
        dat.liveLongitudinalMpc.qpIterations = qp_iterations
        dat.liveLongitudinalMpc.mpcId = self.mpc_id
        dat.liveLongitudinalMpc.calculationTime = calculation_time
        pm.send('liveLongitudinalMpc', dat)

    def setup_mpc(self):
        """Load the native solver and allocate its cffi state structs."""
        ffi, self.libmpc = libmpc_py.get_libmpc(self.mpc_id)
        self.libmpc.init(MPC_COST_LONG.TTC, MPC_COST_LONG.DISTANCE,
                         MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)
        self.mpc_solution = ffi.new("log_t *")
        self.cur_state = ffi.new("state_t *")
        self.cur_state[0].v_ego = 0
        self.cur_state[0].a_ego = 0
        self.a_lead_tau = _LEAD_ACCEL_TAU

    def set_cur_state(self, v, a):
        """Set the ego velocity/acceleration fed to the solver."""
        self.cur_state[0].v_ego = v
        self.cur_state[0].a_ego = a

    def get_TR(self, CS, lead):
        """Return the desired time gap (seconds) to the lead for this frame.

        Falls back to a fixed 1.8 s without a lead, at low speed, or on CI;
        otherwise defers to :meth:`dynamic_follow`. Also pushes the matching
        solver cost and publishes the TR as a side effect (not on CI).
        """
        if not lead.status or travis:
            TR = 1.8
        elif CS.vEgo < 5.0:
            TR = 1.8
        else:
            TR = self.dynamic_follow(CS, lead)
        if not travis:
            self.change_cost(TR, CS.vEgo)
            self.send_cur_TR(TR)
        return TR

    def send_cur_TR(self, TR):
        """Publish the current TR on smiskolData (mpc 1 only)."""
        if self.mpc_id == 1 and self.pm is not None:
            dat = messaging_arne.new_message('smiskolData')
            dat.smiskolData.mpcTR = TR
            self.pm.send('smiskolData', dat)

    def change_cost(self, TR, vEgo):
        """Interpolate the distance cost from TR and push it when it changes."""
        TRs = [0.9, 1.8, 2.7]
        costs = [1.0, 0.11, 0.05]
        cost = interp(TR, TRs, costs)
        # Only call into the native solver when the cost actually changed.
        if self.last_cost != cost:
            self.libmpc.change_tr(MPC_COST_LONG.TTC, cost, MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)
            self.last_cost = cost

    def dynamic_follow(self, CS, lead):
        """Compute the dynamic time gap from speed, profile and relative velocity.

        Result is clipped to [0.9, 2.7] seconds.
        """
        # Profile is re-read each call so live op_params edits take effect.
        self.df_profile = self.op_params.get('dynamic_follow', 'normal').strip().lower()
        x_vel = [5.0, 15.0]  # velocities
        if self.df_profile == 'far':
            y_dist = [1.8, 2.7]  # TRs
        elif self.df_profile == 'close':  # for in congested traffic
            x_vel = [5.0, 15.0]
            y_dist = [1.8, 0.9]
        else:  # default to normal
            y_dist = [1.8, 1.8]
        TR = interp(CS.vEgo, x_vel, y_dist)
        # Dynamic follow modifications (the secret sauce):
        # closing on the lead (negative vRel) widens the gap, pulling away
        # shrinks it.
        x = [-5.0, 0.0, 5.0]  # relative velocity values
        y = [0.3, 0.0, -0.3]  # modification values
        self.TR_Mod = interp(lead.vRel, x, y)
        TR += self.TR_Mod
        if CS.leftBlinker or CS.rightBlinker:
            x = [9.0, 55.0]
            y = [1.0, 0.65]  # reduce TR when changing lanes
            TR *= interp(CS.vEgo, x, y)
        return clip(TR, 0.9, 2.7)

    def update(self, pm, CS, lead, v_cruise_setpoint):
        """Run one solver iteration from the current car state and lead.

        Re-seeds the solver on a new/jumping lead, fakes a fast distant lead
        when none is tracked, and resets the solver state on NaNs, backwards
        solutions, or solutions that drive through the lead.
        """
        v_ego = CS.vEgo
        # Setup current mpc state
        self.cur_state[0].x_ego = 0.0
        if lead is not None and lead.status:
            x_lead = lead.dRel
            v_lead = max(0.0, lead.vLead)
            a_lead = lead.aLeadK
            # Treat a nearly-stopped or hard-braking lead as stationary.
            if (v_lead < 0.1 or -a_lead / 2.0 > v_lead):
                v_lead = 0.0
                a_lead = 0.0
            self.a_lead_tau = lead.aLeadTau
            self.new_lead = False
            # Re-seed the solver when a lead appears or jumps by > 2.5 m.
            if not self.prev_lead_status or abs(x_lead - self.prev_lead_x) > 2.5:
                self.libmpc.init_with_simulation(self.v_mpc, x_lead, v_lead, a_lead, self.a_lead_tau)
                self.new_lead = True
            self.prev_lead_status = True
            self.prev_lead_x = x_lead
            self.cur_state[0].x_l = x_lead
            self.cur_state[0].v_l = v_lead
        else:
            self.prev_lead_status = False
            # Fake a fast lead car, so mpc keeps running
            self.cur_state[0].x_l = 50.0
            self.cur_state[0].v_l = v_ego + 10.0
            a_lead = 0.0
            self.a_lead_tau = _LEAD_ACCEL_TAU
        # Calculate mpc
        t = sec_since_boot()
        n_its = self.libmpc.run_mpc(self.cur_state, self.mpc_solution, self.a_lead_tau, a_lead, self.get_TR(CS, lead))
        duration = int((sec_since_boot() - t) * 1e9)
        if LOG_MPC:
            self.send_mpc_solution(pm, n_its, duration)
        # Get solution. MPC timestep is 0.2 s, so interpolation to 0.05 s is needed
        self.v_mpc = self.mpc_solution[0].v_ego[1]
        self.a_mpc = self.mpc_solution[0].a_ego[1]
        self.v_mpc_future = self.mpc_solution[0].v_ego[10]
        # Reset if NaN or goes through lead car
        crashing = any(lead - ego < -50 for (lead, ego) in zip(self.mpc_solution[0].x_l, self.mpc_solution[0].x_ego))
        nans = any(math.isnan(x) for x in self.mpc_solution[0].v_ego)
        backwards = min(self.mpc_solution[0].v_ego) < -0.01
        if ((backwards or crashing) and self.prev_lead_status) or nans:
            # Rate-limit the warning to once every 5 seconds.
            if t > self.last_cloudlog_t + 5.0:
                self.last_cloudlog_t = t
                cloudlog.warning("Longitudinal mpc %d reset - backwards: %s crashing: %s nan: %s" % (
                    self.mpc_id, backwards, crashing, nans))
            self.libmpc.init(MPC_COST_LONG.TTC, MPC_COST_LONG.DISTANCE,
                             MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)
            self.cur_state[0].v_ego = v_ego
            self.cur_state[0].a_ego = 0.0
            self.v_mpc = v_ego
            self.a_mpc = CS.aEgo
            self.prev_lead_status = False
|
nilq/baby-python
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Fourth Paradigm Development, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
URL patterns for the OpenStack Dashboard.
"""
import os
import logging
from glob import glob
from django import shortcuts
from django.core import exceptions
from django.conf.urls.defaults import *
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import messages
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.utils.importlib import import_module
from django.views import generic as generic_views
import django.views.i18n
LOG = logging.getLogger(__name__)
topbars = []
def get_topbar_name(file_name):
    """Return the name of the directory that contains *file_name*.

    Typically called with a module's ``__file__`` so a topbar package can
    discover its own name.
    """
    absolute = os.path.abspath(file_name)
    parent_dir = os.path.dirname(absolute)
    return os.path.basename(parent_dir)
class TopbarRoleCheckMiddleware(object):
    """Deny access to a dashboard topbar unless the user holds a required role.

    NOTE(review): ``self.roles`` (a set of role names) is read in
    :meth:`process_request` but never defined here -- presumably concrete
    subclasses provide it; confirm against the topbar packages.
    """

    def __init__(self):
        # Derive the topbar name from the subclass's module path (third
        # dotted component) unless the subclass already set it.
        if not hasattr(self, "topbar"):
            self.topbar = self.__class__.__module__.split('.')[2]

    def process_request(self, request):
        # Sessions without a logged-in user are not checked here.
        if "username" not in request.session:
            return
        script_name = settings.SCRIPT_NAME
        if not request.path.startswith(script_name):
            return
        # Path relative to the deployment prefix, without the leading slash.
        path = request.path[len(script_name) + 1:]
        # Only guard URLs that belong to this middleware's topbar.
        if not (path == self.topbar or path.startswith(self.topbar + "/")):
            return
        # Reject when the session's roles do not intersect the required set.
        if not (self.roles & set(request.session["roles"])):
            # flush other error messages
            for message in messages.get_messages(request):
                pass
            messages.error(request,
                           "Access denied for user %s at topbar %s" %
                           (request.session["username"],
                            self.topbar))
            return shortcuts.redirect("auth/splash")
|
nilq/baby-python
|
python
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azext_devops.dev.common.arguments import should_detect
class TestArgumentsMethods(unittest.TestCase):
    """Unit tests for azext_devops.dev.common.arguments helpers."""

    def test_should_detect(self):
        # tests default behaviour for detect: None means "auto-detect on".
        self.assertEqual(should_detect(None), True)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
"""
This file is part of genofunc (https://github.com/xiaoyu518/genofunc).
Copyright 2020 Xiaoyu Yu (xiaoyu.yu@ed.ac.uk) & Rachel Colquhoun (rachel.colquhoun@ed.ac.uk).
"""
import os
import unittest
from genofunc.extract_metadata import *
this_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_dir = os.path.join(this_dir, 'tests', 'data')
class TestExtractMetadata(unittest.TestCase):
    """Smoke test for genofunc.extract_metadata against the bundled fixtures."""

    def test_run_extract_metadata(self):
        """Run extract_metadata end to end, then remove its outputs."""
        in_fasta = f"{data_dir}/sequences/seqB.fasta"
        in_metadata = f"{data_dir}/metadata/metadataB.tsv"
        column = ["country"]
        index_field = "strain"
        out_fasta = f"{data_dir}/output/tmp.extract.fasta"
        out_metadata = f"{data_dir}/output/tmp.extracted_metadata.csv"
        log_file = f"{data_dir}/output/extract_metadata.log"
        extract_metadata(in_fasta, in_metadata, column, index_field, out_fasta, out_metadata, log_file)
        # Clean up the produced artifacts so repeated runs start fresh.
        for produced in (out_fasta, out_metadata, log_file):
            os.unlink(produced)
|
nilq/baby-python
|
python
|
from .atmosphere import Atmosphere
from .generalized_atmosphere import GeneralizedAtmosphere
from .generalized_matching import GeneralizedMatching
from .psycop import PSYCOP
from .illicit_conversion import IllicitConversion
from .logically_valid_lookup import LogicallyValidLookup
from .matching import Matching
from .mental_models import MentalModels
from .mreasoner import MReasoner
from .phm import PHM
from .verbal_models import VerbalModels
|
nilq/baby-python
|
python
|
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
import requests
import traceback
from typing import Dict, Any
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
TYPES = {
'threatTypes': ["MALWARE", "SOCIAL_ENGINEERING", "POTENTIALLY_HARMFUL_APPLICATION", "UNWANTED_SOFTWARE"],
'platformTypes': ["ANY_PLATFORM", "WINDOWS", "LINUX", "ALL_PLATFORMS", "OSX", "CHROME", "IOS", "ANDROID"]
}
INTEGRATION_NAME = 'GoogleSafeBrowsing'
URL_OUTPUT_PREFIX = 'GoogleSafeBrowsing.URL'
class Client(BaseClient):
    """HTTP client for the Google Safe Browsing v4 threatMatches endpoint.

    Raises on construction when the configured source reliability is not a
    valid DBotScoreReliability value.
    """

    def __init__(self, proxy: bool, verify: bool, reliability: str, base_url: str, params: dict):
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        }
        super().__init__(proxy=proxy, verify=verify, base_url=base_url, headers=headers)
        self.base_url = base_url
        # Client identification sent with every request body.
        self.client_body = {
            'clientId': params.get('client_id'),
            'clientVersion': params.get('client_version'),
        }
        if DBotScoreReliability.is_valid_type(reliability):
            self.reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
        else:
            raise Exception("Google Safe Browsing v2 error: "
                            "Please provide a valid value for the Source Reliability parameter.")

    def build_request_body(self, client_body: Dict, list_url: List) -> Dict:
        """ build the request body according to the client body and the urls.
        Args:
            client_body: client body to add it in the request body
            list_url: The urls list
        Returns:
            (dict) The request body, in the right format.
        """
        # Idiom fix: comprehension instead of the manual append loop.
        list_urls = [{"url": url} for url in list_url]
        body: Dict = {
            "client": client_body,
            "threatInfo": {
                "threatTypes": TYPES.get('threatTypes'),
                "platformTypes": TYPES.get('platformTypes'),
                "threatEntryTypes": ["URL"],
                "threatEntries": list_urls
            }
        }
        return body

    def url_request(self, client_body, list_url) -> Dict:
        """ send the url request.
        Args:
            client_body: client body to add it in the request body
            list_url: The urls list
        Returns:
            (dict) The response from the request.
        """
        body = self.build_request_body(client_body, list_url)
        result = self._http_request(
            method='POST',
            json_data=body,
            full_url=self.base_url)
        return result
def test_module(client: Client) -> str:
    """
    Performs basic get request to get sample URL details.

    Returns 'ok' on success, otherwise a human-readable error message;
    authorization failures are translated into a configuration hint.
    """
    try:
        # testing a known malicious URL to check if we get matches
        test_url = "http://testsafebrowsing.appspot.com/apiv4/ANY_PLATFORM/MALWARE/URL/"
        res = client.url_request(client.client_body, [test_url])
        if res.get('matches'):  # matches - There is a match for the URL we were looking for
            message = 'ok'
        else:
            # Fixed typo in the user-facing message: "respons" -> "response".
            message = 'Error querying Google Safe Browsing. Expected matching response, but received none'
    except DemistoException as e:
        if 'Forbidden' in str(e) or 'Authorization' in str(e):
            message = 'Authorization Error: please make sure the API Key is set correctly.'
        else:
            raise e
    return message
def handle_errors(result: Dict) -> None:
    """
    Handle errors, raise Exception when there is errors in the response.

    Args:
        result: wrapped API response, possibly carrying 'StatusCode',
            'Body' and/or an 'error' object.
    Raises:
        Exception: describing the failure mode detected.
    """
    status_code = result.get('StatusCode', 0)
    result_body = result.get('Body')
    if result_body == '' and status_code == 204:
        raise Exception('No content received. Possible API rate limit reached.')
    # BUG FIX: the previous check `200 < status_code < 299` raised for
    # *successful* 2xx responses and silently accepted 4xx/5xx. Raise for
    # any non-2xx status instead (0 means no status was reported at all,
    # which is handled by the body/error checks below).
    if status_code and not 200 <= status_code < 300:
        raise Exception(f'Failed to perform request, request status code: {status_code}.')
    if result_body == '':
        raise Exception('No content received. Maybe you tried a private API?.')
    if result.get('error'):
        error_massage = result.get('error', {}).get('message')
        error_code = result.get('error', {}).get('code')
        raise Exception(f'Failed accessing Google Safe Browsing APIs. Error: {error_massage}. Error code: {error_code}')
def arrange_results_to_urls(results: List, url_list: List) -> Dict:
    """ Arrange and filter the URLs results according to the URLs list that we asked information on.
    Args:
        results: the API response matches (may be None when the API reports
            no matches at all -- the 'matches' key is omitted).
        url_list: The URLs list that we asked information on.
    Returns:
        (dict) Mapping of each requested URL to its (possibly empty) list of matches.
    """
    urls_results: Dict[str, list] = {url: [] for url in url_list}
    # BUG FIX: iterate safely when results is None (previously a TypeError).
    for result in results or []:
        url = result.get('threat', {}).get('url')
        # BUG FIX: ignore matches for URLs we did not ask about instead of
        # raising KeyError on an unexpected response entry.
        if url in urls_results:
            urls_results[url].append(result)
    return urls_results
def url_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
    """
    url command: Returns URL details for a list of URL.

    Builds one CommandResults per requested URL: score 3 (malicious) when
    Safe Browsing returned matches for it, score 0 (unknown) otherwise.
    """
    url = argToList(args.get('url'))
    result = client.url_request(client.client_body, url)
    # A missing result or a 'StatusCode' key signals an error payload.
    if not result or result.get('StatusCode'):
        handle_errors(result)
    urls_data = arrange_results_to_urls(result.get('matches'), url)  # type: ignore
    url_data_list = []
    for url_key, url_data in urls_data.items():
        if url_data:
            # At least one threat match -> malicious verdict.
            dbot_score = Common.DBotScore(
                indicator=url_key,
                indicator_type=DBotScoreType.URL,
                integration_name=INTEGRATION_NAME,
                score=3,
                reliability=client.reliability
            )
            url_standard_context = Common.URL(
                url=url_key,
                dbot_score=dbot_score
            )
            url_data_list.append(CommandResults(
                readable_output=tableToMarkdown(f'Google Safe Browsing APIs - URL Query: {url_key}', url_data),
                outputs_prefix=URL_OUTPUT_PREFIX,
                outputs_key_field='IndicatorValue',
                outputs=url_data,
                indicator=url_standard_context
            ))
        else:
            # No matches -> unknown verdict (score 0); the raw response is
            # still attached as outputs.
            dbot_score = Common.DBotScore(
                indicator=url_key,
                indicator_type=DBotScoreType.URL,
                integration_name=INTEGRATION_NAME,
                score=0,
                reliability=client.reliability
            )
            url_standard_context = Common.URL(
                url=url_key,
                dbot_score=dbot_score
            )
            url_data_list.append(CommandResults(
                readable_output=f'No matches for URL {url_key}',
                outputs_prefix=URL_OUTPUT_PREFIX,
                outputs_key_field='IndicatorValue',
                outputs=result,
                indicator=url_standard_context
            ))
    return url_data_list
def build_base_url(params: Dict) -> str:
    """Compose the endpoint URL with the API key as a query parameter."""
    api_key = params.get('api_key')
    endpoint = params.get('url', '')
    # Ensure exactly one trailing slash before appending the query string.
    separator = '' if endpoint.endswith('/') else '/'
    return f"{endpoint}{separator}?key={api_key}"
def main() -> None:
    """Read integration params, build the client, and dispatch the command."""
    params = demisto.params()
    verify_certificate = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    base_url = build_base_url(params)
    reliability = params.get('integrationReliability')
    # Default source reliability when none is configured.
    reliability = reliability if reliability else DBotScoreReliability.B
    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        client = Client(
            params=params,
            base_url=base_url,
            verify=verify_certificate,
            proxy=proxy,
            reliability=reliability)
        if demisto.command() == 'test-module':
            result = test_module(client)
            return_results(result)
        elif demisto.command() == 'url':
            return_results(url_command(client, demisto.args()))
    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
# Demisto executes integrations under several module names, hence the tuple.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
|
nilq/baby-python
|
python
|
from typing import Optional
from odmantic import Model
class Person(Model):
    """ODMantic document model: required name, optional age."""
    name: str
    age: Optional[int]


# Demonstration: optional fields omitted at construction default to None.
john = Person(name="John")
print(john.age)
#> None
|
nilq/baby-python
|
python
|
""" Implementation of DRO models """
import gurobipy as grb
import numpy as np
from time import time
def train_classifier(Kernel, labels_raw, all_epsilon, all_kappa, nc):
    """Train one-vs-rest kernel SVMs for class *nc* over a (kappa, epsilon) grid.

    Returns an array of shape (n_samples, len(all_kappa), len(all_epsilon))
    holding the optimal alpha coefficients for every grid point.
    """
    print('Train class ', nc + 1, '...')
    t = time()
    n_samples = Kernel.shape[0]
    alpha = np.zeros((n_samples, len(all_kappa), len(all_epsilon)))
    # One-vs-rest labels: +1 for samples of class nc, -1 for all others.
    labels = -np.ones(n_samples)
    labels[labels_raw[nc]] = 1
    # Solve one SVM per grid point and record its coefficients.
    for nk, kappa in enumerate(all_kappa):
        for ne, epsilon in enumerate(all_epsilon):
            alpha[:, nk, ne] = ksvm(Kernel, labels, epsilon, kappa)['alpha']
    print('Class ', nc + 1, ' is trained in ', np.round((time() - t) / 60.0, 2), ' minutes.')
    return alpha
def ksvm(Kernel, labels, epsilon, kappa):
    """Dispatch to the right kernelized SVM formulation.

    epsilon == 0 -> plain hinge loss; kappa == inf -> norm-regularized SVM;
    otherwise the distributionally robust formulation.
    """
    # When the smallest eigenvalue is negative, shift the spectrum so the
    # matrix handed to the solvers is positive semidefinite.
    smallest_eig = np.linalg.eigvalsh(Kernel)[0]
    if smallest_eig < 0:
        Kernel = Kernel - 2 * smallest_eig * np.eye(Kernel.shape[0])
    if epsilon == 0:
        return hinge_ksvm(Kernel, labels)
    if np.isinf(kappa):
        return regularized_ksvm(Kernel, labels, epsilon)
    return dist_rob_ksvm(Kernel, labels, epsilon, kappa)
def dist_rob_ksvm(Kernel, labels, epsilon, kappa):
    """ kernelized distributionally robust SVM

    Gurobi QCQP: lambda bounds the RKHS norm of the classifier
    (alpha' K alpha <= lambda^2), epsilon weights lambda in the objective
    and kappa scales the second (label-flip) epigraph constraint.
    Returns a dict with 'alpha', 'objective' and the solver 'diagnosis'.
    """
    n_samples = Kernel.shape[0]
    # Step 0: create model
    model = grb.Model('Ker_DRSVM')
    model.setParam('OutputFlag', False)
    # Step 1: define decision variables
    var_lambda = model.addVar(vtype=grb.GRB.CONTINUOUS)
    var_s = {}
    var_alpha = {}
    for i in range(n_samples):
        var_s[i] = model.addVar(vtype=grb.GRB.CONTINUOUS)
        var_alpha[i] = model.addVar(
            vtype=grb.GRB.CONTINUOUS, lb=-grb.GRB.INFINITY)
    # Step 2: integerate variables
    model.update()
    # Step 3: define constraints
    # NOTE(review): chg_cons keeps handles to the second epigraph
    # constraints but is never returned or reused here -- presumably kept
    # for later in-place modification; confirm before removing.
    chg_cons = {}
    for i in range(n_samples):
        # Epigraph for the unflipped label: 1 - y_i f(x_i) <= s_i.
        model.addConstr(
            1 - labels[i] * grb.quicksum(var_alpha[k] * Kernel[k, i]
                                         for k in range(n_samples)) <= var_s[i])
        # Epigraph for the flipped label, discounted by kappa * lambda.
        chg_cons[i] = model.addConstr(
            1 + labels[i] * grb.quicksum(var_alpha[k] * Kernel[k, i]
                                         for k in range(n_samples)) -
            kappa * var_lambda <= var_s[i])
    # RKHS-norm bound: alpha' K alpha <= lambda^2.
    model.addQConstr(
        grb.quicksum(var_alpha[k1] * Kernel[k1, k2] * var_alpha[k2]
                     for k1 in range(n_samples)
                     for k2 in range(n_samples)) <= var_lambda * var_lambda)
    # Step 4: define objective value
    sum_var_s = grb.quicksum(var_s[i] for i in range(n_samples))
    obj = var_lambda * epsilon + 1.0 / n_samples * sum_var_s
    model.setObjective(obj, grb.GRB.MINIMIZE)
    # Step 5: solve the problem
    model.optimize()
    # Step 6: store results
    alpha_opt = np.array([var_alpha[i].x for i in range(n_samples)])
    optimal = {
        'alpha': alpha_opt,
        'objective': model.ObjVal,
        'diagnosis': model.status
    }
    return optimal
def regularized_ksvm(Kernel, labels, epsilon):
    """Kernelized robust/regularized SVM.

    Minimizes epsilon * lambda + average hinge loss subject to the RKHS
    norm bound alpha' K alpha <= lambda^2. Returns a dict with 'alpha',
    'objective' and the Gurobi status under 'diagnosis'.
    """
    num_samples = Kernel.shape[0]
    # Quiet Gurobi model.
    problem = grb.Model('Ker_RSVM')
    problem.setParam('OutputFlag', False)
    # Decision variables: lambda (norm bound), per-sample slacks and the
    # free-sign kernel expansion coefficients.
    lam = problem.addVar(vtype=grb.GRB.CONTINUOUS)
    slack = {}
    coef = {}
    for idx in range(num_samples):
        slack[idx] = problem.addVar(vtype=grb.GRB.CONTINUOUS)
        coef[idx] = problem.addVar(
            vtype=grb.GRB.CONTINUOUS, lb=-grb.GRB.INFINITY)
    problem.update()
    # Hinge epigraph: 1 - y_i f(x_i) <= s_i for every sample.
    for idx in range(num_samples):
        problem.addConstr(
            1 - labels[idx] * grb.quicksum(coef[k] * Kernel[k, idx]
                                           for k in range(num_samples)) <= slack[idx])
    # RKHS norm bound: alpha' K alpha <= lambda^2.
    problem.addQConstr(
        grb.quicksum(coef[k1] * Kernel[k1, k2] * coef[k2]
                     for k1 in range(num_samples)
                     for k2 in range(num_samples)) <= lam * lam)
    # Objective: epsilon-weighted norm bound plus average hinge loss.
    total_slack = grb.quicksum(slack[idx] for idx in range(num_samples))
    problem.setObjective(lam * epsilon + 1.0 / num_samples * total_slack,
                         grb.GRB.MINIMIZE)
    problem.optimize()
    return {
        'alpha': np.array([coef[idx].x for idx in range(num_samples)]),
        'objective': problem.ObjVal,
        'diagnosis': problem.status
    }
def hinge_ksvm(Kernel, labels):
    """Plain kernelized hinge-loss minimization (no regularization term).

    Returns a dict with 'alpha', 'objective' and the Gurobi status under
    'diagnosis'.
    """
    num_samples = Kernel.shape[0]
    # Quiet Gurobi model.
    problem = grb.Model('Ker_RSVM')
    problem.setParam('OutputFlag', False)
    # Per-sample slacks and free-sign kernel expansion coefficients.
    slack = {}
    coef = {}
    for idx in range(num_samples):
        slack[idx] = problem.addVar(vtype=grb.GRB.CONTINUOUS)
        coef[idx] = problem.addVar(
            vtype=grb.GRB.CONTINUOUS, lb=-grb.GRB.INFINITY)
    problem.update()
    # Hinge epigraph: 1 - y_i f(x_i) <= s_i for every sample.
    for idx in range(num_samples):
        problem.addConstr(
            1 - labels[idx] * grb.quicksum(coef[k] * Kernel[k, idx]
                                           for k in range(num_samples)) <= slack[idx])
    # Objective: average hinge loss only.
    total_slack = grb.quicksum(slack[idx] for idx in range(num_samples))
    problem.setObjective(1.0 / num_samples * total_slack, grb.GRB.MINIMIZE)
    problem.optimize()
    return {
        'alpha': np.array([coef[idx].x for idx in range(num_samples)]),
        'objective': problem.ObjVal,
        'diagnosis': problem.status
    }
|
nilq/baby-python
|
python
|
import sys
import os

# buildcml.py: render a controller-mapping-language (.cml) dict into a
# Markdown table (docs/<name>.md) and an indented text file (<name>.txt).
if len(sys.argv) != 2:
    print("Usage: python buildcml.py <cml file>")
    sys.exit(1)
infile = sys.argv[1]
# file names -- splitext keeps directories and dotted names intact
# (the previous split(".")[0] broke on names containing extra dots).
stem = os.path.splitext(infile)[0]
outfile_md = "docs/" + stem + ".md"
outfile_txt = stem + ".txt"
# file buffers
md_buffer = "# Controller Layouts\n"
txt_buffer = ""
# NOTE(security): eval() executes arbitrary Python from the .cml file --
# only run this on trusted files. ast.literal_eval would be the safe
# drop-in if the file is guaranteed to contain only literals.
with open(infile, "r") as f:
    cml = eval(f.read())
for controller in cml:
    print(f"Parsing {controller} controller")
    # add data to buffers
    md_buffer += f"## {controller}\nType: {cml[controller]['Type']}\n\nPort: {cml[controller]['Port']}\n| | |\n| -- | -- |\n"
    txt_buffer += f"-- {controller} --\nType: {cml[controller]['Type']}\nPort: {cml[controller]['Port']}\n"
    # parse through inputs
    for input_type in cml[controller]:
        # Skip non-mapping entries (Type/Port were rendered above);
        # isinstance replaces the previous type(...) == type({}) check.
        if not isinstance(cml[controller][input_type], dict):
            continue
        # add data to buffers
        md_buffer += f"| {input_type} | -- |\n"
        txt_buffer += f" {input_type}:\n"
        # parse items
        for item in cml[controller][input_type]:
            # deal with one extra level of nesting
            if isinstance(cml[controller][input_type][item], dict):
                md_buffer += f"| {item} | -- |\n"
                txt_buffer += f" {item}:\n"
                for subitem in cml[controller][input_type][item]:
                    md_buffer += f"| {subitem} | {cml[controller][input_type][item][subitem]} |\n"
                    txt_buffer += f" {subitem}: {cml[controller][input_type][item][subitem]}\n"
                continue
            # add data to buffers
            md_buffer += f"| {item} | {cml[controller][input_type][item]} |\n"
            txt_buffer += f" {item}: {cml[controller][input_type][item]}\n"
print("Done.")
print("Writing to files...")
# 'with' closes the files; the old code also had a bare 'f.close' (missing
# parentheses -- a silent no-op) after the markdown write.
with open(outfile_txt, "w") as f:
    f.write(txt_buffer)
with open(outfile_md, "w") as f:
    f.write(md_buffer)
print("Done.")
|
nilq/baby-python
|
python
|
from dataclasses import field
from datetime import datetime
from typing import List, Optional
from pydantic.dataclasses import dataclass
@dataclass
class TypeA:
    """Base record with a required string and float field."""
    one: str
    two: float
@dataclass
class TypeB(TypeA):
    """Extends TypeA; re-declares ``one`` and adds a defaulted flag."""
    one: str
    three: bool = field(default=True)
@dataclass
class TypeC(TypeB):
    """Extends TypeB with a datetime list and a wildcard slot.

    NOTE(review): the field metadata ('format', 'type') is presumably
    consumed by a (de)serialization layer -- confirm against its users.
    """
    four: List[datetime] = field(default_factory=list, metadata={"format": "%d %B %Y %H:%M"})
    any: Optional[object] = field(default=None, metadata={"type": "Wildcard"})
|
nilq/baby-python
|
python
|
import requests
import unittest
# Integration test suite: exercises a warm-up dictionary module and two live
# Flask servers (port 5000: greeting exercises, disabled below; port 5001:
# the "socialfilm" review/star API). Tests are ordered by name and several
# later tests depend on state created by earlier ones.
class TestStringMethods(unittest.TestCase):
    '''def test_000_operacoes_ola1(self):
        r = requests.get('http://localhost:5000/ola/marcio')
        self.assertEqual(r.text,'ola marcio')
        r = requests.get('http://localhost:5000/ola/mario')
        self.assertEqual(r.text,'ola mario')
    def test_001_operacoes_ola2(self):
        r = requests.get('http://localhost:5000/ola_upgrade?pessoa1=marcio&pessoa2=alvaro')
        self.assertEqual(r.text,'ola marcio e alvaro')
        r = requests.get('http://localhost:5000/ola_upgrade?pessoa2=alvaro&pessoa1=marcio')
        self.assertEqual(r.text,'ola marcio e alvaro')
        r = requests.get('http://localhost:5000/ola_upgrade?pessoa2=robin&pessoa1=batman')
        self.assertEqual(r.text,'ola batman e robin')
    def test_002_operacoes_ola3(self):
        r = requests.post('http://localhost:5000/ola_upgrade', json={'pessoa1':'batman','pessoa2':'robin'})
        self.assertEqual(r.text,'ola batman e robin')
        r = requests.post('http://localhost:5000/ola_upgrade', json={'pessoa1':'tonico','pessoa2':'tinoco'})
        self.assertEqual(r.text,'ola tonico e tinoco')
    def test_003_operacoes_ola_com_dic(self):
        r = requests.get('http://localhost:5000/ola_com_dic?pessoa1=barney&pessoa2=fred')
        self.assertEqual(r.json()['pessoa1'],'barney')
        self.assertEqual(r.json()['pessoa2'],'fred')
        r = requests.get('http://localhost:5000/ola_com_dic?pessoa2=ron&pessoa1=harry')
        self.assertEqual(r.json()['pessoa1'],'harry')
        self.assertEqual(r.json()['pessoa2'],'ron')
    def test_004_operacoes_ola_com_dic(self):
        r = requests.get('http://localhost:5000/ola_com_dic?pessoa1=barney')
        self.assertEqual(r.status_code,400)
        self.assertEqual(r.json()['erro'],'falta gente')
        r = requests.get('http://localhost:5000/ola_com_dic?pessoa2=barney')
        self.assertEqual(r.status_code,400)
        self.assertEqual(r.json()['erro'],'falta gente')
    def test_005_operacoes_ola_com_dic(self):
        r = requests.post('http://localhost:5000/ola_com_dic',
            json={'pessoa1':'barney','pessoa2':'fred'})
        self.assertEqual(r.json()['pessoa1'],'barney')
        self.assertEqual(r.json()['pessoa2'],'fred')
        r = requests.post('http://localhost:5000/ola_com_dic',
            json={'pessoa1':'harry','pessoa2':'ron'})
        self.assertEqual(r.json()['pessoa1'],'harry')
        self.assertEqual(r.json()['pessoa2'],'ron')
    def test_006_operacoes_ola_com_dic(self):
        r = requests.post('http://localhost:5000/ola_com_dic',
            json={'pessoa2':'fred'})
        self.assertEqual(r.status_code,400)
        self.assertEqual(r.json()['erro'],'falta gente')
        r = requests.post('http://localhost:5000/ola_com_dic',
            json={'pessoa1':'harry'})
        self.assertEqual(r.status_code,400)
        self.assertEqual(r.json()['erro'],'falta gente')
    '''

    # --- 1xx: warm-up exercise on the aquecimento_dicionarios module ---
    def test_100_arquivo_aquecimento(self):
        import aquecimento_dicionarios  # this test checks that aquecimento_dicionarios.py is in the same folder as runtests.py
    def test_101_aquecimento_consulta(self):
        self.carregar_arquivo_aquecimento()
        self.assertEqual(consulta('tt0076759','lucio')['comment'],'achei legal')
        self.assertEqual(consulta('tt0076759','marcos')['comment'],'gostei')
        self.assertEqual(consulta('tt0076759','maria'),'nao encontrado')
    def test_102_aquecimento_adiciona(self):
        self.carregar_arquivo_aquecimento()
        self.assertEqual(consulta('1212','maria'),'nao encontrado')
        adiciona('1212','maria','filme otimo')
        self.assertEqual(consulta('1212','maria')['comment'],'filme otimo')
    def test_103_aquecimento_adiciona(self):
        # Re-adding for the same (film, user) must update in place, not append.
        self.carregar_arquivo_aquecimento()
        adiciona('1212','maria','filme otimo')
        self.assertEqual(consulta('1212','maria')['comment'],'filme otimo')
        antes = len(reviews_aquecimento)
        adiciona('1212','maria','mudei de ideia')
        self.assertEqual(consulta('1212','maria')['comment'],'mudei de ideia')
        adiciona('1212','maria','quer saber? bom mesmo')
        self.assertEqual(consulta('1212','maria')['comment'],'quer saber? bom mesmo')
        depois = len(reviews_aquecimento)
        self.assertEqual(antes,depois)

    # --- 2xx: socialfilm REST API on port 5001 ---
    def test_203_pega_review(self):
        r = requests.get('http://localhost:5001/socialfilm/reviews/tt0076759/marcos')
        self.assertEqual(r.json()['user_id'],'marcos')
        self.assertTrue('gostei' in r.json()['comment'])
        r = requests.get('http://localhost:5001/socialfilm/reviews/tt0076759/lucio')
        self.assertEqual(r.json(),{'user_id':'lucio','comment':'achei legal'})
        r = requests.get('http://localhost:5001/socialfilm/reviews/tt1211837/lucio')
        self.assertEqual(r.json(),{'user_id':'lucio','comment':'estranho'})
    def test_204_pega_review_com_erro(self):
        r = requests.get('http://localhost:5001/socialfilm/reviews/outro/gato')
        self.assertEqual(r.json(),{'erro':'comentario nao encontrado'})
        self.assertEqual(r.status_code,404)
    def test_205_adiciona_review(self):
        r = requests.put('http://localhost:5001/socialfilm/reviews/tt1211837/marcos',
            json={'comment':'esquisito mesmo'})
        self.assertEqual(r.json()['user_id'],'marcos')
        self.assertEqual(r.json()['comment'],'esquisito mesmo')
        r = requests.get('http://localhost:5001/socialfilm/reviews/tt1211837/marcos')
        self.assertEqual(r.json(),{'user_id':'marcos','comment':'esquisito mesmo'})
        r = requests.put('http://localhost:5001/socialfilm/reviews/tt0087332/marcos',
            json={'comment':'curiosa mistura de fantasmas e empreendedorismo'})
        self.assertEqual(r.json()['user_id'],'marcos')
        self.assertEqual(r.json()['comment'],'curiosa mistura de fantasmas e empreendedorismo')
        r = requests.get('http://localhost:5001/socialfilm/reviews/tt0087332/marcos')
        self.assertEqual(r.json()['user_id'],'marcos')
        self.assertEqual(r.json()['comment'],'curiosa mistura de fantasmas e empreendedorismo')
    def test_206_muda_review(self):
        # Updating an existing review must not grow the review list.
        antes = self.total_reviews()
        r = requests.put('http://localhost:5001/socialfilm/reviews/tt0087332/marcos',
            json={'comment':'mudei de ideia. Nao gosto de fantasmas'})
        self.assertEqual(r.json()['user_id'],'marcos')
        self.assertEqual(r.json()['comment'],'mudei de ideia. Nao gosto de fantasmas')
        r = requests.get('http://localhost:5001/socialfilm/reviews/tt0087332/marcos')
        self.assertEqual(r.json()['user_id'],'marcos')
        self.assertEqual(r.json()['comment'],'mudei de ideia. Nao gosto de fantasmas')
        depois = self.total_reviews()
        self.assertEqual(antes,depois)
    def test_207_all_films(self):
        r = requests.get('http://localhost:5001/socialfilm/reviews/all_films/marcos')
        lista_respostas = r.json()
        self.assertTrue(len(lista_respostas) >= 2)
        achei_dr_strange = False
        for review in r.json():
            if review['film_id'] == 'tt1211837':
                achei_dr_strange = True
        if not achei_dr_strange:
            self.fail('a lista de reviews do marcos nao contem o filme dr strange')
    def test_208_estrelas(self):
        r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/marcos')
        self.assertEqual(int(r.json()['stars']),4)
        r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/lucio')
        self.assertEqual(int(r.json()['stars']),5)
        r = requests.get('http://localhost:5001/socialfilm/stars/tt1211837/lucio')
        self.assertEqual(int(r.json()['stars']),2)
        self.assertEqual(r.status_code,200) # normal status code, which occurs
                                            # if you simply do nothing
    def test_209_estrelas_review_nao_encontrada(self):
        r = requests.get('http://localhost:5001/socialfilm/stars/tt1211837/marcos')
        self.assertTrue('error' in r.json())
        self.assertEqual(r.json()['error'],'review nao encontrada')
        self.assertEqual(r.status_code,404)
    def test_210_novas_estrelas(self):
        # Updating a star rating must not grow the ratings list.
        r = requests.put('http://localhost:5001/socialfilm/stars/tt0119177/marcos',
            json={'stars':3})
        r = requests.get('http://localhost:5001/socialfilm/stars/tt0119177/marcos')
        self.assertEqual(r.json()['stars'],3)
        contagem = self.total_stars()
        r = requests.put('http://localhost:5001/socialfilm/stars/tt0119177/marcos',
            json={'stars':4})
        r = requests.get('http://localhost:5001/socialfilm/stars/tt0119177/marcos')
        self.assertEqual(r.json()['stars'],4)
        cont_depois = self.total_stars()
        self.assertEqual(contagem,cont_depois)
    def test_211_average_stars(self):
        # Changes marcos's rating, checks the average moves, then restores it.
        r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/average')
        self.assertTrue(4.4 < r.json()['average_stars'] < 4.6)
        r = requests.put('http://localhost:5001/socialfilm/stars/tt0076759/marcos',
            json={'stars':1})
        r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/average')
        self.assertTrue(2.9 < r.json()['average_stars'] < 3.1)
        r = requests.put('http://localhost:5001/socialfilm/stars/tt0076759/marcos',
            json={'stars':4})
        r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/average')
        self.assertTrue(4.4 < r.json()['average_stars'] < 4.6)

    # --- 3xx: extra-credit behavior ---
    def test_301_filme_invalido(self):
        r = requests.put('http://localhost:5001/socialfilm/reviews/jamesbond/marcos',
            json={'comment':'mudei de ideia. Nao gosto de fantasmas'})
        self.assertEqual(r.json()['error'],'filme nao encontrado')
        self.assertEqual(r.status_code,404)
    def test_302_all_films_nome(self):
        r = requests.get('http://localhost:5001/socialfilm/reviews/all_films/marcos')
        lista_respostas = r.json()
        achei_dr_strange = False
        achei_star_wars = False
        for review in r.json():
            if 'film_name' not in review:
                self.fail('achei um filme sem nome!')
            if 'trange' in review['film_name']:
                achei_dr_strange = True
            if 'ars' in review['film_name']:
                achei_star_wars = True
        if not achei_dr_strange:
            self.fail('a lista de reviews do marcos nao contem o nome do dr strange')
        if not achei_star_wars:
            self.fail('a lista de reviews do marcos nao contem o nome do star wars')
    def test_303_all_films_nao_deve_alterar_a_review(self):
        r = requests.get('http://localhost:5001/socialfilm/all')
        lista_reviews = r.json()['reviews']
        for review in lista_reviews:
            if 'film_name' in review:
                self.fail('voce alterou as reviews do servidor, colocando nome')
    def test_304_estrelas_filme_inexistente(self):
        r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759nao/marcos')
        self.assertTrue('error' in r.json())
        self.assertEqual(r.json()['error'],'filme nao encontrado')
        r = requests.get('http://localhost:5001/socialfilm/stars/tt00076759/marcos')
        self.assertTrue('error' in r.json())
        self.assertEqual(r.json()['error'],'filme nao encontrado')
        self.assertEqual(r.status_code,404)

    # --- helpers ---
    def total_reviews(self):
        # Current number of reviews stored on the server.
        r = requests.get('http://localhost:5001/socialfilm/all')
        return len(r.json()['reviews'])
    def total_stars(self):
        # Current number of star ratings stored on the server.
        r = requests.get('http://localhost:5001/socialfilm/all')
        return len(r.json()['notas'])
    def carregar_arquivo_aquecimento(self):
        '''
        Load the aquecimento_dicionarios module into module globals,
        if it has not been loaded yet.
        '''
        global consulta,adiciona,reviews_aquecimento
        try:
            consulta  # if the module has not been loaded yet,
                      # this line raises and the except branch runs
        except:
            from aquecimento_dicionarios import consulta, adiciona  # then load it
            from aquecimento_dicionarios import reviews_aquecimento
def runTests():
    """Run the TestStringMethods suite verbosely, stopping at the first failure."""
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestStringMethods)
    runner = unittest.TextTestRunner(verbosity=2, failfast=True)
    runner.run(suite)
# Best-effort import of the instructor's answer key (absent for students).
# Bug fix: the original bare "except:" also swallowed KeyboardInterrupt and
# any genuine error raised while executing the module; only a missing module
# should be ignored, so catch ImportError specifically.
try:
    from aquecimento_dicionarios_gabarito_NAO import consulta, adiciona
    from aquecimento_dicionarios_gabarito_NAO import reviews_aquecimento
except ImportError:
    pass

if __name__ == '__main__':
    runTests()
|
nilq/baby-python
|
python
|
from pytest import (fixture, mark)
from wrdpzl import(Board, Solver)
@fixture(scope='module')
def words():
    """Word list for the solver: one stripped line per entry of words.txt."""
    with open('words.txt') as handle:
        return [line.strip() for line in handle]
@fixture(scope='module')
def solver(words):
    # One Solver shared by the whole module; constructing it from the word
    # list is presumably expensive (indexing) - hence module scope.
    return Solver(words)
@mark.timeout(0.5)
@mark.parametrize('board', [
    (Board.load(['performance'] * 10)),
    (Board.load(['top' * 5] * 15)),
    (Board.load(['up' * 75] * 150)),
])
def test_performance(board, solver):
    # The solver must process boards up to 150x150 letters within the 0.5 s
    # timeout and still find at least one word on these letter-dense grids.
    assert solver.solve(board) != []
|
nilq/baby-python
|
python
|
# Copyright (c) 2017-2021 Analog Devices Inc.
# All rights reserved.
# www.analog.com
#
# SPDX-License-Identifier: Apache-2.0
#
import PMBus_I2C
from encodings import hex_codec
import codecs
from time import *
from array import array
import math
import sys
if sys.version_info.major < 3:
input = raw_input
class dac_data:
    """Record pairing a device PMBus address with a zero-based input channel.

    :param address: PMBus address of the ADM1266 the DAC belongs to.
    :param input_channel: one-based input channel the DAC margins, or None.
    """
    def __init__(self, address=None, input_channel=None):
        self.address = address
        # Bug fix: the original always computed input_channel - 1, which
        # raised TypeError whenever the documented default of None was used.
        self.input_channel = None if input_channel is None else input_channel - 1
# PMBus address(es) of the ADM1266 devices. NOTE(review): initialised to a
# bare int, but every function below calls len() on it and indexes it, so the
# calling script is expected to rebind this to a list of addresses - confirm.
ADM1266_Address = 0x00
# Paths of the configuration / firmware hex files; set by the calling script.
config_file_name = ""
firmware_file_name = ""
# Human-readable names for the 12 CRC status flags returned by all_crc_status().
crc_name = ['Main Mini Bootloader CRC', 'Main Bootloader CRC', 'Backup Mini Bootloader CRC', 'Backup Bootloader CRC',
            'Main AB Config CRC', 'Main Project CRC',
            'Main Firmware CRC', 'Main Password CRC', 'Backup AB Config CRC', 'Backup Project CRC',
            'Backup Firmware CRC', 'Backup Password CRC']
# For every device: pause its sequence, then stream the firmware hex file and
# reset the device CPU.
def program_firmware():
    """Program firmware_file_name into every device listed in ADM1266_Address."""
    for x in range(len(ADM1266_Address)):
        pause_sequence(ADM1266_Address[x])
    for x in range(len(ADM1266_Address)):
        print('Loading firmware to device {0:#04x}.'.format(ADM1266_Address[x]))
        program_firmware_hex(ADM1266_Address[x], firmware_file_name, True)
        system_reset(ADM1266_Address[x])
# For every device: pause the sequence, program its configuration hex file,
# restart the sequence, then unlock and trigger a flash memory refresh.
# Proceeds only when exactly one configuration file was supplied per device.
def program_configration(reset=True):
    """Program config_file_name[i] into ADM1266_Address[i] and refresh flash.

    NOTE(review): the "configration" spelling is kept - external callers may
    already use this name.
    """
    if len(ADM1266_Address) == len(config_file_name):
        for x in range(len(ADM1266_Address)):
            pause_sequence(ADM1266_Address[x], reset)
        for x in range(len(ADM1266_Address)):
            print('Loading configuration to device {0:#04x}.'.format(ADM1266_Address[x]))
            program_hex(ADM1266_Address[x], config_file_name[x])
        for x in range(len(ADM1266_Address)):
            start_sequence(ADM1266_Address[x])
        for x in range(len(ADM1266_Address)):
            unlock(ADM1266_Address[x])
            refresh_flash(ADM1266_Address[x])
        print('Running Memory Refresh.')
        delay(10000)
    else:
        print("Number of devices does not match with number of configuration files provided.")
# Reads back the firmware version number and checks for CRC errors.
# For each device, prints the failing CRC names or "All CRC passed".
def crc_summary():
    """Print a per-device programming summary: firmware revision + CRC results."""
    print("\n\nProgramming Summary")
    print("---------------------------------------")
    for x in range(len(ADM1266_Address)):
        recalculate_crc(ADM1266_Address[x])
        crc_status = all_crc_status(ADM1266_Address[x])
        fw_version = get_firmware_rev(ADM1266_Address[x])
        print(
            '\nFirmware version in device {3:#04x} is v{0}.{1}.{2} '.format(fw_version[0], fw_version[1], fw_version[2],
                                                                            ADM1266_Address[x]))
        if crc_status > 0:
            print('The following CRC failed in device {0:#04x}:'.format(ADM1266_Address[x]))
            # Test each of the 12 CRC flag bits against crc_name.
            for y in range(0, 12):
                if (((int(crc_status) & int(math.pow(2, y))) >> int(y)) == 1):
                    print(crc_name[y])
        else:
            print('All CRC passed in device {0:#04x}.'.format(ADM1266_Address[x]))
# Checks that a bootloader is present and (optionally) unlocks the device
# before streaming the firmware hex file line by line.
def program_firmware_hex(device_address, file, unlock_part):
    """Program an Intel-hex firmware file into one ADM1266.

    :param device_address: PMBus address of the target device.
    :param file: path of the firmware hex file.
    :param unlock_part: when True, unlock the device before programming.
    """
    bootloadVer = get_bootload_rev(device_address)
    if bootloadVer != array('B', [0, 0, 0]):
        if unlock_part:
            unlock(device_address)
            # Bug fix: the assert message referenced the undefined name
            # "i2c_address", which would have raised NameError instead of
            # the intended AssertionError text.
            assert islocked(device_address) == False, 'device @0x{0:02X} should be unlocked!'.format(device_address)
        jump_to_iap(device_address)
    # Bug fix: the original leaked the open file handle; use a context manager.
    # ("hex_file" also avoids shadowing the hex() builtin.)
    with open(file, "rb") as hex_file:
        count = 0
        for line in hex_file.readlines():
            # ":00000001FF" is the Intel-hex end-of-file record.
            if (line.startswith(b":00000001FF")):
                break
            data_len = int(line[1:3], 16)
            cmd = int(line[3:7], 16)
            data = [] if data_len == 0 else array('B', codecs.decode((line[9:9 + data_len * 2]), "hex_codec")).tolist()
            if cmd != 0xD8:
                PMBus_I2C.PMBus_Write(device_address, [cmd] + data)
                if count == 0:
                    # The very first write needs extra settling time.
                    count = 1
                    delay(3000)
                else:
                    delay(10)
# Unlocks the ADM1266 (optionally), selects the target memory space, then
# streams the configuration hex file with per-command write delays.
def program_hex(device_address, file, unlock_and_stop=True, main=True):
    """Program a configuration hex file into one ADM1266.

    :param device_address: PMBus address of the target device.
    :param file: path of the configuration hex file.
    :param unlock_and_stop: unlock the device before writing.
    :param main: write to main memory (True) or backup memory (False).
    """
    if unlock_and_stop:
        unlock(device_address)
        # Bug fix: the assert message referenced the undefined name
        # "i2c_address" (NameError); use device_address.
        assert islocked(device_address) == False, 'device @0x{0:02X} should be unlocked!'.format(device_address)
    switch_memory(device_address, main)
    # Bug fix: the original leaked the open file handle; use a context manager.
    with open(file, "rb") as hex_file:
        for line in hex_file.readlines():
            # ":00000001FF" is the Intel-hex end-of-file record.
            if (line.startswith(b":00000001FF")):
                break
            data_len = int(line[1:3], 16)
            cmd = int(line[3:7], 16)
            data = [] if data_len == 0 else array('B', codecs.decode((line[9:9 + data_len * 2]), "hex_codec")).tolist()
            if cmd != 0xD8:
                PMBus_I2C.PMBus_Write(device_address, [cmd] + data)
            # Per-command settling delays; commands writing to flash offset 0
            # need the longest waits.
            delayMs = 0
            offset = 0
            if cmd == 0xD8:
                delayMs = 100
            elif cmd == 0x15:
                delayMs = 300
            elif cmd == 0xD7:
                offset = (data[1] | (data[2] << 8))
                delayMs = 400 if offset == 0 else 40
            elif cmd == 0xE3:
                offset = (data[1] | (data[2] << 8))
                delayMs = 100 if offset == 0 else 40
            elif cmd == 0xE0:
                offset = (data[1] | (data[2] << 8))
                delayMs = 200 if offset == 0 else 40
            elif cmd == 0xD6:
                if data[1] == 0xff and data[2] == 0xff:
                    # Multi-page erase: scale the wait with the page count.
                    pageCount = data[3]
                    delayMs = 100 + (pageCount - 1) * 30
                else:
                    delayMs = 40
            elif cmd == 0xF8:
                delayMs = 100
            delay(delayMs)
# All the functions from here onward write commands to the ADM1266.
def refresh_flash(device_address, config=2):
    """Trigger a flash memory refresh (command 0xF5); config selects the mode - confirm values."""
    PMBus_I2C.PMBus_Write(device_address, [0xF5, 0x01, config])
    # delay(10000)
def system_reset(device_address):
    """Reset the device CPU (command 0xD8, action 0x04) and wait 1 s for reboot."""
    PMBus_I2C.PMBus_Write(device_address, [0xD8, 0x04, 0x00])
    delay(1000)
def recalculate_crc(device_address):
    """Ask the device to recompute its CRCs (command 0xF9); waits 600 ms."""
    PMBus_I2C.PMBus_Write(device_address, [0xF9, 1, 0])
    delay(600)
def unlock(device_address,
           pwd=[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]):
    """Unlock the device with a 16-byte password (default all-0xFF, presumably
    the factory password - confirm). The password write is deliberately issued
    twice back to back with short delays.

    NOTE(review): the mutable default list is only read, never mutated here,
    so the shared-default pitfall does not apply.
    """
    write_password(device_address, 0x02, pwd)
    delay(1)
    write_password(device_address, 0x02, pwd)
    delay(1)
def write_password(device_address, cmd, pwd):
    """Send a password command (0xFD) with a 16-byte password payload.

    :param device_address: PMBus address of the target device.
    :param cmd: password action byte appended after the password.
    :param pwd: list of exactly 16 password bytes.
    :raises ValueError: if pwd is not exactly 16 bytes.
    """
    # Bug fix: input validation used "assert", which is silently stripped
    # under "python -O"; raise an explicit ValueError instead.
    if len(pwd) != 16:
        raise ValueError("password must be exactly 16 bytes, got %d" % len(pwd))
    data = [0xFD, 0x11] + pwd + [cmd]
    PMBus_I2C.PMBus_Write(device_address, data)
def pause_sequence(device_address, reset_sequence=True):
    """Stop the sequencer (command 0xD8): data 0x03 when also resetting the
    sequence, 0x11 to pause only - TODO confirm action-code semantics."""
    PMBus_I2C.PMBus_Write(device_address, [0xD8, 0x03 if reset_sequence else 0x11, 0x00])
    delay(10)
def start_sequence(device_address, reset=False):
    """Resume the sequencer (command 0xD8, action 0x00); when reset is True,
    first issue the sequence-reset action (0x02). Waits 500 ms afterwards.

    Bug fix: the file defined start_sequence twice; the second definition
    (reset=False) silently shadowed the first (reset=True). This single
    definition preserves the behavior of the definition that actually took
    effect.
    """
    if reset:
        PMBus_I2C.PMBus_Write(device_address, [0xd8, 0x02, 0x00])
    PMBus_I2C.PMBus_Write(device_address, [0xd8, 0x00, 0x00])
    delay(500)
def switch_memory(device_address, main):
    """Point subsequent programming (command 0xFA) at main memory (0) or the
    other space (1, presumably backup - confirm)."""
    PMBus_I2C.PMBus_Write(device_address, [0xFA, 1, 0 if main else 1])
def status_mfr_specific(device_address):
    """Read one byte of STATUS_MFR_SPECIFIC (PMBus command 0x80)."""
    return PMBus_I2C.PMBus_Write_Read(device_address, [0x80], 1)
def islocked(device_address):
    """Return True when the device reports locked (bit 2 of STATUS_MFR_SPECIFIC)."""
    return bool(status_mfr_specific(device_address)[0] & 0x04)
def get_bootload_rev(device_address):
    """Return the bootloader revision: bytes 4-6 of the 0xAE block read."""
    data = PMBus_I2C.PMBus_Write_Read(device_address, [0xAE], 9)
    return data[4:7]
def get_firmware_rev(device_address):
    """Return the firmware revision: bytes 1-3 of the 0xAE block read."""
    data = PMBus_I2C.PMBus_Write_Read(device_address, [0xAE], 9)
    return data[1:4]
def jump_to_iap(device_address):
    """Switch the device into in-application-programming mode (0xFC); wait 1 s."""
    PMBus_I2C.PMBus_Write(device_address, [0xFC, 2, 0, 0])
    delay(1000)
def all_crc_status(device_address):
    """Read the 16-bit CRC status word (command 0xED) and return its upper
    12 bits - one flag per entry of crc_name."""
    status = PMBus_I2C.PMBus_Write_Read(device_address, [0xED], 2)
    status = status[0] + (status[1] << 8)
    return (status >> 4)
def delay(ms):
    """Sleep for ms milliseconds."""
    # One extra millisecond compensates for time.sleep() waking slightly early
    # (http://stackoverflow.com/questions/1133857/how-accurate-is-pythons-time-sleep).
    seconds = (ms + 1) / 1000.0
    sleep(seconds)
def refresh_status():
    """Return True while any device still reports a memory refresh in
    progress (bit 3 of STATUS_MFR_SPECIFIC)."""
    refresh_running = False
    for x in range(len(ADM1266_Address)):
        status = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[x], [0x80], 1)
        refresh = (status[0] & 0x08) >> 3
        if refresh == 1:
            refresh_running = True
    return refresh_running
def device_present():
    """Verify every address in ADM1266_Address answers with a valid IC id.

    Reads 4 bytes via command 0xAD and checks the device identity bytes.
    :returns: True when all devices respond correctly (False for an empty list).
    :raises Exception: naming the first address that is absent or invalid.

    Bug fixes vs. the original: a duplicated nested loop re-checked every
    device len(ADM1266_Address) times, and the initial flag was assigned to
    the misspelled name "all_preset", so an empty address list raised
    NameError instead of returning.
    """
    all_present = False
    for x in range(len(ADM1266_Address)):
        ic_id = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[x], [0xAD], 4)
        # Identity bytes: [_, 65 or 66, 18, 102] per the original checks.
        if len(ic_id) == 4 and (ic_id[1] == 66 or ic_id[1] == 65) and ic_id[2] == 18 and ic_id[3] == 102:
            all_present = True
        else:
            raise Exception('Device with address ' + hex(ADM1266_Address[x]) + " is not present.")
    return all_present
def margin_all(margin_type, group_command=False):
    """Margin every rail on every device.

    :param margin_type: "HIGH", "LOW" or "VOUT"; any other value sends 0x44
        (presumably margin off / close loop - confirm).
    :param group_command: when True, send one PMBus group command to all
        devices instead of one write per device.
    """
    margin_type = margin_type.upper()
    # OPERATION (0x01) data byte for each margin state.
    if margin_type == "HIGH":
        command_data = 0xA4
    elif margin_type == "LOW":
        command_data = 0x94
    elif margin_type == "VOUT":
        command_data = 0x84
    else:
        command_data = 0x44
    for x in range(len(ADM1266_Address)):
        # PAGE (0x00) = 0xFF selects all pages/rails on the device.
        status = PMBus_I2C.PMBus_Write(ADM1266_Address[x], [0x00, 0xFF])
    if group_command == True:
        status = PMBus_I2C.PMBus_Group_Write(ADM1266_Address, [0x01, command_data])
    else:
        for x in range(len(ADM1266_Address)):
            status = PMBus_I2C.PMBus_Write(ADM1266_Address[x], [0x01, command_data])
    print("Margin all rails - " + margin_type)
def dac_mapping():
    """Scan all 9 DACs on every device and return a list of dac_data records
    for each DAC that is mapped to an input channel (channel field non-zero)."""
    mapped = []
    for dev in range(len(ADM1266_Address)):
        for dac in range(9):
            raw = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[dev], [0xD5, 0x01, dac], 3)
            config_word = raw[1] + (raw[2] << 8)
            channel = (config_word >> 6) & 0x1f
            if channel != 0:
                mapped.append(dac_data(ADM1266_Address[dev], channel))
    return mapped
def margin_single(device_address, pin_number, margin_type):
    """Margin a single input channel of one device.

    :param device_address: PMBus address of the target device.
    :param pin_number: zero-based input channel (0xFF means invalid).
    :param margin_type: "HIGH", "LOW" or "VOUT"; anything else sends 0x44.

    Only channels that are closed-loop margined by one of the 9 DACs can be
    margined; otherwise a message is printed.
    """
    margin_type = margin_type.upper()
    # OPERATION (0x01) data byte for each margin state.
    if margin_type == "HIGH":
        command_data = 0xA4
    elif margin_type == "LOW":
        command_data = 0x94
    elif margin_type == "VOUT":
        command_data = 0x84
    else:
        command_data = 0x44
    dac_index = 0
    # Bug fix: the original compared the integer pin_number against the
    # string "0xFF", so this invalid-pin guard could never trigger.
    # (A large block of commented-out pin-name mapping code was also removed.)
    if (pin_number == 0xFF):
        print("Please enter a valid pin number.")
    else:
        dac_check = False
        for dac_index in range(9):
            data = PMBus_I2C.PMBus_Write_Read(device_address, [0xD5, 1, dac_index], 3)
            data_combine = data[1] + (data[2] << 8)
            # Channel mapped by this DAC (renamed from "dac_mapping", which
            # shadowed the module-level function of the same name).
            mapped_channel = (data_combine >> 6) & 0x1F
            if (mapped_channel == (pin_number + 1)):
                dac_check = True
                break
        if (dac_check == True):
            # PAGE selects the channel, then OPERATION applies the margin.
            status = PMBus_I2C.PMBus_Write(device_address, [0x00, pin_number])
            status = PMBus_I2C.PMBus_Write(device_address, [0x01, command_data])
            print("Rail margined - " + margin_type.lower())
        else:
            print("Input channel is not closed loop margined by any DAC.")
def margin_open_loop(device_address, dac_name, dac_voltage):
    """Drive one DAC (DAC1-DAC9) to an open-loop output voltage.

    :param device_address: device address as a hex string (e.g. "0x40").
    :param dac_name: "DAC1" .. "DAC9" (case-insensitive).
    :param dac_voltage: target voltage in volts; supported range is
        0.202 V - 1.565 V, covered by three overlapping mid-code ranges.
    """
    device_address = int(device_address, 16)
    dac_voltage = float(dac_voltage)
    dac_name = dac_name.upper()
    dac_names = ["DAC1", "DAC2", "DAC3", "DAC4", "DAC5", "DAC6", "DAC7", "DAC8", "DAC9"]
    dac_index = 0xff
    if dac_name in dac_names:
        dac_index = dac_names.index(dac_name)
        # Select the DAC mid-code range containing the requested voltage;
        # each range is centered on a different mid-code voltage.
        if dac_voltage >= 0.202 and dac_voltage <= 0.808:
            mid_code = 0
            dac_code = dac_code_calc(dac_voltage, 0.506)
        elif dac_voltage >= 0.707 and dac_voltage <= 1.313:
            mid_code = 3
            dac_code = dac_code_calc(dac_voltage, 1.011)
        elif dac_voltage >= 0.959 and dac_voltage <= 1.565:
            mid_code = 4
            dac_code = dac_code_calc(dac_voltage, 1.263)
        else:
            # Sentinel: requested voltage is outside every supported range.
            mid_code = 5
        if mid_code < 5:
            dac_code_parameter = 0x01 + (mid_code << 1)
            dac_config_data = [0xEB, 0x03, dac_index, dac_code_parameter, dac_code]
            status = PMBus_I2C.PMBus_Write(device_address, dac_config_data)
        else:
            print("Enter DAC voltage in between 0.202V - 1.565V.")
    else:
        print("Enter a valid DAC name.")
def dac_config(device_address, dac_name):
    """Check that a DAC is configured open-loop, offering to reconfigure it.

    Interactive: prompts on stdin when the DAC is not open-loop.
    :param device_address: device address as a hex string (e.g. "0x40").
    :param dac_name: "DAC1" .. "DAC9" (case-insensitive).
    :returns: True when the DAC is (now) open-loop, False otherwise.
    """
    device_address = int(device_address, 16)
    dac_name = dac_name.upper()
    dac_names = ["DAC1", "DAC2", "DAC3", "DAC4", "DAC5", "DAC6", "DAC7", "DAC8", "DAC9"]
    dac_index = 0xff
    if dac_name in dac_names:
        dac_index = dac_names.index(dac_name)
        write_data = [0xD5, 0x01, dac_index]
        read_data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, 3)
        # Low two bits of the DAC config word: margining mode (1 = open loop).
        margin_mode = read_data[1] & 0x03
        if margin_mode != 1:
            print("\nSelected DAC is not configured as open loop, would you like to configure the DAC as open loop?")
            set_open_loop = input("Enter 'Y' for yes or press enter to exit: ")
            set_open_loop = set_open_loop.upper()
            if set_open_loop == "Y":
                write_data = [0xD5, 0x03, dac_index, 0x01, 0x00]
                status = PMBus_I2C.PMBus_Write(device_address, write_data)
                return True
            else:
                print("DAC is not configured as open loop, output voltage could not be set.")
                return False
        else:
            return True
    else:
        print("Enter a valid DAC name.")
        return False
def dac_code_calc(dac_voltage, mid_code_volt):
    """Convert a target voltage to an 8-bit DAC code around code 127.

    The DAC spans 0.606 V over 256 codes; 127 corresponds to mid_code_volt.
    """
    volts_per_code = 0.606 / 256
    offset = int((mid_code_volt - dac_voltage) / volts_per_code)
    return offset + 127
def margin_single_percent(device_address, pin_number, margin_percent):
    """Set a channel's margin-high/low thresholds to +/- margin_percent of nominal.

    Reads the channel's exponent (0x20, presumably VOUT_MODE) and mantissa
    (0x21, presumably VOUT_COMMAND), then writes the scaled mantissas to
    0x25/0x26 (margin high/low).
    NOTE(review): under Python 2 an integer margin_percent makes
    "(100 + margin_percent) / 100" truncate; this file supports Python 2
    (raw_input shim above) - confirm intended interpreter.
    """
    # Set page to respective input channel
    write_data = [0x00, pin_number]
    status = PMBus_I2C.PMBus_Write(device_address, write_data)
    # Readback exp and ment
    write_data = [0x20]
    data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, 1)
    exp = data[0]
    write_data = [0x21]
    data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, 2)
    ment = data[0] + (data[1] << 8)
    nominal_value = ment_exp_to_val(exp, ment)
    # Calculate ment for margin high
    margin_high = nominal_value * ((100 + margin_percent) / 100)
    ment = val_to_ment(margin_high, exp)
    write_data = [None] * 3
    write_data[1] = ment & 0xFF
    write_data[2] = ment >> 8
    write_data[0] = 0x25
    status = PMBus_I2C.PMBus_Write(device_address, write_data)
    # Calculate ment for margin low
    margin_low = nominal_value * ((100 - margin_percent) / 100)
    ment = val_to_ment(margin_low, exp)
    write_data[1] = ment & 0xFF
    write_data[2] = ment >> 8
    write_data[0] = 0x26
    status = PMBus_I2C.PMBus_Write(device_address, write_data)
def ment_exp_to_val(exp, ment):
    """Convert a mantissa and 5-bit two's-complement exponent to a real value."""
    exponent = exp_calc(exp)
    return ment * (2 ** exponent)
def val_to_ment(value, exp):
    """Inverse of ment_exp_to_val: scale value by 2**-exp and truncate to int."""
    scaled = value / (2 ** exp_calc(exp))
    return int(scaled)
def exp_calc(value):
    """Sign-extend a 5-bit two's-complement exponent field (0-31 -> -16..15)."""
    return value if value < 16 else value - 32
# Copyright (c) 2017 Analog Devices Inc.
# All rights reserved.
# www.analog.com
# --------------------------------------------------------------------------
# Redistribution and use of this file in source and binary forms, with
# or without modification, are permitted.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
import datetime
import PMBus_I2C
# Module-level names, pad numbers and mutable result buffers used by the
# blackbox / system-data parsing functions below.
VH_Names = ["VH1", "VH2", "VH3", "VH4"]
VP_Names = ["VP1", "VP2", "VP3", "VP4", "VP5", "VP6", "VP7", "VP8", "VP9", "VP10", "VP11", "VP12", "VP13"]
VX_Names = ["VH1", "VH2", "VH3", "VH4", "VP1", "VP2", "VP3", "VP4", "VP5", "VP6", "VP7", "VP8", "VP9", "VP10", "VP11",
            "VP12", "VP13"]
PDIO_GPIO_Names = ["PDIO1", "PDIO2", "PDIO3", "PDIO4", "PDIO5", "PDIO6", "PDIO7", "PDIO8", "PDIO9", "PDIO10", "PDIO11",
                   "PDIO12", "PDIO13", "PDIO14", "PDIO15", "PDIO16", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5",
                   "GPIO6", "GPIO7", "GPIO8", "GPIO9"]
# Pad lookup tables; index 0 is a placeholder so signals can be indexed 1-based.
PDIO_GPIO_Pad = [0, 22, 30, 31, 32, 33, 34, 35, 36, 37, 23, 24, 25, 26, 27, 28, 29, 14, 15, 16, 44, 45, 46, 43, 18, 19]
VX_Pad = [0, 47, 48, 49, 50, 51, 56, 57, 58, 59, 60, 61, 62, 63, 52, 53, 54, 55]
GPIO = [0 for k in range(10)]
# Rail lists filled while parsing a blackbox record.
Normal_Rails = list()
Disabled_Rails = list()
OV_Rails = list()
UV_Rails = list()
# Parsed system-configuration bytes and derived names.
System_Data = list()
State_Names = list()
Signals_Status = list()
# Device addresses; expected to be filled by the calling script.
ADM1266_Address = list()
# Six summary strings produced by BB_Parse()/System_Read().
Summary_Data = [0 for k in range(6)]
# Ring-buffer position and count of blackbox records (set by Number_Of_Records).
Record_Index = 0
Num_Records = 0
# Dynamically initialise the nested lists that hold per-device system and
# blackbox data, sized to the number of device addresses.
def Init_Lists():
    """(Re)create the module-level VH_Data, VP_Data, BB_Data and Signals_Data
    buffers, one top-level entry per address in ADM1266_Address."""
    Address = ADM1266_Address
    global VH_Data
    VH_Data = [[[0 for k in range(15)] for j in range(5)] for i in range(len(Address))]
    # i - dev_id, j - VH1 - 4, k - Name, PDIO_num, PDIO_dev_id, PDIO_pol, OV BB status, UV BB status, PDIO BB Status,
    # Exp, Mant, OV Status, UV Status, OW Status, UW Status, Enable Status
    global VP_Data
    VP_Data = [[[0 for k in range(15)] for j in range(14)] for i in range(len(Address))]
    # i - dev_id, j - VP1 - 13, k - Name, PDIO_num, PDIO_dev_id, PDIO_pol, OV BB status, UV BB status, PDIO BB Status,
    # Exp, Mant, OV Status, UV Status, OW Status, UW Status, Enable Status
    global BB_Data
    BB_Data = [[0 for k in range(65)] for i in range(len(Address))]
    # i - dev_id, k - BB data
    global Signals_Data
    Signals_Data = [[[0 for k in range(7)] for j in range(26)] for i in range(len(Address))]
    # i - dev_id, j - PDIO16+GPIO9, k - Name, PDIO_num, PDIOGPIOType, Direction, Input BB Status, Output BB Status, PDIO Inst Status
# Read from the first device how many blackbox records exist and the index
# of the most recent one.
def Number_Of_Records():
    """Populate the module globals Record_Index and Num_Records (command 0xE6)."""
    write_data = [0xE6]
    read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[0], write_data, 5)
    global Record_Index
    global Num_Records
    Record_Index = read_data[3]
    Num_Records = read_data[4]
# For the requested record number, compute the ring-buffer index from the
# latest index and record count, then read that record from every device.
# Raw blackbox bytes are stored in the BB_Data list.
def Get_Raw_Data(record_number):
    """Fill BB_Data[i] with blackbox record `record_number` from device i."""
    j = Record_Index + int(record_number) - Num_Records
    if j < 0:
        # The blackbox is a 32-entry circular buffer; wrap negative indices.
        j += 32
    for i in range(len(ADM1266_Address)):
        BB_Data[i] = Indexed_Blackbox_Data(ADM1266_Address[i], j)
def Blackbox_Clear():
    """Erase the blackbox records on every device (command 0xDE, action 0xFE)."""
    write_data = [0xDE, 0x02, 0xFE, 0x00]
    for i in range(len(ADM1266_Address)):
        read_data = PMBus_I2C.PMBus_Write(ADM1266_Address[i], write_data)
# Read back system (configuration) information for one device, max 2 kbytes.
# The first 128-byte chunk holds the "Common Data" header (total data length
# and configuration name); the remaining "System Config Data" is then read in
# 128-byte chunks and appended to the System_Data list.
def System_Read(device_address):
    """Read the device's system configuration block into System_Data and set
    Summary_Data[0] to the configuration name.

    NOTE(review): List_to_String is not defined in this file - presumably a
    helper defined elsewhere in the module; confirm.
    """
    write_data = [0xD7, 0x03, 0x80, 0x00, 0x00]
    read_data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, 129)
    Data_length = read_data[1] + (read_data[2] * 256)
    Summary_Data[0] = "Configuration Name - '"
    Summary_Data[0] += List_to_String(read_data[30:(read_data[29] + 30)])
    Summary_Data[0] += "'"
    # Bug fix: removed a dead "j = 256" assignment that was immediately
    # overwritten by "j = 128" in the original.
    j = 128
    while j < Data_length:
        l = j & 0xFF  # low byte of the flash offset
        # Bug fix (cosmetic): high byte computed with a shift instead of the
        # original float division "/ 256" followed by int(); same values.
        k = (j & 0xFF00) >> 8
        n = Data_length - j
        if n > 128:
            n = 128
        write_data = [0xD7, 0x03, n, l, k]
        read_data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, n + 1)
        if k == 0 and l == 128 and n == 128:
            # First chunk right after the common header: keep only the one
            # byte following the header.
            System_Data.extend([read_data[128]])
        else:
            if k == 7 and l == 128 and n == 128:
                # Remove the CRC byte at the end of the system data.
                del read_data[128]
            # Remove the byte count of the PMBus block read.
            del read_data[0]
            System_Data.extend(read_data)
        j += 128
# Read one blackbox record for the given device address and buffer index.
def Indexed_Blackbox_Data(device_address, index):
    """Return the 65 raw bytes of blackbox record `index` (command 0xDE)."""
    write_data = [0xDE, 0x01, index]
    read_data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, 65)
    return (read_data)
# Walk the variable-length sections of the system data (pads, rails, states,
# signals), each prefixed with a VLQ-encoded length, and dispatch each section
# to its parser.
def System_Parse():
    """Read every device's system data, then parse rails, signals and states.

    NOTE(review): VLQ_Decode and the *_Parse helpers are defined elsewhere in
    this module (outside the visible chunk) - confirm signatures.
    """
    for i in range(len(ADM1266_Address)):
        System_Read(ADM1266_Address[i])
    # Sections are laid out sequentially; each (length, pointer) pair locates
    # the next section's header one byte past the previous section's end.
    next_pointer = 42
    (PadData_length, PadData_pointer) = VLQ_Decode(next_pointer)
    next_pointer = PadData_pointer + PadData_length + 1
    (RailData_length, RailData_pointer) = VLQ_Decode(next_pointer)
    next_pointer = RailData_pointer + RailData_length + 1
    (StateData_length, StateData_pointer) = VLQ_Decode(next_pointer)
    next_pointer = StateData_pointer + StateData_length + 1
    (SignalData_length, SignalData_pointer) = VLQ_Decode(next_pointer)
    Rail_Parse(RailData_length, RailData_pointer)
    Signal_Parse(SignalData_length, SignalData_pointer)
    State_Parse(StateData_length, StateData_pointer)
# parse the Blackbox record, from raw data to filling out lists summary, rails and signals status
def BB_Parse():
    """Decode the raw records in BB_Data into Summary_Data and the rail/signal tables."""
    # Record header fields are taken from the first device's record.
    Summary_Data[1] = "Record ID : " + str(Blackbox_ID(BB_Data[0][1:3]))
    Summary_Data[2] = "Power-up Counter : " + str(Blackbox_ID(BB_Data[0][23:25]))
    Summary_Data[3] = "Time : " + RTS(BB_Data[0][25:32])
    # State ids are 1-based little-endian 16-bit values; subtract 1 to index State_Names.
    Summary_Data[4] = "Trigger Source : Enable Blackbox[" + str(BB_Data[0][4]) + "] in '" + State_Names[
        (BB_Data[0][8] * 256) + BB_Data[0][7] - 1] + "' state"
    Summary_Data[5] = "Previous State : " + State_Names[(BB_Data[0][10] * 256) + BB_Data[0][9] - 1]
    # Per-device payloads live at fixed byte offsets inside each record.
    for i in range(len(ADM1266_Address)):
        VH_BB_Data(BB_Data[i][6], i)
        VP_BB_Data(BB_Data[i][11:15], i)
        PDIO_Rail_BB_Data(BB_Data[i][21:23], i)
        PDIO_Signal_BB_Input_Data(BB_Data[i][19:21], i)
        GPIO_Signal_BB_Input_Data(BB_Data[i][15:17], i)
        GPIO_Signal_BB_Output_Data(BB_Data[i][17:19], i)
    Rails_Status()
    Signals_Status_Fill()
def Blackbox_ID(data):
    """Decode a little-endian 16-bit value from two raw bytes."""
    low, high = data[0], data[1]
    return low | (high << 8)
def Powerup_Count(data):
    """Decode the little-endian 16-bit power-up counter from two raw bytes."""
    low, high = data[0], data[1]
    return low | (high << 8)
def RTS(data):
    """Convert a blackbox timestamp (tick-counter bytes) to a readable string.

    :param data: raw counter bytes; bytes 2-5 carry the whole-seconds part
        (bytes 0-1 are presumably the sub-second fraction and are dropped).
    :return: a UTC date string when the counter looks like host-set wall-clock
        time (> ~10 years of seconds), otherwise an elapsed-time string.
    """
    Calculated_Value = 0
    # Bytes 2..5 are weighted 2**16..2**40; the 1/(32768*2) == 2**-16 scale
    # below cancels the 2**16 offset, yielding whole seconds of the 65536 Hz
    # tick counter.
    for i in range(2, 6, 1):
        Calculated_Value = Calculated_Value + (data[i] * (2 ** (8 * i)))
    Calculated_Value = Calculated_Value * (1 / (32768 * 2))
    if Calculated_Value > 315360000:
        # Large value: the host has programmed real time; show it as a UTC date.
        Calculated_Value = str(datetime.datetime.utcfromtimestamp(Calculated_Value))
    else:
        # Small value: only time since power-up is known; show a duration.
        Calculated_Value = str(datetime.timedelta(seconds=Calculated_Value))
    return Calculated_Value
def VP_BB_Data(data, device):
    """Unpack the recorded VPx OV/UV fault bitmaps into VP_Data[...][4]/[5]."""
    ov_word = data[0] | (data[1] << 8)
    uv_word = data[2] | (data[3] << 8)
    # Bit b of each word corresponds to VP pin b+1 (13 VP channels).
    for bit in range(13):
        VP_Data[device][bit + 1][4] = (ov_word >> bit) & 1
        VP_Data[device][bit + 1][5] = (uv_word >> bit) & 1
def VH_BB_Data(data, device):
    """Unpack the recorded VHx OV/UV fault bits (one packed byte) into VH_Data."""
    # Bits 0-3 are the OV flags for VH1-VH4, bits 4-7 the matching UV flags.
    for bit in range(4):
        VH_Data[device][bit + 1][4] = (data >> bit) & 1
        VH_Data[device][bit + 1][5] = (data >> (bit + 4)) & 1
def PDIO_Rail_BB_Data(data, device):
    """Distribute one device's recorded PDIO state word to the rail/signal tables.

    :param data: two bytes forming a little-endian 16-bit PDIO state word.
    :param device: index of the device the word came from.
    """
    temp = [int(x) for x in bin(data[0] + (256 * data[1]))[2:].zfill(16)]
    temp.reverse()  # temp[i] is now bit i of the word, i.e. PDIO pin i+1
    for i in range(0, 16, 1):
        # A rail on any device can be gated by this device's PDIO pin, so scan
        # every device's VH/VP tables for a matching (pin, device) pair and
        # record the enable-pin state in column 6.
        for j in range(len(ADM1266_Address)):
            for k in range(1, 5, 1):
                if (VH_Data[j][k][1] == i + 1 and VH_Data[j][k][2] == device):
                    VH_Data[j][k][6] = temp[i]
            for k in range(1, 14, 1):
                if (VP_Data[j][k][1] == i + 1 and VP_Data[j][k][2] == device):
                    VP_Data[j][k][6] = temp[i]
        for n in range(0, 25, 1):
            # NOTE(review): the type check here is == 1 (GPIO per
            # PDIO_GPIO_Global_Index), while the live-data path
            # (PDIO_Rail_Inst_Data) checks == 0 for PDIO — confirm intended.
            if Signals_Data[device][n][2] == 1 and Signals_Data[device][n][1] == i + 1:
                Signals_Data[device][n][5] = temp[i]
def PDIO_Signal_BB_Input_Data(data, device):
    """Unpack the recorded PDIO input-state word into Signals_Data[...][4].

    :param data: two bytes forming a little-endian 16-bit PDIO input word.
    :param device: index of the device the word came from.
    """
    temp = [int(x) for x in bin(data[0] + (256 * data[1]))[2:].zfill(16)]
    temp.reverse()  # temp[i] is bit i of the word, i.e. PDIO pin i+1
    for i in range(0, 16, 1):
        for n in range(0, 25, 1):
            # NOTE(review): the type check is == 1 (GPIO per
            # PDIO_GPIO_Global_Index), while the live-data path
            # (Get_Signal_Current_Data) checks == 0 for PDIO pins —
            # confirm which is intended.
            if Signals_Data[device][n][2] == 1 and Signals_Data[device][n][1] == i + 1:
                Signals_Data[device][n][4] = temp[i]
def GPIO_map(data):
    """Reorder raw GPIO status bits into logical GPIO order (mutates global GPIO)."""
    # Raw-word bit positions that correspond to logical GPIO slots 0..8.
    source_bits = (0, 1, 2, 8, 9, 10, 11, 6, 7)
    for dest, src in enumerate(source_bits):
        GPIO[dest] = data[src]
    return GPIO
def GPIO_Signal_BB_Input_Data(data, device):
    """Unpack the recorded GPIO input-state word into Signals_Data[...][4]."""
    word = data[0] | (data[1] << 8)
    bits = [(word >> b) & 1 for b in range(16)]
    mapped = GPIO_map(bits)  # reorder hardware bits into logical GPIO order
    for gpio in range(10):
        for n in range(25):
            if Signals_Data[device][n][2] == 1 and Signals_Data[device][n][1] == gpio + 1:
                Signals_Data[device][n][4] = mapped[gpio]
def GPIO_Signal_BB_Output_Data(data, device):
    """Unpack the recorded GPIO output-state word into Signals_Data[...][5]."""
    word = data[0] | (data[1] << 8)
    bits = [(word >> b) & 1 for b in range(16)]
    mapped = GPIO_map(bits)  # reorder hardware bits into logical GPIO order
    for gpio in range(10):
        for n in range(25):
            if Signals_Data[device][n][2] == 1 and Signals_Data[device][n][1] == gpio + 1:
                Signals_Data[device][n][5] = mapped[gpio]
def Signals_Status_Fill():
    """Rebuild the global Signals_Status list of readable blackbox signal states."""
    del Signals_Status[:]
    for dev in range(len(ADM1266_Address)):
        for n in range(25):
            signal = Signals_Data[dev][n]
            if signal[0] != 0:  # only configured (named) signals
                i_val = "High" if signal[4] == 1 else "Low"
                o_val = "High" if signal[5] == 1 else "Low"
                Signals_Status.append(
                    str(signal[0]) + " - Input Value : " + i_val + " - Output Value : " + o_val)
def Rails_Status():
    """Classify every configured rail from blackbox data into OV/UV/Normal/Disabled lists.

    Uses columns: [0] name, [1] enable-pin number (0 = none), [3] enable
    polarity, [4] OV flag, [5] UV flag, [6] recorded enable-pin state.
    """
    del OV_Rails[:]
    del UV_Rails[:]
    del Normal_Rails[:]
    del Disabled_Rails[:]
    for i in range(len(ADM1266_Address)):
        # VH1-VH4 rails.
        for j in range(1, 5, 1):
            if VH_Data[i][j][0] != 0:  # rail is configured (has a name)
                if VH_Data[i][j][1] == 0:
                    # No enable pin mapped: classify from fault bits alone.
                    if (VH_Data[i][j][4] == 1):
                        OV_Rails.append(str(VH_Data[i][j][0]) + " : OV ")
                    if (VH_Data[i][j][5] == 1):
                        UV_Rails.append(str(VH_Data[i][j][0]) + " : UV ")
                    if (VH_Data[i][j][4] == 0 and VH_Data[i][j][5] == 0):
                        Normal_Rails.append(str(VH_Data[i][j][0]) + " : Normal ")
                else:
                    # Enable pin mapped: faults count only while the rail was
                    # enabled (recorded pin state equals the enable polarity).
                    if (VH_Data[i][j][4] == 1 and VH_Data[i][j][3] == VH_Data[i][j][6]):
                        OV_Rails.append(str(VH_Data[i][j][0]) + " : OV ")
                    if (VH_Data[i][j][5] == 1 and VH_Data[i][j][3] == VH_Data[i][j][6]):
                        UV_Rails.append(str(VH_Data[i][j][0]) + " : UV ")
                    if (VH_Data[i][j][3] != VH_Data[i][j][6]):
                        Disabled_Rails.append(str(VH_Data[i][j][0]) + " : Disabled ")
                    if (VH_Data[i][j][4] == 0 and VH_Data[i][j][5] == 0 and VH_Data[i][j][3] == VH_Data[i][j][6]):
                        Normal_Rails.append(str(VH_Data[i][j][0]) + " : Normal ")
        # VP1-VP13 rails: same logic as VH above.
        for j in range(1, 14, 1):
            if VP_Data[i][j][0] != 0:
                if VP_Data[i][j][1] == 0:
                    if (VP_Data[i][j][4] == 1):
                        OV_Rails.append(str(VP_Data[i][j][0]) + " : OV ")
                    if (VP_Data[i][j][5] == 1):
                        UV_Rails.append(str(VP_Data[i][j][0]) + " : UV ")
                    if (VP_Data[i][j][4] == 0 and VP_Data[i][j][5] == 0):
                        Normal_Rails.append(str(VP_Data[i][j][0]) + " : Normal ")
                else:
                    if (VP_Data[i][j][4] == 1 and VP_Data[i][j][3] == VP_Data[i][j][6]):
                        OV_Rails.append(str(VP_Data[i][j][0]) + " : OV ")
                    if (VP_Data[i][j][5] == 1 and VP_Data[i][j][3] == VP_Data[i][j][6]):
                        UV_Rails.append(str(VP_Data[i][j][0]) + " : UV ")
                    if (VP_Data[i][j][3] != VP_Data[i][j][6]):
                        Disabled_Rails.append(str(VP_Data[i][j][0]) + " : Disabled ")
                    if (VP_Data[i][j][4] == 0 and VP_Data[i][j][5] == 0 and VP_Data[i][j][3] == VP_Data[i][j][6]):
                        Normal_Rails.append(str(VP_Data[i][j][0]) + " : Normal ")
# NOTE(review): this definition is shadowed by the later VP_Status(address, page)
# further down the file, so it is dead code as the module stands.
def VP_Status(data, device):
    """Append VP rail OV/UV/Normal strings decoded from two 13-bit status words."""
    tempov = [int(x) for x in bin(data[0] + (256 * data[1]))[2:].zfill(13)]
    tempov.reverse()  # tempov[i] is the OV bit for VP pin i+1
    tempuv = [int(x) for x in bin(data[2] + (256 * data[3]))[2:].zfill(13)]
    tempuv.reverse()  # tempuv[i] is the UV bit for VP pin i+1
    for i in range(0, 13, 1):
        if tempov[i] == 0 and tempuv[i] == 0:
            Normal_Rails.append(str(VP_Data[device][i + 1][0]) + " : Normal ")
        else:
            if tempov[i] == 1:
                OV_Rails.append(str(VP_Data[device][i + 1][0]) + " : OV ")
            if tempuv[i] == 1:
                UV_Rails.append(str(VP_Data[device][i + 1][0]) + " : UV ")
def List_to_String(data):
    """Convert a list of character codes into a string."""
    return "".join(chr(code) for code in data)
def VLQ_Decode(index):
    """Decode a little-endian variable-length quantity from global System_Data.

    Each byte contributes its low 7 bits; a set bit 7 means another byte
    follows. Byte j carries weight 128**j (least-significant byte first).

    :param index: offset of the first VLQ byte in System_Data.
    :return: tuple (decoded value, offset of the byte after the VLQ).
    """
    i = index
    j = 0
    value = 0
    while System_Data[i] > 127:  # continuation bit set: more bytes follow
        # Fix: the original used "* 128 * j", which only equals 128**j for
        # j <= 1 and mis-decodes VLQs of three or more bytes.
        value += (System_Data[i] & 127) * (128 ** j)
        i += 1
        j += 1
    value += (System_Data[i] & 127) * (128 ** j)
    return (value, i + 1)
def Rail_Parse(RailData_length, RailData_pointer):
    """Populate VH_Data / VP_Data rail definitions from the rail config section.

    :param RailData_length: length of the rail section in System_Data.
    :param RailData_pointer: offset of the rail section in System_Data.
    """
    next_pointer = RailData_pointer
    # Leading VLQ is skipped (presumably the section item count — TODO confirm).
    (temp, next_pointer) = VLQ_Decode(next_pointer)
    while next_pointer < (RailData_pointer + RailData_length):
        # Each record: name, enable-pad id, voltage-pad id, two skipped
        # fields, then a flags field whose bit 0 is the enable polarity.
        (name_length, next_pointer) = VLQ_Decode(next_pointer)
        Rail_Name = List_to_String(System_Data[next_pointer:(next_pointer + name_length)])
        next_pointer += name_length
        (temp, next_pointer) = VLQ_Decode(next_pointer)
        (PDIO_GPIO_Num, PDIO_GPIO_Type, PDIO_GPIO_dev_id) = PDIO_GPIO_Global_Index(temp)
        (temp, next_pointer) = VLQ_Decode(next_pointer)
        (VX_Num, VX_Type, VX_dev_id) = VX_Global_Index(temp)
        (temp, next_pointer) = VLQ_Decode(next_pointer)
        (temp, next_pointer) = VLQ_Decode(next_pointer)
        (temp, next_pointer) = VLQ_Decode(next_pointer)
        PDIO_GPIO_Polarity = temp & 0x01
        #if PDIO_GPIO_Type == 0:
        if VX_Type == 0:
            # VH rail: store name, enable pin, enable device and polarity.
            VH_Data[VX_dev_id][VX_Num][0] = Rail_Name
            VH_Data[VX_dev_id][VX_Num][1] = PDIO_GPIO_Num
            VH_Data[VX_dev_id][VX_Num][2] = PDIO_GPIO_dev_id
            VH_Data[VX_dev_id][VX_Num][3] = PDIO_GPIO_Polarity
        else:
            # VP rail: same layout in the VP table.
            VP_Data[VX_dev_id][VX_Num][0] = Rail_Name
            VP_Data[VX_dev_id][VX_Num][1] = PDIO_GPIO_Num
            VP_Data[VX_dev_id][VX_Num][2] = PDIO_GPIO_dev_id
            VP_Data[VX_dev_id][VX_Num][3] = PDIO_GPIO_Polarity
def Signal_Parse(SignalData_length, SignalData_pointer):
    """Populate the global Signals_Data table from the signal config section.

    :param SignalData_length: length of the signal section in System_Data.
    :param SignalData_pointer: offset of the signal section in System_Data.
    """
    next_pointer = SignalData_pointer
    # Leading VLQ is skipped (presumably the section item count — TODO confirm).
    (temp, next_pointer) = VLQ_Decode(next_pointer)
    i = 0
    while next_pointer < (SignalData_pointer + SignalData_length):
        # Each record: name, pad id (encodes pin/type/device), direction.
        (name_length, next_pointer) = VLQ_Decode(next_pointer)
        Signal_Name = List_to_String(System_Data[next_pointer:(next_pointer + name_length)])
        next_pointer += name_length
        (temp, next_pointer) = VLQ_Decode(next_pointer)
        (PDIO_GPIO_Num, PDIO_GPIO_Type, PDIO_GPIO_dev_id) = PDIO_GPIO_Global_Index(temp)
        (temp, next_pointer) = VLQ_Decode(next_pointer)
        Signal_Direction = temp
        # Columns: [0] name, [1] pin number, [2] type (0 PDIO / 1 GPIO),
        # [3] direction.
        Signals_Data[PDIO_GPIO_dev_id][i][0] = Signal_Name
        Signals_Data[PDIO_GPIO_dev_id][i][1] = PDIO_GPIO_Num
        Signals_Data[PDIO_GPIO_dev_id][i][2] = PDIO_GPIO_Type
        Signals_Data[PDIO_GPIO_dev_id][i][3] = Signal_Direction
        i += 1
def State_Parse(StateData_length, StateData_pointer):
    """Append the sequencing state names from the config section to State_Names."""
    cursor = StateData_pointer
    (_, cursor) = VLQ_Decode(cursor)  # skip the leading section VLQ
    end = StateData_pointer + StateData_length
    while cursor < end:
        (name_length, cursor) = VLQ_Decode(cursor)
        State_Names.append(List_to_String(System_Data[cursor:cursor + name_length]))
        cursor += name_length
def PDIO_GPIO_Global_Index(data):
    """Translate an encoded pad id into (pin number, pin type, device id).

    The high byte of ``data`` encodes the device index, the low byte the
    physical pad number looked up in PDIO_GPIO_Pad.
    """
    dev_id = (data & 0xFF00) >> 8
    pin = PDIO_GPIO_Pad.index(data & 0xFF)
    pin_type = 0  # 0 for PDIO, 1 for GPIO
    if pin > 16:
        # Entries past the 16 PDIO slots are GPIO pins.
        pin = pin - 16
        pin_type = 1
    return (pin, pin_type, dev_id)
def VX_Global_Index(data):
    """Translate an encoded voltage-pad id into (pin number, pin type, device id).

    The high byte of ``data`` encodes the device index, the low byte the
    physical pad number looked up in VX_Pad.
    """
    dev_id = (data & 0xFF00) >> 8
    pin = VX_Pad.index(data & 0xFF)
    pin_type = 0  # 0 for VH, 1 for VP
    if pin > 4:
        # Entries past the 4 VH slots are VP pins.
        pin = pin - 4
        pin_type = 1
    return (pin, pin_type, dev_id)
# Live ("instantaneous") telemetry result lists, rebuilt by Rails_I_Status()
# and Signals_I_Status_Fill() on every refresh.
Normal_I_Rails = list()    # rails currently in range
Disabled_I_Rails = list()  # rails whose enable pin shows them switched off
OV_I_Rails = list()        # overvoltage faults
UV_I_Rails = list()        # undervoltage faults
OVW_I_Rails = list()       # overvoltage warnings
UVW_I_Rails = list()       # undervoltage warnings
Signals_I_Status = list()  # digital signal states
def Exp_Calc(data):
    """Decode a 5-bit two's-complement exponent field (values 16-31 are negative)."""
    return data if data < 16 else data - 32
def VOUT_Status(data):
    """Split a STATUS_VOUT byte into (OV fault, UV fault, OV warn, UV warn) flags.

    True division is kept deliberately so each flag is 0.0/1.0, matching the
    original behaviour.
    """
    ov_fault = (data & 0x80) / 0x80
    ov_warn = (data & 0x40) / 0x40
    uv_warn = (data & 0x20) / 0x20
    uv_fault = (data & 0x10) / 0x10
    return (ov_fault, uv_fault, ov_warn, uv_warn)
def PDIO_Rail_Inst_Data(data, device):
    """Distribute one device's live PDIO state word to the rail/signal tables.

    :param data: raw read where bytes 1-2 form a little-endian PDIO word
        (byte 0 is presumably the PMBus byte count — TODO confirm).
    :param device: index of the device the word came from.
    """
    temp = [int(x) for x in bin(data[1] + (256 * data[2]))[2:].zfill(16)]
    temp.reverse()  # temp[i] is bit i of the word, i.e. PDIO pin i+1
    for i in range(0, 16, 1):
        # Record the live enable-pin state (column 14) for any rail on any
        # device that is gated by this device's PDIO pin.
        for j in range(len(ADM1266_Address)):
            for k in range(1, 5, 1):
                if (VH_Data[j][k][1] == i + 1 and VH_Data[j][k][2] == device):
                    VH_Data[j][k][14] = temp[i]
            for k in range(1, 14, 1):
                if (VP_Data[j][k][1] == i + 1 and VP_Data[j][k][2] == device):
                    VP_Data[j][k][14] = temp[i]
        for n in range(0, 25, 1):
            # Live value of PDIO-type signals goes to column 6.
            if Signals_Data[device][n][2] == 0 and Signals_Data[device][n][1] == i + 1:
                Signals_Data[device][n][6] = temp[i]
def GPIO_Signal_Inst_Data(data, device):
    """Unpack one device's live GPIO state word into Signals_Data[...][6]."""
    word = data[1] + (256 * data[2])
    bits = [(word >> b) & 1 for b in range(16)]
    mapped = GPIO_map(bits)  # reorder hardware bits into logical GPIO order
    for gpio in range(10):
        for n in range(25):
            if Signals_Data[device][n][2] == 1 and Signals_Data[device][n][1] == gpio + 1:
                Signals_Data[device][n][6] = mapped[gpio]
def Get_Rail_Current_Data(address, page):
    """Read the live voltage, status and name of one rail.

    :param address: index into ADM1266_Address.
    :param page: PMBus page; 0-3 select VH1-VH4, 4-16 select VP1-VP13.
    :return: tuple (voltage rounded to 3 decimals, status code, rail name).
    """
    # Refresh live PDIO (0xE9) and GPIO (0xEA) data for every device first,
    # since a rail's enable pin may live on another device.
    for i in range(len(ADM1266_Address)):
        write_data = [0xE9]
        read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[i], write_data, 3)
        PDIO_Rail_Inst_Data(read_data, i)
        write_data = [0xEA]
        read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[i], write_data, 3)
        GPIO_Signal_Inst_Data(read_data, i)
    # Select the PMBus PAGE (command 0x00) for the requested rail.
    write_data = [0x00, page]
    read_data = PMBus_I2C.PMBus_Write(ADM1266_Address[address], write_data)
    # STATUS_VOUT (0x7A): OV/UV fault and warning flags.
    write_data = [0x7A]
    read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[address], write_data, 2)
    if page < 4:
        (VH_Data[address][page + 1][10], VH_Data[address][page + 1][11], VH_Data[address][page + 1][12],
         VH_Data[address][page + 1][13]) = VOUT_Status(read_data[0])
        status = VH_Status(address, page + 1)
    else:
        (VP_Data[address][page - 3][10], VP_Data[address][page - 3][11], VP_Data[address][page - 3][12],
         VP_Data[address][page - 3][13]) = VOUT_Status(read_data[0])
        status = VP_Status(address, page - 3)
    # VOUT_MODE (0x20): 5-bit two's-complement exponent for the reading.
    write_data = [0x20]
    read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[address], write_data, 2)
    if page < 4:
        VH_Data[address][page + 1][8] = Exp_Calc(read_data[0])
    else:
        VP_Data[address][page - 3][8] = Exp_Calc(read_data[0])
    # READ_VOUT (0x8B): 16-bit mantissa; voltage = mantissa * 2**exponent.
    write_data = [0x8B]
    read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[address], write_data, 3)
    if page < 4:
        VH_Data[address][page + 1][9] = read_data[0] + (read_data[1] * 256)
        value = VH_Data[address][page + 1][9] * (2 ** VH_Data[address][page + 1][8])
        name = VH_Data[address][page + 1][0]
    else:
        VP_Data[address][page - 3][9] = read_data[0] + (read_data[1] * 256)
        value = VP_Data[address][page - 3][9] * (2 ** VP_Data[address][page - 3][8])
        name = VP_Data[address][page - 3][0]
    return (round(value, 3), status, name)
def Get_Signal_Current_Data(address, index):
    """Return the live (state, name) of one digital signal on one device.

    :param address: index into ADM1266_Address.
    :param index: 0-15 select PDIO1-16, 16 and above select GPIO pins.
    :return: tuple (pin state 0/1, configured signal name, or 0 if unnamed).
    """
    status = 0
    name = 0
    if index < 16:
        # PDIO pin: refresh this device's live PDIO data (command 0xE9).
        write_data = [0xE9]
        read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[address], write_data, 3)
        PDIO_Rail_Inst_Data(read_data, address)
        index = index + 1  # convert to 1-based pin number
        for n in range(0, 25, 1):
            if Signals_Data[address][n][2] == 0 and Signals_Data[address][n][1] == (index) and Signals_Data[address][n][0] != 0:
                status = Signals_Data[address][n][6]
                name = Signals_Data[address][n][0]
        if name == 0:
            # Pin is not a configured signal: read its raw bit directly.
            temp = [int(x) for x in bin(read_data[1] + (256 * read_data[2]))[2:].zfill(16)]
            temp.reverse()
            status = temp[index - 1]
    else:
        # GPIO pin: refresh this device's live GPIO data (command 0xEA).
        write_data = [0xEA]
        read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[address], write_data, 3)
        GPIO_Signal_Inst_Data(read_data, address)
        index = index - 15  # convert to 1-based GPIO number
        for n in range(0, 25, 1):
            if Signals_Data[address][n][2] == 1 and Signals_Data[address][n][1] == (index) and Signals_Data[address][n][0] != 0:
                status = Signals_Data[address][n][6]
                name = Signals_Data[address][n][0]
        if name == 0:
            # Unconfigured GPIO: take the raw bit through the GPIO reordering map.
            temp = [int(x) for x in bin(read_data[1] + (256 * read_data[2]))[2:].zfill(16)]
            temp.reverse()
            temp = GPIO_map(temp)
            status = temp[index - 1]
    return (status, name)
def Get_Current_Data():
    """Refresh the live voltage, status and pin data for every device.

    Command bytes (presumed ADM1266 bulk-telemetry reads — confirm against
    the datasheet): 0xE8 voltages (mantissas then exponents), 0xE7 per-rail
    STATUS_VOUT bytes, 0xE9 PDIO states, 0xEA GPIO states.
    """
    for i in range(len(ADM1266_Address)):
        k = 1  # skip the PMBus block-read byte count at read_data[0]
        write_data = [0xE8]
        read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[i], write_data, 52)
        # 16-bit mantissas for VH1-4 then VP1-13; exponents follow at the tail.
        for j in range(1, 5, 1):
            VH_Data[i][j][9] = read_data[k] + (read_data[k + 1] * 256)
            VH_Data[i][j][8] = Exp_Calc(read_data[j + 34])
            k += 2
        for j in range(1, 14, 1):
            VP_Data[i][j][9] = read_data[k] + (read_data[k + 1] * 256)
            VP_Data[i][j][8] = Exp_Calc(read_data[j + 38])
            k += 2
        k = 1
        write_data = [0xE7]
        read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[i], write_data, 18)
        # One status byte per rail, VH1-4 then VP1-13.
        for j in range(1, 5, 1):
            (VH_Data[i][j][10], VH_Data[i][j][11], VH_Data[i][j][12], VH_Data[i][j][13]) = VOUT_Status(read_data[k])
            k += 1
        for j in range(1, 14, 1):
            (VP_Data[i][j][10], VP_Data[i][j][11], VP_Data[i][j][12], VP_Data[i][j][13]) = VOUT_Status(read_data[k])
            k += 1
        write_data = [0xE9]
        read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[i], write_data, 3)
        PDIO_Rail_Inst_Data(read_data, i)
        write_data = [0xEA]
        read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[i], write_data, 3)
        GPIO_Signal_Inst_Data(read_data, i)
def VH_Status(address, page):
    """Return a status code for one VH rail from its live data.

    Codes: 0 normal, 1 disabled, 2 UV warning, 3 OV warning, 4 UV fault,
    5 OV fault. Later checks overwrite earlier ones, so the OV fault check
    (written first, result 5) has the LOWEST effective priority.
    Columns: [10] OV fault, [11] UV fault, [12] OV warn, [13] UV warn,
    [1] enable-pin number (0 = none), [3] polarity, [14] live pin state.
    """
    result = 0
    if VH_Data[address][page][1] == 0:
        # No enable pin mapped: classify from the status flags alone.
        if (VH_Data[address][page][10] == 1):
            result = 5
        if (VH_Data[address][page][11] == 1):
            result = 4
        if (VH_Data[address][page][12] == 1):
            result = 3
        if (VH_Data[address][page][13] == 1):
            result = 2
        if (VH_Data[address][page][10] == 0 and VH_Data[address][page][11] == 0 and VH_Data[address][page][12] == 0 and
                VH_Data[address][page][13] == 0):
            result = 0
    else:
        # Enable pin mapped: flags count only while the rail is enabled
        # (live pin state equals the configured polarity).
        if (VH_Data[address][page][10] == 1 and VH_Data[address][page][3] == VH_Data[address][page][14]):
            result = 5
        if (VH_Data[address][page][11] == 1 and VH_Data[address][page][3] == VH_Data[address][page][14]):
            result = 4
        if (VH_Data[address][page][12] == 1 and VH_Data[address][page][3] == VH_Data[address][page][14]):
            result = 3
        if (VH_Data[address][page][13] == 1 and VH_Data[address][page][3] == VH_Data[address][page][14]):
            result = 2
        if (VH_Data[address][page][3] != VH_Data[address][page][14]):
            result = 1
        if (VH_Data[address][page][10] == 0 and VH_Data[address][page][11] == 0 and VH_Data[address][page][3] ==
                VH_Data[address][page][14]):
            result = 0
    return (result)
def VP_Status(address, page):
    """Return a status code for one VP rail from its live data.

    Same scheme as VH_Status: 0 normal, 1 disabled, 2 UV warning,
    3 OV warning, 4 UV fault, 5 OV fault (later checks overwrite earlier).
    NOTE: this definition shadows the earlier VP_Status(data, device).
    """
    result = 0
    if VP_Data[address][page][1] == 0:
        # No enable pin mapped: classify from the status flags alone.
        if (VP_Data[address][page][10] == 1):
            result = 5
        if (VP_Data[address][page][11] == 1):
            result = 4
        if (VP_Data[address][page][12] == 1):
            result = 3
        if (VP_Data[address][page][13] == 1):
            result = 2
        if (VP_Data[address][page][10] == 0 and VP_Data[address][page][11] == 0 and VP_Data[address][page][12] == 0 and
                VP_Data[address][page][13] == 0):
            result = 0
    else:
        # Enable pin mapped: flags count only while the rail is enabled.
        if (VP_Data[address][page][10] == 1 and VP_Data[address][page][3] == VP_Data[address][page][14]):
            result = 5
        if (VP_Data[address][page][11] == 1 and VP_Data[address][page][3] == VP_Data[address][page][14]):
            result = 4
        if (VP_Data[address][page][12] == 1 and VP_Data[address][page][3] == VP_Data[address][page][14]):
            result = 3
        if (VP_Data[address][page][13] == 1 and VP_Data[address][page][3] == VP_Data[address][page][14]):
            result = 2
        if (VP_Data[address][page][3] != VP_Data[address][page][14]):
            result = 1
        if (VP_Data[address][page][10] == 0 and VP_Data[address][page][11] == 0 and VP_Data[address][page][3] ==
                VP_Data[address][page][14]):
            result = 0
    return (result)
def Rails_I_Status():
    """Classify every configured rail from LIVE data into the *_I_Rails lists.

    Mirrors Rails_Status() but uses live columns: [8] exponent, [9] mantissa,
    [10] OV fault, [11] UV fault, [12] OV warn, [13] UV warn, [14] live
    enable-pin state. Each entry string includes the measured voltage.
    """
    del OV_I_Rails[:]
    del UV_I_Rails[:]
    del OVW_I_Rails[:]
    del UVW_I_Rails[:]
    del Normal_I_Rails[:]
    del Disabled_I_Rails[:]
    for i in range(len(ADM1266_Address)):
        # VH1-VH4 rails.
        for j in range(1, 5, 1):
            if VH_Data[i][j][0] != 0:  # configured rail
                temp = VH_Data[i][j][9] * (2 ** VH_Data[i][j][8])  # volts
                if VH_Data[i][j][1] == 0:
                    # No enable pin: classify from the live flags alone.
                    if (VH_Data[i][j][10] == 1):
                        OV_I_Rails.append(str(VH_Data[i][j][0]) + " : OV Fault - " + str(round(temp, 3)) + "V")
                    if (VH_Data[i][j][11] == 1):
                        UV_I_Rails.append(str(VH_Data[i][j][0]) + " : UV Fault - " + str(round(temp, 3)) + "V")
                    if (VH_Data[i][j][12] == 1):
                        OVW_I_Rails.append(str(VH_Data[i][j][0]) + " : OV Warning - " + str(round(temp, 3)) + "V")
                    if (VH_Data[i][j][13] == 1):
                        UVW_I_Rails.append(str(VH_Data[i][j][0]) + " : UV Warning - " + str(round(temp, 3)) + "V")
                    if (VH_Data[i][j][10] == 0 and VH_Data[i][j][11] == 0 and VH_Data[i][j][12] == 0 and VH_Data[i][j][
                            13] == 0):
                        Normal_I_Rails.append(str(VH_Data[i][j][0]) + " : Normal - " + str(round(temp, 3)) + "V")
                else:
                    # Enable pin mapped: flags count only while enabled.
                    if (VH_Data[i][j][10] == 1 and VH_Data[i][j][3] == VH_Data[i][j][14]):
                        OV_I_Rails.append(str(VH_Data[i][j][0]) + " : OV Fault - " + str(round(temp, 3)) + "V")
                    if (VH_Data[i][j][11] == 1 and VH_Data[i][j][3] == VH_Data[i][j][14]):
                        UV_I_Rails.append(str(VH_Data[i][j][0]) + " : UV Fault - " + str(round(temp, 3)) + "V")
                    if (VH_Data[i][j][12] == 1 and VH_Data[i][j][3] == VH_Data[i][j][14]):
                        OVW_I_Rails.append(str(VH_Data[i][j][0]) + " : OV Warning - " + str(round(temp, 3)) + "V")
                    if (VH_Data[i][j][13] == 1 and VH_Data[i][j][3] == VH_Data[i][j][14]):
                        UVW_I_Rails.append(str(VH_Data[i][j][0]) + " : UV Warning - " + str(round(temp, 3)) + "V")
                    if (VH_Data[i][j][3] != VH_Data[i][j][14]):
                        Disabled_I_Rails.append(str(VH_Data[i][j][0]) + " : Disabled - " + str(round(temp, 3)) + "V")
                    if (VH_Data[i][j][10] == 0 and VH_Data[i][j][11] == 0 and VH_Data[i][j][3] == VH_Data[i][j][14]):
                        Normal_I_Rails.append(str(VH_Data[i][j][0]) + " : Normal - " + str(round(temp, 3)) + "V")
        # VP1-VP13 rails: same logic as VH above.
        for j in range(1, 14, 1):
            if VP_Data[i][j][0] != 0:
                temp = VP_Data[i][j][9] * (2 ** VP_Data[i][j][8])  # volts
                if VP_Data[i][j][1] == 0:
                    if (VP_Data[i][j][10] == 1):
                        OV_I_Rails.append(str(VP_Data[i][j][0]) + " : OV Fault - " + str(round(temp, 3)) + "V")
                    if (VP_Data[i][j][11] == 1):
                        UV_I_Rails.append(str(VP_Data[i][j][0]) + " : UV Fault - " + str(round(temp, 3)) + "V")
                    if (VP_Data[i][j][12] == 1):
                        OVW_I_Rails.append(str(VP_Data[i][j][0]) + " : OV Warning - " + str(round(temp, 3)) + "V")
                    if (VP_Data[i][j][13] == 1):
                        UVW_I_Rails.append(str(VP_Data[i][j][0]) + " : UV Warning - " + str(round(temp, 3)) + "V")
                    if (VP_Data[i][j][10] == 0 and VP_Data[i][j][11] == 0 and VP_Data[i][j][12] == 0 and VP_Data[i][j][
                            13] == 0):
                        Normal_I_Rails.append(str(VP_Data[i][j][0]) + " : Normal - " + str(round(temp, 3)) + "V")
                else:
                    if (VP_Data[i][j][10] == 1 and VP_Data[i][j][3] == VP_Data[i][j][14]):
                        OV_I_Rails.append(str(VP_Data[i][j][0]) + " : OV Fault - " + str(round(temp, 3)) + "V")
                    if (VP_Data[i][j][11] == 1 and VP_Data[i][j][3] == VP_Data[i][j][14]):
                        UV_I_Rails.append(str(VP_Data[i][j][0]) + " : UV Fault - " + str(round(temp, 3)) + "V")
                    if (VP_Data[i][j][12] == 1 and VP_Data[i][j][3] == VP_Data[i][j][14]):
                        OVW_I_Rails.append(str(VP_Data[i][j][0]) + " : OV Warning - " + str(round(temp, 3)) + "V")
                    if (VP_Data[i][j][13] == 1 and VP_Data[i][j][3] == VP_Data[i][j][14]):
                        UVW_I_Rails.append(str(VP_Data[i][j][0]) + " : UV Warning - " + str(round(temp, 3)) + "V")
                    if (VP_Data[i][j][3] != VP_Data[i][j][14]):
                        Disabled_I_Rails.append(str(VP_Data[i][j][0]) + " : Disabled - " + str(round(temp, 3)) + "V")
                    if (VP_Data[i][j][10] == 0 and VP_Data[i][j][11] == 0 and VP_Data[i][j][3] == VP_Data[i][j][14]):
                        Normal_I_Rails.append(str(VP_Data[i][j][0]) + " : Normal - " + str(round(temp, 3)) + "V")
def Signals_I_Status_Fill():
    """Rebuild the global Signals_I_Status list from live signal values."""
    del Signals_I_Status[:]
    for dev in range(len(ADM1266_Address)):
        for n in range(25):
            signal = Signals_Data[dev][n]
            if signal[0] != 0:  # only configured (named) signals
                level = "High" if signal[6] == 1 else "Low"
                Signals_I_Status.append(str(signal[0]) + " - Value : " + level)
# offline blackbox
def System_Read_Offline(system_data):
    """Rebuild the global System_Data list from pre-captured config chunks.

    Mirrors System_Read(), but consumes recorded 0xD7 response chunks from
    ``system_data`` instead of talking to the hardware.

    :param system_data: list of raw response chunks; element 0 is the first
        128-byte header read, later elements the follow-on chunks in order.
    """
    read_data = system_data[0]
    # Total length of the system data area (little-endian 16-bit).
    Data_length = read_data[1] + (read_data[2] * 256)
    Summary_Data[0] = "Configuration Name - '"
    Summary_Data[0] += List_to_String(read_data[30:(read_data[29] + 30)])
    Summary_Data[0] += "'"
    # Byte offset of the next chunk; the first 128 bytes were consumed above.
    # (A dead "j = 256" assignment and an unused write_data build were removed.)
    j = 128
    counter = 1
    while j < Data_length:
        l = j & 0xFF           # low byte of the chunk offset
        k = (j & 0xFF00) >> 8  # high byte of the chunk offset
        n = Data_length - j
        if n > 128:
            n = 128
        read_data = system_data[counter]
        # read and add one byte of data after the common header
        if k == 0 and l == 128 and n == 128:
            System_Data.extend([read_data[128]])
        else:
            # Remove CRC byte of System Data
            if k == 7 and l == 128 and n == 128:
                del read_data[128]
            # Remove byte count of PMBus Block Read
            del read_data[0]
            System_Data.extend(read_data)
        j += 128
        counter += 1
def System_Parse_Offline(hex_file_path, system_data):
    """Parse an Intel-HEX configuration export and populate the config tables.

    :param hex_file_path: path to the captured hex file.
    :param system_data: list that receives the raw data chunks (also fed to
        System_Read_Offline for each device).
    :return: True on success (open() raises on a missing/unreadable file).
    """
    # "with" guarantees the file is closed; the original leaked the handle.
    with open(hex_file_path, "rb") as hex_file:
        for line in hex_file.readlines():
            if line.startswith(b":00000001FF"):  # Intel-HEX end-of-file record
                break
            data_len = int(line[1:3], 16)
            cmd = int(line[3:7], 16)
            data = [] if data_len == 0 else array('B', codecs.decode((line[9:9 + data_len * 2]), "hex_codec")).tolist()
            # Fix: value comparison with '==' — the original used 'is', which
            # compares identity and only works by the accident of CPython's
            # small-int cache (it raises a SyntaxWarning on Python 3.8+).
            if cmd == 0xD7:
                del data[1:3]
            system_data.append(data)
    for i in range(len(ADM1266_Address)):
        System_Read_Offline(system_data)
    # Same section walk as System_Parse(): VLQ length + payload per section.
    next_pointer = 42
    (PadData_length, PadData_pointer) = VLQ_Decode(next_pointer)
    next_pointer = PadData_pointer + PadData_length + 1
    (RailData_length, RailData_pointer) = VLQ_Decode(next_pointer)
    next_pointer = RailData_pointer + RailData_length + 1
    (StateData_length, StateData_pointer) = VLQ_Decode(next_pointer)
    next_pointer = StateData_pointer + StateData_length + 1
    (SignalData_length, SignalData_pointer) = VLQ_Decode(next_pointer)
    Rail_Parse(RailData_length, RailData_pointer)
    Signal_Parse(SignalData_length, SignalData_pointer)
    State_Parse(StateData_length, StateData_pointer)
    return True
def Get_Raw_Data_Offline(bb_data_list, record_number):
    """Slice one 64-byte blackbox record per device out of a captured dump.

    :param bb_data_list: flat list of captured blackbox bytes (64 per record).
    :param record_number: 1-based record number relative to the oldest record.
    """
    j = Record_Index + int(record_number) - Num_Records
    if j < 0:
        j += 32  # the blackbox is a 32-record circular buffer
    for i in range(len(ADM1266_Address)):
        # NOTE(review): every device receives the same slice — confirm the
        # bb_data_list layout for multi-device systems.
        BB_Data[i] = bb_data_list[64*(j):64*(j+1)]
|
nilq/baby-python
|
python
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Url Entity class."""
from typing import Any, Dict, Mapping
from ..._version import VERSION
from ...common.utility import export
from ...sectools.domain_utils import url_components
from .entity import Entity
__version__ = VERSION
__author__ = "Ian Hellen"
# pylint: disable=invalid-name
@export
class Url(Entity):
    """URL Entity.

    On construction, if a ``Url`` value is present, its parsed components
    (as returned by ``url_components``) are merged into the instance's
    attribute dictionary.
    """

    def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
        """
        Create a new instance of the entity type.

        :param src_entity: instantiate entity using properties of src entity
        :param kwargs: key-value pair representation of entity
        """
        super().__init__(src_entity=src_entity, **kwargs)
        # Expand the URL into its components (host, scheme, etc.) as
        # additional attributes on this entity.
        # NOTE(review): assumes the base Entity makes self.Url resolvable
        # even when no Url was supplied — confirm against Entity.__getattr__.
        if self.Url:
            self.__dict__.update(url_components(self.Url))

    @property
    def description_str(self) -> str:
        """Return Entity Description."""
        return f"{self.Url}"

    # # We need to do some trickery with the Url defined as
    # # a property since base Entity class expects to be able to set
    # # attributes directly in self.__dict__
    # @property
    # def Url(self) -> Optional[str]:
    #     """Return Url."""
    #     if self._url is None and "Url" in self.__dict__:
    #         self.Url = self.__dict__["Url"]
    #     return self._url
    # @Url.setter
    # def Url(self, url):
    #     """Return host component of Url."""
    #     self._url = url
    #     if url:
    #         self.__dict__.update(url_components(url))

    # No extra schema fields beyond the base Entity.
    _entity_schema: Dict[str, Any] = {}
|
nilq/baby-python
|
python
|
import queue
import time
from threading import Thread
import cv2
from scripts import centerface_utils
TARGET_WIDTH = 640
TARGET_HEIGHT = 640
TARGET_FPS = 30
class CameraDemo:
    """Multi-threaded python centerface detection demo.

    One thread captures webcam frames, a second runs face detection and
    draws the results, and the main thread displays both the real-time and
    the annotated streams.
    """

    def __init__(self, runner: centerface_utils.CenterFaceNoDetection) -> None:
        """:param runner: model wrapper returning (detections, landmarks)."""
        self.keep_going = True
        self.runner = runner

    def capture_frame(self, cap, frame_queue):
        """Thread function which captures data from webcam and places into queue.

        The parameter was renamed from ``queue`` to ``frame_queue`` so it no
        longer shadows the ``queue`` module.
        """
        prev = 0
        cur = 0
        while self.keep_going:
            cur = time.time()
            _, img = cap.read()
            # Throttle enqueueing to TARGET_FPS regardless of the camera's
            # native capture rate.
            if (cur - prev) >= 1.0 / TARGET_FPS:
                prev = cur
                frame_queue.put(img)

    def process_frame(
        self, runner, processing_func, input_queue, output_queue, threshold
    ):
        """Thread function which detects and overlays results, add it to queue for rendering"""
        while self.keep_going:
            # Busy-wait poll; kept so the thread notices keep_going promptly.
            if input_queue.empty():
                continue
            frame = input_queue.get()
            frame = processing_func(frame)
            np_array = cv2.dnn.blobFromImage(
                frame,
                scalefactor=1.0,
                size=(TARGET_WIDTH, TARGET_HEIGHT),
                mean=(0, 0, 0),
                swapRB=True,
                crop=True,
            )
            start = time.time()
            detections, landmarks = runner(
                np_array, TARGET_HEIGHT, TARGET_WIDTH, threshold=threshold
            )
            end = time.time()
            # Fixed typo: "frame too" -> "frame took".
            print(f"Processing frame took {(end - start) * 1000} ms")
            # Draw predictions and show frame
            for det in detections:
                boxes, _ = det[:4], det[4]
                cv2.rectangle(
                    frame,
                    (int(boxes[0]), int(boxes[1])),
                    (int(boxes[2]), int(boxes[3])),
                    (2, 255, 0),
                    3,
                )
            for lm in landmarks:
                # Five facial landmarks stored as interleaved (x, y) pairs.
                for i in range(0, 5):
                    cv2.circle(
                        frame, (int(lm[i * 2]), int(lm[i * 2 + 1])), 4, (0, 0, 255), -1
                    )
            output_queue.put(frame)

    def run(self, threshold=0.5):
        """Open the default webcam and run the capture/process/display loop.

        :param threshold: detection confidence threshold passed to the runner.
        """
        cap = cv2.VideoCapture(0)
        cap_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        cap_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        # Doesn't seem to do anything :/
        # cap.set(cv2.CAP_PROP_FPS, TARGET_FPS)
        cap_fps = cap.get(cv2.CAP_PROP_FPS)
        print("* Capture width:", cap_width)
        print("* Capture height:", cap_height)
        print("* Capture FPS:", cap_fps)
        _, frame = cap.read()
        # assume w > h; scaling by TARGET_WIDTH / h also relies on
        # TARGET_WIDTH == TARGET_HEIGHT for a correct center crop.
        h, w = frame.shape[:2]
        scale = TARGET_WIDTH / h
        new_width = int(scale * w)
        new_height = int(scale * h)
        # For centercrop
        left = (new_width - TARGET_WIDTH) // 2
        top = (new_height - TARGET_HEIGHT) // 2
        right = (new_width + TARGET_WIDTH) // 2
        bottom = (new_height + TARGET_HEIGHT) // 2
        # initial queue for webcam data
        frames_queue = queue.Queue(maxsize=0)
        # queue after we've streamed it to real-time feed
        ready_for_processing_queue = queue.Queue(maxsize=0)
        # queue for processed frames with prediction overlays
        processed_frames_queue = queue.Queue(maxsize=0)
        # start thread to capture data from webcam
        capture_thread = Thread(
            target=self.capture_frame,
            args=(
                cap,
                frames_queue,
            ),
            daemon=True,
        )
        capture_thread.start()

        def processing_func(cv2_frame):
            # Resize and center crop frame
            frame = cv2.resize(cv2_frame, (new_width, new_height))
            frame = frame[top:bottom, left:right]
            return frame

        # start thread to process images with model
        processing_thread = Thread(
            target=self.process_frame,
            args=(
                self.runner,
                processing_func,
                ready_for_processing_queue,
                processed_frames_queue,
                threshold,
            ),
            daemon=True,
        )
        processing_thread.start()
        while self.keep_going:
            # Show the raw feed immediately and hand the frame to the model.
            if not frames_queue.empty():
                img_real_time = frames_queue.get()
                if img_real_time is not None:
                    cv2.imshow("realtime", img_real_time)
                    ready_for_processing_queue.put(img_real_time)
            if not processed_frames_queue.empty():
                img_processed = processed_frames_queue.get()
                if img_processed is not None:
                    cv2.imshow("predicted", img_processed)
            # 'q' quits; both worker threads watch keep_going.
            if cv2.waitKey(1) & 0xFF == ord("q"):
                self.keep_going = False
                break
        cap.release()
        capture_thread.join()
        processing_thread.join()
if __name__ == "__main__":
    # Pre-built runner choices: ONNX Runtime, TVM fp32/fp16 compiled
    # packages, and a no-detection baseline for timing the pipeline itself.
    onnx_runner = centerface_utils.CenterFaceOnnx("models/centerface-optimized.onnx")
    tvm_runner_fp32 = centerface_utils.CenterFaceTVM(
        "compiled_packages/centerface_autoscheduler_30000kt_fp32_llvm.tar"
    )
    tvm_runner_fp16 = centerface_utils.CenterFaceTVM(
        "compiled_packages/centerface_autoscheduler_30000kt_fp16_llvm.tar"
    )
    dummy_runner = centerface_utils.CenterFaceNoDetection()
    # Change runners at will
    demo = CameraDemo(tvm_runner_fp16)
    demo.run()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
'''
ASMO Configuration

Author:
Rony Novianto (rony@ronynovianto.com)
'''

# Run web to support any programming language via RESTful web service
# Run local if a higher performance is required (e.g. using ASMO with machine learning)
is_running_local = False
# Base URL of the ASMO RESTful service.
host = 'http://localhost:12766'

# Memory
memory_uri = 'memory'  # endpoint path for the shared-memory service

# Attention
process_uri = 'process'  # endpoint path for attention processes
compete_uri = 'compete'  # endpoint path that triggers a competition
competition_time = 0.5   # competition period (presumably seconds — confirm)
# Dictionary keys used in process records exchanged with the service.
priority_level_key = 'priority_level'
total_attention_level_key = 'total_attention_level'
attention_value_key = 'attention_value'
boost_value_key = 'boost_value'
required_resources_key = 'required_resources'
actions_key = 'actions'
nilq/baby-python
|
python
|
def double_first(vec):
    """Return twice the integer value of the first element of ``vec``.

    Prints a diagnostic and (implicitly) returns None when the list is
    empty or its first element is not a valid integer literal.
    """
    try:
        parsed = int(vec[0])
    except IndexError:
        print("no first item")
    except ValueError:
        print("invalid first item")
    else:
        return parsed * 2
if __name__ == '__main__':
    # Demonstrate the three outcomes: success, empty input, non-numeric input.
    samples = [["42", "93", "18"], [], ["tofu", "93", "18"]]
    for sample in samples:
        print(double_first(sample))
|
nilq/baby-python
|
python
|
import numpy as np
# initial values
# Training instances loaded from disk: each row is whitespace-separated ints
# (presumably 25 pixel values of a 5x5 glyph plus a +1/-1 label — the
# trainers below read columns 0-24 as inputs and column 25 as the target).
ARRAY = []
with open("xoData.txt") as f:
    for line in f:
        ARRAY.append([int(x) for x in line.split()])
# step function (activation function)
def step_function(value):
    """Bipolar step activation: return 1 when value >= 0, otherwise -1.

    The parameter was renamed from ``sum`` to ``value`` so it no longer
    shadows the built-in (all call sites pass it positionally).
    """
    if value >= 0:
        return 1
    return -1
# calculating output
def calculate_output(instance, weights, bias):
    """Return the bipolar activation of one instance given weights and bias."""
    net = instance.dot(weights) + bias
    return step_function(net)
# Hebbian Algorithm
def hebb():
    """Train one bipolar neuron with the Hebb rule over the global ARRAY.

    :return: tuple (weights, bias) after a single pass over the data.
    """
    inputs = np.array(ARRAY)
    weights = np.array([0.0] * 25)
    bias = 0.0
    for i in range(len(inputs)):
        # Hebb update: w_j += x_j * t for each feature; b += t.
        # Column 25 of each row is the target label.
        for j in range(len(inputs[i]) - 1):
            weights[j] = weights[j] + (inputs[i][j] * inputs[i][25])
        bias = bias + (1 * inputs[i][25])
    return weights, bias
# Perceptron Algorithm
def perceptron():
    """Train one bipolar perceptron over the global ARRAY.

    Runs a fixed 100 epochs (no early-stopping); weights are updated only
    on misclassified instances.

    :return: tuple (weights, bias).
    """
    inputs = np.array(ARRAY)
    weights = np.array([0.0] * 25)
    learning_rate = 0.1
    bias = 0.0
    x = 0
    while x < 100:
        x += 1
        for i in range(len(inputs)):
            # Net input uses all columns but the last (the label).
            s = inputs[i][:-1].dot(weights)
            prediction = step_function(s + bias)
            if inputs[i][25] != prediction:
                # Perceptron rule: w_j += lr * x_j * t on a wrong prediction.
                for j in range(len(inputs[i]) - 1):
                    weights[j] = weights[j] + (
                        learning_rate * inputs[i][j] * inputs[i][25]
                    )
                bias = bias + (learning_rate * inputs[i][25])
    return weights, bias
# Adaline Algorithm
def adaline():
    """Train one Adaline-style neuron over the global ARRAY for 100 epochs.

    :return: tuple (weights, bias).
    """
    inputs = np.array(ARRAY)
    weights = np.array([0.0] * 25)
    learning_rate = 0.1
    bias = 0.0
    x = 0
    while x < 100:
        x += 1
        for i in range(len(inputs)):
            s = inputs[i][:-1].dot(weights) + bias
            prediction = step_function(s)
            # Delta-rule error uses the LINEAR output s, not the thresholded one.
            error = inputs[i][25] - s
            # NOTE(review): textbook Adaline updates on every instance; here the
            # update is gated on a wrong thresholded prediction — confirm intended.
            if inputs[i][25] != prediction:
                for j in range(len(inputs[i]) - 1):
                    weights[j] = weights[j] + (learning_rate * inputs[i][j] * error)
                bias = bias + (learning_rate * error)
    return weights, bias
# Multi Class Perceptron: one-vs-rest with two output units.
# Unit 0 learns the raw target (X); unit 1 learns the negated target (O).
def multiClassPerceptron():
    samples = np.array(ARRAY)
    weights = np.array([[0.0] * 25, [0.0] * 25])
    learning_rate = 0.1
    bias = [0.0, 0.0]
    for _ in range(100):
        for sample in samples:
            features = sample[:-1]
            targets = (sample[25], sample[25] * (-1))
            # The units share no state, so updating them in turn is
            # equivalent to computing both predictions up front.
            for unit in (0, 1):
                prediction = step_function(features.dot(weights[unit]) + bias[unit])
                if targets[unit] != prediction:
                    weights[unit] = weights[unit] + learning_rate * features * targets[unit]
                    bias[unit] = bias[unit] + (learning_rate * targets[unit])
    return weights, bias
# Multi Class Adaline: one-vs-rest delta rule with two output units.
# Unit 0 learns the raw target (X); unit 1 learns the negated target (O).
def multiClassAdaline():
    samples = np.array(ARRAY)
    weights = np.array([[0.0] * 25, [0.0] * 25])
    learning_rate = 0.1
    bias = [0.0, 0.0]
    for _ in range(100):
        for sample in samples:
            features = sample[:-1]
            targets = (sample[25], sample[25] * (-1))
            # The units share no state, so sequential per-unit updates are
            # equivalent to computing both nets/errors up front.
            for unit in (0, 1):
                net = features.dot(weights[unit]) + bias[unit]
                if targets[unit] != step_function(net):
                    error = targets[unit] - net
                    weights[unit] = weights[unit] + learning_rate * features * error
                    bias[unit] = bias[unit] + (learning_rate * error)
    return weights, bias
|
nilq/baby-python
|
python
|
from yargy.utils import Record
|
nilq/baby-python
|
python
|
import networkx as nx
import numpy as np
from copy import deepcopy
from collections import defaultdict
from ylearn.utils import to_repr
from . import prob
from .utils import (check_nodes, ancestors_of_iter, descendents_of_iter)
class CausalGraph:
    """
    A class for representing DAGs of causal structures.

    Attributes
    ----------
    causation : dict
        Descriptions of the causal structures where values are parents of the
        corresponding keys.
    dag : nx.MultiDiGraph
        Graph represented by the networkx package.
    prob : ylearn.causal_model.prob.Prob
        The encoded probability distribution of the causal graph.
    latent_confounding_arcs : list of tuple of two str
        Two elements in the tuple are names of nodes in the graph where there
        exists an latent confounding arcs between them. Semi-Markovian graphs
        with unobserved confounders can be converted to a graph without
        unobserved variables, where one can add bi-directed latent confounding
        arcs represent these relations. For example, the causal graph X <- U -> Y,
        where U is an unobserved confounder of X and Y, can be converted
        equivalently to X <-->Y where <--> is a latent confounding arc.
    is_dag : bool
        Determine whether the graph is a DAG, which is a necessary condition
        for it to be a valid causal graph.
    c_components : set
        The C-components of the graph.
    observed_dag : nx.MultiDiGraph
        A causal graph with only observed variables.
    topo_order : list
        The topological order of the graph.
    explicit_unob_var_dag : nx.MultiDiGraph
        A new dag where all unobserved confounding arcs are replaced
        by explicit unobserved variables. See latent_confounding_arcs for more
        details of the unobserved variables.

    Methods
    ----------
    to_adj_matrix()
        Return the numpy matrix of the adjacency matrix.
    to_adj_list()
        Return the numpy array of the adjacency matrix.
    ancestors(y)
        Return ancestors of y.
    add_nodes(nodes, new=False)
        If not new, add all nodes in the nodes to the current
        CausalGraph, else create a new graph and add nodes.
    add_edges_from(edge_list, new=False, observed=True)
        Add all edges in the edge_list to the CausalGraph.
    parents(x, observed=True)
        Find the parents of the node x in the CausalGraph.
    add_edge(i, j, observed=True)
        Add an edge between nodes i and j to the CausalGraph. Add an unobserved
        confounding arc if not observed.
    remove_nodes(nodes, new=False)
        Remove all nodes in the graph. If new, do this in a new CausalGraph.
    remove_edge(i, j, observed=True)
        Remove the edge in the CausalGraph. If observed, remove the unobserved
        latent confounding arcs.
    remove_edges_from(edge_list, new=False, observed=True)
        Remove all edges in the edge_list in the CausalGraph.
    build_sub_graph(subset)
        Return a new CausalGraph as the subgraph of self with nodes in the
        subset.
    remove_incoming_edges(y, new=False)
        Remove all incoming edges of all nodes in y. If new, return a new
        CausalGraph.
    remove_outgoing_edges(y, new=False)
        Remove all outgoing edges of all nodes in y. If new, return a new
        CausalGraph.
    """

    def __init__(self, causation, dag=None, latent_confounding_arcs=None):
        """
        Parameters
        ----------
        causation : dict
            Descriptions of the causal structures where values are parents of the
            corresponding keys.
        dag : nx.MultiGraph, optional
            A known graph structure represented. If provided, dag must represent
            the causal structures stored in causation. Defaults to None.
        latent_confounding_arcs : set or list of tuple of two str, optional
            Two elements in the tuple are names of nodes in the graph where there
            exists an latent confounding arcs between them. Semi-Markovian graphs
            with unobserved confounders can be converted to a graph without
            unobserved variables, where one can add bi-directed latent confounding
            arcs to represent these relations. For example, the causal graph X <- U -> Y,
            where U is an unobserved confounder of X and Y, can be converted
            equivalently to X <-->Y where <--> is a latent confounding arc.
        """
        self.causation = defaultdict(list, causation)
        # Live dict view of node names: automatically reflects nodes added
        # to self.causation later on.
        self.ava_nodes = self.causation.keys()
        self.dag = self.observed_dag.copy() if dag is None else dag
        # add unobserved bidirected confounding arcs to the graph, with the
        # letter 'n' representing that the edge is unobserved
        if latent_confounding_arcs is not None:
            for edge in latent_confounding_arcs:
                self.dag.add_edges_from(
                    [(edge[0], edge[1], 'n'), (edge[1], edge[0], 'n')]
                )

    @property
    def prob(self):
        """The encoded probability distribution.

        Returns
        ----------
        Prob
        """
        return prob.Prob(variables=set(self.causation.keys()))

    @property
    def latent_confounding_arcs(self):
        """Return the latent confounding arcs encoded in the graph.

        Returns
        ----------
        list
        """
        # NOTE(review): nx.to_numpy_matrix was removed in networkx >= 3.0
        # (nx.to_numpy_array is the replacement) -- confirm the pinned
        # networkx version before upgrading.
        W = nx.to_numpy_matrix(self.dag)
        # An arc is recorded where edges exist in both directions; each
        # unordered pair is emitted only once.
        a, w_t = np.where(W >= 1), W.T.A
        arcs, nodes = [], list(self.dag.nodes)
        for row, col in zip(a[0], a[1]):
            if w_t[row][col] >= 1 and (nodes[col], nodes[row]) not in arcs:
                arcs.append((nodes[row], nodes[col]))
        return arcs

    @property
    def is_dag(self):
        """Verify whether the constructed graph is a DAG.
        """
        # TODO: determine if the graph is a DAG, try tr(e^{W\circledot W}-d)=0
        return nx.is_directed_acyclic_graph(self.observed_dag)

    def to_adj_matrix(self):
        """Return the adjacency matrix.
        """
        W = nx.to_numpy_matrix(self.dag)
        return W

    # def to_adj_list(self):
    #     """Return the adjacency list."""
    #     pass

    def is_d_separated(self, x, y, test_set):
        """Check if test_set d-separates x and y.

        Parameters
        ----------
        x : set of str
        y : set of str
        test_set : set of str

        Returns
        ----------
        Bool
            If test_set d-separates x and y, return True else return False.
        """
        # d-separation is evaluated on the graph where confounding arcs are
        # expanded into explicit latent variables.
        return nx.d_separated(self.explicit_unob_var_dag, x, y, test_set)

    @property
    def c_components(self):
        """Return the C-component set of the graph.

        Returns
        ----------
        set of str
            The C-component set of the graph
        """
        # C-components are the connected components of the (undirected)
        # graph built from the latent confounding arcs only.
        bi_directed_graph = nx.Graph()
        bi_directed_graph.add_nodes_from(self.dag.nodes)
        bi_directed_graph.add_edges_from(self.latent_confounding_arcs)
        return nx.connected_components(bi_directed_graph)

    def ancestors(self, x):
        """Return the ancestors of all nodes in x.

        Parameters
        ----------
        x : set of str
            a set of nodes in the graph

        Returns
        ----------
        set of str
            Ancestors of nodes in x in the graph
        """
        g = self.observed_dag
        return ancestors_of_iter(g, x)

    def descendents(self, x):
        """Return the descendents of all nodes in x.

        Parameters
        ----------
        x : set of str
            a set of nodes in the graph

        Returns
        ----------
        set of str
            Descendents of nodes x of the graph
        """
        # des = set()
        # x = {x} if isinstance(x, str) else x
        # for node in x:
        #     des.add(node)
        #     try:
        #         des.update(nx.descendants(self.observed_dag, node))
        #     except Exception:
        #         pass
        g = self.observed_dag
        return descendents_of_iter(g, x)

    def parents(self, x, only_observed=True):
        """Return the direct parents of the node x in the graph.

        Parameters
        ----------
        x : str
            Name of the node x.
        only_observed : bool, optional
            If True, then only find the observed parents in the causal graph,
            otherwise also include the unobserved variables, by default True

        Returns
        -------
        list
            Parents of the node x in the graph
        """
        if only_observed:
            return self.causation[x]
        else:
            return list(self.explicit_unob_var_dag.predecessors(x))

    @property
    def observed_dag(self):
        """Return the observed part of the graph, including observed nodes and
        edges between them.

        Returns
        ----------
        nx.MultiDiGraph
            The observed part of the graph
        """
        # Observed edges always carry key 0; unobserved arcs use key 'n'.
        edges = []
        for k, v in self.causation.items():
            for para in v:
                edges.append((para, k, 0))
        ob_dag = nx.MultiDiGraph()
        ob_dag.add_edges_from(edges)
        return ob_dag

    @property
    def explicit_unob_var_dag(self):
        """Build a new dag where all unobserved confounding arcs are replaced
        by explicit unobserved variables

        Returns
        ----------
        nx.MultiDiGraph
        """
        new_dag = self.observed_dag
        for i, (node1, node2) in enumerate(self.latent_confounding_arcs):
            # Each arc X <--> Y becomes a fresh latent variable Ui -> X, Ui -> Y.
            new_dag.add_edges_from(
                [(f'U{i}', node1, 'n'), (f'U{i}', node2, 'n')]
            )
        return new_dag

    @property
    def topo_order(self):
        """Return the topological order of the nodes in the observed graph

        Returns
        ----------
        generator
            Nodes in the topological order
        """
        return nx.topological_sort(self.observed_dag)

    def add_nodes(self, nodes, new=False):
        """
        If not new, add all nodes in the nodes to the current
        CausalGraph, else create a new graph and add nodes.

        Parameters
        ----------
        nodes : set or list
        new : bool, optional
            If new create and return a new graph. Defaults to False.

        Returns
        ----------
        CausalGraph
        """
        # NOTE(review): ori_nodes is a *live* NodeView. In the in-place
        # branch the membership test below runs after add_nodes_from(), so
        # it is always True and causation is never seeded here; the
        # defaultdict masks this. Confirm whether a snapshot
        # (list(self.dag.nodes)) was intended.
        ori_nodes = self.dag.nodes
        if not new:
            self.dag.add_nodes_from(nodes)
            for node in nodes:
                if node not in ori_nodes:
                    self.causation[node] = []
        else:
            new_dag = deepcopy(self.dag)
            new_causation = deepcopy(self.causation)
            new_dag.add_nodes_from(nodes)
            for node in nodes:
                if node not in ori_nodes:
                    new_causation[node] = []
            return CausalGraph(new_causation, dag=new_dag)

    def add_edges_from(self, edge_list, new=False, observed=True):
        """
        Add edges to the causal graph.

        Parameters
        ----------
        edge_list : list
            Every element of the list contains two elements, the first for
            the parent
        new : bool
            Return a new graph if set as True
        observed : bool
            Add unobserved bidirected confounding arcs if not observed.
        """
        if not new:
            if observed:
                for edge in edge_list:
                    self.causation[edge[1]].append(edge[0])
                    self.dag.add_edge(edge[0], edge[1], 0)
            else:
                for edge in edge_list:
                    self.dag.add_edges_from(
                        [(edge[0], edge[1], 'n'), (edge[1], edge[0], 'n')]
                    )
        else:
            new_dag = deepcopy(self.dag)
            new_causation = deepcopy(self.causation)
            if observed:
                new_dag.add_edges_from(edge_list)
                for edge in edge_list:
                    new_causation[edge[1]].append(edge[0])
            else:
                for edge in edge_list:
                    new_dag.add_edges_from(
                        [(edge[0], edge[1], 'n'), (edge[1], edge[0], 'n')]
                    )
            return CausalGraph(new_causation, dag=new_dag)

    def add_edge(self, s, t, observed=True):
        """
        Add an edge between nodes i and j. Add an unobserved latent confounding
        arc if not observed.

        Parameters
        ----------
        s : str
            Source of the edge.
        t : str
            Target of the edge.
        observed : bool
            Add an unobserved latent confounding arc if False.
        """
        if observed:
            self.dag.add_edge(s, t, 0)
            self.causation[t].append(s)
        else:
            self.dag.add_edge(s, t, 'n')

    def remove_nodes(self, nodes, new=False):
        """
        Remove all nodes in the graph.

        Parameters
        ----------
        nodes : set or list
        new : bool, optional
            If True, create a new graph, remove nodes in that graph and return
            it. Defaults to False.

        Returns
        ---------
        CausalGraph
            Return a CausalGraph if new.
        """
        if not new:
            for node in nodes:
                for k in list(self.causation.keys()):
                    if k == node:
                        del self.causation[node]
                        continue
                    # Removing a node also removes it from every parent list;
                    # nodes that do not list it simply raise and are skipped.
                    try:
                        self.causation[k].remove(node)
                    except Exception:
                        pass
            self.dag.remove_nodes_from(nodes)
        else:
            new_causation = deepcopy(self.causation)
            new_dag = deepcopy(self.dag)
            new_dag.remove_nodes_from(nodes)
            for node in nodes:
                for k in list(new_causation.keys()):
                    if k == node:
                        del new_causation[node]
                        continue
                    try:
                        new_causation[k].remove(node)
                    except Exception:
                        pass
            return CausalGraph(new_causation, dag=new_dag)

    def remove_edge(self, edge, observed=True):
        """
        Remove the edge in the CausalGraph. If observed, remove the unobserved
        latent confounding arcs.

        Parameters
        ----------
        edge : tuple
            2 elements denote the start and end of the edge, respectively
        observed : bool
            If not observed, remove the unobserved latent confounding arcs.
        """
        if observed:
            self.dag.remove_edge(edge[0], edge[1], 0)
            try:
                self.causation[edge[1]].remove(edge[0])
            except Exception:
                pass
        else:
            self.dag.remove_edges_from(
                [(edge[0], edge[1], 'n'), (edge[1], edge[0], 'n')]
            )

    def remove_edges_from(self, edge_list, new=False, observed=True):
        """
        Remove all edges in the edge_list in the graph.

        Parameters
        ----------
        edge_list : list
        new : bool, optional
            If new, create a new CausalGraph and remove edges.
        observed : bool, optional
            Remove unobserved latent confounding arcs if not observed.

        Returns
        ----------
        CausalGraph
            Return a new CausalGraph if new.
        """
        if not new:
            if observed:
                for edge in edge_list:
                    self.dag.remove_edge(edge[0], edge[1], 0)
                    try:
                        self.causation[edge[1]].remove(edge[0])
                    except Exception:
                        pass
            else:
                for edge in edge_list:
                    self.dag.remove_edges_from(
                        [(edge[0], edge[1], 'n'), (edge[1], edge[0], 'n')]
                    )
        else:
            new_dag = deepcopy(self.dag)
            new_causation = deepcopy(self.causation)
            if observed:
                for edge in edge_list:
                    new_dag.remove_edge(edge[0], edge[1], 0)
                    try:
                        new_causation[edge[1]].remove(edge[0])
                    except Exception:
                        pass
            else:
                for edge in edge_list:
                    new_dag.remove_edges_from(
                        [(edge[0], edge[1], 'n'), (edge[1], edge[0], 'n')]
                    )
            return CausalGraph(new_causation, new_dag)

    def build_sub_graph(self, subset):
        """Return a new CausalGraph as the subgraph of the graph with nodes in the
        subset.

        Parameters
        ----------
        subset : set

        Returns
        ----------
        CausalGraph
        """
        check_nodes(self.ava_nodes, subset)
        # Keep the subset by removing its complement from a copy.
        nodes = set(self.causation.keys()).difference(subset)
        return self.remove_nodes(nodes, new=True)

    def remove_incoming_edges(self, x, new=False):
        """Remove incoming edges of all nodes of x. If new, do this in the new
        CausalGraph.

        Parameters
        ----------
        x : set or list
        new : bool
            Return a new graph if set as True.

        Returns
        ----------
        CausalGraph
            If new, return a subgraph of the graph without all incoming edges
            of nodes in x
        """
        check_nodes(self.ava_nodes, x)
        edges = self.dag.in_edges(x, keys=True)
        # Split incoming edges into observed (key 0) and unobserved (key 'n')
        # so each kind is removed through the matching code path.
        o_edges, u_edges = [], []
        for edge in edges:
            if edge[2] == 'n':
                u_edges.append(edge)
            else:
                o_edges.append(edge)
        if new:
            return self.remove_edges_from(o_edges, new).remove_edges_from(
                u_edges, new, observed=False
            )
        else:
            # In-place: mutate self and (implicitly) return None.
            self.remove_edges_from(o_edges, new)
            self.remove_edges_from(u_edges, new, observed=False)

    def remove_outgoing_edges(self, x, new=False):
        """Remove outgoing edges of all nodes in x.

        Parameters
        ----------
        x : set
        new : bool

        Returns
        ----------
        CausalGraph
            If new, return a subgraph of the graph without all outgoing edges
            of nodes in x.
        """
        check_nodes(self.ava_nodes, x)
        # Only observed edges (key 0) are removed; confounding arcs stay.
        removing_edges = [
            edge for edge in self.dag.out_edges(x, keys=True) if edge[2] == 0
        ]
        return self.remove_edges_from(removing_edges, new, observed=True)

    def plot(self, **kwargs):
        # Draw only the observed causal structure with matplotlib via networkx.
        ng = nx.DiGraph(self.causation)
        options = dict(with_labels=True, node_size=1000, **kwargs)
        nx.draw(ng, **options)

    def __repr__(self):
        return to_repr(self)
|
nilq/baby-python
|
python
|
import typing
import uuid
from datetime import datetime
class SetType:
    """A category of MTG set (core, expansion, promo, ...).

    Every instance registers itself in a class-level set so it can later be
    looked up by API name via :meth:`parse`.
    """

    # Registry of every SetType ever constructed; consulted by parse().
    _all: typing.Set['SetType'] = set()

    def __init__(self, api_name: str, desc: str):
        self.name = str(api_name)  # unique API identifier
        self.desc = desc           # human-readable description
        SetType._all.add(self)

    def __eq__(self, other):
        if not isinstance(other, SetType):
            return NotImplemented
        return self.name == other.name

    def __hash__(self) -> int:
        return hash(self.name)

    def __str__(self) -> str:
        return self.name.lower()

    @staticmethod
    def parse(name: str) -> typing.Optional['SetType']:
        """Return the registered SetType whose name matches exactly.

        BUG FIX: the return annotation previously claimed a non-optional
        'SetType' even though None is returned when no type matches.
        """
        for s in SetType._all:
            if s.name == name:
                return s
        return None


TypeCore = SetType('core', 'A yearly Magic core set (Tenth Edition, etc)')
TypeExpansion = SetType('expansion', 'A rotational expansion set in a block (Zendikar, etc)')
TypeMasters = SetType('masters', 'A reprint set that contains no new cards (Modern Masters, etc)')
TypeMasterpiece = SetType('masterpiece', 'Masterpiece Series premium foil cards')
TypeFromTheVault = SetType('from_the_vault', 'From the Vault gift sets')
TypeSpellbook = SetType('spellbook', 'Spellbook series gift sets')
TypePremiumDeck = SetType('premium_deck', 'Premium Deck Series decks')
TypeDuelDeck = SetType('duel_deck', 'Duel Decks')
TypeDraftInnovation = SetType('draft_innovation', 'Special draft sets, like Conspiracy and Battlebond')
TypeTreasureChest = SetType('treasure_chest', 'Magic Online treasure chest prize sets')
TypeCommander = SetType('commander', 'Commander preconstructed decks')
# NOTE(review): 'plainchase' looks like a typo for 'planechase', but it is a
# runtime identifier that parse() callers may rely on -- confirm before fixing.
TypePlanechase = SetType('plainchase', 'Planechase sets')
TypeArchenemy = SetType('archenemy', 'Archenemy sets')
TypeVanguard = SetType('vanguard', 'Vanguard card sets')
TypeFunny = SetType('funny', 'A funny un-set or set with funny promos (Unglued, Happy Holidays, etc)')
TypeStarter = SetType('starter', 'A starter/introductory set (Portal, etc)')
TypeBox = SetType('box', 'A gift box set')
TypePromo = SetType('promo', 'A set that contains purely promotional cards')
TypeToken = SetType('token', 'A set made up of tokens and emblems.')
TypeMemorabilia = SetType('memorabilia', 'A set made up of gold-bordered, oversize, or trophy cards that are not legal')


class Set:
    """
    Represents a group of related MTG cards. Not all are from official releases,
    some are for grouping purposes only. All are provided by data from Scryfall.

    :ivar id: Scryfall ID of this set.
    :ivar code: The unique three to five-letter code for this set.
    :ivar name: English language name for the set.
    """

    def __init__(self, **kwargs):
        # Defaults; each may be overridden by a matching keyword below.
        self.id: uuid.UUID = uuid.UUID("00000000-0000-0000-0000-000000000000")
        self.code: str = ''
        self.name: str = ''
        self.type: SetType = TypeCore
        self.release_date: datetime = datetime.min
        self.number = ''  # assumed set/collector number -- TODO confirm intended type
        self.block: str = ''
        self.parent_set: str = ''
        self.card_count: int = 0
        self.digital: bool = False
        self.foil_only: bool = False
        self.nonfoil_only: bool = False
        if kwargs is not None:
            if 'id' in kwargs:
                id = kwargs['id']
                if isinstance(id, uuid.UUID):
                    self.id = id
                else:
                    self.id = uuid.UUID(kwargs['id'])
            if 'code' in kwargs:
                self.code = str(kwargs['code'])
            if 'name' in kwargs:
                self.name = str(kwargs['name'])
            if 'type' in kwargs:
                t = kwargs['type']
                if isinstance(t, SetType):
                    self.type = t
                else:
                    self.type = SetType.parse(str(t))
            if 'release_date' in kwargs:
                rd = kwargs['release_date']
                if isinstance(rd, datetime):
                    self.release_date = rd
                else:
                    self.release_date = datetime.fromisoformat(str(rd))
            # BUG FIX: 'number' was read unconditionally, raising KeyError
            # whenever it was absent; guard it like every other keyword.
            if 'number' in kwargs:
                self.number = kwargs['number']
            if 'block' in kwargs:
                self.block = str(kwargs['block'])
            if 'parent_set' in kwargs:
                self.parent_set = str(kwargs['parent_set'])
            if 'card_count' in kwargs:
                self.card_count = int(kwargs['card_count'])
            if 'digital' in kwargs:
                self.digital = bool(kwargs['digital'])
            if 'foil_only' in kwargs:
                self.foil_only = bool(kwargs['foil_only'])
            if 'nonfoil_only' in kwargs:
                self.nonfoil_only = bool(kwargs['nonfoil_only'])

    def __hash__(self) -> int:
        # Identity is the (Scryfall id, set code) pair, matching __eq__.
        return hash((self.id, self.code))

    def __eq__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return (self.id, self.code) == (other.id, other.code)

    def __ne__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return not self.__eq__(other)

    def __lt__(self, other):
        # Sort order: set-type name, then release date, then display name.
        if not isinstance(other, Set):
            return NotImplemented
        return (self.type.name, self.release_date, self.name) < (other.type.name, other.release_date, other.name)

    def __le__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return self.__lt__(other) or self.__eq__(other)

    def __ge__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return not self.__lt__(other)

    def __gt__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return not self.__le__(other)

    def __str__(self) -> str:
        return self.name

    def __repr__(self) -> str:
        s = 'Set(id={!r}, code={!r}, name={!r}, type={!r}, release_date={!r}, '
        s += 'block={!r}, parent_set={!r}, card_count={!r}, digital={!r}, '
        s += 'foil_only={!r}, nonfoil_only={!r})'
        return s.format(
            self.id,
            self.code,
            self.name,
            self.type,
            self.release_date,
            self.block,
            self.parent_set,
            self.card_count,
            self.digital,
            self.foil_only,
            self.nonfoil_only,
        )

    @property
    def has_foils(self) -> bool:
        return not self.nonfoil_only

    @property
    def has_nonfoils(self) -> bool:
        return not self.foil_only

    def to_dict(self) -> typing.Dict[str, typing.Any]:
        # NOTE(review): 'type' is stored as the SetType object while 'id' and
        # 'release_date' are stringified -- confirm whether str(self.type) was
        # intended before changing (kept as-is for caller compatibility).
        d = {
            'id': str(self.id),
            'code': self.code,
            'name': self.name,
            'type': self.type,
            'release_date': self.release_date.isoformat(),
            'block': self.block,
            'parent_set': self.parent_set,
            'card_count': self.card_count,
            'digital': self.digital,
            'foil_only': self.foil_only,
            'nonfoil_only': self.nonfoil_only
        }
        return d
|
nilq/baby-python
|
python
|
# coding=utf8
import re
from decimal import Decimal
from typing import Union
MAX_VALUE_LIMIT = 1000000000000  # 10^12, exclusive upper bound for all converters
# Positional units and digits, lower-case (reading) and upper-case (finance)
# variants; '_' is a placeholder for the ones position, stripped by the rules.
LOWER_UNITS = '千百十亿千百十万千百十_'
LOWER_DIGITS = '零一二三四五六七八九'
UPPER_UNITS = '仟佰拾亿仟佰拾万仟佰拾_'
UPPER_DIGITS = '零壹贰叁肆伍陆柒捌玖'


class ChineseNumbers:
    """Convert non-negative integers below 10**12 to Chinese numerals."""

    # Clean-up rules applied in order to the raw digit/unit interleaving.
    RULES = [
        (r'一十', '十'),
        (r'零[千百十]', '零'),
        (r'零{2,}', '零'),
        (r'零([亿|万])', r'\g<1>'),
        (r'亿零{0,3}万', '亿'),
        (r'零?_', ''),
    ]

    @staticmethod
    def measure_number(num: Union[int, str]) -> str:
        """Return *num* spelled with lower-case Chinese digits (e.g. 42 -> 四十二).

        Raises ValueError when num is negative or >= 10**12.
        """
        if isinstance(num, str):
            _n = int(num)
        else:
            _n = num
        if _n < 0 or _n >= MAX_VALUE_LIMIT:
            raise ValueError('Out of range')
        num_str = str(num)
        capital_str = ''.join([LOWER_DIGITS[int(i)] for i in num_str])
        # Interleave each digit with its positional unit, then normalize.
        s_units = LOWER_UNITS[len(LOWER_UNITS) - len(num_str):]
        o = ''.join('{}{}'.format(u, d) for u, d in zip(capital_str, s_units))
        for p, d in ChineseNumbers.RULES:
            o = re.sub(p, d, o)
        if 10 <= _n < 20:
            # BUG FIX: the original discarded str.replace()'s return value
            # (strings are immutable). Redundant in practice since RULES[0]
            # already rewrites 一十, but the intent is now actually executed.
            o = o.replace('一十', '十')
        return o

    @staticmethod
    def order_number(num: Union[int, str]) -> str:
        """Like measure_number, but use 〇 for zero (ordinal/date style)."""
        val = ChineseNumbers.measure_number(num)
        return val.replace('零', '〇')

    @staticmethod
    def to_chinese_number(num: Union[int, str], upper: bool = False, order: bool = False) -> str:
        """Convert *num*; *order* selects 〇-style zeros, *upper* selects
        the upper-case (finance) digit and unit forms."""
        if order:
            lower_string = ChineseNumbers.order_number(num)
        else:
            lower_string = ChineseNumbers.measure_number(num)
        if upper:
            # Map lower digits plus the 千/百/十 units to their upper forms.
            for _ld, _ud in zip(LOWER_DIGITS + LOWER_UNITS[:3], UPPER_DIGITS + UPPER_UNITS[:3]):
                lower_string = lower_string.replace(_ld, _ud)
        return lower_string
class FinanceNumbers:
    """Render an amount of money as an upper-case Chinese finance string,
    using the 元/角/分 currency units (at most two decimal places kept)."""

    # Clean-up rules applied in order to the raw digit/unit interleaving.
    RULES = [
        (r'零角零分$', '整'),
        (r'零[仟佰拾]', '零'),
        (r'零{2,}', '零'),
        (r'零([亿|万])', r'\g<1>'),
        (r'零+元', '元'),
        (r'亿零{0,3}万', '亿'),
        (r'^元', '零元')
    ]

    @staticmethod
    def to_capital_str(num: Union[int, float, Decimal, str]) -> str:
        """Convert *num* (yuan) to its capital finance form.

        Raises ValueError when num is negative or >= 10**12.
        """
        unit_scale = UPPER_UNITS[:-1] + '元角分'
        value = Decimal(num) if isinstance(num, str) else num
        if value < 0 or value >= MAX_VALUE_LIMIT:
            raise ValueError('Out of range')
        # Pad with '00' so integers gain two (zero) decimal places, then
        # keep at most two digits after the dot and drop the dot itself.
        digits = str(num) + '00'
        dot = digits.find('.')
        if dot > -1:
            digits = digits[:dot] + digits[dot + 1:dot + 3]
        spelled = ''.join([UPPER_DIGITS[int(ch)] for ch in digits])
        tail_units = unit_scale[len(unit_scale) - len(digits):]
        out = ''.join('{}{}'.format(d, u) for d, u in zip(spelled, tail_units))
        for pattern, repl in FinanceNumbers.RULES:
            out = re.sub(pattern, repl, out)
        return out
|
nilq/baby-python
|
python
|
"""
Configuration module for ladim
"""
# ----------------------------------
# Bjørn Ådlandsvik <bjorn@imr.no>
# Institute of Marine Research
# 2017-01-17
# ----------------------------------
# import datetime
import logging
from typing import Dict, Any
import numpy as np
import yaml
import yaml.parser
from netCDF4 import Dataset, num2date
Config = Dict[str, Any]  # type alias: the parsed-configuration dictionary used throughout
def configure_ibm(conf: Dict[str, Any]) -> Config:
    """Configure the IBM module

    Input: Raw conf dictionary from configuration file
    Return: Dictionary with IBM configuration

    If an IBM is used, check that module name is present
    Special treatment for the variables item
    Other items are stored for the IBM module
    """
    logging.info("Configuration: IBM")
    if conf is None:  # No configuration at all
        return {}
    ibm_conf = conf.get("ibm")
    if ibm_conf is None:  # Empty ibm section
        return {}
    # Mandatory: module name; accept the obsolete "ibm_module" spelling.
    if "module" not in ibm_conf:
        if "ibm_module" not in ibm_conf:
            logging.error("No IBM module specified")
            raise SystemExit(1)
        ibm_conf["module"] = ibm_conf.pop("ibm_module")
    logging.info(f' {"module":15s}: {ibm_conf["module"]}')
    # The variables item: accept the obsolete "ibm_variables" key, here or
    # under the state section; otherwise default to an empty list.  (When a
    # state section is present but has no ibm_variables, "variables" is
    # deliberately left unset, matching historical behaviour.)
    if "variables" not in ibm_conf:
        if "ibm_variables" in ibm_conf:
            ibm_conf["variables"] = ibm_conf.pop("ibm_variables")
        elif "state" in conf and conf["state"] is not None:
            if "ibm_variables" in conf.get("state", dict()):
                ibm_conf["variables"] = conf["state"]["ibm_variables"]
        else:
            ibm_conf["variables"] = []
    for key in ibm_conf:
        if key != "module":
            logging.info(f"  {key:15s}: {ibm_conf[key]}")
    return ibm_conf
def configure_gridforce(conf: Dict[str, Any]) -> Config:
    """Parse gridforce related info and pass on

    Input: raw conf dictionary from configuration file
    Return: dictionary with gridforce configuration
    """
    logging.info("Configuration: gridforce")
    if conf is None:
        logging.error("No gridforce section in configuration file")
        raise SystemExit(1)
    grid_conf = conf.get("gridforce")
    if grid_conf is None:
        logging.error("Empty gridforce section in configuration file")
        raise SystemExit(1)
    # module is the only mandatory field
    if "module" not in grid_conf:
        logging.error("No gridforce module specified")
        raise SystemExit(1)
    logging.info(f' {"module":15s}: {grid_conf["module"]}')
    # Backwards compability (for ROMS.py): file names may also live in the
    # "files" section; an entry already under gridforce takes priority.
    if "files" in conf and conf["files"] is not None:
        files = conf["files"]
        for key in ("grid_file", "input_file"):
            if key in files and key not in grid_conf:
                grid_conf[key] = files[key]
    for key in grid_conf:
        if key != "module":
            logging.info(f"  {key:15s}: {grid_conf[key]}")
    return grid_conf
# ---------------------------------------
def configure(config_stream) -> Config:
    """The main configuration handling function

    Input: Name of configuration file in yaml format
    Returns: Configuration dictionary
    """
    config: Config = dict()
    # --- Read the configuration file ---
    try:
        conf = yaml.safe_load(config_stream)
    except yaml.parser.ParserError:
        logging.critical("Can not parse configuration")
        raise SystemExit(2)
    # ----------------
    # Time control
    # ----------------
    logging.info("Configuration: Time Control")
    # Times are stored as numpy datetime64 with second resolution.
    for name in ["start_time", "stop_time"]:
        config[name] = np.datetime64(conf["time_control"][name]).astype("M8[s]")
        logging.info(f"  {name.replace('_', ' '):15s}: {config[name]}")
    # reference_time, default = start_time
    config["reference_time"] = np.datetime64(
        conf["time_control"].get("reference_time", config["start_time"])
    ).astype("M8[s]")
    logging.info(f'  {"reference time":15s}: {config["reference_time"]}')
    # -------------
    # Files
    # -------------
    logging.info("Configuration: Files")
    logging.info(f'  {"config_stream":15s}: {config_stream}')
    for name in ["particle_release_file", "output_file"]:
        config[name] = conf["files"][name]
        logging.info(f"  {name:15s}: {config[name]}")
    # A warm_start_file entry switches the run from cold to warm start.
    try:
        config["warm_start_file"] = conf["files"]["warm_start_file"]
        config["start"] = "warm"
        logging.info(f'  {"Warm start from":15s}: {config["warm_start_file"]}')
    except KeyError:
        config["start"] = "cold"
        config["warm_start_file"] = ""
    # Override start time for warm start
    if config["start"] == "warm":
        try:
            nc = Dataset(config["warm_start_file"])
        except (FileNotFoundError, OSError):
            logging.error(f"Could not open warm start file,{config['warm_start_file']}")
            raise SystemExit(1)
        tvar = nc.variables["time"]
        # Use last record in restart file
        warm_start_time = np.datetime64(num2date(tvar[-1], tvar.units))
        warm_start_time = warm_start_time.astype("M8[s]")
        config["start_time"] = warm_start_time
        logging.info(f"  Warm start at {warm_start_time}")
        # Variables needed by restart, might be changed;
        # default should be instance variables among release variables
        try:
            warm_start_variables = conf["state"]["warm_start_variables"]
        except KeyError:
            warm_start_variables = ["X", "Y", "Z"]
        config["warm_start_variables"] = warm_start_variables
    # --- Time stepping ---
    logging.info("Configuration: Time Stepping")
    # Read time step and convert to seconds
    dt = np.timedelta64(*tuple(conf["numerics"]["dt"]))
    config["dt"] = int(dt.astype("m8[s]").astype("int"))
    config["simulation_time"] = np.timedelta64(
        config["stop_time"] - config["start_time"], "s"
    ).astype("int")
    config["numsteps"] = config["simulation_time"] // config["dt"]
    logging.info(f'  {"dt":15s}: {config["dt"]} seconds')
    logging.info(
        f'  {"simulation time":15s}: {config["simulation_time"] // 3600} hours'
    )
    logging.info(f'  {"number of time steps":15s}: {config["numsteps"]}')
    # --- Grid ---
    config["gridforce"] = configure_gridforce(conf)
    # --- Forcing ---
    try:
        config["ibm_forcing"] = conf["gridforce"]["ibm_forcing"]
    except (KeyError, TypeError):
        config["ibm_forcing"] = []
    # ibm_forcing used to be a dictionary
    if isinstance(config["ibm_forcing"], dict):
        config["ibm_forcing"] = list(config["ibm_forcing"].keys())
    logging.info(f'  {"ibm_forcing":15s}: {config["ibm_forcing"]}')
    # --- IBM ---
    config["ibm"] = configure_ibm(conf)
    # Make obsolete: flat copies kept for backwards compatibility.
    config["ibm_variables"] = config["ibm"].get("variables", [])
    config["ibm_module"] = config["ibm"].get("module")
    # --- Particle release ---
    logging.info("Configuration: Particle Releaser")
    prelease = conf["particle_release"]
    try:
        config["release_type"] = prelease["release_type"]
    except KeyError:
        config["release_type"] = "discrete"
    logging.info(f'  {"release_type":15s}: {config["release_type"]}')
    if config["release_type"] == "continuous":
        config["release_frequency"] = np.timedelta64(
            *tuple(prelease["release_frequency"])
        )
        logging.info(
            f'  {"release_frequency":11s}: {str(config["release_frequency"])}'
        )
    config["release_format"] = conf["particle_release"]["variables"]
    config["release_dtype"] = dict()
    # Map from str to converter
    type_mapping = dict(int=int, float=float, time=np.datetime64, str=str)
    for name in config["release_format"]:
        # Each release column gets a converter; unspecified columns are float.
        config["release_dtype"][name] = type_mapping[
            conf["particle_release"].get(name, "float")
        ]
        logging.info(f'  {name:15s}: {config["release_dtype"][name]}')
    config["particle_variables"] = prelease["particle_variables"]
    # --- Model state ---
    # logging.info("Configuration: Model State Variables")
    # -----------------
    # Output control
    # -----------------
    logging.info("Configuration: Output Control")
    try:
        output_format = conf["output_variables"]["format"]
    except KeyError:
        output_format = "NETCDF3_64BIT_OFFSET"
    config["output_format"] = output_format
    logging.info(f'  {"output_format":15s}: {config["output_format"]}')
    # Skip output of initial state, useful for restart
    # with cold start the default is False
    # with warm start, the default is true
    try:
        skip_initial = conf["output_variables"]["skip_initial_output"]
    except KeyError:
        skip_initial = config["start"] == "warm"
    config["skip_initial"] = skip_initial
    logging.info(f"  {'Skip inital output':15s}: {skip_initial}")
    try:
        numrec = conf["output_variables"]["numrec"]
    except KeyError:
        numrec = 0
    config["output_numrec"] = numrec
    logging.info(f'  {"output_numrec":15s}: {config["output_numrec"]}')
    # Output period is converted from wall time to a number of time steps.
    outper = np.timedelta64(*tuple(conf["output_variables"]["outper"]))
    outper = outper.astype("m8[s]").astype("int") // config["dt"]
    config["output_period"] = outper
    logging.info(f'  {"output_period":15s}: {config["output_period"]} timesteps')
    config["num_output"] = 1 + config["numsteps"] // config["output_period"]
    # NOTE(review): this logs "numsteps" a second time -- possibly meant to
    # log num_output; confirm before changing the label.
    logging.info(f'  {"numsteps":15s}: {config["numsteps"]}')
    config["output_particle"] = conf["output_variables"]["particle"]
    config["output_instance"] = conf["output_variables"]["instance"]
    config["nc_attributes"] = dict()
    for name in config["output_particle"] + config["output_instance"]:
        value = conf["output_variables"][name]
        # Expand the symbolic reference_time in time units.
        if "units" in value:
            if value["units"] == "seconds since reference_time":
                timeref = str(config["reference_time"]).replace("T", " ")
                value["units"] = f"seconds since {timeref}"
        config["nc_attributes"][name] = conf["output_variables"][name]
    logging.info("  particle variables")
    for name in config["output_particle"]:
        logging.info(8 * " " + name)
        for item in config["nc_attributes"][name].items():
            logging.info(12 * " " + "{:11s}: {:s}".format(*item))
    logging.info("  particle instance variables")
    for name in config["output_instance"]:
        logging.info(8 * " " + name)
        for item in config["nc_attributes"][name].items():
            logging.info(12 * " " + "{:11s}: {:s}".format(*item))
    # --- Numerics ---
    # dt belongs here, but is already read
    logging.info("Configuration: Numerics")
    try:
        config["advection"] = conf["numerics"]["advection"]
    except KeyError:
        config["advection"] = "RK4"
    logging.info(f'  {"advection":15s}: {config["advection"]}')
    try:
        diffusion = conf["numerics"]["diffusion"]
    except KeyError:
        diffusion = 0.0
    if diffusion > 0:
        config["diffusion"] = True
        config["diffusion_coefficient"] = diffusion
        logging.info(
            f'  {"diffusion coefficient":15s}: {config["diffusion_coefficient"]}'
        )
    else:
        config["diffusion"] = False
        logging.info("  no diffusion")
    return config
|
nilq/baby-python
|
python
|
# TODO: put the name flexstring in the Class.
# Class is not "Named" and its names are not interned.
# Sym continues like today. Meth is named.
import os, re, sys
import collections
from logging import info
# Tuning.  Sizes of the generated 16-bit target image and its tables.
MEMORY_LEN = 0x8000 # Somewhat arbitrary size of target memory, in bytes.
SYM_VEC_LEN = 256
CLASS_VEC_LEN = 256
# Leave a little gap for future overhead.
MAX_OBJ_SIZE = 258
MAX_FLEX_BYTES = 256
MAX_FLEX_PTRS = 128
# Target memory.  '#' marks never-written bytes; filled in by Materialize().
Memory = MEMORY_LEN * ['#']
SymVec = SYM_VEC_LEN * [0]
ClassVec = CLASS_VEC_LEN * [0]
# Compiler state: opcode list/numbering, per-class method sources, opcode C bodies.
OpList = []
OpNums = {}
Method = collections.defaultdict(dict)
Op = {}
# Util
def Hi(x):
  """Return the high byte of a 16-bit word."""
  return (x >> 8) & 0xFF
def Lo(x):
  """Return the low byte of a 16-bit word."""
  return x & 0xFF
##### LEX
# Bound .match methods for each token class.  A token is identified by the
# single-letter tag in PATTERNS: C colon-keyword, I integer, W word,
# M mul-op, A add-op, R rel-op, P punctuation; 'Z' (end) is synthesized by Lex.
LEX_INT = re.compile('(-?[0-9]+|[$][0-9a-fA-F]+)').match
LEX_COLON = re.compile('(([A-Za-z][A-Za-z0-9]*)\\s*[:])').match
LEX_IDENT = re.compile('([A-Za-z][A-Za-z0-9]*)').match
LEX_MULOP = re.compile('([*]|/|%)').match
LEX_ADDOP = re.compile('([+]|-)').match
LEX_RELOP = re.compile('(<|>|==|!=|<=|>=)').match
LEX_PUNCT = re.compile('([():,.;=])').match
LEX_WHITE = re.compile('([ \\t\\n\\r]*)').match
# Order matters: colon-keywords must be tried before plain identifiers.
PATTERNS = [('C',LEX_COLON), ('I',LEX_INT), ('W',LEX_IDENT), ('M',LEX_MULOP), ('A',LEX_ADDOP), ('R',LEX_RELOP), ('P',LEX_PUNCT)]
class Lex(object):
  """One-token-lookahead tokenizer over a TerseTalk source string.

  self.token is always the current (type, text, end_position) triple;
  type is a tag from PATTERNS, or 'Z' at end of input.
  NOTE: the bare 'print' statements are Python-2 debug traces.
  """
  def __init__(self, source):
    self.source = source
    self.n = len(source)
    self.i = 0
    print ('Inital', self.source, self.i, self.n)
    self.Advance()
  def Advance(self):
    # Consume the current token; self.token becomes the next one.
    self.token = self.Next()
  def Next(self):
    # Return the next (type, text, pos) triple, skipping leading whitespace.
    if self.i == self.n:
      print 'Next', 60, ('Z', '', self.i)
      return ('Z', '', self.i)
    rest = self.source[self.i:]
    white = LEX_WHITE(rest)
    if white:
      self.i += len(white.group(1))
      if self.i == self.n:
        print 'Next', 69, ('Z', '', self.i)
        return ('Z', '', self.i)
      rest = self.source[self.i:]
    # First pattern in PATTERNS that matches wins.
    for typ,pat in PATTERNS:
      m = pat(rest)
      if m:
        self.i += len(m.group(1))
        print 'Next', 78, (typ, m.group(1), self.i)
        return (typ, m.group(1), self.i)
    raise Exception('Cannot lex rest: %s' % repr(rest))
# Parse-tree (AST) node classes.  Every node implements __str__ (debugging)
# and visit() to double-dispatch into a visitor (LocalsVisitor, CompilerVisitor).
class PExpr(object):
  """Abstract base for all parse-tree nodes."""
  pass
class PSeq(PExpr):
  """Sequence of expressions separated by '.'/';'; value is the last one."""
  def __init__(self, exprs):
    self.exprs = exprs
  def __str__(self):
    return '{%s}' % ' ; '.join(str(e) for e in self.exprs)
  def visit(self, v):
    v.visitSeq(self)
class PAssign(PExpr):
  """Assignment: vars (a PVar, or PList of PVar) = expr."""
  def __init__(self, varz, expr):
    self.vars = varz
    self.expr = expr
  def __str__(self):
    return '%s= %s' % (self.vars, self.expr)
  def visit(self, v):
    v.visitAssign(self)
class PList(PExpr):
  """Comma-separated list of expressions."""
  def __init__(self, exprs):
    self.exprs = exprs
  def __str__(self):
    return '[%s]' % ' , '.join(str(e) for e in self.exprs)
  def visit(self, v):
    v.visitList(self)
class PVar(PExpr):
  """Variable reference; name is stored uppercased in .s."""
  def __init__(self, s):
    self.s = s.upper()
  def __str__(self):
    return '%s' % self.s
  def visit(self, v):
    v.visitVar(self)
class PInt(PExpr):
  """Integer literal."""
  def __init__(self, n):
    self.n = n
  def __str__(self):
    return '%d' % self.n
  def visit(self, v):
    v.visitInt(self)
class PUnary(PExpr):
  """Unary message send: receiver METH (selector stored uppercased)."""
  def __init__(self, r, meth):
    self.r = r
    self.meth = meth.upper()
  def __str__(self):
    return '%s %s' % (self.r, self.meth)
  def visit(self, v):
    v.visitUnary(self)
class PMul(PExpr):
  """Multiplicative binary send: r (* / %) a."""
  def __init__(self, r, meth, a):
    self.r = r
    self.meth = meth
    self.a = a
  def __str__(self):
    return '(%s %s %s)' % (self.r, self.meth, self.a)
  def visit(self, v):
    v.visitMul(self)
class PAdd(PExpr):
  """Additive binary send: r (+ -) a."""
  def __init__(self, r, meth, a):
    self.r = r
    self.meth = meth
    self.a = a
  def __str__(self):
    return '(%s %s %s)' % (self.r, self.meth, self.a)
  def visit(self, v):
    v.visitAdd(self)
class PRel(PExpr):
  """Relational binary send: r (< > == != <= >=) a."""
  def __init__(self, r, meth, a):
    self.r = r
    self.meth = meth
    self.a = a
  def __str__(self):
    return '(%s %s %s)' % (self.r, self.meth, self.a)
  def visit(self, v):
    v.visitRel(self)
class PKeyword(PExpr):
  """Keyword message send: rcvr k1: a1 k2: a2 ...; selector like 'K1:K2:'."""
  def __init__(self, r, meth, args):
    self.r = r
    self.meth = meth.upper()
    self.args = args
  def __str__(self):
    z = '( (%s) ' % self.r
    for k,v in zip(self.meth.split(':'), [str(a) for a in self.args]):
      z += '%s: %s ' % (k, v)
    return z + ')'
  def visit(self, v):
    v.visitKeyword(self)
class PMacro(PExpr):
  """Macro form like IF(...)THEN(...)ELSE(...): parallel keyword/var/body lists."""
  def __init__(self, keywords, varz, exprs):
    self.keywords = keywords
    self.vars = varz
    self.exprs = exprs
  def __str__(self):
    z = ''
    for k,v,e in zip(self.keywords, self.vars, self.exprs):
      z += '%s(%s%s)' % (k, ('%s:' % v if v else ''), e)
    return z
  def visit(self, v):
    v.visitMacro(self)
class Parser(object):
  """Recursive-descent parser producing PExpr trees.

  Precedence, loosest binding first:
    Seq ('.'/';') > Assign ('=') > List (',') > Keyword ('word:')
    > Rel > Add > Mul > Unary > Prim.
  """
  def __init__(self, source):
    self.source = source
    self.lex = Lex(source)
  def Parse(self):
    # Parse the whole source; raise if unconsumed tokens remain.
    seq = self.ParseSeq()
    typ, s, i = self.lex.token
    if typ != 'Z':
      print('Extra stuff: %s' % repr((typ, s, i)))
      raise Exception('Extra stuff: %s' % repr(self.source[i:]))
    return seq
  def ParseSeq(self):
    # One or more Assigns separated by '.' or ';'; a trailing separator is OK.
    z = []
    while True:
      a = self.ParseAssign()
      z.append(a)
      typ, s, i = self.lex.token
      if typ=='Z' or s==')' or s=='=':
        break
      elif s=='.' or s==';':
        self.lex.Advance()
        typ, s, i = self.lex.token
        if typ=='Z' or s==')' or s=='=': # If trailing "."
          break
      else:
        raise Exception('EXPECTED EOS or ")" or "." or ";" AFTER %s BEFORE %s' % (repr(self.source[:i]), repr(self.source[i:])))
    return z[0] if len(z)==1 else PSeq(z)
  def ParseAssign(self):
    # List ['=' List]; the target must be a PVar or a PList of PVars.
    a = self.ParseList()
    typ, s, i = self.lex.token
    if s == '=':
      if not isinstance(a, PList) and not isinstance(a, PVar):
        raise Exception('Bad target of assignment AFTER %s BEFORE %s' % (repr(self.source[:i]), repr(self.source[i:])))
      if isinstance(a, PList):
        for b in a.exprs:
          if not isinstance(b, PVar):
            raise Exception('Bad subtarget "%s" of assignment AFTER %s BEFORE %s' % (b, repr(self.source[:i]), repr(self.source[i:])))
      self.lex.Advance()
      b = self.ParseList()
      return PAssign(a, b)
    return a
  def ParseList(self):
    # One or more Keyword expressions separated by ','.
    z = []
    while True:
      a = self.ParseKeyword()
      z.append(a)
      typ, s, i = self.lex.token
      if s==',':
        self.lex.Advance()
      else:
        break
    return z[0] if len(z)==1 else PList(z)
  def ParseMacro(self, name):
    # Called after 'name (' was seen; collects WORD(...)WORD(...)... clauses.
    # Each clause body may start with 'var:' to bind a loop/element variable.
    typ, s, i = self.lex.token
    keywords = [name]
    varz = []
    exprs = []
    while True:
      if varz: # Not the first time:
        keywords.append(s)
        # next comes the open paren
        self.lex.Advance()
        typ, s, i = self.lex.token
        if s != '(':
          raise Exception('Expected "(" in macro AFTER %s BEFORE %s' % (repr(self.source[:i]), repr(self.source[i:])))
        self.lex.Advance()
        typ, s, i = self.lex.token
      var = None
      if typ == 'C':
        var = LEX_COLON(s).group(2) # extract word.
        self.lex.Advance()
      ex = self.ParseSeq()
      varz.append(var)
      exprs.append(ex)
      typ, s, i = self.lex.token
      if s != ')':
        raise Exception('Expected ")" in macro AFTER %s BEFORE %s' % (repr(self.source[:i]), repr(self.source[i:])))
      self.lex.Advance()
      typ, s, i = self.lex.token
      if typ != 'W':
        break
    return PMacro(keywords, varz, exprs)
  def ParsePrim(self):
    # Primary: integer literal ($hex or decimal), word (var or macro), or (Seq).
    typ, s, i = self.lex.token
    if typ == 'I':
      self.lex.Advance()
      val = int(s[1:],base=16) if s[0]=='$' else int(s)
      return PInt(val)
    elif typ == 'W':
      name = s
      self.lex.Advance()
      typ, s, i = self.lex.token
      if s == '(':
        # Macro syntax
        self.lex.Advance()
        return self.ParseMacro(name)
      else:
        # Just a var name
        return PVar(name)
    elif s == '(':
      self.lex.Advance()
      seq = self.ParseSeq()
      typ, s, i = self.lex.token
      if s != ')':
        raise Exception('EXPECTED ")" AFTER %s BEFORE %s' % (repr(self.source[:i]), repr(self.source[i:])))
      self.lex.Advance()
      return seq
    else:
      raise Exception('UNEXPECTED prim: %s' % repr((typ, s, i)))
  def ParseKeyword(self):
    rargs = [ self.ParseRel() ] # rargs are receiver and args.
    keywords = ''
    while True:
      typ, s, i = self.lex.token
      if typ == 'C': # a word and a colon
        s = LEX_COLON(s).group(2) # extract word.
        keywords += s + ':'
        self.lex.Advance()
        rargs.append(self.ParseRel())
      else:
        break
    if len(rargs) > 1:
      return PKeyword(rargs[0], keywords, rargs[1:])
    else:
      return rargs[0]
  def ParseRel(self):
    # Non-associative relational: Add [relop Add].
    a = self.ParseAdd()
    typ, s, i = self.lex.token
    if typ == 'R':
      op = s
      self.lex.Advance()
      b = self.ParseAdd()
      return PRel(a, op, b)
    return a
  def ParseAdd(self):
    # Non-associative additive: Mul [addop Mul].
    a = self.ParseMul()
    typ, s, i = self.lex.token
    if typ == 'A':
      op = s
      self.lex.Advance()
      b = self.ParseMul()
      return PAdd(a, op, b)
    return a
  def ParseMul(self):
    # Non-associative multiplicative: Unary [mulop Unary].
    a = self.ParseUnary()
    typ, s, i = self.lex.token
    if typ == 'M':
      op = s
      self.lex.Advance()
      b = self.ParseUnary()
      return PUnary(a, s) if False else PMul(a, op, b)
  def ParseUnary(self):
    # Left-to-right chain of unary sends: prim word word ...
    a = self.ParsePrim()
    typ, s, i = self.lex.token
    while typ == 'W':
      a = PUnary(a, s)
      self.lex.Advance()
      typ, s, i = self.lex.token
    return a
# Module-load smoke test: parse each sample and echo source + parse tree.
# (Python-2 print statements.)
for s in [
    '4',
    '4 - 6',
    'sys print:( x square + y square ) sqrt ',
    'sys print:( x square + y square ) sqrt on: stdout',
    '2. 4. 6. 64 sqrt',
    '(foo foo; bar bar; 2, 4, 6, 64 sqrt) len',
    '(1,2,3),(4,5,6),(7,8,9)',
    '(1,2,3)x,(4,5,6)y,(7,8,9)z',
    'r, s, t = (1,2,3)x,(4,5,6)y,(7,8,9)z',
    'z = IF(a<2)THEN(a+0)ELSE(demo recurse: a - 1)',
    'a,b,c = FOR(i: words)MAP( IF(i<0)THEN(i neg) ELSE (i) )',
    ]:
  print '<<< %s' % s
  print '>>> %s' % str(Parser(s).Parse())
class LocalsVisitor(object):
  """AST walk that collects every name assigned (method locals).

  Mimics the Python rule: locals are the assigned names, not declared ones.
  """
  def __init__(self):
    self.locals = set()
  def visitSeq(self, p):
    for e in p.exprs:
      e.visit(self)
  def visitAssign(self, p):
    p.expr.visit(self)
    # NOTE(review): ParseAssign produces PVar or PList targets; the
    # isinstance(..., list) branch looks unreachable, and a PList target
    # falls through to 'raise type(p)' (which is itself a TypeError,
    # since PAssign is not an exception class) -- verify intended.
    if isinstance(p.vars, list):
      for v in p.vars:
        self.locals.add(v.s)
    elif isinstance(p.vars, PVar):
      self.locals.add(p.vars.s)
    else:
      raise type(p)
  def visitList(self, p):
    for e in p.exprs:
      e.visit(self)
  def visitVar(self, p):
    pass
  def visitInt(self, p):
    pass
  def visitUnary(self, p):
    p.r.visit(self)
  def visitMul(self, p):
    p.r.visit(self)
    p.a.visit(self)
  def visitAdd(self, p):
    p.r.visit(self)
    p.a.visit(self)
  def visitRel(self, p):
    p.r.visit(self)
    p.a.visit(self)
  def visitKeyword(self, p):
    p.r.visit(self)
    for e in p.args:
      e.visit(self)
  def visitMacro(self, p):
    # Macro clause variables (e.g. FOR(i: ...)) are locals too.
    for v in p.vars:
      self.locals.add(v)
    for e in p.exprs:
      e.visit(self)
class CompilerVisitor(object):
  """AST walk that emits symbolic bytecode into self.codes.

  codes is a flat list mixing opcode names (strings), small integer
  operands, and '/mark|jump|bfalse/N' placeholders that CompileMethod
  later resolves to relative branches.
  """
  def __init__(self, top, cls):
    self.top = top
    self.cls = cls
    self.explain = []
    self.codes = []
    self.slots = {}
    self.flex = None
    self.localindex = {}
    for k,offset in self.cls.bslots:
      # Strip prefix B_ from k; map slot name -> ('b', byte offset).
      self.slots[k[2:].upper()] = ('b', offset)
    for k,offset in self.cls.pslots:
      # Strip prefix P_ from k; map slot name -> ('p', byte offset).
      self.slots[k[2:].upper()] = ('p', offset)
    for k,offset in self.cls.flexes:
      # Like ('FLEX_BYTES', 2) or ('FLEX_PTRS', 2).
      self.flex = (k, offset)
    # Find all names assigned. Filter out the slots, to get locals.
    # (This is like the python rule: instead of declaring locals, assign them.)
    lv = LocalsVisitor()
    top.visit(lv)
    self.locals = sorted([e for e in lv.locals if e not in self.slots])
    for i,var in zip(range(len(self.locals)), self.locals):
      self.localindex[var] = i
  def AddLocal(self, var):
    # Append a compiler-generated local; return its frame index.
    i = len(self.locals)
    self.localindex[var] = i
    self.locals.append(var)
    return i
  def visitSeq(self, p):
    last = p.exprs.pop()
    for e in p.exprs:
      e.visit(self)
      self.codes.append('drop') # Drop middle results.
    last.visit(self) # the last one returns the result.
  def visitAssign(self, p):
    p.expr.visit(self)
    self.codes.append('dup') # one to assign and one for result.
    # NOTE(review): raising a string is a TypeError on Python >= 2.6.
    if isinstance(p.vars, list):
      raise 'TODO: list deconstruction'
    var = p.vars.s
    print 'visitAssign:', p, '|', p.vars, '|', p.expr
    print 'self.slots,var:', self.slots, '|', var, type(var)
    slot = self.slots.get(var)
    if slot:
      # Instance slot: store via self + put{b,p}_b with the byte offset.
      kind, offset = slot
      if kind=='b':
        self.codes.append('self')
        self.codes.append('putb_b')
        self.codes.append(offset)
      elif kind=='p':
        self.codes.append('self')
        self.codes.append('putp_b')
        self.codes.append(offset)
      else:
        raise 'bad'
    else:
      # Not a slot, so it should be a local var.
      i = self.localindex[var]
      if i<4 and False:
        # Short sto0..sto3 forms are disabled ('and False').
        self.codes.append('sto%d' % i)
      else:
        self.codes.append('sto_b')
        self.codes.append(i)
  def visitList(self, p):
    raise 'TODO'
  def visitVar(self, p):
    # Resolve, in order: builtin pseudo-vars, instance slots, classes, locals.
    var = p.s
    slot = self.slots.get(var)
    cls = ClassDict.get(var)
    if var in ['SELF','SUPER','TRUE','FALSE','NIL','A','B','C','D']:
      self.codes.append(var)
    elif slot:
      kind, offset = slot
      if kind=='b':
        self.codes.append('self')
        self.codes.append('getb_b')
        self.codes.append(offset)
      elif kind=='p':
        self.codes.append('self')
        self.codes.append('getp_b')
        self.codes.append(offset)
      else:
        raise 'bad'
    elif cls:
      self.codes.append('cls_b')
      self.codes.append(cls.b_this)
    else:
      i = self.localindex[var]
      if i<4 and False:
        self.codes.append('rcl%d' % i)
      else:
        self.codes.append('rcl_b')
        self.codes.append(i)
  def visitInt(self, p):
    # Emit a tagged-oop literal: (n<<1)|1, one byte if it fits in 7 bits.
    n = p.n
    if -64 <= n < 64:
      if n<0: n+=256
      self.codes.append('lit_b')
      self.codes.append(255&((n<<1)|1))
    else:
      if n<0: n+=65536
      self.codes.append('lit_w')
      self.codes.append(255&(n>>7))
      self.codes.append(255&((n<<1)|1))
  def visitUnary(self, p):
    p.r.visit(self)
    self.codes.append('call0_b')
    self.codes.append(InternDict[p.meth])
  def visitMul(self, p):
    # Binary sends push the argument first, then the receiver.
    p.a.visit(self)
    p.r.visit(self)
    self.codes.append('call1_b')
    self.codes.append(InternDict[p.meth])
  def visitAdd(self, p):
    p.a.visit(self)
    p.r.visit(self)
    self.codes.append('call1_b')
    self.codes.append(InternDict[p.meth])
  def visitRel(self, p):
    p.a.visit(self)
    p.r.visit(self)
    self.codes.append('call1_b')
    self.codes.append(InternDict[p.meth])
  def visitKeyword(self, p):
    # Args are pushed last-to-first, then the receiver.
    args = p.args[:]
    args.reverse()
    for a in args:
      a.visit(self)
    p.r.visit(self)
    self.codes.append('call%d_b' % len(args))
    self.codes.append(InternDict[p.meth])
  def visitMacro(self, p):
    # Dispatch to the macro expander keyed by joined keywords, e.g. IF_THEN_ELSE.
    name = '_'.join(p.keywords)
    macro = MACROS[name]
    macro(self, p.vars, p.exprs)
# Monotonic counter used to generate unique branch-mark numbers.
_Serial = 0
def Serial():
  """Return the next unique serial number (1, 2, 3, ...)."""
  global _Serial
  _Serial += 1
  return _Serial
def IfThenMacro(v, varz, exprs):
  """Expand IF(cond)THEN(body) as IF/THEN/ELSE(nil)."""
  varz.append(None)
  exprs.append(PVar('NIL'))
  IfThenElseMacro(v, varz, exprs)
def IfThenElseMacro(v, varz, exprs):
  """Emit cond; bfalse->mark1; then; jump->mark2; mark1; else; mark2."""
  assert all([var is None for var in varz])
  mark1 = Serial()
  mark2 = Serial()
  exprs[0].visit(v)
  v.codes.append('/bfalse/%d' % mark1)
  exprs[1].visit(v)
  v.codes.append('/jump/%d' % mark2)
  v.codes.append('/mark/%d' % mark1)
  exprs[2].visit(v)
  v.codes.append('/mark/%d' % mark2)
def WhileDoMacro(v, varz, exprs):
  """Emit mark1; cond; bfalse->mark2; body; jump->mark1; mark2."""
  assert all([var is None for var in varz])
  mark1 = Serial()
  mark2 = Serial()
  v.codes.append('/mark/%d' % mark1)
  exprs[0].visit(v)
  v.codes.append('/bfalse/%d' % mark2)
  exprs[1].visit(v)
  v.codes.append('/jump/%d' % mark1)
  v.codes.append('/mark/%d' % mark2)
def ForDoMacro(v, varz, exprs):
  """Expand FOR(i: limit)DO(body): count i from 0 while (limit - i) is truthy."""
  assert varz[0]
  assert not varz[1]
  # Create the local variable for Limit:
  limit = '_tmp_%d' % Serial()
  lim = v.AddLocal(limit.upper())
  # Find the index variable.
  ix = v.AddLocal(varz[0].upper())
  # Store 0 in index.  ('1' is the tagged oop for integer 0: (0<<1)|1.)
  v.codes.append('lit_b')
  v.codes.append('1')
  v.codes.append('sto_b')
  v.codes.append(ix)
  # Evaluate limit.
  exprs[0].visit(v)
  v.codes.append('sto_b')
  v.codes.append(lim)
  mark1 = Serial()
  mark2 = Serial()
  v.codes.append('/mark/%d' % mark1)
  # Check for ix reached the limit.
  v.codes.append('rcl_b')
  v.codes.append(lim)
  v.codes.append('rcl_b')
  v.codes.append(ix)
  v.codes.append('subtract')
  v.codes.append('/bfalse/%d' % mark2)
  exprs[1].visit(v)
  # incr_local_b adds 2 to the raw word, i.e. +1 on the tagged int.
  v.codes.append('incr_local_b')
  v.codes.append(ix)
  v.codes.append('/jump/%d' % mark1)
  v.codes.append('/mark/%d' % mark2)
# Macro expanders keyed by the '_'-joined keyword chain from PMacro.
MACROS = dict(
    IF_THEN = IfThenMacro,
    IF_THEN_ELSE = IfThenElseMacro,
    WHILE_DO = WhileDoMacro,
    FOR_DO = ForDoMacro,
    )
def CompileToCodes(s, cls):
  """Parse TerseTalk source s and compile it against class cls.

  Returns (symbolic code list, number of locals).
  """
  p = Parser(s).Parse()
  v = CompilerVisitor(p, cls)
  p.visit(v)
  return v.codes, len(v.locals)
InternDict = {}  # str to index.
def Intern(s):
  """Return the intern index for s, assigning the next free index on first use.

  BUGFIX: the previous 'if not n:' treated index 0 as "absent", so
  re-interning the empty string (index 0) assigned it a fresh, wrong index
  and corrupted InternDict.  Index 0 is a valid intern number; test for None.
  """
  n = InternDict.get(s)
  if n is None:
    n = len(InternDict)
    InternDict[s] = n
  return n
Intern("") # Empty string is intern index 0.
# Token classifiers for the words of a 'B' (symbolic bytecode) method body:
# @Class reference, #symbol literal, decimal int operand, /verb/N branch mark.
CLASS_PATTERN = re.compile("^@([A-Za-z0-9_:]+)$").match
SYM_PATTERN = re.compile("^#([A-Za-z0-9_:]+)$").match
INT_PATTERN = re.compile("^-?[0-9]+$").match
MARK_PATTERN = re.compile("^/([a-z]+)/([0-9]+)$").match
def EvalInt(s):
  """Parse a decimal integer literal, optionally negative.

  INT_PATTERN accepts an optional leading '-', but this routine previously
  raised 'Bad decimal digit' on the minus sign; now signed literals parse.
  Raises Exception on any other non-digit character or an empty digit string.
  """
  negative = s.startswith('-')
  if negative:
    s = s[1:]
  if not s:
    raise Exception('Bad decimal digit in string: %s' % s)
  z = 0
  for ch in s:
    i = ord(ch) - ord('0')
    if 0 <= i and i <= 9:
      z = 10*z + i
    else:
      raise Exception('Bad decimal digit in string: %s' % s)
  return -z if negative else z
def Num2Oop(x):
  """Encode small integer x as a tagged oop (low bit set); max 16 bits."""
  oop = 2 * x + 1
  if oop > 0xFFFF:
    raise Exception('Num2Oop too big: %d.' % x)
  return oop
# Nicknames are just for debugging the compiler.
Nick = 0
def GetNick():
  """Return the next unique nickname number."""
  global Nick
  Nick+=1
  return Nick
# All objects that need copying into Mem.
Here = 0          # Next free address in the target image.
MemorableList = []  # Every Reify()'d object, in address order.
class Memorable(object):
  """Base for Python-side objects that get laid out into the target Memory.

  Reify() assigns an address and size; Materialize() copies the b_*/p_*
  attribute values and flex bytes into the Memory list.
  (The bare 'print' statements are Python-2 debug traces.)
  """
  def __init__(self):
    self.nick = GetNick()
  def Reify(self):
    # Compute base + flex size, pad to an even size of at least 4 bytes,
    # then claim the next address range in the image.
    global Here
    assert len(getattr(self, 'flexbytes', [])) == getattr(self, 'flexsize', 0)
    self.basesize = self.BaseByteSize() # not including flex len and flex bytes.
    self.size = self.basesize
    fs = getattr(self, 'flexsize', None)
    if fs is not None:
      self.size += fs
    if self.size & 1:
      self.padded = 1
      self.size += 1 # Final size must be even.
    else:
      self.padded = 0
    if self.size < 4:
      self.padded = 4 - self.size
      self.size = 4
    if self.size > 256:
      # NOTE(review): comma instead of %, so the message is never formatted.
      raise Exception("Object size too big: %d: %s", self.size, vars(self))
    self.addr = Here
    Here += self.size
    MemorableList.append(self)
  def Materialize(self):
    # Copy instance values into Memory: b_NAME via the class's B_NAME offset
    # (one byte), p_NAME via P_NAME (two bytes, hi/lo), then flex bytes,
    # then '^' padding markers.
    print 'Materialize:', self
    print 'Materialize:', vars(self)
    for k,v in vars(self).items():
      if k.startswith('b_'):
        k2 = 'B_' + k[2:]
        v2 = getattr(self, k2)
        print self, k, v, k2, v2
        Memory[self.addr + v2] = v
      if k.startswith('p_'):
        k2 = 'P_' + k[2:]
        v2 = getattr(self, k2)
        print self, k, v, k2, v2
        if isinstance(v, Memorable):
          Memory[self.addr + v2] = Hi(v.addr)
          Memory[self.addr + v2 + 1] = Lo(v.addr)
        elif type(v) is int:
          Memory[self.addr + v2] = Hi(v)
          Memory[self.addr + v2 + 1] = Lo(v)
        else:
          raise Exception('Weird kind: %s' % type(v))
    fb = getattr(self, 'flexbytes', None)
    print ':fb:', self.basesize, fb, self
    if fb is not None:
      i = 0
      for b in fb:
        Memory[self.addr + self.basesize + i] = b
        i += 1
    for i in range(self.padded):
      Memory[self.addr + self.size - 1 - i] = '^'
  def BaseByteSize(self):
    # Base size = one byte per B_ slot + two bytes per P_ slot.
    self.Bslots = [k for k in dir(self) if k.startswith('B_')]
    self.Pslots = [k for k in dir(self) if k.startswith('P_')]
    z = len(self.Bslots) + 2*len(self.Pslots)
    assert z <= MAX_OBJ_SIZE
    return z
  def __str__(self):
    if hasattr(self, 'addr'):
      return '<%s:%s:%04x>' % (self.__class__.__name__, self.nick, self.addr)
    else:
      return '<%s:%s:????>' % (self.__class__.__name__, self.nick)
  def __repr__(self):
    return self.__str__()
# --- Target object model.  The Python class hierarchy below mirrors the
# --- target's class tree; B_* / P_* class attributes give byte/pointer slot
# --- offsets within an instance, FLEX_* the start of variable-length data.
class Ur(Memorable): # Proto Object (for proxies).
  B_gcsize = 0 # Garbage Collection size, for allocator and collector.
  B_cls = 1 # Class Number.
class Obj(Ur): # Smalltalk root Object.
  pass
class Num(Obj):
  pass
class Int(Num): # Small 15-bit signed integer. Encoded in an oop with low bit set.
  pass
class Addr(Num): # 16-bit unsigned integer.
  B_hi = 2
  B_lo = 3
class NilT(Obj): # class of nil
  pass
class Bool(Obj):
  pass
class TrueT(Bool): # class of true
  pass
class FalseT(Bool): # class of false
  pass
class Arr(Obj): # LowLevel: Flexible-length abstract object.
  pass
class ArrByt(Arr): # LowLevel: Flexible-length bytes storage.
  FLEX_BYTES = 2
class ArrPtr(Arr): # LowLevel: Flexible-length oops storage.
  FLEX_PTRS = 2
class Tuple(ArrPtr): # Tuple: uses fixed FlexP.
  pass
class Slice(Obj): # LowLevel: Slice of a Flx.
  B_begin = 2
  B_len = 3
  B_intern = 4 # used in Sym for intern number.
  P_guts = 5
class Vec(Slice): # Vector of Pointers.
  pass
class Buf(Slice): # Buffer of Bytes.
  pass
class Str(Buf):
  pass
class Sym(Str):
  pass
class Err(Buf):
  pass
class Named(Obj): # Object with interned name.
  B_name = 2 # Symbol index.
# The only linked lists we need is methods in a class, so no Link List class.
# Flag values for Cls.B_flags: which kind of flex data an instance carries.
K_FLEX_BYTES = 1
K_FLEX_PTRS = 2
class Cls(Obj):
  B_flags = 2 # e.g. K_FLEX_BYTES, K_FLEX_PTRS
  B_bSize = 3 # Base size of instance in bytes.
  B_numB = 4 # Number of byte slots.
  B_numP = 5 # Number of Oop slots.
  B_this = 6 # This class index.
  B_partner = 7 # class to meta; meta to class.
  P_sup = 8 # Superclass, by Pointer, for faster method dispatch.
  P_meths = 10 # Head of linked list of meths.
  FLEX_BYTES = 12 # For the name of the class, so it does not have to be interned.
class Metacls(Cls):
  pass
class Meth(Named):
  B_owner = 3 # Owning Class.
  B_numL = 4 # num Locals.
  P_next = 5 # Linked list of methods on this class.
  FLEX_BYTES = 7
# Since bytecodes are built-in flex bytes, if you recompile, you may have to
# replace (and re-linked-list) the entire Meth object.
# Limit of about 250 bytecodes.
class Demo(Obj):
  B_one = 2 # two byte fields: one, two.
  B_two = 3
  P_three = 4 # two oop fields: three, four.
  P_four = 6
#### Stack layout
## [
##   args
## 10 ]
##  8 Receiver
##  6 Selector Sym|Method, debug.
##  4 DE, argcount, debug.
##  2 ReturnPC
##  0 ReturnFP   <--- fp
## -2 [
##     locals
## ]           <--- sp?
# Offsets from Frame pointer.
K_ARG4 = 16
K_ARG3 = 14
K_ARG2 = 12
K_ARG1 = 10
K_RCVR = 8 # conceptually, RCVR is like ARG0.
K_MSG = 6 # Needed for a debugger interpreting a stack.
K_DE = 4 # Could omit this. It makes viewing stacks easier.
K_RET_PC = 2
K_RET_FP = 0
K_LCL0 = -2
K_LCL1 = -4
K_LCL2 = -6
K_LCL3 = -8
Method['URCLS']['new'] = '''C
word rcvr = W(fp + K_RCVR);
fprintf(stderr, "URCLS::new -- rcvr=%04x\\n", rcvr);
word z = MakeInstance(rcvr, 0);
Inspect(z, "URCLS new.");
PUSH(z);
'''
Method['URCLS']['new:'] = '''C
word rcvr = W(fp + K_RCVR);
word n = W(fp + K_ARG1);
fprintf(stderr, "URCLS::new:a -- rcvr=$%04x a==$%x=%d.\\n", rcvr, n, n);
word z = MakeInstance(rcvr, OOP2NUM(n));
Hex20("URCLS new: -->", n, z);
Inspect(z, "URCLS new:.");
PUSH(z);
'''
Method['UR']['same'] = 'B self a same'
Method['UR']['=='] = 'B self a same'
Method['UR']['must'] = 'B self must self' # Most objects are true.
Method['UR']['not'] = 'B self not' # Most objects are true.
Method['UR']['bytlen'] = 'B self bytlen'
Method['UR']['ptrlen'] = 'B self ptrlen'
Method['UR']['bytat:'] = 'B a self bytat'
Method['UR']['ptrat:'] = 'B a self ptrat'
Method['UR']['bytat:put:'] = 'B b a self bytatput nil'
Method['UR']['ptrat:put:'] = 'B b a self ptratput nil'
Method['ARRBYT']['len'] = 'B self bytlen'
Method['ARRBYT']['at:'] = 'B a self bytat'
Method['ARRBYT']['at:put:'] = 'B b self a bytatput nil'
Method['ARRPTR']['len'] = 'B self ptrlen'
Method['ARRPTR']['at:'] = 'B a self ptrat'
Method['ARRPTR']['at:put:'] = 'B b self a ptratput nil'
Method['DEMO']['run'] = '''B
lit_b 51 self #double: lit_w %d %d call dup show
lit_b 51 self #twice: lit_w %d %d call dup show
add dup show
''' % (0xDE, 0x01, 0xDE, 0x01)
Method['DEMO']['run2setup'] = '''T
acct = Demo new init.
acct balance show.
acct deposit: 10.
acct balance show.
acct deposit: 100.
acct balance show.
acct withdraw: 20.
acct balance show.
'''
Method['DEMO']['RUN2'] = '''T
self run2setup.
IF( 5 )THEN( 5 show ).
IF( true )THEN( 42 show )ELSE( 666 show ).
n = 3.
WHILE( n )DO( n show. n = n - 1. ).
FOR( i : 5 )DO( i show ).
p = ArrByt new: 5.
FOR( i : 5 )DO( p bytAt: i put: 10 + i ).
FOR( i : 5 )DO( (p bytAt: i) show ).
FOR( i : 5 )DO( ((p bytAt: i) == i) must ).
p bytLen show.
'''
Method['DEMO']['double:'] = 'B arg1 arg1 add ' # Using Bytecodes.
Method['DEMO']['twice:'] = 'T a + a ' # Using TerseTalk.
Method['DEMO']['init'] = 'T one = 0. self'
Method['DEMO']['deposit:'] = 'T one = one + a. nil'
Method['DEMO']['withdraw:'] = 'T one = one - a. nil'
Method['DEMO']['balance'] = 'T one'
Method['INT']['+'] = 'B self arg1 add '
Method['INT']['-'] = 'B self arg1 subtract '
Method['INT']['show'] = 'B self show self'
Op['stop'] = ' goto STOP; '
Op['self'] = ' PUSH(W(fp+K_RCVR));'
Op['arg1'] = ' PUSH(W(fp+K_ARG1));'
Op['arg2'] = ' PUSH(W(fp+K_ARG2));'
Op['arg3'] = ' PUSH(W(fp+K_ARG3));'
Op['a'] = ' PUSH(W(fp+K_ARG1));'
Op['b'] = ' PUSH(W(fp+K_ARG2));'
Op['c'] = ' PUSH(W(fp+K_ARG3));'
Op['d'] = ' PUSH(W(fp+K_ARG4));'
Op['cls_b'] = ' byte n = BYTE(pc); pc += 1; PUSH(ClassVec[n]); '
Op['clsof'] = ' word x = PEEK(0); POKE(0, CLASSOF(x)); '
Op['same'] = ' word x = POP(); word y = PEEK(0); POKE(0, (x==y));'
Op['bytlen'] = ''' // p -> len
word p = PEEK(0);
POKE(0, NUM2OOP(BytLen(p)));
'''
Op['ptrlen'] = ''' // p -> len
word p = PEEK(0);
POKE(0, NUM2OOP(PtrLen(p)));
'''
Op['bytat'] = ''' // p i -> b
word p = POP();
word i = OOP2NUM(PEEK(0));
POKE(0, NUM2OOP(BytAt(p, i)));
'''
Op['ptrat'] = '''
word p = POP();
word i = OOP2NUM(PEEK(0));
POKE(0, PtrAt(p, i));
'''
Op['bytatput'] = '''
word p = PEEK(0);
word i = OOP2NUM(PEEK(2));
word v = PEEK(4);
BytAtPut(p, i, v);
sp += 6;
'''
Op['ptratput'] = '''
word p = PEEK(0);
word i = OOP2NUM(PEEK(2));
word v = PEEK(4);
PtrAtPut(p, i, v);
sp += 6;
'''
Op['forward_jump_b'] = '''
byte n = BYTE(pc); pc += 1;
pc += n;
'''
Op['reverse_jump_b'] = '''
byte n = BYTE(pc); pc += 1;
pc -= n;
'''
Op['forward_bfalse_b'] = '''
byte n = BYTE(pc); pc += 1;
word x = POP();
if (!Truth(x)) {
pc += n;
}
'''
Op['reverse_bfalse_b'] = '''
byte n = BYTE(pc); pc += 1;
word x = POP();
if (!Truth(x)) {
pc -= n;
}
'''
# Get/Put pointer slots.
Op['getp_b'] = '''
byte n = BYTE(pc); pc += 1;
word obj = PEEK(0);
word x = W(obj + n);
POKE(0, x);
'''
Op['putp_b'] = '''
byte n = BYTE(pc); pc += 1;
word obj = POP();
word x = POP();
PUT_WORD(obj+n, x);
'''
# Get/Put byte slots: values are unsigned integers in 0..255.
Op['getb_b'] = '''
byte n = BYTE(pc); pc += 1;
word obj = PEEK(0);
byte x = B(obj + n);
POKE(0, NUM2OOP(x));
'''
Op['putb_b'] = '''
byte n = BYTE(pc); pc += 1;
word obj = POP();
word x = POP();
CHECK3(x&1, 1, x);
CHECK3(x&0xFE00, 0, x);
PUT_BYTE(obj+n, OOP2BYTE(x));
'''
# Store/Recall local variables.
Op['sto0'] = ' word w = POP(); PUT_WORD(fp-2, w);'
Op['sto1'] = ' word w = POP(); PUT_WORD(fp-4, w);'
Op['sto2'] = ' word w = POP(); PUT_WORD(fp-6, w);'
Op['sto3'] = ' word w = POP(); PUT_WORD(fp-8, w);'
Op['sto_b'] = ' byte n = BYTE(pc); pc += 1; word w = POP(); PUT_WORD(fp-2*(n+1), w);'
Op['rcl0'] = ' word w = W(fp-2); PUSH(w);'
Op['rcl1'] = ' word w = W(fp-4); PUSH(w);'
Op['rcl2'] = ' word w = W(fp-6); PUSH(w);'
Op['rcl3'] = ' word w = W(fp-8); PUSH(w);'
Op['rcl_b'] = ' byte n = BYTE(pc); pc += 1; word w = W(fp-2*(n+1)); PUSH(w);'
Op['incr_local_b'] = 'byte n = BYTE(pc); pc += 1; word p = fp-2*(n+1); word w = W(p); PUT_WORD(p, w+2);'
Op['true'] = ' PUSH(trueAddr);'
Op['false'] = ' PUSH(falseAddr);'
Op['nil'] = ' PUSH(nilAddr);'
Op['show'] = ' word w = POP(); printf(" ==$%04x=%u.==\\n", w, w); fflush(stdout); '
Op['lit2pcr'] = '''
PUSH(WORD(pc) - pc);
pc += 2;
'''
Op['lit1pcr'] = '''
byte n = BYTE(pc);
word w = ((word)n) | ((n & 128) ? 0xFF00U : 0x0000U); // SEX.
PUSH(w - pc);
pc += 1;
'''
Op['lit_w'] = '''
PUSH(WORD(pc));
pc += 2;
'''
Op['lit_b'] = '''
byte n = BYTE(pc);
word w = (0x80&n) ? (0xFF80 | (word)n) : (word)n; // SEX.
PUSH(w);
pc += 1;
'''
Op['sym_b'] = '''
byte n = BYTE(pc);
PUSH(SymVec[n]);
pc += 1;
'''
Op['get'] = '''
word off = PEEK(0);
POKE(0, WORD(fp+off));
'''
Op['drop'] = '''
sp += 2;
'''
Op['dup'] = '''
word top = PEEK(0);
PUSH(top);
'''
Op['lognum'] = '''
word id = POP();
word value = POP();
fprintf(stderr, "%04x:=:%04x ", id, value);
'''
Op['must'] = '''
word x = POP();
CHECK3(Truth(x), 1, pc);
'''
# Boolean negation opcode.
# BUGFIX: the ternary operands were swapped -- the old body pushed trueAddr
# for a truthy x (i.e. identity, not negation), contradicting the 'not'
# selector wired to it in the Method table.
Op['not'] = '''
  word x = PEEK(0);
  POKE(0, Truth(x) ? falseAddr : trueAddr);
'''
Op['add'] = '''
word a = POP();
CHECK3(a&1, 1, a);
word b = PEEK(0);
CHECK3(b&1, 1, b);
POKE(0, (0xFFFE & a)+b);
'''
Op['subtract'] = '''
word a = POP();
CHECK3(a&1, 1, a);
word b = PEEK(0);
CHECK3(b&1, 1, b);
word nega = (~a) + 2;
fprintf(stderr, "subtract a=%04x b=%04x nega=%04x z=%04x\\n", a, b, nega, b+nega);
POKE(0, b+nega);
'''
Op['call0_b'] = '''
byte msg = BYTE(pc);
pc += 1;
word rcvr = PEEK(0);
PUSH(msg);
PUSH(0xDE00);
PUSH(pc);
PUSH(fp);
fprintf(stderr, "Old FP = $%04x\\n", fp);
fp = sp;
fprintf(stderr, "New FP = $%04x\\n", fp);
Hex20("STACK fp,pc,de,msg,rcvr...", sp, sp);
word meth = FindMethBySymbolNumber(rcvr, msg);
byte i;
byte numL = B(meth + METH_B_numL);
for (i=0; i<numL; i++) {
PUSH(nilAddr);
}
pc = meth + METH_FLEXSIZE;
PrintWhere();
'''
Op['call1_b'] = '''
byte msg = BYTE(pc);
pc += 1;
word rcvr = PEEK(0);
PUSH(msg);
PUSH(0xDE01);
PUSH(pc);
PUSH(fp);
fprintf(stderr, "Old FP = $%04x\\n", fp);
fp = sp;
fprintf(stderr, "New FP = $%04x\\n", fp);
Hex20("STACK fp,pc,de,msg,rcvr...", sp, sp);
word meth = FindMethBySymbolNumber(rcvr, msg);
byte i;
for (i=0; i<B(meth + METH_B_numL); i++) {
PUSH(nilAddr);
}
pc = meth + METH_FLEXSIZE;
PrintWhere();
'''
Op['call2_b'] = '''
byte msg = BYTE(pc);
pc += 1;
word rcvr = PEEK(0);
PUSH(msg);
PUSH(0xDE02); // This is all that changes..... TODO
PUSH(pc);
PUSH(fp);
fprintf(stderr, "Old FP = $%04x\\n", fp);
fp = sp;
fprintf(stderr, "New FP = $%04x\\n", fp);
Hex20("STACK fp,pc,de,msg,rcvr...", sp, sp);
word meth = FindMethBySymbolNumber(rcvr, msg);
byte i;
for (i=0; i<B(meth + METH_B_numL); i++) {
PUSH(nilAddr);
}
pc = meth + METH_FLEXSIZE;
PrintWhere();
'''
Op['call'] = '''
word rcvr = PEEK(4);
//Hex20("call--rcvr", rcvr, rcvr);
Inspect(rcvr, "call--rcvr");
word msg = PEEK(2);
//Hex20("call--msg", msg, -1);
PrintSymNum(msg, "call--msg");
Inspect(SymVec[msg], "call--msg");
word de = PEEK(0);
Hex20("call--de", de, -1);
CHECK3(de & 0xFFF0, 0xDE00, de);
PUSH(pc);
PUSH(fp);
fprintf(stderr, "Old FP = $%04x\\n", fp);
fp = sp;
fprintf(stderr, "New FP = $%04x\\n", fp);
Hex20("STACK fp,pc,de,msg,rcvr...", sp, sp);
word meth = FindMethBySymbolNumber(rcvr, msg);
Inspect(meth, "call--meth");
byte i;
byte num_locals = BF(meth, METH_B_numL);
fprintf(stderr, "Num Locals = %d\\n", num_locals);
for (i=0; i<num_locals; i++) {
PUSH(nilAddr);
}
if (BytLen(meth)) {
pc = FlexAddrAt(meth, 0);
}
PrintWhere();
'''
Op['return'] = '''
word result = PEEK(0);
sp = fp;
fp = POP();
fprintf(stderr, "Popped FP = $%04x\\n", fp);
if (!fp) {
fprintf(stderr, "Finishing with Zero FP.\\n");
goto STOP;
}
pc = POP();
if (!pc) {
fprintf(stderr, "Finishing with Zero PC.\\n");
goto STOP;
}
fprintf(stderr, "Popped PC = $%04x\\n", pc);
word nargs = POP();
fprintf(stderr, "Peeked nargs = $%04x\\n", nargs);
nargs &= 255;
sp += 2 * (nargs + 2 /* msg, rcvr*/ );
PUSH(result);
PrintWhere();
'''
def AllClassesPreorder(start=Ur):
  """Return start followed by all its subclasses, preorder, siblings name-sorted."""
  result = [start]
  children = sorted(start.__subclasses__(), key=lambda c: c.__name__)
  for child in children:
    result.extend(AllClassesPreorder(child))
  return result
# Gather all Konstants (module-level K_* names) for emission into C.
Konsts = {}
for k, v in globals().items():
  if k.startswith('K_'):
    Konsts[k] = v
# First create NIL, FALSE, TRUE instances, in that order.
# Order matters: it fixes their addresses at the start of the image.
NIL, FALSE, TRUE = NilT(), FalseT(), TrueT()
NIL.Reify(), FALSE.Reify(), TRUE.Reify()
def FixSlotsOnClass(c, inst):
  """Validate python class c's slot layout and fill in target class object inst.

  Verifies B_ offsets are 0..n-1 and P_ offsets follow contiguously
  (two bytes each), then sets b_numB/b_numP/b_bSize/b_flags and records
  the slot/flex lists on inst for later compilation.
  """
  print 'FNORD', inst.nick, dir(c)
  # Check the bslots, pslots, & flexes; compute b_flags, b_bSize, b_numB, b_numP
  bslots = [(k, getattr(c, k)) for k in dir(c) if k.startswith('B_')]
  pslots = [(k, getattr(c, k)) for k in dir(c) if k.startswith('P_')]
  flexes = [(k, getattr(c, k)) for k in dir(c) if k.startswith('FLEX_')]
  bslots = sorted(bslots, key=lambda pair: pair[1])
  pslots = sorted(pslots, key=lambda pair: pair[1])
  print 'cBPF', c, bslots, pslots, flexes
  for i, (k, v) in zip(range(len(bslots)), bslots):
    if i != v: raise Exception("Bad B_ numbers in class %s: %s" % (c, bslots))
  for i, (k, v) in zip(range(len(pslots)), pslots):
    if len(bslots)+2*i != v: raise Exception("Bad P_ numbers in class %s: %s" % (c, pslots))
  inst.b_numB = len(bslots)
  inst.b_numP = len(pslots)
  inst.b_bSize = inst.b_numB + 2*inst.b_numP
  if flexes:
    assert len(flexes) == 1 # ThereCanOnlyBeOne
    if flexes[0][0]=='FLEX_BYTES':
      inst.b_flags = K_FLEX_BYTES
    elif flexes[0][0]=='FLEX_PTRS':
      inst.b_flags = K_FLEX_PTRS
    else:
      raise Exception('Bad FLEX records in class %s: %s' % (c, flexes))
  else:
    inst.b_flags = 0
  inst.bslots = bslots
  inst.pslots = pslots
  inst.flexes = flexes
# Create class objects.  Walk the python hierarchy preorder so every
# superclass's target object exists before its subclasses reference it.
ClassDict = {}
for c in AllClassesPreorder():
  inst = Cls()
  inst.pycls = c
  inst.nick = c.__name__
  inst.name = c.__name__.upper()
  # Class name is stored un-interned in the object's flex bytes.
  inst.flexstring = inst.name
  inst.flexsize = len(inst.name)
  inst.flexbytes = [ord(s) for s in inst.name]
  inst.b_this = len(ClassDict) + 1 # Skip the 0 class, meaning unused memory.
  ClassVec[inst.b_this] = inst
  inst.sup = None if c is Ur else c.__bases__[0]
  inst.p_sup = NIL if c is Ur else ClassDict[c.__bases__[0].__name__.upper()]
  inst.p_meths = NIL
  inst.Reify()
  ClassDict[inst.name] = inst
  FixSlotsOnClass(c, inst)
def WriteInspectors():
  """Emit C FieldInfo/ClassInfo tables and InitInfo() to stdout.

  Used by the target's debugger/inspector to decode object layouts.
  (Python-2 print statements.)
  """
  for cname, c in sorted(ClassDict.items()):
    print 'struct FieldInfo FI_%s[] = {' % (
        c.name)
    for bs in c.bslots:
      fname, foff = bs
      print '  { "%s", 1, %d }, ' % (fname, foff)
    for ps in c.pslots:
      fname, foff = ps
      print '  { "%s", 2, %d }, ' % (fname, foff)
    print '  { NULL, 0, 0 }'
    print '};'
    print '''
struct ClassInfo CI_%s = {
  "%s",
  %d,
  FI_%s};
''' % (c.name, c.name, c.b_this, c.name)
  print 'void InitInfo() {'
  for cname, c in sorted(ClassDict.items()):
    print '  ClassInfos[%d] = &CI_%s;' % (
        c.b_this, c.name)
  print '}'
# Create metaclass objects.  Every class FOO gets a FOOCLS metaclass;
# the metaclass chain parallels the class chain, rooted at CLS.
METACLS = ClassDict['METACLS']
for c in AllClassesPreorder():
  meta = Metacls()
  meta.nick = c.__name__ + 'ClS'
  meta.name = c.__name__.upper() + 'CLS'
  if True:
    # Keep the metaclass name in flex bytes (the 'else' arm would omit it).
    meta.flexstring = meta.name
    meta.flexsize = len(meta.name)
    meta.flexbytes = [ord(s) for s in meta.name]
  else:
    meta.flexstring = ''
    meta.flexsize = 0
    meta.flexbytes = []
  meta.b_this = len(ClassDict) + 1 # Skip the 0 class, meaning unused.
  ClassVec[meta.b_this] = meta
  meta.p_sup = ClassDict['CLS'] if c is Ur else ClassDict[c.__bases__[0].__name__.upper() + 'CLS']
  meta.sup = meta.p_sup
  meta.p_meths = NIL
  meta.Reify()
  ClassDict[meta.name] = meta
  FixSlotsOnClass(METACLS, meta)
# Link metaclass class objects: each class's class is its metaclass, each
# metaclass's class is METACLS, and class/metaclass are mutual partners.
for c in AllClassesPreorder():
  meta = ClassDict[c.__name__.upper() + 'CLS']
  meta.b_cls = METACLS.b_this
  inst = ClassDict[c.__name__.upper()]
  inst.b_cls = meta.b_this
  inst.b_partner = meta.b_this
  meta.b_partner = inst.b_this
#### Compile methods and intern symbols.
Op = dict([(k.upper(), v) for k,v in Op.items()]) # Normalize keys upper.
Method = dict([(k.upper(), v) for k,v in Method.items()]) # Normalize keys upper.
# NOTE(review): mutating Method while iterating .items() is safe on
# Python 2 (list snapshot) but would raise on Python 3.
for k,v in Method.items(): # Also normalize inner keys (method names).
  for k2 in v:
    Intern(k2.upper())
  Method[k] = dict([(k2.upper(), v2) for k2,v2 in v.items()])
print '=== Op:', repr(Op)
print '=== Method:', repr(Method)
# Intern every selector so call opcodes can reference symbol numbers.
for cname, m in sorted(Method.items()):
  for mname, v in sorted(m.items()):
    Intern(mname.upper())
# Opcode numbering: STOP is always opcode 0, the rest alphabetical.
OpList = ['STOP'] + sorted([k.upper() for k in Op if k != 'STOP'])
for i, op in zip(range(len(OpList)), OpList):
  OpNums[op] = i
print '=== OpNums:', repr(OpNums)
def CompileMethod(cname, mname, v):
# Compile one method definition `v` for class `cname`, selector `mname`.
# `v` is a tagged string: 'T...' = source text compiled via CompileToCodes,
# 'C...' = inline C body (turned into a fresh opcode), 'B...' = space-
# separated bytecode mnemonics/operands.
# Returns (explain, codes, numL): a human-readable trace, the numeric
# bytecode list, and the number of local slots.
numL = 2
if v[0] == 'T':
codes, numL = CompileToCodes(v[1:], ClassDict[cname])
# Change to format 'B' for text bytecode string.
v = 'B ' + ' '.join([str(c) for c in codes])
if v[0] == 'C':
# Create an opcode for the C code.
opname = ('%s_%s_c' % (cname, mname)).upper().replace(':', '_')
Op[opname] = v[1:]
opnum = len(OpList)
OpList.append(opname)
OpNums[opname] = opnum
# Now pretend it was a B definition, to call the new opcode.
v = 'B %s' % opname
if v[0] != 'B':
raise Exception('Only Bytecode (B) supported: %s' % repr(v))
v = v[1:]
explain, codes = [], []
ww = v.split()
print 'Compiling (%s %s): %s' % (cname, mname, ww)
# Marks maps a numeric label -> bytecode offset; Fixes records forward/back
# jump sites to patch once all labels are known.
Marks = {}
Fixes = []
for w in ww:
if INT_PATTERN(w):
# Literal integer operand byte.
explain.append(EvalInt(w))
codes.append(EvalInt(w))
elif SYM_PATTERN(w):
# Interned-symbol literal: emits LIT_B <symbol number>.
num = Intern(SYM_PATTERN(w).group(1).upper())
explain.append('lit_b')
explain.append(num)
codes.append(OpNums['LIT_B'])
codes.append(num)
elif CLASS_PATTERN(w):
# Class literal: emits CLASS_B <class b_this index>.
cn = CLASS_PATTERN(w).group(1).upper()
c = ClassDict.get(cn)
explain.append('CLASS_B')
explain.append(c.b_this)
codes.append(OpNums['CLASS_B'])
codes.append(c.b_this)
elif MARK_PATTERN(w):
# Label handling: 'mark N' defines label N here; 'jump N'/'bfalse N'
# emit a placeholder (opcode name string + 0 operand) patched below.
verb, target = MARK_PATTERN(w).groups()
target = int(target)
if verb == 'mark':
Marks[target] = len(codes)
elif verb == 'jump':
Fixes.append((target, len(codes)))
codes.append('jump_b')
codes.append(0)
elif verb == 'bfalse':
Fixes.append((target, len(codes)))
codes.append('bfalse_b')
codes.append(0)
else:
# NOTE(review): raising a string is invalid in Python 2.6+ (raises
# TypeError instead); this should be `raise Exception('bad')`.
raise 'bad'
else:
# Plain opcode mnemonic.
num = OpNums.get(w.upper())
if num is None:
raise Exception('No such opcode: [%s %s]: %s: %s' % (cname, mname, w, repr(v)))
explain.append(w)
codes.append(OpNums[w.upper()])
# Every method implicitly ends in RETURN.
explain.append('RETURN');
codes.append(OpNums['RETURN']);
# Patch jump placeholders: backwards jumps become REVERSE_* with distance
# back from the byte after the operand; forwards become FORWARD_* with
# distance ahead of it.
for (mark, loc) in Fixes:
target = Marks[mark]
if target < loc:
codes[loc] = OpNums[('reverse_' + codes[loc]).upper()]
codes[loc+1] = loc + 2 - target
else:
codes[loc] = OpNums[('forward_' + codes[loc]).upper()]
codes[loc+1] = target - loc - 2
print 'CompileMethod: %s %s: %s' % (cname, mname, explain)
print 'CompileMethod: %s %s: %s' % (cname, mname, codes)
return explain, codes, numL
# --- Compile all methods, then reify method objects, interned symbols and
# --- their backing packed strings into the image memory.
CompiledMethods = {}
for cname, m in sorted(Method.items()):
cname = cname.upper();
print 'CNAME: %s METHODS: %s' % (cname, m)
for mname, v in sorted(m.items()):
mname = mname.upper();
explain, codes, numL = CompileMethod(cname, mname, v)
CompiledMethods[(cname, mname)] = (codes, numL)
# Build a Meth object per compiled method and prepend it onto its class's
# p_meths linked list; the bytecode becomes the object's flex payload.
for (cname,mname),(codes,numL) in sorted(CompiledMethods.items()):
meth = Meth()
cls = ClassDict[cname]
meth.b_cls = ClassDict['METH'].b_this
meth.b_name = Intern(mname.upper())
meth.b_owner = cls.b_this
meth.b_numL = numL
meth.p_next = cls.p_meths # prepend to linked list.
cls.p_meths = meth
meth.flexsize = len(codes)
meth.flexbytes = codes
meth.Reify()
# Prepare packed strings with all interned symbols.
# Symbols are concatenated into MAX_FLEX_BYTES-sized chunks; InternLoc maps
# each symbol to (1-based chunk number, offset within chunk).
InternLoc = {}
InternSym = {}
PackedStrings = ['']
for (k, v) in InternDict.items():
s = PackedStrings[-1]
if len(s) + len(k) > MAX_FLEX_BYTES:
s = ''
PackedStrings.append(s)
InternLoc[k] = (len(PackedStrings), len(s))
s += k
PackedStrings[-1] = s # Put the new string back.
# Reify the interned symbols.
for (k, v) in InternDict.items():
sym = Sym()
sym.b_intern = v
sym.Reify()
SymVec[v] = sym
InternSym[k] = sym
# Reify the packed strings.
PackedList = []
for ps in PackedStrings:
po = ArrByt()
po.flexstring = ps
po.flexsize = len(ps)
po.flexbytes = [ord(s) for s in ps]
assert len(po.flexbytes) == po.flexsize
po.Reify()
print 'PackedString:', po.nick, po.addr, po.basesize, po.flexsize, po.flexbytes
PackedList.append(po)
# Fill in symbol fields.
# Each symbol records where its text lives inside its packed string.
for (k, v) in InternSym.items():
v.str = k
packNum, offset = InternLoc[k]
v.b_begin = offset
v.b_len = len(k)
v.p_guts = PackedList[packNum-1].addr
# Finalize every reified object: compute the GC size nibble (asserted to fit
# in 7 bits), bind its runtime class, and write it into the Memory image.
for m in MemorableList:
m.b_gcsize = ((m.size-2)>>1, m.nick, m.addr, m.basesize, m.__class__.__name__)
assert m.b_gcsize[0] > 0, vars(m)
assert m.b_gcsize[0] < 128, vars(m)
m.CLS = ClassDict[m.__class__.__name__.upper()]
if type(m) is not Cls:
# Do not change classes, which are already correct.
m.b_cls = m.CLS.b_this
m.Materialize()
pass;pass;pass
def GenerateH():
# Emit the generated C header (_generated.h) to stdout: K_* constants,
# one OP_* define per opcode, and per-class slot-offset defines.
print '''/* This is Generated Code */
#ifndef TERSETALK9_GENERATED_H_
#define TERSETALK9_GENERATED_H_
#include "vm.h"
'''
# Export every module-global constant named K_*.
for k,v in globals().items():
if k.startswith('K_'):
print '#define %s %s' % (k, v)
for op in OpList:
print '#define OP_%-17s %d' % (op, OpNums[op])
print
# Per-class field offsets: byte slots occupy 1 unit, pointer slots 2, and
# the flex-size field 1.  The asserts re-check the precomputed offsets.
for c in ClassVec[1:]:
if not c: continue
i = 0
for e, off in c.bslots:
assert i == off
print ' #define %s_%s %d' % (c.name, e, off)
i += 1
for e, off in c.pslots:
assert i == off
print ' #define %s_%s %d' % (c.name, e, off)
i += 2
if c.flexes:
print ' #define %s_FLEXSIZE %d' % (c.name, c.flexes[0][1])
i += 1
print
print '#endif'
def GenerateC():
# Emit the generated C source (_generated.c) to stdout: the debug opcode
# name table, Boot() with the key bootstrap addresses, and the Loop()
# bytecode interpreter whose switch cases are spliced from the Op table.
print '''/* This is Generated Code */
#include "vm.h"
#include "_generated.h"
'''
print '''
#ifdef DEBUG
char* OpNames[] = {
'''
for e in OpList:
print ' "%s",' % e
print '''
NULL,
};
#endif
'''
# Boot(): hard-code the image addresses of the bootstrap singletons.
print '''
void Boot() {
'''
print ' nilAddr = 0x%04x;' % NIL.addr
print ' falseAddr = 0x%04x;' % FALSE.addr
print ' trueAddr = 0x%04x;' % TRUE.addr
print ' intAddr = 0x%04x;' % ClassDict['INT'].addr
print ' clsAddr = 0x%04x;' % ClassDict['CLS'].addr
print '''
}
'''
# Loop(): fetch-decode-execute; one switch case per opcode.
print '''
void Loop() {
while (1) {
#ifdef DEBUG
Hex20("pc", pc, pc);
Hex20("fp", fp, fp);
Hex20("sp", sp, sp);
#endif
byte opcode = BYTE(pc);
++pc;
#ifdef DEBUG
fprintf(stderr, "Step: opcode: $%02x=%d.=%s\\n", opcode, opcode, OpNames[opcode]);
#endif
switch (opcode) {
'''
for op in OpList:
print '\tcase OP_%s: {' % op
# Trace line: _B/_W opcodes also dump their inline operand.
if op.endswith('_B'):
print '\t fprintf(stderr, "OP: %s $%%02x=%%d.\\n", B(pc), B(pc));' % op
elif op.endswith('_W'):
print '\t fprintf(stderr, "OP: %s $%%04x=%%d.\\n", W(pc), W(pc));' % op
else:
print '\t fprintf(stderr, "OP: %s\\n");' % op
# Splice the C body registered for this opcode into the case.
# NOTE(review): `done` is reset inside the loop, so the duplicate-body
# guard below can never fire; it looks like it was meant to be
# initialized before the `for k,v` loop.
for k,v in Op.items():
done = False
if k.upper() == op:
if done:
raise Exception('already done Op[%s]' % op)
for s in v.split('\n'):
print '\t\t%s' % s
done = True
print '\t}'
print '\tbreak;'
print
print '''
}
}
STOP:
return;
}
'''
def GenerateImage():
# Write the binary boot image to stdout as tagged segments:
# 'T/' version, 'C/' class vector, 'S/' symbol vector, 'M/' raw memory,
# '!/' terminator.  Each tag is followed by a 16-bit payload length.
def w(x): sys.stdout.write(x)
def w2(x): w('%c%c' % (Hi(x), Lo(x)))
def w1(x): w('%c' % x)
# Terse talk version number 1
# NOTE(review): despite the comment, this writes the pair (2, 1) —
# presumably a (major, minor)-style version; confirm against the loader.
w('T/'); w2(2); w2(1)
# Class vector
n = len(ClassDict)
w('C/'); w2(n*2)
for c in ClassVec[:n]:
w2(0 if c==0 else c.addr) # Initial 0 for unused mem.
# Intern Symbol vector
n = len(InternDict)
w('S/'); w2(n*2)
for y in SymVec[:n]:
w2(y.addr)
# Memory bytes
n = Here
w('M/'); w2(n)
for x in Memory[:n]:
if x == '^': w1(0) # padding
#elif x == '#': raise 'bad' # w1(0) # unused part of page
#elif x == '@': raise 'bad' # w1(0) # last in page
elif type(x) is tuple: w1(x[0])
elif type(x) is int: w1(x)
else: raise Exception('weird memory: %s' % x)
# End with a zero-length segment with '!/' name.
w('!/'); w2(0);
pass
# --- Debug dump of the built image, resource usage report, then write the
# --- generated header, C source, and binary image files.
print dir(TRUE)
print vars(TRUE)
print
for m in MemorableList:
print 'M:', m, vars(m)
print
for name, cls in ClassDict.items():
print name, cls, (cls.__class__.__bases__)
print
print 'SymVec:', SymVec
print
print 'InternSym:', len(InternSym), InternSym
print
print 'InternDict:', len(InternDict), InternDict
print
print 'ClassDict:', len(ClassDict), ClassDict
print
print 'ClassVec:', ClassVec
print
print 'OpList:', OpList
print
print 'Memory:', Here, Memory[:Here]
print
# Report how full each fixed-size VM resource is.
for resource, used, maximum in [
('Memory', Here, 65536),
('Classes', len(ClassDict), 255),
('Symbols', len(InternDict), 256),
('Opcodes', len(OpNums), 256),
]:
print '%10s %5.2f%% %6d/%6d full' % (resource, 100.0 * used / maximum, used, maximum)
# The Generate* functions print to stdout, so emit each output file by
# temporarily rebinding sys.stdout.  Note the image is opened in binary mode.
sys.stdout = open('_generated.h', 'w')
GenerateH()
sys.stdout.close()
sys.stdout = open('_generated.c', 'w')
GenerateC()
WriteInspectors()
sys.stdout.close()
sys.stdout = open('_generated.image', 'wb')
GenerateImage()
sys.stdout.close()
pass
|
nilq/baby-python
|
python
|
"""
This file contains the full ImageFeaturizer class, which allows users to upload
an image directory, a csv containing a list of image URLs, or a directory with a
csv containing names of images in the directory.
It featurizes the images using pretrained, decapitated InceptionV3 model, and
saves the featurized data to a csv, as well as within the ImageFeaturizer class
itself. This allows data scientists to easily analyze image data using simpler models.
Functionality:
1. Build the featurizer model. The class initializer ImageFeaturizer() takes as input:
depth : int
1, 2, 3, or 4, depending on how far down you want to sample the featurizer layer
autosample : bool
a boolean flag signalling automatic downsampling
downsample_size : int
desired number of features to downsample the final layer to. Must be an
integer divisor of the number of features in the layer.
2. Load the data. The self.load_data() function takes as input:
image_columns : str
the name of the column holding the image data, if a csv exists,
or what the name of the column will be, if generating the csv
from a directory
image_path : str
the path to the folder containing the images. If using URLs, leave blank
csv_path : str
the path to the csv. If just using a directory, leave blank.
If csv exists, this is the path where the featurized csv will be
generated.
scaled_size : tuple
The size that the images get scaled to. Default is (299, 299)
grayscale : bool
Decides if image is grayscale or not. May get deprecated. Don't
think it works on the InceptionV3 model due to input size.
3. Featurize the data. The self.featurize_preloaded_data() function takes no input, and
featurizes the loaded data, writing the new csvs to the same path as the loaded csv
Also adds a binary "image_missing" column automatically, for any images that are missing
from the image list.
3a. Users can also load and featurize the data in one pass, with the
self.featurize_data function, which takes the same input as the
load_data function and performs the featurization automatically.
"""
import logging
import os
import math
import time
import numpy as np
import trafaret as t
import pandas as pd
from .build_featurizer import build_featurizer, supported_model_types
from .feature_preprocessing import preprocess_data, _image_paths_finder
from .data_featurizing import featurize_data, create_features
logger = logging.getLogger(__name__)
# Input image size (height, width) that each supported pretrained model expects.
SIZE_DICT = {'squeezenet': (227, 227), 'vgg16': (224, 224), 'vgg19': (224, 224),
'resnet50': (224, 224), 'inceptionv3': (299, 299), 'xception': (299, 299)}
# Fallback output location used when no csv path is supplied to save_csv().
DEFAULT_NEW_CSV_PATH = '{}{}'.format(os.path.expanduser('~'), '/Downloads/images.csv')
class ImageFeaturizer:
"""
This object can load images, rescale, crop, and vectorize them into a
uniform batch, and then featurize the images for use with custom classifiers.
Methods
------------------
__init__(depth, autosample,
downsample_size):
--------------------------------
Initialize the ImageFeaturizer. Build the featurizer model with the
depth and feature downsampling specified by the inputs.
featurize(image_columns, image_path,
csv_path, new_csv_path, batch_processing, batch_size, ...):
--------------------------------
Loads image directory and/or csv into the model, and
featurizes the images (optionally in batches)
load_data(image_columns, image_path, image_dict, csv_path,
grayscale, save_data):
--------------------------------
Loads image directory and/or csv into the model, and vectorize the
images for input into the featurizer
featurize_preloaded_data():
--------------------------------
Featurize the loaded data, append the features to the csv, and
return the full dataframe
save_csv(new_csv_path, ...):
--------------------------------
Write the featurized dataframe (and optionally features only) to csv
clear_input(confirm):
--------------------------------
Reset the featurizer's loaded data and dataframes
"""
# trafaret guard validates argument types/ranges before __init__ runs.
@t.guard(depth=t.Int(gte=1, lte=4),
autosample=t.Bool,
downsample_size=t.Int(gte=0),
model=t.Enum(*supported_model_types.keys()))
def __init__(self,
depth=1,
autosample=False,
downsample_size=0,
model='squeezenet'
):
"""
Initializer.
Loads an initial InceptionV3 pretrained network, decapitates it and
downsamples according to user specifications.
Parameters:
----------
depth : int
How deep to decapitate the model. Deeper means less specific but
also less complex
autosample : bool
If True, feature layer is automatically downsampled to the right size.
downsample_size: int
The number of features to downsample the featurizer to
model : str
Which pretrained network to use; one of the keys of
supported_model_types (default 'squeezenet')
Returns:
--------
None. Initializes and saves the featurizer object attributes.
"""
# BUILDING THE MODEL #
# NOTE(review): uses the root logger (logging.info) rather than the
# module-level `logger` used elsewhere — confirm which is intended.
logging.info("Building the featurizer.")
featurizer = build_featurizer(depth, autosample,
downsample_size, model_str=model.lower())
# Saving initializations of model
self.depth = depth
self.autosample = autosample
self.downsample_size = downsample_size
# Number of features per image column = width of the model's last layer.
self.num_features = featurizer.layers[-1].output_shape[-1]
# Save the model
self.model_name = model.lower()
self.featurizer = featurizer
# Bound method: calling self.visualize() prints the Keras model summary.
self.visualize = featurizer.summary
# Initializing preprocessing variables for after we load and featurize the images
self.data = np.zeros((1))
self.features = pd.DataFrame()
self.df_original = pd.DataFrame()
self.full_dataframe = pd.DataFrame()
self.df_features = pd.DataFrame()
self.csv_path = ''
self.image_dict = {}
self.image_columns = ''
self.image_path = ''
# Image scaling and cropping
self.scaled_size = (0, 0)
self.crop_size = (0, 0)
self.number_crops = 0
self.isotropic_scaling = False
def load_data(self,
image_columns,
image_path='',
image_dict='',
csv_path='',
grayscale=False,
save_data=True,
# crop_size = (299, 299),
# number_crops = 0,
# random_crop = False,
# isotropic_scaling = True
):
"""
Load image directory and/or csv, and vectorize the images for input into the featurizer.
Parameters:
----------
image_columns : str
the name of the column holding the image data, if a csv exists,
or what the name of the column will be, if generating the csv
from a directory
image_path : str
the path to the folder containing the images. If using URLs, leave blank
image_dict : dict
optional, pre-built mapping of column name -> list of image paths;
passed in by batch processing so the dict is not rebuilt per batch
csv_path : str
the path to the csv. If just using a directory, leave blank.
If csv exists, this is the path where the featurized csv will be
generated.
save_data : bool
if True, cache the loaded tensor and paths on the featurizer
(batch processing passes False to avoid holding every batch)
Returns:
--------
full_image_data : np.ndarray
the vectorized image tensor, one leading axis per image column
# These features haven't been implemented yet.
# grayscale : bool
# Flags the image as grayscale
#
# isotropic_scaling : bool
# If True, images are scaled keeping proportions and then cropped
#
# crop_size: tuple
# If the image gets cropped, decides the size of the crop
#
# random_crop: bool
# If False, only take the center crop. If True, take random crop
#
"""
# Fix column headers and image path if they haven't been done, build path for new csv
image_columns, image_path = _input_fixer(image_columns, image_path)
# If there's no dataframe, build it!
if csv_path == '':
if len(image_columns) > 1:
raise ValueError('If building the dataframe from an image directory, the featurizer'
'can only create a single image column. If two image columns are '
'needed, please create a csv to pass in.')
# If the image_dict hasn't been passed in (which only happens in batch processing),
# build the full image dict and save the original dataframe
if not image_dict:
image_dict, df = _build_image_dict(image_path, csv_path,
image_columns)
self.df_original = df
self.full_dataframe = df
self.image_columns = image_columns
self.image_dict = image_dict
scaled_size, full_image_data = \
self._load_data_helper(self.model_name, image_columns,
image_path, image_dict, csv_path, grayscale)
# Save all of the necessary data to the featurizer
if save_data:
self.data = full_image_data
self.csv_path = csv_path
self.image_path = image_path
self.scaled_size = scaled_size
return full_image_data
# trafaret guard validates argument types before the method runs.
@t.guard(batch_data=t.Type(np.ndarray),
image_columns=t.List(t.String(allow_blank=True)) | t.String(allow_blank=True),
batch_processing=t.Bool,
features_only=t.Bool,
save_features=t.Bool,
save_csv=t.Bool,
new_csv_path=t.String(allow_blank=True),
omit_model=t.Bool,
omit_depth=t.Bool,
omit_output=t.Bool,
omit_time=t.Bool,
)
def featurize_preloaded_data(self, batch_data=np.zeros((1)), image_columns='',
batch_processing=False, features_only=False,
save_features=False, save_csv=False, new_csv_path='',
omit_model=False, omit_depth=False, omit_output=False,
omit_time=False):
"""
Featurize the loaded data, returning the dataframe and writing the features
and the full combined data to csv
Parameters
----------
batch_data : np.ndarray
image tensor to featurize; defaults to the tensor cached by load_data
image_columns : str or list
image column header(s); defaults to the columns cached by load_data
batch_processing : bool
True when called per-batch by _batch_processing (single column only)
features_only : bool
if True, return just the features dataframe, skipping the original csv
save_features : bool
if True, cache the features dataframe on self.features
save_csv : bool
if True, also write the result via self.save_csv
new_csv_path, omit_model, omit_depth, omit_output, omit_time
forwarded to save_csv for robust csv naming
Returns
-------
full_dataframe or df_features: pandas.DataFrame
If features_only, this returns a Dataframe containing the features.
Otherwise, it returns a DataFrame containing the features appended to the
original csv. If save_csv is set to True, it also writes csv's
to the same path as the csv containing the list of names.
"""
# If the batch data isn't passed in, then load the full data from the attributes
if np.array_equal(batch_data, np.zeros((1))):
batch_data = self.data
if image_columns == '':
image_columns = self.image_columns
if isinstance(image_columns, str):
image_columns = [image_columns]
# Check data has been loaded, and that the data was vectorized correctly
if np.array_equal(batch_data, np.zeros((1))):
raise IOError('Must load data into the model first. Call load_data.')
# If batch processing, make sure we're only doing a single column at a time.
# Otherwise, make sure the number of columns matches the first dimension of the data
if batch_processing:
assert len(image_columns) == 1 or isinstance(image_columns, str)
else:
assert len(image_columns) == batch_data.shape[0]
logging.info("Trying to featurize data.")
# Initialize featurized data vector with appropriate size
# (one num_features-wide band per image column)
features = np.zeros((batch_data.shape[1],
self.num_features * len(image_columns)))
# Get the image features
df_features = self._featurize_helper(
features, image_columns, batch_data)
# Save features if boolean set to True
if save_features:
self.features = df_features
# If called with features_only, returns only the features
if features_only:
return df_features
# Save the image features with the original dataframe
full_dataframe = pd.concat([self.df_original, df_features], axis=1)
# If batch processing, this is only the batch dataframe. Otherwise, this is the actual
# full dataframe.
if not batch_processing:
self.full_dataframe = full_dataframe
# Save csv if called
if save_csv:
self.save_csv(new_csv_path=new_csv_path, omit_model=omit_model, omit_depth=omit_depth,
omit_output=omit_output, omit_time=omit_time, save_features=save_features)
return full_dataframe
# trafaret guard validates argument types before the method runs.
@t.guard(image_columns=t.List(t.String(allow_blank=True)) | t.String(allow_blank=True),
image_path=t.String(allow_blank=True),
csv_path=t.String(allow_blank=True),
new_csv_path=t.String(allow_blank=True),
batch_processing=t.Bool,
batch_size=t.Int,
save_data=t.Bool,
save_features=t.Bool,
save_csv=t.Bool,
omit_time=t.Bool,
omit_model=t.Bool,
omit_depth=t.Bool,
omit_output=t.Bool,
verbose=t.Bool,
grayscale=t.Bool
)
def featurize(self,
image_columns,
image_path='',
csv_path='',
new_csv_path='',
batch_processing=True,
batch_size=1000,
save_data=False,
save_features=False,
save_csv=False,
omit_time=False,
omit_model=False,
omit_depth=False,
omit_output=False,
verbose=True,
grayscale=False
# crop_size = (299, 299),
# number_crops = 0,
# random_crop = False,
# isotropic_scaling = True
):
"""
Load image directory and/or csv, and vectorize the images for input into the featurizer.
Then, featurize the data.
Parameters:
----------
image_columns : list of str
list of the names of the column holding the image data, if a csv exists,
or what the name of the column will be, if generating the csv
from a directory
image_path : str
the path to the folder containing the images. If using URLs, leave blank
csv_path : str
the path to the csv. If just using a directory, leave blank, and
specify the path for the generated csv in new_csv_path.
If csv exists, this is the path where the featurized csv will be
generated.
new_csv_path : str
the path to the new csv, if one is being generated from a directory.
If no csv exists, this is the path where the featurized csv will
be generated
batch_processing : bool
if True (default), load and featurize the images batch_size at a time
batch_size : int
number of images per batch; setting it to 0 also disables batching
grayscale : bool
Decides if image is grayscale or not. May get deprecated. Don't
think it works on the InceptionV3 model due to input size.
# These features haven't been implemented yet.
# isotropic_scaling : bool
# if True, images are scaled keeping proportions and then cropped
#
# crop_size: tuple
# if the image gets cropped, decides the size of the crop
#
# random_crop: bool
# If False, only take the center crop. If True, take random crop
#
Returns:
--------
full_dataframe :
Dataframe containing the features appended to the original csv.
Also writes csvs containing the features only and the full dataframe
to the same path as the csv containing the list of names
"""
if not image_path and not csv_path:
raise ValueError("Must specify either image_path or csv_path as input.")
# Set logging level
if verbose:
logger.setLevel(logging.INFO)
# Fix column headers and image path if necessary
image_columns, image_path = _input_fixer(image_columns, image_path)
# Find the full image dict and save the original dataframe. This is required early to know
# how many images exist in total, to control batch processing.
full_image_dict, df_original = _build_image_dict(image_path, csv_path,
image_columns)
# Save the fixed inputs and full image dict
self.df_original = df_original
self.image_columns = image_columns
self.image_dict = full_image_dict
# Users can turn off batch processing by either setting batch_processing to false, or
# setting batch_size to 0
if batch_processing and batch_size:
# Perform batch processing, and save the full dataframe and the full features dataframe
features_df = self._batch_processing(full_image_dict, image_columns,
image_path, csv_path,
batch_size, grayscale)
# If batch processing is turned off, load the images in one big batch and features them all
else:
logger.info("Loading full data tensor without batch processing. If you "
"experience a memory error, make sure batch processing is enabled.")
full_data = self.load_data(image_columns, image_path, full_image_dict, csv_path,
grayscale, save_data)
features_df = \
self.featurize_preloaded_data(full_data, image_columns=image_columns,
features_only=True)
# Save the full dataframe with the features
full_df = pd.concat([df_original, features_df], axis=1)
self.full_dataframe = full_df
# Save features and csv if flags are enabled
if save_features:
self.features = features_df
if save_csv:
self.save_csv(new_csv_path=new_csv_path, omit_model=omit_model, omit_depth=omit_depth,
omit_output=omit_output, omit_time=omit_time, save_features=save_features)
# Return the full featurized dataframe
return full_df
def save_csv(self, new_csv_path='', omit_model=False, omit_depth=False,
             omit_output=False, omit_time=False, save_features=False):
    """
    Write the featurized dataframe to csv.

    Parameters:
    ----------
    new_csv_path : str
        Where to write the csv. If blank, falls back to the csv path used for
        loading (or DEFAULT_NEW_CSV_PATH) and builds a robust name from the
        model/depth/output/time switches below.
    omit_model, omit_depth, omit_output, omit_time : bool
        Omit the corresponding section from the auto-generated filename.
    save_features : bool
        If True, also write a "<name>_features_only<ext>" csv containing just
        the image features.

    Raises:
    -------
    AttributeError
        If no dataframe has been featurized yet.
    """
    if self.full_dataframe.empty:
        raise AttributeError('No dataframe has been featurized.')
    # Save the name and extension separately, for robust naming
    if not new_csv_path:
        new_csv_path = self.csv_path or DEFAULT_NEW_CSV_PATH
        csv_name, ext = os.path.splitext(new_csv_path)
        name_path = _named_path_finder("{}_featurized".format(csv_name), self.model_name,
                                       self.depth, self.num_features, omit_model, omit_depth,
                                       omit_output, omit_time)
    else:
        name_path, ext = os.path.splitext(new_csv_path)
    _create_csv_path(name_path)
    logger.warning("Saving full dataframe to csv as {}{}".format(name_path, ext))
    self.full_dataframe.to_csv("{}{}".format(name_path, ext), index=False)
    if save_features:
        logger.warning("Saving features to csv as {}_features_only{}".format(name_path, ext))
        # BUG FIX: the features are stored on self.features (see featurize and
        # featurize_preloaded_data); self.df_features is initialized empty in
        # __init__ and never populated, so the original wrote an empty csv.
        self.features.to_csv("{}_features_only{}".format(name_path, ext),
                             index=False)
@t.guard(confirm=t.Bool)
def clear_input(self, confirm=False):
    """
    Clear all input for the model. Requires the user to confirm with an additional "confirm"
    argument in order to run.

    Parameters:
    ----------
    confirm : bool
        Users are required to modify this to true in order to clear all attributes
        from the featurizer

    Raises:
    -------
    ValueError
        If confirm is not True.
    """
    if not confirm:
        raise ValueError('If you\'re sure you would like to clear the inputs of this model, '
                         'rerun the function with the following argument: '
                         'clear_input(confirm=True). This operation cannot be reversed.')
    # Reset every attribute populated by load_data/featurize back to the
    # values set in __init__.
    self.data = np.zeros((1))
    self.features = pd.DataFrame()
    self.df_original = pd.DataFrame()
    self.full_dataframe = pd.DataFrame()
    self.csv_path = ''
    # BUG FIX: the original reset self.image_list, an attribute that is never
    # defined anywhere on this class; the loaded mapping actually lives in
    # self.image_dict.
    self.image_dict = {}
    self.image_columns = ''
    self.image_path = ''
# ###################
# Helper Functions! #
# ###################
def _load_data_helper(self,
model_name,
image_columns,
image_path,
image_dict,
csv_path,
grayscale):
"""
This function helps load the image data from the image directory and/or csv.
It can be called by either batch processing, where each column is handled separately in the
parent function and the data is loaded in batches, or it can be called without batch
processing, where the columns must each be loaded and concatenated here.
Parameters:
----------
model_name : str
The name of the model type, which determines scaling size
image_columns : list
A list of the image column headers
image_path : str
Path to the image directory
image_dict : dict
This is a dictionary containing the names of each image column as a key, along with
all of the image paths for that column.
csv_path : str
Path to the csv
grayscale : bool
Whether the images are grayscale or not
Returns:
--------
(scaled_size, full_image_data) : (tuple, np.ndarray)
The (height, width) images were scaled to, and the image tensor with
one leading axis per image column
"""
# Save size that model scales to
scaled_size = SIZE_DICT[model_name]
# Save the full image tensor, the path to the csv, and the list of image paths
image_data, list_of_image_paths = \
preprocess_data(image_columns[0], model_name,
image_dict[image_columns[0]],
image_path, csv_path, scaled_size, grayscale)
# Leading axis of length 1 per column so columns can be stacked below.
image_data_list = [np.expand_dims(image_data, axis=0)]
# If there is more than one image column, repeat this process for each
if len(image_columns) > 1:
for column in image_columns[1:]:
image_data, list_of_image_paths = \
preprocess_data(column, model_name, image_dict[column], image_path,
csv_path, scaled_size, grayscale)
image_data_list.append(np.expand_dims(image_data, axis=0))
full_image_data = np.concatenate(image_data_list)
return scaled_size, full_image_data
def _featurize_helper(self, features, image_columns, batch_data):
"""
This function featurizes the data for each image column, and creates the features array
from all of the featurized columns
Parameters:
----------
features : array
Array of features already computed
image_columns : list
A list of the image column headers
batch_data : array
The batch loaded image data (which may be the full array if not running with batches)
Returns:
--------
df_features : pandas.DataFrame
The features for all columns, concatenated horizontally
"""
# Save the initial features list
features_list = []
# For each image column, perform the full featurization and add the features to the df
for column in range(batch_data.shape[0]):
# Featurize the data, and save it to the appropriate columns
partial_features = featurize_data(self.featurizer, batch_data[column])
# Write this column's features into its num_features-wide band.
features[:, self.num_features * column:self.num_features * column + self.num_features]\
= partial_features
# Save the full dataframe
df_features = \
create_features(batch_data[column],
partial_features,
image_columns[column])
features_list.append(df_features)
df_features = pd.concat(features_list, axis=1)
return df_features
def _batch_processing(self,
                      full_image_dict,
                      image_columns,
                      image_path='',
                      csv_path='',
                      batch_size=1000,
                      grayscale=False):
    """
    This function handles batch processing. It takes the full list of images that need
    to be processed and loads/featurizes the images in batches.

    Parameters:
    ----------
    full_image_dict : dict
        This is a dictionary containing the names of each image column as a key, along with
        all of the image paths for that column.
    image_columns : list
        A list of the image column headers
    image_path : str
        Path to the image directory
    csv_path : str
        Path to the csv
    batch_size : int
        The number of images processed per batch
    grayscale : bool
        Whether the images are grayscale or not

    Returns:
    --------
    features_df : pandas.DataFrame
        The features for every image column, concatenated horizontally
    """
    features_df_columns_list = []
    # Iterate through each image column
    for column_index, column in enumerate(image_columns):
        # Initialize the batch index for this column
        index = 0
        batch_number = 0
        # Get the list of image paths and the number of images in this column
        list_of_image_paths = full_image_dict[column]
        num_images = len(list_of_image_paths)
        batch_features_list = []
        if len(image_columns) > 1:
            logger.info("Featurizing column #{}".format(column_index + 1))
        # Loop through the images, featurizing each batch
        while index < num_images:
            # time.clock() was removed in Python 3.8; time.time() keeps the
            # per-batch timing estimate working on modern interpreters.
            tic = time.time()
            # Cap the batch size against the number of images left WITHOUT
            # mutating batch_size itself. (BUG FIX: the original overwrote
            # batch_size on the final partial batch, permanently shrinking
            # the batch size for every subsequent image column.)
            current_batch_size = min(batch_size, num_images - index)
            # Create a dictionary for just the batch of images
            batch_image_dict = {column: list_of_image_paths[index:index + current_batch_size]}
            # Load the images
            logger.info("Loading image batch.")
            batch_data = self.load_data(column, image_path,
                                        batch_image_dict, csv_path,
                                        grayscale, save_data=False)
            logger.info("\nFeaturizing image batch.")
            # Featurize just this batch and collect it for later concatenation
            batch_features_list.append(self.featurize_preloaded_data(batch_data, column,
                                                                     features_only=True,
                                                                     batch_processing=True))
            # Increment index by the number of images actually processed
            index += current_batch_size
            batch_number += 1
            # Give update on time and number of images left in column.
            # BUG FIX: the original computed int(math.ceil(n) / batch_size) —
            # ceil() of an int is a no-op, so the ceiling must wrap the
            # quotient to round partial batches up.
            remaining_batches = int(math.ceil((num_images - index) / float(batch_size)))
            logger.info("Featurized batch #{}. Number of images left: {}\n"
                        "Estimated total time left: {} seconds\n".format(
                            batch_number, num_images - index,
                            int((time.time() - tic) * remaining_batches))
                        )
        # After the full column's features are calculated, concatenate them all and append them
        # to the full DataFrame list
        batch_features_df = pd.concat(batch_features_list, ignore_index=True)
        features_df_columns_list.append(batch_features_df)
    # Once all the features are created for each column, concatenate them together for both
    # the features dataframe and the full dataframe
    features_df = pd.concat(features_df_columns_list, axis=1)
    # Return the features dataframe
    return features_df
def _build_image_dict(image_path, csv_path, image_columns):
    """
    Map each image column header to the list of image paths found for it.

    Parameters
    ----------
    image_path : str
        Path to the image directory
    csv_path : str
        Path to the csv
    image_columns : list
        A list of the image column headers

    Returns
    -------
    (dict, pandas.DataFrame)
        The column -> image-path-list mapping, plus the dataframe produced by
        the path finder for the last column processed (callers use it as the
        original, un-featurized dataframe).
    """
    column_to_paths = {}
    for header in image_columns:
        found_paths, frame = _image_paths_finder(image_path, csv_path, header)
        column_to_paths[header] = found_paths
    return column_to_paths, frame
def _input_fixer(image_columns, image_path):
"""
This function turns image_columns into a list of a single element if there is only
one image column. It also fixes the image path to contain a trailing `/` if the path to the
directory is missing one.
Parameters
----------
image_columns : list
A list of the image column headers
image_path : str
Path to the image directory
"""
# Convert column header to list if it's passed a single string
if isinstance(image_columns, str):
image_columns = [image_columns]
# Add backslash to end of image path if it is not there
if image_path != '' and image_path[-1] != "/":
image_path = '{}/'.format(image_path)
return image_columns, image_path
def _create_csv_path(new_csv_path):
"""
Create the necessary csv along with the appropriate directories
"""
# Create the filepath to the new csv
path_to_new_csv = os.path.dirname(new_csv_path)
if not os.path.isdir(path_to_new_csv) and path_to_new_csv != '':
os.makedirs(path_to_new_csv)
def _named_path_finder(csv_name, model_str, model_depth, model_output,
omit_model, omit_depth, omit_output, omit_time):
"""
Create the named path from the robust naming configuration available.
Parameters:
-----------
omit_model : Bool
Boolean to omit the model name from the CSV name
omit_depth : Bool
Boolean to omit the model depth from the CSV name
omit_output : Bool
Boolean to omit the model output size from the CSV name
omit_time : Bool
Boolean to omit the time of creation from the CSV name
model_str : Str
The model name
model_depth : Str
The model depth
model_output : Str
The model output size
Returns:
--------
named_path : Str
The full name of the CSV file
"""
# Naming switches! Can turn on or off to remove time, model, depth, or output size
# from output filename
if not omit_time:
saved_time = "_({})".format(time.strftime("%d-%b-%Y-%H.%M.%S", time.gmtime()))
else:
saved_time = ""
if not omit_model:
saved_model = "_{}".format(model_str)
else:
saved_model = ""
if not omit_depth:
saved_depth = "_depth-{}".format(model_depth)
else:
saved_depth = ""
if not omit_output:
saved_output = "_output-{}".format(model_output)
else:
saved_output = ""
named_path = "{}{}{}{}{}".format(csv_name, saved_model, saved_depth, saved_output, saved_time)
return named_path
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Make Cozmo look around for a cube.
Cozmo looks around, reacts, and picks up and puts down a cube if found.
'''
import asyncio
import cozmo
from cozmo.util import degrees
def cozmo_program(robot: cozmo.robot.Robot):
    """Search for a light cube, then pick it up, rotate, and put it down.

    Runs the LookAroundInPlace behavior until a cube is observed (or a
    30 s timeout expires), reacts with animations, and performs a
    pickup / turn / place-on-ground sequence.
    """
    look_around = robot.start_behavior(cozmo.behavior.BehaviorTypes.LookAroundInPlace)
    # try to find a block
    cube = None
    try:
        # Blocks until a cube is observed or the 30 s timeout fires.
        cube = robot.world.wait_for_observed_light_cube(timeout=30)
        print("Found cube", cube)
    except asyncio.TimeoutError:
        print("Didn't find a cube :-(")
    finally:
        # whether we find it or not, we want to stop the behavior
        look_around.stop()
    if cube is None:
        # No cube found: play a failure animation and bail out.
        robot.play_anim_trigger(cozmo.anim.Triggers.MajorFail)
        return
    print("Yay, found cube")
    # Flash the cube's lights green to show it was recognized.
    cube.set_lights(cozmo.lights.green_light.flash())
    anim = robot.play_anim_trigger(cozmo.anim.Triggers.BlockReact)
    anim.wait_for_completed()
    # Pick the cube up, waiting for the action to finish before moving on.
    action = robot.pickup_object(cube)
    print("got action", action)
    result = action.wait_for_completed(timeout=30)
    print("got action result", result)
    robot.turn_in_place(degrees(90)).wait_for_completed()
    # Put the cube back down at the current location.
    action = robot.place_object_on_ground_here(cube)
    print("got action", action)
    result = action.wait_for_completed(timeout=30)
    print("got action result", result)
    anim = robot.play_anim_trigger(cozmo.anim.Triggers.MajorWin)
    # Reset the cube's corner lights (None presumably = off — confirm in SDK).
    cube.set_light_corners(None, None, None, None)
    anim.wait_for_completed()
# Connect to the robot and run the routine (cozmo SDK entry point).
cozmo.run_program(cozmo_program)
|
nilq/baby-python
|
python
|
# --------------
# Importing header files
import numpy as np

# Path of the file has been stored in variable called 'path'

# New record to be appended to the census data.
new_record = [[50, 9, 4, 1, 0, 0, 40, 0]]

# Code starts here
# Load the csv (header row skipped) and append the new record.
data = np.genfromtxt(path, delimiter=',', skip_header=1)
print(data, data.shape)
census = np.concatenate((data, np.array(new_record)), axis=0)
print(census, census.shape)

# --------------
# Summary statistics for the age column (column 0).
age = census[:, 0]
max_age = age.max()
min_age = age.min()
age_mean = age.mean()
age_std = age.std()
print('Max age = {} \n Min age = {} \n mean age = {} \n std deviation = {}'.format(max_age, min_age, age_mean, age_std))

# --------------
# Split the data by race code (column 2) and find the smallest group.
race_0, race_1, race_2, race_3, race_4 = (
    census[census[:, 2] == code] for code in range(5)
)
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
group_sizes = [len_0, len_1, len_2, len_3, len_4]
minority = min(group_sizes)
# np.argmin returns the first index on ties, i.e. the lowest race code
# among the equally small groups.
minority_race = int(np.argmin(group_sizes))
print(minority_race)

# --------------
# Average weekly working hours (column 6) of citizens older than 60.
senior_citizens = census[census[:, 0] > 60]
working_hours_sum = senior_citizens[:, 6].sum()
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours, avg_working_hours <= 25)

# --------------
# Compare average pay (column 7) between high (>10) and low education
# levels (column 1).
high = census[census[:, 1] > 10]
low = census[census[:, 1] <= 10]
avg_pay_high = high[:, 7].mean()
avg_pay_low = low[:, 7].mean()
print(avg_pay_high, avg_pay_low)
|
nilq/baby-python
|
python
|
import sys
sys.path.append(r'F:\geostats')
from geostats import Scraping
from get_groupinfo import *
from get_eventsInfo import *
from urllib.error import HTTPError
import time,random,os
def generate_groupdf(groups):
    """Scrape meta info and events for each meetup group in ``groups``.

    Relies on the module-level ``group_url`` (a list parallel to
    ``groups``) and ``path`` (output directory of the current category).
    For every group it fetches the group info, creates a per-group
    folder, scrapes the group's events and merges them into that
    folder's ``events.xlsx``.

    Returns a DataFrame with one row of group info per scraped group.
    """
    groups_df = []
    for j,group in enumerate(groups):
        try:
            record = get_groupsInfo(group_url[j])
            print('Get into %s' %group_url[j])
            record.update({'group': group})
            groups_df.append(record)
            # One output folder per group; makedirs raises if it already
            # exists, which the outer except then silently skips.
            new_path = os.path.join(path, str(group).strip())
            os.makedirs(new_path)
            events_df = get_eventsInfo(group_url[j])
            events_path = os.path.join(new_path,'events.xlsx')
            try:
                # Merge with any previously saved events sheet.
                df1 = pd.read_excel(events_path)
                df1 = Scraping.clean_index(df1)
                events_df_new = Scraping.dfmerge(df1, events_df, ['Name','Hold Date'], 'rbind')
            except:
                # No (or unreadable) existing sheet: start fresh.
                events_df_new = events_df
            events_df_new.to_excel(events_path)
            print("Updates%s successful" %group)
        except(Exception):
            # NOTE(review): swallows *all* errors for this group, including
            # the makedirs failure above — consider logging the exception.
            continue
    groups_df = pd.DataFrame(groups_df)
    return groups_df
# Route all urllib requests through the configured proxy.
opener = Scraping.setProxy()
urllib.request.install_opener(opener)
url = "https://www.meetup.com/"
content = Scraping.parseHtml(url)
# Category names and their listing-page links from the meetup home page.
Categories = content.xpath('//*[@id="mupMain"]/div[3]/div/section[3]/div[2]/ul/li/div/div/a/h4/text()')
Sub_Url_10miles = list(content.xpath('//*[@id="mupMain"]/div[3]/div/section[3]/div[2]/ul/li/div/div/a/@href'))
# Widen each category search to a 50-mile radius around Dallas, TX.
Sub_Url = [url_10miles + '?allMeetups=false&radius=50&userFreeform=Dallas%2C+TX&mcId=z75201&mcName=Dallas%2C+TX&sort=default' for url_10miles in Sub_Url_10miles]
# Visit the categories in random order.
random_lst = list(range(0,len(Sub_Url)))
random.shuffle(random_lst)
for random_index in random_lst:
    url1 = Sub_Url[random_index]
    type = str(Categories[random_index])  # NOTE(review): shadows builtin type()
    path = os.path.join("./Data/", type)
    content1 = Scraping.parseHtml(url1)
    group1 = content1.xpath('//*[@id="simple-view"]/div[1]/ul/li/div/a[2]/div[2]/h3/text()')
    groups = Scraping.clean_punctuationList(group1)
    # group_url and path are read as globals by generate_groupdf.
    group_url = content1.xpath('//*[@id="simple-view"]/div/ul/li/div/a[@itemprop="url"]/@href')
    groups_df = generate_groupdf(groups)
    group_excel = os.path.join(path, type + ".xlsx")
    try:
        # Append to the category workbook if it already exists.
        df1 = pd.read_excel(group_excel)
        df1 = Scraping.clean_index(df1)
        groups_df_new = Scraping.dfmerge(df1,groups_df,'Name','rbind')
    except(HTTPError):
        continue
    except:
        # No existing workbook: write the freshly scraped data.
        groups_df_new = groups_df
    groups_df_new.to_excel(group_excel)
    time.sleep(0.1)
|
nilq/baby-python
|
python
|
# Pulls in images from different sources
# Thomas Lloyd
import numpy as np
import flickrapi
import urllib.request
# make private
api_key = '55d426a59efdae8b630aaa3afbac4000'
api_secret = '72f4bde28a867f41'
keyword1 = 'toulouse'
def initialize(api_key, api_secret):
    """Create and return an authenticated FlickrAPI client."""
    return flickrapi.FlickrAPI(api_key, api_secret)
def pullimages(flickr):
    """Collect photo URLs from Flickr for the module-level ``keyword1``.

    Walks the relevance-sorted search results (free text + tag search on
    ``keyword1``) and gathers each photo's ``url_c`` attribute.  ``url_c``
    can be absent, so the returned list may contain ``None`` entries;
    callers filter on ``type(url) == str`` before downloading.
    """
    # photos = flickr.photos.search(user_id='60027860@N06', per_page='10')
    photos = flickr.walk(text=keyword1,
                         tag_mode='all',
                         tags=keyword1,
                         extras='url_c',
                         per_page=500,
                         sort='relevance')
    urls = []
    for i, photo in enumerate(photos):
        url = photo.get('url_c')
        urls.append(url)
        # Cap the walk at ~5000 photos.
        if i > 5000:
            break
    return urls
def fakeurls():
    """Return a small hard-coded list of Flickr image URLs.

    Offline stand-in for ``pullimages`` so the download pipeline can be
    exercised without API access.  Every string URL is echoed to stdout
    in the same ``url<n>: <url>`` format.
    """
    urls = [
        'https://live.staticflickr.com/7858/47443394111_c9b79def1b_c.jpg',
        'https://live.staticflickr.com/4181/34268611090_aa1b6cd86f_c.jpg',
        'https://live.staticflickr.com/4226/33953994894_7213c010f4_c.jpg',
        'https://live.staticflickr.com/4902/44209156090_48c2861574_c.jpg',
        'https://live.staticflickr.com/7328/27511837520_12d32ef9bb_c.jpg',
    ]
    for index, url in enumerate(urls):
        if isinstance(url, str):
            print("url" + str(index) + ": " + url)
    return urls
def saveimages(urls, keyword1):
    """Download every string URL in ``urls`` into a keyword-named folder.

    Non-string entries (e.g. ``None`` returned for photos without a
    ``url_c``) are skipped.  The destination directory is hard-coded to
    a local volume and must already exist.
    """
    print('beginning url download')
    for n in range(0, len(urls)):
        url = urls[n]
        if type(url) == str:
            # urllib.request.urlretrieve(url, '/mnt/f/amsterdam/ams' + str(n) + '.jpg')
            # urllib.request.urlretrieve(url, '/mnt/f/newyork/ny' + str(n) + '.jpg') # zero indexed
            # urllib.request.urlretrieve(url, '/Dropbox/Documents//tko' + str(n) + '.jpg') # zero indexed
            urllib.request.urlretrieve(url, '/Volumes/2018_SSD_TL/GlobalColorImages/' + keyword1 + '_flickr/' + keyword1 + '_flickr_' + str(n) + '.jpg')
            # urllib.request.urlretrieve(url, '/Users/thomaslloyd/Desktop/colorFinderMultiImages/' + str(n) + '.jpg')
    # else raise Exception('url type is not a string')
# main
# Authenticate, scrape image URLs for `keyword1`, then download them.
flickr = initialize(api_key, api_secret)
# urls = fakeurls()
urls = pullimages(flickr)
saveimages(urls, keyword1)
print('number of urls stored: ' + str(len(urls)))
print(keyword1 + ' images downloaded.')
|
nilq/baby-python
|
python
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from sis_provisioner.management.commands import SISProvisionerCommand
from sis_provisioner.models.group import Group
class Command(SISProvisionerCommand):
    """Management command that re-prioritizes groups for the next import."""

    help = "Prioritize groups for importing"

    def handle(self, *args, **options):
        # Bump priority for groups changed since their last import, then
        # run the base-class job bookkeeping hook (see SISProvisionerCommand).
        Group.objects.update_priority_by_modified_date()
        self.update_job()
|
nilq/baby-python
|
python
|
from usuarios import Usuario
class Admin(Usuario):
    """Administrator account: a regular Usuario plus a privilege set."""

    def __init__(self, first_name, last_name, username, email):
        """Build the admin like a regular user, then attach an (initially
        empty) ``privileges`` object."""
        super().__init__(first_name, last_name, username, email)
        self.privileges = privileges()
class privileges():
    """Container for a user's privilege list.

    The class name is kept lowercase for backward compatibility with
    existing callers (e.g. ``Admin``).
    """

    def __init__(self, privileges=None):
        # A literal [] default would be shared between every instance
        # created without arguments (mutable-default pitfall), so each
        # instance gets its own fresh list instead.
        self.privileges = [] if privileges is None else privileges

    def show_privileges(self):
        """Print the privilege list, or a notice when the user has none."""
        print("\nPrivilegios:")
        if self.privileges:
            for privilege in self.privileges:
                print("- " + privilege)
        else:
            print("Este usuario no tiene privilegios.")
|
nilq/baby-python
|
python
|
import FWCore.ParameterSet.Config as cms
from RecoVertex.BeamSpotProducer.BeamSpot_cfi import *
|
nilq/baby-python
|
python
|
# Copyright (c) 2015-2019 The Botogram Authors (see AUTHORS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import json
import pytest
from botogram.callbacks import Buttons, parse_callback_data, get_callback_data
from botogram.callbacks import hashed_callback_name
from botogram.components import Component
from botogram.context import Context
from botogram.crypto import TamperedMessageError
from botogram.hooks import Hook
def test_buttons(bot, sample_update):
    """Buttons grid serialization: rows are emitted in row-index order,
    empty rows are skipped, and each button kind maps to its matching
    Telegram inline-keyboard field."""
    component = Component("test")
    hook = Hook(lambda: None, component)
    buttons = Buttons()
    # Row 0: a URL button and a signed-callback button.
    buttons[0].url("test 1", "http://example.com")
    buttons[0].callback("test 2", "test_callback")
    # Row 3: a callback button carrying extra data.
    buttons[3].callback("test 3", "another_callback", "data")
    # Row 2: inline-query switches (empty query, and current-chat variant).
    buttons[2].switch_inline_query("test 4")
    buttons[2].switch_inline_query("test 5", "wow", current_chat=True)
    with Context(bot, hook, sample_update):
        # Row 1 was never populated, so the serialized keyboard contains
        # rows 0, 2 and 3 — in that order.
        assert buttons._serialize_attachment(sample_update.chat()) == {
            "inline_keyboard": [
                [
                    {"text": "test 1", "url": "http://example.com"},
                    {
                        "text": "test 2",
                        "callback_data": get_callback_data(
                            bot, sample_update.chat(), "test:test_callback",
                        ),
                    },
                ],
                [
                    {"text": "test 4", "switch_inline_query": ""},
                    {
                        "text": "test 5",
                        "switch_inline_query_current_chat": "wow"
                    },
                ],
                [
                    {
                        "text": "test 3",
                        "callback_data": get_callback_data(
                            bot, sample_update.chat(), "test:another_callback",
                            "data",
                        ),
                    },
                ],
            ],
        }
def test_parse_callback_data(bot, sample_update):
    """parse_callback_data round-trips get_callback_data output and
    enforces the signature unless validation is disabled on the bot."""
    c = sample_update.chat()
    # Round-trip with an attached payload.
    raw = get_callback_data(bot, c, "test_callback", "this is some data!")
    assert parse_callback_data(bot, c, raw) == (
        hashed_callback_name("test_callback"),
        "this is some data!",
    )
    # Round-trip without a payload yields None for the data part.
    raw = get_callback_data(bot, c, "test_callback")
    assert parse_callback_data(bot, c, raw) == (
        hashed_callback_name("test_callback"),
        None,
    )
    # Any modification of the signed blob must be rejected.
    with pytest.raises(TamperedMessageError):
        raw = get_callback_data(bot, c, "test_callback", "data") + "!"
        parse_callback_data(bot, c, raw)
    # Now test with disabled signature verification
    bot.validate_callback_signatures = False
    raw = get_callback_data(bot, c, "test_callback", "data") + "!"
    # With verification off, the tampered suffix simply ends up in the data.
    assert parse_callback_data(bot, c, raw) == (
        hashed_callback_name("test_callback"),
        "data!"
    )
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# Copyright 2022 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import pytest
from utils.common import build_image, latest_build_artifact
class TestBootImg:
    """Build-time checks for the ``bootimg`` IMAGE_FSTYPES output."""

    @pytest.mark.min_mender_version("1.0.0")
    def test_bootimg_creation(
        self, request, bitbake_variables, prepared_test_build, bitbake_image
    ):
        """Test that we can build a bootimg successfully."""
        build_image(
            prepared_test_build["build_dir"],
            prepared_test_build["bitbake_corebase"],
            bitbake_image,
            ['IMAGE_FSTYPES = "bootimg"'],
        )
        # Locate the artifact the build just produced.
        built_img = latest_build_artifact(
            request, prepared_test_build["build_dir"], "core-image*.bootimg"
        )
        distro_features = bitbake_variables["MENDER_FEATURES"].split()
        if "mender-grub" in distro_features and "mender-image-uefi" in distro_features:
            # mdir (mtools) lists the FAT image contents; the GRUB env dir
            # must contain the first mender_grubenv instance.
            output = subprocess.check_output(
                ["mdir", "-i", built_img, "-b", "/grub-mender-grubenv"]
            ).decode()
            assert "mender_grubenv1" in output.split("/")
|
nilq/baby-python
|
python
|
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sparse_ho.utils_plot import configure_plt, discrete_cmap
save_fig = True
# save_fig = False
configure_plt()
fontsize = 18
current_palette = sns.color_palette("colorblind")

# Hyperparameter-selection methods to compare, and their panel titles.
algorithms = ['grid_search10', 'random', 'bayesian', 'grad_search']
dict_title = {}
dict_title['grid_search10'] = 'Grid-search'
dict_title['random'] = 'Random-search'
dict_title['bayesian'] = 'Bayesian'
dict_title['grad_search'] = '1st order method'

plt.close('all')
fig, axarr = plt.subplots(
    1, len(algorithms), sharex=True, sharey=True,
    figsize=[10.67, 3])

# Dense 100-point grid search serves as the reference CV curve.
objs_full = np.load("results/objs_grid_search100.npy", allow_pickle=True)
log_alphas_full = np.load(
    "results/log_alphas_grid_search100.npy", allow_pickle=True)

cmap = discrete_cmap(10, 'Reds')
c = np.linspace(1, 10, 10)
for i, algorithm in enumerate(algorithms):
    objs = np.load("results/objs_%s.npy" % algorithm, allow_pickle=True)
    log_alphas = np.load(
        "results/log_alphas_%s.npy" % algorithm, allow_pickle=True)
    # Reference curve, normalized by its first value ...
    axarr[i].plot(
        log_alphas_full, objs_full / objs_full[0], color=current_palette[0],
        zorder=1)
    # ... with the method's iterates overlaid, colored by iteration index.
    pcm = axarr[i].scatter(
        log_alphas, objs / objs_full[0], c=c, cmap=cmap, marker='x', zorder=10)
    # Also project the iterates onto the x-axis.
    axarr[i].scatter(
        log_alphas, np.zeros(len(log_alphas)), c=c, cmap=cmap, marker='x',
        # zorder=10)
        clip_on=False, zorder=10)
    axarr[i].set_title(dict_title[algorithm])
    axarr[i].set_xlabel("$\lambda - \lambda_{\max}$", fontsize=fontsize)
    axarr[i].set_ylim((0, 1))
    print(objs.min())
axarr[0].set_ylabel(r"$\mathcal{C}(\beta^{(\lambda)})$", fontsize=fontsize)
cba = fig.colorbar(pcm, ax=axarr[3], ticks=np.linspace(1, 10, 10))
cba.set_label('Iterations', fontsize=fontsize)
fig.tight_layout()
if save_fig:
    # Output paths into the companion paper repository.
    fig_dir = "../../../CD_SUGAR/tex/journal/prebuiltimages/"
    fig_dir_svg = "../../../CD_SUGAR/tex/journal/images/"
    fig.savefig(
        fig_dir + "intro_lassoCV.pdf", bbox_inches="tight")
    fig.savefig(
        fig_dir_svg + "intro_lassoCV.svg", bbox_inches="tight")
plt.show(block=False)
fig.show()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from rest_framework.permissions import BasePermission
from v1.models.Board import Boards
from v1.models.Permissions import READ, WRITE, DELETE
class BoardPermission(BasePermission):
    """Object-level permission check for board endpoints.

    Maps the view action to the set of board permissions the requesting
    user must hold on the target board.
    """

    def has_object_permission(self, request, view, obj):
        action = view.action
        if action in ('get_states', 'retrieve'):
            # Read-only endpoints.
            required = [READ]
        elif action == 'destroy':
            required = [READ, DELETE]
        elif action in ('change_name', 'create'):
            required = [READ, WRITE]
        else:
            # Anything unrecognized requires full access.
            required = [READ, WRITE, DELETE]
        return Boards.permissions.has_boards_access(request.user, obj, required)
|
nilq/baby-python
|
python
|
import socket
import struct
import time
import thread  # NOTE(review): Python 2 module; this whole script is Python 2
from nc_config import *

# Fixed addresses/ports of the testbed hosts.
NC_PORT = 8888
CLIENT_IP = "10.0.0.1"
SERVER_IP = "10.0.0.2"
CONTROLLER_IP = "10.0.0.3"

path_reply = "reply.txt"
len_key = 16

# Total number of UDP packets received so far (read by the counting thread).
counter = 0

def counting():
    """Print the per-second packet rate and the running total every second."""
    last_counter = 0
    while True:
        # Python 2 print statement: "<packets this second> <total>".
        print (counter - last_counter), counter
        last_counter = counter
        time.sleep(1)

thread.start_new_thread(counting, ())

#f = open(path_reply, "w")
# Receive reply packets as fast as possible and just count them;
# the commented-out code below would also decode and log each packet.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((CLIENT_IP, NC_PORT))
while True:
    packet, addr = s.recvfrom(1024)
    counter = counter + 1
    #op = struct.unpack("B", packet[0])
    #key_header = struct.unpack(">I", packet[1:5])[0]
    #f.write(str(op) + ' ')
    #f.write(str(key_header) + '\n')
    #f.flush()
    #print counter
#f.close()
|
nilq/baby-python
|
python
|
import logging
import numpy as np
import kubric as kb
from kubric.renderer.blender import Blender as KubricBlender
logging.basicConfig(level="INFO") # < CRITICAL, ERROR, WARNING, INFO, DEBUG
world_matrix = {
"bunny": np.array(
(
(-1.0, 3.2584136988589307e-07, 0.0, 0.7087775468826294),
(-3.2584136988589307e-07, -1.0, 0.0, -1.2878063917160034),
(0.0, 0.0, 1.0, 0.0),
(0.0, 0.0, 0.0, 1.0),
),
),
"suzanne": np.array(
(
(1.0, 0.0, 0.0, -0.8567398190498352),
(0.0, 1.0, 0.0, 0.0),
(0.0, 0.0, 1.0, 0.0),
(0.0, 0.0, 0.0, 1.0),
)
),
"teapot": np.array(
(
(1.0, 0.0, 0.0, -0.9078792333602905),
(0.0, 1.0, 0.0, 1.2115877866744995),
(0.0, 0.0, 1.0, 0.0),
(0.0, 0.0, 0.0, 1.0),
)
),
}
points = {
"bunny": np.array(
(
(
0.044713765382766724,
-1.0193415880203247,
0.8044384121894836,
1.0,
),
(
0.056191492825746536,
-0.31232786178588867,
0.8044384121894836,
1.0,
),
(0.0, 0.0, 0.0, 1.0),
(1.0, 0.0, 0.0, 1.0),
),
),
"suzanne": np.array(
(
(-1.0, 0.0, 0.0, 1.0),
(-0.2928931713104248, 2.9802322387695312e-08, 0.0, 1.0),
(0.0, 0.0, 0.0, 1.0),
(1.0, 0.0, 0.0, 1.0),
)
),
"teapot": np.array(
(
(
0.044713765382766724,
-1.0193415880203247,
0.8044384121894836,
1.0,
),
(
0.056191492825746536,
-0.31232786178588867,
0.8044384121894836,
1.0,
),
(0.0, 0.0, 0.0, 1.0),
(1.0, 0.0, 0.0, 1.0),
),
),
}
def interpolate_position(
    t: np.ndarray, handles: np.ndarray, world_matrix: np.ndarray
) -> np.ndarray:
    """Evaluate a cubic Bezier curve given in homogeneous coordinates.

    Args:
        t: parameter values in [0, 1], arbitrary leading shape.
        handles: (4, 4) control points as homogeneous rows (x, y, z, w).
        world_matrix: (4, 4) homogeneous transform applied to the result.

    Returns:
        Cartesian positions with shape ``t.shape + (3,)``.
    """
    h0, h1, h2, h3 = handles[:, np.newaxis]
    u = t[..., np.newaxis]
    v = 1 - u
    # Cubic Bernstein blend of the four control points.
    blended = (
        v ** 3 * h0
        + 3 * v ** 2 * u * h1
        + 3 * v * u ** 2 * h2
        + u ** 3 * h3
    )
    # Normalize the homogeneous w component before transforming.
    blended = blended / blended[..., [-1]]
    # Apply the world transform and drop the homogeneous coordinate.
    return (world_matrix @ blended.T).T[..., :-1]
# --- create scene and attach a renderer and simulator
num_frames = 480
scene = kb.Scene(resolution=(256, 256), background=kb.get_color("white"))
scene.frame_end = num_frames # < numbers of frames to render
scene.frame_rate = 24 # < rendering framerate
scene.ambient_illumination = kb.Color(0.05, 0.05, 0.05)
renderer = KubricBlender(scene)
# --- populate the scene with objects, lights, cameras
rng = np.random.RandomState(0)
wall_material = kb.FlatMaterial(
color=kb.get_color("white"), indirect_visibility=True
)
bunny = kb.FileBasedObject(
render_filename="objects/bunny.obj",
name="bunny",
scale=(4.89, 4.89, 4.89),
position=(0, -1, -0.47044),
quaternion=(0.0, 0.0, 0.707, 0.707),
material=kb.PrincipledBSDFMaterial(color=kb.random_hue_color(rng=rng)),
)
suzanne = kb.FileBasedObject(
render_filename="objects/suzanne.obj",
name="suzanne",
scale=(0.316, 0.316, 0.316),
position=(0, 0, 0.001821),
quaternion=(0.5, 0.5, 0.5, 0.5),
material=kb.PrincipledBSDFMaterial(color=kb.random_hue_color(rng=rng)),
)
teapot = kb.FileBasedObject(
render_filename="objects/teapot.obj",
name="teapot",
scale=(0.19, 0.19, 0.19),
position=(0, 1, -0.28363),
quaternion=(0.707, 0.70, 0.0, 0.0),
material=kb.PrincipledBSDFMaterial(color=kb.random_hue_color(rng=rng)),
)
scene += bunny
scene += suzanne
scene += teapot
scene += kb.Cube(
scale=(0.1, 100, 100),
position=(-4, 0, 0),
material=wall_material,
static=True,
background=True,
)
scene += kb.DirectionalLight(
name="sun", position=(4, 0, 3), look_at=(0, 0, 0), intensity=1.5
)
camera = kb.PerspectiveCamera(
name="camera",
position=(0, 0.0, 6.0),
quaternion=(1.0, 0.0, 0.0, 1.0),
)
scene.camera = camera
xs = np.linspace(-np.pi / 2, np.pi / 2, num_frames)
positions = {
"bunny": interpolate_position(
np.abs(np.cos(xs * 8.33)), points["bunny"], world_matrix["bunny"]
),
"teapot": interpolate_position(
np.abs(np.cos(xs * 5.13)), points["teapot"], world_matrix["teapot"]
),
"suzanne": interpolate_position(
np.abs(np.cos(xs * 7.11)), points["suzanne"], world_matrix["suzanne"]
),
}
# Keyframe each object's precomputed position on every frame
# (Blender frames are 1-indexed, the position arrays 0-indexed).
for frame in range(1, num_frames + 1):
    bunny.position = positions["bunny"][frame - 1]
    bunny.keyframe_insert("position", frame)
    teapot.position = positions["teapot"][frame - 1]
    teapot.keyframe_insert("position", frame)
    suzanne.position = positions["suzanne"][frame - 1]
    suzanne.keyframe_insert("position", frame)
# --- renders the output
kb.as_path("output_top").mkdir(exist_ok=True)
# Save the ground-truth trajectories and camera position next to the frames.
np.save("output_top/suzanne.npy", positions["suzanne"])
np.save("output_top/teapot.npy", positions["teapot"])
np.save("output_top/bunny.npy", positions["bunny"])
np.save("output_top/camera_pos.npy", np.array(camera.position))
renderer.save_state("output_top/trio_top.blend")
frames_dict = renderer.render()
kb.write_image_dict(frames_dict, "output_top")
|
nilq/baby-python
|
python
|
import pygame
def compare_surfaces(surf_a: pygame.Surface, surf_b: pygame.Surface):
    """Return True when both surfaces have identical size and pixels.

    Pixels are compared one by one via ``Surface.get_at``, so this is
    meant for small test surfaces rather than large images.
    """
    width, height = surf_a.get_size()
    if (width, height) != surf_b.get_size():
        return False
    return all(
        surf_a.get_at((col, row)) == surf_b.get_at((col, row))
        for col in range(width)
        for row in range(height)
    )
|
nilq/baby-python
|
python
|
from django import forms
from django.contrib import admin
from emoji_picker.widgets import EmojiPickerTextarea
from .attachment import DisplayImageWidgetStackedInline
class TranslationModelForm(forms.ModelForm):
    """Admin form for a single translated message.

    Only the translated text is editable; the delivered flag is shown
    read-only (it records whether the bot already sent this message).
    """
    # Translated message text, capped at 305 characters, edited with the
    # emoji-picker widget.
    text = forms.CharField(
        required=True,
        label="Text übersetzt",
        help_text="Hier nur den Meldungstext in der ausgewählten Sprache eintragen.",
        widget=EmojiPickerTextarea,
        max_length=305)
    # Read-only delivery status flag.
    delivered = forms.BooleanField(
        label='Versendet',
        help_text="Wurde diese Meldung bereits vom Bot versendet?",
        disabled=True,
        required=False)
class TranslationAdminInline(DisplayImageWidgetStackedInline):
    """Stacked admin inline that renders the 'media' field as an image
    preview (see DisplayImageWidgetStackedInline)."""
    image_display_fields = ['media']
    extra = 1
|
nilq/baby-python
|
python
|
#coding = utf-8
import os
import Config
from Function import Function
class File(object):
    """A source file to be instrumented with generated test code.

    The file is read on construction; ``parse`` extracts the test
    specifications embedded in ``Config.FUNCTION``-marked comment blocks
    and then writes a compilable output file containing the original
    code, the generated test functions and a driver.
    """

    def __init__(self, srcFileName, isKeep=False, dstFileName=None):
        """
        Args:
            srcFileName: path of the source file to read.
            isKeep: when False the source file is deleted and rewritten
                in place; when True the original is preserved.
            dstFileName: explicit output path; when omitted (and isKeep
                is True) a prefixed name derived from srcFileName is used.
        """
        self.srcFileName = srcFileName
        self.isKeep = isKeep
        self.dstFileName = dstFileName
        self.testFuncs = []    # list of (testFuncName, testCode) pairs
        self.codeLines = None  # raw lines of the source file
        self.__readCode()

    def __readCode(self):
        """Load the source file into self.codeLines.

        Raises:
            Exception: if srcFileName does not exist.
        """
        if not os.path.exists(self.srcFileName):
            # Fixed typo in the original message ("paht" -> "path").
            raise Exception('Invalid file path\n')
        with open(self.srcFileName, 'r') as f:
            self.codeLines = f.readlines()

    def __generateSourceCode(self):
        """Assemble and write the output file: header + original code +
        generated test functions + driver."""
        if not self.isKeep:
            # Overwrite the original file in place.
            os.remove(self.srcFileName)
            self.dstFileName = self.srcFileName
        elif self.dstFileName is None:
            # Keep the original; derive a prefixed output name.
            self.dstFileName = Config.COMPILING_FILE_PREFIX + self.srcFileName
        with open(self.dstFileName, 'w') as f:
            # header and function
            codeStr = Config.COMPILING_FILE_HEADER + '\n' + ''.join(self.codeLines)
            # test code
            for testFunc in self.testFuncs:
                codeStr += '\n' + testFunc[1]
            # driver: call every generated test function in turn
            codeStr += Config.COMPILING_FILE_DRIVER_PREFIX
            for testFunc in self.testFuncs:
                codeStr += '\t' + testFunc[0] + '();\n\n'
            codeStr += Config.COMPILING_FILE_DRIVER_POSTFIX
            f.write(codeStr)

    def parse(self):
        """Scan codeLines for test-specification comment blocks, turn each
        into a test function via ``Function``, then write the output file."""
        isFunc = False
        funcStr = ""
        for line in self.codeLines:
            if line.strip().startswith(Config.FUNCTION):
                # begin one specification block
                isFunc = True
                funcStr = ""
            elif isFunc:
                funcStr += line
                if line.strip().startswith('*/'):
                    # end of the comment block: parse it into test code
                    isFunc = False
                    with Function(funcStr) as func:
                        func.parse()
                        self.testFuncs.append((func.testFuncName, func.testCode))
        self.__generateSourceCode()
|
nilq/baby-python
|
python
|
from django.db import models
from django.contrib.auth.models import AbstractUser
import uuid
# esta clase define el perfil de usuario y extiende de AbstractUser
# por que solo se necesitaba eliminar los campos de first_name y last_name
# el resto del contenido se podia conservar
class profile(AbstractUser):
    """Custom user model based on AbstractUser: drops the single
    first/last name fields in favor of a three-part Spanish naming
    scheme, and adds profile image, confirmation key, gender and
    free-form comment fields."""
    # AbstractUser's name fields are replaced by the three below.
    first_name = None
    last_name = None
    nombre = models.CharField(max_length=100,blank=True)
    a_paterno = models.CharField(max_length=100, blank=True)
    a_materno = models.CharField(max_length=100,blank=True)
    img = models.ImageField(upload_to = 'user/', blank= True, null = True)
    # Random key used to confirm the account; not editable via forms/admin.
    clave_confirmacion = models.UUIDField(default=uuid.uuid4,editable=False)
    genero = models.CharField(max_length=20, default='Prefiero no decirlo',blank=True)
    comentario = models.TextField(blank= True, null = True)
    # class Meta(AbstractUser.Meta):
    #     swappable = 'AUTH_USER_MODEL'
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import subprocess
import pytest
# Use an empty temporary HOME and unset CASA_BASE_DIRECTORY (see
# conftest.py)
pytestmark = pytest.mark.usefixtures("isolate_from_home")
def test_help():
    """`casa_distro --help` exits with status 0."""
    retval = subprocess.call(['casa_distro', '--help'])
    assert retval == 0
def test_help_subcommand():
    """The `help` subcommand exits with status 0."""
    retval = subprocess.call(['casa_distro', 'help'])
    assert retval == 0
@pytest.mark.parametrize("subcommand", [
    'help',
    'distro',
    'list',
    'run',
    'pull_image',
    'list_images',
    'shell',
    'mrun',
    'bv_maker',
    'clean_images',
])
def test_help_of_subcommands(subcommand):
    """`casa_distro help <subcommand>` exits 0 and mentions the subcommand."""
    p = subprocess.Popen(['casa_distro', 'help', subcommand],
                         stdout=subprocess.PIPE, bufsize=-1,
                         universal_newlines=True)
    stdoutdata, _ = p.communicate()
    assert p.returncode == 0
    assert subcommand in stdoutdata
def test_list():
    """The `list` subcommand exits with status 0."""
    retval = subprocess.call(['casa_distro', 'list'])
    assert retval == 0
def test_distro_subcommand():
    """The `distro` subcommand exits 0 and lists 'brainvisa'."""
    p = subprocess.Popen(['casa_distro', 'distro'],
                         stdout=subprocess.PIPE, bufsize=-1,
                         universal_newlines=True)
    stdoutdata, _ = p.communicate()
    assert p.returncode == 0
    assert 'brainvisa' in stdoutdata
|
nilq/baby-python
|
python
|
import yaml
from typing import List, Union, List, Any, Dict, Tuple
import typing
import enum
import attr
import attrs_strict
from attrs_strict import type_validator
class MissingAttribute(Exception):
    """Raised by semantic checks when a required attribute is unset."""
    pass
def yaml_dump(d):
    """Serialize ``d`` to a YAML string using ``yaml.Dumper``."""
    return yaml.dump(d, Dumper=yaml.Dumper)
def self_attributes(self, attrs):
    """Return a {name: value} dict for the given attrs-style attribute list.

    Each element of ``attrs`` only needs a ``.name`` attribute; the
    corresponding value is read off ``self`` with ``getattr``.
    """
    # Loop variable renamed from ``attr`` so it no longer shadows the
    # imported ``attr`` module inside the comprehension.
    return {field.name: getattr(self, field.name) for field in attrs}
class Base:
    """Mixin providing the semantic-check hook used before YAML dumps."""
    def _semantic_check(self):
        # Subclasses override this to validate required attributes.
        pass
@attr.s
class Ref:
    """A by-name reference to another object (e.g. taskRef/pipelineRef)."""
    name: str = attr.ib(default=None, validator=type_validator())
@attr.s
class FileObject(Base):
    """Base class for objects serialized as standalone Tekton YAML files.

    Subclasses add a ``kind`` attribute; ``asdict``/``to_yaml`` rearrange
    the flat attrs attributes into the Kubernetes file layout
    (apiVersion / kind / metadata / spec).
    """
    name: Union[str, None] = attr.ib(default=None, validator=type_validator())
    description: Union[str, None] = attr.ib(default=None, validator=type_validator())
    apiVersion: str = attr.ib(default="tekton.dev/v1beta1", validator=type_validator())
    def asdict(self):
        """Return the nested dict form, restructuring every embedded
        file object found anywhere in the tree."""
        def get_delete(d, key):
            # Pop-like helper: return d[key] and remove the key.
            v = d[key]
            del d[key]
            return v
        def rewrite_if_fileobject(d):
            if "apiVersion" in d:
                # If there is an apiVersion it is a file object. Rearrange attributes
                # Move all keys to the spec
                spec = {}
                for (key, val) in d.items():
                    spec[key] = val
                for (key, val) in spec.items():
                    del d[key]
                # create the file level attributes
                d.update(
                    {
                        "metadata": {"name": get_delete(spec, "name")},
                        "kind": get_delete(spec, "kind"),
                        "apiVersion": get_delete(spec, "apiVersion"),
                    }
                )
                if len(spec) > 0:
                    d["spec"] = spec
                if "description" in spec:
                    # d["spec"] still aliases `spec`, so moving description
                    # here also removes it from the emitted spec mapping.
                    d["metadata"]["description"] = get_delete(spec, "description")
        def rewrite_fileobjects(d):
            # Depth-first walk over nested dicts and lists.
            if isinstance(d, dict):
                rewrite_if_fileobject(d)
                for (key, val) in d.items():
                    rewrite_fileobjects(val)
            if isinstance(d, list):
                for i in d:
                    rewrite_fileobjects(i)
        # Unset (None) attributes are dropped from the dump.
        # NOTE(review): the lambda parameter shadows the `attr` module.
        root = attr.asdict(self, filter=lambda attr, value: value != None)
        # asdict returned a dictionary that is specified correctly except the Fileobjects
        rewrite_fileobjects(root)
        return root
    def to_yaml(self, **kwargs):
        """Serialize to YAML; pass check=False to skip the semantic check."""
        if kwargs.get("check", True):
            self._semantic_check()
        return yaml_dump(self.asdict())
    def _semantic_check(self):
        # Every file object must be named before it can be dumped.
        if self.name == None:
            raise MissingAttribute(f"{str(self.__class__)} must have a name")
    def ref(self) -> Ref:
        """Return a by-name Ref to this object."""
        return Ref(self.name)
@attr.s
class FileObjectAlpha(FileObject):
    """A FileObject pinned to the v1alpha1 API version."""
    apiVersion: str = attr.ib(default="tekton.dev/v1alpha1", validator=type_validator())
@attr.s
class EnvVar:
    """A single name/value environment variable entry for a Step."""
    name: str = attr.ib(default=None, validator=type_validator())
    value: Union[str, None] = attr.ib(default=None, validator=type_validator())
@attr.s
class Step:
    """One container step of a Task (image, command, args, env, ...)."""
    image: Union[str, None] = attr.ib(default=None, validator=type_validator())
    name: Union[str, None] = attr.ib(default=None, validator=type_validator())
    workingDir: Union[str, None] = attr.ib(default=None, validator=type_validator())
    args: Union[List[str], None] = attr.ib(default=None, validator=type_validator())
    command: Union[List[str], None] = attr.ib(default=None, validator=type_validator())
    # EnvFrom []EnvFromSource
    env: Union[List[EnvVar], None] = attr.ib(default=None, validator=type_validator())
    # VolumeMounts []VolumeMount
class ParamEnum(enum.Enum):
    """Allowed parameter value types (plain string or list)."""
    str = enum.auto()
    list = enum.auto()
@attr.s
class Param:
    """A concrete name/value parameter passed to a task or pipeline."""
    name: Union[str, None] = attr.ib(default=None, validator=type_validator())
    value: Union[str, None] = attr.ib(default=None, validator=type_validator())
@attr.s
class ParamSpec:
    """Declaration of a parameter: name, description, default and type."""
    name: Union[str, None] = attr.ib(default=None, validator=type_validator())
    description: Union[str, None] = attr.ib(default=None, validator=type_validator())
    default: Union[str, None] = attr.ib(default=None, validator=type_validator())
    type: Union[ParamEnum, None] = attr.ib(default=None, validator=type_validator())
    def ref(self) -> str:
        """Return the $(params.<name>) substitution string for this parameter."""
        return f"$(params.{self.name})"
class Inputs:
    """Placeholder; task inputs are not modelled yet."""
    pass
class Resources:
    """Placeholder; task resources are not modelled yet."""
    pass
@attr.s
class TaskSpec:
    """Spec of a Task: ordered steps plus declared params and resources."""
    steps: Union[None, List[Step]] = attr.ib(default=None, validator=type_validator())
    params: Union[None, List[ParamSpec]] = attr.ib(
        default=None, validator=type_validator()
    )
    resources: Union[None, List[Resources]] = attr.ib(
        default=None, validator=type_validator()
    )
class TaskRun(FileObject):
    """Placeholder TaskRun file object; no extra fields yet."""
    pass
@attr.s
class Task(FileObject, TaskSpec):
    """A Tekton Task file object (kind: Task)."""
    kind: str = attr.ib(default="Task", validator=type_validator())
    def _semantic_check(self):
        """Raise MissingAttribute unless at least one step is defined."""
        # `not self.steps` covers both None and an empty list, replacing
        # the verbose `== None or len(...) == 0` check.
        if not self.steps:
            raise MissingAttribute("Task object must have at least one step")
@attr.s
class PipelineTask:
    """One task invocation inside a pipeline: name, taskRef and params."""
    name: Union[str, None] = attr.ib(default=None, validator=type_validator())
    taskRef: Union[None, Ref] = attr.ib(default=None, validator=type_validator())
    params: Union[None, List[Param]] = attr.ib(default=None, validator=type_validator())
@attr.s
class PipelineSpec:
    """Spec of a Pipeline: its tasks and declared parameters."""
    tasks: Union[None, List[PipelineTask]] = attr.ib(
        default=None, validator=type_validator()
    )
    params: Union[None, List[ParamSpec]] = attr.ib(
        default=None, validator=type_validator()
    )
@attr.s
class Pipeline(FileObject, PipelineSpec):
    """A Pipeline object (kind "Pipeline") combining file metadata and spec."""
    kind: str = attr.ib(default="Pipeline", validator=type_validator())
@attr.s
class PipelineRunSpec:
    """Runtime spec of a pipeline run: params plus either a pipeline
    reference or an inline pipeline spec, and an optional service account."""
    # NOTE(review): params are typed List[ParamSpec]; a run usually carries
    # concrete Param values — confirm this is intended.
    params: Union[None, List[ParamSpec]] = attr.ib(
        default=None, validator=type_validator()
    )
    pipelineRef: Union[None, Ref] = attr.ib(default=None, validator=type_validator())
    pipelineSpec: Union[None, PipelineSpec] = attr.ib(
        default=None, validator=type_validator()
    )
    serviceAccountName: Union[None, str] = attr.ib(
        default=None, validator=type_validator()
    )
@attr.s
class PipelineRun(FileObject, PipelineRunSpec):
    """A PipelineRun object (kind "PipelineRun")."""
    kind: str = attr.ib(default="PipelineRun", validator=type_validator())
# TriggerResourceTemplates = Union[PipelineRun, ...]
# Alias: currently only PipelineRun may appear in resourcetemplates;
# widen to a Union when other resource kinds are supported.
TriggerResourceTemplates = PipelineRun
@attr.s
class TriggerTemplateSpec:
    """Spec of a trigger template: the resources it instantiates and its params."""
    resourcetemplates: Union[None, List[TriggerResourceTemplates]] = attr.ib(
        default=None, validator=type_validator()
    )
    params: Union[None, List[ParamSpec]] = attr.ib(
        default=None, validator=type_validator()
    )
@attr.s
class TriggerTemplate(FileObjectAlpha, TriggerTemplateSpec):
    """A TriggerTemplate object (alpha API group)."""
    kind: str = attr.ib(default="TriggerTemplate", validator=type_validator())
@attr.s
class TriggerBindingSpec:
    """Spec of a trigger binding: the parameters it extracts for the template."""
    params: Union[None, List[ParamSpec]] = attr.ib(
        default=None, validator=type_validator()
    )
@attr.s
class TriggerBinding(FileObjectAlpha, TriggerBindingSpec):
    """A TriggerBinding object (alpha API group)."""
    kind: str = attr.ib(default="TriggerBinding", validator=type_validator())
@attr.s
class EventListenerTrigger:
    """Pairs a binding with the template it should instantiate."""
    binding: Union[None, Ref] = attr.ib(default=None, validator=type_validator())
    template: Union[None, Ref] = attr.ib(default=None, validator=type_validator())
@attr.s
class EventListenerSpec:
    """Spec of an event listener: the triggers it dispatches."""
    triggers: Union[None, List[EventListenerTrigger]] = attr.ib(
        default=None, validator=type_validator()
    )
@attr.s
class EventListener(FileObjectAlpha, EventListenerSpec):
    """An EventListener object (alpha API group)."""
    kind: str = attr.ib(default="EventListener", validator=type_validator())
|
nilq/baby-python
|
python
|
import requests
from requests.exceptions import HTTPError, ConnectionError
from .resources.functions import validate_protocol
class WordPressAPI:
    """Thin client for a WordPress REST API authenticated via the JWT plugin.

    Call :meth:`connect` once to obtain a bearer token; afterwards use
    :meth:`get` / :meth:`post` against endpoints relative to :attr:`url`.
    """
    def __init__(self, domain, username, password, protocol="https", namespace="wp-json"):
        """Store connection settings; no network traffic happens here."""
        self.domain = domain
        self.username = username
        self.password = password
        self.protocol = protocol
        self.namespace = namespace
        # Flipped to True only after a successful connect().
        self.connected = False
        self.headers = {
            "Content-Type": "application/json"
        }
    def __repr__(self):
        """Developer representation. NOTE(review): exposes the password."""
        return f"WordPressAPI({self.domain}, {self.username}, {self.password})"
    def __str__(self):
        """Human-readable representation based on the built API URL."""
        return f"WordPressAPI Object : {self.url}"
    @property
    def url(self):
        """Base URL of the API, e.g. ``https://example.com/wp-json``."""
        return f"{self.protocol}://{self.domain}/{self.namespace}"
    @property
    def protocol(self):
        """Protocol getter (backed by a name-mangled private attribute)."""
        return self.__protocol
    @protocol.setter
    def protocol(self, proto):
        """Validate and set the protocol (must be http or https)."""
        self.__protocol = validate_protocol(proto)
    @staticmethod
    def parse_json(response):
        # Decode a requests.Response body as JSON.
        return response.json()
    @staticmethod
    def parse_wp_error(response):
        # Print the standard WordPress error triple (status/code/message).
        data = response.json()
        print(f"STATUS={data['data']['status']}\nCODE={data['code']}\nMESSAGE={data['message']}")
    def build_authentication_url(self):
        # NOTE(review): credentials are placed in the query string, so they may
        # end up in server logs — consider sending them in the POST body instead.
        return f"{self.url}/jwt-auth/v1/token?username={self.username}&password={self.password}"
    def connect(self):
        """ Connect to the actual WP API. Returns None if connection wasn't successful """
        try:
            response = requests.post(self.build_authentication_url(), headers=self.headers)
            response.raise_for_status()
            self.connected = True
            # All later requests carry the JWT bearer token.
            self.headers.update({"Authorization": f"Bearer {self.parse_json(response)['token']}"})
            return response
        except HTTPError as error:
            self.parse_wp_error(error.response)
        except ConnectionError as error:
            print(error)
    def get(self, endpoint, data=None, get_response=False):
        """ Attempt a GET action. Returns None if request wasn't successful or raise Exception if attempted to GET when API is not connected """
        try:
            if self.connected:
                response = requests.get(self.url + endpoint, params=data, headers=self.headers)
                response.raise_for_status()
                # get_response=True yields the raw Response, otherwise decoded JSON.
                return response if get_response else self.parse_json(response)
            else:
                raise Exception("API is not connected!")
        except HTTPError as error:
            self.parse_wp_error(error.response)
        except ConnectionError as error:
            print(error)
    def post(self, endpoint, data, get_response=False):
        """ Attempt a POST action. Returns None if request wasn't successful or raise Exception if attempted to GET when API is not connected """
        try:
            if self.connected:
                response = requests.post(self.url + endpoint, data=data, headers=self.headers)
                response.raise_for_status()
                return response if get_response else self.parse_json(response)
            else:
                raise Exception("API is not connected!")
        except HTTPError as error:
            self.parse_wp_error(error.response)
        except ConnectionError as error:
            print(error)
# TODO: Need to implement other methods (PUT, DELETE, etc.)
|
nilq/baby-python
|
python
|
# The MIT License (MIT)
#
# Copyright 2020 Barbara Barros Carlos, Tommaso Sartor
#
# This file is part of crazyflie_nmpc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from acados_template import *
import acados_template as at
from export_ode_model import *
import numpy as np
import scipy.linalg
from ctypes import *
from os.path import dirname, join, abspath
# Build and export an acados NMPC solver for the Crazyflie quadrotor:
# 13 states (pos, quaternion, body velocities, body rates), 4 rotor-speed inputs.
ACADOS_PATH = join(dirname(abspath(__file__)), "../../../acados")
# create render arguments
ra = acados_ocp_nlp()
# export model
model = export_ode_model()
Tf = 0.75  # prediction horizon length [s]
N = 50     # number of shooting nodes
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu   # stage cost tracks states and inputs
ny_e = nx      # terminal cost tracks states only
# set ocp_nlp_dimensions
nlp_dims = ra.dims
nlp_dims.nx = nx
nlp_dims.ny = ny
nlp_dims.ny_e = ny_e
nlp_dims.nbx = 0
nlp_dims.nbu = nu
nlp_dims.nbx_e = 0
nlp_dims.nu = model.u.size()[0]
nlp_dims.N = N
# parameters
g0 = 9.8066  # [m/s^2] acceleration of gravity
mq = 33e-3   # [kg] total mass (with one marker)
Ct = 3.25e-4  # [N/krpm^2] Thrust coef
# bounds
hov_w = np.sqrt((mq*g0)/(4*Ct))  # rotor speed at hover (thrust balances weight)
max_thrust = 22
# set weighting matrices
nlp_cost = ra.cost
Q = np.eye(nx)
Q[0,0] = 120.0  # x
Q[1,1] = 100.0  # y
Q[2,2] = 100.0  # z
Q[3,3] = 1.0e-3  # qw
Q[4,4] = 1.0e-3  # qx
Q[5,5] = 1.0e-3  # qy
Q[6,6] = 1.0e-3  # qz
Q[7,7] = 7e-1  # vbx
Q[8,8] = 1.0   # vby
Q[9,9] = 4.0   # vbz
Q[10,10] = 1e-5  # wx
Q[11,11] = 1e-5  # wy
Q[12,12] = 10.0  # wz
R = np.eye(nu)
R[0,0] = 0.06  # w1
R[1,1] = 0.06  # w2
R[2,2] = 0.06  # w3
R[3,3] = 0.06  # w4
nlp_cost.W = scipy.linalg.block_diag(Q, R)
# Vx / Vu select states and inputs into the stage-cost output y = Vx*x + Vu*u.
Vx = np.zeros((ny, nx))
Vx[0,0] = 1.0
Vx[1,1] = 1.0
Vx[2,2] = 1.0
Vx[3,3] = 1.0
Vx[4,4] = 1.0
Vx[5,5] = 1.0
Vx[6,6] = 1.0
Vx[7,7] = 1.0
Vx[8,8] = 1.0
Vx[9,9] = 1.0
Vx[10,10] = 1.0
Vx[11,11] = 1.0
Vx[12,12] = 1.0
nlp_cost.Vx = Vx
Vu = np.zeros((ny, nu))
Vu[13,0] = 1.0
Vu[14,1] = 1.0
Vu[15,2] = 1.0
Vu[16,3] = 1.0
nlp_cost.Vu = Vu
# Terminal cost: heavier weight on the final state.
nlp_cost.W_e = 50*Q
Vx_e = np.zeros((ny_e, nx))
Vx_e[0,0] = 1.0
Vx_e[1,1] = 1.0
Vx_e[2,2] = 1.0
Vx_e[3,3] = 1.0
Vx_e[4,4] = 1.0
Vx_e[5,5] = 1.0
Vx_e[6,6] = 1.0
Vx_e[7,7] = 1.0
Vx_e[8,8] = 1.0
Vx_e[9,9] = 1.0
Vx_e[10,10] = 1.0
Vx_e[11,11] = 1.0
Vx_e[12,12] = 1.0
nlp_cost.Vx_e = Vx_e
# Reference: hover at z = 0.5 m, identity quaternion, hover rotor speeds.
nlp_cost.yref = np.array([0, 0, 0.5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, hov_w, hov_w, hov_w, hov_w])
nlp_cost.yref_e = np.array([0, 0, 0.5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
# Input bounds: rotor speeds between 0 and max_thrust on all four motors.
nlp_con = ra.constraints
nlp_con.lbu = np.array([0,0,0,0])
nlp_con.ubu = np.array([+max_thrust,+max_thrust,+max_thrust,+max_thrust])
nlp_con.x0 = np.array([0,0,0,1,0,0,0,0,0,0,0,0,0])
nlp_con.idxbu = np.array([0, 1, 2, 3])
## set QP solver
#ra.solver_options.qp_solver = 'FULL_CONDENSING_QPOASES'
ra.solver_options.qp_solver = 'PARTIAL_CONDENSING_HPIPM'
ra.solver_options.hessian_approx = 'GAUSS_NEWTON'
ra.solver_options.integrator_type = 'ERK'
# set prediction horizon
ra.solver_options.tf = Tf
ra.solver_options.nlp_solver_type = 'SQP_RTI'
#ra.solver_options.nlp_solver_type = 'SQP'
# set header path
ra.acados_include_path = f'{ACADOS_PATH}/include'
ra.acados_lib_path = f'{ACADOS_PATH}/lib'
ra.model = model
acados_solver = generate_solver(ra, json_file = 'acados_ocp.json')
print('>> NMPC exported')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import sys
# Advent of Code 2020 day 5: decode binary-space-partitioned boarding passes.
# read input file
with open(sys.argv[1], 'r') as fd:
    partitions = fd.readlines()
# init
part1 = 0
part2 = 0
seat_ids = []
# part1: each pass is 7 row letters (F/B) then 3 column letters (L/R);
# B and R select the upper half at each bisection step.
for partition in partitions:
    # row
    left = 128
    row = 0
    for letter in partition[:7]:
        left = int(left / 2)
        if letter == "B":
            row += left
    # column (trailing newline chars are harmless: they are neither R nor L)
    column = 0
    left = 8
    for letter in partition[7:]:
        left = int(left / 2)
        if letter == "R":
            column += left
    # seat_id
    seat_ids.append(row * 8 + column)
part1 = max(seat_ids)
# part2: our seat is the single gap in the sorted sequence of seat ids.
index = 0
seat_ids.sort()
for seat_id in seat_ids[0:len(seat_ids) - 1]:
    if seat_ids[index + 1] - seat_ids[index] > 1:
        part2 = seat_id + 1
        break
    index += 1
# done
print(f"part1: {part1}")
print(f"part2: {part2}")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
import logging
def get_logger(obj, level=None):
    """Return a logger named after ``obj.__name__``.

    ``logging.basicConfig()`` is invoked first so a default handler exists;
    when *level* is given, it is applied to the returned logger.
    """
    logging.basicConfig()
    logger = logging.getLogger(obj.__name__)
    if level is None:
        return logger
    logger.setLevel(level)
    return logger
|
nilq/baby-python
|
python
|
from django.test import TestCase
from .mixins import TwoUserMixin, ProposalMixin
from consensus_engine.models import ConsensusHistory
from django.utils import timezone
# models test
class ConsensusHistoryTest(TwoUserMixin, ProposalMixin, TestCase):
    """Tests for ConsensusHistory snapshots around publishing and voting."""

    def test_snapshot(self):
        """Snapshot a fresh proposal, then vote and check history grows."""
        p = self.create_proposal_with_two_proposal_choices()
        dt = timezone.now()
        # Manual snapshot before any votes are cast.
        ss = ConsensusHistory.build_snapshot(p)
        ss.save()
        self.assertTrue(ss is not None)
        # snapshot_date must fall between "before build" and "now".
        self.assertTrue(ss.snapshot_date >= dt and ss.snapshot_date <= timezone.now())
        self.assertTrue(ss.proposal.id == p.id)
        self.assertTrue(ss.consensus is None)
        no_votes_data = [
            {'choice_id': 1, 'text': "Yes", 'count': 0},
            {'choice_id': 2, 'text': "No", 'count': 0}
        ]
        self.assertTrue(ss.get_consensus_data() == no_votes_data)
        pc = p.proposalchoice_set.first()
        p.publish()
        pc.vote(self.user)
        # The latest snapshot at "now" should reflect the single vote
        # (presumably publish()/vote() record snapshots — confirm in model code).
        ss2 = ConsensusHistory.objects.at_date(proposal=p, at_date=timezone.now())
        one_vote_data = [
            {'choice_id': 1, 'text': "Yes", 'count': 1},
            {'choice_id': 2, 'text': "No", 'count': 0}
        ]
        self.assertTrue(ss2.get_consensus_data() == one_vote_data)
        all_history = ConsensusHistory.objects.all_history_for_proposal(p)
        self.assertTrue(all_history.count() == 2)
|
nilq/baby-python
|
python
|
"""
Script for performing a fit to a histogramm of recorded
time differences for the use with QNet
"""
from __future__ import print_function
import sys
from matplotlib import pylab
import numpy
import scipy.optimize as optimize
def fit(bincontent=None, binning=(0, 10, 21), fitrange=None):
    """
    Fit an exponential decay (plus constant background) to muon decay times.

    :param bincontent: pre-binned histogram contents; when None, raw decay
        times are read from the file named by ``sys.argv[1]`` and a plot is
        saved to ``fit.png``
    :param binning: (low, high, n_edges) passed to ``numpy.linspace``
    :param fitrange: optional (low, high) window restricting the bins fitted
    :returns: tuple (bin centers, bin contents, fit x-grid, model function,
        fit params, covariance, chi-square, nbins), or None on failure;
        in the file-reading branch nothing is returned
    """
    def decay(p, x):
        # Model: amplitude * exp(-t / lifetime) + constant background.
        return p[0] * numpy.exp(-x / p[1]) + p[2]
    def error(p, x, y):
        # Residuals for least-squares.
        return decay(p, x) - y
    if bincontent is None:
        nbins = 10
        xmin = 1.0
        xmax = 20.0
        times = [float(l) for l in open(sys.argv[1]).readlines()
                 if xmin < float(l) < xmax]
        print(len(times), "decay times")
        # nbins = optimalbins.optbinsize(times, 1, 80)
        # print(nbins, "Optimalbins selects nbins")
        # nbins = optimalbins.optbinsize(times, 1, 30)
        print("Nbins:", nbins)
        bin_edges = numpy.linspace(binning[0], binning[1], binning[2])
        bin_centers = bin_edges[:-1] + 0.5 * (bin_edges[1] - bin_edges[0])
        hist, edges = numpy.histogram(times, bin_edges)
        # hist = hist[:-1]
        p0 = numpy.array([200, 2.0, 5])
        output = optimize.leastsq(error, p0, args=(bin_centers, hist),
                                  full_output=1)
        p = output[0]
        covar = output[1]
        print("Fit parameters:", p)
        print("Covariance matrix:", covar)
        # Pearson chi-square with the model value as variance estimate.
        chisquare = 0.
        deviations = error(p, bin_centers, hist)
        for i, d in enumerate(deviations):
            chisquare += d * d / decay(p, bin_centers[i])
        params = {"legend.fontsize": 13}
        pylab.rcParams.update(params)
        fitx = numpy.linspace(xmin, xmax, 100)
        pylab.plot(bin_centers, hist, "b^", fitx, decay(p, fitx), "b-")
        pylab.ylim(0, max(hist) + 100)
        pylab.xlabel("Decay time in microseconds")
        pylab.ylabel("Events in time bin")
        # pylab.legend(("Data","Fit: (%4.2f +- %4.2f) microsec," +
        #               "chisq / ndf=%4.2f" % (p[1], numpy.sqrt(covar[1][1]),
        #               chisquare / (nbins - len(p)))))
        # NOTE(review): this legend tuple is malformed — it passes three items
        # (the third being a float) and leaves the first %4.2f unformatted.
        pylab.legend(("Data", "Fit: (%4.2f) microsec," +
                      "chisq/ndf=%4.2f" % p[1], chisquare / (nbins - len(p))))
        pylab.grid()
        pylab.savefig("fit.png")
    else:
        # this is then used for the mudecay window in muonic.
        # we have to adjust the bins to the values of the used histogram.
        if len(bincontent) == 0:
            print("WARNING: Empty bins.")
            return None
        bins = numpy.linspace(binning[0], binning[1], binning[2])
        bin_centers = bins[:-1] + 0.5 * (bins[1] - bins[0])
        if fitrange is not None:
            # Clamp the requested fit window to the histogram range.
            if fitrange[0] < binning[0]:
                fitrange = (binning[0], fitrange[1])
            if fitrange[1] > binning[1]:
                fitrange = (fitrange[0], binning[1])
            # NOTE(review): wrapping the boolean array in a list relies on
            # deprecated list-of-arrays indexing; a bare mask would be safer.
            bin_mask = [(bin_centers <= fitrange[1]) &
                        (bin_centers >= fitrange[0])]
            bin_centers_ = numpy.asarray([x for x in bin_centers
                                          if fitrange[0] <= x <= fitrange[1]])
            if len(bin_centers_) < 3:
                print("WARNING: fit range too small. " +
                      "Skipping fitting. Try with larger fit range.")
                return None
            else:
                bin_centers = bin_centers_
                bincontent = bincontent[bin_mask]
        # we cut the leading edge of the distribution away for the fit
        glob_max = max(bincontent)
        cut = 0
        for i in enumerate(bincontent):
            if i[1] == glob_max:
                cut = i[0]
        cut_bincontent = bincontent[cut:]
        # cut_bincenter = bin_centers[cut]
        cut_bincenters = bin_centers[cut:]
        # # maybe something for the future..
        # nbins = optimalbins.optbinsize(cut_bincontent, 1, 20)
        # fit_bins = numpy.linspace(cut_bincenter, 20, nbins)
        # fit_bin_centers = fit_bins[:-1] + 0.5 * (fit_bins[1] - fit_bins[0])
        # fit_bincontent = numpy.zeros(len(fit_bin_centers))
        # # the bincontent must be redistributed to fit_bincontent
        # for binindex_fit in xrange(len(fit_bincontent)):
        #     for binindex,content in enumerate(bincontent):
        #         if bin_centers[binindex] <= fit_bin_centers[binindex_fit]:
        #             fit_bincontent[binindex_fit] += content
        p0 = numpy.array([200, 2.0, 5])
        # output = optimize.leastsq(error, p0,
        #                           args=(fit_bin_centers,fitbincontent),
        #                           full_output=1)
        output = optimize.leastsq(error, p0,
                                  args=(cut_bincenters, cut_bincontent),
                                  full_output=1)
        p = output[0]
        covar = output[1]
        print("Fit parameters:", p)
        print("Covariance matrix:", covar)
        chisquare = 0.
        deviations = error(p, cut_bincenters, cut_bincontent)
        for i, d in enumerate(deviations):
            chisquare += d * d / decay(p, cut_bincenters[i])
        params = {"legend.fontsize": 13}
        pylab.rcParams.update(params)
        # nbins = 84
        nbins = len(bins)
        xmin = cut_bincenters[0]
        xmax = cut_bincenters[-1]
        fitx = numpy.linspace(xmin, xmax, 100)
        # return (bin_centers, bincontent, fitx,
        #         decay, p, covar, chisquare, nbins)
        return (cut_bincenters, cut_bincontent, fitx,
                decay, p, covar, chisquare, nbins)
def gaussian_fit(bincontent, binning=(0, 2, 10), fitrange=None):
    """
    Fit a Gaussian to pre-binned histogram contents.

    :param bincontent: histogram bin contents (array-like)
    :param binning: (low, high, n_edges) passed to ``numpy.linspace``
    :param fitrange: optional (low, high) window restricting the bins fitted
    :returns: tuple (bin centers, bin contents, fit x-grid, model function,
        fit params, covariance, chi-square, nbins), or None on failure
    """
    def gauss(p, x):
        # Model: p[0] * normal pdf with sigma=p[1], mean=p[2].
        return (p[0] * (1 / (p[1] * numpy.sqrt(2 * numpy.pi))) *
                numpy.exp(-0.5 * (((x - p[2]) / p[1]) ** 2)))
    def error(p, x, y):
        # Residuals for least-squares.
        return gauss(p, x) - y
    if len(bincontent) == 0:
        print("WARNING: Empty bins.")
        return None
    # this is then used for the mudecay window in muonic.
    # we have to adjust the bins to the values of the used histogram.
    bins = numpy.linspace(binning[0], binning[1], binning[2])
    bin_centers = bins[:-1] + 0.5 * (bins[1] - bins[0])
    if fitrange is not None:
        # Clamp the requested fit window to the histogram range.
        if fitrange[0] < binning[0]:
            fitrange = (binning[0], fitrange[1])
        if fitrange[1] > binning[1]:
            fitrange = (fitrange[0], binning[1])
        # NOTE(review): list-wrapped boolean mask relies on deprecated indexing.
        bin_mask = [(bin_centers <= fitrange[1]) &
                    (bin_centers >= fitrange[0])]
        bin_centers_ = numpy.asarray([x for x in bin_centers
                                      if fitrange[0] <= x <= fitrange[1]])
        if len(bin_centers_) < 3:
            print("WARNING: fit range too small. " +
                  "Skipping fitting. Try with larger fit range.")
            return None
        else:
            bin_centers = bin_centers_
            bincontent = bincontent[bin_mask]
    # # we cut the leading edge of the distribution away for the fit
    # glob_max = max(bincontent)
    # cut = 0
    # for i in enumerate(bincontent):
    #     if i[1] == glob_max:
    #         cut = i[0]
    cut_bincontent = bincontent  # [cut:]
    # cut_bincenter = bin_centers  # [cut]
    cut_bincenters = bin_centers  # [cut:]
    # # maybe something for the future..
    # nbins = optimalbins.optbinsize(cut_bincontent, 1, 20)
    # fit_bins = numpy.linspace(cut_bincenter, 20, nbins)
    # fit_bin_centers = fit_bins[:-1] + 0.5 * (fit_bins[1] - fit_bins[0])
    # fit_bincontent = numpy.zeros(len(fit_bin_centers))
    # # the bincontent must be redistributed to fit_bincontent
    # for binindex_fit in xrange(len(fit_bincontent)):
    #     for binindex, content in enumerate(bincontent):
    #         if bin_centers[binindex] <= fit_bin_centers[binindex_fit]:
    #             fit_bincontent[binindex_fit] += content
    # Moment estimates used to seed the fit.
    wsum = cut_bincontent.sum()
    mean = (cut_bincontent * cut_bincenters).sum() / wsum
    meansquared = (cut_bincontent * cut_bincenters ** 2).sum() / wsum
    var = meansquared - mean ** 2
    # p0 = numpy.array([20, 1.0, 5])
    # NOTE(review): p[1] is used as sigma in the model but is seeded with the
    # variance here — numpy.sqrt(var) looks intended; confirm.
    p0 = numpy.array([max(cut_bincontent), var, mean])
    # output = optimize.leastsq(error, p0,
    #                           args=(fit_bin_centers, fitbincontent),
    #                           full_output=1)
    output = optimize.leastsq(error, p0,
                              args=(cut_bincenters, cut_bincontent),
                              full_output=1)
    p = output[0]
    covar = output[1]
    print("Fit parameters:", p)
    print("Covariance matrix:", covar)
    chisquare = 0.
    deviations = error(p, cut_bincenters, cut_bincontent)
    for i, d in enumerate(deviations):
        chisquare += d*d/gauss(p,cut_bincenters[i])
    params = {"legend.fontsize": 13}
    pylab.rcParams.update(params)
    # nbins = 84
    nbins = len(bins)
    xmin = cut_bincenters[0]
    xmax = cut_bincenters[-1]
    fitx = numpy.linspace(xmin, xmax, 100)
    # return (bin_centers, bincontent, fitx, decay, p, covar, chisquare, nbins)
    return (cut_bincenters, cut_bincontent, fitx,
            gauss, p, covar, chisquare, nbins)
if __name__ == '__main__':
    # CLI entry point: reads decay times from the file named by sys.argv[1].
    fit()
|
nilq/baby-python
|
python
|
from easydict import EasyDict
# DI-engine config: DQN agent on Atari Pong with a GAIL reward model.
pong_dqn_gail_config = dict(
    exp_name='pong_gail_dqn_seed0',
    env=dict(
        collector_env_num=8,
        evaluator_env_num=8,
        n_evaluator_episode=8,
        stop_value=20,
        env_id='PongNoFrameskip-v4',
        frame_stack=4,
    ),
    reward_model=dict(
        type='gail',
        input_size=[4, 84, 84],
        hidden_size=128,
        batch_size=64,
        learning_rate=1e-3,
        update_per_collect=100,
        collect_count=1000,
        action_size=6,
        # Users should add their own model path here. Model path should lead to a model.
        # Absolute path is recommended.
        # In DI-engine, it is ``exp_name/ckpt/ckpt_best.pth.tar``.
        expert_model_path='model_path_placeholder',
        # Path where to store the reward model
        reward_model_path='data_path_placeholder+/reward_model/ckpt/ckpt_best.pth.tar',
        # Users should add their own data path here. Data path should lead to a file to store data or load the stored data.
        # Absolute path is recommended.
        # In DI-engine, it is usually located in ``exp_name`` directory
        # e.g. 'exp_name/expert_data.pkl'
        data_path='data_path_placeholder',
    ),
    policy=dict(
        cuda=True,
        priority=False,
        model=dict(
            obs_shape=[4, 84, 84],
            action_shape=6,
            encoder_hidden_size_list=[128, 128, 512],
        ),
        nstep=1,
        discount_factor=0.99,
        learn=dict(
            update_per_collect=10,
            batch_size=32,
            learning_rate=0.0001,
            target_update_freq=500,
        ),
        collect=dict(n_sample=96, ),
        eval=dict(evaluator=dict(eval_freq=4000, )),
        other=dict(
            # Epsilon-greedy exploration: exponential decay from 1.0 to 0.05.
            eps=dict(
                type='exp',
                start=1.,
                end=0.05,
                decay=250000,
            ),
            replay_buffer=dict(replay_buffer_size=100000, ),
        ),
    ),
)
pong_dqn_gail_config = EasyDict(pong_dqn_gail_config)
main_config = pong_dqn_gail_config
# Factory config: which env/env-manager/policy classes DI-engine should build.
pong_dqn_gail_create_config = dict(
    env=dict(
        type='atari',
        import_names=['dizoo.atari.envs.atari_env'],
    ),
    env_manager=dict(type='subprocess'),
    policy=dict(type='dqn'),
)
pong_dqn_gail_create_config = EasyDict(pong_dqn_gail_create_config)
create_config = pong_dqn_gail_create_config
if __name__ == '__main__':
    # or you can enter `ding -m serial_gail -c pong_gail_dqn_config.py -s 0`
    # then input the config you used to generate your expert model in the path mentioned above
    # e.g. pong_dqn_config.py
    from ding.entry import serial_pipeline_gail
    from dizoo.atari.config.serial.pong import pong_dqn_config, pong_dqn_create_config
    expert_main_config = pong_dqn_config
    expert_create_config = pong_dqn_create_config
    # Runs the GAIL training pipeline with the expert DQN config above.
    serial_pipeline_gail(
        (main_config, create_config), (expert_main_config, expert_create_config),
        max_env_step=1000000,
        seed=0,
        collect_data=True
    )
|
nilq/baby-python
|
python
|
class MyClass:
    """Demo base class whose methods simply announce themselves on stdout."""

    def method1(self):
        """Print a fixed marker identifying this implementation."""
        print("myClass method1")

    def method2(self, someString):
        """Print a marker followed by the supplied string."""
        print("myclass method2 " + someString)
class MyOtherClass(MyClass):
    """Subclass extending method1 while reusing the parent implementation."""

    def method1(self):
        """Run the base-class behaviour, then add this class's marker."""
        super().method1()
        print("anotherClass method1")
def main():
    """Exercise both demo classes: base first, then the subclass."""
    for instance in (MyClass(), MyOtherClass()):
        instance.method1()
        instance.method2("hello python")


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# WikiBot
#
# Made by Aryan Takalkar
import speech_recognition as speech
import wikipedia
import pyttsx3
engine = pyttsx3.init()  # module-wide text-to-speech engine shared by all helpers
running = True  # main-loop flag; never cleared, so the loop runs until interrupted
def speech_init():
    """Configure the shared TTS engine: speaking rate, volume and voice."""
    engine.setProperty('rate', 175)
    # NOTE(review): pyttsx3 volume is normally 0.0-1.0; 2 may be clamped — confirm.
    engine.setProperty('volume', 2)
    # Fixed: the original line was a garbled chained assignment
    # (``voices = engine.getPropertyvoices = engine.getProperty('voices')``)
    # that also set a stray ``getPropertyvoices`` attribute on the engine.
    voices = engine.getProperty('voices')
    engine.setProperty('voice', voices[0].id)
def speech_output(x):
    """Speak *x* aloud through the shared engine; blocks until finished."""
    engine.say(x)
    engine.runAndWait()
def start_listening():
    """Listen on the microphone and return the recognized phrase as a string.

    On an unrecognizable utterance the user is asked to repeat, recursively.
    The ``command`` global is kept for backward compatibility with any
    external readers, but the return value no longer depends on it.
    """
    global command
    try:
        r = speech.Recognizer()
        with speech.Microphone() as source:
            r.adjust_for_ambient_noise(source)
            speech_output("Say something")
            audio = r.listen(source)
        command = r.recognize_google(audio)
        speech_output("You said: " + command)
    except speech.UnknownValueError:
        speech_output("Sorry, I didn't get that. Try again.")
        # Fixed: return the retry's result directly instead of relying on the
        # recursive call mutating the ``command`` global before falling through.
        return start_listening()
    return str(command)
### Main Code ###
speech_init()
speech_output("I am Wikibot. I can give you information on just about anything, as long as it's in a Wikipedia article.")
userinp = start_listening()
while running:
    try:
        # Fixed: the summary lookup must happen INSIDE the try block —
        # wikipedia.summary() is what raises DisambiguationError, so the
        # original (which called it before the try) could never catch it.
        summary = wikipedia.summary(userinp, sentences = 5)
        speech_output(summary)
        userinp = start_listening()
    except wikipedia.DisambiguationError:
        speech_output("Sorry, I didn't get that. Try again.")
        userinp = start_listening()
|
nilq/baby-python
|
python
|
"""
Provides utility functions for creating plots in exercise 8.
"""
from typing import Union
def organize_kwargs(
    user_kwargs: Union[dict, None], default_kwargs: dict = None
) -> dict:
    """
    Merge user keyword arguments over a default configuration.

    Parameters
    ----------
    user_kwargs: Union[dict, None]
        User-provided keyword arguments, or None for "no overrides"
    default_kwargs: dict
        Baseline configuration; user entries take precedence over it

    Returns
    -------
    dict
        A new dict containing the defaults updated with the user entries
    """
    merged = dict(default_kwargs) if default_kwargs else {}
    merged.update(user_kwargs or {})
    return merged
|
nilq/baby-python
|
python
|
import s3fs
import numpy as np
import pandas as pd
import xarray as xr
from pyproj import Proj
def isin_merra_cell(lat, lon, latm, lonm):
    """Return True when point (lat, lon) lies inside the 0.5 x 0.625 degree
    MERRA-2 cell centered at (latm, lonm); cells straddling the date line
    are checked as two longitude segments."""
    half_lat, half_lon = 0.25, 0.3125
    lat_lo, lat_hi = latm - half_lat, latm + half_lat
    lon_lo, lon_hi = lonm - half_lon, lonm + half_lon
    segments = [(lon_lo, lon_hi)]
    if lon_hi > 180:
        # Cell spills past +180: also test the wrapped western sliver.
        segments += [(lon_lo, 180), (-180, lon_hi - 360)]
    elif lon_lo <= -180:
        # Cell spills past -180: also test the wrapped eastern sliver.
        segments += [(-180, lon_hi), (lon_lo + 360, 180)]
    for seg_lo, seg_hi in segments:
        if lat_lo <= lat <= lat_hi and seg_lo <= lon <= seg_hi:
            return True
    return False
def merra2_idx2(lat, lon, latmg, lonmg):
    """Return the flat index into the MERRA-2 meshgrids (latmg, lonmg) of the
    cell containing (lat, lon), or NaN when no candidate cell matches.

    Candidate centers within roughly one cell of the point are pre-selected
    with a vectorized mask, then verified exactly with isin_merra_cell."""
    half_lat, half_lon = 0.25, 0.3125
    lat_lo, lat_hi = lat - half_lat, lat + half_lat
    lon_lo, lon_hi = lon - half_lon, lon + half_lon
    near_lon = (lonmg >= lon_lo) & (lonmg <= lon_hi)
    if lon_hi > 180:
        # Point near +180: also accept wrapped centers on the western side.
        near_lon |= (lonmg <= lon_hi + half_lon - 360)
    candidate = near_lon & (latmg >= lat_lo) & (latmg <= lat_hi)
    flat_indices = np.arange(latmg.size).reshape(latmg.shape)
    for idx in flat_indices[candidate]:
        if isin_merra_cell(lat, lon, latmg.flat[idx], lonmg.flat[idx]):
            return idx
    return np.nan
# For each GOES satellite/domain, map every ABI pixel to its MERRA-2 grid
# cell and save the per-cell pixel index lists to a NetCDF file.
fs = s3fs.S3FileSystem(anon=True)
sats = [16, 17]
domains = ['C', 'F']
for sat in sats:
    for domain in domains:
        # Open one sample L2 DSI granule to obtain the fixed scan geometry.
        f = fs.open(fs.ls(f'noaa-goes{sat}/ABI-L2-DSI{domain}/2020/001/01')[0])
        ds = xr.open_dataset(f)
        h = ds.goes_imager_projection.perspective_point_height[0]
        lon_0 = ds.goes_imager_projection.longitude_of_projection_origin[0]
        sweep = ds.goes_imager_projection.sweep_angle_axis
        # Invert the geostationary projection to get pixel lat/lon.
        p = Proj(proj='geos', h=h, lon_0=lon_0, sweep=sweep)
        x, y = np.meshgrid(h*ds.x, h*ds.y)
        lon, lat = p(x, y, inverse=True)
        # pyproj returns 1e30 for off-disk pixels; mask them out.
        lon[lon == 1e30] = np.nan
        lat[lat == 1e30] = np.nan
        ds = ds.assign_coords(lat=(('y', 'x'), lat), lon=(('y', 'x'), lon))
        npts = ds.x.size * ds.y.size
        # MERRA-2 cell centers: 0.5 deg latitude x 0.625 deg longitude.
        latm = np.arange(-90, 90.5, 0.5)
        lonm = np.arange(-180, 180, 0.625)
        lonmg, latmg = np.meshgrid(lonm, latm)
        m2i = [merra2_idx2(ds.lat.values.flat[i], ds.lon.values.flat[i], latmg, lonmg) for i in range(npts)]
        # Group pixel indices by their MERRA-2 cell index.
        groups = {}
        for i, v in enumerate(m2i):
            if np.isnan(v):
                continue
            v = int(v)
            if v not in groups:
                groups[v] = []
            groups[v].append(i)
        group_idx = np.asarray(list(groups.keys())).astype(int)
        pixel_count = np.zeros((latmg.size), dtype=int)
        pixel_count[group_idx] = np.asarray([len(g) for g in groups.values()])
        # Ragged lists padded into a rectangular (max count x cells) array.
        merra_grid = np.zeros((pixel_count.max(), latmg.size))
        for i, g in groups.items():
            merra_grid[:len(g), int(i)] = g
        space = pd.MultiIndex.from_product([latm, lonm], names=['lat', 'lon'])
        idx = xr.Dataset(coords=dict(space=space))
        idx['pixel_index'] = ('pix', 'space'), merra_grid.astype(int)
        idx['pixel_count'] = ('space'), pixel_count
        idx.unstack('space').to_netcdf(f'idx_{sat}_{domain}.nc')
        print(f'Saved: idx_{sat}_{domain}.nc')
|
nilq/baby-python
|
python
|
"""Sets (expansions) information
"""
import datetime
from typing import Hashable
from . import utils
class Set(utils.i18nMixin, utils.NamedMixin):
    """A class representing a V:tES Set (expansion)."""

    def __init__(self, **kwargs):
        """Initialize from keyword arguments: abbrev, release_date, name, company.

        NOTE(review): an ``id`` kwarg is silently ignored here (id stays 0),
        although callers such as DefaultSetMap pass one — confirm intended.
        """
        super().__init__()
        self.id = 0
        self.abbrev = kwargs.get("abbrev", None)
        self.release_date = kwargs.get("release_date", None)
        self.name = kwargs.get("name", None)
        # Fixed: previously read the "abbrev" key (copy-paste error), so the
        # company was always set to the abbreviation; from_vekn() shows the
        # company is a distinct field ("Company" column).
        self.company = kwargs.get("company", None)

    def from_vekn(self, data: dict):
        """Load info from VEKN CSV dict (Id/Abbrev/Release Date/Full Name/Company)."""
        self.id = int(data["Id"])
        self.abbrev = data["Abbrev"]
        # "Release Date" is YYYYMMDD; normalize to ISO-8601 (YYYY-MM-DD).
        self.release_date = (
            datetime.datetime.strptime(data["Release Date"], "%Y%m%d")
            .date()
            .isoformat()
        )
        self.name = data["Full Name"]
        self.company = data["Company"]
class SetMap(dict):
    """A dict of all sets, indexed by both abbreviation and English name."""

    # Promo "sets" keyed by abbreviation; values are [display name, release date].
    # NOTE(review): some dates below disagree with the date embedded in their
    # key (e.g. Promo-20080203, Promo-20190629, Promo-20210310) — verify
    # against the VEKN set list before relying on them.
    PROMOS = {
        "Promo-20210709": ["2021 Kickstarter Promo", "2021-07-09"],
        "Promo-20210701": ["2021 Kickstarter Promo", "2021-07-01"],
        "Promo-20210331": ["2021 Mind’s Eye Theatre Promo", "2021-03-31"],
        "Promo-20210310": ["2021 Resellers Promo", "2021-03-31"],
        "Promo-20191123": ["2020 GP Promo", "2020-11-23"],
        "Promo-20201030": ["V5 Polish Edition promo", "2020-10-30"],
        "Promo-20201123": ["2020 GP Promo", "2020-11-23"],
        "Promo-20200511": ["2020 Promo Pack 2", "2020-05-11"],
        "Promo-20191027": ["2019 ACC Promo", "2019-10-27"],
        "Promo-20191005": ["2019 AC Promo", "2019-10-05"],
        "Promo-20190818": ["2019 EC Promo", "2019-08-18"],
        "Promo-20190816": ["2019 DriveThruCards Promo", "2019-08-16"],
        "Promo-20190614": ["2019 Promo", "2019-06-14"],
        "Promo-20190601": ["2019 SAC Promo", "2019-06-01"],
        "Promo-20190615": ["2019 NAC Promo", "2019-06-15"],
        "Promo-20190629": ["2019 Grand Prix Promo", "2019-06-15"],
        "Promo-20190408": ["2019 Promo Pack 1", "2019-04-08"],
        "Promo-20181004": ["2018 Humble Bundle", "2018-10-04"],
        "Promo-20150219": ["2015 Storyline Rewards", "2015-02-19"],
        "Promo-20150221": ["2015 Storyline Rewards", "2015-02-21"],
        "Promo-20150215": ["2015 Storyline Rewards", "2015-02-15"],
        "Promo-20150214": ["2015 Storyline Rewards", "2015-02-14"],
        "Promo-20150211": ["2015 Storyline Rewards", "2015-02-11"],
        "Promo-20150216": ["2015 Storyline Rewards", "2015-02-16"],
        "Promo-20150220": ["2015 Storyline Rewards", "2015-02-20"],
        "Promo-20150218": ["2015 Storyline Rewards", "2015-02-18"],
        "Promo-20150217": ["2015 Storyline Rewards", "2015-02-17"],
        "Promo-20150213": ["2015 Storyline Rewards", "2015-02-13"],
        "Promo-20150212": ["2015 Storyline Rewards", "2015-02-12"],
        "Promo-20100510": ["2010 Storyline promo", "2010-05-10"],
        "Promo-20090929": ["2009 Tournament / Storyline promo", "2009-09-29"],
        "Promo-20090401": ["2009 Tournament / Storyline promo", "2009-04-01"],
        "Promo-20081119": ["2008 Tournament promo", "2008-11-19"],
        "Promo-20081023": ["2008 Tournament promo", "2008-10-23"],
        "Promo-20080810": ["2008 Storyline promo", "2008-08-10"],
        "Promo-20080203": ["2008 Tournament promo", "2008-08-10"],
        "Promo-20070601": ["2007 Promo", "2007-06-01"],
        "Promo-20070101": ["Sword of Caine promo", "2007-01-01"],
        "Promo-20061126": ["2006 EC Tournament promo", "2006-11-26"],
        "Promo-20061101": ["2006 Storyline promo", "2006-11-01"],
        "Promo-20061026": ["2006 Tournament promo", "2006-10-26"],
        "Promo-20060902": ["2006 Tournament promo", "2006-09-02"],
        "Promo-20060710": ["Third Edition promo", "2006-07-10"],
        "Promo-20060417": ["2006 Championship promo", "2006-04-17"],
        "Promo-20060213": ["2006 Tournament promo", "2006-02-13"],
        "Promo-20060123": ["2006 Tournament promo", "2006-01-23"],
        "Promo-20051026": ["Legacies of Blood promo", "2005-10-26"],
        "Promo-20051001": ["2005 Storyline promo", "2005-10-01"],
        "Promo-20050914": ["Legacies of Blood promo", "2005-09-14"],
        "Promo-20050611": ["2005 Tournament promo", "2005-06-11"],
        "Promo-20050122": ["2005 Tournament promo", "2005-01-22"],
        "Promo-20050115": ["Kindred Most Wanted promo", "2005-01-15"],
        "Promo-20041015": ["Fall 2004 Storyline promo", "2004-10-15"],
        "Promo-20040411": ["Gehenna promo", "2004-04-11"],
        "Promo-20040409": ["2004 promo", "2004-04-09"],
        "Promo-20040301": ["Prophecies league promo", "2004-03-01"],
        "Promo-20031105": ["Black Hand promo", "2003-11-05"],
        "Promo-20030901": ["Summer 2003 Storyline promo", "2003-09-01"],
        "Promo-20030307": ["Anarchs promo", "2003-03-07"],
        "Promo-20021201": ["2003 Tournament promo", "2002-12-01"],
        "Promo-20021101": ["Fall 2002 Storyline promo", "2002-11-01"],
        "Promo-20020811": ["Sabbat War promo", "2002-08-11"],
        "Promo-20020704": ["Camarilla Edition promo", "2002-07-04"],
        "Promo-20020201": ["Winter 2002 Storyline promo", "2002-02-01"],
        "Promo-20011201": ["Bloodlines promo", "2001-12-01"],
        "Promo-20010428": ["Final Nights promo", "2001-04-28"],
        "Promo-20010302": ["Final Nights promo", "2001-03-02"],
        "Promo-19960101": ["1996 Promo", "1996-01-01"],
    }

    def __init__(self):
        """Pre-populate with the POD pseudo-set and all known promo sets."""
        super().__init__()
        self.add(Set(abbrev="POD", name="Print on Demand"))
        for abbrev, (name, release_date) in self.PROMOS.items():
            self.add(Set(abbrev=abbrev, name=name, release_date=release_date))

    def add(self, set_: Set) -> None:
        """Add a set to the map, indexing it under both abbrev and name."""
        self[set_.abbrev] = set_
        self[set_.name] = set_

    def i18n_set(self, set_: Set) -> None:
        """Add a translation for a set."""
        # Delegates to the stored instance's i18n mixin; only the abbrev of
        # the passed set_ is used for lookup.
        self[set_.abbrev].i18n_set()
class DefaultSetMap(dict):
    """Fallback set map that knows nothing beyond the set abbreviation.

    Every lookup fabricates a minimal Set on the fly, which lets card
    information be parsed even when no real set data has been loaded.
    """

    def __getitem__(self, k: Hashable) -> Set:
        placeholder = Set(id=1, abbrev=k, name=k)
        return placeholder
#: Use the default set map to parse cards information with no set information available
DEFAULT_SET_MAP = DefaultSetMap()
|
nilq/baby-python
|
python
|
from SimPEG import *
import simpegEM as EM
from scipy.constants import mu_0
import matplotlib.pyplot as plt

# Toggle the optional diagnostic figures.
plotIt = False

# --- Cylindrically symmetric mesh: core cells plus geometric padding ---
cs, ncx, ncz, npad = 5., 25, 15, 15
hx = [(cs,ncx), (cs,npad,1.3)]
hz = [(cs,npad,-1.3), (cs,ncz), (cs,npad,1.3)]
mesh = Mesh.CylMesh([hx,1,hz], '00C')

# --- 1D conductivity model: air over a half-space with a buried layer ---
active = mesh.vectorCCz<0.
layer = (mesh.vectorCCz<0.) & (mesh.vectorCCz>=-100.)
# Inactive (air) cells are pinned to log(1e-8) S/m.
actMap = Maps.ActiveCells(mesh, active, np.log(1e-8), nC=mesh.nCz)
# Model parameters are log-conductivities on the active 1D cells.
mapping = Maps.ExpMap(mesh) * Maps.Vertical1DMap(mesh) * actMap
sig_half = 2e-3   # half-space conductivity (S/m)
sig_air = 1e-8    # air conductivity (S/m)
sig_layer = 1e-3  # layer conductivity (S/m), 0 to -100 m depth
sigma = np.ones(mesh.nCz)*sig_air
sigma[active] = sig_half
sigma[layer] = sig_layer
mtrue = np.log(sigma[active])

if plotIt:
    # Plot the true conductivity model against depth.
    fig, ax = plt.subplots(1,1, figsize = (3, 6))
    plt.semilogx(sigma[active], mesh.vectorCCz[active])
    ax.set_ylim(-600, 0)
    ax.set_xlim(1e-4, 1e-2)
    ax.set_xlabel('Conductivity (S/m)', fontsize = 14)
    ax.set_ylabel('Depth (m)', fontsize = 14)
    ax.grid(color='k', alpha=0.5, linestyle='dashed', linewidth=0.5)
    plt.show()

# --- TDEM survey: vertical magnetic dipole source, 'bz' receiver at 30 m ---
rxOffset=1e-3
rx = EM.TDEM.RxTDEM(np.array([[rxOffset, 0., 30]]), np.logspace(-5,-3, 31), 'bz')
src = EM.TDEM.SrcTDEM_VMD_MVP([rx], np.array([0., 0., 80]))
survey = EM.TDEM.SurveyTDEM([src])
prb = EM.TDEM.ProblemTDEM_b(mesh, mapping=mapping)
prb.Solver = SolverLU
prb.timeSteps = [(1e-06, 20),(1e-05, 20), (0.0001, 20)]
prb.pair(survey)

# Forward-model the true data and add 5% relative Gaussian noise.
dtrue = survey.dpred(mtrue)
survey.dtrue = dtrue
std = 0.05
noise = std*abs(survey.dtrue)*np.random.randn(*survey.dtrue.shape)
survey.dobs = survey.dtrue+noise
survey.std = survey.dobs*0 + std
# Data weights scale as 1 / (|dobs| * std).
survey.Wd = 1/(abs(survey.dobs)*std)

if plotIt:
    # Compare noise-free and observed decay curves.
    fig, ax = plt.subplots(1,1, figsize = (10, 6))
    ax.loglog(rx.times, dtrue, 'b.-')
    ax.loglog(rx.times, survey.dobs, 'r.-')
    ax.legend(('Noisefree', '$d^{obs}$'), fontsize = 16)
    ax.set_xlabel('Time (s)', fontsize = 14)
    ax.set_ylabel('$B_z$ (T)', fontsize = 16)
    ax.set_xlabel('Time (s)', fontsize = 14)
    ax.grid(color='k', alpha=0.5, linestyle='dashed', linewidth=0.5)
    plt.show()

# --- Inversion: Tikhonov regularization, inexact Gauss-Newton, beta cooling ---
dmisfit = DataMisfit.l2_DataMisfit(survey)
regMesh = Mesh.TensorMesh([mesh.hz[mapping.maps[-1].indActive]])
reg = Regularization.Tikhonov(regMesh)
opt = Optimization.InexactGaussNewton(maxIter = 5)
invProb = InvProblem.BaseInvProblem(dmisfit, reg, opt)
# Create an inversion object
beta = Directives.BetaSchedule(coolingFactor=5, coolingRate=2)
betaest = Directives.BetaEstimate_ByEig(beta0_ratio=1e0)
inv = Inversion.BaseInversion(invProb, directiveList=[beta,betaest])
# Start from a homogeneous half-space model.
m0 = np.log(np.ones(mtrue.size)*sig_half)
reg.alpha_s = 1e-2   # smallness weight
reg.alpha_x = 1.     # smoothness weight
prb.counter = opt.counter = Utils.Counter()
opt.LSshorten = 0.5
opt.remember('xc')
mopt = inv.run(m0)

if plotIt:
    # Recovered vs. true conductivity with depth.
    fig, ax = plt.subplots(1,1, figsize = (3, 6))
    plt.semilogx(sigma[active], mesh.vectorCCz[active])
    plt.semilogx(np.exp(mopt), mesh.vectorCCz[active])
    ax.set_ylim(-600, 0)
    ax.set_xlim(1e-4, 1e-2)
    ax.set_xlabel('Conductivity (S/m)', fontsize = 14)
    ax.set_ylabel('Depth (m)', fontsize = 14)
    ax.grid(color='k', alpha=0.5, linestyle='dashed', linewidth=0.5)
    plt.legend(['$\sigma_{true}$', '$\sigma_{pred}$'])
    plt.show()
|
nilq/baby-python
|
python
|
"""
Support for EnOcean sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.enocean/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME, CONF_ID)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components import enocean
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)

DEFAULT_NAME = 'EnOcean sensor'
DEPENDENCIES = ['enocean']

# Extend the base sensor schema: a required EnOcean device id (list of ints)
# and an optional friendly name.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_ID): vol.All(cv.ensure_list, [vol.Coerce(int)]),
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up an EnOcean sensor device from its platform configuration."""
    device_id = config.get(CONF_ID)
    friendly_name = config.get(CONF_NAME)
    add_devices([EnOceanSensor(device_id, friendly_name)])
class EnOceanSensor(enocean.EnOceanDevice, Entity):
    """Representation of an EnOcean sensor device such as a power meter."""

    def __init__(self, dev_id, devname):
        """Initialize the EnOcean sensor device."""
        enocean.EnOceanDevice.__init__(self)
        self.dev_id = dev_id
        self.devname = devname
        self.stype = "powersensor"
        self.power = None
        self.which = -1
        self.onoff = -1

    @property
    def name(self):
        """Return the name of the device."""
        return 'Power %s' % self.devname

    @property
    def state(self):
        """Return the most recent power reading."""
        return self.power

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement (watts)."""
        return 'W'

    def value_changed(self, value):
        """Store a new power reading and push the updated state."""
        self.power = value
        self.schedule_update_ha_state()
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
# Copyright (c) 2019, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_os_info
short_description: Get information about the Vultr OSes available.
description:
- Get infos about OSes available to boot servers.
author:
- "Yanis Guenane (@Spredzy)"
- "René Moser (@resmo)"
extends_documentation_fragment:
- community.general.vultr
'''
EXAMPLES = r'''
- name: Get Vultr OSes infos
vultr_os_info:
register: results
- name: Print the gathered infos
debug:
var: results.vultr_os_info
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_os_info:
description: Response from Vultr API as list
returned: available
type: complex
contains:
arch:
description: OS Architecture
returned: success
type: str
sample: x64
family:
description: OS family
returned: success
type: str
sample: openbsd
name:
description: OS name
returned: success
type: str
sample: OpenBSD 6 x64
windows:
description: OS is a MS Windows
returned: success
type: bool
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrOSInfo(Vultr):
    """Collects the list of operating systems available on Vultr."""

    def __init__(self, module):
        super(AnsibleVultrOSInfo, self).__init__(module, "vultr_os_info")
        # Mapping of raw API fields to returned result keys
        # (with optional type coercion).
        self.returns = {
            "OSID": dict(key='id', convert_to='int'),
            "arch": dict(),
            "family": dict(),
            "name": dict(),
            "windows": dict(convert_to='bool')
        }

    def get_oses(self):
        # Raw API response: a dict keyed by OSID.
        return self.api_query(path="/v1/os/list")
def parse_oses_list(oses_list):
    """Flatten the API's OSID-keyed mapping of OSes into a plain list.

    The original comprehension iterated ``items()`` and shadowed the
    builtins ``id`` and the ``os`` module name; only the values are needed.
    """
    return list(oses_list.values())
def main():
    """Module entry point: query Vultr for OS info and exit with the result."""
    argument_spec = vultr_argument_spec()
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,  # read-only module, always check-mode safe
    )
    os_info = AnsibleVultrOSInfo(module)
    result = os_info.get_result(parse_oses_list(os_info.get_oses()))
    module.exit_json(**result)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
# Ensomniac 2022 Ryan Martin, ryan@ensomniac.com
# Andrew Stet, stetandrew@gmail.com
import os
import sys
class GlobalSpacing:
    """Normalizes blank-line spacing in a file's source lines.

    All methods operate in place on ``self.source_code`` (a list of line
    strings). NOTE(review): the annotated attributes below are only declared
    here; they are presumably assigned by a consuming/mixing class — confirm
    before reusing this class standalone.
    """

    group: bool                     # collapse consecutive keyword matches together
    ignore: str                     # substring that exempts a line from fixing
    source_code: list               # the file's lines, mutated in place
    iter_limit_range: range         # bounded range used instead of while-loops
    starts_with_keyword: str        # keyword currently being spaced
    line_break_quantity: int        # desired blank lines before a match
    GetIndentSpaceCount: callable   # returns a line's leading-space count
    line_end_keyword_strings: list  # line endings that require a following blank

    def __init__(self):
        super().__init__()

    def RemoveExtraLinesAtStartOfFile(self):
        """Drop leading empty lines until the first non-empty line."""
        for _ in self.iter_limit_range:
            try:
                if not len(self.source_code[0]):
                    self.source_code.pop(0)
                else:
                    break
            except IndexError:
                # File became empty.
                break

    def RemoveExtraLinesAtEndOfFile(self):
        """Trim trailing empties so at most one blank line ends the file."""
        for _ in self.iter_limit_range:
            try:
                if not len(self.source_code[-1]) and not len(self.source_code[-2]):
                    self.source_code.pop()
                else:
                    break
            except IndexError:
                break

    def CheckSpecificSpacing(self, starts_with_keyword, line_break_quantity=1, group=False, ignore=""):
        """Enforce ``line_break_quantity`` blank lines before each line that
        starts with ``starts_with_keyword``.

        Repeats ``fix_specific_spacing`` (which restarts after each mutation)
        until a full pass makes no change, then re-attaches comments to the
        blocks they document.
        """
        finished = False
        for _ in self.iter_limit_range:
            if finished:
                break
            self.starts_with_keyword = starts_with_keyword
            self.line_break_quantity = line_break_quantity
            self.group = group
            self.ignore = ignore
            finished = self.fix_specific_spacing()
        self.fix_comments_separated_from_top_of_blocks()

    def AddNeededLineBreaks(self):
        """Insert a blank line after lines ending in any configured keyword."""
        finished = False
        for keyword in self.line_end_keyword_strings:
            for _ in self.iter_limit_range:
                if finished:
                    break
                for index, line in enumerate(self.source_code):
                    if index == len(self.source_code) - 1:
                        finished = True
                        break
                    # Only act when the next line is non-empty (no break yet).
                    if not line.strip().endswith(keyword) or not len(self.source_code[index + 1]):
                        continue
                    # Only separate statements at the same indent level.
                    if self.GetIndentSpaceCount(line) != self.GetIndentSpaceCount(self.source_code[index - 1]):
                        continue
                    self.source_code.insert(index + 1, "")

    def RemoveExtraLinesBetweenStatements(self, exception_strings=[]):
        """Collapse runs of 2+ blank lines between statements down to one.

        NOTE(review): the ``exception_strings`` loop only ``continue``s its own
        inner iteration, so exceptions are effectively ignored — confirm intent.
        """
        finished = False
        for _ in self.iter_limit_range:
            if finished:
                break
            for index, line in enumerate(self.source_code):
                altered = False
                stripped = line.strip()
                next_statement_index = None
                for exception in exception_strings:
                    if stripped.startswith(exception):
                        continue
                if not len(stripped):
                    continue
                # Find the next non-empty line after this one.
                for num in self.iter_limit_range:
                    if num < 1:
                        continue
                    try:
                        if len(self.source_code[index + num].strip()):
                            next_statement_index = index + num
                            break
                    except:
                        break
                if not type(next_statement_index) == int:
                    continue
                # Number of blank lines between the two statements.
                spaces = (next_statement_index - 1) - index
                for _ in self.iter_limit_range:
                    if spaces > 1:
                        try:
                            self.source_code.pop(index + 1)
                            altered = True
                            spaces -= 1
                        except:
                            break
                    else:
                        break
                if altered:
                    # Indices shifted; restart the scan.
                    break
                if index == len(self.source_code) - 1:
                    finished = True

    def fix_comments_separated_from_top_of_blocks(self):
        """Keep '#' comments glued to the def/class they document, and ensure a
        blank line before a commented top-level def/class."""
        finished = False
        for _ in self.iter_limit_range:
            if finished:
                break
            for index, line in enumerate(self.source_code):
                if index == len(self.source_code) - 1:
                    finished = True
                    break
                try:
                    two_strip = self.source_code[index + 2].strip()
                except:
                    finished = True
                    break
                # A lone comment (no comment neighbors) separated from a block:
                if line.strip().startswith("#") \
                and not self.source_code[index - 1].strip().startswith("#") \
                and not self.source_code[index + 1].strip().startswith("#"):
                    if two_strip.startswith("def ") or two_strip.startswith("class "):
                        # One blank line between comment and block: remove it.
                        self.source_code.pop(index + 1)
                        break
                    elif not len(two_strip):
                        # Several blanks: find the block and drop all of them.
                        for num in self.iter_limit_range:
                            if num <= 2:
                                continue
                            try:
                                next_strip = self.source_code[index + num].strip()
                            except:
                                break
                            if not len(next_strip):
                                continue
                            elif next_strip.startswith("def ") or next_strip.startswith("class "):
                                for n in range(0, num - 1):
                                    self.source_code.pop(index + 1)
                                break
                            else:
                                break
                if line.strip().startswith("#"):
                    next_line_strip = self.source_code[index + 1].strip()
                    prev1_line_strip = self.source_code[index - 1].strip()
                    prev2_line_strip = self.source_code[index - 2].strip()
                    # A comment heading a class/top-level def needs a blank line above.
                    if (next_line_strip.startswith("class ") or (next_line_strip.startswith("def ") and self.GetIndentSpaceCount(line) == 0)) and (len(prev1_line_strip) or len(prev2_line_strip)):
                        self.source_code.insert(index, "")
                        break

    def fix_specific_spacing(self):
        """Single normalization pass for ``starts_with_keyword``.

        Returns True when the whole file was scanned without change, False
        when a mutation was made (caller restarts the pass).
        """
        last_index_before_line_breaks = 0
        altered = False
        occurrence = 0
        indented_keyword = ""
        for index, line in enumerate(self.source_code):
            indent = self.GetIndentSpaceCount(line)
            # Top-level defs get two blank lines, nested ones get one (PEP 8).
            if self.starts_with_keyword == "def ":
                if indent == 0:
                    self.line_break_quantity = 2
                else:
                    self.line_break_quantity = 1
            # Remember the keyword at its indented position as well.
            if line.startswith(" ") and self.starts_with_keyword in line:
                indented_keyword = f"{indent * ' '}{self.starts_with_keyword}"
            if line.startswith(self.starts_with_keyword) or \
            (len(indented_keyword) and line.startswith(indented_keyword)):
                if len(self.ignore) and self.ignore in line:
                    continue
                line_break_count = index - (last_index_before_line_breaks + 1)
                occurrence += 1
                # Grouped mode: matches after the first stay adjacent.
                if occurrence > 1 and self.group:
                    self.line_break_quantity = 0
                if line_break_count == self.line_break_quantity:
                    last_index_before_line_breaks = index
                    continue
                # Add or remove blank lines until the count matches.
                for _ in self.iter_limit_range:
                    if line_break_count != self.line_break_quantity:
                        if line_break_count > self.line_break_quantity:
                            self.source_code.pop(index - 1)
                            index -= 1
                            line_break_count -= 1
                            altered = True
                        elif line_break_count < self.line_break_quantity:
                            self.source_code.insert(index, "")
                            index += 1
                            line_break_count += 1
                            altered = True
                    else:
                        if altered:
                            return False
                        break
            elif len(line):
                last_index_before_line_breaks = index
            if index == len(self.source_code) - 1:
                return True
|
nilq/baby-python
|
python
|
import cvxpy as cp
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_X_y
from sklearn.metrics.pairwise import euclidean_distances
from wdwd.utils import pm1
from wdwd.linear_model import LinearClassifierMixin
class DWD(BaseEstimator, LinearClassifierMixin):
    """Distance Weighted Discrimination (SOCP formulation) linear classifier.

    Parameters
    ----------
    C: float or 'auto'
        Strictly positive tuning parameter, or 'auto' to derive it from the
        data (see ``auto_dwd_C``).
    solver_kws: dict
        Keyword arguments forwarded to ``cvxpy``'s ``solve``.
    """

    def __init__(self, C=1.0, solver_kws={}):
        # NOTE(review): mutable default for solver_kws is shared across
        # instances — harmless while it is never mutated, but fragile.
        self.C = C
        self.solver_kws = solver_kws

    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target vector relative to X
        sample_weight : array-like, shape = [n_samples], optional
            Array of weights that are assigned to individual
            samples. If not provided,
            then each sample is given unit weight.
        Returns
        -------
        self : object
        """
        # TODO: what to do about multi-class
        self.classes_ = np.unique(y)
        if self.C == 'auto':
            # NOTE(review): this overwrites the hyperparameter itself rather
            # than a fitted attribute (e.g. ``C_``), which breaks sklearn's
            # clone/get_params conventions — confirm before relying on it.
            self.C = auto_dwd_C(X, y)
        # fit DWD
        self.coef_, self.intercept_, self.eta_, self.d_, self.problem_ = \
            solve_dwd_socp(X, y, C=self.C,
                           sample_weight=sample_weight,
                           solver_kws=self.solver_kws)
        # sklearn expects a 2D coef_ and 1D intercept_.
        self.coef_ = self.coef_.reshape(1, -1)
        self.intercept_ = self.intercept_.reshape(-1)
        return self
def solve_dwd_socp(X, y, C=1.0, sample_weight=None, solver_kws={}):
    """
    Solves distance weighted discrimination optimization problem.
    Solves problem (2.7) from https://arxiv.org/pdf/1508.05913.pdf
    Parameters
    ----------
    X: (n_samples, n_features)
    y: (n_samples, )
    C: float
        Strictly positive tuning parameter.
    sample_weight: None, (n_samples, )
        Weights for samples.
    solver_kws: dict
        Keyword arguments to cp.solve
    Returns
    ------
    beta: (n_features, )
        DWD normal vector.
    intercept: float
        DWD intercept.
    eta, d: float
        Optimization variables.
    problem: cp.Problem
    """
    if C < 0:
        raise ValueError("Penalty term must be positive; got (C={})".format(C))
    # TODO: add sample weights
    if sample_weight is not None:
        raise NotImplementedError
    X, y = check_X_y(X, y,
                     accept_sparse='csr',
                     dtype='numeric')
    # convert y to +/- 1
    y = pm1(y)
    n_samples, n_features = X.shape
    # problem data
    X = cp.Parameter(shape=X.shape, value=X)
    y = cp.Parameter(shape=y.shape, value=y)
    C = cp.Parameter(value=C, nonneg=True)
    # optimization variables
    beta = cp.Variable(shape=n_features)
    intercept = cp.Variable()
    eta = cp.Variable(shape=n_samples, nonneg=True)  # per-sample slacks
    rho = cp.Variable(shape=n_samples)
    sigma = cp.Variable(shape=n_samples)
    # objective function
    # TODO: check this is correct way to do sample weighting
    if sample_weight is None:
        v = np.ones(n_samples)
    else:
        v = np.array(sample_weight).reshape(-1)
        assert len(v) == n_samples
    objective = v.T @ (rho + sigma + C * eta)
    # setup constraints
    # TODO: do we need explicit SOCP constraints?
    Y_tilde = cp.diag(y)  # TODO: make sparse
    constraints = [rho - sigma == Y_tilde @ X @ beta + intercept * y + eta,
                   cp.SOC(cp.Parameter(value=1), beta)]  # ||beta||_2^2 <= 1
    # rho^2 - sigma^2 >= 1
    constraints.extend([cp.SOC(rho[i], cp.vstack([sigma[i], 1]))
                        for i in range(n_samples)])
    # solve problem
    problem = cp.Problem(cp.Minimize(objective),
                         constraints=constraints)
    problem.solve(**solver_kws)
    # d = rho - sigma
    # rho = (1/d + d), sigma = (1/d - d)/2
    d = rho.value - sigma.value
    return beta.value, intercept.value, eta.value, d, problem
def auto_dwd_C(X, y, const=100):
    """
    Automatic choice of C from Distance-Weighted Discrimination by Marron et al, 2007. Note this only is for the SOCP formulation of DWD.
    C = 100 / d ** 2
    Where d is the median distance between points in either class.
    Parameters
    ----------
    X: array-like, (n_samples, n_features)
        The input data.
    y: array-like, (n_samples, )
        The vector of binary class labels.
    const: float
        The constanted used to determine C. Originally suggested to be 100.
    """
    labels = np.unique(y)
    assert len(labels) == 2
    # Median pairwise distance between the two classes.
    between_class = euclidean_distances(X[y == labels[0], :],
                                        X[y == labels[1], :])
    typical_dist = np.median(between_class.ravel())
    return const / typical_dist ** 2
def dwd_obj(X, y, C, beta, offset, eta):
    """
    Objective function for DWD: sum of inverse (slack-adjusted) margins plus
    the C-weighted slack penalty.
    """
    margins = y * (X.dot(beta) + offset) + eta
    inverse_margin_total = sum(1.0 / margins)
    slack_penalty = C * sum(eta)
    return inverse_margin_total + slack_penalty
|
nilq/baby-python
|
python
|
from psyrun import Param
# Empty parameter space: psyrun will schedule a single job with no parameters.
pspace = Param()
# NOTE(review): presumably the interpreter command psyrun invokes; 'true' is a
# no-op stub — confirm against the psyrun task configuration.
python = 'true'
def execute():
    """Job body: perform no work and produce no result values."""
    return dict()
|
nilq/baby-python
|
python
|
import numpy as np
def PlotPlanetXZ(fig, R=1.0, Center=(0.0, 0.0, 0.0), zorder=10, NoBlack=False, NoonTop=True):
    '''
    Draw a planet disk in the X-Z plane: a white circle with a black outline
    and, unless NoBlack is set, a black-filled night-side half.

    Inputs
    ======
    fig : object
        Axes-like object providing fill() and plot().
    R : float
        Planet radius.
    Center : sequence
        (x, y, z) of the planet center; only x and z are used here.
        (Default changed from a mutable list to a tuple; it is only indexed.)
    zorder : float
        Base z-order for the drawing.
    NoBlack : bool
        When True, skip the black night-side fill.
    NoonTop : bool
        When True, plot z horizontally and x vertically (noon at top).
    '''
    a = 2*np.pi*np.arange(361, dtype='float32')/360
    x = R*np.sin(a) + Center[0]
    z = R*np.cos(a) + Center[2]
    # Only the axis order depends on orientation; the drawing sequence is identical.
    h, v = (z, x) if NoonTop else (x, z)
    fig.fill(h, v, color=[1.0, 1.0, 1.0], zorder=zorder)
    fig.plot(h, v, color=[0, 0, 0], zorder=zorder+1)
    if not NoBlack:
        # Indices 180..359 cover the second half of the circle (night side).
        fig.fill(h[180:360], v[180:360], color=[0.0, 0.0, 0.0], zorder=zorder+1)
def PlotPlanetXY(fig, R=1.0, Center=(0.0, 0.0, 0.0), zorder=10, NoBlack=False, NoonTop=True):
    '''
    Draw a planet disk in the X-Y plane: a white circle with a black outline
    and, unless NoBlack is set, a black-filled night-side half.

    Inputs
    ======
    fig : object
        Axes-like object providing fill() and plot().
    R : float
        Planet radius.
    Center : sequence
        (x, y, z) of the planet center; only x and y are used here.
        (Default changed from a mutable list to a tuple; it is only indexed.)
    zorder : float
        Base z-order for the drawing.
    NoBlack : bool
        When True, skip the black night-side fill.
    NoonTop : bool
        When True, plot y horizontally and x vertically (noon at top).
    '''
    a = 2*np.pi*np.arange(361, dtype='float32')/360
    x = R*np.sin(a) + Center[0]
    y = R*np.cos(a) + Center[1]
    # Only the axis order depends on orientation; the drawing sequence is identical.
    h, v = (y, x) if NoonTop else (x, y)
    fig.fill(h, v, color=[1.0, 1.0, 1.0], zorder=zorder)
    fig.plot(h, v, color=[0, 0, 0], zorder=zorder+1)
    if not NoBlack:
        # Indices 180..359 cover the second half of the circle (night side).
        fig.fill(h[180:360], v[180:360], color=[0.0, 0.0, 0.0], zorder=zorder+1)
def PlotPlanetYZ(fig, R=1.0, Center=(0.0, 0.0, 0.0), Side='day', zorder=10, NoFill=False, Color=(0.0, 0.0, 0.0), linestyle='-'):
    '''
    Draw the planet outline in the Y-Z plane, optionally filled white
    (Side == 'day') or black (any other Side value).

    Inputs
    ======
    fig : object
        Axes-like object providing fill() and plot().
    R : float
        Planet radius.
    Center : sequence
        (x, y, z) of the planet center; only y and z are used here.
        (Default changed from a mutable list to a tuple; it is only indexed.)
    Side : str
        'day' fills white; anything else fills black.
    zorder : float
        Base z-order for the drawing.
    NoFill : bool
        When True, draw only the outline.
    Color : sequence
        Outline color.
    linestyle : str
        Outline line style.
    '''
    a = 2*np.pi*np.arange(361, dtype='float32')/360
    y = R*np.sin(a) + Center[1]
    z = R*np.cos(a) + Center[2]
    if not NoFill:
        fill_color = [1.0, 1.0, 1.0] if Side == 'day' else [0.0, 0.0, 0.0]
        fig.fill(y, z, color=fill_color, zorder=zorder)
    # Bug fix: the outline previously hard-coded linestyle='-', silently
    # ignoring the ``linestyle`` argument.
    fig.plot(y, z, color=Color, zorder=zorder+1, linestyle=linestyle)
def PlotPlanetCoreXZ(ax, R=1.0, Center=(0.0, 0.0, 0.0), Colors=([1.0, 0.7, 0.0, 0.5], [1.0, 0.2, 0.0, 0.5], [0.5, 0.5, 0.5, 0.5]), Layers=(0.0, 0.832, 1.0), zorder=1.0, NoFill=False, linestyle='-', linewidth=2.0):
    '''
    Plots the different layers of the planet in the X-Z plane.

    Layers are given as fractions of R; each entry of Colors fills the annulus
    between consecutive layer radii (or outlines each radius when NoFill).
    Default changed from a mutable list to a tuple for Center (only indexed).
    '''
    a = 2*np.pi*np.arange(361, dtype='float32')/360
    nl = len(Layers)
    if NoFill:
        # Outline-only mode: one circle per layer radius.
        for i in range(0, nl):
            x = Layers[i]*R*np.sin(a) + Center[0]
            z = Layers[i]*R*np.cos(a) + Center[2]
            ax.plot(x, z, color=Colors[i], zorder=zorder, linestyle=linestyle, linewidth=linewidth)
    else:
        # Filled mode: one annulus between each pair of consecutive radii.
        for i in range(0, nl-1):
            l0 = Layers[i]
            l1 = Layers[i+1]
            x0 = l0*R*np.sin(a) + Center[0]
            z0 = l0*R*np.cos(a) + Center[2]
            x1 = l1*R*np.sin(a) + Center[0]
            z1 = l1*R*np.cos(a) + Center[2]
            if l0 == 0.0:
                # Innermost region is a full disk, not an annulus.
                ax.fill(x1, z1, color=Colors[i], zorder=zorder, linewidth=0.0)
            else:
                # Concatenate the outer ring with the reversed inner ring so the
                # fill polygon traces the annulus.
                x = np.append(x0, x1[::-1])
                z = np.append(z0, z1[::-1])
                ax.fill(x, z, color=Colors[i], zorder=zorder, linewidth=0.0)
        # Outline the outermost boundary.
        x = R*np.sin(a) + Center[0]
        z = R*np.cos(a) + Center[2]
        ax.plot(x, z, color=Colors[-1], linestyle=linestyle, linewidth=linewidth, zorder=zorder)
|
nilq/baby-python
|
python
|
def text(result):
    """Render a baking-statistics result dict as a plain-text report.

    ``result`` holds ``active_rolls``, ``cycles``, and per-category dicts
    (``bakes``, ``endorsements``, ``total``), each with ``mean``/``max``
    sub-dicts keyed by ``count``, ``deposits`` and ``rewards``.
    """
    output = [
        f"active rolls: {result['active_rolls']}",
        f"cycles: {result['cycles']}",
        "",
    ]
    for key in ["bakes", "endorsements", "total"]:
        output.append(key)
        col1 = "mean"
        col2 = "max"
        header = " " * 10 + f"{col1:>10} {col2:>10}"
        output.append("-" * len(header))
        output.append(header)
        d = result[key]
        for key2 in ["count", "deposits", "rewards"]:
            # Renamed from ``mean``/``max`` to stop shadowing the builtins.
            mean_val = d["mean"][key2]
            max_val = d["max"][key2]
            output.append(f"{key2:>8}: {mean_val:10.2f} {max_val:10.2f}")
        output.append("\n")
    return "\n".join(output)
|
nilq/baby-python
|
python
|
from app.runner.setup import setup

# Build the configured application instance.
# NOTE(review): presumably the WSGI/ASGI entry point — confirm deployment config.
app = setup()
|
nilq/baby-python
|
python
|
import os
from datetime import datetime, timezone
from typing import Dict
import click
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
class Config:
    """Read GitHub Actions metadata from the workflow environment."""

    @property
    def repository(self) -> str:
        """The ``owner/repo`` slug of the current repository."""
        return os.environ.get("GITHUB_REPOSITORY")

    @property
    def actor(self) -> str:
        """Login of the user that triggered the workflow."""
        return os.environ.get("GITHUB_ACTOR")

    @property
    def run_id(self) -> str:
        """Unique id of the current workflow run."""
        return os.environ.get("GITHUB_RUN_ID")

    @property
    def workflow(self) -> str:
        """Name of the running workflow."""
        return os.environ.get("GITHUB_WORKFLOW")

    @property
    def job_id(self) -> str:
        """Id of the current job."""
        return os.environ.get("GITHUB_JOB")

    @property
    def ref(self) -> str:
        """The git ref that triggered the run (empty string if unset)."""
        return os.environ.get("GITHUB_REF", "")

    @property
    def branch(self) -> str:
        """Branch name parsed from the ref, or ``unknown branch``."""
        parts = self.ref.split("/", 2)
        if len(parts) == 3:
            return parts[2]
        return "unknown branch"


config = Config()
def build_status_block(
    job_status: str, actor: str, flow: str, branch: str, run_id: str, repository: str
) -> Dict:
    """Build the Slack Block Kit section describing a finished CI job."""
    status = job_status.lower()
    if status == "success":
        badge = ":white_check_mark: *Success*"
    elif status == "cancelled":
        badge = ":large_blue_circle: *Cancelled*"
    else:
        badge = ":x: *Failed*"
    body = (
        f"{badge} *{repository}* <https://github.com/{repository}/actions/runs/{run_id}|View Job>\n"
        f"[ {flow} ] [ {branch} ]\n"
        f"Triggered by {actor}"
    )
    return {"type": "section", "text": {"type": "mrkdwn", "text": body}}
@click.command(context_settings=dict(ignore_unknown_options=True))
@click.option("--token", required=True, help="Slack token")
@click.option("--channel", required=True, help="Channel id")
@click.option("--job-status", required=True, help="Job status")
def send_to_slack(token: str, channel: str, job_status: str):
    """Post a CI status message (UTC timestamp + status block) to a Slack channel."""
    client = WebClient(token=token)
    blocks = [
        # Header block: current UTC timestamp.
        {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": datetime.now(tz=timezone.utc).strftime(
                    "%a %b %d %Y %H:%M:%S %Z"
                ),
            },
        },
        # Status block built from the GitHub Actions environment; note the
        # job id is passed as the "flow" field of the message.
        build_status_block(
            job_status,
            config.actor,
            config.job_id,
            config.branch,
            config.run_id,
            config.repository,
        ),
    ]
    try:
        # "Some text" is the fallback for clients that cannot render blocks.
        result = client.chat_postMessage(
            channel=channel, text="Some text", blocks=blocks
        )
        # Print result, which includes information about the message (like TS)
        print(result)
    except SlackApiError as e:
        print(f"Error: {e}")
if __name__ == "__main__":
send_to_slack()
|
nilq/baby-python
|
python
|
import peewee

# SQLite database backing the API-documentation store.
db = peewee.SqliteDatabase('./apidocs/api_doc.db')


class ApiDoc(peewee.Model):
    """One documented API endpoint."""

    title = peewee.CharField(default='')        # human-readable endpoint title
    url = peewee.CharField()                    # endpoint path/URL
    method = peewee.CharField()                 # HTTP verb (GET, POST, ...)
    description = peewee.CharField(default='')  # free-form description

    class Meta:
        database = db
|
nilq/baby-python
|
python
|
# -*- coding:utf-8 -*-
import peewee
from torcms.core import tools
from torcms.model.core_tab import TabPost
from torcms.model.core_tab import TabRel
from torcms.model.core_tab import TabPost2Tag
from torcms.model.post2catalog_model import MPost2Catalog as MInfor2Catalog
from torcms.model.abc_model import Mabc
class MRelation(Mabc):
    """Maintain weighted relations between posts (rows of ``TabRel``)."""

    @staticmethod
    def add_relation(app_f, app_t, weight=1):
        """Create or strengthen the relation from post ``app_f`` to ``app_t``.

        Duplicate rows are purged first. Returns the new row's uid on
        creation, ``None`` when an existing relation was updated, and
        ``False`` on an unexpected state.
        """
        recs = TabRel.select().where(
            (TabRel.post_f_id == app_f) &
            (TabRel.post_t_id == app_t)
        )
        # Defensive cleanup: collapse duplicate rows before re-counting.
        if recs.count() > 1:
            for record in recs:
                MRelation.delete(record.uid)
        if recs.count() == 0:
            uid = tools.get_uuid()
            entry = TabRel.create(
                uid=uid,
                post_f_id=app_f,
                post_t_id=app_t,
                count=1,
            )
            return entry.uid
        elif recs.count() == 1:
            MRelation.update_relation(app_f, app_t, weight)
        else:
            return False

    @staticmethod
    def delete(uid):
        """Delete the relation row with the given uid."""
        entry = TabRel.delete().where(
            TabRel.uid == uid
        )
        entry.execute()

    @staticmethod
    def update_relation(app_f, app_t, weight=1):
        """Increase the relation's counter by ``weight``; ``False`` if absent."""
        try:
            postinfo = TabRel.get(
                (TabRel.post_f_id == app_f) &
                (TabRel.post_t_id == app_t)
            )
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            return False
        entry = TabRel.update(
            count=postinfo.count + weight
        ).where(
            (TabRel.post_f_id == app_f) &
            (TabRel.post_t_id == app_t)
        )
        entry.execute()

    @staticmethod
    def get_app_relations(app_id, num=20, kind='1'):
        '''
        Get up to ``num`` randomly ordered posts related to ``app_id``.

        Related posts share the app's first category tag when one exists;
        otherwise any posts of the given ``kind`` are sampled.
        '''
        info_tag = MInfor2Catalog.get_first_category(app_id)
        if info_tag:
            return TabPost2Tag.select(
                TabPost2Tag, TabPost.title.alias('post_title'), TabPost.valid.alias('post_valid')
            ).join(
                TabPost, on=(TabPost2Tag.post_id == TabPost.uid)
            ).where(
                (TabPost2Tag.tag_id == info_tag.tag_id) &
                (TabPost.kind == kind)
            ).order_by(
                peewee.fn.Random()
            ).limit(num)
        else:
            return TabPost2Tag.select(
                TabPost2Tag, TabPost.title.alias('post_title'), TabPost.valid.alias('post_valid')
            ).join(TabPost, on=(TabPost2Tag.post_id == TabPost.uid)).where(
                TabPost.kind == kind
            ).order_by(peewee.fn.Random()).limit(num)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import torch
from ..distributions import MultivariateNormal
from ..lazy import InterpolatedLazyTensor
from ..utils.broadcasting import _mul_broadcast_shape
from ..utils.interpolation import Interpolation, left_interp
from ..utils.memoize import cached
from ._variational_strategy import _VariationalStrategy
class GridInterpolationVariationalStrategy(_VariationalStrategy):
    r"""
    This strategy constrains the inducing points to a grid and applies a deterministic
    relationship between :math:`\mathbf f` and :math:`\mathbf u`.
    It was introduced by `Wilson et al. (2016)`_.
    Here, the inducing points are not learned. Instead, the strategy
    automatically creates inducing points based on a set of grid sizes and grid
    bounds.
    .. _Wilson et al. (2016):
        https://arxiv.org/abs/1611.00336
    :param ~gpytorch.models.ApproximateGP model: Model this strategy is applied to.
        Typically passed in when the VariationalStrategy is created in the
        __init__ method of the user defined model.
    :param int grid_size: Size of the grid
    :param list grid_bounds: Bounds of each dimension of the grid (should be a list of (float, float) tuples)
    :param ~gpytorch.variational.VariationalDistribution variational_distribution: A
        VariationalDistribution object that represents the form of the variational distribution :math:`q(\mathbf u)`
    """

    def __init__(self, model, grid_size, grid_bounds, variational_distribution):
        # Build a 1D grid per dimension, padded by one cell beyond each bound.
        grid = torch.zeros(grid_size, len(grid_bounds))
        for i in range(len(grid_bounds)):
            grid_diff = float(grid_bounds[i][1] - grid_bounds[i][0]) / (grid_size - 2)
            grid[:, i] = torch.linspace(grid_bounds[i][0] - grid_diff, grid_bounds[i][1] + grid_diff, grid_size)
        # Inducing points are the full Cartesian product of the per-dim grids,
        # built incrementally one dimension at a time.
        inducing_points = torch.zeros(int(pow(grid_size, len(grid_bounds))), len(grid_bounds))
        prev_points = None
        for i in range(len(grid_bounds)):
            for j in range(grid_size):
                inducing_points[j * grid_size ** i : (j + 1) * grid_size ** i, i].fill_(grid[j, i])
                if prev_points is not None:
                    inducing_points[j * grid_size ** i : (j + 1) * grid_size ** i, :i].copy_(prev_points)
            prev_points = inducing_points[: grid_size ** (i + 1), : (i + 1)]
        # Grid locations are fixed: inducing locations are not learned.
        super(GridInterpolationVariationalStrategy, self).__init__(
            model, inducing_points, variational_distribution, learn_inducing_locations=False
        )
        # Bypass any __setattr__ magic (e.g. Module registration) for the model ref.
        object.__setattr__(self, "model", model)
        self.register_buffer("grid", grid)

    def _compute_grid(self, inputs):
        """Compute sparse interpolation indices/values from inputs onto the grid."""
        n_data, n_dimensions = inputs.size(-2), inputs.size(-1)
        batch_shape = inputs.shape[:-2]
        # Flatten batch dims for interpolation, then restore them.
        inputs = inputs.reshape(-1, n_dimensions)
        interp_indices, interp_values = Interpolation().interpolate(self.grid, inputs)
        interp_indices = interp_indices.view(*batch_shape, n_data, -1)
        interp_values = interp_values.view(*batch_shape, n_data, -1)
        # Broadcast the interpolation tensors against the variational batch shape.
        if (interp_indices.dim() - 2) != len(self._variational_distribution.batch_shape):
            batch_shape = _mul_broadcast_shape(interp_indices.shape[:-2], self._variational_distribution.batch_shape)
            interp_indices = interp_indices.expand(*batch_shape, *interp_indices.shape[-2:])
            interp_values = interp_values.expand(*batch_shape, *interp_values.shape[-2:])
        return interp_indices, interp_values

    @property
    @cached(name="prior_distribution_memo")
    def prior_distribution(self):
        """Prior p(u) at the inducing points, jittered for numerical stability."""
        out = self.model.forward(self.inducing_points)
        res = MultivariateNormal(out.mean, out.lazy_covariance_matrix.add_jitter())
        return res

    def forward(self, x, inducing_points, inducing_values, variational_inducing_covar=None):
        """Interpolate the variational distribution at ``x`` from the grid."""
        if variational_inducing_covar is None:
            raise RuntimeError(
                "GridInterpolationVariationalStrategy is only compatible with Gaussian variational "
                f"distributions. Got ({self.variational_distribution.__class__.__name__}."
            )
        variational_distribution = self.variational_distribution
        # Get interpolations
        interp_indices, interp_values = self._compute_grid(x)
        # Compute test mean
        # Left multiply samples by interpolation matrix
        predictive_mean = left_interp(interp_indices, interp_values, inducing_values.unsqueeze(-1))
        predictive_mean = predictive_mean.squeeze(-1)
        # Compute test covar
        predictive_covar = InterpolatedLazyTensor(
            variational_distribution.lazy_covariance_matrix,
            interp_indices,
            interp_values,
            interp_indices,
            interp_values,
        )
        output = MultivariateNormal(predictive_mean, predictive_covar)
        return output
|
nilq/baby-python
|
python
|
import machine
import utime
from machine import Pin

#import pyb
class IRQCounter:
    """Count edges on a GPIO pin via an IRQ handler, debounced by a cooldown.

    ``readout()`` returns the values named in ``provides``.
    """

    provides = ["count", "time_since_last_trigger"]

    def __init__(self, port, trigger, cooldown):
        """Attach an IRQ handler to *port* for *trigger* edges.

        *cooldown* is the minimum spacing in milliseconds between counted
        triggers.
        """
        self.counter = 0
        self.last_trigger = utime.ticks_ms()

        def irq_handler(pin):
            now = utime.ticks_ms()
            # ticks_diff handles wraparound of the millisecond tick counter;
            # the previous direct "last + cooldown < now" comparison broke
            # after the counter wrapped.
            if utime.ticks_diff(now, self.last_trigger) > cooldown:
                self.counter += 1
                self.last_trigger = now

        port.init(Pin.IN, None)
        port.irq(irq_handler, trigger)

    def readout(self):
        """Atomically snapshot the count and the ms since the last trigger."""
        # Bug fix: the original called pyb.disable_irq() with the pyb import
        # commented out (NameError at runtime); use the portable machine API.
        irq_state = machine.disable_irq()
        count = self.counter
        time_since_last_trigger = utime.ticks_diff(utime.ticks_ms(), self.last_trigger)
        machine.enable_irq(irq_state)
        return {"count": count, "time_since_last_trigger": time_since_last_trigger}
|
nilq/baby-python
|
python
|
from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import bytes, dict, object, range, map, input, str
from future.utils import itervalues, viewitems, iteritems, listvalues, listitems
from io import open
import pytest
import rfpipe
from astropy import time
def test_create():
    """A default-constructed State should report default metadata."""
    st = rfpipe.state.State(validate=False, showsummary=False)
    assert st.metadata.atdefaults()


@pytest.fixture(scope="module")
def mockstate():
    """Simulated 0.3 s scan (presumably 27 antennas, 4 subbands, 128 channels,
    4 pols, 5 ms dumps — confirm against mock_metadata's signature)."""
    t0 = time.Time.now().mjd
    meta = rfpipe.metadata.mock_metadata(t0, t0+0.3/(24*3600), 27, 4, 32*4, 4,
                                         5e3, datasource='sim', antconfig='D')
    return rfpipe.state.State(inmeta=meta)


def test_mock(mockstate):
    """Data shape matches the mock scan (351 = 27*26/2 baselines)."""
    assert mockstate.datashape == (60, 351, mockstate.nchan, 2)


def test_pol(mockstate):
    """Four recorded polarizations reduce to two processed ones."""
    assert len(mockstate.metadata.pols_orig) == 4 and len(mockstate.pols) == 2


def test_mocknseg(mockstate):
    """The short mock scan fits in a single segment."""
    assert mockstate.nsegment == 1


def test_version(mockstate):
    """The state exposes a truthy version."""
    assert mockstate.version


def test_clearcache(mockstate):
    """Segment times must be recomputed identically after clearing the cache."""
    segmenttimes = mockstate.segmenttimes
    mockstate.clearcache()
    mockstate.summarize()
    assert (segmenttimes == mockstate.segmenttimes).all()


def test_lowmem():
    """A tight memory limit should still produce a valid state."""
    t0 = time.Time.now().mjd
    meta = rfpipe.metadata.mock_metadata(t0, t0+0.3/(24*3600), 27, 4, 32*4, 4,
                                         5e3, datasource='sim')
    st = rfpipe.state.State(inmeta=meta, inprefs={'memory_limit': 0.1})
    assert st.validate()
|
nilq/baby-python
|
python
|
'''
* Copyright (c) 2022 MouBieCat
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
'''
from typing import List
# 是否大於零
def not_zero(
    number: int,
    showErrorMsg: bool = False
) -> bool:
    """Return True when `number` is negative (i.e. an invalid input).

    Despite the name, this reports *invalidity*: non-negative numbers
    yield False. When `showErrorMsg` is set, a warning is printed for
    negative input before returning.
    """
    is_negative = number < 0
    if is_negative and showErrorMsg:
        print("該數字不是一個合法有效的數字")
    return is_negative
# 是否為質數
def is_prime_number(
    number: int
) -> bool:
    """Return True if `number` is a prime.

    Fixes: the original returned True for 0, 1 and negative numbers,
    because its trial-division loop body never executed for them.
    Also only tests divisors up to sqrt(number), which is sufficient
    and far faster for large inputs.
    """
    if number < 2:
        # 0, 1 and negatives are not prime by definition.
        return False
    divisor = 2
    while divisor * divisor <= number:
        if number % divisor == 0:
            return False
        divisor += 1
    return True
# 奇數 = True, 偶數 = False
def is_odd_number(
    number: int
) -> bool:
    """Return True for odd integers, False for even ones."""
    # Python's % always yields a non-negative remainder for a positive
    # modulus, so this is correct for negative inputs too.
    return number % 2 != 0
# 獲取因數列表
def get_number_divisor_array(
    number: int,
    showCountMsg: bool = False
) -> List[int]:
    """Return all positive divisors of `number` in ascending order.

    When `showCountMsg` is True, also prints how many divisors were
    found. Requires `from typing import List` (imported at module top).

    Fixes: replaces the manual append loop with a comprehension and the
    direct `__len__()` dunder call with the idiomatic `len()`.
    """
    divisors = [n for n in range(1, number + 1) if number % n == 0]
    if showCountMsg:
        print("總共找到了 " + str(len(divisors)) + " 個因數")
    return divisors
# Main
# Read a positive integer, report whether it is prime and odd/even,
# then list all of its divisors.
number = int(input("請輸入一個正整數 => "))
# not_zero() returns True for NEGATIVE input (and prints the Chinese
# error text when its second argument is True), so `not not_zero(...)`
# proceeds only for numbers >= 0.
if not not_zero(number, True):
    print(str(number) + " 它是一個質數"
        if is_prime_number(number)
        else str(number) + " 它不是一個質數")
    print(str(number) + " 它是一個奇數"
        if is_odd_number(number)
        else str(number) + " 它是一個偶數")
    # Print each divisor on its own line.
    for index in get_number_divisor_array(number):
        print(str(number) + " 的因數有 " + str(index))
|
nilq/baby-python
|
python
|
from setuptools import setup
# Packaging metadata for the `azblob` Azure-blob download tool.
setup(
    name="azblob",
    version="1.0.1",
    author="Philipp Lang",
    packages=["azblob"],
    url=("https://github.com/plang85/azblob"),
    license="MIT License",
    description="Download Azure blobs.",
    # README.md is read at build time for the PyPI long description.
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "License :: OSI Approved :: MIT License",
        "Topic :: Software Development",
    ],
    # Installs an `azblob` console command dispatching to azblob.ops:cli.
    entry_points={"console_scripts": ["azblob = azblob.ops:cli"]},
    install_requires=[
        "azure-storage-blob>=12.0.0",
        "azure-storage-file>=1.3.0",
        "tabulate>=0.8.2",
    ],
    extras_require={"dev": ["black", "twine"]},
)
|
nilq/baby-python
|
python
|
# Program demonstrating set intersection and set difference.
setx = set(["apple", "mango"])
sety = set(["mango", "orange"])
# NOTE: `&` computes the INTERSECTION of setx and sety (here {"mango"}),
# not a difference; the original comment mislabeled this step.
setz = setx & sety
print(setz)
# Set difference: elements of setx not in the intersection (== setx - sety).
setb = setx - setz
print(setb)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
# #NUMBER 1: DATA PREPARATION
# In[4]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
print("Done importing libraries")
# In[5]:
#path_to_file= "C:/Users/necbanking/Desktop/2.1 modular/fifa_AI/"
# Load the FIFA 2020 and FIFA 2019 player datasets; the CSV files must be
# in the current working directory.
df=pd.read_csv("players_20.csv")
df_19=pd.read_csv("players_19.csv")
df.head()
# In[6]:
df_19.head()
# In[7]:
# Drop identifier/bookkeeping columns that will not be used as features.
# A single drop() call with the full column list replaces the original
# fifteen chained single-column drops (same result, one pass, no repetition).
cols_to_drop = ['dob', 'weight_kg', 'international_reputation', 'real_face',
                'release_clause_eur', 'player_tags', 'team_jersey_number',
                'loaned_from', 'joined', 'contract_valid_until',
                'nation_position', 'nation_jersey_number', 'player_traits',
                'sofifa_id', 'long_name']
df = df.drop(columns=cols_to_drop)
# In[8]:
# Apply the same column pruning to the FIFA 2019 frame so both datasets
# share an identical schema (one drop() call instead of 15 chained drops).
cols_to_drop = ['dob', 'weight_kg', 'international_reputation', 'real_face',
                'release_clause_eur', 'player_tags', 'team_jersey_number',
                'loaned_from', 'joined', 'contract_valid_until',
                'nation_position', 'nation_jersey_number', 'player_traits',
                'sofifa_id', 'long_name']
df_19 = df_19.drop(columns=cols_to_drop)
# #NUMBER 2: CORRELATION
# In[9]:
# Split the FIFA-20 data 75/25 into training and test sets.
train_data, test_data=train_test_split(df,test_size=0.25)
# NOTE(review): "Leingth" is a typo in these user-facing strings; left
# untouched because this edit only adds comments.
print("Leingth of training data is:"+str(len(train_data)))
print("Leingth of test data is:"+str(len(test_data)))
# In[10]:
# Select candidate features by Pearson correlation against the target.
target_feature='overall'
feature_corr=train_data.corr(method='pearson')[target_feature]
feature_corr=feature_corr.sort_values(ascending=False)
# Print the 20 strongest correlations (index 0 is 'overall' itself).
print(feature_corr[1:21])
corr_matrix = df.corr()
corr_matrix['overall'].sort_values(ascending=False)
##
# #NUMBER 3: REGRESSION MODEL
#
# In[11]:
# Train a linear regression on a hand-picked feature list.
# Fix: removed the dead statement `features = corr_matrix['overall']...`
# whose result was immediately overwritten by the literal list below.
features=['potential','value_eur','wage_eur','attacking_short_passing','skill_long_passing','age','skill_ball_control','skill_curve','skill_moves','attacking_volleys']
X_train=df[features]
y_train=df['overall']
r = LinearRegression()
r.fit(X_train,y_train )
# R^2 on the training data itself (not a generalization estimate).
print(r.score(X_train,y_train))
# In[12]:
# Re-select features: the 13 columns most correlated with 'overall'
# (the original comment said "top 20" but the slice takes 13).
features=feature_corr[1:14].index.tolist()
print(features)
# In[13]:
# Fit the final model on the training split using the correlation-selected
# feature list from the previous cell.
x_train=train_data[features]
y_train=train_data[target_feature]
# Replace missing values with zero before fitting.
# NOTE(review): fillna(inplace=True) on a sliced frame may raise pandas'
# SettingWithCopyWarning -- confirm this is intended.
x_train.fillna(0,inplace=True)
model=LinearRegression().fit(x_train,y_train)
# R^2 score on the training data itself (not a generalization estimate).
print("Score:"+str(model.score(x_train,y_train)))
# #NUMBER 4: A PROCESS OF OPTIMISATION
# In[14]:
# Evaluate the model on the held-out 25% of the FIFA-20 data.
# Sort by the target so the result table reads best-to-worst.
test_data=test_data.sort_values([target_feature], ascending=False)
x_test=test_data[features]
x_test.fillna(0,inplace=True)
y_test=test_data[target_feature]
# Predict and attach the predictions to the test frame.
y_predict=model.predict(x_test)
test_data['predicted']=y_predict
# Signed percentage difference between prediction and the true rating.
rating=((y_predict-y_test)/y_test*100)
test_data['difference']=rating
test_data[["short_name","overall","predicted","difference"]]
# In[16]:
# Preprocessing: coerce the model's feature columns in the FIFA-19 frame
# to numeric dtype; unparseable entries become NaN (errors='coerce').
# A loop over the column list replaces ten copy-pasted statements.
numeric_features = ['potential', 'value_eur', 'wage_eur',
                    'attacking_short_passing', 'skill_long_passing', 'age',
                    'skill_ball_control', 'skill_curve', 'skill_moves',
                    'attacking_volleys']
for col in numeric_features:
    df_19[col] = pd.to_numeric(df_19[col], errors='coerce')
# #NUMBER 5
# In[17]:
# Score the FIFA-19 players with the model trained on FIFA-20 data.
features=['potential','value_eur','wage_eur','attacking_short_passing','skill_long_passing','age','skill_ball_control','skill_curve','skill_moves','attacking_volleys']
x_test=df_19[features]
x_test.fillna(0,inplace=True)
y_test=df_19['overall']
# NOTE(review): this uses `r` (the hand-picked-features model), not the
# correlation-selected `model` -- confirm which one was intended.
predict=r.predict(x_test)
df_19['predicted']=predict
df_19[['short_name','overall','predicted']]
# In[18]:
import pickle
# In[19]:
# Persist the trained model so it can be reloaded without retraining.
# Fix: a `with` block guarantees the file handle is closed even if
# pickling raises (the original used manual open()/close()).
filename="player_rating.pkl"
with open(filename, 'wb') as outfile:
    pickle.dump(model, outfile)
# In[ ]:
|
nilq/baby-python
|
python
|
import util
from copy import deepcopy
from collections import defaultdict
def solver(paticipants, pizas):
    """Choose a subset of pizza slice counts whose sum is as large as
    possible without exceeding `paticipants`; return the chosen pizzas'
    original indices.

    Brute force: recursively tries removing one pizza at a time, so the
    worst case is exponential in len(pizas).
    """
    # Map each slice count to the list of indices where it occurs, so the
    # winning values can be translated back to input positions at the end.
    pizas_key = defaultdict(list)
    for i, v in enumerate(pizas):
        pizas_key[v].append(i)
    # acc holds the best subset found so far and its sum: [values, sum].
    acc = [[], 0]
    print('File loaded :: size of piza : %s, paticipants : %d ' % (len(pizas), paticipants))
    def solve(pizs):
        s = sum(pizs)
        if s <= paticipants:
            # Feasible subset: record it if it beats the current best.
            if s > acc[1]:
                acc[0], acc[1] = pizs, s
                print('Found :', s)
            return
        for j in range(len(pizs)):
            # Try dropping the j-th pizza and recurse on the remainder.
            solve(pizs[0:j] + pizs[j+1:])
            if acc[1] == paticipants: break  # exact match: stop early
    solve(pizas)
    # Translate the winning values back to distinct original indices.
    return [pizas_key[v].pop(0) for v in acc[0]]
if __name__=="__main__":
    # Run the solver over every file in ./input via the project's util helper.
    util.solve_files('input', solver)
|
nilq/baby-python
|
python
|
"""
[LeetCode] 708. Insert into a Cyclic Sorted List
# Insert into a Cyclic Sorted List linspiration
Problem
Given a node from a cyclic linked list which is sorted in ascending order, write a function to insert a value into the list such that it remains a cyclic sorted list. The given node can be a reference to any single node in the list, and may not be necessarily the smallest value in the cyclic list.
If there are multiple suitable places for insertion, you may choose any place to insert the new value. After the insertion, the cyclic list should remain sorted.
If the list is empty (i.e., given node is null), you should create a new single cyclic list and return the reference to that single node. Otherwise, you should return the original given node.
The following example may help you understand the problem better:
clipboard.png
In the figure above, there is a cyclic sorted list of three elements. You are given a reference to the node with value 3, and we need to insert 2 into the list.
clipboard.png
The new node should insert between node 1 and node 3. After the insertion, the list should look like this, and we should still return node 3.
"""
# V0
# IDEA : LINKED LIST
# IDEA : CONSIDER THE 4 CASES BELOW :
# CASE 1) No head:
# CASE 2) prev.val <= val <= cur.val
# CASE 3) prev.val > cur.val and (val < cur.val or prev.val < cur): cur is either the min or the max with not all nodes with the same value
# CASE 4) val != every nodes's value in a cyclic linked list where every node has the same value
class Solution(object):
    def insert(self, head, val):
        """Insert `val` into a sorted circular linked list; return `head`.

        `head` may reference any node of the cycle. Four cases are handled:
        1) empty list, 2) val fits between two adjacent nodes, 3) val is a
        new min/max (inserted at the seam where values wrap around),
        4) all nodes hold the same value.
        NOTE(review): relies on a Node(val, next) class defined elsewhere.
        """
        node = Node(val, head)
        # case 1): no head.
        # Fix: a single-node cyclic list must point at itself; the original
        # returned a node whose next was None, breaking the cyclic invariant
        # required by the problem statement (and implemented by the other
        # versions below).
        if not head:
            node.next = node
            return node
        prev, cur = head, head.next
        while True:
            # case 2): prev.val <= val <= cur.val
            # e.g. 1 -> 3 -> 5 -> "4" (insert 4)
            if prev.val <= val <= cur.val:
                break
            # case 3): at the max->min seam (prev.val > cur.val), val belongs
            # here when it is a new minimum or a new maximum.
            # e.g. 6 -> 4 -> "5" (insert 5), or 5 -> 4 -> "3" (insert 3)
            elif prev.val > cur.val and (val <= cur.val or prev.val <= val):
                break
            prev, cur = prev.next, cur.next
            # case 4): walked the whole cycle without finding a break point,
            # i.e. every node holds the same value -- insert anywhere.
            if prev == head:
                break
        # splice node in between prev and cur
        prev.next = node
        node.next = cur
        return head
# V1
# https://blog.csdn.net/weixin_41677877/article/details/81200818
class Solution:
    def insert(self, node, x):
        """Insert x into a sorted circular list given any node; return node.

        NOTE(review): relies on a ListNode class defined elsewhere.
        """
        originNode = node
        tmp = ListNode(x)
        if node == None:
            # Empty list: the new node forms a one-element cycle.
            node = tmp
            node.next = node
            return node
        else:
            while True:
                # One- or two-node cycle (next.next wraps back to node):
                # insert right after `node`.
                # NOTE(review): for a TWO-node cycle this inserts without
                # comparing values -- verify sort order is preserved.
                if node.next.next == node:
                    tmp.next = node.next
                    node.next = tmp
                    return node
                # Insert between node and node.next when x fits in the
                # ascending run, or at the wrap-around seam when x is a
                # new maximum or minimum.
                if (node.val<=x and node.next.val>x) or (node.val<x and node.next.val>=x) or (node.val>node.next.val and node.val<x and node.next.val<x) or (node.val>node.next.val and node.val>x and node.next.val>x):
                    tmp.next = node.next
                    node.next = tmp
                    return node
                node = node.next
                # Walked the full cycle without a match: insert anywhere.
                if node == originNode:
                    tmp.next = node.next
                    node.next = tmp
                    return node
# V1'
# https://ttzztt.gitbooks.io/lc/content/linked-list/insert-into-a-cyclic-sorted-list.html
# IDEA : LINKED LIST
# IDEA : CONSIDER THE 4 CASES BELOW :
# CASE 1) No head:
# CASE 2) prev.val <= val <= cur.val
# CASE 3) prev.val > cur.val and (val < cur.val or prev.val < cur): cur is either the min or the max with not all nodes with the same value
# CASE 4) val != every nodes's value in a cyclic linked list where every node has the same value
class Solution(object):
    def insert(self, head, val):
        """
        Insert `val` into a sorted circular linked list; return `head`.

        :type head: Node
        :type insertVal: int
        :rtype: Node
        NOTE(review): relies on a Node(val, next) class defined elsewhere.
        """
        node = Node(val, head)
        # case 1 no head.
        # Fix: an empty input now yields a self-referencing one-node cycle;
        # the original returned a node whose next was None, breaking the
        # cyclic invariant.
        if not head:
            node.next = node
            return node
        prev, cur = head, head.next
        while 1:
            # case 2: val fits between an ascending adjacent pair
            if prev.val <= val <= cur.val:
                break
            # case 3: at the max->min seam, val is a new min or max
            elif prev.val > cur.val and (val <= cur.val or prev.val <= val):
                break
            prev, cur = prev.next, cur.next
            # case 4: walked the whole cycle (all node values equal)
            if prev == head:
                break
        # insert node between prev and cur
        prev.next = node
        node.next = cur
        return head
# V1''
# https://github.com/dennyzhang/code.dennyzhang.com/tree/master/problems/insert-into-a-cyclic-sorted-list
class Solution:
    def insert(self, head, insertVal):
        """
        Insert insertVal into a sorted circular linked list; return head.

        :type head: Node
        :type insertVal: int
        :rtype: Node
        NOTE(review): depends on a Node class defined elsewhere.
        """
        node = Node(insertVal, None)
        # empty list: the new node forms a one-element cycle
        if head is None:
            node.next = node
            return node
        # single non-cyclic node: link both nodes into a two-element cycle.
        # NOTE(review): in a proper cyclic list head.next is never None, so
        # this branch only fires for malformed input -- confirm intent.
        if head.next is None:
            head.next = node
            node.next = head
            return head
        # find the smallest value, which is no less than the target
        p = head
        while True:
            # at the seam where values wrap from the maximum back to the minimum
            if p.val > p.next.val:
                # insertVal is a new maximum or minimum: insert at the seam
                if insertVal >= p.val or insertVal <= p.next.val:
                    break
                # strictly inside the wrapped interval: keep walking
                if insertVal > p.next.val and insertVal < p.val:
                    p = p.next
                    continue
                break
            # normal ascending pair that brackets insertVal
            if insertVal >= p.val and insertVal <= p.next.val:
                break
            p = p.next
            if p == head:
                # walked the whole cycle (all values equal): insert anywhere
                break
        # splice node between p and p.next
        node.next = p.next
        p.next = node
        return head
# V1
# https://ttzztt.gitbooks.io/lc/content/linked-list/insert-into-a-cyclic-sorted-list.html
class Solution(object):
    def insert(self, head, val):
        """Insert `val` into a sorted circular linked list; return `head`.

        NOTE(review): relies on a Node(val, next) class defined elsewhere.
        Fix: an empty input now yields a self-referencing one-node cycle
        (the original returned a node whose next was None, breaking the
        cyclic invariant).
        """
        node = Node(val, head)
        # case 1 no head: make the single node point at itself
        if not head:
            node.next = node
            return node
        prev, cur = head, head.next
        while 1:
            # case 2: val fits between an ascending adjacent pair
            if prev.val <= val <= cur.val:
                break
            # case 3: at the max->min seam, val is a new min or max
            elif prev.val > cur.val and (val <= cur.val or prev.val <= val):
                break
            prev, cur = prev.next, cur.next
            # case 4: walked the whole cycle (all node values equal)
            if prev == head:
                break
        # insert node between prev and cur
        prev.next = node
        node.next = cur
        return head
# V1
# https://ithelp.ithome.com.tw/articles/10223721
# V1'
# https://blog.51cto.com/u_15127692/3670466
# V2
|
nilq/baby-python
|
python
|
# Estadística para Datos Univariados
Datos univariados (o data univariada) son datos que se describen con una sola variable. Por ejemplo, las alturas de los compañeros de clase son datos univariados. El propósito principal del análisis de datos univariados es la descripción de los datos.
El análisis de datos univariados no considera relaciones entre distintas variables, como podría ser la relación entre la altura y el peso de los compañeros de clase.
import math
import random
import numpy as np
import pandas as pd
import plotly.express as px
## Muestreo (Sampling)
**Definiciones**
- **Población:** el grupo entero del que queremos estudiar una característica. Por ejemplo, todos las mujeres de Chile, todos los hogares de la comuna de Providencia.
- **Muestra (Sample):** el subgrupo de la población que se utiliza para inferir propiedades de la población. Por ejemplo, para estudiar alguna propiedad de las mujeres de Chile, utilizamos una muestra de mujeres que consiste en 10 mujeres de cada comuna de Chile.
### Técnicas de Muestreo
- **Muestreo por Conveniencia (convenience sampling):** se seleccionan aquellos miembros de la población que son de más fácil acceso. Por ejemplo, para el ejemplo de las mujeres de Chile, utilizo como muestra a las mujeres de mi colegio.
- **Muestreo Aleatorio Simple (simple random sampling):** cada miembro de la población tiene la misma probabilidad de ser elegido. Por ejemplo, con un generador de números aleatorios generamos RUTs y elegimos los RUTs generados que correspondan a mujeres.
- **Muestreo Sistemático (systematic sampling):** Se listan (ordenan) los miembros de la población y se eligen a partir de un número inicial y un intervalo fijo.
- **Muestreo Estratificado (stratified sampling):** Se divide la población en subgrupos más pequeños (estratos). Los estratos se construyen basándose en características comunes de sus miembros. Luego, se elige una muestra aleatoria de cada estrato.
- **Muestreo por Cuota (quota sampling):** Muy similar al muestro estratificado, pero el tamaño de la muestra de cada estrato depende de la proporción del estrato en la población total.
### Tipos de Datos
- **Datos Discretos:** datos cuyos valores posibles pueden ser contados (incluso si en teoría el número de datos posibles es infinito). Por ejemplo, la talla de zapatos es un dato discreto ya que sólo existe un número finito de tallas posibles.
- **Datos continuos:** datos cuyos valores posibles no pueden ser contados. Generalmente se representan con un número real (con decimales). Por ejemplo, la altura de cada individuo. La temperatura a una cierta hora del día en un lugar preespecificado de la ciudad.
## Presentación de los Datos
Para datos discretos, la herramienta más usual de presentación son la tabla y gráfico de frecuencias.
**Ejemplo:**
Consideremos las notas de 32 alumnos en un test en el cual se puede obtener una nota entera del 0 al 10. Supongamos que los resultados son los siguientes:
resultados = [0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 9, 10]
Contamos cuantas ocurrencias de cada nota hay en la muestra:
# Count how many students obtained each grade 0..10 -> (grade, count) pairs.
frecuencia = [(i, resultados.count(i)) for i in range(11)]
df_frecuencia = pd.DataFrame(frecuencia, columns=['nota', 'frecuencia'])
df_frecuencia
Mostramos los datos de la tabla anterior en un gráfico de barras.
fig = px.bar(df_frecuencia, x='nota', y=['frecuencia',],
title=f'Frecuencia de Notas')
fig.show()
Para datos contínuos, en cambio, la herramienta más usual es un histograma. Un histograma también representa la frecuencia de ocurrencia de datos, pero, al tratarse de datos contínuos, se representa la frecuencia de ocurrencia de datos en un cierto intervalo.
Veamos en ejemplo considerando una serie histórica de precios del USD en términos del CLP (USDCLP) de 10 años.
df_usdclp = pd.read_excel('data/20210312_10Y_usdclp.xlsx')
fig = px.line(df_usdclp, x='fecha', y=['valor',],
title=f'Serie Histórica USDCLP')
fig.show()
Podemos ver como los valores están entre 450 y 870 aproximadamente. Vamos a dividir ese intervalo en subintervalos de 10 CLP y luego graficaremos (con un gráfico de barras) la frecuencia de precios observados en cada uno de esos subintervalos.
fig = px.histogram(
df_usdclp,
x="valor",
title='Histograma USDCLP - Frecuencia en Intervalos del 10 CLP')
fig.show()
### Forma del Histograma
Es importante describir la forma del histograma, la principal característica de un histograma es la presencia de sesgo (skew):
df_sim = pd.DataFrame([(0,1), (1,2), (2,3), (3,4), (4,5), (5,6), (6,8), (7,9), (8,11), (9,10), (10,8)],
columns=['intervalo', 'frecuencia'])
fig = px.bar(df_sim, x='intervalo', y=['frecuencia',],
title=f'Sesgo Negativo')
fig.show()
df_sim = pd.DataFrame([(0,8), (1,10), (2,11), (3,9), (4,8), (5,6), (6,5), (7,4), (8,3), (9,2), (10,1)],
columns=['intervalo', 'frecuencia'])
fig = px.bar(df_sim, x='intervalo', y=['frecuencia',],
title=f'Sesgo Positivo')
fig.show()
df_sim = pd.DataFrame([(0,1), (1,2), (2,3), (3,4), (4,5), (5,6), (6,5), (7,4), (8,3), (9,2), (10,1)],
columns=['intervalo', 'frecuencia'])
fig = px.bar(df_sim, x='intervalo', y=['frecuencia',],
title=f'Sin Sesgo')
fig.show()
## Medidas de Tendencia Central
Hasta ahora hemos visto como recopilar y presentar datos. El próximo paso es elegir un único valor que pueda representar la data de forma general. Una medida de tendencia central que nos indica donde está "la mitad" de los datos recopilados. Las medidas más comunes de tendencia central son:
- moda
- media
- mediana
### Moda
**Definición:** la moda es el valor que ocurre con más frecuencia en los datos.
**Tips:**
- Puede haber más de una *moda* si dos o más valores son los que ocurren con mayor frecuencia.
- Si no hay ningún valor de la muestra que ocurra con mayor frecuencia (todos ocurren sólo una vez) entonces la muestra no tiene *moda*.
**Ejemplo:**
data = [4, 7, 3, 3, 1, 2, 7, 5, 7, 11]
# Frequency of each distinct value in the sample.
contador = {elem: data.count(elem) for elem in set(data)}
# Keep the (value, count) pairs whose count equals the maximum -> the mode(s).
highest_counter = [(k, v) for k, v in contador.items() if v == max(contador.values())]
print(f'La moda es: {highest_counter[0][0]}')
Cuando los datos se presentan en una tabla de frecuencias, la moda es el grupo que tiene la más alta frecuencia. En el gráfico de barras, es el grupo con la barra más alta.
df_frecuencia = pd.DataFrame.from_dict(contador, orient='index')
df_frecuencia.columns = ['frecuencia']
df_frecuencia
fig = px.bar(df_frecuencia, x=df_frecuencia.index, y=['frecuencia',],
title=f'Gráfico de Barras Notas')
fig.show()
#### La Clase Modal
Cuando se busca la modal de datos que han sido agrupados, se debe determinar el **grupo** que tiene la mayor frecuencia. A este grupo se le llama la **clase modal**.
Sin revisar toda la data, no se puede determinar cuál valor dentro de la clase modal es el que tiene la mayor frecuencia.
### Media
La media aritmética, también llamada promedio, es la medida más común de tendencia central. La media es simplemente la suma de todos los valores, dividida por el número total de datos. Usualmente se denota con $\mu$ o $\overline x$. De forma más matemática:
$$\overline x = \frac{\sum_{i=1}^N x_i}{N}$$
Al contrario de la moda, la media, usualmente, es un número que no pertenece a los datos. Por ejemplo, si tus notas son 6, 6, 7 y 7 la media será 6.5 que no coincide con ninguna de las notas obtenidas.
¿Cómo se obtiene la media de los datos a partir de la tabla de frecuencias?
**Respuesta:** en el caso anterior la media se obtiene con la siguiente fórmula.
$$\overline x =\frac{\sum_{i=1}^N f_i\cdot x_i}{\sum_{i=1}^N f_i}$$
donde $f_i$ es la frecuencia de la observación $x_i$.
### Mediana
La mediana es el dato que está justo en el medio cuando los datos se ordenan de forma ascendente. Si el número de datos es par, entonces la mediana es la media de los dos datos que están en el medio.
Esto implica que 50% de los datos están a la izquierda de la mediana y 50% de los datos están a la derecha de la mediana.
**Ejemplo:**
Encontrar la mediana de 7, 12, 1, 4, 2, 17, 9, 11, 16, 10, 18.
datos = [7, 12, 1, 4, 2, 17, 9, 11, 16, 10, 18]
datos.sort()
print(f'Los datos ordenados son: {datos}')
Son 11 elementos, el número del medio es entonces el número 6. Por lo tanto la mediana es:
# Fix: with 11 sorted values the median is the 6th element, i.e. index 5
# (the original printed datos[6], the 7th value).
print(f'mediana: {datos[5]}')
### Resumen
````{panels}
:column: col-4
:card: border-2
Moda
^^^
La **moda** cual es el valor que con más frecuencia ocurre en la muestra.
**Ventajas**
- Los valores extremos no afectan la moda.
**Desventajas**
- No utiliza todos los elementos del conjunto de datos.
- No es necesariamente única. Puede haber más de una **moda**. En estos casos su interpretación se hace difícil.
- La **moda** no está definida cuando ningún valor se repite.
---
Media
^^^
La media es la suma de todos los datos dividida por el número total de datos.
**Ventajas**
- Es la medida más popular y más utilizada.
- Utiliza todos los datos de la muestra.
- Es única y está siempre bien definida.
- Útil para comparar distintas muestras.
- Muy utilizada en cálculos posteriores.
**Desventajas**
- Se ve afectada por los valores extremos de la muestra.
---
Mediana
^^^
Ordenados los datos de la muestra de menor a mayor, la mediana es el dato que está justo al medio de la muestra.
**Ventajas**
- Los valores extremos no la afectan tanto como a la media.
- Útil para comparar distintas muestras.
- Es única y está siempre bien definida.
**Desventajas**
- No considera todos los datos de la muestra.
- Se utiliza poco en cálculos posteriores.
````
## Medidas de Dispersión
### Rango
El **rango** es la diferencia entre el máximo y el mínimo valor de una muestra.
$$Rango=\max\left(x_1,x_2,\ldots ,x_N\right)-\min\left(x_1,x_2,\ldots ,x_N\right)$$
donde $x_1,x_2,\ldots ,x_N$ son los datos de la muestra.
En el ejemplo de las notas de 32 alumnos en un examen con puntajes del 1 al 10 los resultados eran:
print(f'Resultados: {resultados}')
En este caso tenemos que:
min_res = min(resultados)
print(f'La nota mínima es: {min_res}')
max_res = max(resultados)
print(f'La nota máxima es: {max_res}')
rango = max_res - min_res
print(f'Por lo tanto el rango es: {max_res} - {min_res} = {rango} ')
### Cuartiles
Los **cuartiles**, son los valores que dividen la data en cuartos.
- El primer cuartil (llamado cuartil inferior o $Q_1$) es tal que 25% de los datos son inferiores a $Q_1$.
- El segundo cuartil es la mediana, 50% de los datos son inferiores a ella.
- El tercer cuartil (llamado cuartil superior o $Q_3$) es tal que 75% de los datos son inferiores a $Q_3$.
- El último cuartil es el máximo valor de la muestra.
**Observación:** $Q_1$ es la mediana del 50% inferior de la muestra y $Q_3$ es la mediana del 50% superior de la muestra.
### Box Plots
Es posible obtener una idea de la distribución de una muestra de datos examinando el siguiente resumen de 5 números:
- El valor mínimo
- El primer cuartil
- La mediana (segundo cuartil)
- El tercer cuartil
- El valor máximo
Estos 5 números pueden ser representados gráficamente a través de un diagrama de *Caja y Bigotes* (box-and-whisker diagram).
Veamos un ejemplo con datos generados de forma aleatoria.
import plotly.graph_objects as go
import numpy as np
x0 = np.array([-0.01266288, -0.39623657, -2.27460173, 0.26492423, -0.37191596,
-0.0469952 , -1.12485845, 0.26766143, -1.74320972, 0.58269502,
0.56357888, -2.16268586, 0.65205293, 0.06388311, 0.86067789,
-1.19481468, -0.45478148, -0.86976107, -1.9288584 , 1.28710555,
0.17671311, -1.19529302, 0.69459011, 0.51450959, 1.81595071,
0.8890141 , -1.31808439, -1.57484991, 0.2511651 , 0.64026872,
-1.04312134, 0.59108169, 0.75979648, -1.44733236, 1.65422606,
-0.2734052 , 1.75192239, 1.03558314, 1.01046211, 0.73390352,
-0.82820519, -1.53824126, 0.58670701, -1.33037958, 1.34250693,
0.71374556, -0.80025983, -0.75024957, -1.75550578, -1.62384854])
fig = go.Figure()
fig.add_trace(go.Box(x=x0))
fig.show()
La diferencia $Q_3-Q_1$ suele llamarse *rango intercuantil* y se denota con $IQR$.
### Outliers
Los datos extremos de una muestra se llaman **outliers** (en español también se usa la palabra en inglés, no existe una traducción ampliamente aceptada).
```{admonition} Criterio para Identificar un Outlier
Se considera outlier cualquier valor que esté $1.5 \cdot IQR$ veces por debajo de $Q_1$ o por encima de $Q_3$.
```
#### Cuando Rechazar o Mantener un Outlier
Hemos visto un criterio para identificar un outlier. Ahora se debe decidir si se acepta o se rechaza ese outlier (en la práctica esto significa eliminar o mantener el dato en la muestra).
Los outliers pueden tener un efecto importante en medidas estadísticas como la media, pero algunos de ellos son datos válidos y no es aceptable rechazarlos sin una razón bien fundamentada.
Por el contrario, cuando un outlier se produce por un error de medición, éste debe ser eliminado. Por ejemplo, si estamos estudiando la altura de una población, un dato de 3.0 metros es seguramente un error de medición.
Por otra parte, supongamos que los resultados de una prueba de 7 estudiantes son los siguientes: 20%, 22%, 18%, 30%, 26%, 89% y 21%. Si se concluye que el 89% está bien registrado, entonces eliminarlo conduciría a concluir que la prueba era demasiado difícil para los alumnos. Sin embargo, considerando que no hay error de medición, al mantenerlo se podría concluir que el nivel de dificultad de la prueba era el adecuado y que 6 de los 7 alumnos no se prepararon lo suficiente.
### Frecuencia Acumulada
Los siguientes datos muestran el número de veces que 50 estudiantes perdieron un lápiz durante la semana:
lapices = [5, 9, 10, 5, 9, 9, 8, 4, 9, 8, 5, 7, 3, 10, 7, 7, 8, 7, 6, 6, 9, 6, 4,
4, 10, 5, 6, 6, 3, 8, 7, 8,3, 4, 6, 6, 5, 7, 5, 4, 3, 5, 2, 4, 2, 8, 1,0, 3, 5]
Vamos a construir una tabla de frecuencia acumulada, es decir, en cada fila vamos a anotar el número de lápices perdidos y el número de alumnos que ha perdido ese número de lápices o menos:
# Cumulative frequency: for each i in 0..10, count the students who lost
# i or fewer pencils.
frec_acum = [(i, sum([lapices.count(j) for j in range(i+1)])) for i in range(11)]
df_frec_acum = pd.DataFrame(
    frec_acum,
    columns=['num_lapices', 'Número de alumnos que perdió num_lapices o menos lápices'])
df_frec_acum
Vamos a dibujar el gráfico de la fecuencia acumulada:
fig = px.line(
df_frec_acum,
x='num_lapices',
y=['Número de alumnos que perdió num_lapices o menos lápices',],
title=f'Gráfico de la Frecuencia Acumulada')
fig.update_traces(mode='markers+lines')
fig.show()
- El menor valor en el eje $y$ del gráfico es 1 y el mayor valor es 50, que coincide con el número de alumnos.
- Dado que para cada nuevo valor de la variable `num_lapices`, agregamos más alumnos, el gráfico nunca puede ser decreciente.
Cuando se dispone de toda la data (*raw data*) se puede utilizar la fórmula:
$$mediana=\left(\frac{n+resto\left(n,2\right)}{2}\right)esimo\space valor$$
para calcular la mediana y los cuartiles cuando la data se ordena de forma ascendente. Aquí, $resto\left(n,2\right)$ es el resto de la división entera de $n$ por 2, por lo tanto será 0 si $n$ es par y 1 si $n$ es impar. Para muestras con muchos datos, esta distinción se hace muy poco significativa.
Sin embargo, cuando se dispone de data agrupada, puede ser difícil determinar la mediana o un cuartil cuando ese valor está en el medio de uno de los grupos.
Las curvas de frecuencia acumulada permiten estimar la mediana y los cuartiles a partir de data acumulada. Por ejemplo, para encontrar la mediana, se dibuja una línea horizontal que cruza el eje $y$ en el $\frac{n}{2} esimo$ valor y desde la intersección de esa línea con el gráfico de frecuencia acumulada, se traza una línea vertical hacia abajo. El punto donde esta línea intersecta el eje $x$, corresponde a la mediana.
Por ejemplo, en el caso anterior:
fig = px.line(
df_frec_acum,
x='num_lapices',
y=['Número de alumnos que perdió num_lapices o menos lápices'],
title=f'Gráfico de la Frecuencia Acumulada con Cálculo de Mediana')
fig.update_traces(mode='lines')
fig.add_hline(y=25, annotation_text=' y=25', line_color='green')
fig.add_vline(x=5.3, annotation_text=' x=5.3 => mediana=5.3\n', line_color='red')
fig.show()
### Percentiles
Un *percentil* es un número tal que un porcentaje de los datos están por debajo de del percentil. Por ejemplo, si el percentil 10% es $P_{10}$ esto significa que un 10% de los datos de la muestra están por debajo de $P_{10}$.
Veamos un ejemplo con una muestra de 500 datos con números aleatorios entre 1 y 100 (que podrían representar los puntajes en una prueba tomada por 500 alumnos).
np.random.seed(123456)
notas = np.random.lognormal(0, 1, 500) * 40
notas = [int(min(nota, 100)) for nota in notas]
notas_frec_acum = [(i, sum([notas.count(j) for j in range(i+1)])) for i in range(101)]
df_notas_frec_acum = pd.DataFrame(
notas_frec_acum,
columns=['nota', 'Número de alumnos con nota igual o inferior'])
Veamos las primeras 12 filas de la tabla con las frecuencias acumuladas:
df_notas_frec_acum.head(12)
Podemos observar que hay 5 alumnos (un 1.0%) con puntaje igual o menor a 2 y 7 (1.4%) alumnos con un puntaje igual a 3. Esto nos indica que el percentil 1% de esta muestra está "entremedio" de estos dos valores y que el percentil 1% no es un puntaje posible de obtener (así como la media, muchas veces, no corresponde a ningún resultado de la medición).
```{admonition} Cálculo de Percentil
:class: tip
En estas situaciones, existen varias maneras de calcular un percentil y es importante tener claro, según el contexto, cuál se está utilizando y cómo se calcula.
```
Por ejemplo en este caso:
percentil = 1
metodo = 'lower'
print(f'Percentil {percentil/100:.1%}, método={metodo}: {np.percentile(notas, percentil, interpolation=metodo):.4f}')
metodo = 'higher'
print(f'Percentil {percentil/100:.1%}, método={metodo}: {np.percentile(notas, percentil, interpolation=metodo):.4f}')
metodo = 'midpoint'
print(f'Percentil {percentil/100:.1%}, método={metodo}: {np.percentile(notas, percentil, interpolation=metodo):.4f}')
metodo = 'linear'
print(f'Percentil {percentil/100:.1%}, método={metodo}: {np.percentile(notas, percentil, interpolation=metodo):.4f}')
En este ejemplo estamos usando la función `percentile` de `numpy`, que tiene la siguiente documentación. Lo referido al parámetro `interpolation` explica el funcionamiento de los distintos métodos.
print(np.percentile.__doc__)
### Varianza y Desviación Estándar
El rango y los cuartiles son buenas medidas de cuan dispersa está una muestra de datos respecto a su mediana, pero no utilizan toda la data disponible. Por otro lado, la **varianza** es una métrica de dispersión que utiliza todos los datos de la muestra. Es una métrica que refleja qué tan lejanos están, en promedio, cada uno de los datos de la media.
Para ejemplificar, volvamos a considerar el ejemplo de los resultados de 32 alumnos en una prueba con puntajes entre 0 y 10. Los datos eran los siguientes:
print(f'Notas de 32 alumnos: {resultados}')
Tabulemos esta data y vamos calculando paso a paso la varianza:
df_resultados = pd.DataFrame(resultados, columns=['nota',])
df_resultados
Se calcula la media:
media = np.mean(df_resultados['nota'])
print(f'La media es: {media:.2f}')
Se calcula la distancia de cada nota a la media:
df_resultados['(nota - media)'] = df_resultados['nota'] - media
df_resultados
Se calcula ahora el cuadrado de la distancia:
df_resultados['(nota - media)^2'] = df_resultados['(nota - media)'] ** 2
df_resultados
Elevar al cuadrado la distancia logra dos objetivos:
- evitar que las distancias negativas se compensen con las positivas dando así un falsa idea de la dispersión.
- ponderar más en el promedio final las distancias mayores.
Finalmente, la varianza, que usualmente se denota con $\sigma^2$ está dada por:
$$\sigma^2=\frac{1}{n}\sum_{i=1}^n{\left(x_i-\mu\right)^2}$$
Aquí, $\mu$ es la media de los datos.
En el ejemplo, la variaza resulta ser:
print(f"Varianza es: {np.mean(df_resultados['(nota - media)^2']):.2f}")
Dado que la unidad en la que se mide la **varianza** no coincide con la unidad de los datos, también se define la desviación estándar, que se denota con $\sigma$, como:
$$\sigma=\sqrt{\frac{1}{n}\sum_{i=1}^n{\left(x_i-\mu\right)^2}}=\sqrt{\sigma^2}$$
#### Propiedades de la Desviación Estándar
- La **desviación estándar** sólo se utiliza para medir dispersión alrededor de la media.
- La **varianza** y la **desviación estándar** son siempre positivas.
- La **desviación estándar** es sensible a los *outliers*. Un sólo *outlier* puede cambiar significativamente su valor.
- Para muestras de datos con una **media** similar, mientras más dispersión en los datos mayor es la **desviación estándar**.
|
nilq/baby-python
|
python
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the functions needed for computing the molecular Hamiltonian.
"""
# pylint: disable= too-many-branches, too-many-arguments, too-many-locals, too-many-nested-blocks
import autograd.numpy as anp
import pennylane as qml
from pennylane import numpy as np
from pennylane.hf.hartree_fock import generate_scf, nuclear_energy
def generate_electron_integrals(mol, core=None, active=None):
    r"""Return a function that computes the one- and two-electron integrals in the molecular
    orbital basis.

    The one- and two-electron integrals are required to construct a molecular Hamiltonian in
    the second-quantized form

    .. math::

        H = \sum_{pq} h_{pq} c_p^{\dagger} c_q + \frac{1}{2} \sum_{pqrs} h_{pqrs} c_p^{\dagger} c_q^{\dagger} c_r c_s,

    where :math:`c^{\dagger}` and :math:`c` are the creation and annihilation operators,
    respectively, and :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron
    integrals, obtained by transforming the atomic-orbital core matrix and repulsion tensor
    with the molecular-orbital coefficient matrix :math:`C`:

    .. math::

        h_{pq} = \sum_{\mu \nu} C_{p \mu} h_{\mu \nu} C_{\nu q},
        \qquad
        h_{pqrs} = \sum_{\mu \nu \rho \sigma} C_{p \mu} C_{q \nu} h_{\mu \nu \rho \sigma} C_{\rho r} C_{\sigma s}.

    Args:
        mol (Molecule): the molecule object
        core (list[int]): indices of the core (doubly occupied, frozen) orbitals;
            ``None`` is treated as "no core orbitals" when an active space is given
        active (list[int]): indices of the active orbitals; ``None`` is treated as
            "all orbitals" when core orbitals are given

    Returns:
        function: function that computes the core constant, and the one- and two-electron
        integrals

    **Example**

    >>> symbols = ['H', 'H']
    >>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False)
    >>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554],
    >>>                   [3.42525091, 0.62391373, 0.1688554]], requires_grad=True)
    >>> mol = qml.hf.Molecule(symbols, geometry, alpha=alpha)
    >>> args = [alpha]
    >>> generate_electron_integrals(mol)(*args)
    (1.0,
     array([[-1.3902192695e+00,  0.0000000000e+00],
            [-4.4408920985e-16, -2.9165331336e-01]]),
     array([...]))
    """

    def electron_integrals(*args):
        r"""Compute the one- and two-electron integrals in the molecular orbital basis.

        Args:
            args (array[array[float]]): initial values of the differentiable parameters

        Returns:
            tuple[array[float]]: core constant, one- and two-electron integrals
        """
        _, coeffs, _, h_core, repulsion_tensor = generate_scf(mol)(*args)
        # Rotate the AO core matrix and repulsion tensor into the MO basis.
        one = anp.einsum("qr,rs,st->qt", coeffs.T, h_core, coeffs)
        two = anp.swapaxes(
            anp.einsum(
                "ab,cd,bdeg,ef,gh->acfh", coeffs.T, coeffs.T, repulsion_tensor, coeffs, coeffs
            ),
            1,
            3,
        )
        core_constant = nuclear_energy(mol.nuclear_charges, mol.coordinates)(*args)

        if core is None and active is None:
            return core_constant, one, two

        # Robustness fix: the original raised a TypeError when only one of
        # `core`/`active` was provided.  Treat a missing `core` as "no frozen
        # orbitals" and a missing `active` as "all orbitals".
        core_ = core if core is not None else []
        active_ = active if active is not None else list(range(one.shape[0]))

        # Fold the (doubly occupied) core orbitals into the constant term.
        for i in core_:
            core_constant = core_constant + 2 * one[i][i]
            for j in core_:
                core_constant = core_constant + 2 * two[i][j][j][i] - two[i][j][i][j]

        # Add the core-orbital mean-field contribution to the active one-electron
        # integrals.  The one-hot matrix `o` keeps the update autograd-friendly.
        for p in active_:
            for q in active_:
                for i in core_:
                    o = anp.zeros(one.shape)
                    o[p, q] = 1.0
                    one = one + (2 * two[i][p][q][i] - two[i][p][i][q]) * o

        # Restrict the integrals to the active space.
        one = one[anp.ix_(active_, active_)]
        two = two[anp.ix_(active_, active_, active_, active_)]

        return core_constant, one, two

    return electron_integrals
def generate_fermionic_hamiltonian(mol, cutoff=1.0e-12, core=None, active=None):
    r"""Return a function that computes the fermionic hamiltonian.

    Args:
        mol (Molecule): the molecule object
        cutoff (float): cutoff value for discarding the negligible electronic integrals
        core (list[int]): indices of the core orbitals, passed through to
            :func:`generate_electron_integrals`
        active (list[int]): indices of the active orbitals, passed through to
            :func:`generate_electron_integrals`

    Returns:
        function: function that computes the fermionic hamiltonian

    **Example**

    >>> symbols = ['H', 'H']
    >>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False)
    >>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554],
    >>>                   [3.42525091, 0.62391373, 0.1688554]], requires_grad=True)
    >>> mol = qml.hf.Molecule(symbols, geometry, alpha=alpha)
    >>> args = [alpha]
    >>> h = generate_fermionic_hamiltonian(mol)(*args)
    """
    def fermionic_hamiltonian(*args):
        r"""Compute the fermionic hamiltonian.

        Args:
            args (array[array[float]]): initial values of the differentiable parameters

        Returns:
            tuple(array[float], list[list[int]]): the Hamiltonian coefficients and operators;
            an operator is a list of spin-orbital indices ([] is the identity/constant term)
        """
        core_constant, one, two = generate_electron_integrals(mol, core, active)(*args)
        core_constant = anp.array([core_constant])
        # Keep only one-electron integrals above the cutoff; spatial-orbital
        # index p maps to spin orbitals 2p (up) and 2p+1 (down).
        indices_one = anp.argwhere(abs(one) >= cutoff)
        operators_one = (indices_one * 2).tolist() + (
            indices_one * 2 + 1
        ).tolist()  # up-up + down-down terms
        coeffs_one = anp.tile(one[abs(one) >= cutoff], 2)
        # Two-electron terms: each surviving integral spawns the four
        # spin combinations that conserve spin.
        indices_two = anp.argwhere(abs(two) >= cutoff)
        n = len(indices_two)
        operators_two = (
            [(indices_two[i] * 2).tolist() for i in range(n)]  # up-up-up-up term
            + [
                (indices_two[i] * 2 + [0, 1, 1, 0]).tolist() for i in range(n)
            ]  # up-down-down-up term
            + [
                (indices_two[i] * 2 + [1, 0, 0, 1]).tolist() for i in range(n)
            ]  # down-up-up-down term
            + [(indices_two[i] * 2 + 1).tolist() for i in range(n)]  # down-down-down-down term
        )
        # Factor 1/2 from the second-quantized two-body term.
        coeffs_two = anp.tile(two[abs(two) >= cutoff], 4) / 2
        coeffs = anp.concatenate((core_constant, coeffs_one, coeffs_two))
        operators = [[]] + operators_one + operators_two
        # Reorder coefficients to match the sorted operator list.
        # NOTE(review): list.index returns the first match, so duplicate
        # operator lists would all map to one coefficient — presumably the
        # operator lists are unique here; verify if cutoff filtering changes.
        indices_sort = [operators.index(i) for i in sorted(operators)]
        return coeffs[indices_sort], sorted(operators)
    return fermionic_hamiltonian
def generate_hamiltonian(mol, cutoff=1.0e-12, core=None, active=None):
    r"""Return a function that computes the qubit hamiltonian.

    Args:
        mol (Molecule): the molecule object
        cutoff (float): cutoff value for discarding the negligible electronic integrals
        core (list[int]): indices of the core orbitals
        active (list[int]): indices of the active orbitals

    Returns:
        function: function that computes the qubit hamiltonian

    **Example**

    >>> symbols = ['H', 'H']
    >>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False)
    >>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554],
    >>>                   [3.42525091, 0.62391373, 0.1688554]], requires_grad=True)
    >>> mol = qml.hf.Molecule(symbols, geometry, alpha=alpha)
    >>> args = [alpha]
    >>> h = generate_hamiltonian(mol)(*args)
    >>> h.terms[0]
    tensor([ 0.29817879+0.j,  0.20813365+0.j,  0.20813365+0.j, ...], requires_grad=True)
    """

    def hamiltonian(*args):
        r"""Compute the qubit hamiltonian.

        Args:
            args (array[array[float]]): initial values of the differentiable parameters

        Returns:
            Hamiltonian: the qubit Hamiltonian
        """
        h_ferm = generate_fermionic_hamiltonian(mol, cutoff, core, active)(*args)
        ops = []
        # Start from an empty coefficient vector so `coeffs` is defined even
        # if the constant ([]) term were ever absent (the original left it
        # unbound until the identity branch ran).
        coeffs = np.array([])
        for n, t in enumerate(h_ferm[1]):
            if len(t) == 0:
                # Constant (core) term maps to the identity.
                coeffs = np.concatenate([coeffs, np.array([h_ferm[0][n]])])
                ops = ops + [qml.Identity(0)]
            elif len(t) in (2, 4):
                # One- and two-body terms share identical handling; the
                # original duplicated this whole branch for len(t) == 4.
                op = _generate_qubit_operator(t)
                if op != 0:
                    # Convert each (wire, "P") word list into a PennyLane observable.
                    for i, o in enumerate(op[1]):
                        if len(o) == 0:
                            op[1][i] = qml.Identity(0)
                        elif len(o) == 1:
                            op[1][i] = _return_pauli(o[0][1])(o[0][0])
                        else:
                            k = qml.Identity(0)
                            for o_ in o:
                                k = k @ _return_pauli(o_[1])(o_[0])
                            op[1][i] = k
                    coeffs = np.concatenate([coeffs, np.array(op[0]) * h_ferm[0][n]])
                    ops = ops + op[1]
        h = qml.Hamiltonian(coeffs, ops, simplify=True)
        return h

    return hamiltonian
def _generate_qubit_operator(op):
    r"""Convert a fermionic operator to a qubit operator using the Jordan-Wigner mapping.

    The one-body fermionic operator :math:`a_2^\dagger a_0` is constructed as [2, 0] and the
    two-body operator :math:`a_4^\dagger a_3^\dagger a_2 a_1` is constructed as [4, 3, 2, 1].

    Args:
        op (list[int]): the fermionic operator

    Returns:
        tuple(list[complex], list[list[int, str]]): list of coefficients and the
        qubit-operator terms, or the integer 0 for a vanishing two-body operator

    **Example**

    >>> f = [0, 0]
    >>> q = _generate_qubit_operator(f)
    >>> q
    ([(0.5+0j), (-0.5+0j)], [[], [(0, 'Z')]]) # corresponds to :math:`\frac{1}{2}(I_0 - Z_0)`
    """
    # Tag each index with 1 (creation) or 0 (annihilation).
    if len(op) == 2:
        op = [((op[0], 1), (op[1], 0))]
    if len(op) == 4:
        op = [((op[0], 1), (op[1], 1), (op[2], 0), (op[3], 0))]
        # a_p^† a_p^† = 0 and a_q a_q = 0: repeated indices kill the term.
        if op[0][0][0] == op[0][1][0] or op[0][2][0] == op[0][3][0]:
            return 0
    for t in op:
        for l in t:
            # Jordan-Wigner: a_j^(†) -> (prod_{k<j} Z_k) (X_j ∓ iY_j)/2,
            # stored as word lists with the scalar factor as the last element.
            z = [(index, "Z") for index in range(l[0])]
            x = z + [(l[0], "X"), 0.5]
            if l[1]:
                y = z + [(l[0], "Y"), -0.5j]
            else:
                y = z + [(l[0], "Y"), 0.5j]
            if t.index(l) == 0:
                q = [x, y]
            else:
                # Multiply the accumulated expansion by the new (X, Y) pair.
                m = []
                for t1 in q:
                    for t2 in [x, y]:
                        q1, c1 = _pauli_mult(t1[:-1], t2[:-1])
                        m.append(q1 + [c1 * t1[-1] * t2[-1]])
                q = m
    # Split words and scalars, then merge duplicate words by summing scalars.
    c = [p[-1] for p in q]
    o = [p[:-1] for p in q]
    for item in o:
        k = [i for i, x in enumerate(o) if x == item]
        if len(k) >= 2:
            for j in k[::-1][:-1]:
                del o[j]
                c[k[0]] = c[k[0]] + c[j]
                del c[j]
    return c, o
def _pauli_mult(p1, p2):
    r"""Return the result of multiplication between two tensor products of Pauli operators.

    The Pauli operator :math:`(P_0)` is denoted by [(0, 'P')], where :math:`P` represents
    :math:`X`, :math:`Y` or :math:`Z`.

    Args:
        p1 (list[tuple[int, str]]): the first tensor product of Pauli operators
        p2 (list[tuple[int, str]]): the second tensor product of Pauli operators

    Returns:
        tuple(list[tuple[int, str]], complex): list of the Pauli operators and the coefficient

    **Example**

    >>> p1 = [(0, "X"), (1, "Y")],  # X_0 @ Y_1
    >>> p2 = [(0, "X"), (2, "Y")],  # X_0 @ Y_2
    >>> _pauli_mult(p1, p2)
    ([(2, "Y"), (1, "Y")], 1.0) # p1 @ p2 = X_0 @ Y_1 @ X_0 @ Y_2
    """
    c = 1.0  # accumulated scalar phase from products such as X·Y = iZ
    t1 = [t[0] for t in p1]  # wire indices occurring in p1
    t2 = [t[0] for t in p2]  # wire indices occurring in p2
    k = []
    for i in p1:
        # Operator only in p1: passes through unchanged (pauli_mult maps
        # single letters to themselves).
        if i[0] in t1 and i[0] not in t2:
            k.append((i[0], pauli_mult[i[1]]))
        for j in p2:
            # Operator only in p2: also passes through.  NOTE(review): this
            # appends once per p1 element; duplicates are removed below.
            if j[0] in t2 and j[0] not in t1:
                k.append((j[0], pauli_mult[j[1]]))
            # Same wire in both: multiply via the lookup tables, picking up
            # a phase (±i) when the pair is in pauli_coeff.
            if i[0] == j[0]:
                if i[1] + j[1] in pauli_coeff:
                    k.append((i[0], pauli_mult[i[1] + j[1]]))
                    c = c * pauli_coeff[i[1] + j[1]]
                else:
                    k.append((i[0], pauli_mult[i[1] + j[1]]))
    # Drop identities, then collapse the duplicate entries produced above.
    k = [i for i in k if "I" not in i[1]]
    for item in k:
        k_ = [i for i, x in enumerate(k) if x == item]
        if len(k_) >= 2:
            for j in k_[::-1][:-1]:
                del k[j]
    return k, c
def _return_pauli(p):
    r"""Map a Pauli symbol to the corresponding PennyLane operator class.

    Args:
        p (str): symbol representing the Pauli operator ('X', 'Y' or 'Z')

    Returns:
        pennylane.ops: the PennyLane Pauli operator class

    **Example**

    >>> _return_pauli('X')
    qml.PauliX
    """
    # Anything other than 'X' or 'Y' falls through to PauliZ, exactly as the
    # original if-chain did.
    return {"X": qml.PauliX, "Y": qml.PauliY}.get(p, qml.PauliZ)
# Product table for single-qubit Pauli operators (phase handled separately
# in `pauli_coeff` below): the key is the concatenation of the two symbols,
# the value is the resulting operator up to a scalar.
pauli_mult = {
    "XX": "I",
    "YY": "I",
    "ZZ": "I",
    "ZX": "Y",
    "XZ": "Y",
    "ZY": "X",
    "YZ": "X",
    "XY": "Z",
    "YX": "Z",
    "IX": "X",
    "IY": "Y",
    "IZ": "Z",
    "XI": "X",
    "YI": "Y",
    "ZI": "Z",
    "I": "I",
    "II": "I",
    "X": "X",
    "Y": "Y",
    "Z": "Z",
}
# Scalar phases for the anti-commuting products above (e.g. Z·X = iY);
# pairs absent from this table have phase +1.
pauli_coeff = {
    "ZX": 1.0j,
    "XZ": -1.0j,
    "ZY": -1.0j,
    "YZ": 1.0j,
    "XY": 1.0j,
    "YX": -1.0j,
}
|
nilq/baby-python
|
python
|
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.utils import ircLower
from zope.interface import implements
from fnmatch import fnmatchcase
class WhoCommand(ModuleData, Command):
    """Implements the IRC WHO command: search visible users by mask or channel."""
    implements(IPlugin, IModuleData, ICommand)
    name = "WhoCommand"
    core = True
    def userCommands(self):
        # Register WHO (minimum 1 parameter slot) handled by this object.
        return [ ("WHO", 1, self) ]
    def parseParams(self, user, params, prefix, tags):
        """Parse WHO arguments into a dict with 'mask' and optional 'opersonly'."""
        if not params:
            # Bare WHO: match everyone visible.
            return {
                "mask": "*"
            }
        if len(params) > 1 and params[1] == "o":
            # "WHO <mask> o" restricts the reply to opers.
            return {
                "mask": params[0],
                "opersonly": True
            }
        return {
            "mask": params[0]
        }
    def execute(self, user, data):
        """Collect matching users and send RPL_WHOREPLY lines plus RPL_ENDOFWHO."""
        matchingUsers = []
        channel = None
        mask = data["mask"]
        if mask in ("0", "*"):
            # Wildcard WHO: registered users with no channel in common with
            # the requester, subject to visibility checks.
            for targetUser in self.ircd.users.itervalues():
                if not targetUser.isRegistered():
                    continue
                if not set(user.channels).intersection(targetUser.channels) and self.ircd.runActionUntilValue("showuser", user, targetUser, users=[user, targetUser]) is not False:
                    matchingUsers.append(targetUser)
        elif mask in self.ircd.channels:
            # WHO on a channel: list its visible members.
            channel = self.ircd.channels[data["mask"]]
            for targetUser in channel.users.iterkeys():
                if self.ircd.runActionUntilValue("showchanneluser", channel, user, targetUser, users=[user, targetUser], channels=[channel]) is not False:
                    matchingUsers.append(targetUser)
        else:
            # Free-form mask: match against host, gecos, server name or nick.
            for targetUser in self.ircd.users.itervalues():
                if not targetUser.isRegistered():
                    continue # We should exclude all unregistered users from this search
                if self.ircd.runActionUntilValue("showuser", user, targetUser, users=[user, targetUser]) is False:
                    continue
                lowerMask = ircLower(mask)
                serverName = self.ircd.name if targetUser.uuid[:3] == self.ircd.serverID else self.ircd.servers[targetUser.uuid[:3]].name
                if fnmatchcase(ircLower(targetUser.host()), lowerMask) or fnmatchcase(ircLower(targetUser.gecos), lowerMask) or fnmatchcase(ircLower(serverName), lowerMask) or fnmatchcase(ircLower(targetUser.nick), lowerMask):
                    matchingUsers.append(targetUser)
        if "opersonly" in data:
            # Filter the result set down to opers for "WHO <mask> o".
            allMatches = matchingUsers
            matchingUsers = []
            for targetUser in allMatches:
                if self.ircd.runActionUntilValue("userhasoperpermission", targetUser, "", users=[targetUser]):
                    matchingUsers.append(targetUser)
        for targetUser in matchingUsers:
            # The first three uuid characters identify the user's server.
            server = self.ircd if targetUser.uuid[:3] == self.ircd.serverID else self.ircd.servers[targetUser.uuid[:3]]
            serverName = server.name
            isOper = self.ircd.runActionUntilValue("userhasoperpermission", targetUser, "", users=[targetUser])
            isAway = targetUser.metadataKeyExists("away")
            status = self.ircd.runActionUntilValue("channelstatuses", channel, targetUser, user, users=[targetUser, user], channels=[channel]) if channel else ""
            # Count server hops back to this server for remote users.
            # NOTE(review): the guard tests the *requesting* user's server,
            # not the target's — presumably intended to be targetUser; verify.
            hopcount = 0
            if user.uuid[:3] != self.ircd.serverID:
                countingServer = server
                hopcount = 1
                while countingServer.nextClosest != self.ircd.serverID:
                    countingServer = self.ircd.servers[countingServer.nextClosest]
                    hopcount += 1
            user.sendMessage(irc.RPL_WHOREPLY, mask, targetUser.ident, targetUser.host(), serverName, targetUser.nick, "{}{}{}".format("G" if isAway else "H", "*" if isOper else "", status), "{} {}".format(hopcount, targetUser.gecos))
        user.sendMessage(irc.RPL_ENDOFWHO, mask, "End of /WHO list")
        return True
whoCommand = WhoCommand()
|
nilq/baby-python
|
python
|
"""Abstractions for handling operations with reaktor `WishList` and `Voucher` (`GiftCards`) objects."""
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
# -*- coding: utf8 -*-
"""Port of NeHe Lesson 26 by Ivan Izuver <izuver@users.sourceforge.net>"""
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from PIL import Image
import sys
import gc
ESCAPE = b'\033'  # GLUT key code for the Escape key
# Number of the glut window.
window = 0
LightAmb = (0.7, 0.7, 0.7)  # Ambient light
LightDif = (1.0, 1.0, 0.0)  # Diffuse light
LightPos = (4.0, 4.0, 6.0, 1.0)  # Position of the light source
# q = GLUquadricObj()
xrot = yrot = 0.0  # Rotation about X and Y
xrotspeed = yrotspeed = 0.0  # Rotation speed about X and Y
zoom = -3.0  # Scene depth into the screen
height = 0.5  # Height of the ball above the floor
textures = {}  # cache: filename -> GL texture id (see LoadTextures)
def LoadTextures(fname):
    """Load image *fname* as a 2D GL texture, caching the texture id by filename.

    Returns the (possibly cached) GL texture name.  The image is uploaded as
    RGBA with nearest filtering and repeat wrapping.
    """
    if textures.get(fname) is not None:
        return textures.get(fname)
    texture = textures[fname] = glGenTextures(1)
    image = Image.open(fname)
    ix = image.size[0]
    iy = image.size[1]
    # 'RGBX' pads to 4 bytes/pixel; -1 flips rows to GL's bottom-up order.
    image = image.tobytes('raw', 'RGBX', 0, -1)
    # Create Texture
    glBindTexture(GL_TEXTURE_2D, texture)  # 2d texture (x and y size)
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
    glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, image)
    # Fix: the original set GL_CLAMP for WRAP_S/T and then immediately
    # overwrote both with GL_REPEAT; the dead GL_CLAMP calls are removed.
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)
    return texture
# A general OpenGL initialization function. Sets all of the initial parameters.
# We call this right after our OpenGL window is created.
def InitGL(Width, Height):
    """One-time GL setup: clear values, depth test, lighting and projection."""
    # This Will Clear The Background Color To Black
    glClearColor(0.2, 0.5, 1.0, 1.0)
    glClearDepth(1.0)  # Enables Clearing Of The Depth Buffer
    glClearStencil(0)
    glDepthFunc(GL_LEQUAL)  # The Type Of Depth Test To Do
    glEnable(GL_DEPTH_TEST)  # Enables Depth Testing
    glShadeModel(GL_SMOOTH)  # Enables Smooth Color Shading
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
    glEnable(GL_TEXTURE_2D)
    # Configure and enable a single positional light (module-level constants).
    glLightfv(GL_LIGHT0, GL_AMBIENT, LightAmb)
    glLightfv(GL_LIGHT0, GL_DIFFUSE, LightDif)
    glLightfv(GL_LIGHT0, GL_POSITION, LightPos)
    glEnable(GL_LIGHT0)
    glEnable(GL_LIGHTING)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()  # Reset The Projection Matrix
    # Calculate The Aspect Ratio Of The Window
    gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
    glMatrixMode(GL_MODELVIEW)
# The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)
def ReSizeGLScene(Width, Height):
    """GLUT reshape callback: refit the viewport and projection to the new size."""
    if Height == 0:  # Prevent A Divide By Zero If The Window Is Too Small
        Height = 1
    # Reset The Current Viewport And Perspective Transformation
    glViewport(0, 0, Width, Height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
    glMatrixMode(GL_MODELVIEW)
def DrawObject():
    """Draw the textured ball, then an additive sphere-mapped shine pass."""
    glColor3f(1.0, 1.0, 1.0)
    glBindTexture(GL_TEXTURE_2D, LoadTextures('NeHe.bmp'))
    Q = gluNewQuadric()
    gluQuadricNormals(Q, GL_SMOOTH)
    gluQuadricTexture(Q, GL_TRUE)
    # Sphere-mapped texgen gives the environment-reflection look.
    glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_SPHERE_MAP)
    glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_SPHERE_MAP)
    gluSphere(Q, 0.35, 32, 16)
    # Second pass: semi-transparent, additively blended highlight layer.
    glColor4f(1.0, 1.0, 1.0, 0.4)
    glEnable(GL_BLEND)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE)
    glEnable(GL_TEXTURE_GEN_S)
    glEnable(GL_TEXTURE_GEN_T)
    gluSphere(Q, 0.35, 32, 16)
    glDisable(GL_TEXTURE_GEN_S)
    glDisable(GL_TEXTURE_GEN_T)
    glDisable(GL_BLEND)
    gluDeleteQuadric(Q)
def DrawFloor():
    """Draw the textured 4x4 floor quad in the y=0 plane."""
    glBindTexture(GL_TEXTURE_2D, LoadTextures('NeHe2.bmp'))
    glBegin(GL_QUADS)  # Begin draw
    glNormal3f(0.0, 1.0, 0.0)  # Upper normal
    glTexCoord2f(0.0, 1.0)  # bottom left side of texture
    glVertex3f(-2.0, 0.0, 2.0)  # bottom left angle of floor
    glTexCoord2f(0.0, 0.0)  # upper left side of texture
    glVertex3f(-2.0, 0.0, -2.0)  # upper left angle of floor
    glTexCoord2f(1.0, 0.0)  # upper right side of texture
    glVertex3f(2.0, 0.0, -2.0)  # upper right angle of floor
    glTexCoord2f(1.0, 1.0)  # bottom right side of texture
    glVertex3f(2.0, 0.0, 2.0)  # bottom right angle of floor
    glEnd()  # finish draw
# The main drawing function. (optional)
def DrawGLScene(deactivate=False):
    """Render one frame: stenciled floor reflection of the ball, translucent floor, ball.

    ``deactivate`` skips rendering entirely.  Fix: the original default was
    ``True``, which made this registered GLUT display/idle callback a no-op
    so the window never drew anything — apparently leftover debugging.
    """
    if deactivate:
        return None
    # Clear The Screen And The Depth Buffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT)
    eqr = (0.0, -1.0, 0.0, 0.0)  # clip plane keeping geometry below y=0
    glLoadIdentity()  # Reset The View
    glTranslatef(0.0, -0.6, zoom)
    # Pass 1: write the floor's footprint into the stencil buffer only
    # (color writes disabled).
    glColorMask(0, 0, 0, 0)
    glEnable(GL_STENCIL_TEST)
    glStencilFunc(GL_ALWAYS, 1, 1)
    glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE)
    glDisable(GL_DEPTH_TEST)
    DrawFloor()
    glEnable(GL_DEPTH_TEST)
    glColorMask(1, 1, 1, 1)
    # Pass 2: draw the mirrored ball only where the stencil was set,
    # clipped to below the floor plane.
    glStencilFunc(GL_EQUAL, 1, 1)
    glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP)
    glEnable(GL_CLIP_PLANE0)
    glClipPlane(GL_CLIP_PLANE0, eqr)
    glPushMatrix()
    glScalef(1.0, -1.0, 1.0)
    glLightfv(GL_LIGHT0, GL_POSITION, LightPos)
    glTranslatef(0.0, height, 0.0)
    glRotatef(xrot, 1.0, 0.0, 0.0)
    glRotatef(yrot, 0.0, 1.0, 0.0)
    DrawObject()
    glPopMatrix()
    glDisable(GL_CLIP_PLANE0)
    glDisable(GL_STENCIL_TEST)
    glLightfv(GL_LIGHT0, GL_POSITION, LightPos)
    # Pass 3: blend the translucent floor over the reflection.
    glEnable(GL_BLEND)
    glDisable(GL_LIGHTING)
    glColor4f(1.0, 1.0, 1.0, 0.8)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    DrawFloor()
    glEnable(GL_LIGHTING)
    glDisable(GL_BLEND)
    # Pass 4: draw the real ball above the floor.
    glTranslatef(0.0, height, 0.0)
    glRotatef(xrot, 1.0, 0.0, 0.0)
    glRotatef(yrot, 0.0, 1.0, 0.0)
    DrawObject()
    glFlush()
    # since this is double buffered, swap the buffers to display what just got
    # drawn.
    glutSwapBuffers()
# The function called whenever a key is pressed. Note the use of Python tuples
# to pass in: (key, x, y)
def keyPressed(*args):
    """GLUT keyboard callback: destroy the window when ESC is pressed."""
    global window
    key = args[0]
    if key == ESCAPE:
        glutDestroyWindow(window)
def main():
    """Create the GLUT window, register callbacks and enter the main loop."""
    global window
    # pass arguments to init
    glutInit(sys.argv)
    # Select type of Display mode:
    #  Double buffer
    #  RGBA color
    #  Alpha components supported
    #  Depth buffer
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    # get a 640 x 480 window
    glutInitWindowSize(640, 480)
    # the window starts at the upper left corner of the screen
    glutInitWindowPosition(0, 0)
    # Okay, like the C version we retain the window id to use when closing, but for those of you new
    # to Python (like myself), remember this assignment would make the variable local and not global
    # if it weren't for the global declaration at the start of main.
    window = glutCreateWindow("Realistic Reflection by RISC")
    # Register the drawing function with glut, BUT in Python land, at least using PyOpenGL, we need to
    # set the function pointer and invoke a function to actually register the callback, otherwise it
    # would be very much like the C version of the code.
    glutDisplayFunc(DrawGLScene)
    # Uncomment this line to get full screen.
    # glutFullScreen()
    # When we are doing nothing, redraw the scene.
    glutIdleFunc(DrawGLScene)
    # Register the function called when our window is resized.
    glutReshapeFunc(ReSizeGLScene)
    # Register the function called when the keyboard is pressed.
    glutKeyboardFunc(keyPressed)
    # Print message to console, and kick off the main to get it rolling.
    print('Hit ESC key to quit.')
    # Initialize our window.
    InitGL(640, 480)
    # Start Event Processing Engine
    glutMainLoop()
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import functools
import queue
# Prefer the stdlib statistics module; fall back to a simple mean and no
# standard deviation on interpreters that lack it.
try:
    import statistics
    stdev = statistics.stdev
    mean = statistics.mean
except ImportError:
    stdev = None
    def mean(l):
        # Arithmetic mean of a non-empty sequence.
        return sum(l) / len(l)
# Highest-resolution monotonic clock available.
try:
    import time
    clock = time.perf_counter
except Exception:
    import timeit
    clock = timeit.default_timer
class tfloat(float):
    """A float (seconds) that renders as an ANSI-colored millisecond value."""

    # ANSI SGR color code; 39 is the terminal's default foreground.
    color = 39

    def __str__(self):
        millis = self * 1000
        return '\x1b[%dm%f\x1b[mms' % (self.color, millis)
def profile(func):
    """Decorator for methods: time each call and log it with color-coded stats.

    The wrapped object must expose ``debug_enabled`` (timing is skipped when
    falsy) and an ``info(fmt, *args)`` logger.  A rolling window of the last
    five timings drives the color: cyan while warming up, green at/below
    mean + stdev, red above mean + 2*stdev, yellow in between.
    """
    name = func.__name__
    # Rolling window of the five most recent call durations.
    samples = queue.deque(maxlen=5)
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.debug_enabled:
            return func(self, *args, **kwargs)
        start = clock()
        ret = func(self, *args, **kwargs)
        n = tfloat(clock() - start)
        if len(samples) < 2:
            # Not enough history for statistics yet: report in cyan.
            m = 0
            d = 0
            n.color = 36
        else:
            m = mean(samples)
            if stdev:
                d = tfloat(stdev(samples))
            else:
                d = 0  # statistics module unavailable; no deviation band
            if n <= m + d:
                n.color = 32
            elif n > m + d * 2:
                n.color = 31
            else:
                n.color = 33
        samples.append(n)
        # NOTE(review): the trailing ')' in the format string looks like a
        # typo (unbalanced paren in log output), kept as-is here.
        self.info(
            '\x1b[34m%s\x1b[m t = %s, \u00b5 = %s, \u03c3 = %s)', name, n, m, d
        )
        return ret
    return wrapper
|
nilq/baby-python
|
python
|
from output.models.nist_data.atomic.non_negative_integer.schema_instance.nistschema_sv_iv_atomic_non_negative_integer_pattern_3_xsd.nistschema_sv_iv_atomic_non_negative_integer_pattern_3 import NistschemaSvIvAtomicNonNegativeIntegerPattern3
__all__ = [
"NistschemaSvIvAtomicNonNegativeIntegerPattern3",
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MybankCreditLoantradeNewloanarRepayApplyModel(object):
    """Request model for the mybank.credit.loantrade.newloanar.repay.apply API.

    Every field is exposed as a property backed by a private ``_<name>``
    attribute, and ``to_alipay_dict``/``from_alipay_dict`` convert between
    model instances and plain dicts.  The original generated code repeated
    identical property and conversion boilerplate for all ten fields; this
    version derives everything from the single ``_FIELDS`` tuple while
    keeping the public interface and behavior unchanged.
    """

    # Public field names, in the order the generated code declared them.
    _FIELDS = (
        'apply_repay_fee',
        'apply_repay_int',
        'apply_repay_penalty',
        'apply_repay_prin',
        'cust_iprole_id',
        'loan_ar_no',
        'repay_amt',
        'repay_card_no',
        'repay_type',
        'request_id',
    )

    def __init__(self):
        # Initialize every backing attribute to None, matching the original
        # per-attribute assignments.
        for _name in self._FIELDS:
            setattr(self, '_' + _name, None)

    def _make_property(name):  # class-body helper; deleted below
        # Build the standard getter/setter pair over the '_<name>' slot.
        attr = '_' + name

        def getter(self):
            return getattr(self, attr)

        def setter(self, value):
            setattr(self, attr, value)

        return property(getter, setter)

    apply_repay_fee = _make_property('apply_repay_fee')
    apply_repay_int = _make_property('apply_repay_int')
    apply_repay_penalty = _make_property('apply_repay_penalty')
    apply_repay_prin = _make_property('apply_repay_prin')
    cust_iprole_id = _make_property('cust_iprole_id')
    loan_ar_no = _make_property('loan_ar_no')
    repay_amt = _make_property('repay_amt')
    repay_card_no = _make_property('repay_card_no')
    repay_type = _make_property('repay_type')
    request_id = _make_property('request_id')

    del _make_property

    def to_alipay_dict(self):
        """Serialize the model to a dict, omitting falsy fields.

        Values that provide their own ``to_alipay_dict`` are serialized
        recursively.  Note: as in the original, falsy values (None, 0, '')
        are skipped entirely.
        """
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict *d*; returns None for a falsy input."""
        if not d:
            return None
        o = MybankCreditLoantradeNewloanarRepayApplyModel()
        for name in MybankCreditLoantradeNewloanarRepayApplyModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
|
nilq/baby-python
|
python
|
"""
# Syntax of search templates
"""
import re
# SYNTACTIC ANALYSIS OF SEARCH TEMPLATE ###

# Quantifier keywords that may open, continue or terminate a quantifier block.
QWHERE = "/where/"
QHAVE = "/have/"
QWITHOUT = "/without/"
QWITH = "/with/"
QOR = "/or/"
QEND = "/-/"
QINIT = {QWHERE, QWITHOUT, QWITH}
QCONT = {QHAVE, QOR}
QTERM = {QEND}
PARENT_REF = ".."
# Backslash escapes recognized inside templates; VAL_ESCAPES are the ones
# that may also occur inside feature values.
ESCAPES = (
    "\\\\",
    "\\ ",
    "\\t",
    "\\n",
    "\\|",
    "\\=",
)
VAL_ESCAPES = {
    "\\|",
    "\\=",
}
# Relational-operator token (leading punctuation, optional tail).
opPat = r"(?:[.#&|\[\]<>:=-]+\S*)"
atomOpPat = r"(\s*)({op})\s+([^ \t=#<>~*]+)(?:(?:\s*\Z)|(?:\s+(.*)))$".format(op=opPat)
atomPat = r"(\s*)([^ \t=#<>~*]+)(?:(?:\s*\Z)|(?:\s+(.*)))$"
compPat = r"^([a-zA-Z0-9-@_]+)([<>])(.*)$"
identPat = r"^([a-zA-Z0-9-@_]+)([=#])(.+)$"
indentLinePat = r"^(\s*)(.*)"
kPat = r"^([^0-9]*)([0-9]+)([^0-9]+)$"
namePat = r"[A-Za-z0-9_.-]+"
# Fix: the original left the {op} placeholder unformatted, so lines with a
# relational operator before "name:" could never match; every sibling
# pattern applies .format(op=opPat), as done here.
namesPat = r"^\s*(?:{op}\s+)?([^ \t:=#<>~*]+):".format(op=opPat)
nonePat = r"^([a-zA-Z0-9-@_]+)(#?)\s*$"
truePat = r"^([a-zA-Z0-9-@_]+)[*]\s*$"
numPat = r"^-?[0-9]+$"
opLinePat = r"^(\s*)({op})\s*$".format(op=opPat)
opStripPat = r"^\s*{op}\s+(.*)$".format(op=opPat)
quPat = f"(?:{QWHERE}|{QHAVE}|{QWITHOUT}|{QWITH}|{QOR}|{QEND})"
quLinePat = r"^(\s*)({qu})\s*$".format(qu=quPat)
relPat = r"^(\s*)({nm})\s+({op})\s+({nm})\s*$".format(nm=namePat, op=opPat)
rePat = r"^([a-zA-Z0-9-@_]+)~(.*)$"
# Pre-compiled forms of the patterns above.
atomOpRe = re.compile(atomOpPat)
atomRe = re.compile(atomPat)
compRe = re.compile(compPat)
identRe = re.compile(identPat)
indentLineRe = re.compile(indentLinePat)
kRe = re.compile(kPat)
nameRe = re.compile(f"^{namePat}$")
namesRe = re.compile(namesPat)
numRe = re.compile(numPat)
noneRe = re.compile(nonePat)
trueRe = re.compile(truePat)
opLineRe = re.compile(opLinePat)
opStripRe = re.compile(opStripPat)
quLineRe = re.compile(quLinePat)
relRe = re.compile(relPat)
reRe = re.compile(rePat)
whiteRe = re.compile(r"^\s*(%|$)")
# Type of a compiled regex, used for isinstance checks elsewhere.
reTp = type(reRe)
def syntax(searchExe):
    """Tokenize the search template and report any syntax errors.

    Sets ``good``, ``badSyntax`` and ``searchLines`` on *searchExe*.  On
    failure the full template is echoed with line numbers, followed by the
    collected error messages, through the TF error channel.
    """
    error = searchExe.api.TF.error
    msgCache = searchExe._msgCache
    searchExe.good = True
    searchExe.badSyntax = []
    searchExe.searchLines = searchExe.searchTemplate.split("\n")
    offset = searchExe.offset
    _tokenize(searchExe)
    if searchExe.good:
        return
    searchExe.showOuterTemplate(msgCache)
    for (lineNo, line) in enumerate(searchExe.searchLines):
        error(f"{lineNo + offset:>2} {line}", tm=False, cache=msgCache)
    for (ln, eline) in searchExe.badSyntax:
        txt = eline if ln is None else f"line {ln + offset}: {eline}"
        error(txt, tm=False, cache=msgCache)
def _tokenize(searchExe):
    """Tokenize the search template lines of a search execution object.

    Fills `searchExe.tokens` with token dicts of kind `op`, `rel`, `atom`
    or `feat`.  Quantifier blocks (where/without/with/...) are not parsed
    here: their lines are collected verbatim and attached to the token
    that precedes the quantifier, to be fed to search() on another
    instance.  All syntax errors are appended to `searchExe.badSyntax`;
    on any error `searchExe.good` is set to False.
    """
    tokens = []

    def lastAtomToken():
        # Return the most recent atom token, skipping feature tokens.
        # Any other intervening token kind means there is no usable atom.
        for token in reversed(tokens):
            kind = token["kind"]
            if kind == "feat":
                continue
            if kind == "atom" and "otype" in token:
                return token
            return None
        return None

    def readFeatures(x, i):
        # Parse the feature specifications of line i;
        # return a dict of feature conditions, or None on error.
        features = {}
        featureString = x.replace("\\ ", chr(1)) if x is not None else ""
        featureList = featureString.split()
        good = True
        for featStr in featureList:
            if not parseFeatureVals(searchExe, featStr, features, i):
                good = False
        return features if good else None

    searchLines = searchExe.searchLines
    allGood = True
    # The template may contain nested quantifiers, but only the outer level
    # is detected here.  Everything contained in a quantifier is collected,
    # verbatim and without interpretation, into a new search template,
    # because it will be fed to search() on another instance.  Only the
    # lines of the outermost quantifiers are stripped of indentation.
    # Because indentation is not indicative of quantifier nesting, we keep
    # a stack of open quantifiers, just to be able to determine which
    # quantifier keywords belong to the outer level.
    curQu = []
    curQuTemplates = None
    for (i, line) in enumerate(searchLines):
        if whiteRe.match(line):
            continue
        opFeatures = {}

        # check whether this line carries a quantifier keyword,
        # and determine the indent of the line
        match = quLineRe.match(line)
        if match:
            (indent, lineQuKind) = match.groups()
        else:
            lineQuKind = None
            match = indentLineRe.match(line)
            indent = match.group(1)
        lineIndent = len(indent)

        # QUANTIFIER FILTERING
        #
        # Determine whether we are in a quantifier and whether one starts
        # or ends here.  Possible situations (with their error conditions):
        #
        #   UUO no outer - no q-keyword
        #   UBO no outer - q-keyword starts one
        #       ES not a start keyword, ET no preceding token,
        #       EA no preceding atom, EI atom at other indentation
        #   PBI outer - q-keyword init (starts an inner quantifier)
        #   PPO/PPI plain line inside outer/inner quantifier
        #   PCO/PCI continue keyword in outer/inner quantifier
        #       EP wrong precursor, EK wrong indentation
        #   PEO/PEI end keyword of outer/inner quantifier
        #       EP wrong precursor, EK wrong indentation
        #
        # At the end, every member still on the quantifier stack yields an
        # unterminated-quantifier error.
        curQuLine = None
        curQuKind = None
        curQuIndent = None
        curQuDepth = len(curQu)
        if curQuDepth:
            (curQuLine, curQuKind, curQuIndent) = curQu[-1]
        UUO = not curQuDepth and not lineQuKind
        UBO = not curQuDepth and lineQuKind
        PBI = curQuDepth and lineQuKind in QINIT
        PPO = curQuDepth == 1 and not lineQuKind
        PPI = curQuDepth > 1 and not lineQuKind
        PCO = curQuDepth == 1 and lineQuKind in QCONT
        PCI = curQuDepth > 1 and lineQuKind in QCONT
        PEO = curQuDepth == 1 and lineQuKind in QTERM
        PEI = curQuDepth > 1 and lineQuKind in QTERM
        (ES, ET, EA, EI, EP, EK) = (False,) * 6
        if UBO:
            ES = lineQuKind not in QINIT
            ET = len(tokens) == 0
            lastAtom = lastAtomToken()
            # BUG FIX: this used to read `not lastAtomToken` (the function
            # object, which is always truthy), so EA could never become
            # True, and EI then crashed with a TypeError whenever
            # lastAtom was None.
            EA = bool(tokens) and lastAtom is None
            EI = (
                bool(tokens)
                and lastAtom is not None
                and lastAtom["indent"] != lineIndent
            )
        if PCO or PCI:
            EP = (lineQuKind == QHAVE and curQuKind != QWHERE) or (
                lineQuKind == QOR and curQuKind not in {QWITH, QOR}
            )
            EK = curQu[-1][2] != lineIndent
        if PEO or PEI:
            EP = curQuKind in {QWHERE}
            EK = curQu[-1][2] != lineIndent

        # QUANTIFIER HANDLING
        #
        # Based on what is the case, we take actions:
        # swallow quantified templates, handle quantifier lines,
        # and let all other lines pass through.
        good = True
        for x in [True]:
            if UUO:
                # no quantifier business
                continue
            if UBO:
                # start a new quantifier from scratch
                if ES:
                    searchExe.badSyntax.append(
                        (i, f'Quantifier: Can not start with "{lineQuKind}:"')
                    )
                    good = False
                if ET:
                    searchExe.badSyntax.append((i, "Quantifier: No preceding tokens"))
                    good = False
                if EA or EI:
                    searchExe.badSyntax.append(
                        (
                            i,
                            "Quantifier: Does not immediately follow an atom at the same level",
                        )
                    )
                    good = False
                if tokens:
                    # BUG FIX: guard against an empty token list; the
                    # unguarded tokens[-1] raised IndexError when a
                    # quantifier opened the template.
                    prevAtom = tokens[-1]
                    curQu.append((i, lineQuKind, lineIndent))
                    curQuTemplates = [[]]
                    quantifiers = prevAtom.setdefault("quantifiers", [])
                    quantifiers.append((lineQuKind, curQuTemplates, i))
                continue
            if PBI:
                # start an inner quantifier: its lines are passed through
                # with the indentation of the outermost quantifier stripped
                outerIndent = curQu[0][2]
                strippedLine = line[outerIndent:]
                curQuTemplates[-1].append(strippedLine)
                curQu.append((i, lineQuKind, lineIndent))
            if PPO:
                # plain line inside the outer quantifier
                strippedLine = line[curQuIndent:]
                curQuTemplates[-1].append(strippedLine)
                continue
            if PPI:
                # plain line inside an inner quantifier
                outerIndent = curQu[0][2]
                strippedLine = line[outerIndent:]
                curQuTemplates[-1].append(strippedLine)
            if PCO or PCI:
                if EP:
                    searchExe.badSyntax.append(
                        (
                            i,
                            f'Quantifier: "{lineQuKind}" can not follow "{curQuKind}" on line {curQuLine}',
                        )
                    )
                    good = False
                if EK:
                    searchExe.badSyntax.append(
                        (
                            i,
                            (
                                f'Quantifier "{lineQuKind}"'
                                f' has not same indentation as "{curQuKind}" on line {curQuLine}'
                            ),
                        )
                    )
                    good = False
                if PCO:
                    # continuation keyword of the outer quantifier:
                    # start collecting a new sub-template
                    curQuTemplates.append([])
                else:
                    outerIndent = curQu[0][2]
                    strippedLine = line[outerIndent:]
                    curQuTemplates[-1].append(strippedLine)
                curQu[-1] = (i, lineQuKind, lineIndent)
                continue
            if PEO or PEI:
                if EP:
                    searchExe.badSyntax.append(
                        (
                            i,
                            (
                                f'Quantifier: "{lineQuKind}"'
                                f' : premature end of "{curQuKind}" on line {curQuLine}'
                            ),
                        )
                    )
                    good = False
                if EK:
                    searchExe.badSyntax.append(
                        (
                            i,
                            (
                                f'Quantifier "{lineQuKind}"'
                                f' has not same indentation as "{curQuKind}" on line {curQuLine}'
                            ),
                        )
                    )
                    good = False
                if PEO:
                    # the outermost quantifier ends here
                    curQuTemplates = None
                else:
                    outerIndent = curQu[0][2]
                    strippedLine = line[outerIndent:]
                    curQuTemplates[-1].append(strippedLine)
                curQu.pop()
                continue
        if not good:
            allGood = False
        if UUO:
            # go on with normal template tokenization
            pass
        else:
            # quantifier stuff has been dealt with
            continue

        # QUANTIFIER FREE HANDLING
        good = False
        for x in [True]:
            (kind, data) = parseLine(line)
            if kind == "op":
                (indent, op) = data
                if not parseFeatureVals(searchExe, op, opFeatures, i, asEdge=True):
                    good = False
                else:
                    if opFeatures:
                        op = (op, opFeatures)
                    tokens.append(dict(ln=i, kind="atom", indent=len(indent), op=op))
                    good = True
                break
            if kind == "rel":
                (indent, f, op, t) = data
                if not parseFeatureVals(searchExe, op, opFeatures, i, asEdge=True):
                    good = False
                else:
                    if opFeatures:
                        op = (op, opFeatures)
                    tokens.append(dict(ln=i, kind="rel", f=f, op=op, t=t))
                    good = True
                break
            if kind == "atom":
                (indent, op, name, otype, features) = data
                good = True
                if name != "":
                    mt = nameRe.match(name)
                    if not mt:
                        searchExe.badSyntax.append((i, f'Illegal name: "{name}"'))
                        good = False
                features = readFeatures(features, i)
                if features is None:
                    good = False
                else:
                    if op is not None:
                        if not parseFeatureVals(
                            searchExe, op, opFeatures, i, asEdge=True
                        ):
                            good = False
                    if good:
                        if opFeatures:
                            op = (op, opFeatures)
                        tokens.append(
                            dict(
                                ln=i,
                                kind="atom",
                                indent=len(indent),
                                op=op,
                                name=name,
                                otype=otype,
                                src=line.lstrip(),
                                features=features,
                            )
                        )
                break
            if kind == "feat":
                features = data[0]
                features = readFeatures(features, i)
                if features is None:
                    good = False
                else:
                    tokens.append(dict(ln=i, kind="feat", features=features))
                    good = True
                break
            # defensive: parseLine always yields one of the kinds above
            good = False
            searchExe.badSyntax.append((i, f"Unrecognized line: {line}"))
        if not good:
            allGood = False
    if curQu:
        for (curQuLine, curQuKind, curQuIndent) in curQu:
            searchExe.badSyntax.append(
                (curQuLine, f'Quantifier: Unterminated "{curQuKind}"')
            )
        good = False
        allGood = False
    if allGood:
        searchExe.tokens = tokens
    else:
        searchExe.good = False
def parseLine(line):
    """Classify one template line.

    Returns a pair (kind, data) where kind is one of
    "op", "rel", "atom" or "feat"; the data tuple holds the parsed
    components of the (escaped) line.
    """
    escLine = _esc(line)

    # a line consisting of an operator only
    match = opLineRe.match(escLine)
    if match:
        (indent, op) = match.groups()
        return ("op", (indent, op))

    # a relation: name op name
    match = relRe.match(escLine)
    if match:
        (indent, f, op, t) = match.groups()
        return ("rel", (indent, f, op, t))

    # an atom, optionally preceded by an operator
    matchOp = atomOpRe.match(escLine)
    matchPlain = None if matchOp else atomRe.match(escLine)
    if matchOp:
        (indent, op, atom, features) = matchOp.groups()
    elif matchPlain:
        op = None
        (indent, atom, features) = matchPlain.groups()
    if matchOp or matchPlain:
        # the atom may carry an explicit name before a colon
        (head, sep, tail) = atom.partition(":")
        if sep:
            name = head
            otype = tail
        else:
            name = ""
            otype = head
        if features is None:
            features = ""
        return ("atom", (indent, op, name, otype, features))

    # fallback: a bare feature-specification line
    return ("feat", (escLine,))
def parseFeatureVals(searchExe, featStr, features, i, asEdge=False):
    """Parse one feature condition and store it in *features*.

    Supported forms: `f*` (give value), `f` / `f#` (presence / absence),
    `f=v|w` / `f#v|w` ((in)equality against alternatives), `f<n` / `f>n`
    (numeric comparison) and `f~regex`.  With ``asEdge=True``, *featStr*
    is an edge operator and only the forms ``-f>``, ``<f-`` and ``<f>``
    carry a feature condition; anything else is accepted as-is.

    On success the parsed condition is entered into *features* under the
    feature name and True is returned.  On a syntax error a message for
    line *i* is appended to ``searchExe.badSyntax`` and False is returned.
    """
    if asEdge:
        if not (
            (featStr[0] == "-" and featStr[-1] == ">")
            or (featStr[0] == "<" and featStr[-1] == "-")
            or (featStr[0] == "<" and featStr[-1] == ">")
        ):
            return True
        feat = featStr[1:-1]
    else:
        # chr(1) is the placeholder for escaped spaces (see readFeatures)
        feat = featStr.replace(chr(1), " ")
    good = True
    for x in [True]:
        match = trueRe.match(feat)
        if match:
            (featN,) = match.groups()
            featName = _unesc(featN)
            featVals = (None, True)
            break
        match = noneRe.match(feat)
        if match:
            (featN, unequal) = match.groups()
            featName = _unesc(featN)
            featVals = None if unequal else True
            break
        match = identRe.match(feat)
        if match:
            (featN, comp, featValStr) = match.groups()
            featName = _unesc(featN)
            featValSet = frozenset(_unesc(featVal) for featVal in featValStr.split("|"))
            featVals = (comp == "=", featValSet)
            break
        match = compRe.match(feat)
        if match:
            (featN, comp, limit) = match.groups()
            featName = _unesc(featN)
            if not numRe.match(limit):
                searchExe.badSyntax.append((i, f'Limit is non numeric "{limit}"'))
                good = False
                featVals = None
            else:
                featVals = _makeLimit(int(limit), comp == ">")
            break
        match = reRe.match(feat)
        if match:
            (featN, valRe) = match.groups()
            featName = _unesc(featN)
            valRe = _unesc(valRe, inRe=True)
            try:
                featVals = re.compile(valRe)
            # BUG FIX: this was `except Exception() as err`, which names an
            # exception *instance* in the except clause and raises TypeError
            # at run time instead of catching; re.compile raises re.error.
            except re.error as err:
                searchExe.badSyntax.append(
                    (i, f'Wrong regular expression "{valRe}": "{err}"')
                )
                good = False
                featVals = None
            break
        searchExe.badSyntax.append((i, f'Unrecognized feature condition "{feat}"'))
        good = False
        featVals = None
    if good:
        features[featName] = featVals
    return good
def _genLine(kind, data):
    """Inverse of parseLine: render a parsed line back to template text."""
    if kind == "op":
        (indent, op) = data
        return f"{indent}{_unesc(op)}"
    if kind == "rel":
        (indent, f, op, t) = data
        return f"{indent}{f} {_unesc(op)} {t}"
    if kind == "atom":
        (indent, op, name, otype, features) = data
        pieces = [indent]
        if op is not None:
            pieces.append(f"{_unesc(op)} ")
        if name != "":
            pieces.append(f"{name}:")
        pieces.append(otype)
        featRep = _unesc(features)
        if featRep:
            pieces.append(f" {featRep}")
        return "".join(pieces)
    # remaining kind: a bare feature line
    return _unesc(data[0])
def cleanParent(atom, parentName):
    """Regenerate an atom line without its operator, guaranteeing a name.

    If the atom carries no explicit name, *parentName* is substituted.
    """
    (kind, data) = parseLine(atom)
    (indent, op, name, otype, features) = data
    effectiveName = name if name != "" else parentName
    return _genLine(kind, (indent, None, effectiveName, otype, features))
def deContext(quantifier, parentName):
    """Resolve parent references inside a quantifier's templates.

    Chooses a name for the parent atom (either *parentName* or a freshly
    invented one that clashes with no name used in the templates), then
    replaces every PARENT_REF occurrence in the quantified templates by
    that name.  Returns (quKind, newTemplates, parentName, ln) where each
    template is joined into a single string.
    """
    (quKind, quTemplates, ln) = quantifier
    if not parentName:
        # invent a fresh parent name, avoiding every name already used
        # in any of the quantified templates
        taken = {
            name
            for template in quTemplates
            for line in template
            for name in namesRe.findall(line)
        }
        parentName = "parent"
        while parentName in taken:
            parentName += "x"
    newQuTemplates = []
    newQuantifier = (quKind, newQuTemplates, parentName, ln)
    # replace .. (PARENT_REF) by parentName wherever applicable
    for template in quTemplates:
        replacedLines = []
        for line in template:
            (kind, data) = parseLine(line)
            newLine = line
            if kind == "rel":
                (indent, f, op, t) = data
                if PARENT_REF in (f, t):
                    newF = parentName if f == PARENT_REF else f
                    newT = parentName if t == PARENT_REF else t
                    newLine = _genLine(kind, (indent, newF, op, newT))
            elif kind == "atom":
                (indent, op, name, otype, features) = data
                if name == "" and otype == PARENT_REF:
                    newLine = _genLine(kind, (indent, op, name, parentName, features))
            replacedLines.append(newLine)
        newQuTemplates.append("\n".join(replacedLines))
    return newQuantifier
def _makeLimit(n, isLower):
if isLower:
return lambda x: x is not None and x > n
return lambda x: x is not None and x < n
def _esc(x):
    """Replace each escape sequence in ESCAPES by a low control character.

    Sequence number *k* in ESCAPES is mapped to chr(k); inverted by _unesc.
    """
    for (code, sequence) in enumerate(ESCAPES):
        x = x.replace(sequence, chr(code))
    return x
def _unesc(x, inRe=False):
    """Invert _esc: map control-character placeholders back to literals.

    With ``inRe=True``, sequences listed in VAL_ESCAPES are rendered as
    backslash-escaped characters so they stay safe inside a regex.
    """
    for (code, sequence) in enumerate(ESCAPES):
        marker = chr(code)
        if inRe and sequence in VAL_ESCAPES:
            x = x.replace(marker, f"\\{sequence[1]}")
        else:
            x = x.replace(marker, sequence[1])
    return x
|
nilq/baby-python
|
python
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from . import FairseqDataset
class ConcatSentencesDataset(FairseqDataset):
    """Dataset whose item *i* is the concatenation of item *i* of several
    aligned datasets.

    All wrapped datasets must have the same length.  Sizes, token counts
    and item sizes are the element-wise sums over the wrapped datasets;
    batching and ordering are delegated to the first dataset.
    """

    def __init__(self, *datasets):
        super().__init__()
        self.datasets = datasets
        assert all(
            len(ds) == len(datasets[0]) for ds in datasets
        ), 'datasets must have the same length'

    def __getitem__(self, index):
        parts = [ds[index] for ds in self.datasets]
        return torch.cat(parts)

    def __len__(self):
        return len(self.datasets[0])

    def collater(self, samples):
        # delegate batching to the first dataset's collater
        return self.datasets[0].collater(samples)

    @property
    def sizes(self):
        return sum(ds.sizes for ds in self.datasets)

    def num_tokens(self, index):
        return sum(ds.num_tokens(index) for ds in self.datasets)

    def size(self, index):
        return sum(ds.size(index) for ds in self.datasets)

    def ordered_indices(self):
        # ordering is taken from the first dataset
        return self.datasets[0].ordered_indices()

    @property
    def supports_prefetch(self):
        return any(getattr(ds, 'supports_prefetch', False) for ds in self.datasets)

    def prefetch(self, indices):
        for ds in self.datasets:
            if getattr(ds, 'supports_prefetch', False):
                ds.prefetch(indices)
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
import client
import common
import start
if __name__ == '__main__':
    # Launch the curses client: set up windows, read and validate config,
    # then either run the interactive client loop (coloured config) or
    # fall back to the simple login flow.
    start.check_dirs()
    common.w.replace_windows(*start.get_windows())
    common.w.curses = True
    config = start.read_config()
    colour = start.validate_config(config)
    if colour:
        start.set_colours(config['colour'])
        common.w.colour = True
        common.w.welcome()
        start.login(config['user'])
        common.w.addstr(
            common.w.infobar,
            'Enter \'h\' or \'help\' if you need help.'
        )
        # BUG FIX: FreeClient lives in the `client` module; the original
        # read `common.client.FreeClient()`, i.e. looked FreeClient up on
        # whatever `common.client` held before this assignment.
        common.client = client.FullClient() if (
            common.mc.is_subscribed
        ) else client.FreeClient()
        while True:
            common.client.transition()
    else:
        start.easy_login()
|
nilq/baby-python
|
python
|
from unittest import TestCase
import string
from assertions import assert_result
from analyzer.predefined_recognizers.iban_recognizer import IbanRecognizer, IBAN_GENERIC_SCORE, LETTERS
from analyzer.entity_recognizer import EntityRecognizer
# Recognizer under test, and the entity label it is expected to emit.
iban_recognizer = IbanRecognizer()
entities = ["IBAN_CODE"]
def update_iban_checksum(iban):
    """Recompute the two IBAN check digits and splice them into *iban*.

    Based on: https://www.ibantest.com/en/how-is-the-iban-check-digit-calculated
    """
    compact = iban.replace(' ', '')
    # rearrange: BBAN + country code + '00', letters mapped to digits
    rearranged = (compact[4:] + compact[:2] + '00').upper().translate(LETTERS)
    checksum = 98 - (int(rearranged) % 97)
    return '{}{:0>2}{}'.format(iban[:2], checksum, iban[4:])
class TestIbanRecognizer(TestCase):
# Test valid and invalid ibans per each country which supports IBAN - without context
    # Each country section below exercises the same five cases: valid IBAN
    # without / with spaces, invalid format with a repaired checksum,
    # invalid length, and invalid checksum.
    # Albania (8n, 16c) ALkk bbbs sssx cccc cccc cccc cccc
    def test_AL_iban_valid_no_spaces(self):
        iban = 'AL47212110090000000235698741'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)

    def test_AL_iban_valid_with_spaces(self):
        iban = 'AL47 2121 1009 0000 0002 3569 8741'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)

    def test_AL_iban_invalid_format_valid_checksum(self):
        iban = 'AL47 212A 1009 0000 0002 3569 8741'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_AL_iban_invalid_length(self):
        iban = 'AL47 212A 1009 0000 0002 3569 874'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_AL_iban_invalid_checksum(self):
        iban = 'AL47 2121 1009 0000 0002 3569 8740'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    # Andorra (8n, 12c) ADkk bbbs sssx cccc cccc cccc
    # NOTE(review): method name deviates from the test_XX_iban_... scheme
    # used elsewhere ("test_AD_valid_iban_..."); harmless but inconsistent.
    def test_AD_valid_iban_no_spaces(self):
        iban = 'AD1200012030200359100100'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)

    def test_AD_iban_valid_with_spaces(self):
        iban = 'AD12 0001 2030 2003 5910 0100'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)

    def test_AD_iban_invalid_format_valid_checksum(self):
        iban = 'AD12000A2030200359100100'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_AD_iban_invalid_length(self):
        iban = 'AD12000A203020035910010'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_AD_iban_invalid_checksum(self):
        iban = 'AD12 0001 2030 2003 5910 0101'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    # Austria (16n) ATkk bbbb bccc cccc cccc
    def test_AT_iban_valid_no_spaces(self):
        iban = 'AT611904300234573201'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)

    def test_AT_iban_valid_with_spaces(self):
        iban = 'AT61 1904 3002 3457 3201'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)

    def test_AT_iban_invalid_format_valid_checksum(self):
        iban = 'AT61 1904 A002 3457 3201'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_AT_iban_invalid_length(self):
        iban = 'AT61 1904 3002 3457 320'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_AT_iban_invalid_checksum(self):
        iban = 'AT61 1904 3002 3457 3202'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    # Azerbaijan (4c,20n) AZkk bbbb cccc cccc cccc cccc cccc
    def test_AZ_iban_valid_no_spaces(self):
        iban = 'AZ21NABZ00000000137010001944'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)

    def test_AZ_iban_valid_with_spaces(self):
        iban = 'AZ21 NABZ 0000 0000 1370 1000 1944'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)

    def test_AZ_iban_invalid_format_valid_checksum(self):
        iban = 'AZ21NABZ000000001370100019'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_AZ_iban_invalid_length(self):
        iban = 'AZ21NABZ0000000013701000194'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_AZ_iban_invalid_checksum(self):
        iban = 'AZ21NABZ00000000137010001945'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0
    # Bahrain (4a,14c) BHkk bbbb cccc cccc cccc cc
    def test_BH_iban_valid_no_spaces(self):
        iban = 'BH67BMAG00001299123456'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)

    # NOTE(review): method name breaks the naming scheme
    # ("testBH_iban_valid__with_spaces"); renaming would change the public
    # test name, so it is only flagged here.
    def testBH_iban_valid__with_spaces(self):
        iban = 'BH67 BMAG 0000 1299 1234 56'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)

    def test_BH_iban_invalid_format_valid_checksum(self):
        iban = 'BH67BMA100001299123456'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_BH_iban_invalid_length(self):
        iban = 'BH67BMAG0000129912345'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_BH_iban_invalid_checksum(self):
        iban = 'BH67BMAG00001299123457'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    # Belarus (4c, 4n, 16c) BYkk bbbb aaaa cccc cccc cccc cccc
    def test_BY_iban_valid_no_spaces(self):
        iban = 'BY13NBRB3600900000002Z00AB00'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)

    def test_BY_iban_valid_with_spaces(self):
        iban = 'BY13 NBRB 3600 9000 0000 2Z00 AB00'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)

    def test_BY_iban_invalid_format_valid_checksum(self):
        iban = 'BY13NBRBA600900000002Z00AB00'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_BY_iban_invalid_length(self):
        iban = 'BY13 NBRB 3600 9000 0000 2Z00 AB0'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_BY_iban_invalid_checksum(self):
        iban = 'BY13NBRB3600900000002Z00AB01'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    # Belgium (12n) BEkk bbbc cccc ccxx
    def test_BE_iban_valid_no_spaces(self):
        iban = 'BE68539007547034'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 16, EntityRecognizer.MAX_SCORE)

    def test_BE_iban_valid_with_spaces(self):
        iban = 'BE71 0961 2345 6769'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 19, EntityRecognizer.MAX_SCORE)

    def test_BE_iban_invalid_format_valid_checksum(self):
        iban = 'BE71 A961 2345 6769'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_BE_iban_invalid_length(self):
        iban = 'BE6853900754703'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_BE_iban_invalid_checksum(self):
        iban = 'BE71 0961 2345 6760'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    # Bosnia and Herzegovina (16n) BAkk bbbs sscc cccc ccxx
    def test_BA_iban_valid_no_spaces(self):
        iban = 'BA391290079401028494'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)

    def test_BA_iban_valid_with_spaces(self):
        iban = 'BA39 1290 0794 0102 8494'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)

    def test_BA_iban_invalid_format_valid_checksum(self):
        iban = 'BA39 A290 0794 0102 8494'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_BA_iban_invalid_length(self):
        iban = 'BA39129007940102849'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_BA_iban_invalid_checksum(self):
        iban = 'BA39 1290 0794 0102 8495'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0
    # Brazil (23n,1a,1c) BRkk bbbb bbbb ssss sccc cccc ccct n
    def test_BR_iban_valid_no_spaces(self):
        iban = 'BR9700360305000010009795493P1'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)

    def test_BR_iban_valid_with_spaces(self):
        iban = 'BR97 0036 0305 0000 1000 9795 493P 1'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 36, EntityRecognizer.MAX_SCORE)

    def test_BR_iban_invalid_format_valid_checksum(self):
        iban = 'BR97 0036 A305 0000 1000 9795 493P 1'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_BR_iban_invalid_length(self):
        iban = 'BR9700360305000010009795493P'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_BR_iban_invalid_checksum(self):
        iban = 'BR97 0036 0305 0000 1000 9795 493P 2'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    # Bulgaria (4a,6n,8c) BGkk bbbb ssss ttcc cccc cc
    def test_BG_iban_valid_no_spaces(self):
        iban = 'BG80BNBG96611020345678'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)

    def test_BG_iban_valid_with_spaces(self):
        iban = 'BG80 BNBG 9661 1020 3456 78'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)

    def test_BG_iban_invalid_format_valid_checksum(self):
        iban = 'BG80 BNBG 9661 A020 3456 78'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_BG_iban_invalid_length(self):
        iban = 'BG80BNBG9661102034567'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_BG_iban_invalid_checksum(self):
        iban = 'BG80 BNBG 9661 1020 3456 79'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    # Costa Rica (18n) CRkk 0bbb cccc cccc cccc cc    0 = always zero
    def test_CR_iban_valid_no_spaces(self):
        iban = 'CR05015202001026284066'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)

    def test_CR_iban_valid_with_spaces(self):
        iban = 'CR05 0152 0200 1026 2840 66'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)

    def test_CR_iban_invalid_format_valid_checksum(self):
        iban = 'CR05 0152 0200 1026 2840 6A'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_CR_iban_invalid_length(self):
        iban = 'CR05 0152 0200 1026 2840 6'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_CR_iban_invalid_checksum(self):
        iban = 'CR05 0152 0200 1026 2840 67'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    # Croatia (17n) HRkk bbbb bbbc cccc cccc c
    def test_HR_iban_valid_no_spaces(self):
        iban = 'HR1210010051863000160'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 21, EntityRecognizer.MAX_SCORE)

    def test_HR_iban_valid_with_spaces(self):
        iban = 'HR12 1001 0051 8630 0016 0'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 26, EntityRecognizer.MAX_SCORE)

    def test_HR_iban_invalid_format_valid_checksum(self):
        iban = 'HR12 001 0051 8630 0016 A'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_HR_iban_invalid_length(self):
        iban = 'HR121001005186300016'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    # NOTE(review): "Checksum" capitalised, unlike the sibling tests.
    def test_HR_iban_invalid_Checksum(self):
        iban = 'HR12 1001 0051 8630 0016 1'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0
    # Cyprus (8n,16c) CYkk bbbs ssss cccc cccc cccc cccc
    def test_CY_iban_valid_no_spaces(self):
        iban = 'CY17002001280000001200527600'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)

    def test_CY_iban_valid_with_spaces(self):
        iban = 'CY17 0020 0128 0000 0012 0052 7600'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)

    def test_CY_iban_invalid_format_valid_checksum(self):
        iban = 'CY17 0020 A128 0000 0012 0052 7600'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_CY_iban_invalid_length(self):
        iban = 'CY17 0020 0128 0000 0012 0052 760'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_CY_iban_invalid_checksum(self):
        iban = 'CY17 0020 0128 0000 0012 0052 7601'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    # Czech Republic (20n) CZkk bbbb ssss sscc cccc cccc
    def test_CZ_iban_valid_no_spaces(self):
        iban = 'CZ6508000000192000145399'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)

    def test_CZ_iban_valid_with_spaces(self):
        iban = 'CZ65 0800 0000 1920 0014 5399'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)

    def test_CZ_iban_invalid_format_valid_checksum(self):
        iban = 'CZ65 0800 A000 1920 0014 5399'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_CZ_iban_invalid_length(self):
        iban = 'CZ65 0800 0000 1920 0014 539'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_CZ_iban_invalid_checksum(self):
        iban = 'CZ65 0800 0000 1920 0014 5390'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    # Denmark (14n) DKkk bbbb cccc cccc cc
    def test_DK_iban_valid_no_spaces(self):
        iban = 'DK5000400440116243'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)

    def test_DK_iban_valid_with_spaces(self):
        iban = 'DK50 0040 0440 1162 43'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)

    def test_DK_iban_invalid_format_valid_checksum(self):
        iban = 'DK50 0040 A440 1162 43'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_DK_iban_invalid_length(self):
        iban = 'DK50 0040 0440 1162 4'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_DK_iban_invalid_checksum(self):
        iban = 'DK50 0040 0440 1162 44'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    # Dominican Republic (4a,20n) DOkk bbbb cccc cccc cccc cccc cccc
    def test_DO_iban_valid_no_spaces(self):
        iban = 'DO28BAGR00000001212453611324'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)

    def test_DO_iban_valid_with_spaces(self):
        iban = 'DO28 BAGR 0000 0001 2124 5361 1324'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)

    def test_DO_iban_invalid_format_valid_checksum(self):
        iban = 'DO28 BAGR A000 0001 2124 5361 1324'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_DO_iban_invalid_length(self):
        iban = 'DO28 BAGR 0000 0001 2124 5361 132'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0

    def test_DO_iban_invalid_checksum(self):
        iban = 'DO28 BAGR 0000 0001 2124 5361 1325'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0
# East Timor (Timor-Leste) (19n) TLkk bbbc cccc cccc cccc cxx
def test_TL_iban_valid_no_spaces(self):
    iban = 'TL380080012345678910157'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 23, EntityRecognizer.MAX_SCORE)
def test_TL_iban_valid_with_spaces(self):
    iban = 'TL38 0080 0123 4567 8910 157'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_TL_iban_invalid_format_valid_checksum(self):
    # Letter in a numeric-only position must prevent a match.
    iban = 'TL38 A080 0123 4567 8910 157'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_TL_iban_invalid_checksum(self):
    iban = 'TL38 0080 0123 4567 8910 158'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Estonia (16n) EEkk bbss cccc cccc cccx
def test_EE_iban_valid_no_spaces(self):
    iban = 'EE382200221020145685'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
def test_EE_iban_valid_with_spaces(self):
    iban = 'EE38 2200 2210 2014 5685'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_EE_iban_invalid_format_valid_checksum(self):
    iban = 'EE38 A200 2210 2014 5685'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_EE_iban_invalid_checksum(self):
    iban = 'EE38 2200 2210 2014 5686'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Faroe Islands (14n) FOkk bbbb cccc cccc cx
def test_FO_iban_valid_no_spaces(self):
    iban = 'FO6264600001631634'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)
def test_FO_iban_valid_with_spaces(self):
    iban = 'FO62 6460 0001 6316 34'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_FO_iban_invalid_format_valid_checksum(self):
    iban = 'FO62 A460 0001 6316 34'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_FO_iban_invalid_checksum(self):
    iban = 'FO62 6460 0001 6316 35'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Finland (14n) FIkk bbbb bbcc cccc cx
def test_FI_iban_valid_no_spaces(self):
    iban = 'FI2112345600000785'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)
def test_FI_iban_valid_with_spaces(self):
    iban = 'FI21 1234 5600 0007 85'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_FI_iban_invalid_format_valid_checksum(self):
    iban = 'FI21 A234 5600 0007 85'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_FI_iban_invalid_checksum(self):
    iban = 'FI21 1234 5600 0007 86'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# France (10n,11c,2n) FRkk bbbb bsss sscc cccc cccc cxx
def test_FR_iban_valid_no_spaces(self):
    # France allows a mixed-alphanumeric section ('3M02'), exercised here.
    iban = 'FR1420041010050500013M02606'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_FR_iban_valid_with_spaces(self):
    iban = 'FR14 2004 1010 0505 0001 3M02 606'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_FR_iban_invalid_format_valid_checksum(self):
    iban = 'FR14 A004 1010 0505 0001 3M02 606'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_FR_iban_invalid_checksum(self):
    iban = 'FR14 2004 1010 0505 0001 3M02 607'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Georgia (2c,16n) GEkk bbcc cccc cccc cccc cc
def test_GE_iban_valid_no_spaces(self):
    iban = 'GE29NB0000000101904917'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_GE_iban_valid_with_spaces(self):
    iban = 'GE29 NB00 0000 0101 9049 17'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_GE_iban_invalid_format_valid_checksum(self):
    # Letter placed past the 2-char bank code, inside the numeric section.
    iban = 'GE29 NBA0 0000 0101 9049 17'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_GE_iban_invalid_checksum(self):
    iban = 'GE29 NB00 0000 0101 9049 18'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Germany (18n) DEkk bbbb bbbb cccc cccc cc
def test_DE_iban_valid_no_spaces(self):
    iban = 'DE89370400440532013000'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_DE_iban_valid_with_spaces(self):
    iban = 'DE89 3704 0044 0532 0130 00'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_DE_iban_invalid_format_valid_checksum(self):
    iban = 'DE89 A704 0044 0532 0130 00'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_DE_iban_invalid_checksum(self):
    iban = 'DE89 3704 0044 0532 0130 01'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Gibraltar (4a,15c) GIkk bbbb cccc cccc cccc ccc
def test_GI_iban_valid_no_spaces(self):
    iban = 'GI75NWBK000000007099453'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 23, EntityRecognizer.MAX_SCORE)
def test_GI_iban_valid_with_spaces(self):
    iban = 'GI75 NWBK 0000 0000 7099 453'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_GI_iban_invalid_format_valid_checksum(self):
    # Lowercase 'a' fails the strict GI pattern (uppercase 4a bank code)
    # but still matches the generic IBAN pattern, hence one result at the
    # weaker IBAN_GENERIC_SCORE rather than MAX_SCORE.
    iban = 'GI75 aWBK 0000 0000 7099 453'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 28, IBAN_GENERIC_SCORE)
def test_GI_iban_invalid_checksum(self):
    iban = 'GI75 NWBK 0000 0000 7099 454'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Greece (7n,16c) GRkk bbbs sssc cccc cccc cccc ccc
def test_GR_iban_valid_no_spaces(self):
    iban = 'GR1601101250000000012300695'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_GR_iban_valid_with_spaces(self):
    iban = 'GR16 0110 1250 0000 0001 2300 695'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_GR_iban_invalid_format_valid_checksum(self):
    iban = 'GR16 A110 1250 0000 0001 2300 695'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_GR_iban_invalid_checksum(self):
    iban = 'GR16 0110 1250 0000 0001 2300 696'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Greenland (14n) GLkk bbbb cccc cccc cc
def test_GL_iban_valid_no_spaces(self):
    iban = 'GL8964710001000206'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)
def test_GL_iban_valid_with_spaces(self):
    iban = 'GL89 6471 0001 0002 06'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_GL_iban_invalid_format_valid_checksum(self):
    iban = 'GL89 A471 0001 0002 06'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_GL_iban_invalid_checksum(self):
    iban = 'GL89 6471 0001 0002 07'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Guatemala (4c,20c) GTkk bbbb mmtt cccc cccc cccc cccc
def test_GT_iban_invalid_format_valid_checksum(self):
    # GT's sections are all alphanumeric, so the format is broken here by
    # an extra trailing character (over-length) instead of a letter swap.
    iban = 'GT82 TRAJ 0102 0000 0012 1002 9690 A'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_GT_iban_valid_no_spaces(self):
    iban = 'GT82TRAJ01020000001210029690'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_GT_iban_valid_with_spaces(self):
    iban = 'GT82 TRAJ 0102 0000 0012 1002 9690'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_GT_iban_invalid_checksum(self):
    iban = 'GT82 TRAJ 0102 0000 0012 1002 9691'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Hungary (24n) HUkk bbbs sssx cccc cccc cccc cccx
def test_HU_iban_valid_no_spaces(self):
    iban = 'HU42117730161111101800000000'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_HU_iban_valid_with_spaces(self):
    iban = 'HU42 1177 3016 1111 1018 0000 0000'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_HU_iban_invalid_format_valid_checksum(self):
    iban = 'HU42 A177 3016 1111 1018 0000 0000'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_HU_iban_invalid_checksum(self):
    iban = 'HU42 1177 3016 1111 1018 0000 0001'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Iceland (22n) ISkk bbbb sscc cccc iiii iiii ii
def test_IS_iban_valid_no_spaces(self):
    iban = 'IS140159260076545510730339'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 26, EntityRecognizer.MAX_SCORE)
def test_IS_iban_valid_with_spaces(self):
    iban = 'IS14 0159 2600 7654 5510 7303 39'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 32, EntityRecognizer.MAX_SCORE)
def test_IS_iban_invalid_format_valid_checksum(self):
    iban = 'IS14 A159 2600 7654 5510 7303 39'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_IS_iban_invalid_checksum(self):
    iban = 'IS14 0159 2600 7654 5510 7303 30'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Ireland (4c,14n) IEkk aaaa bbbb bbcc cccc cc
def test_IE_iban_valid_no_spaces(self):
    iban = 'IE29AIBK93115212345678'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_IE_iban_valid_with_spaces(self):
    iban = 'IE29 AIBK 9311 5212 3456 78'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_IE_iban_invalid_format_valid_checksum(self):
    iban = 'IE29 AIBK A311 5212 3456 78'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_IE_iban_invalid_checksum(self):
    iban = 'IE29 AIBK 9311 5212 3456 79'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Israel (19n) ILkk bbbn nncc cccc cccc ccc
def test_IL_iban_valid_no_spaces(self):
    iban = 'IL620108000000099999999'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 23, EntityRecognizer.MAX_SCORE)
def test_IL_iban_valid_with_spaces(self):
    iban = 'IL62 0108 0000 0009 9999 999'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_IL_iban_invalid_format_valid_checksum(self):
    iban = 'IL62 A108 0000 0009 9999 999'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_IL_iban_valid_checksum(self):
    # NOTE(review): misnamed — the altered last digit makes the checksum
    # INVALID (hence the no-match assertion); should be
    # test_IL_iban_invalid_checksum, matching the earlier country sections.
    iban = 'IL62 0108 0000 0009 9999 990'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Italy (1a,10n,12c) ITkk xbbb bbss sssc cccc cccc ccc
def test_IT_iban_valid_no_spaces(self):
    iban = 'IT60X0542811101000000123456'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_IT_iban_valid_with_spaces(self):
    iban = 'IT60 X054 2811 1010 0000 0123 456'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_IT_iban_invalid_format_valid_checksum(self):
    # 'W' placed in the numeric bank-code section (after the 1a check char).
    iban = 'IT60 XW54 2811 1010 0000 0123 456'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_IT_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_IT_iban_invalid_checksum.
    iban = 'IT60 X054 2811 1010 0000 0123 457'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Jordan (4a,22n) JOkk bbbb ssss cccc cccc cccc cccc cc
def test_JO_iban_valid_no_spaces(self):
    iban = 'JO94CBJO0010000000000131000302'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 30, EntityRecognizer.MAX_SCORE)
def test_JO_iban_valid_with_spaces(self):
    iban = 'JO94 CBJO 0010 0000 0000 0131 0003 02'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 37, EntityRecognizer.MAX_SCORE)
def test_JO_iban_invalid_format_valid_checksum(self):
    iban = 'JO94 CBJO A010 0000 0000 0131 0003 02'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_JO_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_JO_iban_invalid_checksum.
    iban = 'JO94 CBJO 0010 0000 0000 0131 0003 03'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Kazakhstan (3n,13c) KZkk bbbc cccc cccc cccc
def test_KZ_iban_valid_no_spaces(self):
    iban = 'KZ86125KZT5004100100'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
def test_KZ_iban_valid_with_spaces(self):
    iban = 'KZ86 125K ZT50 0410 0100'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_KZ_iban_invalid_format_valid_checksum(self):
    iban = 'KZ86 A25K ZT50 0410 0100'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_KZ_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_KZ_iban_invalid_checksum.
    iban = 'KZ86 125K ZT50 0410 0101'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Kosovo (4n,10n,2n) XKkk bbbb cccc cccc cccc
def test_XK_iban_valid_no_spaces(self):
    iban = 'XK051212012345678906'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
def test_XK_iban_valid_with_spaces(self):
    iban = 'XK05 1212 0123 4567 8906'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_XK_iban_invalid_format_valid_checksum(self):
    iban = 'XK05 A212 0123 4567 8906'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_XK_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_XK_iban_invalid_checksum.
    iban = 'XK05 1212 0123 4567 8907'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Kuwait (4a,22c) KWkk bbbb cccc cccc cccc cccc cccc cc
def test_KW_iban_valid_no_spaces(self):
    iban = 'KW81CBKU0000000000001234560101'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 30, EntityRecognizer.MAX_SCORE)
def test_KW_iban_valid_with_spaces(self):
    iban = 'KW81 CBKU 0000 0000 0000 1234 5601 01'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 37, EntityRecognizer.MAX_SCORE)
def test_KW_iban_invalid_format_valid_checksum(self):
    # Lowercase 'a' fails the strict KW pattern but still matches the
    # generic IBAN pattern, so one result at IBAN_GENERIC_SCORE.
    iban = 'KW81 aBKU 0000 0000 0000 1234 5601 01'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 37, IBAN_GENERIC_SCORE)
def test_KW_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_KW_iban_invalid_checksum.
    iban = 'KW81 CBKU 0000 0000 0000 1234 5601 02'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Latvia (4a,13c) LVkk bbbb cccc cccc cccc c
def test_LV_iban_valid_no_spaces(self):
    iban = 'LV80BANK0000435195001'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 21, EntityRecognizer.MAX_SCORE)
def test_LV_iban_valid_with_spaces(self):
    iban = 'LV80 BANK 0000 4351 9500 1'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 26, EntityRecognizer.MAX_SCORE)
def test_LV_iban_invalid_format_valid_checksum(self):
    # Lowercase 'b' fails the strict LV pattern; generic pattern still hits.
    iban = 'LV80 bANK 0000 4351 9500 1'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 26, IBAN_GENERIC_SCORE)
def test_LV_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_LV_iban_invalid_checksum.
    iban = 'LV80 BANK 0000 4351 9500 2'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Lebanon (4n,20c) LBkk bbbb cccc cccc cccc cccc cccc
def test_LB_iban_valid_no_spaces(self):
    iban = 'LB62099900000001001901229114'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_LB_iban_valid_with_spaces(self):
    iban = 'LB62 0999 0000 0001 0019 0122 9114'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_LB_iban_invalid_format_valid_checksum(self):
    iban = 'LB62 A999 0000 0001 0019 0122 9114'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_LB_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_LB_iban_invalid_checksum.
    iban = 'LB62 0999 0000 0001 0019 0122 9115'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Liechtenstein (5n,12c) LIkk bbbb bccc cccc cccc c
def test_LI_iban_valid_no_spaces(self):
    iban = 'LI21088100002324013AA'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 21, EntityRecognizer.MAX_SCORE)
def test_LI_iban_valid_with_spaces(self):
    iban = 'LI21 0881 0000 2324 013A A'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 26, EntityRecognizer.MAX_SCORE)
def test_LI_iban_invalid_format_valid_checksum(self):
    iban = 'LI21 A881 0000 2324 013A A'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_LI_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here (last char A->B);
    # should be test_LI_iban_invalid_checksum.
    iban = 'LI21 0881 0000 2324 013A B'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Lithuania (16n) LTkk bbbb bccc cccc cccc
def test_LT_iban_valid_no_spaces(self):
    iban = 'LT121000011101001000'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
def test_LT_iban_valid_with_spaces(self):
    iban = 'LT12 1000 0111 0100 1000'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_LT_iban_invalid_format_valid_checksum(self):
    iban = 'LT12 A000 0111 0100 1000'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_LT_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_LT_iban_invalid_checksum.
    iban = 'LT12 1000 0111 0100 1001'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Luxembourg (3n,13c) LUkk bbbc cccc cccc cccc
def test_LU_iban_valid_no_spaces(self):
    iban = 'LU280019400644750000'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
def test_LU_iban_valid_with_spaces(self):
    iban = 'LU28 0019 4006 4475 0000'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_LU_iban_invalid_format_valid_checksum(self):
    iban = 'LU28 A019 4006 4475 0000'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_LU_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_LU_iban_invalid_checksum.
    iban = 'LU28 0019 4006 4475 0001'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Malta (4a,5n,18c) MTkk bbbb ssss sccc cccc cccc cccc ccc
def test_MT_iban_valid_no_spaces(self):
    iban = 'MT84MALT011000012345MTLCAST001S'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 31, EntityRecognizer.MAX_SCORE)
def test_MT_iban_valid_with_spaces(self):
    iban = 'MT84 MALT 0110 0001 2345 MTLC AST0 01S'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 38, EntityRecognizer.MAX_SCORE)
def test_MT_iban_invalid_format_valid_checksum(self):
    iban = 'MT84 MALT A110 0001 2345 MTLC AST0 01S'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_MT_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here (last char S->T);
    # should be test_MT_iban_invalid_checksum.
    iban = 'MT84 MALT 0110 0001 2345 MTLC AST0 01T'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Mauritania (23n) MRkk bbbb bsss sscc cccc cccc cxx
def test_MR_iban_valid_no_spaces(self):
    iban = 'MR1300020001010000123456753'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_MR_iban_valid_with_spaces(self):
    iban = 'MR13 0002 0001 0100 0012 3456 753'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_MR_iban_invalid_format_valid_checksum(self):
    iban = 'MR13 A002 0001 0100 0012 3456 753'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_MR_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_MR_iban_invalid_checksum.
    iban = 'MR13 0002 0001 0100 0012 3456 754'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Mauritius (4a,19n,3a) MUkk bbbb bbss cccc cccc cccc 000m mm
def test_MU_iban_valid_no_spaces(self):
    iban = 'MU17BOMM0101101030300200000MUR'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 30, EntityRecognizer.MAX_SCORE)
def test_MU_iban_valid_with_spaces(self):
    iban = 'MU17 BOMM 0101 1010 3030 0200 000M UR'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 37, EntityRecognizer.MAX_SCORE)
def test_MU_iban_invalid_format_valid_checksum(self):
    iban = 'MU17 BOMM A101 1010 3030 0200 000M UR'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_MU_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here (currency R->S);
    # should be test_MU_iban_invalid_checksum.
    iban = 'MU17 BOMM 0101 1010 3030 0200 000M US'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Moldova (2c,18c) MDkk bbcc cccc cccc cccc cccc
def test_MD_iban_valid_no_spaces(self):
    iban = 'MD24AG000225100013104168'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_MD_iban_valid_with_spaces(self):
    iban = 'MD24 AG00 0225 1000 1310 4168'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_MD_iban_invalid_format_valid_checksum(self):
    # MD's sections are all alphanumeric, so the format is broken by an
    # extra trailing digit (over-length) rather than a letter swap.
    iban = 'MD24 AG00 0225 1000 1310 4168 9'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_MD_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_MD_iban_invalid_checksum.
    iban = 'MD24 AG00 0225 1000 1310 4169'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Monaco (10n,11c,2n) MCkk bbbb bsss sscc cccc cccc cxx
def test_MC_iban_valid_no_spaces(self):
    iban = 'MC5811222000010123456789030'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_MC_iban_valid_with_spaces(self):
    iban = 'MC58 1122 2000 0101 2345 6789 030'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_MC_iban_invalid_format_valid_checksum(self):
    iban = 'MC58 A122 2000 0101 2345 6789 030'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_MC_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_MC_iban_invalid_checksum.
    iban = 'MC58 1122 2000 0101 2345 6789 031'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Montenegro (18n) MEkk bbbc cccc cccc cccc xx
def test_ME_iban_valid_no_spaces(self):
    iban = 'ME25505000012345678951'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_ME_iban_valid_with_spaces(self):
    iban = 'ME25 5050 0001 2345 6789 51'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_ME_iban_invalid_format_valid_checksum(self):
    iban = 'ME25 A050 0001 2345 6789 51'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_ME_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_ME_iban_invalid_checksum.
    iban = 'ME25 5050 0001 2345 6789 52'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Netherlands (4a,10n) NLkk bbbb cccc cccc cc
def test_NL_iban_valid_no_spaces(self):
    iban = 'NL91ABNA0417164300'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)
def test_NL_iban_valid_with_spaces(self):
    iban = 'NL91 ABNA 0417 1643 00'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_NL_iban_invalid_format_valid_checksum(self):
    # Digit '1' in the 4a (letters-only) bank-code section.
    iban = 'NL91 1BNA 0417 1643 00'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_NL_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_NL_iban_invalid_checksum.
    iban = 'NL91 ABNA 0417 1643 01'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# North Macedonia (3n,10c,2n) MKkk bbbc cccc cccc cxx
def test_MK_iban_valid_no_spaces(self):
    iban = 'MK07250120000058984'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 19, EntityRecognizer.MAX_SCORE)
def test_MK_iban_valid_with_spaces(self):
    iban = 'MK07 2501 2000 0058 984'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 23, EntityRecognizer.MAX_SCORE)
def test_MK_iban_invalid_format_valid_checksum(self):
    iban = 'MK07 A501 2000 0058 984'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_MK_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_MK_iban_invalid_checksum.
    iban = 'MK07 2501 2000 0058 985'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Norway (11n) NOkk bbbb cccc ccx
def test_NO_iban_valid_no_spaces(self):
    iban = 'NO9386011117947'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 15, EntityRecognizer.MAX_SCORE)
def test_NO_iban_valid_with_spaces(self):
    iban = 'NO93 8601 1117 947'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)
def test_NO_iban_invalid_format_valid_checksum(self):
    iban = 'NO93 A601 1117 947'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_NO_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_NO_iban_invalid_checksum.
    iban = 'NO93 8601 1117 948'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Pakistan (4c,16n) PKkk bbbb cccc cccc cccc cccc
def test_PK_iban_valid_no_spaces(self):
    iban = 'PK36SCBL0000001123456702'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_PK_iban_valid_with_spaces(self):
    iban = 'PK36 SCBL 0000 0011 2345 6702'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_PK_iban_invalid_format_valid_checksum(self):
    iban = 'PK36 SCBL A000 0011 2345 6702'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_PK_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_PK_iban_invalid_checksum.
    iban = 'PK36 SCBL 0000 0011 2345 6703'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Palestinian territories (4c,21n) PSkk bbbb xxxx xxxx xccc cccc cccc c
def test_PS_iban_valid_no_spaces(self):
    iban = 'PS92PALS000000000400123456702'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_PS_iban_valid_with_spaces(self):
    iban = 'PS92 PALS 0000 0000 0400 1234 5670 2'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 36, EntityRecognizer.MAX_SCORE)
def test_PS_iban_invalid_format_valid_checksum(self):
    iban = 'PS92 PALS A000 0000 0400 1234 5670 2'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_PS_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_PS_iban_invalid_checksum.
    iban = 'PS92 PALS 0000 0000 0400 1234 5670 3'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Poland (24n) PLkk bbbs sssx cccc cccc cccc cccc
def test_PL_iban_valid_no_spaces(self):
    iban = 'PL61109010140000071219812874'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_PL_iban_valid_with_spaces(self):
    iban = 'PL61 1090 1014 0000 0712 1981 2874'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_PL_iban_invalid_format_valid_checksum(self):
    iban = 'PL61 A090 1014 0000 0712 1981 2874'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_PL_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_PL_iban_invalid_checksum.
    iban = 'PL61 1090 1014 0000 0712 1981 2875'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Portugal (21n) PTkk bbbb ssss cccc cccc cccx x
def test_PT_iban_valid_no_spaces(self):
    iban = 'PT50000201231234567890154'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 25, EntityRecognizer.MAX_SCORE)
def test_PT_iban_valid_with_spaces(self):
    iban = 'PT50 0002 0123 1234 5678 9015 4'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 31, EntityRecognizer.MAX_SCORE)
def test_PT_iban_invalid_format_valid_checksum(self):
    iban = 'PT50 A002 0123 1234 5678 9015 4'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_PT_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here; should be
    # test_PT_iban_invalid_checksum.
    iban = 'PT50 0002 0123 1234 5678 9015 5'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Qatar (4a,21c) QAkk bbbb cccc cccc cccc cccc cccc c
def test_QA_iban_valid_no_spaces(self):
    iban = 'QA58DOHB00001234567890ABCDEFG'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_QA_iban_valid_with_spaces(self):
    iban = 'QA58 DOHB 0000 1234 5678 90AB CDEF G'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 36, EntityRecognizer.MAX_SCORE)
def test_QA_iban_invalid_format_valid_checksum(self):
    # Digit '0' in the 4a (letters-only) bank-code section.
    iban = 'QA58 0OHB 0000 1234 5678 90AB CDEF G'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_QA_iban_valid_checksum(self):
    # NOTE(review): misnamed — checksum is invalid here (last char G->H);
    # should be test_QA_iban_invalid_checksum.
    iban = 'QA58 DOHB 0000 1234 5678 90AB CDEF H'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
#### Reunion
# Romania (4a,16c) ROkk bbbb cccc cccc cccc cccc
def test_RO_iban_valid_no_spaces(self):
iban = 'RO49AAAA1B31007593840000'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_RO_iban_valid_with_spaces(self):
iban = 'RO49 AAAA 1B31 0075 9384 0000'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_RO_iban_invalid_format_valid_checksum(self):
iban = 'RO49 0AAA 1B31 0075 9384 0000'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_RO_iban_valid_checksum(self):
iban = 'RO49 AAAA 1B31 0075 9384 0001'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
### Saint Barthelemy
### Saint Lucia
### Saint Martin
### Saint Pierrer
# San Marino (1a,10n,12c) SMkk xbbb bbss sssc cccc cccc ccc
def test_SM_iban_valid_no_spaces(self):
iban = 'SM86U0322509800000000270100'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_SM_iban_valid_with_spaces(self):
iban = 'SM86 U032 2509 8000 0000 0270 100'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_SM_iban_invalid_format_valid_checksum(self):
iban = 'SM86 0032 2509 8000 0000 0270 100'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_SM_iban_valid_checksum(self):
iban = 'SM86 U032 2509 8000 0000 0270 101'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
### Sao Tome
# Saudi Arabia (2n,18c) SAkk bbcc cccc cccc cccc cccc
def test_SA_iban_valid_no_spaces(self):
iban = 'SA0380000000608010167519'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_SA_iban_valid_with_spaces(self):
iban = 'SA03 8000 0000 6080 1016 7519'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_SA_iban_invalid_format_valid_checksum(self):
iban = 'SA03 A000 0000 6080 1016 7519'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_SA_iban_valid_checksum(self):
iban = 'SA03 8000 0000 6080 1016 7510'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Serbia (18n) RSkk bbbc cccc cccc cccc xx
def test_RS_iban_valid_no_spaces(self):
iban = 'RS35260005601001611379'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_RS_iban_valid_with_spaces(self):
iban = 'RS35 2600 0560 1001 6113 79'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_RS_iban_invalid_format_valid_checksum(self):
iban = 'RS35 A600 0560 1001 6113 79'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_RS_iban_valid_checksum(self):
iban = 'RS35 2600 0560 1001 6113 70'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Slovakia (20n) SKkk bbbb ssss sscc cccc cccc
# BUG FIX: these four tests were copy-pasted from the Serbia group above and
# kept the test_RS_* names. A later def with the same name replaces the
# earlier one in the class body, so the Serbia tests were silently never run.
# Renamed to test_SK_* so both country groups are actually collected.
def test_SK_iban_valid_no_spaces(self):
    iban = 'SK3112000000198742637541'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_SK_iban_valid_with_spaces(self):
    iban = 'SK31 1200 0000 1987 4263 7541'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_SK_iban_invalid_format_valid_checksum(self):
    iban = 'SK31 A200 0000 1987 4263 7541'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_SK_iban_valid_checksum(self):
    iban = 'SK31 1200 0000 1987 4263 7542'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Per-country four-test pattern (valid with/without spaces must match with
# MAX_SCORE; broken format or broken checksum must not match).
# Slovenia (15n) SIkk bbss sccc cccc cxx
def test_SI_iban_valid_no_spaces(self):
    iban = 'SI56263300012039086'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 19, EntityRecognizer.MAX_SCORE)
def test_SI_iban_valid_with_spaces(self):
    iban = 'SI56 2633 0001 2039 086'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 23, EntityRecognizer.MAX_SCORE)
def test_SI_iban_invalid_format_valid_checksum(self):
    iban = 'SI56 A633 0001 2039 086'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_SI_iban_valid_checksum(self):
    iban = 'SI56 2633 0001 2039 087'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Spain (20n) ESkk bbbb ssss xxcc cccc cccc
def test_ES_iban_valid_no_spaces(self):
    iban = 'ES9121000418450200051332'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_ES_iban_valid_with_spaces(self):
    iban = 'ES91 2100 0418 4502 0005 1332'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_ES_iban_invalid_format_valid_checksum(self):
    iban = 'ES91 A100 0418 4502 0005 1332'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_ES_iban_valid_checksum(self):
    iban = 'ES91 2100 0418 4502 0005 1333'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Sweden (20n) SEkk bbbc cccc cccc cccc cccc
def test_SE_iban_valid_no_spaces(self):
    iban = 'SE4550000000058398257466'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_SE_iban_valid_with_spaces(self):
    iban = 'SE45 5000 0000 0583 9825 7466'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_SE_iban_invalid_format_valid_checksum(self):
    iban = 'SE45 A000 0000 0583 9825 7466'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_SE_iban_valid_checksum(self):
    iban = 'SE45 5000 0000 0583 9825 7467'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Switzerland (5n,12c) CHkk bbbb bccc cccc cccc c
def test_CH_iban_valid_no_spaces(self):
    iban = 'CH9300762011623852957'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 21, EntityRecognizer.MAX_SCORE)
def test_CH_iban_valid_with_spaces(self):
    iban = 'CH93 0076 2011 6238 5295 7'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 26, EntityRecognizer.MAX_SCORE)
def test_CH_iban_invalid_format_valid_checksum(self):
    iban = 'CH93 A076 2011 6238 5295 7'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_CH_iban_valid_checksum(self):
    iban = 'CH93 0076 2011 6238 5295 8'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Tunisia (20n) TNkk bbss sccc cccc cccc cccc
def test_TN_iban_valid_no_spaces(self):
    iban = 'TN5910006035183598478831'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_TN_iban_valid_with_spaces(self):
    iban = 'TN59 1000 6035 1835 9847 8831'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_TN_iban_invalid_format_valid_checksum(self):
    iban = 'TN59 A000 6035 1835 9847 8831'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_TN_iban_valid_checksum(self):
    # BUG FIX: this test used a Swiss IBAN ('CH93 0076 2011 6238 5295 9')
    # copy-pasted from the CH group, so it exercised Switzerland instead of
    # Tunisia. It now uses the Tunisian IBAN from the tests above with its
    # last digit altered, making the checksum invalid, so no match expected.
    iban = 'TN59 1000 6035 1835 9847 8832'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Per-country four-test pattern, followed by negative tests for malformed
# IBANs (unknown country code, wrong length, surrounding context).
# Turkey (5n,17c) TRkk bbbb bxcc cccc cccc cccc cc
def test_TR_iban_valid_no_spaces(self):
    iban = 'TR330006100519786457841326'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 26, EntityRecognizer.MAX_SCORE)
def test_TR_iban_valid_with_spaces(self):
    iban = 'TR33 0006 1005 1978 6457 8413 26'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 32, EntityRecognizer.MAX_SCORE)
def test_TR_iban_invalid_format_valid_checksum(self):
    iban = 'TR33 A006 1005 1978 6457 8413 26'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_TR_iban_valid_checksum(self):
    iban = 'TR33 0006 1005 1978 6457 8413 27'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# United Arab Emirates (3n,16n) AEkk bbbc cccc cccc cccc ccc
def test_AE_iban_valid_no_spaces(self):
    iban = 'AE070331234567890123456'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 23, EntityRecognizer.MAX_SCORE)
def test_AE_iban_valid_with_spaces(self):
    iban = 'AE07 0331 2345 6789 0123 456'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_AE_iban_invalid_format_valid_checksum(self):
    iban = 'AE07 A331 2345 6789 0123 456'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_AE_iban_valid_checksum(self):
    iban = 'AE07 0331 2345 6789 0123 457'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# United Kingdom (4a,14n) GBkk bbbb ssss sscc cccc cc
def test_GB_iban_valid_no_spaces(self):
    iban = 'GB29NWBK60161331926819'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_GB_iban_valid_with_spaces(self):
    iban = 'GB29 NWBK 6016 1331 9268 19'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_GB_iban_invalid_format_valid_checksum(self):
    iban = 'GB29 1WBK 6016 1331 9268 19'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_GB_iban_valid_checksum(self):
    iban = 'GB29 NWBK 6016 1331 9268 10'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Vatican City (3n,15n) VAkk bbbc cccc cccc cccc cc
def test_VA_iban_valid_no_spaces(self):
    iban = 'VA59001123000012345678'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_VA_iban_valid_with_spaces(self):
    iban = 'VA59 0011 2300 0012 3456 78'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_VA_iban_invalid_format_valid_checksum(self):
    iban = 'VA59 A011 2300 0012 3456 78'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_VA_iban_valid_checksum(self):
    iban = 'VA59 0011 2300 0012 3456 79'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Virgin Islands, British (4c,16n) VGkk bbbb cccc cccc cccc cccc
def test_VG_iban_valid_no_spaces(self):
    iban = 'VG96VPVG0000012345678901'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_VG_iban_valid_with_spaces(self):
    iban = 'VG96 VPVG 0000 0123 4567 8901'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 1
    assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_VG_iban_invalid_format_valid_checksum(self):
    iban = 'VG96 VPVG A000 0123 4567 8901'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_VG_iban_valid_checksum(self):
    iban = 'VG96 VPVG 0000 0123 4567 8902'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
# Test Invalid IBANs
def test_iban_invalid_country_code_invalid_checksum(self):
    iban = 'AB150120690000003111141'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_iban_invalid_country_code_valid_checksum(self):
    # Even with a repaired checksum, an unknown country code must not match.
    iban = 'AB150120690000003111141'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_iban_too_short_valid_checksum(self):
    iban = 'IL15 0120 6900 0000'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_iban_too_long_valid_checksum(self):
    iban = 'IL15 0120 6900 0000 3111 0120 6900 0000 3111 141'
    iban = update_iban_checksum(iban)
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
def test_invalid_IL_iban_with_exact_context_does_not_change_score(self):
    # Supporting context must not rescue an IBAN with a bad checksum.
    iban = 'IL150120690000003111141'
    context = 'my iban number is '
    results = iban_recognizer.analyze(context + iban, entities)
    assert len(results) == 0
def test_AL_iban_invalid_country_code_but_checksum_is_correct(self):
    # NOTE(review): the IBAN below is an 'AM' code (not 'AL' as the test name
    # suggests) — presumably an unsupported country with a passing mod-97
    # checksum; confirm the intended country code.
    iban = 'AM47212110090000000235698740'
    results = iban_recognizer.analyze(iban, entities)
    assert len(results) == 0
|
nilq/baby-python
|
python
|
"""
Define the list of possible commands that TeX might handle. These commands
might be composed of multiple instructions, such as 'input', which requires
characters forming a file-name as an argument.
"""
from enum import Enum
class Commands(Enum):
    """Symbolic names for the commands a TeX interpreter might handle.

    Each member's value is a string tag; some commands (e.g. 'input') are
    composed of multiple instructions elsewhere in the system.
    """
    assign = 'ASSIGN'
    relax = 'RELAX'
    left_brace = 'LEFT_BRACE'
    right_brace = 'RIGHT_BRACE'
    begin_group = 'BEGIN_GROUP'
    end_group = 'END_GROUP'
    show_token = 'SHOW_TOKEN'
    show_box = 'SHOW_BOX'
    show_lists = 'SHOW_LISTS'
    show_the = 'SHOW_THE'
    ship_out = 'SHIP_OUT'
    ignore_spaces = 'IGNORE_SPACES'
    set_after_assignment_token = 'SET_AFTER_ASSIGNMENT_TOKEN'
    add_to_after_group_tokens = 'ADD_TO_AFTER_GROUP_TOKENS'
    message = 'MESSAGE'
    error_message = 'ERROR_MESSAGE'
    open_input = 'OPEN_INPUT'
    close_input = 'CLOSE_INPUT'
    open_output = 'OPEN_OUTPUT'
    close_output = 'CLOSE_OUTPUT'
    write = 'WRITE'
    do_special = 'DO_SPECIAL'
    add_penalty = 'ADD_PENALTY'
    add_kern = 'ADD_KERN'
    add_math_kern = 'ADD_MATH_KERN'
    un_penalty = 'UN_PENALTY'
    un_kern = 'UN_KERN'
    un_glue = 'UN_GLUE'
    mark = 'MARK'
    insert = 'INSERT'
    vertical_adjust = 'VERTICAL_ADJUST'
    add_leaders = 'ADD_LEADERS'
    add_space = 'ADD_SPACE'
    add_box = 'ADD_BOX'
    unpack_horizontal_box = 'UNPACK_HORIZONTAL_BOX'
    unpack_vertical_box = 'UNPACK_VERTICAL_BOX'
    indent = 'INDENT'
    no_indent = 'NO_INDENT'
    par = 'PAR'
    add_horizontal_glue = 'ADD_HORIZONTAL_GLUE'
    add_vertical_glue = 'ADD_VERTICAL_GLUE'
    move_box_left = 'MOVE_BOX_LEFT'
    move_box_right = 'MOVE_BOX_RIGHT'
    raise_box = 'RAISE_BOX'
    lower_box = 'LOWER_BOX'
    add_horizontal_rule = 'ADD_HORIZONTAL_RULE'
    add_vertical_rule = 'ADD_VERTICAL_RULE'
    horizontal_align = 'HORIZONTAL_ALIGN'
    vertical_align = 'VERTICAL_ALIGN'
    end = 'END'
    dump = 'DUMP'
    # NOTE(review): value breaks the name.upper() pattern used by every other
    # member ('CONTROL_SPACE' vs 'ADD_CONTROL_SPACE'); left unchanged because
    # consumers may compare against the string value.
    add_control_space = 'CONTROL_SPACE'
    add_character_explicit = 'ADD_CHARACTER_EXPLICIT'
    add_character_code = 'ADD_CHARACTER_CODE'
    add_character_token = 'ADD_CHARACTER_TOKEN'
    add_accent = 'ADD_ACCENT'
    add_italic_correction = 'ADD_ITALIC_CORRECTION'
    add_discretionary = 'ADD_DISCRETIONARY'
    add_discretionary_hyphen = 'ADD_DISCRETIONARY_HYPHEN'
    do_math_shift = 'DO_MATH_SHIFT'
|
nilq/baby-python
|
python
|
# Read a student's name and grade average, classify the result, and print a
# small report. Prompts and output strings are kept exactly as before.
aluno = {
    'nome': str(input('Nome: ')),
    'media': float(input('Média: ')),
}
# Classification thresholds: < 5 fails, [5, 7) means make-up exam, >= 7 passes.
if aluno['media'] < 5:
    aluno['status'] = 'Reprovado!'
elif aluno['media'] < 7:
    aluno['status'] = 'Recuperação!'
else:
    aluno['status'] = 'Aprovado!'
for linha in (f'Nome: {aluno["nome"]}.',
              f'Média: {aluno["media"]}.',
              f'Situação: {aluno["status"]}'):
    print(linha)
|
nilq/baby-python
|
python
|
# Example script: demonstrates the Infermedica search endpoint through the
# infermedica_api client. API credentials are set up by config.setup_examples().
import config
config.setup_examples()
import infermedica_api
if __name__ == '__main__':
    api = infermedica_api.get_api()
    # Free-text search over all evidence types.
    print('Look for evidences containing phrase headache:')
    print(api.search('headache'), end="\n\n")
    # Restrict results to female-specific evidence.
    print('Look for evidences containing phrase breast, only for female specific symptoms:')
    print(api.search('breast', sex='female'), end="\n\n")
    # Cap the number of returned results.
    print('Look for evidences containing phrase breast, only for female specific symptoms, with the limit of 5 results:')
    print(api.search('breast', sex='female', max_results=5), end="\n\n")
    # Filter the search by evidence category (symptoms and risk factors only).
    print('Look for symptoms and risk factors containing phrase trauma:')
    print(api.search('trauma', filters=[
        infermedica_api.SEARCH_FILTERS.SYMPTOMS, infermedica_api.SEARCH_FILTERS.RISK_FACTORS]), end="\n\n")
nilq/baby-python
|
python
|
from flask import Flask, request, jsonify, redirect, url_for
# Imported for its side effect of registering xlsx support in pyexcel —
# presumably; TODO confirm it is still required, nothing references it directly.
import pyexcel.ext.xlsx
import pandas as pd
# NOTE(review): secure_filename is never used below; modern werkzeug exposes
# it from werkzeug.utils, so this import may break on upgrade.
from werkzeug import secure_filename
UPLOAD_FOLDER = 'upload'  # upload destination; currently unused by the handlers
ALLOWED_EXTENSIONS = set(['xlsx'])  # extension whitelist checked by allowed_file()
app=Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS.

    The comparison is now case-insensitive, so uploads like 'report.XLSX'
    are accepted as well (previously only exact lower-case extensions matched).
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route("/upload", methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
file1 = request.files['file1']
file2 = request.files['file2']
if file1 and allowed_file(file1.filename) and file2 and allowed_file(file2.filename):
dh1 = pd.read_excel(file1)
dh1=dh1.dropna(how='any')
dh1z=dh1.iloc[:,(1,2,3,4,5,6,7)]
dh1z.columns=['Mon','Tue','Wed','Thu','Fri','Sat','Sun']
dh1z.to_json("DH1.json")
dh2 = pd.read_excel(file2)
dh2=dh2.dropna(how='any')
dh2z=dh2.iloc[:,(1,2,3,4,5,6,7)]
dh2z.columns=['Mon','Tue','Wed','Thu','Fri','Sat','Sun']
dh2z.to_json("DH2.json")
return 'Done! :)'
return '''
<!doctype html>
<title>Upload an excel file</title>
<h1>Excel file upload (xlsx only)</h1>
<form action="" method=post enctype=multipart/form-data><p>
DH1:<input type=file name=file1><br>
DH2:<input type=file name=file2><br>
<input type=submit value=Upload>
</form>
'''
if __name__ == "__main__":
app.run()
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import rasa_core
from rasa_core.agent import Agent
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.interpreter import RasaNLUInterpreter
from rasa_core.utils import EndpointConfig
from rasa_core.run import serve_application
logger = logging.getLogger(__name__)
def train_dialogue(domain_file = 'chat_domain.yml',
                   model_path = './models/dialogue',
                   training_data_file = './data/stories.md'):
    """Train the Rasa Core dialogue model and persist it to disk.

    :param domain_file: YAML domain definition (intents, actions, slots).
    :param model_path: directory the trained dialogue model is written to.
    :param training_data_file: markdown stories used as training data.
    :return: the trained Agent.
    """
    # MemoizationPolicy replays exactly-seen stories; KerasPolicy generalizes.
    agent = Agent(domain_file, policies = [MemoizationPolicy(), KerasPolicy()])
    data = agent.load_data(training_data_file)
    agent.train(
        data,
        epochs = 300,
        batch_size = 50,
        validation_split = 0.2)  # hold out 20% of the stories for validation
    agent.persist(model_path)
    return agent
def run_bot(serve_forever=True):
    """Load the trained NLU and dialogue models and serve the bot on stdin.

    :param serve_forever: NOTE(review): currently unused — the cmdline
        channel below always blocks; kept for interface compatibility.
    :return: the loaded Agent (only reached once the channel terminates).
    """
    interpreter = RasaNLUInterpreter('./models/nlu/default/iitnlu')
    # Custom actions are expected to be served separately on port 5055.
    action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    agent = Agent.load('./models/dialogue', interpreter=interpreter, action_endpoint=action_endpoint)
    rasa_core.run.serve_application(agent ,channel='cmdline')
    return agent
if __name__ == '__main__':
    # Training is run separately; uncomment to retrain before serving.
    #train_dialogue()
    run_bot()
|
nilq/baby-python
|
python
|
import abc
from collections import namedtuple, OrderedDict
from typing import Collection, Optional, Union, Iterable, Tuple, Generator, Set, Dict, List, Any, Callable
from profilehooks import timecall
import logging
import itertools
import random
from .actions import pass_actions, tichu_actions, no_tichu_actions, play_dog_actions, all_wish_actions_gen, TradeAction, \
MutableTrick
from .actions import (PlayerAction, PlayCombination, PlayFirst, PlayBomb, TichuAction, WishAction, PassAction,
WinTrickAction, GiveDragonAwayAction, CardTrade, Trick)
from .cards import CardSet, Card, CardRank, Deck, DOG_COMBINATION
from .error import TichuEnvValueError, LogicError, IllegalActionError
from .utils import check_param, check_isinstance, check_all_isinstance, check_true
__all__ = ('TichuState', 'HandCards', 'History', 'BaseTichuState',
'InitialState', 'FullCardsState', 'BeforeTrading', 'AfterTrading', 'RolloutTichuState')
logger = logging.getLogger(__name__)
class HandCards(object):
    """Immutable container holding the hand cards of all four players."""

    __slots__ = ('_cards', )

    def __init__(self, cards0: Iterable=(), cards1: Iterable=(), cards2: Iterable=(), cards3: Iterable=()):
        self._cards: Tuple[CardSet, CardSet, CardSet, CardSet] = (CardSet(cards0),
                                                                  CardSet(cards1),
                                                                  CardSet(cards2),
                                                                  CardSet(cards3))

    def has_cards(self, player: int, cards: Union[Collection[Card], Card]):
        """Return True iff *player* holds all of *cards* (a collection or one Card)."""
        assert player in range(4)
        try:
            res = set(cards).issubset(self._cards[player])
            assert all(isinstance(c, Card) for c in cards)
            return res
        except TypeError:
            # cards is only 1 single card
            assert isinstance(cards, Card)
            return cards in self._cards[player]

    def remove_cards(self, player: int, cards: Collection[Card], raise_on_uncomplete=True):
        """
        :param player:
        :param cards:
        :param raise_on_uncomplete: If True, Raises an ValueError when the player does not have all the cards
        :return: a new HandCards instance with the cards removed from *player*

        >>> hc = HandCards((Card.DOG, Card.TWO_HOUSE), (Card.PHOENIX, Card.DRAGON), (Card.FIVE_HOUSE,), (Card.SIX_HOUSE,))
        >>> hc.remove_cards(0, (Card.DOG,))
        """
        # make sure cards is a set (O(1) membership tests below)
        if not isinstance(cards, Set):
            cards = set(cards)
        assert all(isinstance(c, Card) for c in cards)
        new_cards = list((c for c in self._cards[player] if c not in cards))
        if raise_on_uncomplete and len(new_cards) + len(cards) != len(self._cards[player]):
            raise TichuEnvValueError("Not all cards can be removed.")
        return HandCards(
            *[new_cards if player == k else self._cards[k] for k in range(4)]
        )

    def iter_all_cards(self, player: int=None):
        """
        :param player: If specified, iterates only over the cards of this player.
        :return: Iterator over all single cards in all hands if 'player' is not specified
        """
        if player is None:
            return itertools.chain(*self._cards)
        else:
            return iter(self._cards[player])

    def as_list_of_lists(self):
        """Return the hands as a plain list of four lists of cards."""
        return [list(cards) for cards in self._cards]

    def __iter__(self):
        return self._cards.__iter__()

    def __getitem__(self, item):
        return self._cards[item]

    def __hash__(self):
        return hash(self._cards)

    def __eq__(self, other):
        # BUG FIX: the previous implementation compared the *flattened* card
        # sequence over all hands, so two HandCards with the same cards dealt
        # to different players compared equal (and disagreed with __hash__,
        # which hashes the per-player tuple). Compare hand by hand instead.
        return (other.__class__ == self.__class__
                and all(sc == oc
                        for p in range(4)
                        for sc, oc in itertools.zip_longest(self._cards[p],
                                                            other._cards[p])))

    def __repr__(self):
        return(
            """
            0: {}
            1: {}
            2: {}
            3: {}
            """
        ).format(*map(str, self._cards))

    def __str__(self):
        return self.__repr__()
class MutableHandCards(HandCards):
    """Mutable variant of HandCards; remove_cards mutates in place (used for rollouts)."""
    __slots__ = ('_cards',)

    def __init__(self, cards0: Iterable = (), cards1: Iterable = (), cards2: Iterable = (), cards3: Iterable = ()):
        super().__init__(cards0, cards1, cards2, cards3)
        # make list (mutable per-player storage instead of the parent's tuple)
        self._cards: List[CardSet] = list(self._cards)

    @classmethod
    def from_immutable(cls, handcards):
        # Alternate constructor: copy the hands of an (immutable) HandCards.
        return cls(*handcards._cards)

    def remove_cards(self, player: int, cards: Collection[Card], raise_on_uncomplete=True):
        """In-place removal; returns self (unlike the parent, which returns a new instance)."""
        len_oldcards = len(self._cards[player])
        self._cards[player] = CardSet((c for c in self._cards[player] if c not in cards))
        if raise_on_uncomplete and len(self._cards[player]) + len(cards) != len_oldcards:
            raise TichuEnvValueError("Not all cards can be removed.")
        else:
            return self

    def __hash__(self):
        # Mutable objects must not be hashable.
        raise AttributeError("MutableHandCards can't be hashed")

    def __eq__(self, other):
        # Identity semantics: a mutable hand only equals itself.
        return self is other

    def __str__(self):
        return(
            """ {me.__class__.__name__}
            0: {}
            1: {}
            2: {}
            3: {}
            """
        ).format(*map(str, self._cards), me=self)
class WonTricks(object):
    """Immutable record of the tricks each of the four players has won."""

    __slots__ = ('_tricks',)

    def __init__(self, tricks0: Iterable[Trick]=(), tricks1: Iterable[Trick]=(), tricks2: Iterable[Trick]=(), tricks3: Iterable[Trick]=()):
        self._tricks: Tuple[Tuple[Trick, ...]] = (tuple(tricks0), tuple(tricks1), tuple(tricks2), tuple(tricks3))
        assert all(isinstance(t, Trick) for t in itertools.chain(*self._tricks))

    def add_trick(self, player: int, trick: Trick):
        """
        :param player:
        :param trick:
        :return: New WonTrick instance with the trick appended to the players won tricks
        """
        return WonTricks(*[(tricks + (trick,) if k == player else tricks) for k, tricks in enumerate(self._tricks)])

    def iter_all_tricks(self, player: int=None):
        """
        :param player: If specified, iterates only over the tricks won by this player.
        :return: Iterator over all tricks that have been won if 'player' is not specified.
        """
        if player is None:
            return itertools.chain(*self._tricks)
        else:
            # BUG FIX: 'return' was missing here, so the method silently
            # returned None whenever a specific player was requested.
            return iter(self._tricks[player])

    def __iter__(self):
        return self._tricks.__iter__()

    def __getitem__(self, item):
        return self._tricks.__getitem__(item)

    def __hash__(self):
        return hash(self._tricks)

    def __eq__(self, other):
        # BUG FIX: the previous implementation compared the *flattened* trick
        # sequence, which ignored *which player* won each trick (and disagreed
        # with __hash__, which hashes the per-player tuple). Compare per player.
        return (other.__class__ == self.__class__
                and all(st == ot
                        for p in range(4)
                        for st, ot in itertools.zip_longest(self._tricks[p],
                                                            other._tricks[p])))

    def __str__(self):
        return (
            """
            0 won {} tricks
            1 won {} tricks
            2 won {} tricks
            3 won {} tricks
            """
        ).format(*[str(len(wt)) for wt in self._tricks])
class MutableWonTricks(WonTricks):
    """Mutable variant of WonTricks; add_trick appends in place (used for rollouts)."""

    __slots__ = ('_tricks',)

    def __init__(self, tricks0: Iterable[Trick]=(), tricks1: Iterable[Trick]=(), tricks2: Iterable[Trick]=(), tricks3: Iterable[Trick]=()):
        super().__init__(tricks0, tricks1, tricks2, tricks3)
        # BUG FIX: the annotation was 'Tuple[List[Trick, ...]]'.
        # 'List[Trick, ...]' is an invalid typing subscript (List takes a
        # single parameter), and annotations on attribute targets are
        # evaluated at runtime, so constructing this class raised TypeError.
        self._tricks: Tuple[List[Trick], List[Trick], List[Trick], List[Trick]] = (
            list(tricks0), list(tricks1), list(tricks2), list(tricks3))

    @classmethod
    def from_immutable(cls, wontricks):
        # Alternate constructor: copy the tricks of an (immutable) WonTricks.
        return cls(*wontricks._tricks)

    def add_trick(self, player: int, trick: Trick):
        """
        :param player:
        :param trick:
        :return: self
        """
        self._tricks[player].append(trick)
        return self

    def __hash__(self):
        # Mutable objects must not be hashable.
        raise AttributeError("MutableWonTricks can't be hashed")

    def __eq__(self, other):
        # Identity semantics: a mutable record only equals itself.
        return self is other
class History(object):
    """Immutable record of the (state, action) sequence of a Tichu game.

    Appending returns a new History; the underlying storage is a flat tuple
    interleaving state and action objects.
    """
    __slots__ = ('_wished', '_state_action_tuple')

    def __init__(self, _wished: bool=False, _tup=tuple()):
        self._wished: bool = _wished  # True once any WishAction was recorded
        self._state_action_tuple: Tuple[Union[TichuState, PlayerAction]] = _tup

    def last_state(self)->Optional['BaseTichuState']:
        # Scan backwards for the most recent state entry (None if none yet).
        for elem in reversed(self._state_action_tuple):
            if isinstance(elem, BaseTichuState):
                return elem
        return None

    def wished(self)->bool:
        """
        :return: True iff at some point a wish was made, false otherwise
        """
        return self._wished

    def new_state_actions(self, state: 'BaseTichuState', actions: Iterable[PlayerAction])->'History':
        """
        :param state:
        :param actions:
        :return: copy of this History instance with the state and actions appended to it.
        """
        actions = tuple(actions)
        assert isinstance(state, BaseTichuState)
        assert all(isinstance(action, PlayerAction) for action in actions)
        _wished = self._wished or any(isinstance(action, WishAction) for action in actions)
        return History(_wished=_wished, _tup=self._state_action_tuple + (state, *actions))

    def new_state_action(self, state: 'BaseTichuState', action: PlayerAction)->'History':
        """
        :param state:
        :param action:
        :return: copy of this History instance with the state and action appended to it.
        """
        # NOTE(review): asserts TichuState here while new_state_actions accepts
        # any BaseTichuState — presumably intentional, but verify.
        assert isinstance(state, TichuState)
        assert isinstance(action, PlayerAction)
        new_tuple = self._state_action_tuple + (state, action)
        return History(_wished=self._wished or isinstance(action, WishAction), _tup=new_tuple)

    def add_last_state(self, state: 'BaseTichuState'):
        # Append a terminal state with no accompanying action.
        assert isinstance(state, TichuState)
        new_tuple = self._state_action_tuple + (state, )
        return History(_wished=self._wished, _tup=new_tuple)

    def actions(self):
        # Generator over only the action entries, in order.
        yield from (a for a in self._state_action_tuple if isinstance(a, PlayerAction))

    def __repr__(self):
        return "{me.__class__.__name__}(length: {l})".format(me=self, l=len(self._state_action_tuple))

    def __str__(self):
        last_state = self.last_state()
        try:
            ranking = last_state.ranking
        except AttributeError:
            ranking = "No ranking"
        try:
            points = last_state.count_points() if last_state.is_terminal() else "State is not Terminal"
        except AttributeError:
            points = "No points"
        actions = list(self.actions())
        actions_str = " " if len(actions) else "EMPTY"
        for action in actions:
            actions_str += " -> "+str(action)
            if isinstance(action, WinTrickAction):
                actions_str += "\n    "
        return (
            """
            {me.__class__.__name__}
            length: {length}
            last ranking: {ranking}
            last points: {points}
            actions:
            {actions}
            """.format(me=self, length=len(self._state_action_tuple), ranking=ranking, points=points,
                       actions=actions_str)
        )
class BaseTichuState(object, metaclass=abc.ABCMeta):
    """
    Abstract base for a Tichu game state.

    Subclasses supply the state components through the abstract properties
    below; this class implements the rules logic on top of them: which
    actions are legal (``possible_actions_gen``), what state a legal action
    leads to (``next_state``), and the final scoring (``count_points``).
    """
    def __init__(self, allow_tichu=True, allow_wish=True, discard_history:bool=False):
        """
        :param allow_tichu: if False, no Tichu-announcement actions are generated.
        :param allow_wish: if False, no wish actions are generated.
        :param discard_history: if True, subclasses may drop the history on state changes.
        """
        self._allow_tichu = allow_tichu
        self._allow_wish = allow_wish
        self.discard_history = discard_history
        # lazily filled caches of the legal actions for this state
        self._possible_actions_set: Set[PlayerAction] = None
        self._possible_actions_list: List[PlayerAction] = None
        # cache of already computed action -> successor state transitions
        self._state_transitions: Dict[PlayerAction, TichuState] = dict()
    # --- abstract state components, provided by subclasses ---
    @property
    @abc.abstractmethod
    def player_pos(self):
        """Position (0-3) of the player to act."""
        raise NotImplementedError("Is an abstract method!")
    @property
    @abc.abstractmethod
    def handcards(self):
        """Handcards of all 4 players, indexable by player position."""
        raise NotImplementedError("Is an abstract method!")
    @property
    @abc.abstractmethod
    def won_tricks(self):
        """Tricks each player has won so far, indexable by player position."""
        raise NotImplementedError("Is an abstract method!")
    @property
    @abc.abstractmethod
    def trick_on_table(self):
        """The Trick currently being played on (may be empty)."""
        raise NotImplementedError("Is an abstract method!")
    @property
    @abc.abstractmethod
    def wish(self):
        """The currently active wished CardRank, or None."""
        raise NotImplementedError("Is an abstract method!")
    @property
    @abc.abstractmethod
    def ranking(self):
        """Tuple of player positions in the order they finished."""
        raise NotImplementedError("Is an abstract method!")
    @property
    @abc.abstractmethod
    def announced_tichu(self):
        """Frozenset of positions that announced a (normal) Tichu."""
        raise NotImplementedError("Is an abstract method!")
    @property
    @abc.abstractmethod
    def announced_grand_tichu(self):
        """Frozenset of positions that announced a grand Tichu."""
        raise NotImplementedError("Is an abstract method!")
    @property
    @abc.abstractmethod
    def history(self):
        """History of the (state, action) pairs leading to this state."""
        raise NotImplementedError("Is an abstract method!")
    @property
    def _current_player_handcards(self):
        """Handcards of the player to act."""
        return self.handcards[self.player_pos]
    @property
    @timecall(immediate=False)
    def possible_actions_set(self)->Set[PlayerAction]:
        """Frozenset of all legal actions in this state (computed once, then cached)."""
        if self._possible_actions_set is None:
            self._possible_actions_set = frozenset(self.possible_actions_list)
        return self._possible_actions_set
    @property
    @timecall(immediate=False)
    def possible_actions_list(self)->List[PlayerAction]:
        """List of all legal actions in this state (computed once, then cached)."""
        if self._possible_actions_list is None:
            self._possible_actions_list = list(self.possible_actions_gen())
        return self._possible_actions_list
    @abc.abstractmethod
    def change(self, **attributes_to_change) -> 'TichuState':
        """
        :param attributes_to_change: kwargs with the name of TichuState Attributes
        :return: A copy ot this TichuState instance with the given attributes replaced
        """
    def possible_actions_gen(self)->Generator[PlayerAction, None, None]:
        """
        :return: Generator yielding all possible actions in this state
        """
        # ######### tichu? ######### (ie. player just played the first time (next_action keeps the player the same in this case))
        if self._allow_tichu:
            # last acting player has to decide on announcing a tichu
            # (only possible right after their very first play:
            #  14 - cards just played == cards still in hand)
            last_act = self.trick_on_table.last_action
            if (isinstance(last_act, PlayCombination)
                    and last_act.player_pos not in self.announced_tichu
                    and last_act.player_pos not in self.announced_grand_tichu
                    and 14 - len(last_act.combination) == len(self.handcards[last_act.player_pos])):
                yield tichu_actions[last_act.player_pos]
                yield no_tichu_actions[last_act.player_pos]
                return  # player has to decide whether to announce a tichu or not
        # ######### Round Ends with double win? #########
        if self.is_double_win():
            assert self.is_terminal()  # -> No action possible
            return
        # store last played combination (action)
        last_combination_action = self.trick_on_table.last_combination_action
        last_combination = self.trick_on_table.last_combination
        # ######### Round ends with the 3rd player finishing? #########
        if len(self.ranking) >= 3:  # Round ends -> terminal
            if self.trick_on_table.is_empty():
                assert self.is_terminal()  # -> No action possible
                return
            else:
                # give the remaining trick on table to leader
                yield WinTrickAction(player_pos=last_combination_action.player_pos, trick=self.trick_on_table)
                return  # Round ends
        # ######### wish? #########
        if (self._allow_wish and not self.history.wished()) and (not self.trick_on_table.is_empty()) and Card.MAHJONG in last_combination:
            # Note that self.player_pos is not equal to the wishing player pos.
            yield from all_wish_actions_gen(self.trick_on_table.last_combination_action.player_pos)
            return  # Player must wish something, no other actions allowed
        # ######### trick ended? #########
        if self.trick_on_table.is_finished():
            # dragon away? (the dragon trick must be given to an enemy player)
            if Card.DRAGON in last_combination:
                assert isinstance(self.player_pos, int), str(self.player_pos)
                yield GiveDragonAwayAction(self.player_pos, (self.player_pos + 1) % 4, trick=self.trick_on_table)
                yield GiveDragonAwayAction(self.player_pos, (self.player_pos - 1) % 4, trick=self.trick_on_table)
            # Normal Trick
            else:
                assert isinstance(self.player_pos, int), str(self.player_pos)
                yield WinTrickAction(player_pos=self.player_pos, trick=self.trick_on_table)
            return  # No more actions allowed
        # ######### DOG? #########
        if DOG_COMBINATION == last_combination:  # Dog was played
            # logger.debug("Dog was played -> Win trick action")
            # the dog hands the lead to the teammate (pos + 2)
            yield WinTrickAction(player_pos=(last_combination_action.player_pos + 2) % 4, trick=self.trick_on_table)
            return  # No more actions allowed
        # ######### possible combinations and wish fulfilling. #########
        can_fulfill_wish = False
        # initialise possible combinations ignoring the wish
        possible_combinations = list(self._current_player_handcards.possible_combinations(played_on=last_combination))
        # logger.debug("possible_combinations: {}".format(possible_combinations))
        if self.wish and self._current_player_handcards.contains_cardrank(self.wish):
            # player may have to fulfill the wish
            possible_combinations_wish = list(self._current_player_handcards.possible_combinations(played_on=last_combination, contains_rank=self.wish))
            if len(possible_combinations_wish) > 0:
                # player can and therefore has to fulfill the wish
                can_fulfill_wish = True
                possible_combinations = possible_combinations_wish
        # ######### pass? ######### (not allowed on an empty trick or when the wish must be fulfilled)
        can_pass = not (self.trick_on_table.is_empty() or can_fulfill_wish)
        if can_pass:
            yield pass_actions[self.player_pos]
        # ######### combinations ? ######### -> which combs
        PlayactionClass = PlayFirst if self.trick_on_table.is_empty() else PlayCombination  # Determine FirstPlay or PlayCombination
        for comb in possible_combinations:
            if comb == DOG_COMBINATION:
                yield play_dog_actions[self.player_pos]
            else:
                yield PlayactionClass(player_pos=self.player_pos, combination=comb)
        # TODO bombs ?
    def next_state(self, action: PlayerAction)->'TichuState':
        """
        :param action: a legal action in this state (member of possible_actions_set)
        :return: the successor state after *action* (cached per action)
        :raises IllegalActionError: if the action is not legal in this state
        """
        if action not in self.possible_actions_set:
            raise IllegalActionError("{} is not a legal action in state: {}".format(action, self))
        # cache the state transitions
        if action in self._state_transitions:
            return self._state_transitions[action]
        # tichu (ie. player just played the first time (next_action keeps the player the same in this case))
        elif isinstance(action, TichuAction):
            next_s = self._next_state_on_tichu(action)
        # wish
        elif isinstance(action, WishAction):
            next_s = self._next_state_on_wish(action)
        # win trick (includes dragon away)?
        elif isinstance(action, WinTrickAction):
            next_s = self._next_state_on_win_trick(action)
        # pass
        elif isinstance(action, PassAction):
            next_s = self._next_state_on_pass(action)
        # combinations (includes playfirst, playdog, playbomb)
        elif isinstance(action, PlayCombination):
            next_s = self._next_state_on_combination(action)
        else:
            raise LogicError("An unknown action has been played")
        self._state_transitions[action] = next_s
        return next_s
    def random_action(self)->PlayerAction:
        """Return a uniformly random legal action for this state."""
        return random.choice(self.possible_actions_list)
    def _next_state_on_wish(self, wish_action: WishAction)->'TichuState':
        """Successor state after a wish was made."""
        return self.change(
                wish=wish_action.wish,
                trick_on_table=self.trick_on_table + wish_action,
                history=self.history.new_state_action(self, wish_action)
        )
    def _next_state_on_tichu(self, tichu_action: TichuAction)->'TichuState':
        """Successor state after a (no-)tichu decision of the last acting player."""
        h = self.history.new_state_action(self, tichu_action)
        tot = self.trick_on_table + tichu_action
        # if the dog was just played, the tichu decision completes the trick
        if DOG_COMBINATION == self.trick_on_table.last_combination:
            tot = tot.finish()
        if tichu_action.announce:
            assert tichu_action.player_pos not in self.announced_grand_tichu
            return self.change(
                    announced_tichu=self.announced_tichu.union({tichu_action.player_pos}),
                    trick_on_table=tot,
                    history=h
            )
        else:
            return self.change(
                    trick_on_table=tot,
                    history=h
            )
    def _next_state_on_win_trick(self, win_trick_action: WinTrickAction)->'TichuState':
        """Successor state after a trick was won (incl. giving the dragon trick away)."""
        winner = win_trick_action.player_pos
        assert self.player_pos == winner or len(self.ranking) >= 3, "action: {act}, winner:{winner}, state:{state}".format(act=win_trick_action, winner=winner, state=self)
        # give trick to correct player (dragon tricks go to an enemy)
        trick_to = winner
        if isinstance(win_trick_action, GiveDragonAwayAction):
            trick_to = win_trick_action.to
        # determine next player
        try:
            next_player = winner if len(self.handcards[winner]) else self._next_player_turn()
        except StopIteration:
            # happens only right before the game ends
            next_player = winner
            assert self.is_double_win() or len(self.ranking) >= 3
        return self.change(
                player_pos=next_player,
                won_tricks=self.won_tricks.add_trick(player=trick_to, trick=win_trick_action.trick),
                trick_on_table=Trick(),
                history=self.history.new_state_action(self, win_trick_action)
        )
    def _next_state_on_pass(self, pass_action: PassAction)->'TichuState':
        """Successor state after the current player passed."""
        assert pass_action.player_pos == self.player_pos
        leading_player = self.trick_on_table.last_combination_action.player_pos
        # NOTE(review): unlike _next_state_on_win_trick, a StopIteration from
        # _next_player_turn is not handled here (handling was commented out).
        # try:
        next_player_pos = self._next_player_turn()
        # except StopIteration:
        #     # happens only right before the game ends
        #     next_player_pos = leading_player
        # The condition is true iff the leading player lies (cyclically)
        # between the passing player and the next player with cards,
        # i.e. everybody after the leader passed -> trick ends.
        if (leading_player == next_player_pos
                or self.player_pos < leading_player < next_player_pos
                or next_player_pos < self.player_pos < leading_player
                or leading_player < next_player_pos < self.player_pos):
            # trick ends with leading as winner
            return self.change(
                    player_pos=leading_player,
                    trick_on_table=self.trick_on_table.finish(last_action=pass_action),
                    history=self.history.new_state_action(self, pass_action)
            )
        else:
            return self.change(
                    player_pos=next_player_pos,
                    trick_on_table=self.trick_on_table + pass_action,
                    history=self.history.new_state_action(self, pass_action)
            )
    def _next_state_on_combination(self, comb_action: PlayCombination)->'TichuState':
        """Successor state after the current player played a combination."""
        played_comb = comb_action.combination
        assert comb_action.player_pos == self.player_pos
        # remove from handcards and add to trick on table
        next_trick_on_table = self.trick_on_table + comb_action
        next_handcards = self.handcards.remove_cards(player=self.player_pos, cards=played_comb.cards)
        assert len(next_handcards[self.player_pos]) < len(self.handcards[self.player_pos])
        assert next_handcards[self.player_pos].issubset(self.handcards[self.player_pos])
        # ranking
        next_ranking = self.ranking
        if len(next_handcards[self.player_pos]) == 0:
            # player finished
            next_ranking = self.ranking + (self.player_pos,)
            assert self.player_pos not in self.ranking
            assert len(self.ranking) == len(set(self.ranking))
        # dog
        if played_comb == DOG_COMBINATION:
            assert self.trick_on_table.is_empty()
            next_player_pos = (self.player_pos+2) % 4  # Teammate
        else:
            # next players turn
            # try:
            next_player_pos = self._next_player_turn()
            # except StopIteration:
            #     # happens only right before the game ends
            #     next_player_pos = (comb_action.player_pos + 1) % 4
        # create state (the wish is cleared when the played combination fulfills it)
        return self.change(
                player_pos=next_player_pos,
                handcards=next_handcards,
                trick_on_table=next_trick_on_table,
                wish=None if played_comb.contains_cardrank(self.wish) else self.wish,
                ranking=next_ranking,
                history=self.history.new_state_action(self, comb_action)
        )
    def _next_player_turn(self) -> int:
        """
        :return: the next player with non empty handcards
        :raises StopIteration: if no other player has cards left
        """
        return next((ppos % 4 for ppos in range(self.player_pos + 1, self.player_pos + 4) if len(self.handcards[ppos % 4]) > 0))
    def has_cards(self, player: int, cards: Collection[Card])->bool:
        """
        :param player:
        :param cards:
        :return: True if the player has the given card, False otherwise
        """
        return self.handcards.has_cards(player=player, cards=cards)
    def is_terminal(self):
        """Round over: double win, or 3 players finished and the trick was cleared."""
        return self.is_double_win() or (self.trick_on_table.is_empty() and len(self.ranking) >= 3)
    def is_double_win(self)->bool:
        """True if the first two finishers are teammates (positions differ by 2)."""
        return len(self.ranking) >= 2 and self.ranking[0] == (self.ranking[1] + 2) % 4
    def count_points(self) -> Tuple[int, int, int, int]:
        """
        Only correct if the state is terminal
        :return: tuple of length 4 with the points of each player at the corresponding index.
                 Note: team totals are stored at both teammates' indices.
        """
        # TODO Test
        if not self.is_terminal():
            logger.warning("Calculating points of a NON terminal state! Result may be incorrect.")
        # calculate tichu points (+/- 200 for grand, +/- 100 for normal tichu)
        tichu_points = [0, 0, 0, 0]
        for gt_pos in self.announced_grand_tichu:
            tichu_points[gt_pos] += 200 if gt_pos == self.ranking[0] else -200
        for t_pos in self.announced_tichu:
            tichu_points[t_pos] += 100 if t_pos == self.ranking[0] else -100
        points = tichu_points
        # fill the ranking to 4
        final_ranking = list(self.ranking) + [ppos for ppos in range(4) if ppos not in self.ranking]
        assert len(final_ranking) == 4, "{} -> {}".format(self.ranking, final_ranking)
        if self.is_double_win():
            # double win (200 for winner team)
            points[final_ranking[0]] += 100
            points[final_ranking[1]] += 100
        else:
            # not double win
            for rank in range(3):  # first 3 players get the points in their won tricks
                player_pos = final_ranking[rank]
                points[player_pos] += sum(t.points for t in self.won_tricks[player_pos])
            # first player gets the points of the last players tricks
            winner = final_ranking[0]
            looser = final_ranking[3]
            points[winner] += sum(t.points for t in self.won_tricks[looser])
            # the handcards of the last player go to the enemy team
            points[(looser + 1) % 4] += sum(t.points for t in self.handcards[looser])
        # fi
        # sum the points of each team and store the team total at both indices
        t1 = points[0] + points[2]
        t2 = points[1] + points[3]
        points[0] = t1
        points[2] = t1
        points[1] = t2
        points[3] = t2
        assert len(points) == 4
        assert points[0] == points[2] and points[1] == points[3]
        return tuple(points)
    def __str__(self):
        return (
            """
            {me.__class__.__name__}
            player: {me.player_pos}
            handcards: {me.handcards}
            won tricks: {me.won_tricks}
            trick on table: {me.trick_on_table}
            wish: {me.wish}
            ranking: {me.ranking}
            tichus: {me.announced_tichu}
            grand tichus: {me.announced_grand_tichu}
            history: {me.history}
            """).format(me=self)
class _BaseTichuStateImpl(BaseTichuState, metaclass=abc.ABCMeta):
    """
    Implements the properties of BaseTichuState with 'raise AttributeError'.

    Partial states (e.g. states before the actual round starts) inherit from
    this class and override only the properties that are meaningful for them;
    accessing any other property fails loudly with a descriptive message
    instead of silently returning a bogus value.
    """
    __slots__ = ()
    @property
    def player_pos(self):
        raise AttributeError("{} has no attribute 'player_pos'".format(type(self).__name__))
    @property
    def handcards(self):
        raise AttributeError("{} has no attribute 'handcards'".format(type(self).__name__))
    @property
    def won_tricks(self):
        raise AttributeError("{} has no attribute 'won_tricks'".format(type(self).__name__))
    @property
    def trick_on_table(self):
        raise AttributeError("{} has no attribute 'trick_on_table'".format(type(self).__name__))
    @property
    def wish(self):
        raise AttributeError("{} has no attribute 'wish'".format(type(self).__name__))
    @property
    def ranking(self):
        raise AttributeError("{} has no attribute 'ranking'".format(type(self).__name__))
    @property
    def announced_tichu(self):
        raise AttributeError("{} has no attribute 'announced_tichu'".format(type(self).__name__))
    @property
    def announced_grand_tichu(self):
        raise AttributeError("{} has no attribute 'announced_grand_tichu'".format(type(self).__name__))
    @property
    def history(self):
        raise AttributeError("{} has no attribute 'history'".format(type(self).__name__))
    def change(self, **attributes_to_change):
        raise AttributeError("{} does not support 'change'".format(type(self).__name__))
class TichuState(namedtuple("TichuState", [
            "player_pos",
            "handcards",
            "won_tricks",
            "trick_on_table",
            "wish",
            "ranking",
            "announced_tichu",
            "announced_grand_tichu",
            "history"
        ]), _BaseTichuStateImpl):
    """
    Immutable, fully specified Tichu game state.

    Implemented as a namedtuple; the non-field configuration keywords
    (allow_tichu, allow_wish, discard_history) are consumed in __new__ and
    stored by BaseTichuState.__init__.
    """
    __slots__ = ()
    def __new__(cls, *args, allow_tichu=True, allow_wish=True, discard_history: bool=False, **kwargs):
        # The namedtuple must only see the 9 field values; the configuration
        # keywords are stripped here (they reach __init__ unchanged, since
        # __init__ is called with the same arguments as __new__).
        return super().__new__(cls, *args, **kwargs)
    def __init__(self, player_pos: int, handcards: HandCards, won_tricks: WonTricks,
                 trick_on_table: Trick, wish: Optional[CardRank], ranking: tuple,
                 announced_tichu: frozenset, announced_grand_tichu: frozenset,
                 history: History, allow_tichu: bool=True, allow_wish: bool=True, discard_history:bool=False):
        super().__init__(allow_tichu=allow_tichu, allow_wish=allow_wish, discard_history=discard_history)
        # some paranoid checks
        assert player_pos in range(4)
        assert isinstance(handcards, HandCards)
        assert isinstance(won_tricks, WonTricks)
        assert wish is None or isinstance(wish, CardRank)
        assert isinstance(ranking, tuple)
        assert all(r in range(4) for r in ranking)
        assert isinstance(announced_tichu, frozenset)
        assert isinstance(announced_grand_tichu, frozenset)
        assert all(r in range(4) for r in announced_tichu)
        assert all(r in range(4) for r in announced_grand_tichu)
        assert isinstance(trick_on_table, Trick)
        assert isinstance(history, History)
    @timecall(immediate=False)
    def change(self, **attributes_to_change)->'TichuState':
        """
        :param attributes_to_change: kwargs with the name of TichuState Attributes
        :return: A copy ot this TichuState instance with the given attributes replaced
        """
        if len(attributes_to_change) == 0:
            return self
        # when history is discarded, every derived state starts a fresh History
        if self.discard_history:
            attributes_to_change['history'] = History()
        return TichuState(*self._replace(**attributes_to_change), allow_tichu=self._allow_tichu, allow_wish=self._allow_wish, discard_history=self.discard_history)
    def copy_discard_history(self)->'TichuState':
        """Copy of this state with an empty history and discard_history enabled."""
        ts = self.change(history=History())
        # instance attribute set directly (BaseTichuState has no __slots__,
        # so instances carry a __dict__ despite __slots__ = () here)
        ts.discard_history = True
        return ts
    # state is immutable, so we can simplify the deepcopies.
    def __deepcopy__(self, memo):
        return self
    def __copy__(self):
        return self
class InitialState(_BaseTichuStateImpl):
    """
    State where all players have 8 cards (before announcing their grand tichus)
    """
    __slots__ = ('_handcards', '_history')
    def __init__(self):
        super().__init__()
        # deal the full deck into 4 random piles, each player gets the first 8 cards of a pile
        random_piles = Deck(full=True).split(nbr_piles=4, random_=True)
        first_eight = [pile[:8] for pile in random_piles]
        assert len(first_eight) == 4
        assert all(len(pile) == 8 for pile in first_eight)
        self._handcards = HandCards(*first_eight)
        self._history = History()
    @property
    def handcards(self):
        return self._handcards
    @property
    def history(self):
        return self._history
    def next_state(self, players: Iterable[int]) -> 'FullCardsState':
        """Advance by announcing grand tichus for the given player positions."""
        players = frozenset(players)
        check_param(all(p in range(4) for p in players), msg="[InitialState.next_state]: All players must be in range(4).")
        return self.announce_grand_tichus(players)
    def announce_grand_tichus(self, players: Iterable[int])->'FullCardsState':
        """The given players announce a grand tichu; deals the remaining cards."""
        return FullCardsState(self, players)
    def is_terminal(self):
        return False
class FullCardsState(_BaseTichuStateImpl):
    """
    State where the players have 14 cards and announced their grand tichus.
    All players may announce a Tichu now
    """
    __slots__ = ('_handcards', '_history', '_announced_grand_tichu')
    def __init__(self, initial_state: InitialState, players_announced_grand_tichu: Iterable[int]):
        players_announced_grand_tichu = frozenset(players_announced_grand_tichu)
        check_param(all(pos in range(4) for pos in players_announced_grand_tichu))
        # deal the cards that were held back in the InitialState (6 more per player)
        already_dealt = set(initial_state.handcards.iter_all_cards())
        undealt = set(Deck(full=True)) - already_dealt
        extra_piles = Deck(full=False, cards=undealt).split(nbr_piles=4, random_=True)
        assert len(extra_piles) == 4
        assert all(len(pile) == 6 for pile in extra_piles), str(extra_piles)
        super().__init__()
        full_hands = (itertools.chain(hand, extra_piles[seat]) for seat, hand in enumerate(initial_state.handcards))
        self._handcards = HandCards(*full_hands)
        self._announced_grand_tichu = players_announced_grand_tichu
        grand_tichu_decisions = (TichuAction(seat, announce_tichu=seat in players_announced_grand_tichu, grand=True) for seat in range(4))
        self._history = initial_state.history.new_state_actions(initial_state, grand_tichu_decisions)
    @property
    def handcards(self):
        return self._handcards
    @property
    def history(self):
        return self._history
    @property
    def announced_grand_tichu(self):
        return self._announced_grand_tichu
    def next_state(self, players: Iterable[int]) -> 'BeforeTrading':
        """Advance by announcing (normal) tichus for the given player positions."""
        players = frozenset(players)
        check_param(all(p in range(4) for p in players), msg="[FullCardsState.next_state]: All players must be in range(4).")
        return self.announce_tichus(players)
    def announce_tichus(self, players: Iterable[int])->'BeforeTrading':
        """The given players announce a (normal) tichu."""
        return BeforeTrading(self, players)
    def is_terminal(self):
        return False
class BeforeTrading(_BaseTichuStateImpl):
    """
    In this state all players have to trade 3 cards.
    """
    __slots__ = ('_handcards', '_history', '_announced_grand_tichu', '_announced_tichu')
    def __init__(self, prev_state: FullCardsState, players_announced_tichu: Iterable[int]):
        check_isinstance(prev_state, FullCardsState)
        players_announced_tichu = frozenset(players_announced_tichu)
        check_param(all(pos in range(4) for pos in players_announced_tichu))
        super().__init__()
        # carry the hands and grand-tichu announcements over from the previous state
        self._handcards = prev_state.handcards
        self._announced_grand_tichu = prev_state.announced_grand_tichu
        self._announced_tichu = players_announced_tichu
        tichu_decisions = (TichuAction(seat, announce_tichu=seat in players_announced_tichu) for seat in range(4))
        self._history = prev_state.history.new_state_actions(prev_state, tichu_decisions)
    @property
    def handcards(self):
        return self._handcards
    @property
    def history(self):
        return self._history
    @property
    def announced_grand_tichu(self):
        return self._announced_grand_tichu
    @property
    def announced_tichu(self):
        return self._announced_tichu
    def next_state(self, trades: Collection[CardTrade]) -> 'AfterTrading':
        """Advance by applying the given card trades."""
        check_all_isinstance(trades, CardTrade)
        return self.trade_cards(trades)
    def trade_cards(self, trades: Collection[CardTrade]) -> 'AfterTrading':
        """
        Same as: AfterTrading.from_beforetrading(<this BeforeTrading instance>, trades=trades)
        :param trades: must have length of 4*3 = 12 and contain only legal trades
        :return: The state after the given cards have been traded.
        """
        return AfterTrading.from_beforetrading(self, trades=trades)
    def is_terminal(self):
        return False
class AfterTrading(TichuState):
    """
    All players have 14 cards and have already traded. From this state on the Round starts with the player having the MAHJONG.
    This is a regular TichuState (same fields and behaviour), with extra sanity checks on construction.
    """
    __slots__ = ()
    def __init__(self, *args, **kwargs):
        # NOTE: the namedtuple fields are already populated in __new__,
        # so the checks below may read self before super().__init__ runs.
        check_true(Card.MAHJONG in self.handcards[self.player_pos])
        check_true(all(len(hc) == 14 for hc in self.handcards))
        super().__init__(*args, **kwargs)
    @classmethod
    def from_beforetrading(cls, before_trading: BeforeTrading, trades: Collection[CardTrade]) -> 'AfterTrading':
        """
        Apply the given trades to a BeforeTrading state.

        :param before_trading: the state to trade from
        :param trades: (from, to, card) triples; either all 12 of them, or
                       empty to bypass the trading phase entirely
        :return: the AfterTrading state, with the MAHJONG holder to act first
        :raises LogicError: if no player holds the MAHJONG after trading
        """
        assert len(trades) == 0 or len(trades) == 12  # 4 players trade 3 cards each, an empty trades collection bypasses the trading phase
        new_handcards = before_trading.handcards.as_list_of_lists()
        trade_actions = []
        for from_, to, card in trades:
            trade_actions.append(TradeAction(from_=from_, to=to, card=card))
            # move the card; the asserts rely on cards being unique in the deck
            assert card in new_handcards[from_]
            new_handcards[from_].remove(card)
            assert card not in new_handcards[from_]
            new_handcards[to].append(card)
            assert card in new_handcards[to]
        try:
            starting_player = next((ppos for ppos, hc in enumerate(new_handcards) if Card.MAHJONG in hc))
        except StopIteration as se:
            raise LogicError("No player seems to have the MAHJONG.") from se
        else:
            return cls(
                    player_pos=starting_player,
                    handcards=HandCards(*new_handcards),
                    won_tricks=WonTricks(),
                    trick_on_table=Trick(),
                    wish=None,
                    ranking=(),
                    announced_tichu=before_trading.announced_tichu,
                    announced_grand_tichu=before_trading.announced_grand_tichu,
                    history=before_trading.history.new_state_actions(before_trading, trade_actions),
                    allow_tichu=True
            )
    def is_terminal(self):
        return False
class RolloutTichuState(BaseTichuState):
    """
    Mutable Tichu state used for fast rollouts (simulations).

    Unlike TichuState, applying an action mutates this instance in place
    (apply_action) instead of creating a new state. Tichu announcements,
    wishes and the history are disabled to keep rollouts cheap.
    """
    __slots__ = ('_handcards', '_announced_grand_tichu', '_announced_tichu', '_player_pos', '_won_tricks',
                 '_trick_on_table', '_wish', '_ranking')
    def __init__(self, player_pos: int, handcards: HandCards, won_tricks: WonTricks,
                 trick_on_table: Trick, wish: Optional[CardRank], ranking: tuple,
                 announced_tichu: frozenset, announced_grand_tichu: frozenset,
                 history: History):
        # rollouts never announce tichus or make wishes
        super().__init__(allow_tichu=False, allow_wish=False)
        assert isinstance(player_pos, int), str(player_pos)
        self._player_pos = player_pos
        # convert the immutable components to their mutable counterparts
        self._handcards = MutableHandCards.from_immutable(handcards)
        self._won_tricks = MutableWonTricks.from_immutable(won_tricks)
        self._trick_on_table = MutableTrick.from_immutable(trick_on_table)
        self._wish = wish
        self._ranking = list(ranking)
        self._announced_tichu = announced_tichu
        self._announced_grand_tichu = announced_grand_tichu
        # the history argument is intentionally not kept (see the history property)
        # self._history = history
    @classmethod
    def from_tichustate(cls, state: TichuState):
        """Create a RolloutTichuState from the fields of a TichuState (namedtuple)."""
        return cls(*state)
    @property
    def handcards(self):
        return self._handcards
    @property
    def trick_on_table(self):
        return self._trick_on_table
    @property
    def wish(self):
        return self._wish
    @property
    def announced_tichu(self):
        return self._announced_tichu
    @property
    def ranking(self):
        return self._ranking
    @property
    def player_pos(self):
        return self._player_pos
    @property
    def history(self):
        # rollout states do not track a history; accessing it is a programming error
        raise LogicError()
    @property
    def announced_grand_tichu(self):
        return self._announced_grand_tichu
    @property
    def won_tricks(self):
        return self._won_tricks
    def random_action(self)->PlayerAction:
        """Return a uniformly random legal action for this state."""
        return random.choice(self.possible_actions_list)
    def rollout(self, policy: Callable[[BaseTichuState], PlayerAction])->BaseTichuState:
        """
        Play this state to the end, choosing actions with the given policy.
        Mutates this instance.

        :param policy: callable mapping the current state to the action to play
        :return: self (now terminal)
        """
        while not self.is_terminal():
            action = policy(self)
            self.apply_action(action)
        return self
    def random_rollout(self)->BaseTichuState:
        """Play this state to the end with uniformly random actions. Mutates this instance."""
        while not self.is_terminal():
            self.apply_action(random.choice(self.possible_actions_list))
        return self
    @timecall(immediate=False)
    def apply_action(self, action: PlayerAction)->'RolloutTichuState':
        """
        Applies the action on this state (Modifies the calling instance).
        :param action:
        :return: self
        :raises IllegalActionError: if the action is not legal in this state
        """
        if action not in self.possible_actions_set:
            raise IllegalActionError("{} is not a legal action in state: {}".format(action, self))
        # (No Tichu and wish in rollout)
        # win trick (includes dragon away)?
        elif isinstance(action, WinTrickAction):
            self._apply_win_trick_action(action)
        # pass
        elif isinstance(action, PassAction):
            self._apply_pass_action(action)
        # combinations (includes playfirst, playdog, playbomb)
        elif isinstance(action, PlayCombination):
            self._apply_combination(action)
        else:
            raise LogicError("An unknown action has been played")
        # reset possible actions cache (the state changed in place)
        self._possible_actions_set = None
        self._possible_actions_list = None
        return self
    def _apply_win_trick_action(self, win_trick_action: WinTrickAction):
        """In-place version of BaseTichuState._next_state_on_win_trick."""
        winner = win_trick_action.player_pos
        assert self.player_pos == winner or len(self.ranking) >= 3, "action: {act}, winner:{winner}, state:{state}".format(act=win_trick_action, winner=winner, state=self)
        # give trick to correct player (dragon tricks go to an enemy)
        trick_to = winner
        if isinstance(win_trick_action, GiveDragonAwayAction):
            trick_to = win_trick_action.to
        # determine next player
        try:
            next_player = winner if len(self.handcards[winner]) else self._next_player_turn()
        except StopIteration:
            # happens only right before the game ends
            next_player = winner
            assert self.is_double_win() or len(self.ranking) >= 3
        self._player_pos = next_player
        assert isinstance(self._won_tricks, MutableWonTricks)
        self._won_tricks.add_trick(player=trick_to, trick=win_trick_action.trick)
        self._trick_on_table = MutableTrick()
    def _apply_pass_action(self, pass_action: PassAction):
        """In-place version of BaseTichuState._next_state_on_pass."""
        assert pass_action.player_pos == self.player_pos
        assert isinstance(self.player_pos, int), str(self.player_pos)
        leading_player = self.trick_on_table.last_combination_action.player_pos
        assert isinstance(leading_player, int), str(leading_player)+" "+str(self.trick_on_table)
        next_player_pos = self._next_player_turn()
        # true iff the leading player lies (cyclically) between the passing
        # player and the next player with cards -> everybody passed, trick ends
        if (leading_player == next_player_pos
                or self.player_pos < leading_player < next_player_pos
                or next_player_pos < self.player_pos < leading_player
                or leading_player < next_player_pos < self.player_pos):
            # trick ends with leading as winner
            self._player_pos = leading_player
            assert isinstance(self.player_pos, int), str(self.player_pos)
            self._trick_on_table = self.trick_on_table.finish(last_action=pass_action)
        else:
            self._player_pos = next_player_pos
            assert isinstance(self._trick_on_table, MutableTrick)
            assert isinstance(next_player_pos, int), str(next_player_pos)
            self._trick_on_table.append(pass_action)
        assert isinstance(self.player_pos, int), str(self.player_pos)
    def _apply_combination(self, comb_action: PlayCombination):
        """In-place version of BaseTichuState._next_state_on_combination."""
        played_comb = comb_action.combination
        assert comb_action.player_pos == self.player_pos
        # remove from handcards and add to trick on table
        self._trick_on_table.append(comb_action)
        assert isinstance(self._handcards, MutableHandCards)
        self._handcards.remove_cards(player=self.player_pos, cards=played_comb.cards)
        # ranking
        if len(self._handcards[self.player_pos]) == 0:
            # player finished
            assert self.player_pos not in self.ranking
            assert len(self.ranking) == len(set(self.ranking))
            self.ranking.append(self.player_pos)
        # dog
        if played_comb == DOG_COMBINATION:
            assert len(self.trick_on_table) == 1, str(self.trick_on_table)
            self._player_pos = (self.player_pos + 2) % 4  # Teammate
        else:
            self._player_pos = self._next_player_turn()
        # wish fullfilled?
        if played_comb.contains_cardrank(self.wish):
            self._wish = None
    def _next_player_turn(self) -> int:
        """
        :return: the next player with non empty handcards
        :raises StopIteration: if no other player has cards left
        """
        return next((ppos % 4 for ppos in range(self.player_pos + 1, self.player_pos + 4) if len(self.handcards[ppos % 4]) > 0))
    def has_cards(self, player: int, cards: Collection[Card])->bool:
        """
        :param player:
        :param cards:
        :return: True if the player has the given card, False otherwise
        """
        return self.handcards.has_cards(player=player, cards=cards)
    def is_terminal(self):
        """Round over: double win, or 3 players finished and the trick was cleared."""
        return self.is_double_win() or (self.trick_on_table.is_empty() and len(self.ranking) >= 3)
    def is_double_win(self)->bool:
        """True if the first two finishers are teammates (positions differ by 2)."""
        return len(self.ranking) >= 2 and self.ranking[0] == (self.ranking[1] + 2) % 4
    # FOLLOWING METHODS SHOULD NOT BE USED AND RAISE LOGIC_ERROR
    def change(self, **attributes_to_change) -> 'TichuState':
        raise LogicError()
    def _next_state_on_wish(self, wish_action: WishAction):
        raise LogicError()
    def _next_state_on_tichu(self, tichu_action: TichuAction):
        raise LogicError()
    def _next_state_on_win_trick(self, win_trick_action: WinTrickAction):
        raise LogicError()
    def _next_state_on_pass(self, pass_action: PassAction):
        raise LogicError()
    def _next_state_on_combination(self, comb_action: PlayCombination):
        raise LogicError()
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.