content
stringlengths
0
1.05M
origin
stringclasses
2 values
type
stringclasses
2 values
game_logic = True winner = None curr_player = "X" board = [["-","-","-"], ["-","-","-"], ["-","-","-"]] def compTurn(): bestScore = -999 move = None for x in range(3): for y in range(3): if board[x][y] == "-": board[x][y] = "O" score = minimax(board,0,False) board[x][y] = "-" if score>bestScore: bestScore=score move = (x,y) board[move[0]][move[1]] = "O" display_board() def minimax(board,depth,isMaximizing): result = checkWinTest() if result != None: if result == "X": return -10 elif result == "O": return 10 else: return 0 if(isMaximizing): bestScore = -999 for x in range(3): for y in range(3): if board[x][y] == "-": board[x][y] = "O" score = minimax(board,depth+1,False) board[x][y] = "-" bestScore=max(score,bestScore) return bestScore else: bestScore = 999 for x in range(3): for y in range(3): if board[x][y] == "-": board[x][y] = "X" score = minimax(board,depth+1,True) board[x][y] = "-" bestScore = min(score,bestScore) return bestScore def display_board(): print("\n") print(board[0][0] + " | " + board[0][1] + " | " + board[0][2] + " 1 | 2 | 3") print(board[1][0] + " | " + board[1][1] + " | " + board[1][2] + " 4 | 5 | 6") print(board[2][0] + " | " + board[2][1] + " | " + board[2][2] + " 7 | 8 | 9") print("\n") def checkWin(): global game_logic row_1 = board[0][0] == board[0][1] == board[0][2] != "-" row_2 = board[1][0] == board[1][1] == board[1][2] != "-" row_3 = board[2][0] == board[2][1] == board[2][2] != "-" col_1 = board[0][0] == board[1][0] == board[2][0] != "-" col_2 = board[0][1] == board[1][1] == board[2][1] != "-" col_3 = board[0][2] == board[1][2] == board[2][2] != "-" diag_1 = board[0][0] == board[1][1] == board[2][2] != "-" diag_2 = board[0][2] == board[1][1] == board[2][0] != "-" if row_1 or row_2 or row_3 or col_1 or col_2 or col_3 or diag_1 or diag_2: game_logic=False if row_1: return board[0][0] elif row_2: return board[1][0] elif row_3: return board[2][0] elif col_1: return board[0][0] elif col_2: return board[0][1] elif col_3: return board[0][2] 
elif diag_1: return board[0][0] elif diag_2: return board[0][2] elif "-" not in board[0] and "-" not in board[1] and "-" not in board[2]: game_logic = False print("Board Full") return "Tie" def checkWinTest(): row_1 = board[0][0] == board[0][1] == board[0][2] != "-" row_2 = board[1][0] == board[1][1] == board[1][2] != "-" row_3 = board[2][0] == board[2][1] == board[2][2] != "-" col_1 = board[0][0] == board[1][0] == board[2][0] != "-" col_2 = board[0][1] == board[1][1] == board[2][1] != "-" col_3 = board[0][2] == board[1][2] == board[2][2] != "-" diag_1 = board[0][0] == board[1][1] == board[2][2] != "-" diag_2 = board[0][2] == board[1][1] == board[2][0] != "-" if row_1: return board[0][0] elif row_2: return board[1][0] elif row_3: return board[2][0] elif col_1: return board[0][0] elif col_2: return board[0][1] elif col_3: return board[0][2] elif diag_1: return board[0][0] elif diag_2: return board[0][2] elif "-" not in board[0] and "-" not in board[1] and "-" not in board[2]: return "Tie" def flip_player(): global curr_player if curr_player == "X": curr_player = "O" elif curr_player == "O": curr_player = "X" def playerTurn(curr_player): print(curr_player+"'s turn.") position = input("Choose a position form 1-9: ") valid = False while not valid: while position not in ["1","2","3","4","5","6","7","8","9"]: position = input("Choose a position form 1-9: ") if position == "1": pos1 = 0 pos2 = 0 elif position == "2": pos1 = 0 pos2 = 1 elif position == "3": pos1 = 0 pos2 = 2 elif position == "4": pos1 = 1 pos2 = 0 elif position == "5": pos1 = 1 pos2 = 1 elif position == "6": pos1 = 1 pos2 = 2 elif position == "7": pos1 = 2 pos2 = 0 elif position == "8": pos1 = 2 pos2 = 1 else: pos1 = 2 pos2 = 2 if board[pos1][pos2] == "-": valid = True else: print("You can't go there. 
Go again.") position = input("Choose a position form 1-9: ") board[pos1][pos2] = curr_player display_board() def main(): # ------ Game Here ------ display_board() while game_logic: playerTurn(curr_player) winner=checkWin() if winner == "X" or winner =="O": print(winner+" won.") break elif winner == "Tie": print("Tie.") break compTurn() winner=checkWin() if winner == "X" or winner =="O": print(winner+" won.") break elif winner == "Tie": print("Tie.") break if __name__ == '__main__': main()
nilq/baby-python
python
class task_status:
    """Symbolic names for backend task processing status codes."""

    # Codes shared by all task-like objects.
    SCHEDULED = 0
    PROCESSING = 1
    FINISHED = 4
    FAILED = -1
    CANCELLED = 9

    # ResultsPackage only.
    EXPIRED = 8

    # RunJob only.
    WAITING_FOR_INPUT = 2

    # WorkflowRun only.
    RETRYING = 11
    REQUEST_PROCESSING = 21
    REQUEST_CANCELLING = 29
    REQUEST_RETRYING = 31

    # Used when no status applies to the object.
    NOT_APPLICABLE = None
nilq/baby-python
python
"""Packaging configuration for the taxon_parser library."""
import setuptools

# The README doubles as the PyPI long description.
with open("README.md", "r") as readme:
    long_description = readme.read()

setuptools.setup(
    name="taxon_parser",
    version="0.2.3",
    author="Augustin Roche",
    author_email="aroche@photoherbarium.fr",
    description="A library to parse taxon names into elementary components",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/aroche/taxon_parser",
    packages=setuptools.find_packages(),
    # Ship the latin-endings data file bundled inside the package.
    package_data={'': ('*/latin-endings.txt', )},
    python_requires=">=3.4",
    install_requires=("regex", ),
    classifiers=(
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ),
)
nilq/baby-python
python
import logging
from urllib.parse import urlparse

import prometheus_client
import requests
import six
from bs4 import BeautifulSoup as bs

# JSONDecodeError lives in the stdlib json module on Python 3 only;
# simplejson provides it on Python 2.
if six.PY3:
    from json import JSONDecodeError
else:
    from simplejson import JSONDecodeError

logger = logging.getLogger(__name__)


class DataFetcher(object):
    """Base interface for fetching exported Airflow monitoring data from a source."""

    def __init__(self, config):
        self._config = config
        # Human-readable name of the environment kind; set by subclasses.
        self.env = None

    def get_data(
        self,
        since,
        include_logs,
        include_task_args,
        include_xcom,
        dag_ids,
        quantity,
        incomplete_offset,
        dags_only,
    ):
        """Fetch exported data. Implemented by subclasses; base is a no-op."""
        pass

    def get_source(self):
        """Return an identifier of the data source. Implemented by subclasses."""
        pass


class WebFetcher(DataFetcher):
    """Fetches exported data over HTTP from an Airflow webserver plugin endpoint."""

    # Common instance of prometheus summary object for all fetchers
    prometheus_af_response_time_metrics = None

    def __init__(self, config):
        # type: (AirflowFetchingConfiguration) -> WebFetcher
        super(WebFetcher, self).__init__(config)
        self.env = "Airflow"
        self.base_url = config.base_url
        self.endpoint_url = config.url
        self.api_mode = config.api_mode
        self.rbac_username = config.rbac_username
        self.rbac_password = config.rbac_password
        # A session is kept so the RBAC login cookie survives across requests.
        self.client = requests.session()
        self.is_logged_in = False
        if WebFetcher.prometheus_af_response_time_metrics is None:
            WebFetcher.prometheus_af_response_time_metrics = prometheus_client.Summary(
                "af_monitor_export_response_time",
                "Airflow export plugin response time",
                ["airflow_instance"],
            )

    def get_data(
        self,
        since,
        include_logs,
        include_task_args,
        include_xcom,
        dag_ids,
        quantity,
        incomplete_offset,
        dags_only,
    ):
        """Build the query parameters, call the export endpoint and return parsed JSON.

        Returns None on a non-200 response or a connection error (both are logged).
        Re-raises JSON decode errors after logging a snippet of the body.
        """
        params = {}
        if since:
            params["since"] = since.isoformat()
        if include_logs:
            params["include_logs"] = True
        if include_task_args:
            params["include_task_args"] = True
        if include_xcom:
            params["include_xcom"] = True
        if dag_ids:
            params["dag_ids"] = dag_ids
        if quantity:
            params["fetch_quantity"] = quantity
        # incomplete_offset may legitimately be 0, so test against None.
        if incomplete_offset is not None:
            params["incomplete_offset"] = incomplete_offset
        if dags_only:
            params["dags_only"] = dags_only
        try:
            data = self._make_request(params)
            logger.info("Fetched from: {}".format(data.url))
            if data.status_code == 200:
                try:
                    return data.json()
                except JSONDecodeError:
                    if data.text:
                        # Log only a prefix of the body to keep logs readable.
                        logger.info("Failed to decode: %s...", data.text[:100])
                    raise
            else:
                logger.error(
                    "Could not fetch data from url {}, error code: {}. Hint: If the IP address is correct"
                    " but the full path is not, check the configuration of api_mode variable".format(
                        self.endpoint_url, data.status_code,
                    ),
                )
        except ConnectionError as e:
            logger.error(
                "An error occurred while connecting to server: {}. Error: {}".format(
                    self.endpoint_url, e
                )
            )

    def _try_login(self):
        """Log in to an RBAC-enabled Airflow webserver and keep the session cookie."""
        login_url = self.base_url + "/login/"
        auth_params = {"username": self.rbac_username, "password": self.rbac_password}
        # IMPORTANT: when airflow uses RBAC (Flask-AppBuilder [FAB]) it doesn't return
        # the relevant csrf token in a cookie, but inside the login page html content.
        # therefore, we are extracting it, and attaching it to the session manually
        try:
            # extract csrf token
            logger.info(
                "Trying to login to %s with username: %s.",
                login_url,
                self.rbac_username,
            )
            resp = self.client.get(login_url)
            soup = bs(resp.text, "html.parser")
            csrf_token = soup.find(id="csrf_token").get("value")
            if csrf_token:
                auth_params["csrf_token"] = csrf_token
        except Exception as e:
            # Best effort: login is still attempted without the token below.
            logger.warning("Could not collect csrf token from %s. \n%s", login_url, e)
        # login
        resp = self.client.post(login_url, data=auth_params)
        # validate login succeeded
        soup = bs(resp.text, "html.parser")
        # A logout link on the returned page means we are authenticated.
        if "/logout/" in [a.get("href") for a in soup.find_all("a")]:
            self.is_logged_in = True
            logger.info("Succesfully logged in to %s.", login_url)
        else:
            logger.warning("Could not login to %s.", login_url)

    def _make_request(self, params):
        """Issue the GET request, timing it with the shared prometheus summary."""
        auth = ()
        if self.api_mode == "experimental":
            # Experimental API uses HTTP basic auth per request.
            auth = (self.rbac_username, self.rbac_password)
        elif self.api_mode == "rbac" and not self.is_logged_in:
            # In RBAC mode, we need to login with admin credentials first
            self._try_login()
        parsed_uri = urlparse(self.endpoint_url)
        airflow_instance_url = "{uri.scheme}://{uri.netloc}".format(uri=parsed_uri)
        with WebFetcher.prometheus_af_response_time_metrics.labels(
            airflow_instance_url
        ).time():
            return self.client.get(self.endpoint_url, params=params, auth=auth)

    def get_source(self):
        """Return the export endpoint URL."""
        return self.endpoint_url


class GoogleComposerFetcher(WebFetcher):
    """WebFetcher variant for Cloud Composer, authenticating through IAP."""

    # requires GOOGLE_APPLICATION_CREDENTIALS env variable
    def __init__(self, config):
        # type: (AirflowFetchingConfiguration) -> GoogleComposerFetcher
        super(GoogleComposerFetcher, self).__init__(config)
        self.client_id = config.composer_client_id
        self.env = "GoogleCloudComposer"

    def _make_request(self, params):
        """Perform the request through an IAP-authenticated helper instead of requests."""
        from airflow_monitor.make_iap_request import make_iap_request

        resp = make_iap_request(
            url=self.endpoint_url, client_id=self.client_id, params=params
        )
        return resp


class DbFetcher(DataFetcher):
    """Fetches exported data by querying the Airflow metadata database directly."""

    def __init__(self, config):
        # type: (AirflowFetchingConfiguration) -> DbFetcher
        super(DbFetcher, self).__init__(config)
        from sqlalchemy import create_engine

        self.dag_folder = config.local_dag_folder
        self.sql_conn_string = config.sql_alchemy_conn
        self.engine = create_engine(self.sql_conn_string)
        self.env = "AirflowDB"

    def get_data(
        self,
        since,
        include_logs,
        include_task_args,
        include_xcom,
        dag_ids,
        quantity,
        incomplete_offset,
        dags_only,
    ):
        """Delegate to export_data_directly, logging and re-raising any failure."""
        try:
            data = self.export_data_directly(
                since=since,
                include_logs=include_logs,
                include_task_args=include_task_args,
                include_xcom=include_xcom,
                dag_ids=dag_ids,
                quantity=quantity,
                incomplete_offset=incomplete_offset,
                dags_only=dags_only,
            )
            return data
        except Exception as ex:
            # NOTE(review): the trailing `ex` argument has no matching %s
            # placeholder here — logger.exception already records the
            # traceback; confirm whether the extra argument was intended.
            logger.exception("Failed to connect to db %s", self.sql_conn_string, ex)
            raise

    def get_source(self):
        """Return the SQLAlchemy connection string used as the source."""
        return self.sql_conn_string

    def export_data_directly(
        self,
        since,
        include_logs,
        include_task_args,
        include_xcom,
        dag_ids,
        quantity,
        incomplete_offset,
        dags_only,
    ):
        """Load the DagBag and run the export plugin query against a fresh DB session."""
        from airflow import models, settings, conf
        from airflow.settings import STORE_SERIALIZED_DAGS
        from sqlalchemy import create_engine
        from sqlalchemy.orm import sessionmaker
        from dbnd_airflow_export.dbnd_airflow_export_plugin import get_airflow_data

        # Point Airflow at the configured metadata database before loading DAGs.
        conf.set("core", "sql_alchemy_conn", value=self.sql_conn_string)
        dagbag = models.DagBag(
            self.dag_folder if self.dag_folder else settings.DAGS_FOLDER,
            include_examples=True,
            store_serialized_dags=STORE_SERIALIZED_DAGS,
        )
        engine = create_engine(self.sql_conn_string)
        session = sessionmaker(bind=engine)
        result = get_airflow_data(
            dagbag=dagbag,
            since=since,
            include_logs=include_logs,
            include_task_args=include_task_args,
            include_xcom=include_xcom,
            dag_ids=dag_ids,
            quantity=quantity,
            incomplete_offset=incomplete_offset,
            dags_only=dags_only,
            session=session(),
        )
        return result


class FileFetcher(DataFetcher):
    """Reads previously-exported monitoring data from a local JSON file."""

    def __init__(self, config):
        # type: (AirflowFetchingConfiguration) -> FileFetcher
        super(FileFetcher, self).__init__(config)
        self.env = "JsonFile"
        self.json_file_path = config.json_file_path

    def get_data(
        self,
        since,
        include_logs,
        include_task_args,
        include_xcom,
        dag_ids,
        quantity,
        incomplete_offset,
        dags_only,
    ):
        """Return the parsed JSON file contents; filter arguments are ignored here.

        Raises if no path is configured; read/parse errors are logged and
        swallowed (returns None).
        """
        import json

        if not self.json_file_path:
            raise Exception(
                "'json_file_path' was not set in AirflowMonitor configuration."
            )
        try:
            with open(self.json_file_path) as f:
                data = json.load(f)
            return data
        except Exception as e:
            logger.error(
                "Could not read json file {}. \nError: {}".format(self.json_file_path, e)
            )

    def get_source(self):
        """Return the JSON file path used as the source."""
        return self.json_file_path


def data_fetcher_factory(config):
    # type: (AirflowFetchingConfiguration) -> DataFetcher
    """Return the DataFetcher subclass matching config.fetcher (db/web/composer/file)."""
    if config.fetcher == "db":
        return DbFetcher(config)
    elif config.fetcher == "web":
        return WebFetcher(config)
    elif config.fetcher == "composer":
        return GoogleComposerFetcher(config)
    elif config.fetcher == "file":
        return FileFetcher(config)
    else:
        err = "Unsupported fetcher_type: {}, use one of the following: web/db/composer/file".format(
            config.fetcher
        )
        logging.error(err)
        raise ConnectionError(err)
nilq/baby-python
python
import json

import requests
from fabric.colors import red


def publish_deploy_event(name, component, environment):
    """POST a repository-dispatch style deploy event for `component`.

    Silently does nothing when no deploy_event_url is configured; warns and
    returns when the deploy_event_token secret is missing.
    """
    url = environment.fab_settings_config.deploy_event_url
    if not url:
        return

    token = environment.get_secret("deploy_event_token")
    if not token:
        print(red(f"skipping {name} event: deploy_event_token secret not set"))
        return

    payload = {
        "event_type": name,
        "client_payload": {
            "component": component,
            "environment": environment.meta_config.deploy_env,
        },
    }
    request_headers = {
        "Authorization": f"token {token}",
        "Accept": "application/vnd.github.v3+json",
    }
    response = requests.post(url, data=json.dumps(payload), headers=request_headers)

    # Any 2xx status counts as success.
    succeeded = 200 <= response.status_code < 300
    if succeeded:
        print(f"triggered {name} event")
    else:
        print(red(f"{name} event status: {response.status_code}"))
nilq/baby-python
python
class Data:
    """Triple-store dataset (train/valid/test) of (head, relation, tail) rows."""

    def __init__(self, data_dir="data/FB15k-237/", reverse=False):
        """Load all three splits from `data_dir`; optionally add reversed triples."""
        self.train_data = self.load_data(data_dir, "train", reverse=reverse)
        self.valid_data = self.load_data(data_dir, "valid", reverse=reverse)
        self.test_data = self.load_data(data_dir, "test", reverse=reverse)
        self.data = self.train_data + self.valid_data + self.test_data
        self.entities = self.get_entities(self.data)
        self.train_relations = self.get_relations(self.train_data)
        self.valid_relations = self.get_relations(self.valid_data)
        self.test_relations = self.get_relations(self.test_data)
        # All relations: train relations first, then any valid/test relations
        # not already seen in train (order of the per-split sorted lists kept).
        unseen = [r for r in self.valid_relations if r not in self.train_relations]
        unseen += [r for r in self.test_relations if r not in self.train_relations]
        self.relations = self.train_relations + unseen

    def load_data(self, data_dir, data_type="train", reverse=False):
        """Parse `<data_dir><data_type>.txt` into triples; append reversed ones if asked."""
        path = "%s%s.txt" % (data_dir, data_type)
        with open(path, "r") as f:
            raw = f.read().strip()
        triples = [line.split() for line in raw.split("\n")]
        if reverse:
            triples = triples + [[t[2], t[1] + "_reverse", t[0]] for t in triples]
        return triples

    def get_relations(self, data):
        """Sorted list of distinct relation names (middle column)."""
        return sorted({t[1] for t in data})

    def get_entities(self, data):
        """Sorted list of distinct entities appearing as head or tail."""
        return sorted({t[0] for t in data} | {t[2] for t in data})
nilq/baby-python
python
import os
import yaml
from google.cloud import storage
from google.oauth2 import service_account
from .storage import Storage


class GcsStorage(Storage):
    """Key/value store persisted as a single YAML blob in a GCS bucket."""

    def __init__(self, bucket, path, project=None, json_path=None):
        """Connect to GCS, creating the bucket if it does not exist.

        bucket: bucket name (required).
        path: object path of the YAML blob; defaults to 'spinbot/cache'.
        project: optional GCP project for the client.
        json_path: optional service-account JSON key file; when omitted the
        default application credentials are used.
        """
        if bucket is None:
            raise ValueError('Bucket must be supplied to GCS storage')
        if path is None:
            path = 'spinbot/cache'
        self.path = path
        if json_path is not None:
            json_path = os.path.expanduser(json_path)
            credentials = service_account.Credentials.from_service_account_file(json_path)
            if credentials.requires_scopes:
                # Narrow the credentials to read/write storage access.
                credentials = credentials.with_scopes(['https://www.googleapis.com/auth/devstorage.read_write'])
            self.client = storage.Client(project=project, credentials=credentials)
        else:
            self.client = storage.Client()
        # lookup_bucket returns None when the bucket is missing; create it then.
        if self.client.lookup_bucket(bucket) is None:
            self.client.create_bucket(bucket)
        self.bucket = self.client.get_bucket(bucket)
        super().__init__()

    def store(self, key, val):
        """Read the YAML blob, set props[key] = val, and write the blob back.

        Not atomic: concurrent writers can lose updates (read-modify-write).
        """
        origblob = self.bucket.get_blob(self.path)
        if origblob:
            contents = origblob.download_as_string()
        else:
            # No blob yet: start from an empty mapping.
            contents = '{}'
        props = yaml.safe_load(contents)
        if props is None:
            props = {}
        props[key] = val
        # You can't use origblob to upload. Calling `download_as_string` sets
        # the hash field (crc32) on the object. When you upload, since that
        # field is already set, it won't get recalculated it for the new
        # content. So it sends the crc32 to the server and the server says
        # "whoah buddy, your crc32 doesn't match your content" and returns an
        # error. Is this a bug or just confusing library design? The crc32 field
        # on the blob is new, so it's hard for me to say if they intended for it
        # to work this way. It works in google-cloud-storage 1.29.0, but is
        # broken in 1.33.0.
        newblob = self.bucket.blob(self.path)
        newblob.upload_from_string(yaml.safe_dump(props))

    def load(self, key):
        """Return the value stored under `key`, or None when absent."""
        b = self.bucket.get_blob(self.path)
        contents = '{}'
        if b:
            contents = b.download_as_string()
        props = yaml.safe_load(contents)
        if props is None:
            props = {}
        return props.get(key)
nilq/baby-python
python
from django.shortcuts import render
from rest_framework.decorators import api_view
from django.http import JsonResponse
import json
from .models import Wine
from .serializers import WineSerializer
from .constants import PAGE_SIZE

# Create your views here.


@api_view(["GET"])
def wines(request):
    """Return one page of wines ordered by points, highest first."""
    page = int(request.GET.get('page', '0'))
    start = page * PAGE_SIZE
    queryset = Wine.objects.order_by('-points')[start:start + PAGE_SIZE]
    payload = WineSerializer(queryset, many=True).data
    return JsonResponse(payload, safe=False)


@api_view(["POST"])
def notes(request):
    """Return one page of wines whose description contains the posted 'note'."""
    page = int(request.GET.get('page', '0'))
    body = json.loads(request.body) if request.body else {}
    start = page * PAGE_SIZE
    queryset = (
        Wine.objects
        .filter(description__contains=body.get('note', ''))
        .order_by('-points')[start:start + PAGE_SIZE]
    )
    payload = WineSerializer(queryset, many=True).data
    return JsonResponse(payload, safe=False)


@api_view(["GET"])
def best(request):
    """Return one page of wines ordered by points-to-price ratio, highest first."""
    page = int(request.GET.get('page', '0'))
    start = page * PAGE_SIZE
    queryset = Wine.objects.order_by('-ratio')[start:start + PAGE_SIZE]
    payload = WineSerializer(queryset, many=True).data
    return JsonResponse(payload, safe=False)
nilq/baby-python
python
# Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Unit tests for kfserving.Storage.download covering local paths, HTTP(S)
# downloads (plain, tar.gz, zip), and mocked GCS/S3/Azure-blob backends.

import io
import os
import tempfile
import binascii
import pytest
import kfserving
from minio import Minio, error
from google.cloud import exceptions
import unittest.mock as mock

# Dotted module path used to patch storage clients where they are looked up.
STORAGE_MODULE = 'kfserving.storage'

# *.tar.gz contains a single empty file model.pth
FILE_TAR_GZ_RAW = binascii.unhexlify('1f8b0800bac550600003cbcd4f49cdd12b28c960a01d3030303033315100d1e666a660dac008c28701054313a090a189919981998281a1b1b1a1118382010ddd0407a5c525894540a754656466e464e2560754969686c71ca83fe0f4281805a360140c7200009f7e1bb400060000')
# *.zip contains a single empty file model.pth
FILE_ZIP_RAW = binascii.unhexlify('504b030414000800080035b67052000000000000000000000000090020006d6f64656c2e70746855540d000786c5506086c5506086c5506075780b000104f501000004140000000300504b0708000000000200000000000000504b0102140314000800080035b67052000000000200000000000000090020000000000000000000a481000000006d6f64656c2e70746855540d000786c5506086c5506086c5506075780b000104f50100000414000000504b0506000000000100010057000000590000000000')


def test_storage_local_path():
    # file:// URIs resolve to the local filesystem path with the scheme stripped.
    abs_path = 'file:///'
    relative_path = 'file://.'
    assert kfserving.Storage.download(abs_path) == abs_path.replace("file://", "", 1)
    assert kfserving.Storage.download(relative_path) == relative_path.replace("file://", "", 1)


def test_storage_local_path_exception():
    # A file:// URI pointing at a missing path must raise.
    not_exist_path = 'file:///some/random/path'
    with pytest.raises(Exception):
        kfserving.Storage.download(not_exist_path)


def test_no_prefix_local_path():
    # Bare local paths (no scheme) are returned unchanged.
    abs_path = '/'
    relative_path = '.'
    assert kfserving.Storage.download(abs_path) == abs_path
    assert kfserving.Storage.download(relative_path) == relative_path


class MockHttpResponse(object):
    """Minimal stand-in for requests' response: status, raw byte stream, headers."""

    def __init__(
            self,
            status_code=404,
            raw=b'',
            content_type=''
    ):
        self.status_code = status_code
        self.raw = io.BytesIO(raw)
        self.headers = {'Content-Type': content_type}

    def __enter__(self):
        return self

    def __exit__(self, ex_type, ex_val, traceback):
        pass


@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='application/octet-stream'))
def test_http_uri_path(_):
    http_uri = 'http://foo.bar/model.joblib'
    http_with_query_uri = 'http://foo.bar/model.joblib?foo=bar'
    out_dir = '.'
    assert kfserving.Storage.download(http_uri, out_dir=out_dir) == out_dir
    assert kfserving.Storage.download(http_with_query_uri, out_dir=out_dir) == out_dir
    os.remove('./model.joblib')


@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='application/octet-stream'))
def test_https_uri_path(_):
    https_uri = 'https://foo.bar/model.joblib'
    https_with_query_uri = 'https://foo.bar/model.joblib?foo=bar'
    out_dir = '.'
    assert kfserving.Storage.download(https_uri, out_dir=out_dir) == out_dir
    assert kfserving.Storage.download(https_with_query_uri, out_dir=out_dir) == out_dir
    os.remove('./model.joblib')


@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='application/x-tar', raw=FILE_TAR_GZ_RAW))
def test_http_uri_path_with_tar_gz(_):
    # Archives are unpacked into out_dir (model.pth comes from the fixture).
    with tempfile.TemporaryDirectory() as out_dir:
        https_uri = 'https://foo.bar/model.tar.gz'
        assert kfserving.Storage.download(https_uri, out_dir=out_dir) == out_dir
        assert os.path.exists(os.path.join(out_dir, 'model.pth'))


@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='application/x-tar', raw=FILE_TAR_GZ_RAW))
def test_http_uri_path_with_tar_gz_query_params(_):
    with tempfile.TemporaryDirectory() as out_dir:
        https_with_query_uri = 'https://foo.bar/model.tar.gz?foo=bar'
        assert kfserving.Storage.download(https_with_query_uri, out_dir=out_dir) == out_dir
        assert os.path.exists(os.path.join(out_dir, 'model.pth'))


@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='application/zip', raw=FILE_ZIP_RAW))
def test_http_uri_path_with_zip(_):
    with tempfile.TemporaryDirectory() as out_dir:
        https_uri = 'https://foo.bar/model.zip'
        assert kfserving.Storage.download(https_uri, out_dir=out_dir) == out_dir
        assert os.path.exists(os.path.join(out_dir, 'model.pth'))


@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='application/zip', raw=FILE_ZIP_RAW))
def test_http_uri_path_with_zip_query_params(_):
    with tempfile.TemporaryDirectory() as out_dir:
        https_with_query_uri = 'https://foo.bar/model.zip?foo=bar'
        assert kfserving.Storage.download(https_with_query_uri, out_dir=out_dir) == out_dir
        assert os.path.exists(os.path.join(out_dir, 'model.pth'))


@mock.patch('requests.get', return_value=MockHttpResponse(status_code=404))
def test_nonexistent_uri(_):
    # A 404 from the server surfaces as RuntimeError.
    non_existent_uri = 'https://theabyss.net/model.joblib'
    with pytest.raises(RuntimeError):
        kfserving.Storage.download(non_existent_uri)


@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200))
def test_uri_no_filename(_):
    # A URI ending in '/' has no filename to save to.
    bad_uri = 'https://foo.bar/test/'
    with pytest.raises(ValueError):
        kfserving.Storage.download(bad_uri)


@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='text/html'))
def test_html_content_type(_):
    # text/html responses (e.g. error pages) are rejected.
    bad_uri = 'https://some.site.com/test.model'
    with pytest.raises(RuntimeError):
        kfserving.Storage.download(bad_uri)


@mock.patch(STORAGE_MODULE + '.storage')
def test_mock_gcs(mock_storage):
    gcs_path = 'gs://foo/bar'
    mock_obj = mock.MagicMock()
    mock_obj.name = 'mock.object'
    mock_storage.Client().bucket().list_blobs().__iter__.return_value = [mock_obj]
    assert kfserving.Storage.download(gcs_path)


def test_storage_blob_exception():
    # Azure blob download without credentials should raise.
    blob_path = 'https://accountname.blob.core.windows.net/container/some/blob/'
    with pytest.raises(Exception):
        kfserving.Storage.download(blob_path)


@mock.patch('urllib3.PoolManager')
@mock.patch(STORAGE_MODULE + '.Minio')
def test_storage_s3_exception(mock_connection, mock_minio):
    minio_path = 's3://foo/bar'
    # Create mock connection
    mock_server = mock.MagicMock()
    mock_connection.return_value = mock_server
    # Create mock client
    mock_minio.return_value = Minio("s3.us.cloud-object-storage.appdomain.cloud", secure=True)
    with pytest.raises(Exception):
        kfserving.Storage.download(minio_path)


@mock.patch('urllib3.PoolManager')
@mock.patch(STORAGE_MODULE + '.Minio')
def test_no_permission_buckets(mock_connection, mock_minio):
    bad_s3_path = "s3://random/path"
    #bad_gcs_path = "gs://random/path"
    # Access private buckets without credentials
    mock_minio.return_value = Minio("s3.us.cloud-object-storage.appdomain.cloud", secure=True)
    mock_connection.side_effect = error.AccessDenied()
    with pytest.raises(error.AccessDenied):
        kfserving.Storage.download(bad_s3_path)
    #mock_connection.side_effect = exceptions.Forbidden(None)
    #with pytest.raises(exceptions.Forbidden):
    #    kfserving.Storage.download(bad_gcs_path)
nilq/baby-python
python
# Api view of grades
from home.models import Grade
# from Serializers.GradeSerializer import FlatGradeSerializer
from api.services import handelFileSubmit, sendEmail, createTmpFile, getUserGrades
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.permissions import IsAdminUser
from rest_framework.parsers import JSONParser

'''
GradeAPIView: basic oprations (updaste, create, delete, list, ...)
Return data format
Non-staff:
    {"grades":[
        {name":"张益凯","password":"1324","student_id":1,"class_name":"一(三)班",
        "test":"期末考试","subject":"数学","score":99.0,"id":38,"rank":1,"count":37,
        "avg":94.02,"max":99,"min":75.0,"pass_num":37}
        {name":"张益凯","password":"1324","student_id":1,"class_name":"一(三)班",
        "test":"期末考试","subject":"语文","score":99.0,"id":38,"rank":1,"count":37,
        "avg":94.02,"max":99,"min":75.0,"pass_num":37}
    ],
    "user":"张益凯",
    "className":一(三)班,
    "studentId":1,
    "isStaff":false}
Staff:
    {"grades":[
        {"name":"张益凯","password":"1324","student_id":1,"class_name":"一(三)班",
        "test":"期末考试","subject":"数学","score":99.0,"id":38}
        {"name":"常梦冉","password":"1324","student_id":1,"class_name":"一(三)班",
        "test":"期末考试","subject":"语文","score":99.0,"id":38}
    ],
    "user":"admin",
    "className":null,
    "studentId":null,
    "isStaff":true}
'''


class GradeAPIView(viewsets.ModelViewSet):
    """Grade endpoints: list own grades, bulk create/update via file, email reports."""

    # serializer_class = FlatGradeSerializer

    def list(self, request, *args, **kwargs):
        """Return the requesting user's grades plus basic profile fields.

        The grade payload shape differs for staff vs. non-staff users
        (see the module docstring above); getUserGrades handles that.
        """
        user = request.user
        grades = getUserGrades(user)
        return Response({"grades": grades,
                         "user": user.name,
                         "className": user.class_name,
                         'studentId': user.student_id,
                         'isStaff': user.is_staff})

    # Deletion not supported
    def delete(self, request, *args, **kwargs):
        pass

    @action(methods=['post'], detail=False, permission_classes=[IsAdminUser])
    def createOrUpdate(self, request, *args, **kwargs):
        """Admin-only: import grades from an uploaded file; return the fresh list.

        Responds 400 with the non-empty error messages when the import fails.
        """
        errors = handelFileSubmit(request.FILES['file'])
        if errors:
            return Response({'errMsg': [error for error in errors if error]}, status=400)
        else:
            return self.list(request)

    @action(methods=['post'], detail=False)
    def sendReport(self, request, *args, **kwargs):
        """Email an xlsx report of the selected grade ids to the given addresses.

        Expects request.data to contain 'ids' (grade pks) and 'addresses'.
        sendEmail(...).result() blocks on the send and returns an error
        message on failure (None on success) — presumably a future; confirm.
        """
        user = request.user
        grades = getUserGrades(user, pk__in=request.data['ids'])
        tmp_file = createTmpFile(grades, prefix='GradesReport-', suffix='.xlsx')
        e = sendEmail(request.data['addresses'],
                      text='This is an auto-generated grade report (see attachment) from Transtribution.',
                      files=tmp_file).result()
        if e:
            return Response({'errMsg': e}, status=400)
        else:
            return Response(status=200)
nilq/baby-python
python
#!/usr/bin/env python3
import os
import sys
import time
sys.path.append(os.getcwd()+'/CPDP')
sys.path.append(os.getcwd()+'/JinEnv')
sys.path.append(os.getcwd()+'/lib')
import copy
import time
import json
import numpy as np
import transforms3d
from dataclasses import dataclass, field
from QuadPara import QuadPara
from QuadStates import QuadStates
from DemoSparse import DemoSparse
from QuadAlgorithm import QuadAlgorithm
from InputWaypoints import InputWaypoints
from ObsInfo import ObsInfo
from generate_random_obs import generate_random_obs


# Script: compare gradient-descent variants (Vanilla, Nesterov, Adam, Nadam,
# AMSGrad) on the same quadrotor trajectory-learning problem and plot the
# loss traces side by side.
if __name__ == "__main__":
    # a json configuration file
    config_file_name = "config.json"
    # Read the configuration from the json file
    json_file = open(config_file_name)
    config_data = json.load(json_file)

    # generate random obstacles
    num_obs = 20  # number of obstacles
    size_list = [0.2, 0.3, 0.4]  # size length, width, height in x,y,z axis
    ObsList = generate_random_obs(num_obs, size_list, config_data)

    # define the quadrotor dynamics parameters
    QuadParaInput = QuadPara(inertial_list=[1.0, 1.0, 1.0], mass=1.0, l=1.0, c=0.02)

    # number of grids for nonlinear programming solver
    n_grid = 25

    # define the initial condition
    R = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])  # rotation matrix in numpy 2D array
    QuadInitialCondition = QuadStates()
    QuadInitialCondition.position = [-2.0, -1.0, 0.6]
    QuadInitialCondition.velocity = [0, 0, 0]
    # identity rotation converted to a quaternion (no initial tilt)
    QuadInitialCondition.attitude_quaternion = transforms3d.quaternions.mat2quat(R).tolist()
    QuadInitialCondition.angular_velocity = [0, 0, 0]

    # define the desired goal
    R = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])  # rotation matrix in numpy 2D array
    QuadDesiredStates = QuadStates()
    QuadDesiredStates.position = [2.5, 1.0, 1.5]
    QuadDesiredStates.velocity = [0, 0, 0]
    QuadDesiredStates.attitude_quaternion = transforms3d.quaternions.mat2quat(R).tolist()
    QuadDesiredStates.angular_velocity = [0, 0, 0]

    # run this method to obtain human inputs
    # SparseInput is an instance of dataclass DemoSparse
    Input = InputWaypoints(config_data)
    SparseInput = Input.run(QuadInitialCondition, QuadDesiredStates, ObsList)

    # create the quadrotor algorithm solver
    Solver = QuadAlgorithm(config_data, QuadParaInput, n_grid)

    # load the optimization method for learning iteration
    # Each dict selects one optimizer and its hyper-parameters; iter_num is
    # kept identical (100) so the loss traces are comparable.
    para_vanilla = {"learning_rate": 0.06, "iter_num": 100, "method": "Vanilla"}  # This is for Vanilla gradient descent
    para_nesterov = {"learning_rate": 0.01, "iter_num": 100, "method": "Nesterov", "mu": 0.9, "true_loss_print_flag": True}  # This is for Nesterov
    para_adam = {"learning_rate": 0.22, "iter_num": 100, "method": "Adam", "beta_1": 0.9, "beta_2": 0.999, "epsilon": 1e-8}  # This is for Adam
    para_nadam = {"learning_rate": 0.10, "iter_num": 100, "method": "Nadam", "beta_1": 0.9, "beta_2": 0.999, "epsilon": 1e-8}  # This is for Nadam
    para_amsgrad = {"learning_rate": 0.06, "iter_num": 100, "method": "AMSGrad", "beta_1": 0.9, "beta_2": 0.999, "epsilon": 1e-8}  # This is for AMSGrad

    loss_trace_comparison = []
    label_list = []

    # Vanilla gradient descent
    Solver.load_optimization_function(para_vanilla)
    Solver.run(QuadInitialCondition, QuadDesiredStates, SparseInput, ObsList, print_flag=True, save_flag=False)
    # deepcopy: Solver.loss_trace is reused by the next run, so snapshot it
    loss_trace_vanilla = copy.deepcopy(Solver.loss_trace)
    loss_trace_comparison.append(loss_trace_vanilla)
    label_list.append(para_vanilla["method"])

    # Nesterov
    Solver.load_optimization_function(para_nesterov)
    Solver.run(QuadInitialCondition, QuadDesiredStates, SparseInput, ObsList, print_flag=True, save_flag=False)
    loss_trace_nesterov = copy.deepcopy(Solver.loss_trace)
    loss_trace_comparison.append(loss_trace_nesterov)
    label_list.append(para_nesterov["method"])

    # Adam
    Solver.load_optimization_function(para_adam)
    Solver.run(QuadInitialCondition, QuadDesiredStates, SparseInput, ObsList, print_flag=True, save_flag=False)
    loss_trace_adam = copy.deepcopy(Solver.loss_trace)
    loss_trace_comparison.append(loss_trace_adam)
    label_list.append(para_adam["method"])

    # Nadam
    Solver.load_optimization_function(para_nadam)
    Solver.run(QuadInitialCondition, QuadDesiredStates, SparseInput, ObsList, print_flag=True, save_flag=False)
    loss_trace_nadam = copy.deepcopy(Solver.loss_trace)
    loss_trace_comparison.append(loss_trace_nadam)
    label_list.append(para_nadam["method"])

    # AMSGrad
    Solver.load_optimization_function(para_amsgrad)
    Solver.run(QuadInitialCondition, QuadDesiredStates, SparseInput, ObsList, print_flag=True, save_flag=False)
    loss_trace_amsgrad = copy.deepcopy(Solver.loss_trace)
    loss_trace_comparison.append(loss_trace_amsgrad)
    label_list.append(para_amsgrad["method"])

    # plot the comparison
    Solver.plot_opt_method_comparison(loss_trace_comparison, label_list)
nilq/baby-python
python
""" https://adventofcode.com/2020/day/2 """ from collections import namedtuple import logging logger = logging.getLogger(__name__) Rule = namedtuple("Rule", ["letter", "f_pos", "s_pos"]) def check_password(rule, password): f_letter = password[rule.f_pos - 1] s_letter = password[rule.s_pos - 1] return (f_letter == rule.letter) ^ (s_letter == rule.letter) def get_rule(rule_text): times, letter = rule_text.split(" ") f_pos, s_pos = times.split("-") f_pos = int(f_pos) s_pos = int(s_pos) return Rule(letter, f_pos, s_pos) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) with open("input", "r") as f: l_input = [l.rstrip("\n") for l in f.readlines()] # noqa E741 valid_passwords = 0 for line in l_input: rule_text, password = line.split(":") password = password.strip() rule = get_rule(rule_text) logger.debug("base: %s", line) logger.debug(f"rule: {rule}, password: {password}") if check_password(rule, password): valid_passwords += 1 print(f"Valid passwords: {valid_passwords} on {len(l_input)}")
nilq/baby-python
python
# #!/usr/bin/env python
################################################################
## contains code relevant to updating simulation
## e.g. physics, integration
################################################################

from local.particle import *


# step particle simulation
def simStep(particles, k, dt, pMin, pMax):
    """Advance all particles by one forward-Euler step of length ``dt``.

    Pairwise electrostatic forces follow Coulomb's law with constant ``k``;
    positions are wrapped into the periodic box [pMin, pMax] per axis.

    Particles are updated in place and the same objects are returned, matching
    the original contract.  BUGFIX: the original wrote ``pNew = p`` (an alias,
    not a copy) and advanced each particle inside the force loop, so later
    particles computed forces against already-advanced positions of earlier
    ones.  Forces are now computed for every particle from the start-of-step
    positions before any particle moves.

    particles -- sequence of objects with ``pos``/``vel`` (numpy vectors) and
                 scalar ``charge``; pos/vel are mutated in place
    k         -- Coulomb constant
    dt        -- time-step length
    pMin/pMax -- per-axis box bounds for periodic wrapping
    Returns the list of (mutated) particle objects.
    """
    # Pass 1: accumulate the net force on each particle from the positions
    # all particles had at the start of the step (simultaneous update).
    forces = []
    for i, p in enumerate(particles):
        emForce = np.zeros(3, dtype=np.float32)
        for j, p2 in enumerate(particles):
            if i != j:
                n = p.pos - p2.pos
                dist = np.sqrt(n.dot(n))
                # Coulomb: q1*q2 * n / |n|^3  (direction folded into n)
                emForce += p2.charge * n / (dist ** 3)
        forces.append(emForce * (p.charge * k))

    # Pass 2: forward-Euler integration, then wrap into the periodic box.
    new_particles = []
    for p, force in zip(particles, forces):
        p.vel += force * dt
        p.pos += p.vel * dt
        for axis in range(p.pos.size):
            span = pMax[axis] - pMin[axis]
            while p.pos[axis] < pMin[axis]:
                p.pos[axis] += span
            while p.pos[axis] > pMax[axis]:
                p.pos[axis] -= span
        new_particles.append(p)

    return new_particles
nilq/baby-python
python
#!/usr/bin/env python # This script counts the number of PAM import regex as re import sys data_infile = sys.argv[1] # add 3 to account for PAM nt_overlap_threshold = int(sys.argv[2]) + 3 PAM_plus = re.compile(r'[ACTG]GG') PAM_minus = re.compile(r'CC[ACTG]') with open(data_infile, 'r') as infile: for line in infile: if line.startswith("SeqID"): print("SeqID\tSequence\tScore\tPAM_count") continue SeqID, Sequence, Score = line.split() N_plus = len(re.findall(PAM_plus, Sequence[-nt_overlap_threshold:], overlapped = True)) N_minus = len(re.findall(PAM_minus, Sequence[:nt_overlap_threshold], overlapped = True)) N = N_plus + N_minus print('\t'.join([str(x) for x in [SeqID, Sequence, Score, N]]))
nilq/baby-python
python
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 import argparse import json import itertools import sys parser = argparse.ArgumentParser(description='Check interop reports.') parser.add_argument('--required', type=str) parser.add_argument('--regressions') parser.add_argument('report') args = parser.parse_args() status = {'ok': True, 'failures': 0} def fail(message): print(message) status['ok'] = False status['failures'] += 1 def format_required_report(report): outcome = {} result_idx = 0 # find the version of s2n-quic to check s2n_quic = 's2n-quic' for impl in itertools.chain(report['clients'], report['servers']): # if we're testing a PR then use that name if impl.startswith('s2n-quic-pr'): s2n_quic = impl break for client in report['clients']: for server in report['servers']: result = report['results'][result_idx] result_idx += 1 # we're only interested in s2n-quic results if client != s2n_quic and server != s2n_quic: continue for test in result: outcome.setdefault(test['name'], {}) info = outcome[test['name']] info.setdefault(client, {'client': False, 'server': False}) info.setdefault(server, {'client': False, 'server': False}) success = test['result'] == 'succeeded' info[client]['client'] = success info[server]['server'] = success return outcome with open(args.report) as f: result = json.load(f) if args.regressions and result['regression']: fail("A regression from main was detected") if args.required: with open(args.required) as r: required = json.load(r) actual = format_required_report(result) for test, impls in required.items(): test_results = actual[test] for impl_name, endpoints in impls.items(): impl = test_results[impl_name] for endpoint in endpoints: if not impl[endpoint]: fail("{} ({}) - {} was expected to pass but failed".format(impl_name, endpoint, test)) if not status['ok']: sys.exit(status['failures'])
nilq/baby-python
python
#! /usr/bin/env python

"""Give a string-oriented API to the generic "diff" module.

The "diff" module is very powerful but practically useless on its own.
The "search" and "empty_master" functions below resolve this problem."""

################################################################################

__author__ = 'Stephen "Zero" Chappell <Noctis.Skytower@gmail.com>'
__date__ = '11 February 2010'
__version__ = '$Revision: 3 $'

################################################################################

import diff

################################################################################

# Matching Sensitivity - OFF
# When False (the default below), words are lower-cased and stripped of
# non-alphabetic characters before diffing; when True, diffing is exact.
CASE_AND_PUNCTUATION = False

################################################################################

def connect_tree(tree):
    """Takes the master and finds out what part of the slave matches it.

    The tree from "diff.search" may contain several different routes for
    finding matches. This function takes the best one, gets the master
    match, and fills in the prefix and suffix with the best choices."""
    # tree.index.index(tree.value) locates the highest-scoring node.
    match = tree.nodes[tree.index.index(tree.value)]
    node = match.a
    if match.prefix.value:
        node.prefix = connect_tree(match.prefix)
    if match.suffix.value:
        node.suffix = connect_tree(match.suffix)
    return node

def flatten_tree(node):
    """Flattens a tree from "connect_tree" for linear iteration.

    The root node created after connecting a tree must be traversed from
    beginning to end in a linear fashion. This function flattens the tree
    to make that possible. Further processing is done by other functions."""
    # array[0] holds a running match counter used to tag matched chunks.
    array = [0]
    _flatten(node, array)
    return array

def _flatten(node, array):
    """Recursively traverse and flatten the given tree.

    This is a helper function that takes "node" and sequentially processes
    its prefix, root, and suffix. The results are appended to the array."""
    if isinstance(node.prefix, diff.Slice):
        _flatten(node.prefix, array)
    else:
        array.append(node.prefix)
    # Matched chunks are recorded as (match_number, words) tuples.
    array[0] += 1
    array.append((array[0], node.root))
    if isinstance(node.suffix, diff.Slice):
        _flatten(node.suffix, array)
    else:
        array.append(node.suffix)

# Render a fully-hidden hint: one underscore run per word.
default = lambda words: ' '.join('_' * len(word) for word in words)

################################################################################
# Note: search, build_answer, & empty_master documentation is copied!
# ------^^^^^^--^^^^^^^^^^^^----^^^^^^^^^^^^-------------------------

if CASE_AND_PUNCTUATION:

    def search(master, slave):
        """Search for differences in the master and slave strings.

        The strings are translated into key and data, and their difference
        is calculated. An answer is composed after further processing and
        returned with the number of right words and total number of words."""
        key = tuple(master.split())
        data = tuple(slave.split())
        tree = diff.search(key, data)
        if tree.value:
            node = connect_tree(tree)
            array = flatten_tree(node)
            answer = build_answer(array)
        else:
            # No match at all: show a fully-hidden hint.
            answer = default(key)
        return tree.value, len(key), answer

    def build_answer(array):
        """Take in flattened / serialized data and generate a hint.

        This implementation returns a string useful for Verse objects.
        Incorrect or missed words get printed up as empty blank lines."""
        cache = []
        for chunk in array:
            if chunk and isinstance(chunk, tuple):
                if isinstance(chunk[0], int):
                    # Matched chunk: (match_number, words) — show the words.
                    for word in chunk[1]:
                        cache.append(word)
                else:
                    # Unmatched words — mask each with underscores.
                    for word in chunk:
                        cache.append('_' * len(word))
        return ' '.join(cache)

    def empty_master(master):
        """Compute the representation of a master without a slave."""
        return default(master.split())

################################################################################

else:

    def search(master, slave):
        """Search for differences in the master and slave strings.

        The strings are translated into key and data, and their difference
        is calculated. An answer is composed after further processing and
        returned with the number of right words and total number of words."""
        words = master.split()
        key = simplify(words)
        # Simplification must not drop whole words, or indexes would shift.
        assert len(words) == len(key), 'Cannot Simplify Words'
        data = simplify(slave.split())
        tree = diff.search(key, data)
        if tree.value:
            node = connect_tree(tree)
            array = flatten_tree(node)
            pairs = flatten_list(array)
            answer = build_answer(words, pairs)
        else:
            answer = default(key)
        return tree.value, len(key), answer

    def simplify(words):
        """Remove non-alphabetic characters from an array of words."""
        letter = lambda s: ''.join(filter(lambda s: 'a' <= s <= 'z', s))
        return tuple(filter(bool, map(letter, map(str.lower, words))))

    def flatten_list(array):
        """Build (flag, load) pairs for the "build_answer" function."""
        pairs = []
        for chunk in array:
            if chunk and isinstance(chunk, tuple):
                if isinstance(chunk[0], int):
                    # Matched chunk: flag True, word shown as-is.
                    for word in chunk[1]:
                        pairs.append((True, word))
                else:
                    # Unmatched chunk: flag False, word to be masked.
                    for word in chunk:
                        pairs.append((False, word))
        return pairs

    def build_answer(words, pairs):
        """Take in flattened / serialized data and generate a hint.

        This implementation returns a string useful for Verse objects.
        Incorrect or missed words get transformed into underscore lines."""
        cache = []
        # Pair original (un-simplified) words with their match flags.
        for word, (flag, load) in zip(words, pairs):
            cache.append(word if flag else '_' * len(load))
        return ' '.join(cache)

    def empty_master(master):
        """Compute the representation of a master without a slave."""
        return default(simplify(master.split()))
nilq/baby-python
python
from django.shortcuts import render, redirect
from django.contrib.auth.models import User, auth
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from content.models import BlogPost, Category
from .models import Profile, Contact


def register(request):
    """Create a User + Profile from the registration form, then redirect to login.

    Validates that the two passwords match and that username/e-mail are unused.
    """
    if request.method == 'POST':
        first_name = request.POST['first_name']
        last_name = request.POST['last_name']
        image = request.FILES.get('profile_pic')
        username = request.POST['username']
        email = request.POST['email']
        password1 = request.POST['password']
        password2 = request.POST['confirm_password']
        if password1 == password2:
            if User.objects.filter(username=username).exists():
                messages.info(request, 'Username taken')
                # NOTE(review): every other error path redirects to 'register';
                # confirm a 'register_customer' URL name actually exists.
                return redirect('register_customer')
            elif User.objects.filter(email=email).exists():
                messages.info(request, 'Email ID already exists')
                return redirect('register')
            else:
                user = User.objects.create_user(first_name=first_name, last_name=last_name,
                                                username=username, email=email, password=password1)
                profile = Profile.objects.create(user=user, image=image)
                user.save()
                profile.save()
                return redirect('login')
        else:
            messages.info(request, 'Passwords are not matching')
            return redirect('register')
    else:
        return render(request, 'accounts/register.html')


def login(request):
    """Authenticate and log the user in, or re-show the login form."""
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = auth.authenticate(username=username, password=password)
        if user is not None:
            auth.login(request, user)
            return redirect('home')
        else:
            messages.info(request, 'Invalid Credentials')
            return redirect('login')
    else:
        return render(request, 'accounts/login.html')


def logout(request):
    """Log the current user out and return to the login page."""
    auth.logout(request)
    return redirect('login')


@login_required(login_url='login')
def profile(request, profile_slug):
    """Show a user's profile page with all blog posts they authored."""
    profile = Profile.objects.get(slug=profile_slug)
    user = User.objects.get(username=profile)
    blogs = BlogPost.objects.filter(author=user)
    context = {
        "blogs": blogs,
        # Static tag map rendered by the template's category filter.
        "tags": {
            'Finance': 'Finance',
            'Fashion': 'Fashion',
            'Politics': 'Politics',
            'Sports': 'Sports',
            'Travel': 'Travel',
            'Lifestyle': 'Lifestyle',
            'Science': 'Science',
            'Environment': 'Environment',
            'Technology': 'Technology',
        },
        "profile_det": profile,
        "user_det": user,
    }
    return render(request, "accounts/profile.html", context)


def our_team(request):
    """Render the static team page."""
    return render(request, "our_team.html")


def contact(request):
    """Persist a contact-form submission and confirm to the user.

    BUGFIX: the original bound the new Contact instance to a local named
    ``messages``, shadowing ``django.contrib.messages`` — the subsequent
    ``messages.info(...)`` then raised AttributeError on the model instance.
    The instance now lives in its own name.
    """
    if request.method == 'POST':
        name = request.POST['name']
        email = request.POST['email']
        phone = request.POST['phone']
        message = request.POST['message']
        contact_entry = Contact.objects.create(name=name, email=email, phone=phone, message=message)
        contact_entry.save()
        messages.info(request, 'Message sent successfully. Thank you for writing to us.')
        return render(request, 'contact.html')
    else:
        return render(request, 'contact.html')
nilq/baby-python
python
#!/usr/bin/env python3
# CLI tool: send device-request calls (GET/PUT/...) to a resource on a Pelion
# device via the async device-requests REST endpoint, optionally repeating on
# an interval.

import argparse
import base64
import sys
import time

import requests


def main():
    print("Starting Pelion requests")
    # command line
    parser = argparse.ArgumentParser(description="Pelion device interactions")
    parser.add_argument("-a", "--apikey", type=str, help="User api key")
    parser.add_argument("-d", "--device_ID", type=str, help="Pelion device_ID (endpoint)", default="*")
    parser.add_argument("-m", "--method", type=str, help="method to device resource", default="GET")
    # sample app increasing counter
    parser.add_argument("-r", "--resource", type=str, help="device resource uri", default="/3200/0/5501")
    parser.add_argument("-p", "--payload", type=str, help="method payload (to put/post)", default="0")
    parser.add_argument("-i", "--interval", type=str, help="interval for re-doing", default="0")

    options = parser.parse_args()
    if (options.apikey is None):
        parser.print_help()
        return 1

    print("Create session")
    session = requests.Session()
    auth = "Bearer "+options.apikey
    extheaders = {'Authorization': auth}
    extheaders['Connection'] = 'keep-alive'
    extheaders['Content-type'] = 'application/json'

    pelion_url = "https://api.us-east-1.mbedcloud.com/v2/device-requests/"

    print("Making requst: %s" %options.method, options.device_ID, options.resource, options.payload)

    '''async_request
    POST /v2/device-requests/DEVICE_ID?async-id=_NNN_

    {
        "method": "GET",
        "uri": "/5/0/1"
    }
    {
        "method": "PUT",
        "uri": "/5/0/1",
        "accept": "text/plain",
        "content-type": "text/plain",
        "payload-b64": "dmFsdWUxCg=="
    }
    execute
    {
        "method": "POST",
        "uri": "/123/1/1"
    }
    '''
    # Body of the device request, JSON-encoded by requests via ``json=``.
    payload = {}
    payload['method'] = options.method
    payload['uri'] = options.resource

    # NOTE(review): this condition is True for every method except exactly
    # "put" (lowercase) — so the base64 payload is attached even to GETs.
    # It was presumably meant to be ``options.method in ("put", "PUT")``;
    # confirm intent before changing.
    if (options.method !="put" or options.method =="PUT"):
        #payload['accept']= "text/plain"
        payload['content-type']= "text/plain"
        '''
        example
        message = "Python is fun"
        message_bytes = message.encode('ascii')
        base64_bytes = base64.b64encode(message_bytes)
        base64_message = base64_bytes.decode('ascii')
        '''
        payload['payload-b64'] = base64.b64encode(options.payload.encode('ascii')).decode('ascii')

    # Unique async-id per invocation, derived from the current time.
    asyncid = str(time.time()).replace('.','-')
    print("ASYNCID "+asyncid)

    url = pelion_url+str(options.device_ID)+'?async-id='+asyncid

    # check and break the interval later
    while(True):
        # POST TO PELION, device method in JSON
        resp = session.post(url, headers=extheaders, json=payload)
        if resp.status_code < 400:
            print("HTTP OK :"+ str(resp.status_code))
            print("\t "+str(resp.text))
        else:
            print("HTTP ERROR :" + str(resp.status_code))
            print("\t " + str(resp.reason))
            print("\t " + str(resp.text))
        if int(options.interval) > 0:
            print("sleep "+str(options.interval))
            time.sleep(float(options.interval))
        else:
            print("done")
            return 0


if __name__ == "__main__":
    sys.exit(main())
nilq/baby-python
python
from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation
import numpy as np


def clustering(X=None, model_name='KMeans', n_clusters=3, **param):
    """Fit the named scikit-learn clustering estimator on X.

    model_name selects one of 'KMeans', 'SpectralClustering', or
    'AffinityPropagation' (the latter ignores n_clusters); extra keyword
    arguments are forwarded to the estimator constructor.
    Returns (fitted_model, labels).
    """
    if model_name == 'KMeans':
        # K-means clustering
        estimator = KMeans(n_clusters=n_clusters, **param)
    elif model_name == 'SpectralClustering':
        # spectral clustering
        estimator = SpectralClustering(n_clusters=n_clusters, **param)
    elif model_name == 'AffinityPropagation':
        # affinity-propagation clustering (chooses its own cluster count)
        estimator = AffinityPropagation(**param)
    estimator.fit(X=X)
    return estimator, estimator.labels_


if __name__ == '__main__':
    # Smoke test on random 10-dimensional samples.
    samples = [np.random.rand(10) for _ in range(20)]
    targets = [np.random.randint(0, 3) for _ in range(20)]
    model, labels = clustering(X=samples, model_name='KMeans', n_clusters=3)
    print(model)
    print(labels)
nilq/baby-python
python
from typing import List, Optional, Union

from lnbits.helpers import urlsafe_short_hash

from . import db
from .models import createLnurldevice, lnurldevicepayment, lnurldevices

###############lnurldeviceS##########################


async def create_lnurldevice(
    data: createLnurldevice,
) -> lnurldevices:
    """Insert a new device row with fresh random id/key and return it."""
    lnurldevice_id = urlsafe_short_hash()
    lnurldevice_key = urlsafe_short_hash()

    await db.execute(
        """
        INSERT INTO lnurldevice.lnurldevices (
            id,
            key,
            title,
            wallet,
            currency,
            device,
            profit
        )
        VALUES (?, ?, ?, ?, ?, ?, ?)
        """,
        (
            lnurldevice_id,
            lnurldevice_key,
            data.title,
            data.wallet,
            data.currency,
            data.device,
            data.profit,
        ),
    )
    return await get_lnurldevice(lnurldevice_id)


async def update_lnurldevice(lnurldevice_id: str, **kwargs) -> Optional[lnurldevices]:
    """Update the given columns (kwargs) of one device row; return the row."""
    q = ", ".join([f"{field[0]} = ?" for field in kwargs.items()])
    await db.execute(
        f"UPDATE lnurldevice.lnurldevices SET {q} WHERE id = ?",
        (*kwargs.values(), lnurldevice_id),
    )
    row = await db.fetchone(
        "SELECT * FROM lnurldevice.lnurldevices WHERE id = ?", (lnurldevice_id,)
    )
    return lnurldevices(**row) if row else None


async def get_lnurldevice(lnurldevice_id: str) -> lnurldevices:
    """Fetch one device by id (None when missing, despite the annotation)."""
    row = await db.fetchone(
        "SELECT * FROM lnurldevice.lnurldevices WHERE id = ?", (lnurldevice_id,)
    )
    return lnurldevices(**row) if row else None


async def get_lnurldevices(wallet_ids: Union[str, List[str]]) -> List[lnurldevices]:
    """Fetch all devices belonging to the given wallet id(s), ordered by id.

    BUGFIX: the original wrapped unconditionally (``wallet_ids = [wallet_ids]``)
    and sized the placeholder list with ``len(wallet_ids[0])`` — for a string
    argument that counted its characters, and for a list argument it passed the
    whole list as a single bound parameter.  Now a lone string is wrapped into
    a one-element list and the placeholder count matches the id count.
    """
    if isinstance(wallet_ids, str):
        wallet_ids = [wallet_ids]

    q = ",".join(["?"] * len(wallet_ids))
    rows = await db.fetchall(
        f"""
        SELECT * FROM lnurldevice.lnurldevices WHERE wallet IN ({q})
        ORDER BY id
        """,
        (*wallet_ids,),
    )
    return [lnurldevices(**row) if row else None for row in rows]


async def delete_lnurldevice(lnurldevice_id: str) -> None:
    """Delete one device row by id."""
    await db.execute(
        "DELETE FROM lnurldevice.lnurldevices WHERE id = ?", (lnurldevice_id,)
    )


########################lnuldevice payments###########################


async def create_lnurldevicepayment(
    deviceid: str,
    payload: Optional[str] = None,
    pin: Optional[str] = None,
    payhash: Optional[str] = None,
    sats: Optional[int] = 0,
) -> lnurldevicepayment:
    """Insert a payment record for a device and return it."""
    lnurldevicepayment_id = urlsafe_short_hash()
    await db.execute(
        """
        INSERT INTO lnurldevice.lnurldevicepayment (
            id,
            deviceid,
            payload,
            pin,
            payhash,
            sats
        )
        VALUES (?, ?, ?, ?, ?, ?)
        """,
        (lnurldevicepayment_id, deviceid, payload, pin, payhash, sats),
    )
    return await get_lnurldevicepayment(lnurldevicepayment_id)


async def update_lnurldevicepayment(
    lnurldevicepayment_id: str, **kwargs
) -> Optional[lnurldevicepayment]:
    """Update the given columns (kwargs) of one payment row; return the row."""
    q = ", ".join([f"{field[0]} = ?" for field in kwargs.items()])
    await db.execute(
        f"UPDATE lnurldevice.lnurldevicepayment SET {q} WHERE id = ?",
        (*kwargs.values(), lnurldevicepayment_id),
    )
    row = await db.fetchone(
        "SELECT * FROM lnurldevice.lnurldevicepayment WHERE id = ?",
        (lnurldevicepayment_id,),
    )
    return lnurldevicepayment(**row) if row else None


async def get_lnurldevicepayment(lnurldevicepayment_id: str) -> lnurldevicepayment:
    """Fetch one payment by id (None when missing, despite the annotation)."""
    row = await db.fetchone(
        "SELECT * FROM lnurldevice.lnurldevicepayment WHERE id = ?",
        (lnurldevicepayment_id,),
    )
    return lnurldevicepayment(**row) if row else None


async def get_lnurlpayload(lnurldevicepayment_payload: str) -> lnurldevicepayment:
    """Fetch one payment by its payload value (None when missing)."""
    row = await db.fetchone(
        "SELECT * FROM lnurldevice.lnurldevicepayment WHERE payload = ?",
        (lnurldevicepayment_payload,),
    )
    return lnurldevicepayment(**row) if row else None
nilq/baby-python
python
#!/usr/bin/python3
# CLI: scrape BBC news articles for a given date — collect article URLs from
# the dracos.co.uk BBC archive, parse each page, and dump the results to
# artifacts/<date>.json.

import bs4
import json
import requests
import http
import re
import pandas as pd
from datetime import datetime
import argparse
import pathlib
from enum import Enum

MSG_LBL_BASE = '[PARSER]'
MSG_LBL_INFO = f'{MSG_LBL_BASE}[INFO]'
MSG_LBL_FAIL = f'{MSG_LBL_BASE}[FAIL]'


def parse_args():
    """Parse the command line (--date YYYY-MM-DD, required)."""
    parser = argparse.ArgumentParser(description='Parse news atricles from bbc.co.uk')
    parser.add_argument('--date', type=str, required=True, help='date string (YYYY-MM-DD)')
    args = parser.parse_args()
    return args


def get_page(url, filename=None):
    """GET a page; return its content on HTTP 200 (else log and return None).

    When ``filename`` is given, the prettified HTML is also written there
    (debugging aid).
    """
    response = requests.get(url, allow_redirects=True)
    status = response.status_code
    if status == 200:
        content = response.content
        if filename is not None:
            soup = bs4.BeautifulSoup(content, 'html.parser')
            with open(filename, 'w') as file:
                file.write(str(soup.prettify()))
        return content
    else:
        print(f'{MSG_LBL_FAIL} {url}: {http.client.responses[status]}')
        return None


def parse_article(page, url):
    """Extract article fields from an article page.

    Returns a dict with article_id (the URL), category (slug between the last
    '/' and last '-' of the URL), title, comma-joined tags (or None), and the
    concatenated text blocks; returns None if any lookup raises.
    """
    content = {}
    try:
        soup = bs4.BeautifulSoup(page, 'html.parser')
        # NOTE: 'attributes' is unused — appears to be leftover scaffolding.
        attributes = ['article_id', 'title', 'category', 'tags', 'text']
        content['article_id'] = url
        content['category'] = url[url.rfind('/')+1:url.rfind('-')]
        content['title'] = soup.find(id='main-heading').text.strip()
        tags = soup.find('section', attrs={'data-component' : 'tag-list'})
        if tags is not None:
            # Class name is a BBC-generated CSS hash; brittle against redesigns.
            tags = tags.find_all('a', attrs={'class' : 'ssrcss-1yno9a1-StyledLink ed0g1kj0'})
        if tags is not None:
            content['tags'] = ','.join([item.text for item in tags])
        else:
            content['tags'] = None
        text_blocks = soup.find_all('div', attrs={'data-component' : 'text-block'})
        if text_blocks is not None:
            text_blocks = [tb.text for tb in text_blocks]
            content['text'] = '\n'.join(text_blocks)
    except Exception as e:
        print(f'{MSG_LBL_FAIL} {e}')
        return None
    return content


def collect_urls(date):
    """Collect all BBC article URLs listed on the archive page for ``date``.

    Returns a set of URLs matching the http://www.bbc.co.uk/news/... pattern.
    """
    date = pd.to_datetime(date)
    collected = set()

    archive_url_base = 'https://dracos.co.uk/made/bbc-news-archive'
    article_url_regex_raw = '(?:^|\W)http:\/\/www\.bbc\.co\.uk\/news(?:^|\W)([a-z|-]+)+([0-9])+'
    article_url_regex = re.compile(article_url_regex_raw)

    year, month, day = str(date.date()).split('-')
    print(f'{MSG_LBL_INFO} Collecting articles for {year}/{month}/{day} ...')
    archive_url = f'{archive_url_base}/{year}/{month}/{day}/'
    page = get_page(archive_url)
    if page is not None:
        soup = bs4.BeautifulSoup(page, 'html.parser')
        urls_tags = soup.find_all('a')
        for tag in urls_tags:
            url = tag['href']
            if article_url_regex.match(url):
                collected.add(url)

    print(f'{MSG_LBL_INFO} Collected {len(collected)} articles links for {year}/{month}/{day},')
    return collected


def parse_urls(urls):
    """Download and parse every URL; skip those that fail to parse."""
    parsed = []
    total = len(urls)
    for i, url in enumerate(urls):
        print(f'{MSG_LBL_INFO} Parsing {url}, {i + 1}/{total}')
        article_page = get_page(url)
        article_content = parse_article(article_page, url)
        if article_content is not None:
            parsed.append(article_content)
    return parsed


def main():
    """Entry point: collect, parse, and dump the day's articles as JSON."""
    args = parse_args()
    print(f'{MSG_LBL_BASE} date - {args.date}')
    urls = collect_urls(args.date)
    parsed = parse_urls(urls)

    # Output goes next to this script under artifacts/<date>.json.
    my_path = pathlib.Path(__file__).parent.resolve()
    output_filename = f'{my_path}/artifacts/{args.date}.json'
    catalog = {'catalog': parsed}
    with open(output_filename, 'w') as fout:
        json.dump(catalog, fout, indent=2)


if __name__ == "__main__":
    main()
nilq/baby-python
python
import cv2
import numpy as np


# Histogram equalization (on the luma channel only, preserving color)
def hisEqulColor(img):
    """Equalize the histogram of a BGR image in place and return it.

    Converts to YCrCb, equalizes only the Y (luma) channel so chroma is
    untouched, then converts back into the original ``img`` buffer.
    """
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    channels = cv2.split(ycrcb)
    cv2.equalizeHist(channels[0], channels[0])
    cv2.merge(channels, ycrcb)
    # Third argument writes the result back into img (in-place update).
    cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR, img)
    return img


# Image preprocessing helper: adjust brightness and contrast.
# c: contrast factor, b: brightness offset
def contrast_brightness_image(img, c, b):
    """Return a copy of ``img`` with contrast scaled by c and brightness shifted by b."""
    h, w, ch = img.shape  # height / width / channel count
    # All-zero image of the same shape; blending against black implements
    # dst = img*c + b (addWeighted: src1*alpha + src2*beta + gamma).
    blank = np.zeros([h, w, ch], img.dtype)
    dst = cv2.addWeighted(img, c, blank, 1-c, b)
    return dst
nilq/baby-python
python
#!/usr/bin/env python3 import argparse import subprocess import TEST_LOG import TEST_SETUP_IBOFOS import TEST def parse_arguments(args): parser = argparse.ArgumentParser(description='Test journal feature with SPO') parser.add_argument('-f', '--fabric_ip', default=TEST.traddr,\ help='Set target IP, default: ' + TEST.traddr) parser.add_argument('-t', '--transport', default=TEST.trtype, help='Set transport, default: ' + TEST.trtype) parser.add_argument('-p', '--port', type=int, default=TEST.port, help='Set port, default: ' + str(TEST.port)) parser.add_argument('-l', '--log_dir', default=TEST.log_dir,\ help='Set path for log file, default: ' + TEST.log_dir) parser.add_argument('-q', '--quick_mode', default=TEST.quick_mode , action='store_true',\ help='Enable quick test mode, default: ' + str(TEST.quick_mode)) args = parser.parse_args() TEST.traddr = args.fabric_ip TEST.trtype = args.transport TEST.port = args.port TEST.log_dir = args.log_dir TEST.quick_mode = args.quick_mode def cleanup(): TEST_SETUP_IBOFOS.cleanup_process() def set_up(argv, test_name): parse_arguments(argv) TEST_LOG.setup_log(test_name) TEST_LOG.print_notice("[{} Started]".format(test_name)) cleanup() def tear_down(test_name): TEST_SETUP_IBOFOS.shutdown_ibofos() TEST_LOG.print_notice("[Test {} Completed]".format(test_name))
nilq/baby-python
python
""" Primitive operations for 3x3 orthonormal and 4x4 homogeneous matrices. Python implementation by: Luis Fernando Lara Tobar and Peter Corke. Based on original Robotics Toolbox for Matlab code by Peter Corke. Permission to use and copy is granted provided that acknowledgement of the authors is made. @author: Luis Fernando Lara Tobar and Peter Corke """ from numpy import * from utility import * from numpy.linalg import norm import Quaternion as Q def rotx(theta): """ Rotation about X-axis @type theta: number @param theta: the rotation angle @rtype: 3x3 orthonormal matrix @return: rotation about X-axis @see: L{roty}, L{rotz}, L{rotvec} """ ct = cos(theta) st = sin(theta) return mat([[1, 0, 0], [0, ct, -st], [0, st, ct]]) def roty(theta): """ Rotation about Y-axis @type theta: number @param theta: the rotation angle @rtype: 3x3 orthonormal matrix @return: rotation about Y-axis @see: L{rotx}, L{rotz}, L{rotvec} """ ct = cos(theta) st = sin(theta) return mat([[ct, 0, st], [0, 1, 0], [-st, 0, ct]]) def rotz(theta): """ Rotation about Z-axis @type theta: number @param theta: the rotation angle @rtype: 3x3 orthonormal matrix @return: rotation about Z-axis @see: L{rotx}, L{roty}, L{rotvec} """ ct = cos(theta) st = sin(theta) return mat([[ct, -st, 0], [st, ct, 0], [ 0, 0, 1]]) def trotx(theta): """ Rotation about X-axis @type theta: number @param theta: the rotation angle @rtype: 4x4 homogeneous matrix @return: rotation about X-axis @see: L{troty}, L{trotz}, L{rotx} """ return r2t(rotx(theta)) def troty(theta): """ Rotation about Y-axis @type theta: number @param theta: the rotation angle @rtype: 4x4 homogeneous matrix @return: rotation about Y-axis @see: L{troty}, L{trotz}, L{roty} """ return r2t(roty(theta)) def trotz(theta): """ Rotation about Z-axis @type theta: number @param theta: the rotation angle @rtype: 4x4 homogeneous matrix @return: rotation about Z-axis @see: L{trotx}, L{troty}, L{rotz} """ return r2t(rotz(theta)) ##################### Euler angles def 
tr2eul(m): """ Extract Euler angles. Returns a vector of Euler angles corresponding to the rotational part of the homogeneous transform. The 3 angles correspond to rotations about the Z, Y and Z axes respectively. @type m: 3x3 or 4x4 matrix @param m: the rotation matrix @rtype: 1x3 matrix @return: Euler angles [S{theta} S{phi} S{psi}] @see: L{eul2tr}, L{tr2rpy} """ try: m = mat(m) if ishomog(m): euler = mat(zeros((1,3))) if norm(m[0,2])<finfo(float).eps and norm(m[1,2])<finfo(float).eps: # singularity euler[0,0] = 0 sp = 0 cp = 1 euler[0,1] = arctan2(cp*m[0,2] + sp*m[1,2], m[2,2]) euler[0,2] = arctan2(-sp*m[0,0] + cp*m[1,0], -sp*m[0,1] + cp*m[1,1]) return euler else: euler[0,0] = arctan2(m[1,2],m[0,2]) sp = sin(euler[0,0]) cp = cos(euler[0,0]) euler[0,1] = arctan2(cp*m[0,2] + sp*m[1,2], m[2,2]) euler[0,2] = arctan2(-sp*m[0,0] + cp*m[1,0], -sp*m[0,1] + cp*m[1,1]) return euler except ValueError: euler = [] for i in range(0,len(m)): euler.append(tr2eul(m[i])) return euler def eul2r(phi, theta=None, psi=None): """ Rotation from Euler angles. Two call forms: - R = eul2r(S{theta}, S{phi}, S{psi}) - R = eul2r([S{theta}, S{phi}, S{psi}]) These correspond to rotations about the Z, Y, Z axes respectively. 
@type phi: number or list/array/matrix of angles @param phi: the first Euler angle, or a list/array/matrix of angles @type theta: number @param theta: the second Euler angle @type psi: number @param psi: the third Euler angle @rtype: 3x3 orthonormal matrix @return: R([S{theta} S{phi} S{psi}]) @see: L{tr2eul}, L{eul2tr}, L{tr2rpy} """ n = 1 if theta == None and psi==None: # list/array/matrix argument phi = mat(phi) if numcols(phi) != 3: error('bad arguments') else: n = numrows(phi) psi = phi[:,2] theta = phi[:,1] phi = phi[:,0] elif (theta!=None and psi==None) or (theta==None and psi!=None): error('bad arguments') elif not isinstance(phi,(int,int32,float,float64)): # all args are vectors phi = mat(phi) n = numrows(phi) theta = mat(theta) psi = mat(psi) if n>1: R = [] for i in range(0,n): r = rotz(phi[i,0]) * roty(theta[i,0]) * rotz(psi[i,0]) R.append(r) return R try: r = rotz(phi[0,0]) * roty(theta[0,0]) * rotz(psi[0,0]) return r except: r = rotz(phi) * roty(theta) * rotz(psi) return r def eul2tr(phi,theta=None,psi=None): """ Rotation from Euler angles. Two call forms: - R = eul2tr(S{theta}, S{phi}, S{psi}) - R = eul2tr([S{theta}, S{phi}, S{psi}]) These correspond to rotations about the Z, Y, Z axes respectively. @type phi: number or list/array/matrix of angles @param phi: the first Euler angle, or a list/array/matrix of angles @type theta: number @param theta: the second Euler angle @type psi: number @param psi: the third Euler angle @rtype: 4x4 homogenous matrix @return: R([S{theta} S{phi} S{psi}]) @see: L{tr2eul}, L{eul2r}, L{tr2rpy} """ return r2t( eul2r(phi, theta, psi) ) ################################## RPY angles def tr2rpy(m): """ Extract RPY angles. Returns a vector of RPY angles corresponding to the rotational part of the homogeneous transform. The 3 angles correspond to rotations about the Z, Y and X axes respectively. 
@type m: 3x3 or 4x4 matrix @param m: the rotation matrix @rtype: 1x3 matrix @return: RPY angles [S{theta} S{phi} S{psi}] @see: L{rpy2tr}, L{tr2eul} """ try: m = mat(m) if ishomog(m): rpy = mat(zeros((1,3))) if norm(m[0,0])<finfo(float).eps and norm(m[1,0])<finfo(float).eps: # singularity rpy[0,0] = 0 rpy[0,1] = arctan2(-m[2,0], m[0,0]) rpy[0,2] = arctan2(-m[1,2], m[1,1]) return rpy else: rpy[0,0] = arctan2(m[1,0],m[0,0]) sp = sin(rpy[0,0]) cp = cos(rpy[0,0]) rpy[0,1] = arctan2(-m[2,0], cp*m[0,0] + sp*m[1,0]) rpy[0,2] = arctan2(sp*m[0,2] - cp*m[1,2], cp*m[1,1] - sp*m[0,1]) return rpy except ValueError: rpy = [] for i in range(0,len(m)): rpy.append(tr2rpy(m[i])) return rpy def rpy2r(roll, pitch=None,yaw=None): """ Rotation from RPY angles. Two call forms: - R = rpy2r(S{theta}, S{phi}, S{psi}) - R = rpy2r([S{theta}, S{phi}, S{psi}]) These correspond to rotations about the Z, Y, X axes respectively. @type roll: number or list/array/matrix of angles @param roll: roll angle, or a list/array/matrix of angles @type pitch: number @param pitch: pitch angle @type yaw: number @param yaw: yaw angle @rtype: 4x4 homogenous matrix @return: R([S{theta} S{phi} S{psi}]) @see: L{tr2rpy}, L{rpy2r}, L{tr2eul} """ n=1 if pitch==None and yaw==None: roll= mat(roll) if numcols(roll) != 3: error('bad arguments') n = numrows(roll) pitch = roll[:,1] yaw = roll[:,2] roll = roll[:,0] if n>1: R = [] for i in range(0,n): r = rotz(roll[i,0]) * roty(pitch[i,0]) * rotx(yaw[i,0]) R.append(r) return R try: r = rotz(roll[0,0]) * roty(pitch[0,0]) * rotx(yaw[0,0]) return r except: r = rotz(roll) * roty(pitch) * rotx(yaw) return r def rpy2tr(roll, pitch=None, yaw=None): """ Rotation from RPY angles. Two call forms: - R = rpy2tr(r, p, y) - R = rpy2tr([r, p, y]) These correspond to rotations about the Z, Y, X axes respectively. 
@type roll: number or list/array/matrix of angles @param roll: roll angle, or a list/array/matrix of angles @type pitch: number @param pitch: pitch angle @type yaw: number @param yaw: yaw angle @rtype: 4x4 homogenous matrix @return: R([S{theta} S{phi} S{psi}]) @see: L{tr2rpy}, L{rpy2r}, L{tr2eul} """ return r2t( rpy2r(roll, pitch, yaw) ) ###################################### OA vector form def oa2r(o,a): """Rotation from 2 vectors. The matrix is formed from 3 vectors such that:: R = [N O A] and N = O x A. In robotics A is the approach vector, along the direction of the robot's gripper, and O is the orientation vector in the direction between the fingertips. The submatrix is guaranteed to be orthonormal so long as O and A are not parallel. @type o: 3-vector @param o: The orientation vector. @type a: 3-vector @param a: The approach vector @rtype: 3x3 orthonormal rotation matrix @return: Rotatation matrix @see: L{rpy2r}, L{eul2r} """ n = crossp(o, a) n = unit(n) o = crossp(a, n); o = unit(o).reshape(3,1) a = unit(a).reshape(3,1) return bmat('n o a') def oa2tr(o,a): """otation from 2 vectors. The rotation submatrix is formed from 3 vectors such that:: R = [N O A] and N = O x A. In robotics A is the approach vector, along the direction of the robot's gripper, and O is the orientation vector in the direction between the fingertips. The submatrix is guaranteed to be orthonormal so long as O and A are not parallel. @type o: 3-vector @param o: The orientation vector. @type a: 3-vector @param a: The approach vector @rtype: 4x4 homogeneous transformation matrix @return: Transformation matrix @see: L{rpy2tr}, L{eul2tr} """ return r2t(oa2r(o,a)) ###################################### angle/vector form def rotvec2r(theta, v): """ Rotation about arbitrary axis. Compute a rotation matrix representing a rotation of C{theta} about the vector C{v}. 
@type v: 3-vector @param v: rotation vector @type theta: number @param theta: the rotation angle @rtype: 3x3 orthonormal matrix @return: rotation @see: L{rotx}, L{roty}, L{rotz} """ v = arg2array(v); ct = cos(theta) st = sin(theta) vt = 1-ct r = mat([[ct, -v[2]*st, v[1]*st],\ [v[2]*st, ct, -v[0]*st],\ [-v[1]*st, v[0]*st, ct]]) return v*v.T*vt+r def rotvec2tr(theta, v): """ Rotation about arbitrary axis. Compute a rotation matrix representing a rotation of C{theta} about the vector C{v}. @type v: 3-vector @param v: rotation vector @type theta: number @param theta: the rotation angle @rtype: 4x4 homogeneous matrix @return: rotation @see: L{trotx}, L{troty}, L{trotz} """ return r2t(rotvec2r(theta, v)) ###################################### translational transform def transl(x, y=None, z=None): """ Create or decompose translational homogeneous transformations. Create a homogeneous transformation =================================== - T = transl(v) - T = transl(vx, vy, vz) The transformation is created with a unit rotation submatrix. The translational elements are set from elements of v which is a list, array or matrix, or from separate passed elements. Decompose a homogeneous transformation ====================================== - v = transl(T) Return the translation vector """ if y==None and z==None: x=mat(x) try: if ishomog(x): return x[0:3,3].reshape(3,1) else: return concatenate((concatenate((eye(3),x.reshape(3,1)),1),mat([0,0,0,1]))) except AttributeError: n=len(x) r = [[],[],[]] for i in range(n): r = concatenate((r,x[i][0:3,3]),1) return r elif y!=None and z!=None: return concatenate((concatenate((eye(3),mat([x,y,z]).T),1),mat([0,0,0,1]))) ###################################### Skew symmetric transform def skew(*args): """ Convert to/from skew-symmetric form. A skew symmetric matrix is a matrix such that M = -M' Two call forms -ss = skew(v) -v = skew(ss) The first form builds a 3x3 skew-symmetric from a 3-element vector v. 
The second form takes a 3x3 skew-symmetric matrix and returns the 3 unique elements that it contains. """ def ss(b): return matrix([ [0, -b[2], b[1]], [b[2], 0, -b[0]], [-b[1], b[0], 0]]); if len(args) == 1: # convert matrix to skew vector b = args[0]; if isrot(b): return 0.5*matrix( [b[2,1]-b[1,2], b[0,2]-b[2,0], b[1,0]-b[0,1]] ); elif ishomog(b): return vstack( (b[0:3,3], 0.5*matrix( [b[2,1]-b[1,2], b[0,2]-b[2,0], b[1,0]-b[0,1]] ).T) ); # build skew-symmetric matrix b = arg2array(b); if len(b) == 3: return ss(b); elif len(b) == 6: r = hstack( (ss(b[3:6]), mat(b[0:3]).T) ); r = vstack( (r, mat([0, 0, 0, 1])) ); return r; elif len(args) == 3: return ss(args); elif len(args) == 6: r = hstack( (ss(args[3:6]), mat(args[0:3]).T) ); r = vstack( (r, mat([0, 0, 0, 1])) ); return r; else: raise ValueError; def tr2diff(t1, t2): """ Convert a transform difference to differential representation. Returns the 6-element differential motion required to move from T1 to T2 in base coordinates. @type t1: 4x4 homogeneous transform @param t1: Initial value @type t2: 4x4 homogeneous transform @param t2: Final value @rtype: 6-vector @return: Differential motion [dx dy dz drx dry drz] @see: L{skew} """ t1 = mat(t1) t2 = mat(t2) d = concatenate( (t2[0:3,3]-t1[0:3,3], 0.5*( crossp(t1[0:3,0], t2[0:3,0]) + crossp(t1[0:3,1], t2[0:3,1]) + crossp(t1[0:3,2], t2[0:3,2]) ) )) return d ################################## Utility def trinterp(T0, T1, r): """ Interpolate homogeneous transformations. Compute a homogeneous transform interpolation between C{T0} and C{T1} as C{r} varies from 0 to 1 such that:: trinterp(T0, T1, 0) = T0 trinterp(T0, T1, 1) = T1 Rotation is interpolated using quaternion spherical linear interpolation. 
@type T0: 4x4 homogeneous transform @param T0: Initial value @type T1: 4x4 homogeneous transform @param T1: Final value @type r: number @param r: Interpolation index, in the range 0 to 1 inclusive @rtype: 4x4 homogeneous transform @return: Interpolated value @see: L{quaternion}, L{ctraj} """ q0 = Q.quaternion(T0) q1 = Q.quaternion(T1) p0 = transl(T0) p1 = transl(T1) qr = q0.interp(q1, r) pr = p0*(1-r) + r*p1 return vstack( (concatenate((qr.r(),pr),1), mat([0,0,0,1])) ) def trnorm(t): """ Normalize a homogeneous transformation. Finite word length arithmetic can cause transforms to become `unnormalized', that is the rotation submatrix is no longer orthonormal (det(R) != 1). The rotation submatrix is re-orthogonalized such that the approach vector (third column) is unchanged in direction:: N = O x A O = A x N @type t: 4x4 homogeneous transformation @param t: the transform matrix to convert @rtype: 3x3 orthonormal rotation matrix @return: rotation submatrix @see: L{oa2tr} @bug: Should work for 3x3 matrix as well. """ t = mat(t) # N O A n = crossp(t[0:3,1],t[0:3,2]) # N = O X A o = crossp(t[0:3,2],t[0:3,0]) # O = A x N return concatenate(( concatenate((unit(n),unit(t[0:3,1]),unit(t[0:3,2]),t[0:3,3]),1), mat([0,0,0,1]))) def t2r(T): """ Return rotational submatrix of a homogeneous transformation. @type T: 4x4 homogeneous transformation @param T: the transform matrix to convert @rtype: 3x3 orthonormal rotation matrix @return: rotation submatrix """ if ishomog(T)==False: error( 'input must be a homogeneous transform') return T[0:3,0:3] def r2t(R): """ Convert a 3x3 orthonormal rotation matrix to a 4x4 homogeneous transformation:: T = | R 0 | | 0 1 | @type R: 3x3 orthonormal rotation matrix @param R: the rotation matrix to convert @rtype: 4x4 homogeneous matrix @return: homogeneous equivalent """ return concatenate( (concatenate( (R, zeros((3,1))),1), mat([0,0,0,1])) )
nilq/baby-python
python
import matplotlib.pyplot as plt


def plot(activity, lon, lat, lonsmooth, latsmooth):
    """Render an activity track and save it as '<activity>.png'.

    The smoothed track is drawn as a line underneath the raw (lon, lat)
    points, on a dark full-bleed canvas with the axes hidden.
    """
    figure = plt.figure(facecolor='0.05')
    canvas = plt.Axes(figure, [0., 0., 1., 1.], )
    canvas.set_aspect('equal')
    canvas.set_axis_off()
    figure.add_axes(canvas)

    # Line first (zorder=1), raw points drawn on top of it (zorder=2).
    plt.plot(lonsmooth, latsmooth, '-', zorder=1)
    plt.scatter(lon, lat, s=1, color='red', edgecolor='', zorder=2)

    out_name = activity + '.png'
    plt.savefig(out_name,
                facecolor=figure.get_facecolor(),
                bbox_inches='tight',
                pad_inches=0,
                dpi=900)
nilq/baby-python
python
import argparse
from bothub_nlp_rasa_utils.train import train_update as train
from bothub_nlp_rasa_utils.evaluate_crossval import evaluate_crossval_update as evaluate_crossval
import os  # NOTE(review): appears unused in this snippet -- confirm before removing

# Command-line entry point for an AI-platform job: dispatches either a
# training run or a cross-validation evaluation for a repository version.
if __name__ == '__main__':
    PARSER = argparse.ArgumentParser()

    # Input Arguments
    PARSER.add_argument(
        '--operation',
        help='What operation will be done, "train" or "evaluate"')
    PARSER.add_argument(
        '--repository-version',
        help='The version of repository.',
        type=int)
    PARSER.add_argument(
        '--by-id',
        help='.',
        type=int)
    PARSER.add_argument(
        '--repository-authorization',
        help='Repository authorization string.')
    ARGUMENTS, _ = PARSER.parse_known_args()

    # Run the job
    # NOTE(review): any operation other than "train"/"evaluate" is silently
    # ignored -- presumably intentional for this job wrapper, but verify.
    if ARGUMENTS.operation == "train":
        train(ARGUMENTS.repository_version, ARGUMENTS.by_id, ARGUMENTS.repository_authorization, from_queue='ai-platform')
    elif ARGUMENTS.operation == "evaluate":
        evaluate_crossval(ARGUMENTS.repository_version, ARGUMENTS.by_id, ARGUMENTS.repository_authorization, from_queue='ai-platform')
nilq/baby-python
python
from pathlib import Path
import csv

__all__ = ["text_writer", "str_file_read", "file_locate"]


def str_file_read(file_path, encoding="utf-8"):
    """
    Return a text file's contents as a string.

    Parameters
    ----------
    file_path : str
        Path of the file to read
    encoding : str
        Text encoding to use. (Default: "utf-8")

    Returns
    -------
    contents : str
        The file's contents as a single string
    """
    file_path = Path(file_path)
    return file_path.read_text(encoding=encoding)


def file_locate(folder_path, file_extension=None, return_paths=True):
    """
    Locate files within a folder and return their paths (or names) in a list.

    Parameters
    ----------
    folder_path : str
        Path to the folder to search (non-recursive)
    file_extension : str, optional
        File extension filter, including the dot (example: ".txt").
        None matches every file in the folder.
    return_paths : bool
        True returns path strings; False returns file names only.

    Returns
    -------
    list of str
        Paths (or names) of the matching files.

    Raises
    ------
    Exception
        If no matching files are found in the folder.  (The previous
        docstring claimed None was returned in this case; the function
        has always raised.)
    """
    folder_path = Path(folder_path)

    # Collect regular files, optionally filtered by extension.
    if file_extension is None:
        file_paths = [item for item in folder_path.iterdir() if item.is_file()]
    else:
        file_paths = [item for item in folder_path.iterdir()
                      if item.is_file() and item.suffix == file_extension]

    # Fail early when nothing matched, before building the result.
    if len(file_paths) == 0:
        raise Exception(f"No Files of '{file_extension}' type were found in '{folder_path}'")

    if return_paths is True:
        result = [str(path) for path in file_paths]
    elif return_paths is False:
        result = [file.name for file in file_paths]
    else:
        # preserved quirk: a non-bool return_paths yields None
        result = None

    return result


def text_writer(file_path, content_str, encoding="utf-8"):
    """
    Write a string to a file, overwriting any existing content.

    Parameters
    ----------
    file_path : str
    content_str : str
    encoding : str
        Text encoding to use.  (Default: "utf-8", matching
        str_file_read so that write/read round-trips are consistent;
        previously the platform default encoding was used.)

    Returns
    -------
    None
    """
    with open(file_path, "w", encoding=encoding) as f:
        f.write(content_str)


def read_csv(file_path, delimiter=",", encoding="utf-8"):
    """
    Read a CSV file into a list of rows (each row a list of strings).

    Parameters
    ----------
    file_path : str
    delimiter : str
    encoding : str

    Returns
    -------
    list of list of str
    """
    # newline="" lets the csv module do its own newline handling,
    # as required by the csv documentation.
    with open(file_path, encoding=encoding, newline="") as file:
        data_in = list(csv.reader(file, delimiter=delimiter))
    return data_in


def write_csv(file_path, csv_data, delimiter=","):
    """
    Write a 2D collection to a CSV file.

    Parameters
    ----------
    file_path : str
    csv_data : collection
        Iterable of rows, each row an iterable of values.
    delimiter : str

    Returns
    -------
    None
    """
    # newline="" prevents the platform from translating the '\n'
    # line terminator written by the csv writer.
    with open(file_path, "w", newline="") as csv_out:
        writer = csv.writer(csv_out, delimiter=delimiter, lineterminator='\n')
        for row in csv_data:
            writer.writerow(row)
nilq/baby-python
python
"""Tests for seqeval's tagging-scheme machinery: Entity/Token/Tokens and the
IOB1/IOB2/IOE1/IOE2/IOBES/BILOU schemes, plus scheme auto-detection.

The parametrize tables enumerate, for each scheme, every (previous tag,
current tag) transition and the expected start/inside/end flags, and every
short tag sequence with the entities that should be extracted from it.
"""
import pytest

from seqeval.scheme import (BILOU, IOB1, IOB2, IOBES, IOE1, IOE2, Entities,
                            Entity, Prefix, Token, Tokens, auto_detect)


def test_entity_repr():
    data = (0, 0, 0, 0)
    entity = Entity(*data)
    assert str(data) == str(entity)


@pytest.mark.parametrize(
    'data1, data2, expected',
    [
        ((0, 0, 0, 0), (0, 0, 0, 0), True),
        ((1, 0, 0, 0), (0, 0, 0, 0), False),
        ((0, 1, 0, 0), (0, 0, 0, 0), False),
        ((0, 0, 1, 0), (0, 0, 0, 0), False),
        ((0, 0, 0, 1), (0, 0, 0, 0), False)
    ]
)
def test_entity_equality(data1, data2, expected):
    entity1 = Entity(*data1)
    entity2 = Entity(*data2)
    is_equal = entity1 == entity2
    assert is_equal == expected


@pytest.mark.parametrize(
    'sequences, tag_name, expected',
    [
        ([['B-PER', 'B-ORG']], '', set()),
        ([['B-PER', 'B-ORG']], 'ORG', {Entity(0, 1, 2, 'ORG')}),
        ([['B-PER', 'B-ORG']], 'PER', {Entity(0, 0, 1, 'PER')})
    ]
)
def test_entities_filter(sequences, tag_name, expected):
    entities = Entities(sequences, IOB2)
    filtered = entities.filter(tag_name)
    assert filtered == expected


@pytest.mark.parametrize(
    'token, suffix, expected',
    [
        ('I-MISC', False, Prefix.I),
        ('B-MISC', False, Prefix.B),
        ('O', False, Prefix.O),
        ('MISC-I', True, Prefix.I),
        ('MISC-B', True, Prefix.B),
        ('O', True, Prefix.O)
    ]
)
def test_token_prefix(token, suffix, expected):
    token = Token(token, suffix=suffix)
    prefix = token.prefix
    assert prefix == expected


@pytest.mark.parametrize(
    'token, suffix, expected',
    [
        ('I-MISC', False, 'MISC'),
        ('MISC-I', True, 'MISC'),
        ('I', False, '_'),
        ('O', False, '_'),
        ('I-ORG-COMPANY', False, 'ORG-COMPANY'),
        ('ORG-COMPANY-I', True, 'ORG-COMPANY'),
        ('I-組織', False, '組織')
    ]
)
def test_token_tag(token, suffix, expected):
    token = Token(token, suffix=suffix)
    tag = token.tag
    assert tag == expected


# Shared helper for the per-scheme transition tests below: checks the
# is_start/is_inside/is_end flags of `token` given the previous token.
def expects_start_inside_end_to_be_correct(prev, token, expected, scheme):
    prev = scheme(prev)
    token = scheme(token)
    is_start = token.is_start(prev)
    is_inside = token.is_inside(prev)
    is_end = token.is_end(prev)
    actual = [is_start, is_inside, is_end]
    assert actual == expected


@pytest.mark.parametrize(
    'prev, token, expected',
    [
        ('O', 'O', [False, False, False]),
        ('O', 'I-PER', [True, False, False]),
        ('O', 'B-PER', [False, False, False]),
        ('I-PER', 'O', [False, False, True]),
        ('I-PER', 'I-PER', [False, True, False]),
        ('I-PER', 'I-ORG', [True, False, True]),
        ('I-PER', 'B-PER', [True, False, True]),
        ('I-PER', 'B-ORG', [False, False, True]),
        ('B-PER', 'O', [False, False, True]),
        ('B-PER', 'I-PER', [True, True, False]),
        ('B-PER', 'I-ORG', [True, False, True]),
        ('B-PER', 'B-PER', [True, False, True]),
        ('B-PER', 'B-ORG', [False, False, False])
    ]
)
def test_iob1_start_inside_end(prev, token, expected):
    expects_start_inside_end_to_be_correct(prev, token, expected, IOB1)


@pytest.mark.parametrize(
    'prev, token, expected',
    [
        ('O', 'O', [False, False, False]),
        ('O', 'I-PER', [False, False, False]),
        ('O', 'B-PER', [True, False, False]),
        ('I-PER', 'O', [False, False, True]),
        ('I-PER', 'I-PER', [False, True, False]),
        ('I-PER', 'I-ORG', [False, False, True]),
        ('I-PER', 'B-PER', [True, False, True]),
        ('I-PER', 'B-ORG', [True, False, True]),
        ('B-PER', 'O', [False, False, True]),
        ('B-PER', 'I-PER', [False, True, False]),
        ('B-PER', 'I-ORG', [False, False, True]),
        ('B-PER', 'B-PER', [True, False, True]),
        ('B-PER', 'B-ORG', [True, False, True])
    ]
)
def test_iob2_start_inside_end(prev, token, expected):
    expects_start_inside_end_to_be_correct(prev, token, expected, IOB2)


@pytest.mark.parametrize(
    'prev, token, expected',
    [
        ('O', 'O', [False, False, False]),
        ('O', 'I-PER', [True, False, False]),
        ('O', 'E-PER', [False, False, False]),
        ('I-PER', 'O', [False, False, True]),
        ('I-PER', 'I-PER', [False, True, False]),
        ('I-PER', 'I-ORG', [True, False, True]),
        ('I-PER', 'E-PER', [False, True, False]),
        ('I-PER', 'E-ORG', [False, False, True]),
        ('E-PER', 'O', [False, False, False]),
        ('E-PER', 'I-PER', [True, False, True]),
        ('E-PER', 'I-ORG', [True, False, False]),
        ('E-PER', 'E-PER', [True, False, True]),
        ('E-PER', 'E-ORG', [False, False, False])
    ]
)
def test_ioe1_start_inside_end(prev, token, expected):
    expects_start_inside_end_to_be_correct(prev, token, expected, IOE1)


@pytest.mark.parametrize(
    'prev, token, expected',
    [
        ('O', 'O', [False, False, False]),
        ('O', 'I-PER', [True, False, False]),
        ('O', 'E-PER', [True, False, False]),
        ('I-PER', 'O', [False, False, False]),
        ('I-PER', 'I-PER', [False, True, False]),
        ('I-PER', 'I-ORG', [True, False, False]),
        ('I-PER', 'E-PER', [False, True, False]),
        ('I-PER', 'E-ORG', [True, False, False]),
        ('E-PER', 'O', [False, False, True]),
        ('E-PER', 'I-PER', [True, False, True]),
        ('E-PER', 'I-ORG', [True, False, True]),
        ('E-PER', 'E-PER', [True, False, True]),
        ('E-PER', 'E-ORG', [True, False, True])
    ]
)
def test_ioe2_start_inside_end(prev, token, expected):
    expects_start_inside_end_to_be_correct(prev, token, expected, IOE2)


@pytest.mark.parametrize(
    'prev, token, expected',
    [
        ('O', 'O', [False, False, False]),
        ('O', 'I-PER', [False, False, False]),
        ('O', 'B-PER', [True, False, False]),
        ('O', 'E-PER', [False, False, False]),
        ('O', 'S-PER', [True, False, False]),
        ('I-PER', 'O', [False, False, False]),
        ('I-PER', 'I-PER', [False, True, False]),
        ('I-PER', 'I-ORG', [False, False, False]),
        ('I-PER', 'B-PER', [True, False, False]),
        ('I-PER', 'E-PER', [False, True, False]),
        ('I-PER', 'E-ORG', [False, False, False]),
        ('I-PER', 'S-PER', [True, False, False]),
        ('B-PER', 'O', [False, False, False]),
        ('B-PER', 'I-PER', [False, True, False]),
        ('B-PER', 'I-ORG', [False, False, False]),
        ('B-PER', 'E-PER', [False, True, False]),
        ('B-PER', 'E-ORG', [False, False, False]),
        ('B-PER', 'S-PER', [True, False, False]),
        ('E-PER', 'O', [False, False, True]),
        ('E-PER', 'I-PER', [False, False, True]),
        ('E-PER', 'B-PER', [True, False, True]),
        ('E-PER', 'E-PER', [False, False, True]),
        ('E-PER', 'S-PER', [True, False, True]),
        ('S-PER', 'O', [False, False, True]),
        ('S-PER', 'I-PER', [False, False, True]),
        ('S-PER', 'B-PER', [True, False, True]),
        ('S-PER', 'E-PER', [False, False, True]),
        ('S-PER', 'S-PER', [True, False, True])
    ]
)
def test_iobes_start_inside_end(prev, token, expected):
    expects_start_inside_end_to_be_correct(prev, token, expected, IOBES)


@pytest.mark.parametrize(
    'prev, token, expected',
    [
        ('O', 'O', [False, False, False]),
        ('O', 'I-PER', [False, False, False]),
        ('O', 'B-PER', [True, False, False]),
        ('O', 'L-PER', [False, False, False]),
        ('O', 'U-PER', [True, False, False]),
        ('I-PER', 'O', [False, False, False]),
        ('I-PER', 'I-PER', [False, True, False]),
        ('I-PER', 'I-ORG', [False, False, False]),
        ('I-PER', 'B-PER', [True, False, False]),
        ('I-PER', 'L-PER', [False, True, False]),
        ('I-PER', 'L-ORG', [False, False, False]),
        ('I-PER', 'U-PER', [True, False, False]),
        ('B-PER', 'O', [False, False, False]),
        ('B-PER', 'I-PER', [False, True, False]),
        ('B-PER', 'I-ORG', [False, False, False]),
        ('B-PER', 'L-PER', [False, True, False]),
        ('B-PER', 'L-ORG', [False, False, False]),
        ('B-PER', 'U-PER', [True, False, False]),
        ('L-PER', 'O', [False, False, True]),
        ('L-PER', 'I-PER', [False, False, True]),
        ('L-PER', 'B-PER', [True, False, True]),
        ('L-PER', 'L-PER', [False, False, True]),
        ('L-PER', 'U-PER', [True, False, True]),
        ('U-PER', 'O', [False, False, True]),
        ('U-PER', 'I-PER', [False, False, True]),
        ('U-PER', 'B-PER', [True, False, True]),
        ('U-PER', 'L-PER', [False, False, True]),
        ('U-PER', 'U-PER', [True, False, True])
    ]
)
def test_bilou_start_inside_end(prev, token, expected):
    expects_start_inside_end_to_be_correct(prev, token, expected, BILOU)


@pytest.mark.parametrize(
    'tokens, expected',
    [
        ([], []),
        (['B-PER'], []),
        (['I-PER'], [('PER', 0, 1)]),
        (['O'], []),
        (['O', 'I-PER'], [('PER', 1, 2)]),
        (['O', 'B-PER'], []),
        (['I-PER', 'O'], [('PER', 0, 1)]),
        (['I-PER', 'I-PER'], [('PER', 0, 2)]),
        (['I-PER', 'I-ORG'], [('PER', 0, 1), ('ORG', 1, 2)]),
        (['I-PER', 'B-PER'], [('PER', 0, 1), ('PER', 1, 2)]),
        (['I-PER', 'B-ORG'], [('PER', 0, 1)]),
        (['B-PER', 'O'], []),
        (['B-PER', 'I-PER'], [('PER', 1, 2)]),
        (['B-PER', 'I-ORG'], [('ORG', 1, 2)]),
        (['B-PER', 'B-PER'], [('PER', 1, 2)]),
        (['B-PER', 'B-ORG'], [])
    ]
)
def test_iob1_tokens(tokens, expected):
    tokens = Tokens(tokens, IOB1)
    entities = [entity.to_tuple()[1:] for entity in tokens.entities]
    assert entities == expected


@pytest.mark.parametrize(
    'tokens, expected',
    [
        ([], []),
        (['B'], []),
        (['I'], [('_', 0, 1)]),
        (['O'], []),
        (['O', 'O'], []),
        (['O', 'I'], [('_', 1, 2)]),
        (['O', 'B'], []),
        (['I', 'O'], [('_', 0, 1)]),
        (['I', 'I'], [('_', 0, 2)]),
        (['I', 'B'], [('_', 0, 1), ('_', 1, 2)]),
        (['B', 'O'], []),
        (['B', 'I'], [('_', 1, 2)]),
        (['B', 'B'], [('_', 1, 2)])
    ]
)
def test_iob1_tokens_without_tag(tokens, expected):
    tokens = Tokens(tokens, IOB1)
    entities = [entity.to_tuple()[1:] for entity in tokens.entities]
    assert entities == expected


@pytest.mark.parametrize(
    'tokens, expected',
    [
        ([], []),
        (['B-PER'], [('PER', 0, 1)]),
        (['I-PER'], []),
        (['O'], []),
        (['O', 'I-PER'], []),
        (['O', 'B-PER'], [('PER', 1, 2)]),
        (['I-PER', 'O'], []),
        (['I-PER', 'I-PER'], []),
        (['I-PER', 'I-ORG'], []),
        (['I-PER', 'B-PER'], [('PER', 1, 2)]),
        (['I-PER', 'B-ORG'], [('ORG', 1, 2)]),
        (['B-PER', 'O'], [('PER', 0, 1)]),
        (['B-PER', 'I-PER'], [('PER', 0, 2)]),
        (['B-PER', 'I-ORG'], [('PER', 0, 1)]),
        (['B-PER', 'B-PER'], [('PER', 0, 1), ('PER', 1, 2)]),
        (['B-PER', 'B-ORG'], [('PER', 0, 1), ('ORG', 1, 2)])
    ]
)
def test_iob2_tokens(tokens, expected):
    tokens = Tokens(tokens, IOB2)
    entities = [entity.to_tuple()[1:] for entity in tokens.entities]
    assert entities == expected


@pytest.mark.parametrize(
    'tokens, expected',
    [
        ([], []),
        (['B'], [('_', 0, 1)]),
        (['I'], []),
        (['O'], []),
        (['O', 'O'], []),
        (['O', 'I'], []),
        (['O', 'B'], [('_', 1, 2)]),
        (['I', 'O'], []),
        (['I', 'I'], []),
        (['I', 'B'], [('_', 1, 2)]),
        (['B', 'O'], [('_', 0, 1)]),
        (['B', 'I'], [('_', 0, 2)]),
        (['B', 'B'], [('_', 0, 1), ('_', 1, 2)])
    ]
)
def test_iob2_tokens_without_tag(tokens, expected):
    tokens = Tokens(tokens, IOB2)
    entities = [entity.to_tuple()[1:] for entity in tokens.entities]
    assert entities == expected


@pytest.mark.parametrize(
    'tokens, expected',
    [
        ([], []),
        (['E-PER'], []),
        (['I-PER'], [('PER', 0, 1)]),
        (['O'], []),
        (['O', 'I-PER'], [('PER', 1, 2)]),
        (['O', 'E-PER'], []),
        (['I-PER', 'O'], [('PER', 0, 1)]),
        (['I-PER', 'I-PER'], [('PER', 0, 2)]),
        (['I-PER', 'I-ORG'], [('PER', 0, 1), ('ORG', 1, 2)]),
        # (['I-PER', 'E-PER'], [('PER', 0, 1)]),
        (['I-PER', 'E-ORG'], [('PER', 0, 1)]),
        (['E-PER', 'O'], []),
        (['E-PER', 'I-PER'], [('PER', 1, 2)]),
        (['E-PER', 'I-ORG'], [('ORG', 1, 2)]),
        (['E-PER', 'E-PER'], []),
        (['E-PER', 'E-ORG'], [])
    ]
)
def test_ioe1_tokens(tokens, expected):
    tokens = Tokens(tokens, IOE1)
    entities = [entity.to_tuple()[1:] for entity in tokens.entities]
    assert entities == expected


@pytest.mark.parametrize(
    'tokens, expected',
    [
        ([], []),
        (['E'], []),
        (['I'], [('_', 0, 1)]),
        (['O'], []),
        (['O', 'O'], []),
        (['O', 'I'], [('_', 1, 2)]),
        (['O', 'E'], []),
        (['I', 'O'], [('_', 0, 1)]),
        (['I', 'I'], [('_', 0, 2)]),
        # (['I', 'E'], [('_', 0, 1)]),
        (['E', 'O'], []),
        (['E', 'I'], [('_', 1, 2)]),
        (['E', 'E'], [])
    ]
)
def test_ioe1_tokens_without_tag(tokens, expected):
    tokens = Tokens(tokens, IOE1)
    entities = [entity.to_tuple()[1:] for entity in tokens.entities]
    assert entities == expected


@pytest.mark.parametrize(
    'tokens, expected',
    [
        ([], []),
        (['E-PER'], [('PER', 0, 1)]),
        (['I-PER'], []),
        (['O'], []),
        (['O', 'I-PER'], []),
        (['O', 'E-PER'], [('PER', 1, 2)]),
        (['I-PER', 'O'], []),
        (['I-PER', 'I-PER'], []),
        (['I-PER', 'I-ORG'], []),
        (['I-PER', 'E-PER'], [('PER', 0, 2)]),
        (['I-PER', 'E-ORG'], [('ORG', 1, 2)]),
        (['E-PER', 'O'], [('PER', 0, 1)]),
        (['E-PER', 'I-PER'], [('PER', 0, 1)]),
        (['E-PER', 'I-ORG'], [('PER', 0, 1)]),
        (['E-PER', 'E-PER'], [('PER', 0, 1), ('PER', 1, 2)]),
        (['E-PER', 'E-ORG'], [('PER', 0, 1), ('ORG', 1, 2)])
    ]
)
def test_ioe2_tokens(tokens, expected):
    tokens = Tokens(tokens, IOE2)
    entities = [entity.to_tuple()[1:] for entity in tokens.entities]
    assert entities == expected


@pytest.mark.parametrize(
    'tokens, expected',
    [
        ([], []),
        (['E'], [('_', 0, 1)]),
        (['I'], []),
        (['O'], []),
        (['O', 'O'], []),
        (['O', 'I'], []),
        (['O', 'E'], [('_', 1, 2)]),
        (['I', 'O'], []),
        (['I', 'I'], []),
        (['I', 'E'], [('_', 0, 2)]),
        (['E', 'O'], [('_', 0, 1)]),
        (['E', 'I'], [('_', 0, 1)]),
        (['E', 'E'], [('_', 0, 1), ('_', 1, 2)])
    ]
)
def test_ioe2_tokens_without_tag(tokens, expected):
    tokens = Tokens(tokens, IOE2)
    entities = [entity.to_tuple()[1:] for entity in tokens.entities]
    assert entities == expected


@pytest.mark.parametrize(
    'tokens, expected',
    [
        (['O'], []),
        (['I-PER'], []),
        (['B-PER'], []),
        (['E-PER'], []),
        (['S-PER'], [('PER', 0, 1)]),
        (['O', 'O'], []),
        (['O', 'I-PER'], []),
        (['O', 'B-PER'], []),
        (['O', 'E-PER'], []),
        (['O', 'S-PER'], [('PER', 1, 2)]),
        (['I-PER', 'O'], []),
        (['I-PER', 'I-PER'], []),
        (['I-PER', 'I-ORG'], []),
        (['I-PER', 'B-PER'], []),
        (['I-PER', 'E-PER'], []),
        (['I-PER', 'E-ORG'], []),
        (['I-PER', 'S-PER'], [('PER', 1, 2)]),
        (['B-PER', 'O'], []),
        (['B-PER', 'I-PER'], []),
        (['B-PER', 'I-ORG'], []),
        (['B-PER', 'B-PER'], []),
        (['B-PER', 'E-PER'], [('PER', 0, 2)]),
        (['B-PER', 'E-ORG'], []),
        (['B-PER', 'S-PER'], [('PER', 1, 2)]),
        (['E-PER', 'O'], []),
        (['E-PER', 'I-PER'], []),
        (['E-PER', 'B-PER'], []),
        (['E-PER', 'E-PER'], []),
        (['E-PER', 'S-PER'], [('PER', 1, 2)]),
        (['S-PER', 'O'], [('PER', 0, 1)]),
        (['S-PER', 'I-PER'], [('PER', 0, 1)]),
        (['S-PER', 'B-PER'], [('PER', 0, 1)]),
        (['S-PER', 'E-PER'], [('PER', 0, 1)]),
        (['S-PER', 'S-PER'], [('PER', 0, 1), ('PER', 1, 2)])
    ]
)
def test_iobes_tokens(tokens, expected):
    tokens = Tokens(tokens, IOBES)
    entities = [entity.to_tuple()[1:] for entity in tokens.entities]
    assert entities == expected


@pytest.mark.parametrize(
    'tokens, expected',
    [
        (['O'], []),
        (['I'], []),
        (['B'], []),
        (['E'], []),
        (['S'], [('_', 0, 1)]),
        (['O', 'O'], []),
        (['O', 'I'], []),
        (['O', 'B'], []),
        (['O', 'E'], []),
        (['O', 'S'], [('_', 1, 2)]),
        (['I', 'O'], []),
        (['I', 'I'], []),
        (['I', 'B'], []),
        (['I', 'E'], []),
        (['I', 'S'], [('_', 1, 2)]),
        (['B', 'O'], []),
        (['B', 'I'], []),
        (['B', 'B'], []),
        (['B', 'E'], [('_', 0, 2)]),
        (['B', 'S'], [('_', 1, 2)]),
        (['E', 'O'], []),
        (['E', 'I'], []),
        (['E', 'B'], []),
        (['E', 'E'], []),
        (['E', 'S'], [('_', 1, 2)]),
        (['S', 'O'], [('_', 0, 1)]),
        (['S', 'I'], [('_', 0, 1)]),
        (['S', 'B'], [('_', 0, 1)]),
        (['S', 'E'], [('_', 0, 1)]),
        (['S', 'S'], [('_', 0, 1), ('_', 1, 2)])
    ]
)
def test_iobes_tokens_without_tag(tokens, expected):
    tokens = Tokens(tokens, IOBES)
    entities = [entity.to_tuple()[1:] for entity in tokens.entities]
    assert entities == expected


@pytest.mark.parametrize(
    'tokens, expected',
    [
        (['O'], []),
        (['I-PER'], []),
        (['B-PER'], []),
        (['L-PER'], []),
        (['U-PER'], [('PER', 0, 1)]),
        (['O', 'O'], []),
        (['O', 'I-PER'], []),
        (['O', 'B-PER'], []),
        (['O', 'L-PER'], []),
        (['O', 'U-PER'], [('PER', 1, 2)]),
        (['I-PER', 'O'], []),
        (['I-PER', 'I-PER'], []),
        (['I-PER', 'I-ORG'], []),
        (['I-PER', 'B-PER'], []),
        (['I-PER', 'L-PER'], []),
        (['I-PER', 'L-ORG'], []),
        (['I-PER', 'U-PER'], [('PER', 1, 2)]),
        (['B-PER', 'O'], []),
        (['B-PER', 'I-PER'], []),
        (['B-PER', 'I-ORG'], []),
        (['B-PER', 'B-PER'], []),
        (['B-PER', 'L-PER'], [('PER', 0, 2)]),
        (['B-PER', 'L-ORG'], []),
        (['B-PER', 'U-PER'], [('PER', 1, 2)]),
        (['L-PER', 'O'], []),
        (['L-PER', 'I-PER'], []),
        (['L-PER', 'B-PER'], []),
        (['L-PER', 'L-PER'], []),
        (['L-PER', 'U-PER'], [('PER', 1, 2)]),
        (['U-PER', 'O'], [('PER', 0, 1)]),
        (['U-PER', 'I-PER'], [('PER', 0, 1)]),
        (['U-PER', 'B-PER'], [('PER', 0, 1)]),
        (['U-PER', 'L-PER'], [('PER', 0, 1)]),
        (['U-PER', 'U-PER'], [('PER', 0, 1), ('PER', 1, 2)])
    ]
)
def test_bilou_tokens(tokens, expected):
    tokens = Tokens(tokens, BILOU)
    entities = [entity.to_tuple()[1:] for entity in tokens.entities]
    assert entities == expected


@pytest.mark.parametrize(
    'tokens, expected',
    [
        (['O'], []),
        (['I'], []),
        (['B'], []),
        (['L'], []),
        (['U'], [('_', 0, 1)]),
        (['O', 'O'], []),
        (['O', 'I'], []),
        (['O', 'B'], []),
        (['O', 'L'], []),
        (['O', 'U'], [('_', 1, 2)]),
        (['I', 'O'], []),
        (['I', 'I'], []),
        (['I', 'B'], []),
        (['I', 'L'], []),
        (['I', 'U'], [('_', 1, 2)]),
        (['B', 'O'], []),
        (['B', 'I'], []),
        (['B', 'B'], []),
        (['B', 'L'], [('_', 0, 2)]),
        (['B', 'U'], [('_', 1, 2)]),
        (['L', 'O'], []),
        (['L', 'I'], []),
        (['L', 'B'], []),
        (['L', 'L'], []),
        (['L', 'U'], [('_', 1, 2)]),
        (['U', 'O'], [('_', 0, 1)]),
        (['U', 'I'], [('_', 0, 1)]),
        (['U', 'B'], [('_', 0, 1)]),
        (['U', 'L'], [('_', 0, 1)]),
        (['U', 'U'], [('_', 0, 1), ('_', 1, 2)])
    ]
)
def test_bilou_tokens_without_tag(tokens, expected):
    tokens = Tokens(tokens, BILOU)
    entities = [entity.to_tuple()[1:] for entity in tokens.entities]
    assert entities == expected


class TestToken:

    # NOTE(review): the name says TypeError but the assertion expects
    # KeyError -- the test body is what the library actually raises.
    def test_raises_type_error_if_input_is_binary_string(self):
        with pytest.raises(KeyError):
            token = Token('I-組織'.encode('utf-8'))

    def test_raises_index_error_if_input_is_empty_string(self):
        with pytest.raises(IndexError):
            token = Token('')

    def test_representation(self):
        token = Token('B-ORG')
        assert 'B-ORG' == str(token)


class TestIOB2Token:

    def test_invalid_prefix(self):
        with pytest.raises(KeyError):
            token = IOB2('T')


@pytest.mark.parametrize(
    'token, scheme',
    [
        ('I', IOB1), ('O', IOB1), ('B', IOB1),
        ('I', IOB2), ('O', IOB2), ('B', IOB2),
        ('I', IOE1), ('O', IOE1), ('E', IOE1),
        ('I', IOE2), ('O', IOE2), ('E', IOE2),
        ('I', IOBES), ('O', IOBES), ('B', IOBES), ('E', IOBES), ('S', IOBES)
    ]
)
def test_valid_prefix(token, scheme):
    token = scheme(token)
    is_valid = token.is_valid
    assert is_valid


@pytest.mark.parametrize(
    'token, scheme',
    [
        ('E', IOB1), ('S', IOB1),
        ('E', IOB2), ('S', IOB2),
        ('B', IOE1), ('S', IOE1),
        ('B', IOE2), ('S', IOE2)
    ]
)
def test_invalid_prefix(token, scheme):
    token = scheme(token)
    with pytest.raises(ValueError):
        token.is_valid()


class TestTokens:

    def test_raise_exception_when_iobes_tokens_with_iob2_scheme(self):
        tokens = Tokens(['B-PER', 'E-PER', 'S-PER'], IOB2)
        with pytest.raises(ValueError):
            entities = tokens.entities


class TestAutoDetect:

    @pytest.mark.parametrize(
        'sequences, expected',
        [
            ([['B', 'I', 'O']], IOB2),
            ([['B', 'I']], IOB2),
            ([['B', 'O']], IOB2),
            ([['B']], IOB2),
            ([['I', 'O', 'E']], IOE2),
            ([['I', 'E']], IOE2),
            ([['E', 'O']], IOE2),
            ([['E']], IOE2),
            ([['I', 'O', 'B', 'E', 'S']], IOBES),
            ([['I', 'B', 'E', 'S']], IOBES),
            ([['I', 'O', 'B', 'E']], IOBES),
            ([['O', 'B', 'E', 'S']], IOBES),
            ([['I', 'B', 'E']], IOBES),
            ([['B', 'E', 'S']], IOBES),
            ([['O', 'B', 'E']], IOBES),
            ([['B', 'E']], IOBES),
            ([['S']], IOBES),
            ([['I', 'O', 'B', 'L', 'U']], BILOU),
            ([['I', 'B', 'L', 'U']], BILOU),
            ([['I', 'O', 'B', 'L']], BILOU),
            ([['O', 'B', 'L', 'U']], BILOU),
            ([['I', 'B', 'L']], BILOU),
            ([['B', 'L', 'U']], BILOU),
            ([['O', 'B', 'L']], BILOU),
            ([['B', 'L']], BILOU),
            ([['U']], BILOU)
        ]
    )
    def test_valid_scheme(self, sequences, expected):
        scheme = auto_detect(sequences)
        assert scheme == expected

    @pytest.mark.parametrize(
        'sequences, expected',
        [
            ([['I', 'O']], IOB2),
            ([['H']], IOB2)
        ]
    )
    def test_invalid_scheme(self, sequences, expected):
        with pytest.raises(ValueError):
            scheme = auto_detect(sequences)
nilq/baby-python
python
def model_snippet():
    """Build a three-hierarchy multilevel demo model (mass units, weight loads, deadlifts).

    Returns:
        tuple: (DslRoot, viz_name, hidden_root) where DslRoot is the common root
        clabject of all three hierarchies, viz_name is this file's stem (used as
        the visualisation name), and hidden_root flags whether the root should be
        hidden in the visualisation.

    NOTE(review): the keyword args of create_clabject_prop are assumed to mean
    n=name, t=instantiation depth, f=potency/frequency, i_f=instantiation flag,
    i_assoc=association flag, c=constraints, v=default value -- confirm against
    the multilevel_py documentation.
    """
    from pathlib import Path
    from multilevel_py.constraints import is_str_constraint, is_float_constraint, \
        prop_constraint_ml_instance_of_th_order_functional
    from multilevel_py.core import create_clabject_prop, Clabject
    from multilevel_py.constraints import ReInitPropConstr

    # DslRoot for illustration purposes - integrating three classification hierarchies
    DslRoot = Clabject(name="DSLRoot")

    # Mass Unit Hierarchy
    symbol_prop = create_clabject_prop(
        n='symbol', t=2, f='*', i_f=True, c=[is_str_constraint])
    MassUnit = DslRoot(name="MassUnit")
    MassUnit.define_props([symbol_prop])
    kilogram = MassUnit(
        declare_as_instance=True, name="Kilogram",
        speed_adjustments={'symbol': -1},
        init_props={'symbol': 'kg'})
    conversion_factor_prop = create_clabject_prop(
        n='conversion_factor', t=1, f='*', i_f=True, c=[is_float_constraint])
    # Constraint: value must be a first-order instance of MassUnit (e.g. kilogram).
    is_mass_unit_constr = prop_constraint_ml_instance_of_th_order_functional(
        MassUnit, instantiation_order=1)
    base_unit_prop = create_clabject_prop(
        n='base_unit', t=0, f='*', i_assoc=True, c=[is_mass_unit_constr], v=kilogram)
    DerivedMassUnit = MassUnit(name="DerivedMassUnit")
    DerivedMassUnit.define_props([conversion_factor_prop, base_unit_prop])
    pound = DerivedMassUnit(
        name="Pound", declare_as_instance=True,
        init_props={"symbol": "lb", "conversion_factor": 0.45359})

    # Weight Load Hierarchy
    planned_value_prop = create_clabject_prop(
        n='planned_value', t=1, f='*', i_f=False, c=[is_float_constraint])
    actual_value_prop = create_clabject_prop(
        n='actual_value', t=2, f='*', i_f=True, c=[is_float_constraint])
    mass_unit_prop = create_clabject_prop(
        n='mass_unit', t=0, f='*', i_f=False, i_assoc=True, v=MassUnit)
    WeightLoad = DslRoot(name="WeightLoad")
    WeightLoad.define_props([planned_value_prop, actual_value_prop, mass_unit_prop])
    # Accept mass units of first OR second instantiation order (e.g. pound or kilogram).
    is_fst_or_snd_order_mass_unit_instance = \
        prop_constraint_ml_instance_of_th_order_functional(
            MassUnit, instantiation_order=(1, 2))
    re_init_constr = ReInitPropConstr(
        del_constr=[],
        add_constr=[is_fst_or_snd_order_mass_unit_instance])
    WeightLoad.require_re_init_on_next_step(prop_name="mass_unit", re_init_prop_constr=re_init_constr)
    ParameterisedWeightLoad = WeightLoad(
        name='ParameterisedWeightLoad',
        init_props={'planned_value': 180.0, 'mass_unit': pound})
    realisedWeightLoad = ParameterisedWeightLoad(
        declare_as_instance=True, name="RealisedWeightLoad",
        init_props={"actual_value": 182.5})

    # Deadlift Hierarchy
    weight_load_prop = create_clabject_prop(
        n='weight_load', t=0, f='*', i_f=False, i_assoc=True, v=WeightLoad)
    Deadlift = DslRoot(name="Deadlift")
    Deadlift.define_props([weight_load_prop])
    is_weight_load_instance = prop_constraint_ml_instance_of_th_order_functional(
        WeightLoad, instantiation_order=1)
    re_init_constr = ReInitPropConstr(del_constr=[], add_constr=[is_weight_load_instance])
    Deadlift.require_re_init_on_next_step(
        prop_name="weight_load", re_init_prop_constr=re_init_constr)
    ParameterisedDeadlift = Deadlift(
        name="ParameterisedDeadlift",
        init_props={'weight_load': ParameterisedWeightLoad})
    # Tighten the constraint one level further down the hierarchy: the realised
    # deadlift must reference an instance of ParameterisedWeightLoad specifically.
    is_param_weight_load_instance = prop_constraint_ml_instance_of_th_order_functional(ParameterisedWeightLoad, instantiation_order=1)
    re_init_constr = ReInitPropConstr(
        del_constr=[is_weight_load_instance],
        add_constr=[is_param_weight_load_instance])
    ParameterisedDeadlift.require_re_init_on_next_step(
        prop_name="weight_load", re_init_prop_constr=re_init_constr)
    realisedDeadlift = ParameterisedDeadlift(
        declare_as_instance=True, name="RealisedDeadlift",
        init_props={"weight_load": realisedWeightLoad})

    hidden_root = False
    viz_name = str(Path(__file__).stem)
    return DslRoot, viz_name, hidden_root
nilq/baby-python
python
'''
Parser for the ConstantsDumper variables.
'''
import re
from typing import Optional, Text

from ..parser import Context, ParserBase
from ..cpp.types import parse_value


class ConstantsParser(ParserBase):
    '''
    Parses the constants outputted by the ConstantsDumper clang plugin.
    '''

    # One "name := value" assignment per line; a leading '#' is not a name,
    # an optional trailing comma is tolerated.
    VALUE_MATCHER = re.compile(r'^\s*(?P<name>[^#].*?)\s*:=\s*(?P<value>.+?)\s*,?\s*$')

    def parse_line(self, line: Text, context: Context) -> bool:
        '''Parse a single dumped constant into the global scope.

        Returns True when the line matched the assignment pattern,
        False otherwise (the line is left for other parsers).
        '''
        match: Optional[re.Match] = ConstantsParser.VALUE_MATCHER.match(line)
        if match is None:
            return False
        constant_name = match.group('name')
        constant_value = parse_value(match.group('value'), context.global_scope)
        context.global_scope[constant_name] = constant_value
        return True
nilq/baby-python
python
""" Author: Param Deshpande Date created: Fri Jul 10 23:48:41 IST 2020 Description: plots individual piecewise spline curves according to the timestamps in the splineCoeffs.txt License : ------------------------------------------------------------ "THE BEERWARE LICENSE" (Revision 42): Param Deshpande wrote this code. As long as you retain this notice, you can do whatever you want with this stuff. If we meet someday, and you think this stuff is worth it, you can buy me a beer in return. ------------------------------------------------------------ date modified: Fri Jul 10 23:48:41 IST 2020 """ #import #import import matplotlib.pyplot as plt #%matplotlib inline import numpy as np import statistics import Polynomial as poly import scipy from scipy.interpolate import BSpline, splev, splrep, PPoly #if __name__ == '__main__': #import #import """ WRITE YOUR FUNCTIONS HERE """ #def ...: # """ # () -> () # Description: # >>> # # """ def curve3(x,a,b,c,d): """ (np.array,int,int,int,int) -> (np.array) Description: Returns a cubic curve pts formed by x,a,b,c,d >>> """ assert (type(x) ==np.ndarray), 'x should be passed input array' y = a + b*x + c*x**2 + d*x**3 return y #def ...: # """ # () -> () # Description: # >>> # # """ """ START YOUR CODE HERE """ if __name__ == '__main__': pass #import doctest #doctest.testmod() data = np.genfromtxt("splineCoeffs.txt", delimiter=",", \ names=["time",\ "coeffAd", \ "coeffAc", \ "coeffAb", \ "coeffAa", \ "coeffRd", \ "coeffRc", \ "coeffRb", \ "coeffRa", \ "coeffPd", \ "coeffPc", \ "coeffPb", \ "coeffPa", \ "coeffYd", \ "coeffYc", \ "coeffYb", \ "coeffYa", \ ]) #BUFFERSIZE = 15 #dataBuffer = [0]*BUFFERSIZE #print(type(data)) #p = poly.Polynomial(4, 0, -4, 3, 0) #print(p) totalTime = data["time"][-1] - data["time"][0] data["time"] = list(range(0,len(data["time"]))) for i in range(len(data["time"])): currentTimeStamp = data["time"][i] a = data["coeffPa"][i] b = data["coeffPb"][i] c = data["coeffPc"][i] d = data["coeffPd"][i] if(i != 
(len(data["time"]) - 1)): nextTimeStamp = data["time"][i+1] unitTimeStep = np.linspace(currentTimeStamp,nextTimeStamp , 50) x = unitTimeStep - currentTimeStamp F = curve3(x,a,b,c,d) #print("x[0] is " + str(X[0]*totalTime) + "F value is " + str(F[0])) #plt.plot(unitTimeStep, F, label=("piecewise spline from t = " + str(currentTimeStamp) + " to " + str(nextTimeStamp) )) plt.plot(unitTimeStep, F ) #plt.plot(X, F) plt.xlabel('frames ( assuming ~fixed fps camera ) ') plt.ylabel('absolute gimbal Pitch angles') #plt.legend() plt.show() data["time"] = list(range(0,len(data["time"]))) for i in range(len(data["time"])): currentTimeStamp = data["time"][i] a = data["coeffYa"][i] b = data["coeffYb"][i] c = data["coeffYc"][i] d = data["coeffYd"][i] if(i != (len(data["time"]) - 1)): nextTimeStamp = data["time"][i+1] unitTimeStep = np.linspace(currentTimeStamp,nextTimeStamp , 50) x = unitTimeStep - currentTimeStamp F = curve3(x,a,b,c,d) #print("x[0] is " + str(X[0]*totalTime) + "F value is " + str(F[0])) #plt.plot(unitTimeStep, F, label=("piecewise spline from t = " + str(currentTimeStamp) + " to " + str(nextTimeStamp) )) plt.plot(unitTimeStep, F ) #plt.plot(X, F) plt.xlabel('frames ( assuming ~fixed fps camera ) ') plt.ylabel('absolute gimbal Yaw angles') #plt.legend() plt.show() #plt.plot(X, F_derivative, label="F_der") y2 = [0, 3, 1, 2, 3, 5, 8, 13, 17, 24] x2 = np.linspace(0, 1, 30) y3 = curve3(x2,1,2,3,4) #plt.plot(x2, y3) #plt.show() """ y3 = [1,7,3,4,10,2] x3 = list(range(1,7)) tck = splrep(x2, y2) print( " len of knots is " + str(len(tck[0]))) print( " len of coeffs is " + str(len(tck[1]))) print( " degree of Bspline is " + str((tck[2]))) Bspl = BSpline(tck[0],tck[1],tck[2]) By2 = Bspl(x2) print( " len of bspline is " + str(len(By2))) print(" knots / nodes are " + str(tck[0])) plt.plot(x2, y2,'o', label=" Y output passed") knotx =list(range(0,len(tck[0]))) knotx[:] = (x/len(tck[0]) for x in knotx) plt.plot(knotx , tck[0], 'gs', label="Nodes or knots") plt.plot(x2, By2, 
label="Bspline curve ") """ """ END OF FILE """
nilq/baby-python
python
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
#   P A G E B O T  N A N O
#
#   Copyright (c) 2020+ Buro Petr van Blokland + Claudia Mens
#   www.pagebot.io
#   Licensed under MIT conditions
#
#   Supporting DrawBot, www.drawbot.com
# -----------------------------------------------------------------------------
#
#   style.py
#
#   Defines the default style set and theme that every publication and document
#   can start with.
#
from copy import copy
import os  # Import standard Python library to create the _export directory
import sys

if __name__ == "__main__":
    sys.path.insert(0, "../..")  # So we can import pagebotnano without installing.

from pagebotnano_010.toolbox.color import color
from pagebotnano_010.constants import (EN, CENTER, LEFT, RIGHT, DEFAULT_FONT,
    DEFAULT_BODYSIZE, LIGHT, DARK)

class BaseTheme:
    """Base class for themes: a shade/color matrix plus typographic styles.

    Inheriting theme classes are assumed to supply BASE_COLORS (dict of base
    color objects) and NAME (default theme name) -- both are referenced below.
    """

    # Lighter/darker factors used to derive the 9 shades of every base color in
    # makeColorMatrix(); index 4 is the unmodified base color itself.
    # NOTE: the original source assigned MATRIX_RECIPE twice in a row; only this
    # final value ever took effect, so the dead first assignment was removed.
    MATRIX_RECIPE = [1, 0.85, 0.6, 0.3, 0, 1, 0.6, 0.3, 0.15]

    def __init__(self, mood=LIGHT, name=None, fonts=None, styles=None):
        self.colors = self.makeColorMatrix(mood)
        # Defines the relation between typographic functions and font names.
        if fonts is None:
            fonts = self.getDefaultFonts()
        self.fonts = fonts
        # Collection of typographic style dictionaries
        # At least implementing the set of tag names that come from the
        # Typesetter parsing a markdown file.
        if styles is None:
            styles = self.getDefaultStyles(self.fonts, self.colors) # To have basic set installed.
        self.styles = styles
        # NAME is expected to be defined by the inheriting theme class.
        self.name = name or self.NAME

    def getStyle(self, name):
        """Answer the style dict stored under name, or None if undefined."""
        return self.styles.get(name)

    def getColor(self, shade, base):
        """Answer the color at column `shade` of base color row `base`."""
        return self.colors[base][shade]

    def makeColorMatrix(self, mood):
        """Create a 9 (shades) x 6 (base color) table, as source for theme styles.
        (white <--) lightest <-- light <-- lighter <-- base
        base --> darker --> dark --> darkest (--> black)

        >>> from pagebotnano_010.document import Document
        >>> from pagebotnano_010.themes import AllThemes, BackToTheCity
        >>> from pagebotnano_010.constants import *
        >>> from pagebotnano_010.elements import Rect, Text
        >>> from pagebotnano_010.babelstring import BabelString
        >>> theme = BackToTheCity()
        >>> len(theme.colors)
        6
        >>> len(theme.colors[0])
        9
        >>> w = h = 800
        >>> doc = Document(w=w, h=h)
        >>> for Theme in AllThemes:
        ...     for mood in (DARK, LIGHT):
        ...         theme = Theme(mood=mood)
        ...         page = doc.newPage()
        ...         page.padding = 80
        ...         cw = page.pw/len(theme.colors[0]) # Column width
        ...         ch = page.ph/len(theme.colors) # Column height
        ...         for shade in range(len(theme.colors[0])):
        ...             for base in range(len(theme.colors)):
        ...                 c = theme.colors[base][shade]
        ...                 e = Rect(x=page.pl+shade*cw, y=page.pb+base*ch, w=cw, h=ch, fill=c)
        ...                 page.addElement(e)
        ...         # Add background rectangle on top with theme name and mood. getColor(shade, base)
        ...         e = Rect(x=page.pl, y=page.h-page.pt, w=page.pw, h=page.pt, fill=theme.getColor(0,2))
        ...         page.addElement(e)
        ...         style = dict(font='Georgia', fontSize=24, fill=theme.getColor(-2,2), indent=20)
        ...         bs = BabelString('%s – %s' % (theme.name, mood), style)
        ...         tw, th = bs.textSize
        ...         e = Text(bs, x=page.pl, y=page.h-page.pt*3/5)
        ...         page.addElement(e)
        >>> doc.export('_export/ThemeColors.pdf')
        """
        if mood is None:
            mood = LIGHT
        r = self.MATRIX_RECIPE # Defined by the inheriting class
        matrix = []
        # Iterate base colors in a deterministic (name-sorted) order.
        for baseName, c in sorted(self.BASE_COLORS.items()):
            if mood == LIGHT:
                matrix.append((
                    c.lighter(r[0]),
                    c.lighter(r[1]),
                    c.lighter(r[2]),
                    c.lighter(r[3]),
                    c,
                    c.darker(r[5]),
                    c.darker(r[6]),
                    c.darker(r[7]),
                    c.darker(r[8]),
                ))
            else: # mood == DARK: shades run in the opposite direction.
                matrix.append((
                    c.darker(r[8]),
                    c.darker(r[7]),
                    c.darker(r[6]),
                    c.darker(r[5]),
                    c,
                    c.lighter(r[3]),
                    c.lighter(r[2]),
                    c.lighter(r[1]),
                    c.lighter(r[0]),
                ))
        return matrix

    def getDefaultFonts(self):
        """Answer the default font name mapping for this theme."""
        regular = DEFAULT_FONT
        bold = DEFAULT_FONT+'-Bold'
        italic = DEFAULT_FONT+'-Italic'
        boldItalic = DEFAULT_FONT+'-BoldItalic'
        # Default font set, used by Theme
        return dict(
            regular=regular,
            bold=bold,
            italic=italic,
            boldItalic=boldItalic,
            monospaced='Courier-Regular'
        )

    def getDefaultStyles(self, fonts, colors):
        """Answer the default set of styles, to get any theme started.
        At least, implement the tags defined in HTML_TEXT_TAGS
        """
        ps = DEFAULT_BODYSIZE
        ps5 = 3*ps    # Heading sizes, derived from the body size.
        ps4 = 2.5*ps
        ps3 = 2*ps
        ps2 = 1.5*ps
        lh11 = 1.1*ps  # Line heights.
        lh12 = 1.2*ps
        lh13 = 1.3*ps
        lh14 = 1.4*ps
        textColor = self.getColor(7, 3)
        accentColor = self.getColor(7, 4)
        regular = fonts['regular']
        bold = fonts['bold']
        italic = fonts['italic']
        boldItalic = fonts['boldItalic']
        monospaced = fonts['monospaced']
        return {
            'h1': dict(font=bold, fontSize=ps5, lineHeight=lh11, fill=textColor),
            'h2': dict(font=bold, fontSize=ps4, lineHeight=lh12, fill=textColor),
            'h3': dict(font=italic, fontSize=ps3, lineHeight=lh13, fill=textColor),
            'h3 b': dict(font=bold, fontSize=ps3, lineHeight=lh13, fill=textColor),
            'h4': dict(font=regular, fontSize=ps2, lineHeight=lh14, fill=textColor),
            'h5': dict(font=bold, fontSize=ps, lineHeight=lh14, fill=textColor),
            'h6': dict(font=italic, fontSize=ps, lineHeight=lh14, fill=textColor),
            'p': dict(font=regular, fontSize=ps, lineHeight=lh14, fill=textColor),
            'b': dict(font=bold, fontSize=ps, lineHeight=lh14, fill=textColor),
            'em': dict(font=italic, fontSize=ps, lineHeight=lh14, fill=textColor),
            'i': dict(font=italic, fontSize=ps, lineHeight=lh14, fill=textColor),
            'bi': dict(font=boldItalic, fontSize=ps, lineHeight=lh14, fill=textColor),
            'img': dict(font=boldItalic, fontSize=ps, lineHeight=lh14, fill=textColor),
            'strong': dict(font=bold, fontSize=ps, lineHeight=lh14, fill=textColor),
            'a': dict(font=bold, fontSize=ps, lineHeight=lh14, fill=accentColor),
            'a.hover': dict(font=bold, fontSize=ps, lineHeight=lh14, fill=accentColor.darker()),
            'hr': dict(font=regular, fontSize=ps, lineHeight=lh14, fill=textColor),
            'python': dict(font=monospaced, fontSize=ps, lineHeight=lh14, fill=textColor),
            'code': dict(font=monospaced, fontSize=ps, lineHeight=lh14, fill=textColor),
            'blockquote': dict(font=italic, fontSize=ps, lineHeight=lh14, fill=textColor),
            'ul': dict(font=regular, fontSize=ps, lineHeight=lh14, fill=textColor),
            'ol': dict(font=regular, fontSize=ps, lineHeight=lh14, fill=textColor),
            'li': dict(font=regular, fontSize=ps, lineHeight=lh14, fill=textColor),
        }

if __name__ == "__main__":
    # Running this document will execute all >>> comments as test of this source.
    import doctest
    # BUGFIX: was `doctest.testmod()[0]` -- the pointless subscript on the
    # TestResults tuple was dropped.
    doctest.testmod()
python
def configure(conf):
    """Configure the build environment for the mips64el (little-endian MIPS64) target."""
    env = conf.env
    env.ARCHITECTURE = 'mips64el'
    env.VALID_ARCHITECTURES = ['mips64el', 'mipsel64']
    env.ARCH_FAMILY = 'mips'
    env.ARCH_LP64 = True
    # Preprocessor defines describing the architecture and the LP64 data model.
    env.append_unique(
        'DEFINES',
        ['_MIPS', '_MIPS64', '_MIPSEL', '_MIPSEL64', '_MIPS64EL', '_LP64'])
nilq/baby-python
python
# -*- python -*-
# pymode:lint_ignore=E501
"""Common settings and globals."""

from sys import path
import os

from django.contrib.messages import constants as message_constants
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _

from os.path import abspath, basename, dirname, join, normpath


def get_env_variable(var_name, default=None):
    """
    Get a setting from an environment variable.

    :param str var_name: variable name
    :param default: value returned when the variable is unset; when left as
        None a missing variable raises ImproperlyConfigured instead.
    """
    try:
        return os.environ[var_name]
    except KeyError:
        # BUGFIX: the `default` parameter was previously accepted but ignored,
        # so a missing variable always raised. It is now honored.
        if default is not None:
            return default
        error_msg = "Set the %s environment variable" % var_name
        raise ImproperlyConfigured(error_msg)


# ######### PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))

# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)

# Site name:
SITE_NAME = basename(DJANGO_ROOT)

# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
# ######### END PATH CONFIGURATION


# ######### DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# ######### END DEBUG CONFIGURATION


# ######### MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
    ('Coffeestats Team', get_env_variable('COFFEESTATS_SITE_ADMINMAIL')),
)

# See: https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = get_env_variable('COFFEESTATS_SITE_ADMINMAIL')

# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# ######### END MANAGER CONFIGURATION


# ######### DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': get_env_variable('COFFEESTATS_PGSQL_DATABASE'),
        'USER': get_env_variable('COFFEESTATS_PGSQL_USER'),
        'PASSWORD': get_env_variable('COFFEESTATS_PGSQL_PASSWORD'),
        'HOST': get_env_variable('COFFEESTATS_PGSQL_HOSTNAME'),
        'PORT': get_env_variable('COFFEESTATS_PGSQL_PORT'),
    }
}
# ######### END DATABASE CONFIGURATION


# ######### GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'Europe/Berlin'

# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'

# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1

SITES_DOMAIN_NAME = get_env_variable('COFFEESTATS_DOMAIN_NAME')
SITES_SITE_NAME = get_env_variable('COFFEESTATS_SITE_NAME')

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = False
# ######### END GENERAL CONFIGURATION


# ######### MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))

# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# ######### END MEDIA CONFIGURATION


# ######### STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))

# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'

# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS  # noqa
STATICFILES_DIRS = (
    normpath(join(SITE_ROOT, 'static')),
)

# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders  # noqa
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# ######### END STATIC FILE CONFIGURATION


# ######### SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = get_env_variable('COFFEESTATS_SITE_SECRET')
# ######### END SECRET CONFIGURATION


# ######### SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# ######### END SITE CONFIGURATION


# ######### FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS  # noqa
FIXTURE_DIRS = (
    normpath(join(SITE_ROOT, 'fixtures')),
)
# ######### END FIXTURE CONFIGURATION


# ######### TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/1.9/ref/settings/#templates
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'DIRS': [normpath(join(SITE_ROOT, 'templates'))],
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.request',
                'absolute.context_processors.absolute',
                'caffeine.context_processors.mainnav',
                'caffeine.context_processors.socialurls',
            ],
        }
    },
]

# ######### MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE = (
    # Default Django middleware.
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # uncomment next line to enable translation to browser locale
    # 'django.middleware.locale.LocaleMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # middleware to redirect user to set timezone
    'caffeine.middleware.EnforceTimezoneMiddleware',
)
# ######### END MIDDLEWARE CONFIGURATION


# ######### URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
# ######### END URL CONFIGURATION


# ######### APP CONFIGURATION
DJANGO_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'django.contrib.admin',
    'oauth2_provider',
    'rest_framework',
)

# Apps specific for this project go here.
LOCAL_APPS = (
    'django_registration',
    'caffeine',
    'caffeine_api_v1',
    'caffeine_oauth2',
)

AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'caffeine.authbackend.LegacyCoffeestatsAuth',
)

LOGIN_URL = '/auth/login/'
LOGIN_REDIRECT_URL = '/'

AUTH_USER_MODEL = 'caffeine.User'

ACCOUNT_ACTIVATION_DAYS = 2
EMAIL_CHANGE_ACTION_VALIDITY = 2

# Minimum number of minutes between two registered drinks.
MINIMUM_DRINK_DISTANCE = 5

CAFFEINE_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'

GOOGLE_PLUS_URL = 'https://plus.google.com/108309823073824027966'
TWITTER_URL = 'https://twitter.com/coffeestats'

# Map Django message levels to the CSS classes used by the frontend.
MESSAGE_TAGS = {
    message_constants.DEBUG: 'flash-debug',
    message_constants.INFO: 'flash-info',
    message_constants.SUCCESS: 'flash-success',
    message_constants.WARNING: 'flash-warning',
    message_constants.ERROR: 'flash-error',
}

# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS
# ######### END APP CONFIGURATION


# ######### REST FRAMEWORK CONFIGURATION
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'oauth2_provider.contrib.rest_framework.OAuth2Authentication',
        'rest_framework.authentication.SessionAuthentication',
    ],
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated',
    ],
    'PAGINATE_BY': 10
}
# ######### END REST FRAMEWORK CONFIGURATION


# ######### OAUTH2 settings
# this setting is required to make oauth2_provider work
OAUTH2_PROVIDER_APPLICATION_MODEL = 'caffeine_oauth2.CoffeestatsApplication'
OAUTH2_PROVIDER = {
    'OAUTH2_SERVER_CLASS': 'caffeine_oauth2.oauth2_server.CoffeestatsServer',
    # list of allowed URI schemes for redirect URIs
    'ALLOWED_REDIRECT_URI_SCHEMES': [
        'http', 'https',
        'org.coffeestats.android', 'org.coffeestats.cli', 'org.coffeestats.ios',
    ],
    # the list of available scopes
    'SCOPES': {
        'read': _('Read your caffeine consumption'),
        'write': _('Add and modify your caffeine consumption'),
        # 'openid': _('Get information about you'),
    },
    'DEFAULT_SCOPES': ['read', 'write'],
}
# ######### END OAUTH2 settings

API_USAGE_AGREEMENT = '/api/v2/agreement/'


# ######### LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            'format': '%(levelname)s %(message)s',
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'caffeine': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
        }
    }
}
# ######### END LOGGING CONFIGURATION


# ######### WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
# ######### END WSGI CONFIGURATION

TEST_RUNNER = 'django.test.runner.DiscoverRunner'
nilq/baby-python
python
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Dynamic inventory script for ansible that works with ymir data. # See also: http://docs.ansible.com/ansible/intro_dynamic_inventory.html # import os from ymir import load_service_from_json YMIR_SERVICE_JSON = os.path.abspath( os.environ.get( 'YMIR_SERVICE_JSON', os.path.join( os.path.dirname(__file__), '..', # because this file resides in service_root/ansible 'service.json'))) # Create the ymir service from the service description _service = load_service_from_json(YMIR_SERVICE_JSON, quiet=True) # print out JSON suitable for use as ansible dynamic inventory _service.ansible_inventory()
nilq/baby-python
python
from collections import namedtuple
import subprocess
import os
import sys

from test_cases import Test
from utils import pipe_read


class RedundancyTest(Test):
    """Integration test: runs gvprof's redundancy analysis on known test cases
    and compares the produced redundancy counts against expected values."""

    # Expected result set for one gvprof invocation:
    #   *_files   - CSV result files (relative to gvprof-database/)
    #   *_reds    - expected redundancy count per corresponding file
    #   total     - expected total access count(s)
    #   sampling  - sampling period (0 = no sampling)
    #   tolerate  - allowed relative deviation from the expected value
    Config = namedtuple('Config', ['spatial_read_files', 'spatial_read_reds',
                                   'spatial_write_files', 'spatial_write_reds',
                                   'temporal_read_files', 'temporal_read_reds',
                                   'temporal_write_files', 'temporal_write_reds',
                                   'total', 'sampling', 'tolerate'])

    def __init__(self, arch):
        super().__init__('RedundancyTest', arch)

    def setup(self, choices):
        # Register the expected Config (or list of Configs, one per gvprof run)
        # for each selected test case name.
        for choice in choices:
            if choice == 'vectorAdd.f128':
                self._configs[choice] = RedundancyTest.Config(
                    spatial_read_files=['spatial_read_t0.csv'],
                    spatial_read_reds=[3],
                    spatial_write_files=['spatial_write_t0.csv'],
                    spatial_write_reds=[1],
                    temporal_read_files=['temporal_read_t0.csv'],
                    temporal_read_reds=[0],
                    temporal_write_files=['temporal_write_t0.csv'],
                    temporal_write_reds=[0],
                    total=[12],
                    sampling=0,
                    tolerate=0.0)
            elif choice == 'bfs':
                self._configs[choice] = RedundancyTest.Config(
                    spatial_read_files=['spatial_read_t0.csv'],
                    spatial_read_reds=[27707987],
                    spatial_write_files=['spatial_write_t0.csv'],
                    spatial_write_reds=[7997516],
                    temporal_read_files=['temporal_read_t0.csv'],
                    temporal_read_reds=[5603846],
                    temporal_write_files=['temporal_write_t0.csv'],
                    temporal_write_reds=[0],
                    total=[52653451],
                    sampling=0,
                    tolerate=0.02)
            elif choice == 'backprop':
                # Two runs: one full, one with sampling period 50.
                self._configs[choice] = [
                    RedundancyTest.Config(
                        spatial_read_files=['spatial_read_t0.csv'],
                        spatial_read_reds=[4194507],
                        spatial_write_files=['spatial_write_t0.csv'],
                        spatial_write_reds=[1048623],
                        temporal_read_files=['temporal_read_t0.csv'],
                        temporal_read_reds=[3149872],
                        temporal_write_files=['temporal_write_t0.csv'],
                        temporal_write_reds=[0],
                        total=[19988592],
                        sampling=0,
                        tolerate=0.01),
                    RedundancyTest.Config(
                        spatial_read_files=['spatial_read_t0.csv'],
                        spatial_read_reds=[84039],
                        spatial_write_files=['spatial_write_t0.csv'],
                        spatial_write_reds=[21009],
                        temporal_read_files=['temporal_read_t0.csv'],
                        temporal_read_reds=[63058],
                        temporal_write_files=['temporal_write_t0.csv'],
                        temporal_write_reds=[0],
                        total=[400160],
                        sampling=50,
                        tolerate=0.05)]

    def _run_impl(self, case_name, version):
        # A case may define one Config or a list of Configs (one per run).
        runs = self._configs[case_name]
        if not isinstance(runs, list):
            runs = [runs]
        command = Test.cases[case_name].command
        options = Test.cases[case_name].options
        path = Test.cases[case_name].path
        for run in runs:
            sampling = ''
            if run.sampling != 0:
                sampling = 'sampling'
                # Sampled run: pass the sampling period after '@'.
                pipe_read(['gvprof', '-cfg', '-e',
                           'redundancy@' + str(run.sampling), command] + options)
            else:
                pipe_read(['gvprof', '-cfg', '-e', 'redundancy', command] + options)

            def redundancy_compare(red_files, true_reds):
                # Compare the last line of each result CSV (first field is the
                # measured redundancy) against the expected value, within the
                # run's relative tolerance.
                for i, red_file in enumerate(red_files):
                    red_file = 'gvprof-database/' + red_file
                    res = pipe_read(['tail', '-n', '1', red_file]).decode()
                    red = float(res.split(',')[0])
                    true_red = float(true_reds[i])
                    # With an expected value of 0 the measured value itself is
                    # used as the deviation (relative error is undefined).
                    epsilon = red if true_red == 0.0 else abs(
                        red - true_red) / true_red
                    if epsilon > run.tolerate:
                        print('Error {} {}: (true: {} vs test: {})'.format(
                            path, red_file, true_red, red))
                    else:
                        print('Pass ' + path + ' ' + red_file + ' ' + sampling)

            redundancy_compare(run.spatial_read_files, run.spatial_read_reds)
            redundancy_compare(run.spatial_write_files, run.spatial_write_reds)
            redundancy_compare(run.temporal_read_files, run.temporal_read_reds)
            redundancy_compare(run.temporal_write_files, run.temporal_write_reds)
nilq/baby-python
python
"""Module to preprocess detection data and robot poses to create usable input for ML model""" from copa_map.util import hist_grid, rate_grid, util import numpy as np from dataclasses import dataclass from termcolor import colored import os from matplotlib import pyplot as plt import pickle from matplotlib.widgets import Slider from copa_map.util.fov import FOV from copa_map.util.occ_grid import OccGrid from copy import copy import pandas as pd from sklearn.ensemble import IsolationForest @dataclass class GridParams: """Dataclass for model parameters""" # Origin of the grid in world coordinates origin: list # Rotation of the grid in rad rotation: float = 0. # Height of the grid in m height: float = 70.0 # Width of the grid in m width: float = 70.0 # Resolution of the histogram cells in meters cell_resolution: float = 1.0 # Ratio of cells with zero counts to keep zero_ratio: float = 1.0 # Ratio of cells with small rate to keep small_rate_ratio: float = 1.0 # Ratio of number of all cells to number of inducing points (alpha) inducing_ratio: float = 0.02 # Seed for use in random selections bin_seed: float = None # 2D-Kmeans, 3D-Kmeans inducing_method: str = "2D-KMeans" # Minimum rate to set because gamma distribution requires values greater than zero rate_min: float = 1e-5 # Minimum observation time in seconds for use cells in training data (reduces instability). observation_time_min: float = 20. # Bin size in seconds bin_size: float = 60. * 60. 
    # Normalize rate to bin timestamps
    # Rate will then correspond to count per bin_size
    normalize_to_bin: bool = True
    # Outlier contamination
    # Remove this ratio of total data points as outliers
    outlier_contam: float = 0.003
    # Instead of removing outliers by masking, set the value to min value
    set_outlier_to_min: bool = False
    # When creating the cells based on robot positions, incorporate the occupancy grid map to exclude cells that
    # were not visible because of obstructions
    fov_use_map: bool = True
    # Save the input data to the grid object. If this class is pickled via the respective method, the original
    # input data can then be accessed later
    save_raw_data: bool = True


class Gridifier:
    """Class to preprocess the people detection and robot data, creating a grid like setup with counts and rates"""

    def __init__(self, occ_map: OccGrid, fov: FOV, params: GridParams, create_gt=False):
        """
        Constructor

        Args:
            occ_map: Occupancy map to calculate observation durations of different areas of the environment.
                     Obstacles (black areas in the map) will be regarded as impenetrable by the robot so that the
                     observation duration behind obstacles is not increased
            fov: FOV object which represents the field of view of the robot (e.g. a Circle with radius)
            params: GridParams parametrizing the grid
            create_gt: If true the observation durations and obstacles will be ignored. Still, only the cells are
                       considered that lie within the maximum range of the robot's FOV. But every visible cell will
                       have maximum observation duration.
        """
        self.occ_map = occ_map
        self.fov = fov
        self.params = params
        # Per-bin state, filled while setup_data() iterates over time bins
        self.obv_duration = None
        self.mask_fov = None
        # Accumulated training data; None until setup_data() has been called
        self.X_data = None
        self.Y_data = None
        self.Y_data_all = None
        self.df_data = None
        self.df_rob = None
        self.num_bins = None
        # Scales the timestamps to bin counts
        if self.params.normalize_to_bin:
            self.scale_seconds = self.params.bin_size
            self.scale_seconds_text = str(self.scale_seconds / 60) + "min"
        else:
            self.scale_seconds = 60.
            # NOTE(review): dict-as-switch keyed on True/False — with the hard-coded 60. above this
            # always resolves to "min"; the "hour" entry is only reachable if scale_seconds changes.
            self.scale_seconds_text = {self.scale_seconds == 60: "min", self.scale_seconds == 3600: "hour"}[True]
        # Number of cpu cores
        self.cpu_count = util.get_cpu_count()
        self.create_gt = create_gt

    def to_file(self, path):
        """
        Write the instance of this class to a pickle

        Args:
            path: Absolute path
        """
        # NOTE(review): _data_read() only checks X_data/Y_data; self.grids and self.timestamps are
        # also required here and only exist after setup_data() — confirm the assert is sufficient.
        assert self._data_read()
        data_dict = {'X': self.X_data, 'Y': self.Y_data, 'Yg': self.Y_data_all, 'param': self.params,
                     'occ_map': self.occ_map, 'fov': self.fov, 'grids': self.grids, 'timestamps': self.timestamps,
                     'data': self.df_data}
        print("Writing gridifier data to " + str(path))
        pickle.dump(data_dict, open(path, "wb"))

    def output_to_text(self, path):
        """
        Write the output values (X, Y, Z) to csv files.

        Args:
            path: Path with csv suffix
        """
        assert self._data_read()
        sX = self.X_data
        sY = self.Y_data_all[:, 0].reshape(-1, 1)
        # Replace 1e-3 values with 1e-6 — presumably 1e-3 is the minimum-rate sentinel
        # (cf. params.rate_min); TODO confirm against the RateGrid implementation.
        sY[sY == 1e-3] = 1e-6
        pdXY = pd.DataFrame(data=np.hstack([sX, sY]), columns=["x1", "x2", "t", "y"])
        pdXY.to_csv(path, index=False)

    @classmethod
    def from_file(cls, path):
        """
        Read an instance of this class from a pickle

        Args:
            path: Absolute path to file
        """
        assert os.path.isfile(path)
        print("Restoring gridifier data from " + str(path))
        # Security note: pickle.load can execute arbitrary code — only load trusted files.
        data_dict = pickle.load(open(path, "rb"))
        inst = cls(data_dict['occ_map'], data_dict['fov'], data_dict['param'])
        inst.X_data = data_dict['X']
        inst.Y_data = data_dict['Y']
        inst.Y_data_all = data_dict['Yg']
        inst.grids = data_dict['grids']
        inst.timestamps = data_dict['timestamps']
        # Older pickles may not contain the raw dataframe
        inst.df_data = data_dict['data'] if 'data' in data_dict else None
        return inst

    def get_grid(self, num=-1):
        """Return the specified grid for a bin number"""
        return self.grids[num]

    def get_input_points(self):
        """Return the lattice like input points"""
        self._chk_data_set(self.X_data)
        return self.X_data

    def get_count(self):
        """Return the counts at these points"""
        self._chk_data_set(self.Y_data)
        return self.Y_data

    def get_observations(self):
        """Return the rate, std dev, counts, observation duration"""
        self._chk_data_set(self.Y_data_all)
        return self.Y_data_all

    def _chk_data_set(self, dat):
        # Guard used by the getters: data only exists after setup_data() ran
        if dat is None:
            raise NameError("No gridified data available. Call setup_data(..) first.")

    def setup_data(self, df_data: pd.DataFrame, df_rob: pd.DataFrame = pd.DataFrame({'': []})):
        """
        Set the data to the gridifier. Based on detections, robot path and parameters given in constructor constructs
        lattice like input points and observations that can be used as input for machine learning models

        Args:
            df_data: Pandas dataframe containing the people data to use. Expected format are columns named
                     (pos_x, pos_y, tidx_bin, t) where tidx_bin refers to the time index of the time bin.
            df_rob: Pandas dataframe containing the robot positions. Expected format are columns named
                    (robot_x, robot_y, delta_t, t). delta_t refers to the dwell time at each pose.
                    Defaults to an empty frame so `df_rob.empty` is True when no robot data is given.
                    NOTE(review): the default DataFrame is created once at definition time and shared
                    between calls; it is only read here, but keep it read-only.
        """
        # For each timestamp, grids are created in a loop, which extend the following training data arrays
        self.X_data = np.array([]).reshape(0, df_data.shape[1] - 1)  # Minus 1 because of t column
        # self.vis_data = np.array([]).reshape(0, X_detections[0].shape[1])
        self.Y_data = np.array([]).reshape(0, 1)
        self.Y_data_all = np.array([]).reshape(0, 4)
        # if self.params.save_raw_data:
        self.df_data = df_data
        self.df_rob = df_rob
        self.grids = list()
        self.timestamps = list()

        # Delta_t is given
        if not df_rob.empty and 'delta_t' in df_rob:
            self.grid = rate_grid.RateGrid(width=self.params.width, height=self.params.height,
                                           resolution=self.params.cell_resolution, origin=self.params.origin,
                                           min_rate=self.params.rate_min, scale_seconds=self.scale_seconds,
                                           rotation=self.params.rotation)
        else:
            # Histogram to represent person counts in a grid-like fashion
            self.grid = hist_grid.HistGrid(width=self.params.width, height=self.params.height,
                                           resolution=self.params.cell_resolution, origin=self.params.origin,
                                           rotation=self.params.rotation)

        # Iterate over time bins; bin indices come from the robot data if available
        t_arr = df_rob.tidx_bin.unique() if not df_rob.empty else df_data.tidx_bin.unique()
        for idx, tidx in enumerate(t_arr):
            # self.grid = copy(grid_orig)
            df_sub_data = df_data.loc[df_data.tidx_bin == tidx]
            print("\nComputing grid {}/{}".format(idx + 1, len(t_arr)))
            print("Timespan Data: " + str(df_sub_data.t.min()) + " -- " + str(df_sub_data.t.max()))
            if not df_rob.empty:
                df_sub_rob = df_rob.loc[df_rob.tidx_bin == tidx]
                print("Timespan Robot: " + str(df_sub_rob.t.min()) + " -- " + str(df_sub_rob.t.max()))
            X = df_sub_data.drop(columns=['t']).to_numpy()
            R_bin, delta_t, new_path = self._bin_rob_pos_and_dwell(tidx, df_rob)
            X_data, Y_data, Y_data_all = \
                self._bin_prepare_data(self.params, X, R_bin, delta_t, new_path, self.fov, self.occ_map, tidx)

            # Add training data of the timestamp to remaining training data
            self.X_data = np.vstack((self.X_data, X_data))  # TODO: Replace member variable
            # ##self.vis_data = np.vstack((self.vis_data, vis_data))
            self.Y_data = np.vstack((self.Y_data, Y_data))
            self.Y_data_all = np.vstack((self.Y_data_all, Y_data_all)) if Y_data_all is not None else None
            # Keep a snapshot of the grid and the first timestamp of this bin
            self.grids.append(copy(self.grid))
            self.timestamps.append(df_sub_data.t.min())

        if self.params.outlier_contam > 0.0:
            self.X_data, self.Y_data_all, self.Y_data = \
                self._mask_outliers(self.X_data, self.Y_data_all, self.Y_data)

    def _mask_outliers(self, X, Yg, Y):
        """Removes outliers from the final data"""
        # Yc holds +1 for inliers and -1 for outliers (IsolationForest convention)
        Yc = IsolationForest(contamination=self.params.outlier_contam).fit_predict(Yg[:, 0].reshape(-1, 1))
        rX = X[Yc == 1]
        rY = Y[Yc == 1]
        rYg = Yg[Yc == 1]
        util.logger().info("Removed " + str(Y[Yc == -1].shape[0]) + " outliers from " + str(Y.shape[0])
                           + " data points. Old max: " + str(Y[:, 0].max()) + ", mean: " + str(Y[:, 0].mean()) +
                           ", New max: " + str(rY[:, 0].max()) + ", mean: " + str(rY[:, 0].mean()))
        ts_with_outl = np.unique(X[Yc == -1][:, 2])

        def mask_grid(ti, Yc):
            # Get index of cells which should be masked for this timestamp
            # NOTE(review): ti (the timestamp column value) is used directly as an index into
            # self.grids — assumes timestamps are 0-based consecutive bin indices; TODO confirm.
            ti = int(ti)
            poses = self.grids[ti].tf_to(X[(X[:, 2] == ti) & (Yc == -1)][:, :2])
            ind_mask = self.grids[ti].index_from_pos(poses)
            if not self.params.set_outlier_to_min:
                # Mask the rate array
                self.grids[ti].rate.mask[ind_mask[:, 0], ind_mask[:, 1]] = True
            else:
                # Instead of masking, set the value to zero (or min value) if parameter is set
                self.grids[ti].rate[ind_mask[:, 0], ind_mask[:, 1]] = self.params.rate_min

        # np.apply_along_axis(mask_grid, arr=ts_with_outl.reshape(-1, 1), axis=1)
        # map(mask_grid, ts_with_outl.reshape(-1, 1))
        [mask_grid(ti, Yc) for ti in ts_with_outl.reshape(-1, 1)]
        if self.params.set_outlier_to_min:
            # Values were clamped in-place in the grids; keep all rows
            return X, Yg, Y
        else:
            return rX, rYg, rY

    def _data_read(self):
        # True once setup_data() (or from_file()) has populated the training arrays
        return self.X_data is not None and self.Y_data is not None

    def _get_fov_mask(self, R, fov, occ_map):
        if R is None:
            # No robot path known: treat every cell as visible
            mask_fov = ~self.grid.empty_mask()
        else:
            # Using the field of view and robot poses, determine which cells were visible for the robot
            print("Compute mask_fov...")
            mask_fov = fov.path_mask(poses=R, grid=self.grid,
                                     occ_map=occ_map if self.params.fov_use_map else None,
                                     cpu_count=self.cpu_count)
            assert ~np.all(~mask_fov), "Visibility area contains no cells"
        return mask_fov

    def _bin_prepare_data(self, params, X_bin, R_bin, delta_t, new_path, fov, occ_map, timestamp):
        # Histogram to represent person counts in a grid-like fashion
        self.grid.set_counts(X_bin[:, :2])
        # Only recompute the visibility mask when the robot path changed (new_path True);
        # new_path None means no robot data at all for this bin.
        if new_path:
            self.obv_duration = None
            self.mask_fov = self._get_fov_mask(R=R_bin, fov=fov, occ_map=occ_map)
        elif new_path is None:
            self.mask_fov = None

        if params.bin_seed is not None:
            # Modify the seed based on the timestamp (or how many grid were already saved, which is the same)
            # This keeps different methods comparable, but avoids that samples are distributed equally for consecutive
            # timesteps
            seed = len(self.grids) * params.bin_seed
        else:
            seed = None
        self.counts = self.grid.masked_counts(ratio=params.zero_ratio,
                                              mask=~self.mask_fov if self.mask_fov is not None else None,
                                              seed=seed)
        # vis_data = self.grid.get_centers(as_3d=True)[self.mask_fov]

        if isinstance(self.grid, rate_grid.RateGrid):
            self.grid.set_by_path(R_bin, delta_t, fov, occ_map, min_obs_time=params.observation_time_min,
                                  create_gt=self.create_gt, mask_fov=self.mask_fov, new_path=new_path)
            self.grid.mask_small_rate(rate_min=params.rate_min, ratio=params.small_rate_ratio)
            Y_data_all = \
                self.grid.get_stacked(norm_obs_dur=self.params.bin_size if self.params.normalize_to_bin else None)
            print(colored("Created grid with " + str(params.cell_resolution) + "m resolution", "green"))

            def print_vars(name, matrix, unit):
                # Console summary of a grid quantity (max/min/mean)
                print(colored("Max " + name + ": " + f"{matrix.max():.2f}" + " " + unit +
                              ", Min " + name + ": " + f"{matrix.min():.2f}" + " " + unit +
                              ", Mean " + name + ": " + f"{matrix.mean():.2f}" + " " + unit, "green"))

            print_vars("Counts", self.grid.counts_masked[~self.grid.rate.mask], unit="people")
            print_vars("Obv duration", self.grid.obv_duration, unit=self.scale_seconds_text)
            print_vars("Rate", self.grid.rate, unit="people/" + self.scale_seconds_text)
        else:
            # HistGrid: only counts are available, no rate/duration observations
            Y_data_all = None
            print(colored("Created grid with " + str(params.cell_resolution) + "m resolution, max counts per cell: "
                          + str(np.max(self.counts)), "green"))

        X_data, Y_data = self._bin_input_for_gp(self.counts)
        X_data, Y_data, Y_data_all = \
            self._bin_drop_outside_map(occ_map, X_data, Y_data, Y_data_all)

        # Add respective timestamp to training data if it is known
        def add_timestamp_if_not_none(arr, timestamp):
            if arr is not None:
                return np.hstack((arr, np.ones((arr.shape[0], 1)) * timestamp))
            else:
                return arr

        if timestamp is not None:
            X_data = add_timestamp_if_not_none(X_data, timestamp)

        return X_data, Y_data, Y_data_all

    def _bin_drop_outside_map(self, occ_map, X_data, Y_data, Y_data_all):
        """Drop all data outside of the occupancy map"""
        def keep_ind(data):
            # NOTE(review): returns [] (empty index) when data is None; Y_data is then
            # indexed with it unconditionally below — verify this edge case is intended.
            if data is None:
                return []
            data_t = self.occ_map.tf_to(data)
            return (data_t[:, 0] > 0) & (data_t[:, 1] > 0) & (data_t[:, 0] <= occ_map.width) & \
                   (data_t[:, 1] <= occ_map.height)

        keep_X = keep_ind(X_data)
        X_data = X_data[keep_X] if X_data is not None else None
        if Y_data_all is not None:
            Y_data_all = Y_data_all[keep_X]
        Y_data = Y_data[keep_X]
        return X_data, Y_data, Y_data_all

    def _bin_input_for_gp(self, counts):
        if isinstance(self.grid, rate_grid.RateGrid):
            # If observation duration known, do not use cells with low observation time and rate
            mask = np.logical_or(counts.mask, self.grid.rate.mask)
        else:
            # Masked array representing the counts in each cell, masked by the visibility area
            mask = counts.mask
        # Input data for the GP
        X_data = self.grid.get_centers(as_3d=True)[~mask]
        # Counting data as the outcome
        Y_data = counts.data[~mask].ravel(order='F').reshape(-1, 1)
        return X_data, Y_data

    def _bin_rob_pos_and_dwell(self, tidx, df_rob: pd.DataFrame):
        """Get the robot path and dwell times during a specific bin. Also returns a variable that indicates if this
        exact path was returned for the last bin, to avoid multiple calculations of the same path
        """
        try:
            # Positions of the robot path
            df_bin = df_rob.loc[(df_rob.tidx_bin == tidx) & (df_rob.delta_t > 0)].drop(columns=['t'])
            R = df_bin[['robot_x', 'robot_y']].to_numpy()
            # To the positions associated dwell times
            delta_t = df_bin[['delta_t']].to_numpy()
            delta_t = delta_t.reshape(delta_t.shape[0], )
            tidxs = df_rob.tidx_bin.unique()
            tidx_before = (np.argwhere(tidxs == tidx) - 1)[0][0]
            # If the same simulative robot path is used for all timestamps, some computations can be reused
            if tidx == df_rob.tidx_bin.unique()[0]:
                new_path = True
            else:
                # If all data is the same as in the bin before, its the same path
                arr_bef = df_rob[df_rob.tidx_bin == tidx_before][['robot_x', 'robot_y', 'delta_t']].values
                arr_now = df_rob[df_rob.tidx_bin == tidx][['robot_x', 'robot_y', 'delta_t']].values
                if np.array_equal(arr_bef, arr_now):
                    new_path = False
                else:
                    new_path = True
        except Exception as e:
            # Any failure (e.g. empty df_rob, missing columns) degrades to "no robot path"
            print(colored("Robot path not found: {}".format(e), "red"))
            R = delta_t = new_path = None
        return R, delta_t, new_path

    def plot(self, figurename="Gridifier"):  # pragma: no cover
        """Plots the gridified data to a 2x2 plot window"""
        assert self.Y_data_all is not None, "Plot only works with Rate data"
        # NOTE(review): the string below is a stray no-op statement, not a docstring
        """Plot the counts, observation duration, variance and rate"""
        fig, axs = plt.subplots(2, 2, figsize=(18, 14), sharex=True, sharey=True, num=figurename)
        self.occ_map.plot(axs[0, 0], transparent=True, zorder=2, black_thresh=200)
        self.occ_map.plot(axs[0, 1], transparent=True, zorder=2, black_thresh=200)
        self.occ_map.plot(axs[1, 0], transparent=True, zorder=2, black_thresh=200)
        self.occ_map.plot(axs[1, 1], transparent=True, zorder=2, black_thresh=200)
        axs[0, 0].set_ylim((self.params.origin[1], self.params.origin[1] + self.params.height))
        axs[0, 0].set_xlim((self.params.origin[0], self.params.origin[0] + self.params.width))
        axs[0, 0].set_title("Data: People counts")
        grid_edges = self.grids[0].get_edges(as_3d=True)
        # mesh_c, mesh_m = self.grids[0].plot_counts(axs[0, 0], masked=True, vmin=0,
        #                                            vmax=(self.Y_data_all[:, 0] * self.Y_data_all[:, 1]).max())
        counts_max = 120
        mesh_c, mesh_m = self.grids[0].plot_counts(axs[0, 0], masked=True, vmin=0, vmax=counts_max)

        # def get_bin_data(num):
        #     Z = self.Z_data[self.Z_data[:, 2] == num][:, :2]
        #     return Z
        #
        # Z = get_bin_data(0)

        def plot_dat(axs, ma, vmax=1):
            # NOTE(review): parameter `axs` shadows the outer subplot array
            return axs.pcolormesh(grid_edges[:, :, 0], grid_edges[:, :, 1], ma, shading='auto', cmap='jet',
                                  alpha=0.5, vmin=0, vmax=vmax)

        rate_max = max(np.max(grid.rate) for grid in self.grids)
        obv_mesh = plot_dat(axs[0, 1], self.grids[0].obv_duration, vmax=self.grids[0].obv_duration.max())
        var_mesh = plot_dat(axs[1, 0], self.grids[0].stddev, vmax=self.Y_data_all[:, 1].max() * 0.75)
        rat_mesh = plot_dat(axs[1, 1], self.grids[0].rate, vmax=rate_max)
        # z_scatter = axs[1, 1].scatter(Z[:, 0], Z[:, 1], marker='o', color='black')
        axs[0, 1].set_title("Observation duration (filtered)")
        axs[1, 0].set_title("Std deviation (filtered) (Deprecated)")
        axs[1, 1].set_title("Rate (filtered)")
        fig.colorbar(mesh_c, ax=axs[0, 0])
        fig.colorbar(obv_mesh, ax=axs[0, 1])
        fig.colorbar(var_mesh, ax=axs[1, 0])
        fig.colorbar(rat_mesh, ax=axs[1, 1])
        ax_sl = fig.add_axes([0.2, 0.05, 0.75, 0.03])

        def update(val):
            # Slider callback: swap all four meshes to the grid of the selected bin
            i = int(slider.val)
            # Z = get_bin_data(i)
            obv_mesh.set_array(self.grids[i].obv_duration.ravel())
            var_mesh.set_array(self.grids[i].stddev.ravel())
            rat_mesh.set_array(self.grids[i].rate.ravel())
            mesh_c.set_array(self.grids[i].counts.ravel())
            mesh_m.set_array(~self.grids[i].counts_masked.mask.ravel())
            # z_scatter.set_offsets(np.vstack([Z[:, 0], Z[:, 1]]).T)
            ts = self.timestamps[i]
            if pd.isnull(ts):
                strtime = ""
            else:
                strtime = ts.strftime('%d-%m-%Y: %H:%M')
            axs[0, 0].set_title("Data: People counts. Timestamp: {}, {}".format(i, strtime))
            fig.canvas.draw()
            fig.canvas.flush_events()

        # ax_sl.set_xlim(0, len(self.grids) - 1)
        slider = Slider(ax_sl, 'Timestamp %i' % 1, 0, len(self.grids) - 1, valinit=0, valfmt='%i')
        slider.on_changed(update)
        plt.show()
nilq/baby-python
python
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
#   See COPYING file distributed along with the NiBabel package for the
#   copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Testing filesets - a draft
"""

import numpy as np

from .. import Nifti1Image, Nifti1Pair, MGHImage, all_image_classes
from io import BytesIO
from ..fileholders import FileHolderError
from ..spatialimages import SpatialImage

from numpy.testing import assert_array_equal

import pytest


def test_files_spatialimages():
    # test files creation in image classes
    # A fresh file_map must have every holder unset (no filename, no fileobj,
    # position 0), both for the class-level map and for a new image instance.
    arr = np.zeros((2, 3, 4))
    aff = np.eye(4)
    klasses = [klass for klass in all_image_classes
               if klass.rw and issubclass(klass, SpatialImage)]
    for klass in klasses:
        file_map = klass.make_file_map()
        for key, value in file_map.items():
            assert value.filename is None
            assert value.fileobj is None
            assert value.pos == 0

        # If we can't create new images in memory without loading, bail here
        if not klass.makeable:
            continue
        # MGHImage accepts only a few datatypes
        # so we force a type change to float32
        if klass == MGHImage:
            img = klass(arr.astype(np.float32), aff)
        else:
            img = klass(arr, aff)
        for key, value in img.file_map.items():
            assert value.filename is None
            assert value.fileobj is None
            assert value.pos == 0


def test_files_interface():
    # test high-level interface to files mapping
    # Covers filename-based naming (extension inference for single-file vs
    # pair formats) and in-memory round-trips through BytesIO file objects.
    arr = np.zeros((2, 3, 4))
    aff = np.eye(4)
    img = Nifti1Image(arr, aff)
    # single image
    img.set_filename('test')
    assert img.get_filename() == 'test.nii'
    assert img.file_map['image'].filename == 'test.nii'
    # Single-file Nifti1 has no separate header holder
    with pytest.raises(KeyError):
        img.file_map['header']
    # pair - note new class
    img = Nifti1Pair(arr, aff)
    img.set_filename('test')
    assert img.get_filename() == 'test.img'
    assert img.file_map['image'].filename == 'test.img'
    assert img.file_map['header'].filename == 'test.hdr'
    # fileobjs - single image
    img = Nifti1Image(arr, aff)
    img.file_map['image'].fileobj = BytesIO()
    img.to_file_map()  # saves to files
    img2 = Nifti1Image.from_file_map(img.file_map)
    # img still has correct data
    assert_array_equal(img2.get_fdata(), img.get_fdata())
    # fileobjs - pair
    img = Nifti1Pair(arr, aff)
    img.file_map['image'].fileobj = BytesIO()
    # no header yet
    with pytest.raises(FileHolderError):
        img.to_file_map()
    img.file_map['header'].fileobj = BytesIO()
    img.to_file_map()  # saves to files
    img2 = Nifti1Pair.from_file_map(img.file_map)
    # img still has correct data
    assert_array_equal(img2.get_fdata(), img.get_fdata())


def test_round_trip_spatialimages():
    # write an image to files
    # Round-trips every writable, makeable spatial image class through
    # in-memory file objects twice, checking data integrity each time.
    data = np.arange(24, dtype='i4').reshape((2, 3, 4))
    aff = np.eye(4)
    klasses = [klass for klass in all_image_classes
               if klass.rw and klass.makeable and issubclass(klass, SpatialImage)]
    for klass in klasses:
        file_map = klass.make_file_map()
        for key in file_map:
            file_map[key].fileobj = BytesIO()
        img = klass(data, aff)
        img.file_map = file_map
        img.to_file_map()
        # read it back again from the written files
        img2 = klass.from_file_map(file_map)
        assert_array_equal(img2.get_fdata(), data)
        # write, read it again
        img2.to_file_map()
        img3 = klass.from_file_map(file_map)
        assert_array_equal(img3.get_fdata(), data)
nilq/baby-python
python
import sys
from PyQt5 import QtGui, QtCore, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from time import sleep

from view import Scene


class Inventory(QWidget):
    """Fixed-size 1400x800 inventory window, centered on the desktop.

    Fixes over the previous revision:
    - The QPainter work was moved from setup_window() into paintEvent():
      Qt only permits painting a widget from within its paintEvent handler,
      so the old draw calls in setup_window() had no visible effect and
      produced "QPainter::begin" warnings at runtime.
    - Window centering now uses integer division: QWidget.move() requires
      int coordinates, and the old `/ 2` float result raises TypeError
      under PyQt5.
    """

    def __init__(self, parent=None):
        """Build the window and its buttons, then show it."""
        super(Inventory, self).__init__(parent)
        self.setup_window()
        self.create_buttons()

    def setup_window(self):
        """Size, center, and decorate the window, then show it."""
        xSize = 1400
        ySize = 800
        self.setFixedSize(QSize(xSize, ySize))

        # Background pixmap drawn by paintEvent(). Currently a null pixmap;
        # load an image (QtGui.QPixmap(path)) here to display one.
        self.background = QtGui.QPixmap()

        #
        # Starting coordinates of the window. This centers it on the desktop. Optional.
        # Integer division (//) because QWidget.move() only accepts ints.
        #
        desktop = QtWidgets.QDesktopWidget().screenGeometry()
        myWindow = self.geometry()
        xLocation = (desktop.width() - myWindow.width()) // 2
        yLocation = (desktop.height() - myWindow.height()) // 2
        self.move(xLocation, yLocation)

        #
        # Misc window settings that you can use.
        #
        self.setWindowTitle("Inventory")
        self.setWindowIcon(QtGui.QIcon('./icons/book.png'))
        self.show()

    def paintEvent(self, event):
        """Paint the background pixmap and greeting text.

        Qt invokes this whenever the widget needs repainting; QPainter may
        only target a widget from inside this handler.
        """
        painter = QtGui.QPainter(self)
        rectangle = self.contentsRect()
        if not self.background.isNull():
            painter.drawPixmap(rectangle, self.background, rectangle)
        painter.drawText(100, 100, "Hello")

    def create_buttons(self):
        """Create the single icon button shown on the window."""
        self.button = QtWidgets.QPushButton(' ', self)
        # self.button.clicked.connect(self.handleButton)
        self.button.setIcon(QtGui.QIcon('myImage.jpg'))
        self.button.setIconSize(QtCore.QSize(24, 24))
        self.button.move(200, 500)
nilq/baby-python
python
# Copyright 2015 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paasta_tools import remote_git


def test_make_determine_wants_func():
    """The determine_wants function must be purely additive.

    Whatever the wrapped callback returns, existing refs are never deleted
    or modified; only genuinely new refs may be added.
    """
    refs = {
        'refs/heads/foo': 'abcde',
        'refs/tags/blah': '12345',
    }
    # nothing changed, so nothing should change
    determine_wants = remote_git._make_determine_wants_func(lambda x: x)
    assert determine_wants(refs) == refs

    # don't delete anything.
    determine_wants = remote_git._make_determine_wants_func(lambda x: {})
    assert determine_wants(refs) == refs

    # don't modify anything existing.
    determine_wants = remote_git._make_determine_wants_func(
        lambda x: dict((k, v[::-1]) for k, v in x.items())
    )
    assert determine_wants(refs) == refs

    # only allow new things
    determine_wants = remote_git._make_determine_wants_func(
        lambda x: {'foo': 'bar'}
    )
    actual = determine_wants(refs)
    # Bug fix: `dict(refs.items() + [...])` raises TypeError on Python 3
    # because dict views no longer support `+`; build the expected mapping
    # with dict unpacking instead (PEP 448).
    expected = {**refs, 'foo': 'bar'}
    assert actual == expected
nilq/baby-python
python
from django.urls import path

from app.file_writer.views import FileWriterView

# URL routes for the file_writer app: POST/GET handler for writing files.
# NOTE(review): FileWriterView is passed to path() directly, which is only
# valid for a function-based view. If it is a class-based view it must be
# registered as FileWriterView.as_view() — confirm against
# app/file_writer/views.py.
urlpatterns = [
    path('write/', FileWriterView)
]
nilq/baby-python
python
#!/usr/bin/python3
""" Functional Test: STABILITY
Written by David McDougall, 2018

This verifies basic properties of the Stable Spatial Pooler. This generates a
new artificial dataset every time. The dataset consists of randomly generated
SDRs which are fed into the system as a timeseries. The dataset represents
objects. Objects are composed of non-mutually-exclusive sets of inputs. Each
computation cycle the system sees part of an object, and the system sees
multiple parts of an object on consequtive cycles. The test is to form a
single stable representation of each object, despite each object being
composed of disimilar components. The second test is to recognise each object
as it is seen.
"""

import numpy as np
import itertools
import random
import math
import sys
sys.path.append('.')
from sdr import SDR
from encoders import EnumEncoder
from spatial_pooler import StableSpatialPooler
from nupic.algorithms.sdr_classifier import SDRClassifier
from synapses import debug as synapses_debug


def object_dataset(num_objects, object_sizes):
    """Generate random objects as lists of 3-letter input tokens.

    Each object draws a random number of aspects (from object_sizes) out of
    the pool of all 26**3 letter triples; repeats within an object are
    possible since choices are independent.
    Returns (inputs, objects).
    """
    alphabet = [chr(ord('A') + x) for x in range(26)]
    inputs = [''.join(chrs) for chrs in itertools.product(alphabet, repeat=3)]
    # objects = [random.sample(inputs, random.choice(object_sizes)) for x in range(num_objects)]
    objects = []
    for object_id in range(num_objects):
        objects.append([])
        for aspect in range(random.choice(object_sizes)):
            objects[-1].append(random.choice(inputs))
    return inputs, objects


def measure_inter_intra_overlap(catagories, verbose = True):
    """
    Argument catagories is a dictionary of lists of SDRs, where the keys are
        the data labels, and the values are all of the sampled activation
        pattern from the catagory.

    Returns (stability, distinctiveness, stability_metric) where the metric
    is the ratio of intra-category to inter-category average overlap
    (NaN when inter-category overlap is zero).
    """
    if isinstance(catagories, dict):
        catagories = list(catagories.values())
    # Shuffle all of the samples so that they can be safely discarded when
    # enough have been used.
    for sdr_vec in catagories:
        random.shuffle(sdr_vec)

    # Soft cap on the total number of pairwise comparisons
    n_samples = 1e6
    # Measure average overlap within categories.
    stability = 0
    stability_samples = 0
    for obj_samples in catagories:
        catagory_samples = 0
        for sdr1, sdr2 in itertools.combinations(obj_samples, 2):
            stability += sdr1.overlap(sdr2)
            stability_samples += 1
            catagory_samples += 1
            # Stop early once this category used its share of the budget
            if catagory_samples > n_samples / len(catagories):
                break
    if stability_samples == 0:
        stability_samples = 1
        print("Warning: stability_samples == 0")
    stability = stability / stability_samples
    if verbose:
        print('Intra Category Overlap %g%% (%d samples)'%(100*stability, stability_samples))

    # Measure average overlap between categories.
    distinctiveness = 0
    distinctiveness_samples = 0
    n_combos = len(list(itertools.combinations(catagories, 2)))
    # Subsample each pair of categories so the overall budget is respected
    subsample = int( (n_samples / n_combos) ** .5 )
    for obj1_samples, obj2_samples in itertools.combinations(catagories, 2):
        for sdr1 in random.sample(obj1_samples, min(subsample, len(obj1_samples))):
            for sdr2 in random.sample(obj2_samples, min(subsample, len(obj2_samples))):
                distinctiveness += sdr1.overlap(sdr2)
                distinctiveness_samples += 1
    if distinctiveness_samples == 0:
        distinctiveness_samples = 1
        print("Warning: distinctiveness_samples == 0")
    distinctiveness = distinctiveness / distinctiveness_samples
    try:
        stability_metric = stability / distinctiveness
    except ZeroDivisionError:
        stability_metric = float('nan')
    if verbose:
        print('Inter Category Overlap %g%% (%d samples)'%(100*distinctiveness, distinctiveness_samples))
        samples_per_cat = np.mean([len(cat) for cat in catagories])
        print("Num Catagories %d, Avg Samples per Catagory %g"%(len(catagories), samples_per_cat))
        print('Stability Metric %g'%stability_metric)
    return stability, distinctiveness, stability_metric


# Tuned hyper-parameters for the StableSpatialPooler used by main()
default_parameters = {
    'active_thresh': 10,
    'boosting_alpha': 0.005694742035947877,
    'mini_columns': 2112,
    'permanence_dec': 0.0034674259121478907,
    'permanence_inc': 0.06557477182718344,
    'permanence_thresh': 0.11140401898422288,
    'potential_pool': 2010,
    'segments': 6,
    'sparsity': 0.026277223129229404,
    'stability_rate': 0.06921208621873447}


def main(parameters=default_parameters, argv=None, verbose=True):
    """Run the stability experiment; returns stability_metric + 10 * accuracy."""
    # Setup
    num_objects = 100
    object_sizes = range(20, 40+1)
    train_iterations = 100
    test_iterations = 5
    steps_per_object = range(3, 17+1)
    inputs, objects = object_dataset(num_objects, object_sizes)

    enc = EnumEncoder(2400, 0.02)
    # Re-wrap the encoder output SDR so it tracks activation-frequency and
    # average-overlap statistics.
    enc.output_sdr = SDR(enc.output_sdr,
        activation_frequency_alpha = parameters['boosting_alpha'],
        average_overlap_alpha = parameters['boosting_alpha'],)

    sp = StableSpatialPooler(
        input_sdr = enc.output_sdr,
        macro_columns = (1,),
        **parameters)
    sdrc = SDRClassifier(steps=[0])

    def measure_catagories():
        # Compute every sensation for every object.
        objects_columns = []
        for obj in objects:
            objects_columns.append([])
            for sensation in obj:
                sp.reset()
                enc.encode(sensation)
                sp.compute(learn=False)
                objects_columns[-1].append(SDR(sp.columns))
        sp.reset()
        return objects_columns

    if verbose:
        print("Num-Inputs ", len(set(itertools.chain.from_iterable(objects))))
        print('Num-Objects ', num_objects)
        print("Object-Sizes", object_sizes)
        print("Steps/Object", steps_per_object)
        print(sp.statistics())
        objects_columns = measure_catagories()
        measure_inter_intra_overlap(objects_columns, verbose)
        print("")

    # TRAIN
    train_time = train_iterations * num_objects * np.mean(steps_per_object)
    print('TRAINING for ~%d Cycles (%d dataset iterations) ...'%(train_time, train_iterations))
    print("")

    sp.reset()
    t = 0
    for iteration in range(train_iterations):
        object_order = list(range(num_objects))
        random.shuffle(object_order)
        for object_id in object_order:
            # Present several consecutive (random) aspects of the same object
            for step in range(random.choice(steps_per_object)):
                sensation = random.choice(objects[object_id])
                enc.encode(sensation)
                sp.compute()
                try:
                    sdrc.compute(t, sp.columns.flat_index,
                        classification = {"bucketIdx": object_id, "actValue": object_id,},
                        learn=True, infer=False)
                except ValueError:
                    print("Warning: len(active) = %d."%(len(sp.columns)))
                t += 1

    if verbose:
        print("TESTING ...")
        print("")
        print('Encoder Output', enc.output_sdr.statistics())
        print(sp.statistics())

    objects_columns = measure_catagories()
    _, __, stability_metric = measure_inter_intra_overlap(objects_columns, verbose)

    # Measure classification accuracy. This test consists of looking at every
    # object a few times and then classifying it. The AI is evaluated on every
    # cycle.
    score = 0
    max_score = 0
    sp.reset()
    if verbose:
        print("")
        print("Test length: %d dataset iterations."%(test_iterations))
    test_data = list(range(num_objects))
    for iteration in range(test_iterations):
        random.shuffle(test_data)
        for object_id in test_data:
            for step in range(random.choice(steps_per_object)):
                sensation = random.choice(objects[object_id])
                enc.encode(sensation)
                sp.compute(learn=True)
                inference = sdrc.infer(sp.columns.flat_index, None)[0]
                inference = np.argmax(inference)
                if inference == object_id:
                    score += 1
                max_score += 1
    if verbose:
        print('Classification Accuracy: %g %%'%(100 * score / max_score))

    if synapses_debug:
        sp.synapses.check_data_integrity()
        print("Synapse data structure integrity is OK.")

    # Combined score: stability plus (heavily weighted) classification accuracy
    return stability_metric + 10 * (score / max_score)


if __name__ == '__main__':
    main()
nilq/baby-python
python
import unittest

from docx2md import DocxMedia


class FakeDocxFile:
    """Minimal stand-in for a DocxFile: serves a canned relationships XML body."""

    _PROLOG = b'<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Relationships>'
    _EPILOG = b"</Relationships>"

    def __init__(self, text):
        self.text = text

    def rels(self):
        # Wrap the payload in the XML prolog/epilog DocxMedia expects.
        return b"".join((self._PROLOG, self.text, self._EPILOG))


class TestDocxMedia(unittest.TestCase):
    """DocxMedia lookup behavior over relationship definitions."""

    @staticmethod
    def _media_for(xml):
        # Build a DocxMedia over a fake docx exposing the given rels payload.
        return DocxMedia(FakeDocxFile(xml))

    def test_length_is_zoro(self):
        """An empty relationships file yields an empty, None-returning table."""
        media = self._media_for(b"")
        self.assertEqual(len(media), 0)
        self.assertFalse("id" in media)
        self.assertIsNone(media["id"])

    def test_no_media(self):
        """A target outside media/ is not registered as media."""
        media = self._media_for(b'<Relationship Id="id" Target="test.png"/>')
        self.assertEqual(len(media), 0)
        self.assertFalse("id" in media)
        self.assertIsNone(media["id"])

    def test_one_png_media(self):
        """A media/ PNG target is kept as-is, with no alternate path."""
        media = self._media_for(b'<Relationship Id="id" Target="media/test.png"/>')
        self.assertEqual(len(media), 1)
        self.assertTrue("id" in media)
        self.assertEqual(media["id"].path, "media/test.png")
        self.assertFalse(media["id"].use_alt)

    def test_one_xxx_media(self):
        """A media/ target with an unknown extension gets a .png alternate."""
        media = self._media_for(b'<Relationship Id="id" Target="media/test.xxx"/>')
        self.assertEqual(len(media), 1)
        self.assertTrue("id" in media)
        self.assertEqual(media["id"].path, "media/test.xxx")
        self.assertEqual(media["id"].alt_path, "media/test.png")
        self.assertTrue(media["id"].use_alt)
nilq/baby-python
python
# -*- coding: utf-8 -*- """ Created on Mon Jan 25 15:21:34 2021 @author: crtjur """ import tkinter as tk from PIL import Image, ImageTk root = tk.Tk() root.title("Title") root.geometry("280x350") root.configure(background="black") class Example(tk.Frame): def __init__(self, master, *pargs): tk.Frame.__init__(self, master, *pargs) self.image = Image.open("diagram-v2.png") self.img_copy= self.image.copy() self.background_image = ImageTk.PhotoImage(self.image) self.background = tk.Label(self, image=self.background_image) self.background.pack(fill= tk.BOTH, expand=True) #, self.background.bind('<Configure>', self._resize_image) def _resize_image(self,event): new_width = event.width new_height = event.height self.image = self.img_copy.resize((new_width, new_height)) self.background_image = ImageTk.PhotoImage(self.image) self.background.configure(image = self.background_image) e = Example(root) e.pack(fill=tk.BOTH, expand=True) root.mainloop()
nilq/baby-python
python
""" WGAN-applicable Fully Convolutional Neural Network """ import tensorflow as tf from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, AveragePooling2D, UpSampling2D, Concatenate, Dropout, LeakyReLU, BatchNormalization from architectures.activations import UV_activation from architectures.layers import SymmetricPadding from architectures.blocks import GeneratorBlock2D def WGAN_GP_UNet(img_dim=(256,256), dropout_rate=0., batchnorm=True): """WGAN FCNN architecture for generating UV colour components given the Y (luminance) component Network input: grayscale image of specified dimensions Network output: YUV image of specified dimensions Parameters ---------- img_dim : tuple Dimensions of input image (excluding the channel) Returns ------- generator_input : tf.keras.layers.Input Input image node generator_out : tf.keras.layers.Output Output image node """ # Network input image generator_input = Input(shape=(*img_dim, 1), name="generator_input") # Network architecture # Encoder # Encoder block 1 generator_E1a = GeneratorBlock2D(32, (3,3), padding="symmetric", dropout_rate=dropout_rate, batchnorm=batchnorm)(generator_input) generator_E1b = GeneratorBlock2D(32, (3,3), padding="symmetric", dropout_rate=dropout_rate, batchnorm=batchnorm)(generator_E1a) #generator_E1D = Conv2D(32, (4,4), strides=2, padding="same")(generator_E1b) generator_E1D = AveragePooling2D((2,2))(generator_E1b) # Encoder block 2 generator_E2a = GeneratorBlock2D(64, (3,3), padding="symmetric", dropout_rate=dropout_rate, batchnorm=batchnorm)(generator_E1D) generator_E2b = GeneratorBlock2D(64, (3,3), padding="symmetric", dropout_rate=dropout_rate, batchnorm=batchnorm)(generator_E2a) #generator_E2D = Conv2D(64, (4,4), strides=2, padding="same")(generator_E2) generator_E2D = AveragePooling2D((2,2))(generator_E2b) # Bottleneck generator_BN = GeneratorBlock2D(128, (3,3), padding="symmetric", dropout_rate=dropout_rate, batchnorm=batchnorm)(generator_E2D) generator_BN = GeneratorBlock2D(128, 
(3,3), padding="symmetric", dropout_rate=dropout_rate, batchnorm=batchnorm)(generator_BN) # Decoder # Decoder block 2 #generator_D2U = Conv2DTranspose(128, (4,4), strides=2, padding="same")(generator_BN) generator_D2U = UpSampling2D((2,2))(generator_BN) generator_D2 = Concatenate(axis=-1)([generator_D2U, generator_E2b]) generator_D2b = GeneratorBlock2D(64, (3,3), padding="symmetric", dropout_rate=dropout_rate, batchnorm=batchnorm)(generator_D2) generator_D2a = GeneratorBlock2D(64, (3,3), padding="symmetric", dropout_rate=dropout_rate, batchnorm=batchnorm)(generator_D2b) # Decoder block 1 #generator_D1U = Conv2DTranspose(64, (4,4), strides=2, padding="same")(generator_D2) generator_D1U = UpSampling2D((2,2))(generator_D2a) generator_D1 = Concatenate(axis=-1)([generator_D1U, generator_E1b]) generator_D1b = GeneratorBlock2D(32, (3,3), padding="symmetric", dropout_rate=dropout_rate, batchnorm=batchnorm)(generator_D1) generator_D1a = GeneratorBlock2D(32, (3,3), padding="symmetric", dropout_rate=dropout_rate, batchnorm=batchnorm)(generator_D1b) # Output layer - output channels are U and V components, so need to append to Y channel (input image) generator_output = Conv2D(2, (1,1), activation=UV_activation, padding="same", name="generator_output_UV")(generator_D1a) generator_output = Concatenate(axis=-1, dtype=tf.dtypes.float32, name="generator_output_YUV")([generator_input, generator_output]) return generator_input, generator_output
nilq/baby-python
python
from prometheus_client import start_http_server
from prometheus_client.core import REGISTRY

from collector import ElasticBeanstalkCollector

import time
import datetime


def main():
    """Run the Elastic Beanstalk Prometheus exporter until interrupted.

    Registers the collector, exposes /metrics on :9552, and parks the main
    thread (the HTTP server runs in a daemon thread).
    """
    port = 9552
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3],
          "Starting exporter on :", port)
    try:
        # BUG FIX: register the collector BEFORE exposing the HTTP endpoint,
        # so the very first scrape cannot hit a registry without our metrics.
        REGISTRY.register(ElasticBeanstalkCollector())
        start_http_server(port)
        # Keep the main thread alive; the server thread is a daemon.
        while True:
            time.sleep(60)
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)


if __name__ == "__main__":
    main()
nilq/baby-python
python
import os
import csv
import datetime

from flask import Flask, render_template, redirect, url_for

app = Flask(__name__)

# Module-level cache of per-CSV metadata, rebuilt by cache_file_data().
file_data_list = []


def convert_to_timestamp(epoch_time='0'):
    # Epoch seconds (string or number) -> 'YYYY-MM-DDTHH:MM:SS' in local time.
    return datetime.datetime.fromtimestamp(int(float(epoch_time))).strftime('%Y-%m-%dT%H:%M:%S')


def cache_file_data():
    """Scan ./data for CSV files and cache name/link/age/last-read metadata.

    Each CSV's first column is assumed to be an epoch timestamp; 'age' is the
    span between first and last row in hours.
    """
    global file_data_list
    # clear previous cache first
    file_data_list = []
    files_dir = os.path.join(os.getcwd(), 'data')
    csv_files = os.listdir(files_dir)  # NOTE(review): assumes directory contains only CSV files — confirm
    for file in csv_files:
        file_data = dict()
        # File naming convention: '<reddit_id>[_complete].csv'
        file_data['name'] = file.replace('.csv', '').replace('_complete', '')
        file_data['link'] = 'http://redd.it/' + file_data['name']
        file_data['path'] = os.path.join(files_dir, file)
        file_data['complete'] = True if 'complete' in file else False
        with open(file_data['path'], mode='r') as csvfile:
            csvreader = csv.reader(csvfile)
            data_list = list(csvreader)
            # Age in hours between the first and last recorded timestamp.
            file_data['age'] = round((float(data_list[-1][0]) - float(data_list[0][0])) / (60 * 60), 2)
            file_data['last_read'] = convert_to_timestamp(data_list[-1][0])
        file_data_list.append(file_data)


@app.route('/')
def index():
    """List all cached files, sorted by age ascending."""
    global file_data_list
    # return render_template('index.html', file_data=file_data_list)
    return render_template('index.html', file_data=sorted(file_data_list, key=lambda data: data['age']))


@app.route('/refresh')
def refresh_data():
    """Rebuild the cache, then bounce back to the index page."""
    cache_file_data()
    return redirect(url_for('index'))


@app.route('/graph/<name>')
def graph(name):
    """Render the graph page for one cached file.

    NOTE(review): if `name` matches no cached file this falls through and
    returns None, which Flask turns into a 500 — confirm whether a 404 was
    intended.
    """
    global file_data_list
    for file in file_data_list:
        if file['name'] == name:
            with open(file['path'], mode='r') as csvfile:
                csvreader = csv.reader(csvfile)
                data_list = list(csvreader)
                # convert all the epoch times to timestamps for graphing
                data_list = [
                    [
                        convert_to_timestamp(data[0]),
                        data[1],
                        data[2],
                        data[3],
                        data[4]
                    ] for data in data_list
                ]
                file['data'] = data_list
            return render_template('graph.html', data=file)


if __name__ == '__main__':
    cache_file_data()
    # app.run(host='127.0.0.1', port=5000, debug=True)
    app.run(host='0.0.0.0', port=5000, debug=False)
nilq/baby-python
python
"""Jenkins test report metric collector.""" from datetime import datetime from typing import Dict, Final, List, cast from dateutil.parser import parse from base_collectors import SourceCollector from collector_utilities.functions import days_ago from collector_utilities.type import URL from source_model import Entity, SourceMeasurement, SourceResponses TestCase = Dict[str, str] Suite = Dict[str, List[TestCase]] class JenkinsTestReportTests(SourceCollector): """Collector to get the amount of tests from a Jenkins test report.""" JENKINS_TEST_REPORT_COUNTS: Final[Dict[str, str]] = dict( failed="failCount", passed="passCount", skipped="skipCount") async def _api_url(self) -> URL: return URL(f"{await super()._api_url()}/lastSuccessfulBuild/testReport/api/json") async def _parse_source_responses(self, responses: SourceResponses) -> SourceMeasurement: json = await responses[0].json() statuses = cast(List[str], self._parameter("test_result")) status_counts = [self.JENKINS_TEST_REPORT_COUNTS[status] for status in statuses] all_status_counts = self.JENKINS_TEST_REPORT_COUNTS.values() results = [report["result"] for report in json["childReports"]] if "childReports" in json else [json] value = sum(int(result.get(status_count, 0)) for status_count in status_counts for result in results) total = sum(int(result.get(status_count, 0)) for status_count in all_status_counts for result in results) suites: List[Suite] = [] for result in results: suites.extend(result["suites"]) entities = [ self.__entity(case) for suite in suites for case in suite.get("cases", []) if self.__status(case) in statuses] return SourceMeasurement(value=str(value), total=str(total), entities=entities) def __entity(self, case: TestCase) -> Entity: """Transform a test case into a test case entity.""" name = case.get("name", "<nameless test case>") return Entity( key=name, name=name, class_name=case.get("className", ""), test_result=self.__status(case), age=str(case.get("age", 0))) @staticmethod def 
__status(case: TestCase) -> str: """Return the status of the test case.""" # The Jenkins test report has three counts: passed, skipped, and failed. Individual test cases # can be skipped (indicated by the attribute skipped being "true") and/or have a status that can # take the values: "failed", "passed", "regression", and "fixed". test_case_status = "skipped" if case.get("skipped") == "true" else case.get("status", "").lower() return dict(regression="failed", fixed="passed").get(test_case_status, test_case_status) class JenkinsTestReportSourceUpToDateness(SourceCollector): """Collector to get the age of the Jenkins test report.""" async def _get_source_responses(self, *urls: URL) -> SourceResponses: test_report_url = URL(f"{urls[0]}/lastSuccessfulBuild/testReport/api/json") job_url = URL(f"{urls[0]}/lastSuccessfulBuild/api/json") return await super()._get_source_responses(test_report_url, job_url) async def _parse_source_responses(self, responses: SourceResponses) -> SourceMeasurement: timestamps = [suite.get("timestamp") for suite in (await responses[0].json()).get("suites", []) if suite.get("timestamp")] report_datetime = parse(max(timestamps)) if timestamps else \ datetime.fromtimestamp(float((await responses[1].json())["timestamp"]) / 1000.) return SourceMeasurement(value=str(days_ago(report_datetime)))
nilq/baby-python
python
# NOTE: machine-generated by the Quark compiler — regenerate rather than hand-edit.
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from builtins import str as unicode

from quark_runtime import *
_lazyImport.plug("inheritance.pets")
import quark.reflect


class Pet(_QObject):
    # Abstract base: Cat and Dog override greet().
    def _init(self):
        pass

    def __init__(self):
        self._init()

    def greet(self):
        raise NotImplementedError('`Pet.greet` is an abstract method')

    def _getClass(self):
        return u"inheritance.pets.Pet"

    def _getField(self, name):
        return None

    def _setField(self, name, value):
        pass
# Reflection reference slot populated by the Quark runtime.
Pet.inheritance_pets_Pet_ref = None


class Cat(Pet):
    def _init(self):
        Pet._init(self)

    def __init__(self):
        super(Cat, self).__init__();

    def greet(self):
        _println(u"meow!");

    def _getClass(self):
        return u"inheritance.pets.Cat"

    def _getField(self, name):
        return None

    def _setField(self, name, value):
        pass
Cat.inheritance_pets_Cat_ref = None


class Dog(Pet):
    def _init(self):
        Pet._init(self)

    def __init__(self):
        super(Dog, self).__init__();

    def greet(self):
        _println(u"woof!");

    def _getClass(self):
        return u"inheritance.pets.Dog"

    def _getField(self, name):
        return None

    def _setField(self, name, value):
        pass
Dog.inheritance_pets_Dog_ref = None


def _lazy_import_quark_ffi_signatures_md():
    import quark_ffi_signatures_md
    globals().update(locals())


_lazyImport("import quark_ffi_signatures_md", _lazy_import_quark_ffi_signatures_md)
_lazyImport.pump("inheritance.pets")
nilq/baby-python
python
#! /usr/bin/env python
import numpy as np
import random
from time import sleep
from curses import wrapper  # NOTE(review): imported but unused in this chunk — confirm before removing


def two_dim(pos, width):
    '''
    Return 2d co-ordinates represented as 1d array.
    pos: position in one dimension
    width: total width of 2d array(columns)
    '''
    # `width` is the highest column index; +1 converts it to the column count.
    width += 1
    return pos//width, pos%width


def possible_choices(array):
    '''
    Return possible choices(within bounds) of directions from given state.
    array: 1x4 array of a single state's directions
    '''
    # Out-of-bounds directions were set to NaN in Qtable.__init__.
    return [i for i in np.where(~np.isnan(array))[0]]


def next_state(state, direction, colsize):
    '''
    Returns next state given current state and direction
    state: current state(position)
    direction: direction of next move
    colsize: width of the board
    '''
    # Directions by offset: 0 = up a row, 1 = right, 2 = down a row, 3 = left.
    if direction == 0:
        next = state - (colsize + 1)
    elif direction == 1:
        next = state + 1
    elif direction == 2:
        next = state + colsize + 1
    else:
        next = state - 1
    return next


class Board:
    '''
    Stores board layout and rewards table.
    tiles: np.array, square matrix of tiles represented by chars
    costs: dict of costs of each tile type
    '''
    def __init__(self, tiles, costs):
        self.tiles = tiles
        # Build rewards table by taking costs
        self.reward = np.array(
            list([costs[val] for val in l] for l in self.tiles),
            dtype=np.int16)
        self.player_pos = (0, 0)

    def __str__(self):
        # Temporarily changing tile at player's position.
        # This can probably be done in a better way
        tile_under_player = self.tiles[self.player_pos[0], self.player_pos[1]]
        self.tiles[self.player_pos[0], self.player_pos[1]] = 'K'
        table_string = '\n'
        for r in self.tiles:
            table_string += ''.join(r) + '\n'
        self.tiles[self.player_pos[0], self.player_pos[1]] = tile_under_player
        return table_string


class Qtable:
    '''
    Stores Q values, methods to train Q-table using the Bellman-Ford algorithm.
    '''
    def __init__(self, board, cur_state=0):
        self.board = board
        self.cur_state = cur_state
        self.direction = 0
        self.acc_cost = 0
        # width of board (not qtable)
        self.colsize = board.tiles.shape[1] - 1
        # One row per board tile, one column per direction (up/right/down/left).
        self.table = np.zeros((board.tiles.size, 4))
        # setting illegal(outside bounds) choices to NaN
        # top edge
        self.table[:self.colsize+1, 0] = np.nan
        # right edge
        self.table[self.colsize::self.colsize+1, 1] = np.nan
        # bottom edge
        self.table[-(self.colsize+1):, 2] = np.nan
        # left edge
        self.table[::self.colsize+1, 3] = np.nan

    def calculate_q(self):
        '''
        Calculate Q value using the Bellman Ford algorithm
        '''
        # Q(s,a) += lr * (reward(s) + discount * max_a' Q(s',a') - Q(s,a))
        # Requires train() to have set self.learn_rate/discount/next_state first.
        self.table[self.cur_state, self.direction] += \
            self.learn_rate * (
                self.board.reward[
                    two_dim(self.cur_state, self.colsize)[0],
                    two_dim(self.cur_state, self.colsize)[1]]
                + self.discount * np.nanmax(self.table[self.next_state])
                - self.table[self.cur_state, self.direction])

    def train(
            self, iterations, learn_rate=0.1, discount=0.9, epsilon=1.0,
            decay=True):
        '''
        Train the table.
        iterations: number of iterations
        learn_rate: the learning rate
        discount: the delta discount
        epsilon: starting value of epsilon parameter
        decay: boolean value indicating whether to enable epsilon decay.(True by default)
        '''
        self.learn_rate = learn_rate
        self.discount = discount
        if decay:
            # Linear decay so epsilon reaches ~0 by the final iteration.
            epsilon_decay = epsilon / iterations
        else:
            epsilon_decay = 0
        for i in range(iterations):
            if random.random() > epsilon:
                # exploit
                self.direction = np.nanargmax(self.table[self.cur_state])
            else:
                # explore
                self.direction = int(random.choice(
                    possible_choices(self.table[self.cur_state])))
            self.acc_cost += self.table[self.cur_state, self.direction]
            self.next_state = next_state(
                self.cur_state, self.direction, self.colsize)
            self.calculate_q()
            self.print_info(i)
            self.cur_state = self.next_state
            epsilon -= epsilon_decay
            # Reset to original state if game finishes(win or lose)
            # NOTE(review): the -100/+90 episode bounds look tuned to a
            # specific board's reward scale — confirm for other boards.
            if self.acc_cost < -100 or self.acc_cost > 90:
                self.cur_state = 0
                self.acc_cost = 0

    def print_info(self, i):
        # Console trace of one training step.
        print('Training iteration ', i)
        print('Current state: ', self.cur_state)
        print('Choice costs: ', self.table[self.cur_state])
        print('Chosen direction: ', self.direction)
        print('Accumulated cost: ', self.acc_cost)
        print('-' * 20)
        print('\n\n')
        pass  # NOTE(review): redundant trailing `pass`

    def play(self):
        # Greedy rollout from state 0 using the trained table, printing the
        # board each step; stops on accumulated cost >= 50 or a losing loop.
        # TODO user-defined starting state
        state = 0
        cost = 0
        print('\n\nQ-Table:')
        print(self.table)
        print('Game:')
        print(self.board)
        while cost < 50:
            sleep(1)
            direction = np.nanargmax(self.table[state])
            cost += self.table.item(state, direction)
            state = next_state(state, direction, self.colsize)
            self.board.player_pos = two_dim(state, self.colsize)
            # if player gets stuck in loop
            if cost < -10:
                print('Ended game due to Bad performance,\n'
                      'Please retrain with different parameters.')
                break
            print(self.board)
nilq/baby-python
python
""" UrlCanonicalizerMiddleware A spider middleware to canonicalize the urls of all requests generated from a spider. imported from http://snipplr.com/view/67007/url-canonicalizer-spider-middleware/ # Snippet imported from snippets.scrapy.org (which no longer works) # author: pablo # date : Sep 07, 2010 """ from scrapy.http import Request from scrapy.utils.url import canonicalize_url class UrlCanonicalizerMiddleware(object): def process_spider_output(self, response, result, spider): for r in result: if isinstance(r, Request): curl = canonicalize_url(r.url) if curl != r.url: r = r.replace(url=curl) yield r
nilq/baby-python
python
"""Public interface of the diophantine solver subpackage."""

from .diophantine import classify_diop, diop_solve, diophantine

__all__ = [
    'diophantine',
    'classify_diop',
    'diop_solve',
]
nilq/baby-python
python
# Generated by Django 3.1.7 on 2021-04-14 16:54 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('blog', '0012_auto_20210414_1252'), ] operations = [ migrations.AlterField( model_name='post', name='additional_address_info', field=models.CharField(blank=True, max_length=100, null=True), ), ]
nilq/baby-python
python
""" DELL SDP P-Search API """ try: import xml.etree.cElementTree as ET except ImportError: import xml.etree.ElementTree as ET class SDPException(Exception): pass class SDPApi(object): """ SDP API Calls """ def __init__(self, sdpclient, logger, response_json=None): self.ecs_authentication_failure = int('497') self.response_json = response_json self.logger = logger self.sdp_pravega_api_client = sdpclient self.response_xml_file = None def get_sdp_cluster_info(self): while True: r = self.sdp_pravega_api_client.info() # If we didn't get anything back there was a problem if r is None: self.logger.debug('SDPApi::get_sdp_cluster_info()::/ call did not return any data.') break else: self.response_json = r if type(self.response_json) is list: self.logger.debug('SDPApi::get_sdp_cluster_info()::r.json() returned a list. ') elif type(self.response_json) is dict: self.logger.debug('SDPApi::get_sdp_cluster_info()::r.json() returned a dictionary. ') else: self.logger.debug('SDPApi::get_sdp_cluster_info()::r.json() returned unknown. ') break return self.response_json def get_sdp_cluster_state(self): while True: r = self.sdp_pravega_api_client.cluster.state() # If we didn't get anything back there was a problem if r is None: self.logger.debug('SDPApi::get_sdp_cluster_state()::/ call did not return any data.') break else: self.response_json = r if type(self.response_json) is list: self.logger.debug('SDPApi::get_sdp_cluster_state()::r.json() returned a list. ') elif type(self.response_json) is dict: self.logger.debug('SDPApi::get_sdp_cluster_state()::r.json() returned a dictionary. ') else: self.logger.debug('SDPApi::get_sdp_cluster_state()::r.json() returned unknown. 
') break return self.response_json def get_sdp_indices(self, index): while True: r = self.sdp_pravega_api_client.indices.get(index) # If we didn't get anything back there was a problem if r is None: self.logger.debug('SDPApi::get_sdp_indices()::/ call did not return any data.') break else: self.response_json = r if type(self.response_json) is list: self.logger.debug('SDPApi::get_sdp_indices()::r.json() returned a list. ') elif type(self.response_json) is dict: self.logger.debug('SDPApi::get_sdp_indices()::r.json() returned a dictionary. ') else: self.logger.debug('SDPApi::get_sdp_indices()::r.json() returned unknown. ') break return self.response_json def search_sdp_index(self, query, index_name): while True: r = self.sdp_pravega_api_client.search(body=query, index=index_name) # If we didn't get anything back there was a problem if r is None: self.logger.debug('SDPApi::get_sdp_indices()::/ call did not return any data.') break else: self.response_json = r if type(self.response_json) is list: self.logger.debug('SDPApi::get_sdp_indices()::r.json() returned a list. ') elif type(self.response_json) is dict: self.logger.debug('SDPApi::get_sdp_indices()::r.json() returned a dictionary. ') else: self.logger.debug('SDPApi::get_sdp_indices()::r.json() returned unknown. ') break return self.response_json
nilq/baby-python
python
#!Venv/bin python3
# -*- coding: utf-8 -*-
import sqlite3
from urllib.request import urlopen

import numpy as np
import pandas as pd
from bs4 import BeautifulSoup

# Scrape the latest Joker draw from the OPAP Cyprus results page.
html = urlopen('https://www.opap.org.cy/el/page/joker-results').read()
html = html.decode('utf-8')
soup = BeautifulSoup(html, features='html.parser')
column2 = soup.find(id='column2')
competition_title = column2.find(id='competition_title')
# First results table, second row: the five main numbers plus the Joker.
table = column2.findAll('table')[0].findAll('tr')[1]
tableParts = list(filter(None, table.text.split('\n')))
# Title is of the form "<draw>η ... <date>"; splitting on 'η' yields both.
parts = competition_title.div.text.split('η')
newItem = {
    'X1': int(tableParts[0]),
    'X2': int(tableParts[1]),
    'X3': int(tableParts[2]),
    'X4': int(tableParts[3]),
    'X5': int(tableParts[4]),
    'Joker': int(tableParts[6]),
    'Draw': int(parts[0]),
    'Date': pd.to_datetime(parts[2], dayfirst=True),
}  # format

# Append the scraped draw to the CSV archive unless already recorded.
filePath = 'joker.csv'
drawData = pd.read_csv(filePath)
drawData['Date'] = pd.to_datetime(drawData['Date'])
if newItem['Draw'] not in drawData['Draw'].values:
    # NOTE(review): DataFrame.append is deprecated/removed in pandas >= 2.0;
    # would need pd.concat on upgrade.
    drawData = drawData.append(newItem, ignore_index=True)
    drawData.to_csv(filePath, index=False)


def JoinNumbers(x):
    # Concatenate the five drawn numbers of one row into a single digit string.
    return '%s%s%s%s%s' % (x['X1'], x['X2'], x['X3'], x['X4'], x['X5'])


drawData['ResultString'] = drawData.apply(JoinNumbers, axis=1)
drawData['ResultLength'] = drawData['ResultString'].map(lambda x: len(x))
# drawData = drawData[drawData['Date'] >= pd.to_datetime('2015-01-01')]
# Mean digit-length of the concatenated draw strings.
ಠ_ಠ = drawData['ResultLength'].mean()
print(ಠ_ಠ)

# Frequency table of the main numbers (1-45); row 0 is dropped as unused.
numberData = pd.DataFrame()
numberData['Number'] = np.arange(0, 46, 1)
numberData['Count'] = drawData['X1'].value_counts().sort_index()
numberData['Count'] += drawData['X2'].value_counts().sort_index()
numberData['Count'] += drawData['X3'].value_counts().sort_index()
numberData['Count'] += drawData['X4'].value_counts().sort_index()
numberData['Count'] += drawData['X5'].value_counts().sort_index()
numberData = numberData.drop(numberData.index[0])
numberData = numberData.sort_values(by='Count', ascending=False)
print(numberData.head(15))

# Frequency table of the Joker number (1-20).
jokerData = pd.DataFrame()
jokerData['Number'] = np.arange(0, 21, 1)
jokerData['Count'] = drawData['Joker'].value_counts().sort_index()
jokerData = jokerData.drop(jokerData.index[0])
jokerData = jokerData.sort_values(by='Count', ascending=False)
print(jokerData.head())

# https://docs.python.org/3/library/sqlite3.html
with sqlite3.connect('test.db') as conn:
    c = conn.cursor()
    # Draw,Date,X1,X2,X3,X4,X5,Joker
    # c.execute('''CREATE TABLE IF NOT EXISTS some_table
    # (id INTEGER PRIMARY KEY AUTOINCREMENT, ...);''')
    conn.commit()
nilq/baby-python
python
import torch
from torch import nn


class RNN_LSTM(nn.Module):
    """LSTM regressor: stacked LSTM -> Linear + BatchNorm + ReLU -> scalar head."""

    def __init__(self, input_dim, hidden_dim, n_lyrs = 1, do = .05, device = "cpu"):
        """Initialize the network architecture

        Args:
            input_dim ([int]): [Number of time steps in the past to look at for current prediction]
            hidden_dim ([int]): [The dimension of RNN output]
            n_lyrs (int, optional): [Number of stacked RNN layers]. Defaults to 1.
            do (float, optional): [Dropout for regularization]. Defaults to .05.
            device (str, optional): [Device the hidden state is created on]. Defaults to "cpu".
        """
        super(RNN_LSTM, self).__init__()
        self.ip_dim = input_dim
        self.hidden_dim = hidden_dim
        self.n_layers = n_lyrs
        self.dropout = do
        self.device = device

        # NOTE(review): with n_lyrs == 1 PyTorch ignores LSTM dropout (and warns).
        self.rnn = nn.LSTM(input_size = input_dim, hidden_size = hidden_dim, num_layers = n_lyrs, dropout = do)
        self.fc1 = nn.Linear(in_features = hidden_dim, out_features = int(hidden_dim / 2))
        self.act1 = nn.ReLU(inplace = True)
        self.bn1 = nn.BatchNorm1d(num_features = int(hidden_dim / 2))
        self.estimator = nn.Linear(in_features = int(hidden_dim / 2), out_features = 1)

    def init_hiddenState(self, bs):
        """Initialize the hidden state of RNN to all zeros

        Args:
            bs ([int]): [Batch size during training]
        """
        return torch.zeros(self.n_layers, bs, self.hidden_dim)

    def forward(self, input):
        """Define the forward propogation logic here

        Args:
            input ([Tensor]): [A 3-dimensional float tensor containing parameters]
        """
        # assumes input is (seq_len, batch, features) — batch size read from dim 1; TODO confirm with caller
        bs = input.shape[1]
        hidden_state = self.init_hiddenState(bs).to(self.device)
        # out , _ = self.rnn(input, hidden_state)
        # LSTM expects an (h0, c0) pair; both start at zero here.
        cell_state = hidden_state
        out, _ = self.rnn(input, (hidden_state, cell_state))

        # Flatten every time step's output before the fully connected head.
        out = out.contiguous().view(-1, self.hidden_dim)
        out = self.act1(self.bn1(self.fc1(out)))
        out = self.estimator(out)

        return out

    def predict(self, input):
        """Makes prediction for the set of inputs provided and returns the same

        Args:
            input ([torch.Tensor]): [A tensor of inputs]
        """
        with torch.no_grad():
            predictions = self.forward(input)

        return predictions
nilq/baby-python
python
# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Contains unit tests for npu_encode_bias API for an external consumer
import random

import numpy as np

from ethosu.vela.api import npu_encode_bias


def test_encode_bias():
    """Fuzz npu_encode_bias with in-range random inputs; each encoding must be a 10-byte bytearray."""
    # Inclusive (low, high) bounds: signed 40-bit bias, unsigned 32-bit scale, 6-bit shift.
    bias_bounds = (-(1 << (40 - 1)), (1 << (40 - 1)) - 1)
    scale_bounds = (0, (1 << 32) - 1)
    shift_bounds = (0, (1 << 6) - 1)

    for _ in range(30):
        bias = np.int64(random.randint(*bias_bounds))
        scale = int(random.randint(*scale_bounds))
        shift = int(random.randint(*shift_bounds))

        encoded = npu_encode_bias(bias, scale, shift)

        assert isinstance(encoded, bytearray)
        assert len(encoded) == 10
nilq/baby-python
python
import argparse
import json
import os
import pickle

import numpy as np

from nasbench_analysis.search_spaces.search_space_1 import SearchSpace1
from nasbench_analysis.search_spaces.search_space_2 import SearchSpace2
from nasbench_analysis.search_spaces.search_space_3 import SearchSpace3
from nasbench_analysis.utils import NasbenchWrapper
from optimizers.random_search_with_weight_sharing.darts_wrapper_discrete import DartsWrapper


def correlation_between_one_shot_nb(model_path, config, epoch):
    """Correlate one-shot model test errors with NASBench ground truth.

    Evaluates every architecture of the configured search space with the
    one-shot model stored at `model_path` (checkpoint `epoch`) and queries
    NASBench for the corresponding true errors.

    Returns (correlation, nb_test_errors, nb_valid_errors, one_shot_test_errors).

    NOTE(review): relies on the module-level `nasbench` created in the
    __main__ block; calling this after importing the module raises
    NameError — confirm intended usage.
    """
    if config['search_space'] == '1':
        search_space = SearchSpace1()
    elif config['search_space'] == '2':
        search_space = SearchSpace2()
    elif config['search_space'] == '3':
        search_space = SearchSpace3()
    else:
        raise ValueError('Unknown search space')

    model = DartsWrapper(save_path=model_path, seed=0, batch_size=128, grad_clip=5, epochs=200,
                         num_intermediate_nodes=search_space.num_intermediate_nodes,
                         search_space=search_space, cutout=False)
    # Random-WS checkpoints store discrete architectures; gradient-based ones
    # need their architecture weights normalized instead.
    if 'random_ws' in model_path:
        discrete = True
        normalize = False
    else:
        discrete = False
        normalize = True

    model.load(epoch=epoch)

    nb_test_errors = []
    nb_valid_errors = []
    one_shot_test_errors = []
    for adjacency_matrix, ops, model_spec in search_space.generate_search_space_without_loose_ends():
        if str(config['search_space']) == '1' or str(config['search_space']) == '2':
            adjacency_matrix_ss = np.delete(np.delete(adjacency_matrix, -2, 0), -2, 0)
            # Remove input, output and 5th node
            ops_ss = ops[1:-2]
        elif str(config['search_space']) == '3':
            adjacency_matrix_ss = adjacency_matrix
            # Remove input and output node
            ops_ss = ops[1:-1]
        else:
            raise ValueError('Unknown search space')

        one_shot_test_error = model.evaluate_test((adjacency_matrix_ss, ops_ss), split='test',
                                                  discrete=discrete, normalize=normalize)
        # NASBench stores 3 training repeats per spec -> repeat the one-shot
        # error to line the lists up element-wise.
        one_shot_test_errors.extend(np.repeat(one_shot_test_error, 3))
        # Query NASBench
        data = nasbench.query(model_spec)
        nb_test_errors.extend([1 - item['test_accuracy'] for item in data])
        nb_valid_errors.extend([1 - item['validation_accuracy'] for item in data])
        print('NB', nb_test_errors[-1], 'OS', one_shot_test_errors[-1], 'weights', model.model.arch_parameters())

    correlation = np.corrcoef(one_shot_test_errors, nb_test_errors)[0, -1]
    return correlation, nb_test_errors, nb_valid_errors, one_shot_test_errors


def eval_directory_on_epoch(path, epoch):
    """Evaluates all one-shot architecture methods in the directory."""
    # Read in config
    with open(os.path.join(path, 'config.json')) as fp:
        config = json.load(fp)

    correlations = []
    nb_test_errors, nb_valid_errors, one_shot_test_errors = [], [], []
    correlation, nb_test_error, nb_valid_error, one_shot_test_error = \
        correlation_between_one_shot_nb(model_path=path, config=config, epoch=epoch)
    correlations.append(correlation)
    nb_test_errors.append(nb_test_error)
    # BUG FIX: previously `nb_valid_error.append(nb_valid_error)` appended the
    # result list to ITSELF, so `nb_valid_errors` stayed empty and the pickle
    # below recorded nothing.
    nb_valid_errors.append(nb_valid_error)
    one_shot_test_errors.append(one_shot_test_error)

    with open(os.path.join(path, 'correlation_{}.obj'.format(epoch)), 'wb') as fp:
        pickle.dump(correlations, fp)
    with open(os.path.join(path, 'nb_test_errors_{}.obj'.format(epoch)), 'wb') as fp:
        pickle.dump(nb_test_errors, fp)
    with open(os.path.join(path, 'nb_valid_errors_{}.obj'.format(epoch)), 'wb') as fp:
        pickle.dump(nb_valid_errors, fp)
    with open(os.path.join(path, 'one_shot_test_errors_{}.obj'.format(epoch)), 'wb') as fp:
        pickle.dump(one_shot_test_errors, fp)


def main():
    # Load NASBench
    eval_directory_on_epoch(args.model_path, args.epoch)


parser = argparse.ArgumentParser("correlation_analysis")
parser.add_argument('--data', type=str, default='../data', help='location of the darts corpus')
parser.add_argument('--model_path',
                    default="experiments/darts/search_space_1/search-baseline-20190821-171946-0-1",
                    help='Path to where the models are stored.')
parser.add_argument('--epoch', type=int, help='Epoch')
args = parser.parse_args()

if __name__ == '__main__':
    nasbench = NasbenchWrapper('nasbench_analysis/nasbench_data/108_e/nasbench_full.tfrecord')
    main()
nilq/baby-python
python
from pylab import *  # star import supplies np, math, floor used below


def imhist(img):
    """Histogram of a grayscale image into 256 bins (returned as a 256x1 double array)."""
    rows, cols = img.shape
    h = np.zeros((256, 1), dtype=np.double)
    # NOTE(review): range(255) never fills bin 255, and the comparison uses
    # k-1 so gray level g is counted in bin g+1 (and bin 0 counts nothing,
    # since no pixel equals -1) — looks off-by-one; confirm against the
    # reference implementation before relying on absolute bin positions.
    for k in range(255):
        h[k] = 0
        for i in range(rows):
            for j in range(cols):
                if img[i,j] == k-1:
                    h[k] = h[k]+1
    return h


def My_MaxEntropy(img):
    """Iterative entropy-based threshold selection for a grayscale image.

    Starts from the mid-range threshold and alternates between intermeans-style
    threshold updates and a two-region entropy convergence test; returns the
    final threshold (floored to an integer).
    """
    rows, cols = img.shape
    gray_p = np.zeros((256, 1), dtype=np.double)
    V_max = np.double(np.max(img))
    V_min = np.double(np.min(img))
    # Initial threshold: mid-range of the image's gray levels.
    T0 = (V_max + V_min) / 2.0
    h = imhist(img)
    #print(T0 )
    for i in range(255):  ###################i from 0
        # Normalized histogram (gray-level probabilities).
        gray_p[i] = h[i] / np.double(rows * cols)
    H0= 0
    for i in range(1,256):
        if gray_p[i] > 0:
            # Global entropy of the image.
            H0 = H0 - gray_p[i]*math.log(gray_p[i])
    #print(T0)
    cout = 100  # iteration budget
    while cout >0:
        Tmax = 0
        T1 =T0
        A1 = 0  # number of pixels in region G1 (<= threshold)
        A2 = 0  # number of pixels in region G2
        B1 = 0  # gray-level sum of region G1
        B2 = 0  # gray-level sum of region G2
        gray_Pd=0
        for i in range (rows):
            for j in range(cols):
                if img[i,j]<= T1:
                    A1 = A1 + 1
                    B1 = B1 + img[i, j]
                else:
                    A2 = A2 + 1
                    B2 = B2 + img[i, j]
        M1 = B1 / A1  # mean gray level of region G1
        M2 = B2 / A2  # mean gray level of region G2
        T2 = (M1 + M2) / 2  # updated threshold
        TT = np.uint8(floor(T2))
        # NOTE(review): starts at 1 and excludes TT itself; also gray_Pd is
        # not reset between the two uses below — confirm intended.
        for i in range(1,TT):  ###########
            gray_Pd = gray_Pd + gray_p[i]
        gray_Pb = 1 - gray_Pd
        Hd = 0  # entropy of the below-threshold region
        Hb = 0  # entropy of the above-threshold region
        for i in range (255):
            if i <= TT:
                if gray_p[i] > 0:
                    Hd = Hd - gray_p[i] / gray_Pd * math.log(gray_p[i] / gray_Pd)
            else:
                if gray_p[i] > 0:
                    Hb = Hb - gray_p[i] / gray_Pb * math.log(gray_p[i] / gray_Pb)
        H1 = Hd + Hb
        # Converged when the two-region entropy stops changing.
        if abs(H0 - H1) < 0.0001:
            Tmax = T2
            break
        else:
            T0 = T2
            H0 = H1
        cout = cout - 1;
    ThreshValue = floor(Tmax)
    return ThreshValue
nilq/baby-python
python
import tensorflow as tf
import optotf.nabla
import unittest


class Nabla2d(tf.keras.layers.Layer):
    """2-D forward-difference (nabla) operator; complex inputs are split into re/im parts."""
    def __init__(self, hx=1, hy=1):
        super().__init__()
        self.op = lambda x: optotf.nabla.nabla_2d(x, hx=hx, hy=hy)

    def call(self, x):
        if x.dtype == tf.complex64 or x.dtype == tf.complex128:
            # The underlying op is real-valued: apply it per component.
            return tf.complex(self.op(tf.math.real(x)), self.op(tf.math.imag(x)))
        else:
            return self.op(x)


class Nabla3d(tf.keras.layers.Layer):
    """3-D forward-difference operator; see Nabla2d for the complex handling."""
    def __init__(self, hx=1, hy=1, hz=1):
        super().__init__()
        self.op = lambda x: optotf.nabla.nabla_3d(x, hx=hx, hy=hy, hz=hz)

    def call(self, x):
        if x.dtype == tf.complex64 or x.dtype == tf.complex128:
            return tf.complex(self.op(tf.math.real(x)), self.op(tf.math.imag(x)))
        else:
            return self.op(x)


class Nabla4d(tf.keras.layers.Layer):
    """4-D forward-difference operator; see Nabla2d for the complex handling."""
    def __init__(self, hx=1, hy=1, hz=1, ht=1):
        super().__init__()
        self.op = lambda x: optotf.nabla.nabla_4d(x, hx=hx, hy=hy, hz=hz, ht=ht)

    def call(self, x):
        if x.dtype == tf.complex64 or x.dtype == tf.complex128:
            return tf.complex(self.op(tf.math.real(x)), self.op(tf.math.imag(x)))
        else:
            return self.op(x)


class NablaT2d(tf.keras.layers.Layer):
    """Adjoint of Nabla2d."""
    def __init__(self, hx=1, hy=1):
        super().__init__()
        self.op = lambda x: optotf.nabla.nabla_2d_adjoint(x, hx=hx, hy=hy)

    def call(self, x):
        if x.dtype == tf.complex64 or x.dtype == tf.complex128:
            return tf.complex(self.op(tf.math.real(x)), self.op(tf.math.imag(x)))
        else:
            return self.op(x)


class NablaT3d(tf.keras.layers.Layer):
    """Adjoint of Nabla3d."""
    def __init__(self, hx=1, hy=1, hz=1):
        super().__init__()
        self.op = lambda x: optotf.nabla.nabla_3d_adjoint(x, hx=hx, hy=hy, hz=hz)

    def call(self, x):
        if x.dtype == tf.complex64 or x.dtype == tf.complex128:
            return tf.complex(self.op(tf.math.real(x)), self.op(tf.math.imag(x)))
        else:
            return self.op(x)


class NablaT4d(tf.keras.layers.Layer):
    """Adjoint of Nabla4d."""
    def __init__(self, hx=1, hy=1, hz=1, ht=1):
        super().__init__()
        self.op = lambda x: optotf.nabla.nabla_4d_adjoint(x, hx=hx, hy=hy, hz=hz, ht=ht)

    def call(self, x):
        if x.dtype == tf.complex64 or x.dtype == tf.complex128:
            return tf.complex(self.op(tf.math.real(x)), self.op(tf.math.imag(x)))
        else:
            return self.op(x)


class TestNabla(unittest.TestCase):
    """Shape checks: forward ops prepend a gradient axis of size ndim; adjoints remove it."""

    def test2d(self):
        x = tf.random.normal((10, 10))
        op = Nabla2d()
        Kx = op(x)
        self.assertTrue((2, *x.shape) == Kx.shape)

    def test2d_complex(self):
        x = tf.complex(tf.random.normal((10, 10)), tf.random.normal((10, 10)))
        op = Nabla2d()
        Kx = op(x)
        self.assertTrue((2, *x.shape) == Kx.shape)

    def test2d_adjoint(self):
        x = tf.random.normal((2, 10, 10))
        op = NablaT2d()
        Kx = op(x)
        self.assertTrue(x.shape[1:] == Kx.shape)

    def test2d_adjoint_complex(self):
        x = tf.complex(tf.random.normal((2, 10, 10)), tf.random.normal((2, 10, 10)))
        op = NablaT2d()
        Kx = op(x)
        self.assertTrue(x.shape[1:] == Kx.shape)

    def test3d(self):
        x = tf.random.normal((10, 10, 10))
        op = Nabla3d()
        Kx = op(x)
        self.assertTrue((3, *x.shape) == Kx.shape)

    def test3d_complex(self):
        x = tf.complex(tf.random.normal((10, 10, 10)), tf.random.normal((10, 10, 10)))
        op = Nabla3d()
        Kx = op(x)
        self.assertTrue((3, *x.shape) == Kx.shape)

    def test3d_adjoint(self):
        x = tf.random.normal((3, 10, 10, 10))
        op = NablaT3d()
        Kx = op(x)
        self.assertTrue(x.shape[1:] == Kx.shape)

    def test3d_adjoint_complex(self):
        x = tf.complex(tf.random.normal((3, 10, 10, 10)), tf.random.normal((3, 10, 10, 10)))
        op = NablaT3d()
        Kx = op(x)
        self.assertTrue(x.shape[1:] == Kx.shape)

    def test4d(self):
        x = tf.random.normal((10, 10, 10, 10))
        op = Nabla4d()
        Kx = op(x)
        self.assertTrue((4, *x.shape) == Kx.shape)

    def test4d_complex(self):
        x = tf.complex(tf.random.normal((10, 10, 10, 10)), tf.random.normal((10, 10, 10, 10)))
        op = Nabla4d()
        Kx = op(x)
        self.assertTrue((4, *x.shape) == Kx.shape)

    def test4d_adjoint(self):
        x = tf.random.normal((4, 10, 10, 10, 10))
        op = NablaT4d()
        Kx = op(x)
        self.assertTrue(x.shape[1:] == Kx.shape)

    def test4d_adjoint_complex(self):
        x = tf.complex(tf.random.normal((4, 10, 10, 10, 10)), tf.random.normal((4, 10, 10, 10, 10)))
        op = NablaT4d()
        Kx = op(x)
        self.assertTrue(x.shape[1:] == Kx.shape)


if __name__ == "__main__":
    # BUG FIX: the unittest module has no `test` attribute; `unittest.test()`
    # raised AttributeError and the suite never ran. `unittest.main()` is the
    # CLI entry point.
    unittest.main()
nilq/baby-python
python
# -*- coding: utf-8 -*- """Top-level package for skipchunk.""" __author__ = """Max Irwin""" __email__ = 'max_irwin@yahoo.com' __version__ = '0.1.0'
nilq/baby-python
python
from django.contrib import admin

from .models import User, EmailSender
from django.core.mail import send_mass_mail
import smtplib  # NOTE(review): unused here — mail goes through django.core.mail; confirm before removing

admin.site.register(User)


class EmailSenderAdmin(admin.ModelAdmin):
    """Admin with a bulk action that mails the addresses of each selected EmailSender."""
    actions = ['send_email']

    @admin.action(description='Send mass email')
    def send_email(self, request, queryset):
        """For every selected object, send its subject/text to its comma-separated address list."""
        for obj in queryset:
            # `adresses` (model field name, sic) holds comma-separated recipients.
            lst = obj.adresses
            email_list = list(lst.replace(' ', '').split(','))
            subject = obj.subject
            msg = obj.text
            # send_mass_mail datatuple: (subject, message, from_email, recipient_list)
            t = (subject, msg, 'from@example.com', email_list)
            # One send per object -> one SMTP connection per iteration.
            send_mass_mail((t,), fail_silently=False, auth_user=None, auth_password=None, connection=None)


admin.site.register(EmailSender, EmailSenderAdmin)
nilq/baby-python
python
from django.contrib import admin

from bookings.models import Booking

# Register your models here.
# Expose the Booking model in the Django admin with the default ModelAdmin.
admin.site.register(Booking)
nilq/baby-python
python
from .utils import cached_property
from .utils.types import is_list, get_link, validate, is_nullable
from .resource import Resource
from .conf import settings
from .expression import execute
from .schemas import FieldSchema
from .exceptions import TypeValidationError


def is_resolved(x):
    """Return True if ``x`` is already resolved into Resource instance(s).

    Accepts a single Resource, a list of Resources, or a dict whose values
    are all Resources.
    """
    if isinstance(x, Resource):
        return True
    if isinstance(x, list) and all((isinstance(c, Resource) for c in x)):
        return True
    if isinstance(x, dict) and all((isinstance(c, Resource) for c in x.values())):
        return True
    return False


class Field(Resource):
    """A single field of a resource.

    Caches type traits (link/list/nullable) at construction, lazily resolves
    its value (possibly via a source expression), and maintains inverse
    relations for link fields.
    """

    class Schema(FieldSchema):
        pass

    def __init__(self, *args, **kwargs):
        super(Field, self).__init__(*args, **kwargs)
        type = self.get_option('type')
        # Derive and cache traits from the declared type.
        self._is_link = get_link(type)
        self._is_list = is_list(type)
        self._is_nullable = is_nullable(type)

    @property
    def is_link(self):
        # BUG FIX: this previously returned ``self._is_list`` (copy-paste
        # error — identical to the ``is_list`` property below), so scalar
        # link fields were not reported as links and list fields were.
        return self._is_link

    @property
    def is_list(self):
        return self._is_list

    @property
    def is_nullable(self):
        return self._is_nullable

    def setup(self):
        """Lazily compute and set the field's initial value (idempotent)."""
        if not self._setup:
            # set initial value via parent
            source = self.get_option('source')
            type = self.get_option('type')
            name = self.get_option('name')
            id = self.get_option('id')
            if source:
                # get value from source expression
                value = self.get_from_expression(source)
            else:
                # get value from parent by name
                default = self.get_option('default')
                value = self.parent.get_option(name, default)
                # transform field spec dict into field array
                if (
                    id == 'resources.fields'
                ):
                    if value == '*':
                        # '*' means "all source fields of the parent"
                        value = {k: True for k in self.parent.get_field_source_names()}
                    if isinstance(value, dict):
                        value = [self.parent.get_field(name) for name in value]
            self.set_value(value)

    def get_from_expression(self, source):
        """Evaluate a source expression against the parent's fields."""
        return execute(source, {'fields': self.parent})

    @cached_property
    def related(self):
        """Resolve the linked resource (by ID or by name), or None."""
        link = self._is_link
        if not link:
            return None
        if '.' in link:
            # link is resource ID
            return self.space.server.get_resource_by_id(link)
        else:
            # link is resource name referencing the current space
            return self.space.resources_by_name.get(link)

    @property
    def parent(self):
        return self.get_option('parent')

    @classmethod
    def make(cls, *args, **kwargs):
        return cls(*args, **kwargs)  # lazy(lambda: cls(*args, **kwargs), cls)()

    def get_value(self, resolve=True, id=False):
        """Return the field value; for links, optionally resolve/return IDs."""
        self.setup()
        if resolve and self._is_link:
            link = self._link
            return link.get_id() if id else link
        else:
            return self._value

    @property
    def space(self):
        return self.get_space()

    def get_space(self):
        """Walk up the parent chain to find the owning Space.

        Metaspace-owned parents are traversed specially so that fields of
        server/resource/space/type records resolve against the metaspace.
        """
        from .space import Space

        space = None
        parent = self.parent
        space = parent.get_option('space')
        parent_name = parent.get_meta_attribute('name')
        if space and (
            space == settings.METASPACE_NAME
            or (isinstance(space, Space) and space.name == settings.METASPACE_NAME)
        ):
            # get root space
            while parent_name == 'fields':
                parent = parent.parent
                parent_name = parent.get_meta_attribute('name')

            if parent_name == 'server':
                space = parent.metaspace
            elif parent_name == 'resources':
                space = parent.get_option('space')
                if not is_resolved(space):
                    space = parent.space
                space = space.server.metaspace
            elif parent_name == 'spaces':
                space = parent.server.metaspace
            elif parent_name == 'types':
                space = parent.server.metaspace
        else:
            # get space from parent resource
            space = parent.space
        return space

    def get_link(self, value):
        """Resolve ``value`` (ID or IDs) into Resource(s), pass through if resolved."""
        if is_resolved(value):
            return value
        return self.space.resolve(self.type, value)

    def validate(self, type, value):
        """Validate ``value`` against ``type``; re-raise with field context."""
        try:
            return validate(type, value)
        except TypeValidationError as e:
            raise TypeValidationError(f'Failed to validate {self.id}: {e}')

    def set_value(self, value, set_inverse=True):
        """Set the field value, keeping ID value and resolved link in sync.

        For link fields, ``_value`` stores IDs while ``_link`` caches the
        resolved Resource(s); inverse relations are updated unless
        ``set_inverse`` is False (used to break recursion).
        """
        type = self.get_option('type')
        self.validate(type, value)
        if self._is_link:
            link = None
            if is_resolved(value):
                # resource given -> get ID or IDs
                link = value
                value = [v.get_id() for v in value] if self._is_list else link.get_id()
                self._value = value
                self.__dict__["_link"] = link
                if set_inverse and self.inverse:
                    self.set_inverse(link)
            else:
                # id or ids given
                self._value = value
                link = self.__dict__['_link'] = self.get_link(value)
                if link and set_inverse and self.inverse:
                    self.set_inverse(link)
        else:
            # simple assignment without links
            self._value = value
        self._setup = True

    def set_inverse(self, value):
        """Point the inverse field(s) of the linked resource(s) back at parent."""
        parent = self.parent
        if not parent:
            return
        if not isinstance(value, list):
            value = [value]
        inverse = self.inverse
        for v in value:
            inverse_field = v.get_attribute(inverse)
            if inverse_field._is_list:
                # set_inverse=False prevents infinite mutual updates
                inverse_field.add_value(parent, set_inverse=False)
            else:
                inverse_field.set_value(parent, set_inverse=False)

    def add_value(self, new_value, set_inverse=True, index=None):
        """Append value(s) to a list field, de-duplicating link IDs."""
        if self._is_list:
            self.setup()
            value = self._value
            link = None
            if value is None:
                value = self._value = []
            if not isinstance(new_value, list):
                new_value = [new_value]

            ids = None
            link = None
            resolved = None
            if self._is_link:
                resolved = is_resolved(new_value)
                link = self._link
                # known IDs, used to skip duplicates below
                ids = set([v.get_id() if hasattr(v, 'get_id') else v for v in link])

            news = []
            for v in new_value:
                if self._is_link:
                    # check ids before adding
                    if resolved:
                        id = v.get_id()
                        if id not in ids:
                            ids.add(id)
                            news.append(v)
                            value.append(id)
                            link.append(v)
                    else:
                        if v not in ids:
                            ids.add(v)
                            value.append(v)
                            news.append(v)
                else:
                    # add directly
                    value.append(v)

            if self._is_link and not resolved:
                # news has ids; resolve them before extending the link cache
                news = self.get_link(news)
                link.extend(news)

            if set_inverse and self.inverse and news:
                self.set_inverse(news)
        else:
            # cannot add on a non-list
            # TODO: support this for strings, objects, numbers
            raise NotImplementedError()

    @cached_property
    def _link(self):
        # Lazily resolved Resource(s) backing a link field's stored ID(s).
        return self.get_link(self._value)
nilq/baby-python
python
#!/usr/bin/env python3
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)

# This code is ported from the following implementation written in Torch.
# https://github.com/chainer/chainer/blob/master/examples/ptb/train_ptb_custom_loop.py

"""Language model training script."""

import logging
import os
import random
import subprocess
import sys

import configargparse
import numpy as np

from espnet.nets.lm_interface import dynamic_import_lm
from espnet.optimizer.adaptor import dynamic_import_optimizer
from espnet.scheduler.scaler import dynamic_import_scaler


# NOTE: you need this func to generate our sphinx doc
def get_parser(parser=None, required=True):
    """Get parser.

    :param parser: existing parser to extend, or None to create one
    :param required: whether data/outdir arguments are mandatory
    :return: the configured argument parser
    """
    if parser is None:
        parser = configargparse.ArgumentParser(
            description='Train a new language model on one CPU or one GPU',
            config_file_parser_class=configargparse.YAMLConfigFileParser,
            formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
    # general configuration
    parser.add('--config', is_config_file=True,
               help='config file path')
    parser.add('--config2', is_config_file=True,
               help='second config file path that overwrites the settings in `--config`.')
    parser.add('--config3', is_config_file=True,
               help='third config file path that overwrites the settings in `--config` and `--config2`.')

    parser.add_argument('--ngpu', default=None, type=int,
                        help='Number of GPUs. If not given, use all visible devices')
    parser.add_argument('--train-dtype', default="float32",
                        choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"],
                        help='Data type for training (only pytorch backend). '
                        'O0,O1,.. flags require apex. See https://nvidia.github.io/apex/amp.html#opt-levels')
    parser.add_argument('--backend', default='chainer', type=str,
                        choices=['chainer', 'pytorch'],
                        help='Backend library')
    parser.add_argument('--outdir', type=str, required=required,
                        help='Output directory')
    parser.add_argument('--debugmode', default=1, type=int,
                        help='Debugmode')
    parser.add_argument('--dict', type=str, required=required,
                        help='Dictionary')
    parser.add_argument('--seed', default=1, type=int,
                        help='Random seed')
    parser.add_argument('--resume', '-r', default='', nargs='?',
                        help='Resume the training from snapshot')
    parser.add_argument('--verbose', '-V', default=0, type=int,
                        help='Verbose option')
    parser.add_argument('--tensorboard-dir', default=None, type=str, nargs='?',
                        help="Tensorboard log dir path")
    parser.add_argument('--report-interval-iters', default=100, type=int,
                        help="Report interval iterations")
    # task related
    parser.add_argument('--train-label', type=str, required=required,
                        help='Filename of train label data')
    parser.add_argument('--valid-label', type=str, required=required,
                        help='Filename of validation label data')
    parser.add_argument('--test-label', type=str,
                        help='Filename of test label data')
    parser.add_argument('--dump-hdf5-path', type=str, default=None,
                        help='Path to dump a preprocessed dataset as hdf5')
    # training configuration
    parser.add_argument('--opt', default='sgd', type=str,
                        help='Optimizer')
    parser.add_argument('--sortagrad', default=0, type=int, nargs='?',
                        help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs")
    parser.add_argument('--batchsize', '-b', type=int, default=300,
                        help='Number of examples in each mini-batch')
    parser.add_argument('--accum-grad', type=int, default=1,
                        help='Number of gradient accumueration')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--early-stop-criterion', default='validation/main/loss', type=str, nargs='?',
                        help="Value to monitor to trigger an early stopping of the training")
    parser.add_argument('--patience', default=3, type=int, nargs='?',
                        help="Number of epochs to wait without improvement before stopping the training")
    parser.add_argument('--scalers', default=None, action="append",
                        type=lambda kv: kv.split("="),
                        help='optimizer schedulers, e.g., "--scalers lr=noam --lr-noam-warmup 1000".')
    parser.add_argument('--gradclip', '-c', type=float, default=5,
                        help='Gradient norm threshold to clip')
    parser.add_argument('--maxlen', type=int, default=40,
                        help='Batch size is reduced if the input sequence > ML')
    parser.add_argument('--model-module', type=str, default='default',
                        help='model defined module (default: espnet.nets.xxx_backend.lm.default:DefaultRNNLM)')
    return parser


def main(cmd_args):
    """Train LM."""
    parser = get_parser()
    args, _ = parser.parse_known_args(cmd_args)
    if args.backend == "chainer" and args.train_dtype != "float32":
        raise NotImplementedError(
            f"chainer backend does not support --train-dtype {args.train_dtype}."
            "Use --dtype float32.")
    if args.ngpu == 0 and args.train_dtype in ("O0", "O1", "O2", "O3", "float16"):
        raise ValueError(f"--train-dtype {args.train_dtype} does not support the CPU backend.")

    # parse arguments dynamically: model/optimizer/scheduler classes each
    # register their own options before the final parse
    model_class = dynamic_import_lm(args.model_module, args.backend)
    model_class.add_arguments(parser)
    if args.scalers is not None:
        for k, v in args.scalers:
            scaler_class = dynamic_import_scaler(v)
            scaler_class.add_arguments(k, parser)
    opt_class = dynamic_import_optimizer(args.opt, args.backend)
    opt_class.add_arguments(parser)
    args = parser.parse_args(cmd_args)

    # logging info
    if args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')
    else:
        logging.basicConfig(
            level=logging.WARN, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')
        logging.warning('Skip DEBUG/INFO messages')

    # If --ngpu is not given,
    #   1. if CUDA_VISIBLE_DEVICES is set, all visible devices
    #   2. if nvidia-smi exists, use all devices
    #   3. else ngpu=0
    if args.ngpu is None:
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is not None:
            ngpu = len(cvd.split(','))
        else:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
            try:
                p = subprocess.run(['nvidia-smi', '-L'],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            except (subprocess.CalledProcessError, FileNotFoundError):
                ngpu = 0
            else:
                # BUG FIX: "nvidia-smi -L" prints one "GPU n: ..." line per
                # device on STDOUT.  The previous code counted lines of
                # p.stderr, which is empty on success and therefore always
                # yielded ngpu=0 on this code path.
                ngpu = len(p.stdout.decode().split('\n')) - 1
    else:
        ngpu = args.ngpu
    logging.info(f"ngpu: {ngpu}")

    # display PYTHONPATH
    logging.info('python path = ' + os.environ.get('PYTHONPATH', '(None)'))

    # seed setting
    nseed = args.seed
    random.seed(nseed)
    np.random.seed(nseed)

    # load dictionary: one "<token> <id>" entry per line; <blank>/<eos> are
    # prepended/appended to match the acoustic-model label inventory
    with open(args.dict, 'rb') as f:
        dictionary = f.readlines()
    char_list = [entry.decode('utf-8').split(' ')[0] for entry in dictionary]
    char_list.insert(0, '<blank>')
    char_list.append('<eos>')
    args.char_list_dict = {x: i for i, x in enumerate(char_list)}
    args.n_vocab = len(char_list)

    # train
    logging.info('backend = ' + args.backend)
    if args.backend == "chainer":
        from espnet.lm.chainer_backend.lm import train
        train(args)
    elif args.backend == "pytorch":
        from espnet.lm.pytorch_backend.lm import train
        train(args)
    else:
        raise ValueError("Only chainer and pytorch are supported.")


if __name__ == '__main__':
    main(sys.argv[1:])
nilq/baby-python
python
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2019 Nippon Telegraph and Telephone Corporation
# Filename: EmLoggingTool.py
'''
Log tool for EM.
'''
import logging.handlers
import time
import gzip
import os
import re
import shutil
import GlobalModule


class Formatter(logging.Formatter):
    """
    Log formatter class.
    Modify dateFormat(datefmt).
    """

    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.
        This method should be called from format() by a formatter which
        wants to make use of a formatted time. This method can be overridden
        in formatters to provide for any specific requirement, but the
        basic behaviour is as follows: if datefmt (a string) is specified,
        it is used with time.strftime() to format the creation time of the
        record. Otherwise, the ISO8601 format is used. The resulting
        string is returned. This function uses a user-configurable function
        to convert the creation time to a tuple. By default, time.localtime()
        is used; to change this for a particular formatter instance, set the
        'converter' attribute to a function with the same signature as
        time.localtime() or time.gmtime(). To change it for all formatters,
        for example if you want all logging times to be shown in GMT,
        set the 'converter' attribute in the Formatter class.
        """
        ct = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            # Default format: "YYYY/MM/DD HH:MM:SS.mmm" (millisecond suffix).
            t = time.strftime("%Y/%m/%d %H:%M:%S", ct)
            s = "%s.%03d" % (t, record.msecs)
        return s


class FileHandler(logging.FileHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """

    def __init__(self, filename, mode='a', encoding=None, delay=0,
                 notify_log_levels=[]):
        """
        Open the specified file and use it as the stream for logging.
        """
        # NOTE(review): mutable default for notify_log_levels is shared
        # across calls — harmless only as long as callers never mutate it.
        logging.FileHandler.__init__(
            self, filename, mode=mode, encoding=encoding, delay=delay)
        # Record levels that should additionally be forwarded as
        # notifications via GlobalModule.EM_LOG_NOTIFY.
        self.notify_log_levels = notify_log_levels

    def handle(self, record):
        """
        Call the handlers for the specified record.
        This method is used for unpickled records received from a socket, as
        well as those created locally. Logger-level filtering is applied.
        """
        super(FileHandler, self).handle(record)
        # Forward selected levels to the external notifier after writing.
        if record.levelno in self.notify_log_levels:
            snd_msg = self.format(record)
            GlobalModule.EM_LOG_NOTIFY.notify_logs(snd_msg, record.levelno)


class TimedRotatingFileHandler(logging.handlers.TimedRotatingFileHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.
    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """

    def __init__(self, filename, when='h', interval=1, backupCount=0,
                 encoding=None, delay=False, utc=False, gzbackupCount=0,
                 notify_log_levels=[], gzip=False):
        # NOTE(review): mutable default for notify_log_levels (see above).
        logging.handlers.TimedRotatingFileHandler.__init__(
            self, filename, when=when, interval=interval,
            backupCount=backupCount, encoding=encoding, delay=delay, utc=utc)
        self.file_name = filename
        # Number of gzip archives to keep (0 = keep all).
        self.gzbackupCount = gzbackupCount
        self.notify_log_levels = notify_log_levels
        # Plain FileHandlers sharing this base file; closed on rollover.
        self._child_handler = []
        # Whether rotated-out files are compressed after rollover.
        self.gzip = gzip

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the
        filename when the rollover happens. However, you want the file to
        be named for the start of the interval, not the current time.
        If there is a backup count, then we have to get a list of matching
        filenames, sort them and remove the one with the oldest suffix.
        """
        super(TimedRotatingFileHandler, self).doRollover()
        # Child handlers still point at the rotated-away stream; close them.
        for ch_handle in self._child_handler:
            ch_handle.close()
        if self.gzip:
            try:
                self._gzMake()
            except Exception:
                # Compression is best-effort; never let it break logging.
                pass

    def getFileHandler(self):
        # Create a plain FileHandler bound to the same base file, so an
        # additional logger can share this handler's rotation target.
        tmp_delay = 1 if self.stream is None else 0
        tmp_handler = FileHandler(self.baseFilename,
                                  mode=self.mode,
                                  encoding=self.encoding,
                                  delay=tmp_delay,
                                  notify_log_levels=self.notify_log_levels)
        tmp_handler.setLevel(self.level)
        self._child_handler.append(tmp_handler)
        return tmp_handler

    def _gzMake(self):
        # Compress the newest rotated file, then prune old .gz archives.
        gzip_name, gz_target_file = self._gzFilecheck()
        if gzip_name:
            self._gzFileMaker(gzip_name, gz_target_file)
        for s in self._getGzFileDeleteList():
            rm_f_path = os.path.join(os.path.dirname(self.file_name), s)
            os.remove(rm_f_path)

    def _gzFilecheck(self):
        # Pick the lexicographically last rotated file (newest timestamp
        # suffix) as the compression target; return (gz_name, target_path),
        # gz_name being None when the target file does not exist.
        application_log_list = self._getFile_list()
        gz_target_file_name = application_log_list[len(
            application_log_list) - 1]
        file_dir = os.path.dirname(self.file_name)
        gz_target_file = os.path.join(file_dir, gz_target_file_name)
        gzfile_name = "{0}.gz".format(gz_target_file)
        if not os.path.isfile(gz_target_file):
            gzfile_name = None
        return gzfile_name, gz_target_file

    def _gzFileMaker(self, gzfile_name, gz_target_file):
        # Stream-copy the rotated file into a gzip archive, then delete
        # the uncompressed original.
        with open(gz_target_file, 'rb') as f_in:
            with gzip.open(gzfile_name, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
        os.remove(gz_target_file)

    def _getGzFileDeleteList(self,):
        # Return the oldest .gz archives exceeding gzbackupCount.
        # NOTE(review): base_name is interpolated into the regex unescaped;
        # dots in the file name act as wildcards — confirm acceptable.
        file_list = os.listdir(os.path.dirname(self.file_name))
        base_name = os.path.basename(self.file_name)
        application_log_list = []
        delete_list = []
        for r in file_list:
            if re.search("^{0}\..*\.gz$".format(base_name), r):
                application_log_list.append(r)
        application_log_list.sort()
        if (len(application_log_list) > self.gzbackupCount
                and self.gzbackupCount != 0):
            application_log_list =\
                delete_list = application_log_list[:len(
                    application_log_list) - self.gzbackupCount]
        return delete_list

    def _getFile_list(self,):
        # Sorted list of rotated (not yet compressed) files for our base name.
        file_list = os.listdir(os.path.dirname(self.file_name))
        base_name = os.path.basename(self.file_name)
        new_file_list = []
        for r in file_list:
            if self._check_rotate_file(r, base_name):
                new_file_list.append(r)
        if len(new_file_list) > 0:
            new_file_list.sort()
        return new_file_list

    def _check_rotate_file(self, file_name, base_name):
        # A rotated file is "<base>.<suffix>" where the suffix matches the
        # parent class's timestamp pattern (self.extMatch).
        if not re.search("^{0}\.".format(base_name), file_name):
            return False
        prefix = base_name + "."
        suffix = file_name[len(prefix):]
        return_val = True if self.extMatch.match(suffix) else False
        return return_val

    def handle(self, record):
        """
        Call the handlers for the specified record.
        This method is used for unpickled records received from a socket, as
        well as those created locally. Logger-level filtering is applied.
        """
        super(TimedRotatingFileHandler, self).handle(record)
        # Forward selected levels to the external notifier after writing.
        if record.levelno in self.notify_log_levels:
            snd_msg = self.format(record)
            GlobalModule.EM_LOG_NOTIFY.notify_logs(snd_msg, record.levelno)
nilq/baby-python
python
import pytest from pipet.core.shop_conn.wc import * from pipet.core.transform.model_to_wc import * from pipet.core.transform.wc_to_model import * from pprint import pprint
nilq/baby-python
python
def maxMultiple(divisor, bound):
    """Return the largest multiple of ``divisor`` not exceeding ``bound``."""
    # Equivalent to (bound // divisor) * divisor: strip the remainder.
    return bound - (bound % divisor)
nilq/baby-python
python
#Kyle Sizemore
#2/7/2020
# pull historical data from the market for backtesting purposes and output to a
# csv file on our mySQL database
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import datetime
import pandas as pd
import pandas_datareader as web
import csv
import pymysql
from sqlalchemy import create_engine
import numpy as np


class DataHandler():
    """Fetch historical price data from Yahoo and export it to SQL/CSV/numpy.

    BUG FIXES relative to the original:
      * every method now takes ``self`` (they previously referenced
        undefined globals such as ``tickers``, ``engine``, ``start``...);
      * ``exportTickers`` now captures the DataFrame returned by
        ``GenYahDataFrame`` (it was discarded, leaving ``df`` undefined),
        tests ``df is not None`` (``df != None`` on a DataFrame raises),
        and calls ``csvExport`` (``cveExport`` was a typo);
      * ``csvExport`` stringifies the datetimes (``start + 'to' + end``
        was a datetime+str TypeError);
      * the entry point constructs ``DataHandler`` (``Datahandler`` was a
        NameError).
    """

    def __init__(self):
        self.engine = create_engine('mysql+pymysql://root:pass@127.0.0.1:3306/zoltarpricedata')
        self.start = datetime.datetime(1980, 1, 1)
        self.end = datetime.datetime.now()
        self.tickers = []
        self.numberOfTickers = 15

    def readTickers(self):
        """Load ticker symbols from TargetTickers.csv (first column)."""
        with open('TargetTickers.csv') as csvDataFile:
            csvReader = csv.reader(csvDataFile)
            for row in csvReader:
                self.tickers.append(row[0])

    def GenYahDataFrame(self, t):
        """Download the full price history for ticker ``t``; None on failure."""
        try:
            df = web.DataReader(t, 'yahoo', self.start, self.end)
            return df
        except Exception:
            print('Bad ticker: ' + t)
            return None

    def TrimDataFrame(self, df):
        """Keep only Open/Close by dropping the other standard columns."""
        return df.drop(columns=['High', 'Low', 'Volume', 'Adj Close'])

    def sqlExport(self, df, t):
        """Append ``df`` to the MySQL table named after the ticker."""
        try:
            df.to_sql(t.lower(), con=self.engine, index=True,
                      index_label='Date', if_exists='append', method=None)
        except ValueError as vx:
            print(vx)
        except Exception as ex:
            print(ex)
        else:
            print('Exported ' + t + ' data to SQL')

    # This takes the data frame and creates a numpy array. The numpy array is
    # 2-D with the number of rows being the datapoints/dayInterval and the row
    # width being the dayInterval. EX: toNumpy(currentDate, 100, AOS) returns
    # a numpy array of size (91, 100) because it has at least 9100 days of
    # history and 100 is the interval of interest
    def toNumpy(self, endDate, dayInterval, ticker):
        """Return the ticker's second column chopped into rows of dayInterval."""
        df = self.GenYahDataFrame(ticker)
        df = self.TrimDataFrame(df)
        arr = df.to_numpy()
        numRows = int(np.size(arr, 0) / dayInterval)
        outputArr = np.zeros([numRows, dayInterval])
        for i in range(numRows):
            outputArr[i] = np.copy(arr[i * dayInterval:((i + 1) * dayInterval), 1])
        return outputArr

    def exportTickers(self):
        """Download, trim, and CSV-export the first numberOfTickers tickers."""
        for t in self.tickers[:self.numberOfTickers]:
            df = self.GenYahDataFrame(t)
            if df is not None:
                df = self.TrimDataFrame(df)
                self.csvExport(df, t)
        return ('Tickers Succesfully Exported')

    def csvExport(self, df, ticker):
        """Write ``df`` to Tickers/<ticker>_PriceData_<start>to<end>."""
        timeInterval = str(self.start) + 'to' + str(self.end)
        df.to_csv('Tickers/' + ticker + '_PriceData_' + timeInterval)
        return ('Exported ' + ticker + ' data to CSV file')

    def main(self):
        end = datetime.datetime.now()
        numpyarray = self.toNumpy(end, 100, 'AOS')
        print(numpyarray)
        print("Size: " + str(np.size(numpyarray)))


if __name__ == "__main__":
    dh = DataHandler()
    dh.main()
nilq/baby-python
python
import pypro.core
import os


class CreateConfig(pypro.core.Recipe):
    """Recipe that renders a config file from a template.

    Reads ``source``, substitutes variable notations via
    ``pypro.core.Variables.replace``, and writes the result to
    ``destination``.
    """

    def __init__(self, source, destination):
        self.source = source
        self.destination = destination

    def run(self, runner, arguments=None):
        """Render the template and write the destination file."""
        # Read the template file
        with open(self.source, 'r') as template:
            raw = template.read(os.path.getsize(self.source))

        # Replace notations with actual values
        rendered = pypro.core.Variables.replace(raw)

        # Write the config file
        with open(self.destination, 'w') as output:
            output.write(rendered)
nilq/baby-python
python
import random
import numpy as np
import torch
import torch.utils.data as data
import data.util as util
import os.path as osp


class LQGT_dataset(data.Dataset):
    '''
    Read LQ (Low Quality, here is LR) and GT image pairs.
    If only GT image is provided, generate LQ image on-the-fly.
    The pair is ensured by 'sorted' function, so please check the name convention.
    '''

    def __init__(self, opt):
        super(LQGT_dataset, self).__init__()
        self.opt = opt
        self.data_type = self.opt['data_type']
        self.paths_LQ, self.paths_GT = None, None

        # Collect sorted path lists; pairing relies on matching sort order.
        self.sizes_GT, self.paths_GT = util.get_image_paths(self.data_type, opt['dataroot_GT'])
        self.sizes_LQ, self.paths_LQ = util.get_image_paths(self.data_type, opt['dataroot_LQ'])
        assert self.paths_GT, 'Error: GT path is empty.'
        if self.paths_LQ and self.paths_GT:
            assert len(self.paths_LQ) == len(
                self.paths_GT
            ), 'GT and LQ datasets have different number of images - {}, {}.'.format(
                len(self.paths_LQ), len(self.paths_GT))
        # Optional folder of precomputed condition images; when None the
        # condition is generated on-the-fly by downscaling the LQ image.
        self.cond_folder = opt['dataroot_cond']

    def __getitem__(self, index):
        GT_path, LQ_path = None, None
        GT_size = self.opt['GT_size']

        # get GT image
        # NOTE(review): util.read_img presumably returns an HWC float image
        # in BGR channel order (see the BGR->RGB swap below) — confirm.
        GT_path = self.paths_GT[index]
        img_GT = util.read_img(None, GT_path)

        # get LQ image
        LQ_path = self.paths_LQ[index]
        img_LQ = util.read_img(None, LQ_path)

        # # get condition
        cond_scale = self.opt['cond_scale']
        if self.cond_folder is not None:
            # Derive the condition file name from the LQ name:
            # "<stem>_bicx<scale>.png", dropping a trailing "_suffix" if any.
            if '_' in osp.basename(LQ_path):
                cond_name = '_'.join(osp.basename(LQ_path).split('_')[:-1])+'_bicx'+str(cond_scale)+'.png'
            else:
                cond_name = osp.basename(LQ_path).split('.')[0]+'_bicx'+str(cond_scale)+'.png'
            cond_path = osp.join(self.cond_folder, cond_name)
            cond_img = util.read_img(None, cond_path)
        else:
            # No condition folder: synthesize by bicubic downscale of LQ.
            cond_img = util.imresize_np(img_LQ, 1/cond_scale)

        if self.opt['phase'] == 'train':
            H, W, C = img_LQ.shape
            H_gt, W_gt, C = img_GT.shape
            if H != H_gt:
                # LQ/GT are expected to be the same size here; warn if not.
                print('*******wrong image*******:{}'.format(LQ_path))

            # randomly crop
            # The same crop window is applied to LQ and GT to keep alignment.
            if GT_size is not None:
                LQ_size = GT_size
                rnd_h = random.randint(0, max(0, H - LQ_size))
                rnd_w = random.randint(0, max(0, W - LQ_size))
                img_LQ = img_LQ[rnd_h:rnd_h + LQ_size, rnd_w:rnd_w + LQ_size, :]
                img_GT = img_GT[rnd_h:rnd_h + LQ_size, rnd_w:rnd_w + LQ_size, :]

            # augmentation - flip, rotate
            img_LQ, img_GT = util.augment([img_LQ, img_GT], self.opt['use_flip'],
                                          self.opt['use_rot'])

        # BGR to RGB, HWC to CHW, numpy to tensor
        if img_GT.shape[2] == 3:
            img_GT = img_GT[:, :, [2, 1, 0]]
            img_LQ = img_LQ[:, :, [2, 1, 0]]
            cond_img = cond_img[:, :, [2, 1, 0]]
        H, W, _ = img_LQ.shape
        img_GT = torch.from_numpy(np.ascontiguousarray(np.transpose(img_GT, (2, 0, 1)))).float()
        img_LQ = torch.from_numpy(np.ascontiguousarray(np.transpose(img_LQ, (2, 0, 1)))).float()
        cond = torch.from_numpy(np.ascontiguousarray(np.transpose(cond_img, (2, 0, 1)))).float()

        if LQ_path is None:
            LQ_path = GT_path
        return {'LQ': img_LQ, 'GT': img_GT, 'cond': cond, 'LQ_path': LQ_path, 'GT_path': GT_path}

    def __len__(self):
        # Dataset length is the number of GT images.
        return len(self.paths_GT)
nilq/baby-python
python
""" byceps.services.ticketing.dbmodels.archived_attendance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2014-2022 Jochen Kupperschmidt :License: Revised BSD (see `LICENSE` file for details) """ from datetime import datetime from ....database import db from ....typing import PartyID, UserID from ....util.instances import ReprBuilder class ArchivedAttendance(db.Model): """A user's attendance of a party. This is a link between a party and a user that attended it. While such a link is usually established through a ticket for a party that is assigned to a user, this entity was introduced for legacy data for which no information on tickets, orders, seating areas and so on exists anymore (or should not be migrated). The data for this entity is expected to be inserted from the outside. BYCEPS itself currently does not write any archived attendances (but incorporates them to be displayed on user profiles). """ __tablename__ = 'user_archived_party_attendances' user_id = db.Column(db.Uuid, db.ForeignKey('users.id'), primary_key=True) party_id = db.Column(db.UnicodeText, db.ForeignKey('parties.id'), primary_key=True) created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) def __init__(self, user_id: UserID, party_id: PartyID) -> None: self.user_id = user_id self.party_id = party_id def __repr__(self) -> str: return ReprBuilder(self) \ .add('user_id', str(self.user_id)) \ .add('party_id', self.party_id) \ .build()
nilq/baby-python
python
#!/usr/bin/env python3
"""nargs=+"""

import argparse

# Demo: a positional argument that accepts one or more values.
parser = argparse.ArgumentParser(
    description='nargs=+',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('files',
                    metavar='FILE',
                    nargs='+',
                    help='Some files')

files = parser.parse_args().files

# Report how many values were collected and echo them back.
print('number = {}'.format(len(files)))
print('files = {}'.format(', '.join(files)))
nilq/baby-python
python
""" @authors: Filip Maciejewski, Oskar Słowik, Tomek Rybotycki @contact: filip.b.maciejewski@gmail.com REFERENCES: [0] Filip B. Maciejewski, Zoltán Zimborás, Michał Oszmaniec, "Mitigation of readout noise in near-term quantum devices by classical post-processing based on detector tomography", Quantum 4, 257 (2020) [0.5] Filip B. Maciejewski, Flavio Baccari, Zoltán Zimborás, Michał Oszmaniec, "Modeling and mitigation of cross-talk effects in readout noise with applications to the Quantum Approximate Optimization Algorithm", Quantum 5, 464 (2021). """ import numpy as np import copy import functions.ancillary_functions as anf from tqdm import tqdm from typing import Optional, List, Dict, Union from functions.povmtools import get_enumerated_rev_map_from_indices from noise_characterization.tomography.DDTMarginalsAnalyzer import DDTMarginalsAnalyzer from functions.functions_noise_model_heuristic import partition_algorithm_v1_cummulative class NoiseModelGenerator(DDTMarginalsAnalyzer): """ This is class that uses results of Diagonal Detector Tomography (DDT) to construct potentially_stochastic_matrix noise model for potentially_stochastic_matrix measuring device. The model is classical and based on Ref. [0.5]. 
The main functionalities include computing sets of strongly correlated qubits (clusters) and for each such set, computing the qubits which affect the exact form of the noise on those clusters (hence, neighborhoods of cluster) """ def __init__(self, results_dictionary_ddot: Dict[str, Dict[str, int]], bitstrings_right_to_left: bool, number_of_qubits: int, marginals_dictionary: Optional[Dict[str, Dict[str, np.ndarray]]] = None, noise_matrices_dictionary: Optional[ Dict[str, Union[np.ndarray, Dict[str, Dict[str, np.ndarray]]]]] = None, clusters_list: Optional[List[List[int]]] = None, neighborhoods: Dict[str, List[int]] = None ) -> None: super().__init__(results_dictionary_ddot, bitstrings_right_to_left, marginals_dictionary, noise_matrices_dictionary ) self._number_of_qubits = number_of_qubits self._qubit_indices = list(range(number_of_qubits)) self._correlations_table_pairs = None if clusters_list is None: clusters_list = [] if neighborhoods is None: neighborhoods = {} self._clusters_list = clusters_list self._neighborhoods = neighborhoods @property def correlations_table_pairs(self) -> np.ndarray: return self._correlations_table_pairs @correlations_table_pairs.setter def correlations_table_pairs(self, correlations_table_pairs: np.ndarray) -> None: self._correlations_table_pairs = correlations_table_pairs @property def clusters_list(self) -> List[List[int]]: return self._clusters_list @clusters_list.setter def clusters_list(self, clusters_list: List[List[int]]) -> None: for cluster in clusters_list: cluster_string = self.get_qubits_key(cluster) if cluster_string not in self._noise_matrices_dictionary.keys(): average_noise_matrix_now = self._compute_noise_matrix_averaged(cluster) dictionary_now = {'averaged': average_noise_matrix_now} if cluster_string in self._neighborhoods.keys(): neighborhood_now = self._neighborhoods[cluster_string] dependent_noise_matrices = self._compute_noise_matrix_dependent(cluster, neighborhood_now) dictionary_now = {**dictionary_now, 
**dependent_noise_matrices} anf.cool_print('im doing this') self._noise_matrices_dictionary[self.get_qubits_key(cluster)] = dictionary_now self._clusters_list = clusters_list @property def neighborhoods(self) -> Dict[str, List[int]]: return self._neighborhoods @neighborhoods.setter def neighborhoods(self, neighborhoods: Dict[str, List[int]]) -> None: self._neighborhoods = neighborhoods self.clusters_list = [self.get_qubit_indices_from_string(cluster_string) for cluster_string in neighborhoods.keys()] for cluster_string in neighborhoods.keys(): dictionary_now = self._noise_matrices_dictionary[cluster_string] neighborhood_now = neighborhoods[cluster_string] # print(dictionary_now.keys()) neighbors_key = self.get_qubits_key(neighborhood_now) if neighbors_key not in dictionary_now.keys(): cluster = anf.get_qubit_indices_from_string(cluster_string) dependent_noise_matrices = self._compute_noise_matrix_dependent(cluster, neighborhood_now) self._noise_matrices_dictionary[cluster_string] = {**dictionary_now, **dependent_noise_matrices} def compute_correlations_table_pairs(self, qubit_indices: Optional[List[int]] = None, chopping_threshold: Optional[float] = 0.) -> np.ndarray: """From marginal noise matrices, get correlations between pairs of qubits. Correlations are defined as: c_{j -> i_index} = 1/2 * || Lambda_{i_index}^{Y_j = '0'} - Lambda_{i_index}^{Y_j = '0'}||_{l1} Where Lambda_{i_index}^{Y_j} is an effective noise matrix on qubit "i_index" (averaged over all other of qubits except "j"), provided that input state of qubit "j" was "Y_j". Hence, c_{j -> i_index} measures how much noise on qubit "i_index" depends on the input state of qubit "j". :param qubit_indices: list of integers labeling the qubits we want to consider if not provided, uses class property self._qubit_indices :param chopping_threshold: numerical value, for which correlations lower than chopping_threshold are set to 0. If not provided, does not chop. 
In general, it is potentially_stochastic_matrix advisable to set such cluster_threshold that cuts off values below expected statistical fluctuations. :return: correlations_table_quantum (ARRAY): element correlations_table_quantum[i_index,j] = how qubit "j" AFFECTS qubit "i_index" [= how noise on qubit "i_index" depends on "j"] """ add_property = False if qubit_indices is None: add_property = True qubit_indices = self._qubit_indices number_of_qubits = len(qubit_indices) correlations_table = np.zeros((number_of_qubits, number_of_qubits)) if np.max(qubit_indices) > number_of_qubits: mapping = get_enumerated_rev_map_from_indices(qubit_indices) else: mapping = {qi: qi for qi in qubit_indices} for qi in qubit_indices: for qj in qubit_indices: ha, he = mapping[qi], mapping[qj] if qj > qi: lam_i_j = self.get_noise_matrix_dependent([qi], [qj]) lam_j_i = self.get_noise_matrix_dependent([qj], [qi]) diff_i_j = lam_i_j['0'] - lam_i_j['1'] diff_j_i = lam_j_i['1'] - lam_j_i['0'] correlation_i_j = 1 / 2 * np.linalg.norm(diff_i_j, ord=1) correlation_j_i = 1 / 2 * np.linalg.norm(diff_j_i, ord=1) if correlation_i_j >= chopping_threshold: correlations_table[ha, he] = correlation_i_j if correlation_j_i >= chopping_threshold: correlations_table[he, ha] = correlation_j_i if add_property: self._correlations_table_pairs = correlations_table return correlations_table def _compute_clusters_pairwise(self, maximal_size: int, cluster_threshold: float ) -> list: """ Get partition of qubits in potentially_stochastic_matrix device into disjoint "clusters". This function uses "naive" method_name by assigning qubits to the same cluster if correlations between them are higher than some "neighbors_threshold". It restricts size of the cluster to "maximal_size" by disregarding the lowest correlations (that are above neighbors_threshold). 
It uses table of correlations from class property self._correlations_table_pairs :param cluster_threshold: correlations magnitude above which qubits are assigned to the same cluster :param maximal_size: maximal allowed size of the cluster :return: clusters_labels_list: list of lists, each representing potentially_stochastic_matrix single cluster """ self._clusters_list = [] qubit_indices = self._qubit_indices # number_of_qubits = len(qubit_indices) clusters = {'q%s' % qi: [[qi, 0., 0.]] for qi in qubit_indices} for qi in qubit_indices: for qj in qubit_indices: if qj > qi: corr_j_i, corr_i_j = self._correlations_table_pairs[qj, qi], \ self._correlations_table_pairs[qi, qj] # if any of the qubit affects the other strongly enough, # we assign them to the same cluster if corr_j_i >= cluster_threshold or corr_i_j >= cluster_threshold: clusters['q%s' % qi].append([qj, corr_i_j, corr_j_i]) clusters['q%s' % qj].append([qi, corr_i_j, corr_j_i]) # Merge clusters containing the same qubits new_lists = [] for key, value in clusters.items(): clusters[key] = sorted(value, key=lambda arg: arg[0]) new_lists.append([vi[0] for vi in clusters[key]]) while anf.check_if_there_are_common_elements(new_lists): for i in range(len(new_lists)): cl0 = new_lists[i] for j in range(len(new_lists)): cl1 = new_lists[j] if len(anf.lists_intersection(cl0, cl1)) != 0: new_lists[i] = anf.lists_sum(cl0, cl1) unique_stuff = [sorted(lis) for lis in np.unique(new_lists)] new_lists = copy.deepcopy(unique_stuff) clusters_list = new_lists # Chop clusters if they exceed max size chopped_clusters = [] for cluster in clusters_list: if len(cluster) > maximal_size: correlations_sorting = [] for qi in cluster: # as figure of merit, we will sum all correlations that are between # given qubit and other guys in its cluster. 
x = 0.0 for list_now in clusters['q%s' % qi]: x += np.max([list_now[1], list_now[2]]) correlations_sorting.append([qi, x]) correlations_sorted = sorted(correlations_sorting, key=lambda arg: arg[1], reverse=True) # choose only "maximal_size" qubits to belong to given cluster qubits_sorted = [correlations_sorted[index][0] for index in range(maximal_size)] else: qubits_sorted = cluster chopped_clusters.append(qubits_sorted) chopped_clusters_sorted = sorted(chopped_clusters, key=lambda y: y[0]) self._clusters_list = chopped_clusters_sorted return chopped_clusters_sorted def _find_neighbors_of_cluster_holistic(self, cluster: List[int], maximal_size: int, chopping_threshold: Optional[float] = 0.) -> List[int]: """ For potentially_stochastic_matrix given cluster of qubits, find qubits which are their neighbors, i.e., they affect the noise matrix of cluster significantly. Figure of merit for correlations here is: c_{j -> cluster} = 1/2 || Lambda_{cluster}^{Y_j='0'}- Lambda_{cluster}^{Y_j='1'}||_{l1} where Lambda_{cluster}^{Y_j} is the noise matrix describing noise on qubits in "cluster" provided that input state of qubit "j" was "Y_j". See also description of self._compute_clusters_pairwise. :param cluster: list of labels of qubits in potentially_stochastic_matrix cluster :param maximal_size: maximal allowed size of the set "cluster+neighborhood" :param chopping_threshold: numerical value, for which correlations lower than chopping_threshold are set to 0. If not provided, it adds all_neighbors until maximal_size is met. 
:return: neighbors_list: list of lists, each representing potentially_stochastic_matrix single cluster """ size_cut = maximal_size - len(cluster) potential_neighbours = [] for qi in self._qubit_indices: if qi not in cluster: lam_ci_j = self.get_noise_matrix_dependent(cluster, [qi]) diff_ci_j = lam_ci_j['0'] - lam_ci_j['1'] correlation_ci_j = 1 / 2 * np.linalg.norm(diff_ci_j, ord=1) potential_neighbours.append([qi, correlation_ci_j]) sorted_neighbours = sorted(potential_neighbours, key=lambda x: x[1], reverse=True) neighbors_list = sorted( [sorted_neighbours[i][0] for i in range(int(np.min([size_cut, len(sorted_neighbours)]))) if chopping_threshold < sorted_neighbours[i][1]]) cluster_key = self.get_qubits_key(cluster) self._neighborhoods[cluster_key] = neighbors_list return neighbors_list def _find_all_neighborhoods_holistic(self, maximal_size, chopping_threshold: float, show_progress_bar: Optional[bool] = False) \ -> Dict[str, List[int]]: """ Run self._find_neighbors_of_cluster_holistic for all clusters. :param maximal_size: maximal allowed size of the set "cluster+neighborhood" :param chopping_threshold: numerical value, for which correlations lower than chopping_threshold are set to 0. If not provided, it adds all_neighbors until maximal_size is met. 
:param show_progress_bar: specify whether to show progress bar :return: neighbors_dictionary: dictionary where KEY is label for cluster, and VALUE is list of its neighbors """ self._neighborhoods = {} clusters_list = self._clusters_list range_clusters = range(len(clusters_list)) if show_progress_bar: range_clusters = tqdm(range_clusters) for index_cluster in range_clusters: cluster = clusters_list[index_cluster] self._neighborhoods[ self.get_qubits_key(cluster)] = self._find_neighbors_of_cluster_holistic( cluster, maximal_size, chopping_threshold) return self._neighborhoods def _find_neighbors_of_cluster_pairwise(self, cluster: List[int], maximal_size: int, neighbors_threshold: float ) -> List[int]: """ Like self._find_neighbors_of_cluster_holistic but looks how noise on qubits in given cluster depend on input state of other qubits (potential neighbors) *separately*. NOTE: see description of self._find_neighbors_of_cluster_holistic for definition of correlations' measure we use :param cluster: list of labels of qubits in potentially_stochastic_matrix cluster :param maximal_size: maximal allowed size of the set "cluster+neighborhood" :param neighbors_threshold: numerical value, for which correlations higher than neighbors_threshold assign qubit to the neighborhood of other qubit :return: neighbors_list: list of lists, each representing potentially_stochastic_matrix single cluster """ qubit_indices = self._qubit_indices potential_neighbors = [] for qj in qubit_indices: affections_qj = [] for qi in cluster: if qj not in cluster: corr_j_i = self._correlations_table_pairs[qi, qj] affections_qj.append(corr_j_i) if qj not in cluster: corr_j_i = np.max(affections_qj) if corr_j_i >= neighbors_threshold: potential_neighbors.append([qj, corr_j_i]) sorted_neighbors = sorted(potential_neighbors, key=lambda x: x[1], reverse=True) target_size = maximal_size - len(cluster) range_final = int(np.min([len(sorted_neighbors), target_size])) return sorted([sorted_neighbors[index][0] for 
index in range(range_final)]) def _find_all_neighborhoods_pairwise(self, maximal_size: int, neighbors_threshold: float, show_progress_bar: Optional[bool] = False ) -> Dict[str, List[int]]: """ Like self._find_neighbors_of_cluster_holistic but looks how noise on qubits in given cluster depend on input state of other qubits (potential neighbors) *separately*. NOTE: see description of self._find_neighbors_of_cluster_holistic for definition of correlations' measure we use :param maximal_size: maximal allowed size of the set "cluster+neighborhood" :param neighbors_threshold: numerical value, for which correlations higher than neighbors_threshold assign qubit to the neighborhood of other qubit :return: neighbors_dictionary: dictionary where KEY is label for cluster, and VALUE is list of its neighbors """ if self._correlations_table_pairs is None: self.compute_correlations_table_pairs() self._neighborhoods = {} clusters_list = self._clusters_list range_clusters = range(len(clusters_list)) if show_progress_bar: range_clusters = tqdm(range_clusters) for index_cluster in range_clusters: cluster = clusters_list[index_cluster] self._neighborhoods[ self.get_qubits_key(cluster)] = self._find_neighbors_of_cluster_pairwise( cluster, maximal_size=maximal_size, neighbors_threshold=neighbors_threshold) return self._neighborhoods def compute_clusters(self, maximal_size: int, method: Optional[str] = 'holistic_v1', method_kwargs: Optional[dict] = None) -> list: """ Get partition of qubits in potentially_stochastic_matrix device into disjoint "clusters". This function uses various heuristic methods, specified via string "version". 
It uses table of correlations from class property self._correlations_table_pairs :param maximal_size: maximal allowed size of the cluster :param method: string specifying stochasticity_type of heuristic Possible values: 'pairwise' - heuristic that uses Algorithm 3 from Ref.[] 'holistic_v1' - heuristic that uses function partition_algorithm_v1_cummulative :param method_kwargs: potential arguments that will be passed to clustering function. For possible parameters see descriptions of particular functions. :return: clusters_labels_list: list of lists, each representing potentially_stochastic_matrix single cluster """ self._clusters_list = [] if method == 'pairwise': if method_kwargs is None: default_kwargs = {'maximal_size': maximal_size, 'cluster_threshold': 0.02 } method_kwargs = default_kwargs elif 'maximal_size' in method_kwargs.keys(): if method_kwargs['maximal_size'] != maximal_size: raise ValueError('Disagreement between maximal size argument and method_name kwargs') else: method_kwargs['maximal_size'] = maximal_size clusters_list = self._compute_clusters_pairwise(**method_kwargs) elif method == 'holistic_v1': if method_kwargs is None: alpha = 1 algorithm_runs = 1000 default_kwargs = {'alpha': alpha, 'N_alg': algorithm_runs, 'printing': False, 'drawing': False} method_kwargs = default_kwargs elif 'C_maxsize' in method_kwargs.keys(): # TODO FBM, OS: this variable should have name consistent with rest of functions if method_kwargs['C_maxsize'] != maximal_size: raise ValueError('Disagreement between maximal size argument and method_name kwargs') else: method_kwargs['C_maxsize'] = maximal_size clusters_list, score = partition_algorithm_v1_cummulative(self._correlations_table_pairs, **method_kwargs) anf.cool_print('Current partitioning got score:', score) else: raise ValueError('No heuristic with that name: ' + method) self._clusters_list = clusters_list return clusters_list def find_all_neighborhoods(self, maximal_size: int, method: Optional[str] = 'holistic', 
method_kwargs: Optional[dict] = None): if method == 'pairwise': if method_kwargs is None: default_kwargs = {'neighbors_threshold': 0.01} method_kwargs = default_kwargs method_kwargs['maximal_size'] = maximal_size neighborhoods = self._find_all_neighborhoods_pairwise(**method_kwargs) elif method == 'holistic': if method_kwargs is None: default_kwargs = {'chopping_threshold': 0.0, 'show_progress_bar': True} method_kwargs = default_kwargs method_kwargs['maximal_size'] = maximal_size neighborhoods = self._find_all_neighborhoods_holistic(**method_kwargs) else: raise ValueError('Wrong method_name name') return neighborhoods def print_properties(self): # TODO FBM, OS: add this return None def draw_noise_model(self): # TODO FBM, OS: add this return None
nilq/baby-python
python
"""
Demo: fit local Gaussian surfaces to a scanned point cloud ("helmet") and
visualize surface-following steps on it.

The script has three stages separated by blocking ``base.run()`` calls.
``run`` never returns, so only stage 1 executes as the file stands; the later
stages are kept for interactive experimentation (run them by removing the
earlier ``base.run()`` calls):

1. For a sweep of seed points, collect each .03-radius neighborhood, fit a
   plane plus a 1-component mixed-Gaussian surface to it, and render the patch.
2. (unreachable) Fit a single global Gaussian surface to the whole cloud.
3. (unreachable) Ray-cast onto the model, then twice "walk" along the surface
   in small steps, re-fitting a local surface and re-projecting at every step.

Cleanup notes (review): large blocks of commented-out exploratory code, the
unused ``import random``, ``circle_radius`` and ``t_cpt`` locals, and a
duplicated ``bowl_model.attach_to(base)`` call were removed; the ``cKDTree``
build was hoisted out of the stage-1 loop (the cloud never changes inside it).
"""
import math
import pickle

import numpy as np
from scipy.spatial import cKDTree

import basis.robot_math as rm
import modeling.collision_model as cm
import modeling.geometric_model as gm
import vision.depth_camera.surface.gaussian_surface as gs
import vision.depth_camera.surface.rbf_surface as rbfs  # kept: RBF alternative to the Gaussian surface
import visualization.panda.world as wd

base = wd.World(cam_pos=np.array([-.3, -.9, .3]), lookat_pos=np.array([0, 0, 0]))

# Load the scanned point cloud (presumably mm -> m, given the 1e-3 scale —
# TODO confirm) and center it on its centroid.
model_pcd = pickle.load(open("helmet_gaussian.pkl", "rb"))['objpcd'] * 1e-3
origin = np.mean(model_pcd, axis=0)
bowl_samples = model_pcd - origin
bowl_model = cm.CollisionModel(initor=bowl_samples)
bowl_model.set_rgba([.3, .3, .3, .3])
bowl_model.attach_to(base)

# ---- Stage 1: local surface patches around a sweep of seed points ----------
tree = cKDTree(bowl_samples)  # hoisted: loop-invariant
for point_id in range(3000, 10000, 100):
    nearby_sample_ids = tree.query_ball_point(bowl_samples[point_id, :], .03)
    nearby_samples = bowl_samples[nearby_sample_ids]
    colors = np.tile(np.array([1, 0, 0, 1]), (len(nearby_samples), 1))
    print(nearby_samples.shape)
    print(colors.shape)
    nearby_samples_withcolor = np.column_stack((nearby_samples, colors))
    gm.GeometricModel(nearby_samples_withcolor).attach_to(base)
    # Local frame: z = fitted plane normal, x/y span the fitted plane.
    plane_center, plane_normal = rm.fit_plane(nearby_samples)
    plane_tangential = rm.orthogonal_vector(plane_normal)
    plane_tmp = np.cross(plane_normal, plane_tangential)
    plane_rotmat = np.column_stack((plane_tangential, plane_tmp, plane_normal))
    # Express the neighborhood in the local frame and fit z = f(x, y).
    nearby_samples_on_xy = plane_rotmat.T.dot((nearby_samples - plane_center).T).T
    surface = gs.MixedGaussianSurface(nearby_samples_on_xy[:, :2], nearby_samples_on_xy[:, 2], n_mix=1)
    surface_gm = surface.get_gometricmodel([[-.05, .05], [-.05, .05]], rgba=[.5, .7, 1, 1])
    surface_gm.set_pos(plane_center)
    surface_gm.set_rotmat(plane_rotmat)
    surface_gm.attach_to(base)
base.run()  # blocks forever: everything below runs only if this call is removed

# ---- Stage 2 (unreachable): one global surface over the whole cloud --------
pn_direction = np.array([0, 0, -1])
surface = gs.MixedGaussianSurface(bowl_samples[:, :2], bowl_samples[:, 2], n_mix=1)
surface_cm = surface.get_gometricmodel(rgba=[.3, .3, .3, 1]).attach_to(base)
base.run()

# ---- Stage 3 (unreachable): ray-cast, then walk along the surface ----------
# Build a reference frame whose z axis is the probing direction.
pt_direction = rm.orthogonal_vector(pn_direction, toggle_unit=True)
tmp_direction = np.cross(pn_direction, pt_direction)
plane_rotmat = np.column_stack((pt_direction, tmp_direction, pn_direction))
homomat = np.eye(4)
homomat[:3, :3] = plane_rotmat
homomat[:3, 3] = np.array([-.07, -.03, .1])
twod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=homomat, rgba=[1, 1, 1, .3])
twod_plane.attach_to(base)
# A square outline on the probing plane, drawn as four sticks.
line_segs = [[homomat[:3, 3], homomat[:3, 3] + pt_direction * .05],
             [homomat[:3, 3] + pt_direction * .05,
              homomat[:3, 3] + pt_direction * .05 + tmp_direction * .05],
             [homomat[:3, 3] + pt_direction * .05 + tmp_direction * .05,
              homomat[:3, 3] + tmp_direction * .05],
             [homomat[:3, 3] + tmp_direction * .05, homomat[:3, 3]]]
for sec in line_segs:
    gm.gen_stick(spos=sec[0], epos=sec[1], rgba=[0, 0, 0, 1], thickness=.002, type='round').attach_to(base)
epos = (line_segs[0][1] - line_segs[0][0]) * .7 + line_segs[0][0]
gm.gen_arrow(spos=line_segs[0][0], epos=epos, thickness=0.004).attach_to(base)
spt = homomat[:3, 3]
gm.gen_dasharrow(spt, spt - pn_direction * .07, thickness=.004).attach_to(base)  # p0
# Cast a ray onto the model to find the contact point and its normal.
cpt, cnrml = bowl_model.ray_hit(spt, spt + pn_direction * 10000, option='closest')
gm.gen_dashstick(spt, cpt, rgba=[.57, .57, .57, .7], thickness=0.003).attach_to(base)
gm.gen_sphere(pos=cpt, radius=.005).attach_to(base)
gm.gen_dasharrow(cpt, cpt - pn_direction * .07, thickness=.004).attach_to(base)  # p0
gm.gen_dasharrow(cpt, cpt + cnrml * .07, thickness=.004).attach_to(base)  # p0
# Rotate the probing frame so its z axis aligns with the hit normal.
angle = rm.angle_between_vectors(-pn_direction, cnrml)
vec = np.cross(-pn_direction, cnrml)
rotmat = rm.rotmat_from_axangle(vec, angle)
new_plane_homomat = np.eye(4)
new_plane_homomat[:3, :3] = rotmat.dot(homomat[:3, :3])
new_plane_homomat[:3, 3] = cpt
twod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=new_plane_homomat, rgba=[1, 1, 1, .3])
twod_plane.attach_to(base)
new_line_segs = [[cpt, cpt + rotmat.dot(pt_direction) * .05],
                 [cpt + rotmat.dot(pt_direction) * .05,
                  cpt + rotmat.dot(pt_direction) * .05 + rotmat.dot(tmp_direction) * .05],
                 [cpt + rotmat.dot(pt_direction) * .05 + rotmat.dot(tmp_direction) * .05,
                  cpt + rotmat.dot(tmp_direction) * .05],
                 [cpt + rotmat.dot(tmp_direction) * .05, cpt]]
epos = (new_line_segs[0][1] - new_line_segs[0][0]) * .7 + new_line_segs[0][0]
gm.gen_arrow(spos=new_line_segs[0][0], epos=epos, thickness=0.004).attach_to(base)

# First walk: step along `direction`, re-fitting a local surface each step and
# projecting the candidate point onto it; the frame is rotated cumulatively.
last_normal = cnrml
direction = rotmat.dot(pt_direction)
tmp_direction = rotmat.dot(tmp_direction)
n = 5
for tick in range(1, n + 1):
    t_npt = cpt + direction * .05 / n
    gm.gen_arrow(spos=t_npt, epos=t_npt + last_normal * .025, thickness=0.001, rgba=[1, 1, 0, 1]).attach_to(base)
    nearby_sample_ids = tree.query_ball_point(t_npt, .005)
    nearby_samples = bowl_samples[nearby_sample_ids]
    gm.GeometricModel(nearby_samples).attach_to(base)
    plane_center, plane_normal = rm.fit_plane(nearby_samples)
    plane_tangential = rm.orthogonal_vector(plane_normal)
    plane_tmp = np.cross(plane_normal, plane_tangential)
    plane_rotmat = np.column_stack((plane_tangential, plane_tmp, plane_normal))
    nearby_samples_on_xy = plane_rotmat.T.dot((nearby_samples - plane_center).T).T
    surface = gs.MixedGaussianSurface(nearby_samples_on_xy[:, :2], nearby_samples_on_xy[:, 2], n_mix=1)
    # Project the tentative next point onto the fitted local surface.
    t_npt_on_xy = plane_rotmat.T.dot(t_npt - plane_center)
    projected_t_npt_z_on_xy = surface.get_zdata(np.array([t_npt_on_xy[:2]]))
    projected_t_npt_on_xy = np.array([t_npt_on_xy[0], t_npt_on_xy[1], projected_t_npt_z_on_xy[0]])
    projected_point = plane_rotmat.dot(projected_t_npt_on_xy) + plane_center
    surface_gm = surface.get_gometricmodel([[-.05, .05], [-.05, .05]], rgba=[.5, .7, 1, .1])
    surface_gm.set_pos(plane_center)
    surface_gm.set_rotmat(plane_rotmat)
    surface_gm.attach_to(base)
    # Surface normal estimate; flip it if it points along the probing direction.
    new_normal = rm.unit_vector(t_npt - projected_point)
    if pn_direction.dot(new_normal) > .1:
        new_normal = -new_normal
    gm.gen_arrow(spos=projected_point, epos=projected_point + new_normal * .025, thickness=0.001).attach_to(base)
    # Rotate the walking frame by the normal change (NOTE: degenerate if the
    # normals are parallel — np.cross is then the zero vector).
    angle = rm.angle_between_vectors(last_normal, new_normal)
    vec = rm.unit_vector(np.cross(last_normal, new_normal))
    new_rotmat = rm.rotmat_from_axangle(vec, angle)
    direction = new_rotmat.dot(direction)
    tmp_direction = new_rotmat.dot(tmp_direction)
    gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1, .6, 0, 1], thickness=.002, type='round').attach_to(base)
    cpt = projected_point
    last_normal = new_normal

# Second walk: turn 90 degrees (step along the tangential axis). Unlike the
# first walk, each step recomputes `direction` from the first walk's final
# `tmp_direction` rather than cumulatively — presumably an experiment left in
# place; TODO confirm intended behavior. `new_rotmat` carries over from the
# last iteration above.
direction = new_rotmat.dot(tmp_direction)
for tick in range(1, n + 1):
    t_npt = cpt + direction * .05 / n
    gm.gen_arrow(spos=t_npt, epos=t_npt + last_normal * .025, thickness=0.001, rgba=[1, 1, 0, 1]).attach_to(base)
    nearby_sample_ids = tree.query_ball_point(t_npt, .005)
    nearby_samples = bowl_samples[nearby_sample_ids]
    gm.GeometricModel(nearby_samples).attach_to(base)
    plane_center, plane_normal = rm.fit_plane(nearby_samples)
    plane_tangential = rm.orthogonal_vector(plane_normal)
    plane_tmp = np.cross(plane_normal, plane_tangential)
    plane_rotmat = np.column_stack((plane_tangential, plane_tmp, plane_normal))
    nearby_samples_on_xy = plane_rotmat.T.dot((nearby_samples - plane_center).T).T
    surface = gs.MixedGaussianSurface(nearby_samples_on_xy[:, :2], nearby_samples_on_xy[:, 2], n_mix=1)
    t_npt_on_xy = plane_rotmat.T.dot(t_npt - plane_center)
    projected_t_npt_z_on_xy = surface.get_zdata(np.array([t_npt_on_xy[:2]]))
    projected_t_npt_on_xy = np.array([t_npt_on_xy[0], t_npt_on_xy[1], projected_t_npt_z_on_xy[0]])
    projected_point = plane_rotmat.dot(projected_t_npt_on_xy) + plane_center
    surface_gm = surface.get_gometricmodel([[-.05, .05], [-.05, .05]], rgba=[.5, .7, 1, .1])
    surface_gm.set_pos(plane_center)
    surface_gm.set_rotmat(plane_rotmat)
    surface_gm.attach_to(base)
    new_normal = rm.unit_vector(t_npt - projected_point)
    if pn_direction.dot(new_normal) > .1:
        new_normal = -new_normal
    gm.gen_arrow(spos=projected_point, epos=projected_point + new_normal * .025, thickness=0.001).attach_to(base)
    angle = rm.angle_between_vectors(last_normal, new_normal)
    vec = rm.unit_vector(np.cross(last_normal, new_normal))
    new_rotmat = rm.rotmat_from_axangle(vec, angle)
    direction = new_rotmat.dot(tmp_direction)  # recomputed, not cumulative (see note above)
    gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1, .6, 0, 1], thickness=.002, type='round').attach_to(base)
    cpt = projected_point
    last_normal = new_normal

base.run()
nilq/baby-python
python
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.test import TestCase
from model_bakery import baker

from paranoid_model.tests import models


class TestGetObjectOr404(TestCase):
    """Behaviour of ``get_object_or_404`` against the paranoid ``Person`` model."""

    def test_raise_when_object_does_not_exists(self):
        """A lookup that matches no row raises Http404."""
        with self.assertRaises(Http404):
            get_object_or_404(models.Person.objects, id=None)

    def test_raise_when_object_is_soft_deleted(self):
        """A deleted row is treated as absent and raises Http404."""
        person = baker.make(models.Person)
        person.delete()  # assumed to soft-delete under paranoid_model — the row still exists in the DB
        with self.assertRaises(Http404):
            get_object_or_404(models.Person.objects, id=person.id)

    def test_returns_the_object_when_object_exists(self):
        """A live row is found and returned unchanged."""
        person = baker.make(models.Person)
        self.assertEqual(person, get_object_or_404(models.Person.objects, id=person.id))
nilq/baby-python
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SQLite-backed queue for weather measurement lines, shared by the DB-loader
and Elasticsearch-indexer threads, plus a janitor thread that periodically
purges entries already processed by both consumers.

Created on Thu Sep 13 12:14:02 2018

@author: jguillaumes
"""

import os
import sqlite3
import configparser
import threading
import calendar
import pkg_resources

from time import sleep

from weatherLib.weatherUtil import WLogger, parseLine

# SQL statements. Each queue row carries two flags: isDB (stored in the
# database) and isES (indexed in elasticsearch); a row can be purged once both
# are set.
_SELECT_TSA = 'select maxtsa from tsas where day = ?'
_INSERT_QUEUE = 'insert into queue(id, timeReceived, data, isES, isDB) ' + \
                'values(?,strftime(\'%Y-%m-%dT%H:%M:%f+00:00\',\'now\'),?,0,0)'
_INSERT_DAY = 'insert into tsas(day, maxtsa) values(?,1)'
_UPDATE_TSA = 'update tsas set maxtsa = ? where day = ?'
_SELECT_DB = 'select id,data,isDB from queue where isDB = 0 order by isDB,id'
_UPDATE_DB = 'update queue set isDB = 1 where id = ?'
_SELECT_ES = 'select id,data,isDB from queue where isES = 0 order by isES,id'
_UPDATE_ES = 'update queue set isES = 1 where id = ?'
_PURGE_QUEUE = 'delete from queue where isDB=1 and isES=1'
_COUNT_QUEUE = 'select count(*) from queue where isDB=1 and isES=1'


class WeatherQueue(object):
    """
    Weather measurements queue.

    Implemented on a sqlite3 database; all access is serialized through a
    single lock so the connection can be shared between threads
    (``check_same_thread=False``).
    """

    def __init__(self, dbdir):
        """
        Initialize the queue database connection and, if necessary, create
        the database (DDL is read from the packaged ``wQueue.ini``).

        :param dbdir: directory where the ``wQueue.db`` file lives/is created
        """
        self.logger = WLogger()
        self.theLock = threading.Lock()
        self.curDay = 0
        self.curTSA = 0

        ini_file = pkg_resources.resource_filename(__name__, './database/wQueue.ini')
        config = configparser.ConfigParser()
        config.read([ini_file])
        tableDDL = config['queueDatabase']['table']
        tsasDDL = config['queueDatabase']['control']
        indexESDDL = config['queueDatabase']['indexES']
        indexDBDDL = config['queueDatabase']['indexDB']
        dbFile = os.path.join(dbdir, 'wQueue.db')

        try:
            self.theConn = sqlite3.connect(dbFile, check_same_thread=False)
            self.theConn.isolation_level = 'IMMEDIATE'
            # DDL statements are written to be idempotent (re-running them on
            # an existing database is expected to be harmless).
            self.theConn.execute(tableDDL)
            self.theConn.execute(indexESDDL)
            self.theConn.execute(indexDBDDL)
            self.theConn.execute(tsasDDL)
            self.theConn.commit()
            self.logger.logMessage(level="INFO",
                                   message="Queue database opened at {0:s}".format(dbFile))
        except Exception:
            # was a bare except: — it would also have swallowed SystemExit /
            # KeyboardInterrupt. NOTE(review): the error is only logged, so the
            # instance is left half-initialized; callers see failures later.
            self.logger.logException('Error initializing queue database')

    def pushLine(self, line):
        """
        Push a measurement line into the queue, assigning it a unique id of
        the form YYYYMMDD * 1e6 + per-day sequence number (TSA).

        Blocks until the internal lock (and the database) is available.

        :param line: raw measurement line; its timestamp is the first field
                     returned by parseLine()
        """
        stamp, _, _, _, _, _, _, _, _, _, _, _ = parseLine(line)
        # Key for the per-day TSA counter: epoch seconds of the day at midnight.
        datestamp = calendar.timegm(stamp.date().timetuple())
        theTsa = 1
        with self.theLock:
            try:
                result = self.theConn.execute(_SELECT_TSA, [datestamp])
                resCol = result.fetchone()
                if resCol is None:
                    # first line of the day: create the counter row (maxtsa=1)
                    self.theConn.execute(_INSERT_DAY, [datestamp])
                else:
                    theTsa = resCol[0] + 1
                    self.theConn.execute(_UPDATE_TSA, [theTsa, datestamp])
                fullTsa = (stamp.year * 10000 + stamp.month * 100 + stamp.day) * 1000000 + theTsa
                self.theConn.execute(_INSERT_QUEUE, [fullTsa, line])
                self.theConn.commit()
            except Exception:  # was a bare except:
                self.logger.logException('Error inserting line into the queue database')
                self.theConn.rollback()

    def getDbQueue(self):
        """
        Get all the queue lines NOT yet marked as inserted into the database
        (isDB == 0).

        :return: list of (id, data, isDB) rows, or None on error
        """
        with self.theLock:
            try:
                result = self.theConn.execute(_SELECT_DB)
                return result.fetchall()
            except Exception:  # was a bare except:
                self.logger.logException('Error fetching DB queue')
                self.theConn.rollback()
                return None

    def markDbQueue(self, theId):
        """
        Mark a queue entry as inserted into the database.

        :param theId: row identifier to mark
        """
        with self.theLock:
            with self.theConn:
                self.theConn.execute(_UPDATE_DB, [theId])
                self.theConn.commit()
                self.logger.logMessage(level='DEBUG',
                                       message='Queue entry {0} marked as DB-done'.format(theId))

    def getESQueue(self):
        """
        Get all the queue lines NOT yet marked as indexed in elasticsearch
        (isES == 0).

        :return: list of (id, data, isDB) rows, or None on error
        """
        with self.theLock:
            try:
                result = self.theConn.execute(_SELECT_ES)
                return result.fetchall()
            except Exception:  # was a bare except:
                self.logger.logException('Error fetching ES queue')
                self.theConn.rollback()
                return None

    def markESQueue(self, theId):
        """
        Mark a queue entry as indexed in elasticsearch.

        :param theId: row identifier to mark
        """
        with self.theLock:
            with self.theConn:
                self.theConn.execute(_UPDATE_ES, [theId])
                self.theConn.commit()
                self.logger.logMessage(level='DEBUG',
                                       message='Queue entry {0} marked as ES-done'.format(theId))

    def purgeQueue(self):
        """Delete every queue entry already processed by both consumers."""
        with self.theLock:
            with self.theConn as conn:
                result = conn.execute(_COUNT_QUEUE)
                count = result.fetchone()[0]
                self.logger.logMessage(message="About to purge {0} queue entries.".format(count))
                conn.execute(_PURGE_QUEUE)
                conn.commit()
                self.logger.logMessage(message="Queue purged.")


class QueueJanitorThread(threading.Thread):
    """
    Thread that performs maintenance tasks in the queue database: it awakes
    itself periodically to delete the queue elements which have already been
    processed by both consumers.
    """

    _logger = WLogger()

    def __init__(self, queue, period=60):
        """
        :param queue: the WeatherQueue instance to purge
        :param period: seconds between purges
        """
        super().__init__()
        self.theQueue = queue
        self.thePeriod = period
        self._stopSwitch = False
        self.name = 'QueueJanitorThread'
        self._pending = False  # True while a Timer is scheduled and not yet fired
        QueueJanitorThread._logger.logMessage(
            "Janitor configured to run every {0} seconds".format(period))

    def stop(self):
        """Request termination; run() notices within about one second."""
        self._stopSwitch = True

    def run(self):
        """
        Main loop: keep (at most) one Timer scheduled to call doCleanup()
        every `thePeriod` seconds, polling the stop switch once per second.
        On termination, cancel the outstanding timer if any.
        """
        theTimer = None
        self._pending = False
        QueueJanitorThread._logger.logMessage("Starting thread {0}.".format(self.name),
                                              level="INFO")
        while not self._stopSwitch:
            if not self._pending:
                theTimer = threading.Timer(self.thePeriod, self.doCleanup)
                theTimer.name = "JanitorTimer"
                self._pending = True
                theTimer.start()
            sleep(1)
        # Fix: guard against stop() being called before the first timer was
        # created, in which case theTimer is still None.
        if theTimer is not None:
            theTimer.cancel()
        QueueJanitorThread._logger.logMessage("Thread {0} stopped by request.".format(self.name),
                                              level="INFO")

    def doCleanup(self):
        """
        Scheduled inside a Timer object by the run() loop.

        The _pending flag is reset in a finally block so that a purge failure
        does not leave the janitor permanently unscheduled (previously an
        exception in purgeQueue() left _pending True forever).
        """
        try:
            self.theQueue.purgeQueue()
        finally:
            self._pending = False
nilq/baby-python
python
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. You may obtain a copy of the License at: # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and limitations under the License. # ********************************************* Composition ************************************************************ """ Contents -------- * `Composition_Overview` * `Composition_Creation` - `Composition_Nested` * `Composition_Run` - `Composition_Run_Inputs` - `Composition_Input_as_Function` - `Composition_Scope_of_Execution` * `Composition_Controller` - `Composition_Controller_Assignment` - `Composition_Controller_Execution` * `Composition_Learning` - `Composition_Learning_Standard` • `Composition_Learning_Unsupervised` • `Composition_Learning_Unsupervised` - `Composition_Learning_Methods` - `Composition_Learning_Components` - `Composition_Learning_Execution` - `Composition_Learning_AutodiffComposition` - `Composition_Learning_UDF` * `Composition_Visualization` * `Composition_Class_Reference` .. _Composition_Overview: Overview -------- Composition is the base class for objects that combine PsyNeuLink `Components <Component>` into an executable model. It defines a common set of attributes possessed, and methods used by all Composition objects. Composition "Nodes" are `Mechanisms <Mechanism>` and/or nested `Compositions <Composition>`. `Projections <Projection>` connect two Nodes. The Composition's `graph <Composition.graph>` stores the structural relationships among the Nodes of a Composition and the Projections that connect them. 
The Composition's `scheduler <Composition.scheduler>` generates an execution queue based on these structural dependencies, allowing for other user-specified scheduling and termination conditions to be specified.

.. _Composition_Creation:

Creating a Composition
----------------------

A generic Composition can be created by calling the constructor, and then adding `Components <Component>` using the following Composition methods:

    - `add_node <Composition.add_node>`

        adds a node to the Composition

    - `add_nodes <Composition.add_nodes>`

        adds multiple nodes to the Composition

    - `add_projection <Composition.add_projection>`

        adds a connection between a pair of nodes in the Composition

    - `add_projections <Composition.add_projections>`

        adds connections between multiple pairs of nodes in the Composition

    - `add_linear_processing_pathway <Composition.add_linear_processing_pathway>`

        adds and connects a list of nodes and/or Projections to the Composition; Inserts a default Projection between any adjacent Nodes.

In addition, a Composition has the following set of `learning methods <Composition_Learning_Methods>` that can also be used to create a Composition from (or add) pathways that implement `learning <Composition_Learning>`:

    - `add_linear_learning_pathway <Composition.add_linear_learning_pathway>`

        adds and connects a list of nodes, including `learning components <Composition_Learning_Components>` needed to implement the algorithm specified in its **learning_function** argument in the specified pathway.
- `add_reinforcement_learning_pathway <Composition.add_reinforcement_learning_pathway>` adds and connects a list of nodes, including `learning components <Composition_Learning_Components>` needed to implement `reinforcement learning` in the specified pathway; - `add_td_learning_pathway <Composition.add_td_learning_pathway>` adds and connects a list of nodes, including `learning components <Composition_Learning_Components>` needed to implement the `temporal differences` method of reinforcement learning` in the specified pathway; - `add_backpopagation_learning_pathway <Composition.add_backpopagation_learning_pathway>` adds and connects a list of nodes, including `learning components <Composition_Learning_Components>` needed to implement the `backpropagation learning algorithm` in the specified pathway. .. note:: Only Mechanisms and Projections added to a Composition via the methods above constitute a Composition, even if other Mechanism and/or Projections are constructed in the same script. COMMENT: • MOVE THE EXAPLES BELOW TO AN "Examples" SECTION COMMENT In the following script comp_0, comp_1 and comp_2 are identical, but constructed using different methods. 
*Create Mechanisms:* >>> import psyneulink as pnl >>> A = pnl.ProcessingMechanism(name='A') >>> B = pnl.ProcessingMechanism(name='B') >>> C = pnl.ProcessingMechanism(name='C') *Create Projections:* >>> A_to_B = pnl.MappingProjection(name="A-to-B") >>> B_to_C = pnl.MappingProjection(name="B-to-C") *Create Composition; Add Nodes (Mechanisms) and Projections via the add_linear_processing_pathway method:* >>> comp_0 = pnl.Composition(name='comp-0') >>> comp_0.add_linear_processing_pathway(pathway=[A, A_to_B, B, B_to_C, C]) *Create Composition; Add Nodes (Mechanisms) and Projections via the add_nodes and add_projection methods:* >>> comp_1 = pnl.Composition(name='comp-1') >>> comp_1.add_nodes(nodes=[A, B, C]) >>> comp_1.add_projection(projection=A_to_B) >>> comp_1.add_projection(projection=B_to_C) *Create Composition; Add Nodes (Mechanisms) and Projections via the add_node and add_projection methods:* >>> comp_2 = pnl.Composition(name='comp-2') >>> comp_2.add_node(node=A) >>> comp_2.add_node(node=B) >>> comp_2.add_node(node=C) >>> comp_2.add_projection(projection=A_to_B) >>> comp_2.add_projection(projection=B_to_C) *Run each Composition:* >>> input_dict = {A: [[[1.0]]]} >>> comp_0_output = comp_0.run(inputs=input_dict) >>> comp_1_output = comp_1.run(inputs=input_dict) >>> comp_2_output = comp_2.run(inputs=input_dict) .. _Composition_Nested: *Nested Compositions* ===================== A Composition can be used as a node of another Composition, by calling `add_node <Composition.add_node>` from the parent composition using the child Composition as an argument. Projections can then be specifed to and from the nested composition just as for any other node. 
*Create outer Composition:* >>> outer_A = pnl.ProcessingMechanism(name='outer_A') >>> outer_B = pnl.ProcessingMechanism(name='outer_B') >>> outer_comp = pnl.Composition(name='outer_comp') >>> outer_comp.add_nodes([outer_A, outer_B]) *Create and configure inner Composition:* >>> inner_A = pnl.ProcessingMechanism(name='inner_A') >>> inner_B = pnl.ProcessingMechanism(name='inner_B') >>> inner_comp = pnl.Composition(name='inner_comp') >>> inner_comp.add_linear_processing_pathway([inner_A, inner_B]) *Nest inner Composition within outer Composition using `add_node <Composition.add_node>`:* >>> outer_comp.add_node(inner_comp) *Create Projections:* >>> outer_comp.add_projection(pnl.MappingProjection(), sender=outer_A, receiver=inner_comp) >>> outer_comp.add_projection(pnl.MappingProjection(), sender=inner_comp, receiver=outer_B) >>> input_dict = {outer_A: [[[1.0]]]} *Run Composition:* >>> outer_comp.run(inputs=input_dict) *Using `add_linear_processing_pathway <Composition.add_linear_processing_pathway>` with nested compositions for brevity:* >>> outer_A = pnl.ProcessingMechanism(name='outer_A') >>> outer_B = pnl.ProcessingMechanism(name='outer_B') >>> outer_comp = pnl.Composition(name='outer_comp') >>> inner_A = pnl.ProcessingMechanism(name='inner_A') >>> inner_B = pnl.ProcessingMechanism(name='inner_B') >>> inner_comp = pnl.Composition(name='inner_comp') >>> inner_comp.add_linear_processing_pathway([inner_A, inner_B]) >>> outer_comp.add_linear_processing_pathway([outer_A, inner_comp, outer_B]) >>> input_dict = {outer_A: [[[1.0]]]} >>> outer_comp.run(inputs=input_dict) .. _Composition_Run: Running a Composition --------------------- .. _Composition_Run_Inputs: *Run with Input Dictionary* =========================== The `run <Composition.run>` method presents the inputs for each `TRIAL` to the input_ports of the INPUT Nodes in the `scope of execution <Composition_Scope_of_Execution>`. 
These input values are specified in the **inputs** argument of a Composition's `execute <Composition.execute>` or `run <Composition.run>` methods. COMMENT: From KAM 2/7/19 - not sure "scope of execution" is the right phrase. To me, it implies that only a subset of the nodes in the Composition belong to the "scope of execution". What we want to convey (I think) is that ALL of the Nodes execute, but they do so in a "state" (history, parameter vals) corresponding to a particular execution id. COMMENT The standard way to specificy inputs is a Python dictionary in which each key is an `INPUT <NodeRole.INPUT>` Node and each value is a list. The lists represent the inputs to the key `INPUT <NodeRole.INPUT>` Nodes, in which the i-th element of the list represents the input value to the key Node on trial i. .. _Composition_Run_Inputs_Fig_States: .. figure:: _static/input_spec_states.svg :alt: Example input specifications with input ports Each input value must be compatible with the shape of the key `INPUT <NodeRole.INPUT>` Node's `external_input_values <MechanismBase.external_input_values>`. As a result, each item in the list of inputs is typically a 2d list/array, though `some shorthand notations are allowed <Composition_Input_Specification_Examples>`. >>> import psyneulink as pnl >>> a = pnl.TransferMechanism(name='a', ... default_variable=[[0.0, 0.0]]) >>> b = pnl.TransferMechanism(name='b', ... default_variable=[[0.0], [0.0]]) >>> c = pnl.TransferMechanism(name='c') >>> pathway1 = [a, c] >>> pathway2 = [b, c] >>> comp = Composition(name='comp') >>> comp.add_linear_processing_pathway(pathway1) >>> comp.add_linear_processing_pathway(pathway2) >>> input_dictionary = {a: [[[1.0, 1.0]], [[1.0, 1.0]]], ... b: [[[2.0], [3.0]], [[2.0], [3.0]]]} >>> comp.run(inputs=input_dictionary) .. 
note:: A Node's `external_input_values <MechanismBase.external_input_values>` attribute is always a 2d list in which the index i element is the value of the Node's index i `external_input_port <MechanismBase.external_input_ports>`. In many cases, `external_input_values <MechanismBase.external_input_values>` is the same as `variable <MechanismBase.variable>`. Keep in mind that any InputPorts marked as "internal" are excluded from `external_input_values <MechanismBase.external_input_values>`, and do not receive user-specified input values. If num_trials is not in use, the number of inputs provided determines the number of trials in the run. For example, if five inputs are provided for each INPUT Node, and num_trials is not specified, the Composition executes five times. +----------------------+-------+------+------+------+------+ | Trial # |0 |1 |2 |3 |4 | +----------------------+-------+------+------+------+------+ | Input to Mechanism a |1.0 |2.0 |3.0 |4.0 |5.0 | +----------------------+-------+------+------+------+------+ >>> import psyneulink as pnl >>> a = pnl.TransferMechanism(name='a') >>> b = pnl.TransferMechanism(name='b') >>> pathway1 = [a, b] >>> comp = Composition(name='comp') >>> comp.add_linear_processing_pathway(pathway1) >>> input_dictionary = {a: [[[1.0]], [[2.0]], [[3.0]], [[4.0]], [[5.0]]]} >>> comp.run(inputs=input_dictionary) The number of inputs specified **must** be the same for all Nodes in the input dictionary (except for any Nodes for which only one input is specified). In other words, all of the values in the input dictionary must have the same length as each other (or length 1). If num_trials is in use, `run` iterates over the inputs until num_trials is reached. For example, if five inputs are provided for each `INPUT <NodeRole.INPUT>` Node, and num_trials = 7, the system executes seven times. The input values from trials 0 and 1 are used again on trials 5 and 6, respectively. 
+----------------------+-------+------+------+------+------+------+------+ | Trial # |0 |1 |2 |3 |4 |5 |6 | +----------------------+-------+------+------+------+------+------+------+ | Input to Mechanism a |1.0 |2.0 |3.0 |4.0 |5.0 |1.0 |2.0 | +----------------------+-------+------+------+------+------+------+------+ >>> import psyneulink as pnl >>> a = pnl.TransferMechanism(name='a') >>> b = pnl.TransferMechanism(name='b') >>> pathway1 = [a, b] >>> comp = Composition(name='comp') >>> comp.add_linear_processing_pathway(pathway1) >>> input_dictionary = {a: [[[1.0]], [[2.0]], [[3.0]], [[4.0]], [[5.0]]]} >>> comp.run(inputs=input_dictionary, ... num_trials=7) .. _Composition_Input_Specification_Examples: For convenience, condensed versions of the input specification described above are also accepted in the following situations: * **Case 1: INPUT Node has only one InputPort** +--------------------------+-------+------+------+------+------+ | Trial # |0 |1 |2 |3 |4 | +--------------------------+-------+------+------+------+------+ | Input to **Mechanism a** |1.0 |2.0 |3.0 |4.0 |5.0 | +--------------------------+-------+------+------+------+------+ Complete input specification: >>> import psyneulink as pnl >>> a = pnl.TransferMechanism(name='a') >>> b = pnl.TransferMechanism(name='b') >>> pathway1 = [a, b] >>> comp = Composition(name='comp') >>> comp.add_linear_processing_pathway(pathway1) >>> input_dictionary = {a: [[[1.0]], [[2.0]], [[3.0]], [[4.0]], [[5.0]]]} >>> comp.run(inputs=input_dictionary) Shorthand - drop the outer list on each input because **Mechanism a** only has one InputPort: >>> input_dictionary = {a: [[1.0], [2.0], [3.0], [4.0], [5.0]]} >>> comp.run(inputs=input_dictionary) Shorthand - drop the remaining list on each input because **Mechanism a**'s one InputPort's value is length 1: >>> input_dictionary = {a: [1.0, 2.0, 3.0, 4.0, 5.0]} >>> comp.run(inputs=input_dictionary) * **Case 2: Only one input is provided for the INPUT Node** 
+--------------------------+------------------+ | Trial # |0 | +--------------------------+------------------+ | Input to **Mechanism a** |[[1.0], [2.0]] | +--------------------------+------------------+ Complete input specification: >>> import psyneulink as pnl >>> a = pnl.TransferMechanism(name='a', default_variable=[[0.0], [0.0]]) >>> b = pnl.TransferMechanism(name='b') >>> pathway1 = [a, b] >>> comp = Composition(name='comp') >>> comp.add_linear_processing_pathway(pathway1) >>> input_dictionary = {a: [[[1.0], [2.0]]]} >>> comp.run(inputs=input_dictionary) Shorthand - drop the outer list on **Mechanism a**'s input specification because there is only one trial: >>> input_dictionary = {a: [[1.0], [2.0]]} >>> comp.run(inputs=input_dictionary) * **Case 3: The same input is used on all trials** +--------------------------+----------------+-----------------+----------------+----------------+----------------+ | Trial # |0 |1 |2 |3 |4 | +--------------------------+----------------+-----------------+----------------+----------------+----------------+ | Input to **Mechanism a** | [[1.0], [2.0]] | [[1.0], [2.0]] | [[1.0], [2.0]] | [[1.0], [2.0]] | [[1.0], [2.0]] | +--------------------------+----------------+-----------------+----------------+----------------+----------------+ Complete input specification: :: >>> import psyneulink as pnl >>> a = pnl.TransferMechanism(name='a', ... default_variable=[[0.0], [0.0]]) >>> b = pnl.TransferMechanism(name='b') >>> pathway1 = [a, b] >>> comp = Composition(name='comp') >>> comp.add_linear_processing_pathway(pathway1) >>> input_dictionary = {a: [[[1.0], [2.0]], [[1.0], [2.0]], [[1.0], [2.0]], [[1.0], [2.0]], [[1.0], [2.0]]]} >>> comp.run(inputs=input_dictionary) .. Shorthand - drop the outer list on **Mechanism a**'s input specification and use `num_trials` to repeat the input value :: >>> input_dictionary = {a: [[1.0], [2.0]]} >>> comp.run(inputs=input_dictionary, ... num_trials=5) .. 
* **Case 4: There is only one INPUT Node** +--------------------------+-------------------+-------------------+ | Trial # |0 |1 | +--------------------------+-------------------+-------------------+ | Input to **Mechanism a** | [1.0, 2.0, 3.0] | [1.0, 2.0, 3.0] | +--------------------------+-------------------+-------------------+ Complete input specification: :: >>> import psyneulink as pnl >>> a = pnl.TransferMechanism(name='a', ... default_variable=[[1.0, 2.0, 3.0]]) >>> b = pnl.TransferMechanism(name='b') >>> pathway1 = [a, b] >>> comp = Composition(name='comp') >>> comp.add_linear_processing_pathway(pathway1) >>> input_dictionary = input_dictionary = {a: [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]} >>> comp.run(inputs=input_dictionary) .. Shorthand - specify **Mechanism a**'s inputs in a list because it is the only INPUT Node :: >>> input_list = [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]] >>> comp.run(inputs=input_list) .. .. _Composition_Input_as_Function: *Run with Input Function* ========================= An alternative way to specify inputs is with a function. The function must return a dictionary that satisfies the rules above for standard input specification. The only difference is that on each execution, the function returns the input values for each INPUT Node for a single trial. COMMENT: The script below, for example, uses a function to specify inputs in order to interact with the Gym Forarger Environment. .. 
import psyneulink as pnl a = pnl.TransferMechanism(name='a') b = pnl.TransferMechanism(name='b') pathway1 = [a, b] comp = Composition(name='comp') comp.add_linear_processing_pathway(pathway1) def input_function(env, result): action = np.where(result[0] == 0, 0, result[0] / np.abs(result[0])) env_step = env.step(action) observation = env_step[0] done = env_step[2] if not done: # NEW: This function MUST return a dictionary of input values for a single trial for each INPUT node return {player: [observation[player_coord_idx]], prey: [observation[prey_coord_idx]]} return done return {a: [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]} comp.run(inputs=input_dictionary) COMMENT COMMENT: .. _Composition_Initial_Values_and_Feedback FIX: ADD SECTION ON CYCLES, FEEDBACK, INITIAL VALUES, RELEVANCE TO MODULATORY MECHANISMS REINITIALIZATION MODIFIED FROM SYSTEM (_System_Execution_Input_And_Initialization): ..[another type] of input can be provided in corresponding arguments of the `run <System.run>` method: a list or ndarray of **initial_values**[...] The **initial_values** are assigned at the start of a `TRIAL` as input to Nodes that close recurrent loops (designated as `FEEDBACK_SENDER`, and listed in the Composition's ?? attribute), .. _Composition_Scope_of_Execution: *Execution Contexts* ==================== An *execution context* is a scope of execution which has its own set of values for Components and their `parameters <Parameters>`. This is designed to prevent computations from interfering with each other, when Components are reused, which often occurs when using multiple or nested Compositions, or running `simulations <OptimizationControlMechanism_Execution>`. Each execution context is or is associated with an *execution_id*, which is often a user-readable string. An *execution_id* can be specified in a call to `Composition.run`, or left unspecified, in which case the Composition's `default execution_id <Composition.default_execution_id>` would be used. 
When looking for values after a run, it's important to know the execution context you are interested in, as shown below. :: >>> import psyneulink as pnl >>> c = pnl.Composition() >>> d = pnl.Composition() >>> t = pnl.TransferMechanism() >>> c.add_node(t) >>> d.add_node(t) >>> t.execute(1) array([[1.]]) >>> c.run({t: 5}) [[array([5.])]] >>> d.run({t: 10}) [[array([10.])]] >>> c.run({t: 20}, context='custom execution id') [[array([20.])]] # context None >>> print(t.parameters.value.get()) [[1.]] >>> print(t.parameters.value.get(c)) [[5.]] >>> print(t.parameters.value.get(d)) [[10.]] >>> print(t.parameters.value.get('custom execution id')) [[20.]] In general, anything that happens outside of a Composition run and without an explicit setting of execution context occurs in the `None` execution context. COMMENT .. _Composition_Controller: Controlling a Composition ------------------------- A Composition can be assigned a `controller <Composition.controller>`. This is a `ControlMechanism`, or a subclass of one, that modulates the parameters of Components within the Composition (including Components of nested Compositions). It typically does this based on the output of an `ObjectiveMechanism` that evaluates the value of other Mechanisms in the Composition, and provides the result to the `controller <Composition.controller>`. .. _Composition_Controller_Assignment: Assigning a Controller ====================== A `controller <Composition.controller>` can be assigned either by specifying it in the **controller** argument of the Composition's constructor, or using its `add_controller <Composition.add_controller>` method. COMMENT: TBI FOR COMPOSITION Specyfing Parameters to Control ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A controller can also be specified for the System, in the **controller** argument of the `System`. This can be an existing `ControlMechanism`, a constructor for one, or a class of ControlMechanism in which case a default instance of that class will be created. 
If an existing ControlMechanism or the constructor for one is used, then the `OutputPorts it monitors <ControlMechanism_ObjectiveMechanism>` and the `parameters it controls <ControlMechanism_Control_Signals>` can be specified using its `objective_mechanism <ControlMechanism.objective_mechanism>` and `control_signals <ControlMechanism.control_signals>` attributes, respectively. In addition, these can be specified in the **monitor_for_control** and **control_signal** arguments of the `System`, as described below. * **monitor_for_control** argument -- used to specify OutputPorts of Mechanisms in the System that should be monitored by the `ObjectiveMechanism` associated with the System's `controller <System.controller>` (see `ControlMechanism_ObjectiveMechanism`); these are used in addition to any specified for the ControlMechanism or its ObjectiveMechanism. These can be specified in the **monitor_for_control** argument of the `System` using any of the ways used to specify the *monitored_output_ports* for an ObjectiveMechanism (see `ObjectiveMechanism_Monitor`). In addition, the **monitor_for_control** argument supports two other forms of specification: * **string** -- must be the `name <OutputPort.name>` of an `OutputPort` of a `Mechanism <Mechanism>` in the System (see third example under `System_Control_Examples`). This can be used anywhere a reference to an OutputPort can ordinarily be used (e.g., in an `InputPort tuple specification <InputPort_Tuple_Specification>`). Any OutputPort with a name matching the string will be monitored, including ones with the same name that belong to different Mechanisms within the System. If an OutputPort of a particular Mechanism is desired, and it shares its name with other Mechanisms in the System, then it must be referenced explicitly (see `InputPort specification <InputPort_Specification>`, and examples under `System_Control_Examples`). 
| * **MonitoredOutputPortsOption** -- must be a value of `MonitoredOutputPortsOption`, and must appear alone or as a single item in the list specifying the **monitor_for_control** argument; any other specification(s) included in the list will take precedence. The MonitoredOutputPortsOption applies to all of the Mechanisms in the System except its `controller <System.controller>` and `LearningMechanisms <LearningMechanism>`. The *PRIMARY_OUTPUT_PORTS* value specifies that the `primary OutputPort <OutputPort_Primary>` of every Mechanism be monitored, whereas *ALL_OUTPUT_PORTS* specifies that *every* OutputPort of every Mechanism be monitored. | The default for the **monitor_for_control** argument is *MonitoredOutputPortsOption.PRIMARY_OUTPUT_PORTS*. The OutputPorts specified in the **monitor_for_control** argument are added to any already specified for the ControlMechanism's `objective_mechanism <ControlMechanism.objective_mechanism>`, and the full set is listed in the ControlMechanism's `monitored_output_ports <EVCControlMechanism.monitored_output_ports>` attribute, and its ObjectiveMechanism's `monitored_output_ports <ObjectiveMechanism.monitored_output_ports>` attribute). .. * **control_signals** argument -- used to specify the parameters of Components in the System to be controlled. These can be specified in any of the ways used to `specify ControlSignals <ControlMechanism_Control_Signals>` in the *control_signals* argument of a ControlMechanism. These are added to any `ControlSignals <ControlSignal>` that have already been specified for the `controller <System.controller>` (listed in its `control_signals <ControlMechanism.control_signals>` attribute), and any parameters that have directly been `specified for control <ParameterPort_Specification>` within the System (see `System_Control` below for additional details). COMMENT .. 
_Composition_Controller_Execution:

Controller Execution
====================

The `controller <Composition.controller>` is executed only if the Composition's `enable_controller <Composition.enable_controller>` attribute is True. This is generally done automatically when the `controller <Composition.controller>` is `assigned <Composition_Controller_Assignment>`. If enabled, the `controller <Composition.controller>` is generally executed either before or after all of the other Components in the Composition have been executed, as determined by the Composition's `controller_mode <Composition.controller_mode>` attribute. However, the Composition's `controller_condition <Composition.controller_condition>` attribute can be used to customize when it is executed. All three of these attributes can be specified in corresponding arguments of the Composition's constructor, or programmatically after it is constructed by assigning the desired value to the attribute.

COMMENT:
For Developers
--------------

..
_Composition_Execution_Contexts_Init: Initialization of Execution Contexts ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - The parameter values for any execution context can be copied into another execution context by using \ Component._initialize_from_context, which when called on a Component copies the values for all its parameters \ and recursively for all of the Component's `_dependent_components <Component._dependent_components>` - `_dependent_components <Component._dependent_components>` should be added to for any new Component that requires \ other Components to function properly (beyond "standard" things like Component.function, \ or Mechanism.input_ports, as these are added in the proper classes' _dependent_components) - the intent is that with ``_dependent_components`` set properly, calling \ ``obj._initialize_from_context(new_context, base_context)`` should be sufficient to run obj \ under **new_context** - a good example of a "nonstandard" override is `OptimizationControlMechanism._dependent_components` .. _Composition_TIming: *Timing* ======== When `run <Composition.run>` is called by a Composition, it calls that Composition's `execute <Composition.execute>` method once for each `input <Composition_Run_Inputs>` (or set of inputs) specified in the call to `run <Composition.run>`, which constitutes a `TRIAL` of execution. For each `TRIAL`, the Component makes repeated calls to its `scheduler <Composition.scheduler>`, executing the Components it specifies in each `TIME_STEP`, until every Component has been executed at least once or another `termination condition <Scheduler_Termination_Conditions>` is met. The `scheduler <Composition.scheduler>` can be used in combination with `Condition` specifications for individual Components to execute different Components at different time scales. Runtime Params COMMENT .. 
_Composition_Learning:

Learning in a Composition
-------------------------

* `Composition_Learning_Standard`
* `Composition_Learning_AutodiffComposition`
* `Composition_Learning_UDF`

Learning is used to modify the `Projections <Projection>` between Mechanisms in a Composition. More specifically, it modifies the `matrix <MappingProjection.matrix>` parameter of those `MappingProjections <MappingProjection>`, which implements the strengths ("weights") of the associations between representations in the Mechanisms they connect. There are three ways of implementing learning in a Composition: i) using `standard PsyNeuLink Components <Composition_Learning_Standard>`; ii) using the `AutodiffComposition <Composition_Learning_AutodiffComposition>` -- a specialized subclass of Composition that executes learning using `PyTorch <https://pytorch.org>`_; and iii) by using `UserDefinedFunctions <UserDefinedFunction>`. The advantage of using standard PsyNeuLink components is that it assigns each operation involved in learning to a dedicated Component. This helps make clear exactly what those operations are, the sequence in which they are carried out, and how they interact with one another. However, this can also make execution inefficient, due to the "overhead" incurred by distributing the calculations over different Components. If more efficient computation is critical, then the `AutodiffComposition` can be used to execute a compatible PsyNeuLink Composition in PyTorch, or one or more `UserDefinedFunctions <UserDefinedFunction>` can be assigned to either PyTorch functions or those in any other Python environment that implements learning and accepts and returns tensors. Each of these approaches is described in more detail below.

..
_Composition_Learning_Standard: *Learning Using PsyNeuLink Components* ====================================== * `Composition_Learning_Unsupervised` * `Composition_Learning_Supervised` When learning is `implemented using standard PsyNeuLink Components <Composition_Learning_Standard>`, each calculation and/or operation involved in learning -- including those responsible for computing errors, and for using those to modify the Projections between Mechanisms, is assigned to a different PsyNeuLink `learning-related Component <Composition_Learning_Components>`. These can be used to implement any form of learning. Learning is generally considered to fall into two broad classes: *unsupervised*, in which associative strenghts are modified by mere exposure to the inputs, in order to capture structure and/or relationships among them; and *supervised*, which in which the associative strengths are modified so that each input generates a desired output (see `<https://www.geeksforgeeks.org/supervised-unsupervised-learning/>`_ for a useful summary). Both forms of learning can be implemented in a Composition, using `LearningMechanisms <LearningMechanism>` that compute the changes to make to the `matrix <MappingProjection.matrix>` parameter of `MappingProjections <MappingProjection>` being learned, and `LearningProjections <LearningProjection>` that apply those changes to the MappingProjections). In addition, supervised learning uses a `ComparatorMechanism` to compute the error between the response generated by the Composition to the input stimulus, and the target stimulus used to designate the desired response. In most cases, the LearningMechanisms, LearningProjections and, where needed, ComparatorMechanism are generated automatically, as described for each form of learning below. However, these can also be configured manually using their constructors, or modified by assigning values to their attributes. .. 
_Composition_Learning_Unsupervised:

Unsupervised Learning
~~~~~~~~~~~~~~~~~~~~~

Unsupervised learning is implemented using a `RecurrentTransferMechanism`, setting its **enable_learning** argument
to True, and specifying the desired `LearningFunction <LearningFunctions>` in its **learning_function** argument.  The
default is `Hebbian`, however others can be specified (such as `ContrastiveHebbian` or `Kohonen`). When a
RecurrentTransferMechanism with learning enabled is added to a Composition, an `AutoAssociativeLearningMechanism` that
is appropriate for the specified learning_function is automatically constructed and added to the Composition, as is a
`LearningProjection` from the AutoAssociativeLearningMechanism to the RecurrentTransferMechanism's
`recurrent_projection <RecurrentTransferMechanism.recurrent_projection>`.  When the Composition is run and the
RecurrentTransferMechanism is executed, its AutoAssociativeLearningMechanism is also executed, which updates the
`matrix <AutoAssociativeProjection.matrix>` of its `recurrent_projection <RecurrentTransferMechanism.recurrent_projection>`
in response to its input.

COMMENT:
    • DISCUSS LEARNING COMPONENTS RETURNED ONCE add_node AND add_linear_processing_pathway RETURN THEM
    • ADD EXAMPLE HERE
COMMENT

.. _Composition_Learning_Supervised:

Supervised Learning
~~~~~~~~~~~~~~~~~~~

* `Composition_Learning_Methods`
* `Composition_Learning_Components`
* `Composition_Learning_Execution`

COMMENT:
    TBI:  Supervised learning is implemented using a Composition's `add_learning_pathway` method, and specifying an
    appropriate `LearningFunction <LearningFunctions>` in its **learning_function** argument.
    XXXMORE HERE ABOUT TYPES OF FUNCTIONS
    • MODIFY REFERENCE TO LEARNING COMPONENT NAMES WHEN THEY ARE IMPLEMENTED AS AN ENUM CLASS
    • ADD EXAMPLES - POINT TO ONES IN BasicsAndSampler
COMMENT

..
_Composition_Learning_Methods: *Learning Methods* ^^^^^^^^^^^^^^^^^^ Supervised learning is implemented using a Composition's method for the desired type of learning. There are currently three such methods: • `add_linear_learning_pathway` • `add_reinforcement_learning_pathway` • `add_td_learning_pathway` • `add_backpropagation_learning_pathway`. Each uses the Composition's `add_linear_processing_pathway` method to create a *learning sequence* specified in their **pathway** argument: a contiguous sequence of `ProcessingMechanisms <ProcessingMechanism>` and the `MappingProjections <MappingProjection>` between them, in which learning modifies the `matrix <MappingProjection.matrix>` parameter of the MappingProjections in the sequence, so that the input to the first ProcessingMechanism in the sequence generates an output from the last ProcessingMechanism that matches as closely as possible the value specified for the `target mechanism <Process_Learning_Components>` in the **inputs** argument of the Composition's `run <Composition.run>` method. The Mechanisms in the pathway must be compatible with learning (that is, their `function <Mechanism_Base.function>` must be compatible with the `function <LearningMechanism.function>` of the `LearningMechanism` for the MappingProjections they receive (see `LearningMechanism_Function`). The Composition's `learning methods <Composition_Learning_Methods>` return the set of learning components generates for the pathway, as described below. .. _Composition_Learning_Components: *Learning Components* ^^^^^^^^^^^^^^^^^^^^^ For each learning sequence specified in a `learning method <Composition_Learning_Methods>`, it creates the following Components, and assigns to them the `NodeRoles <NodeRole>` indicated: .. 
_COMPARATOR_MECHANISM: * *COMPARATOR_MECHANISM* `ComparatorMechanism` -- used to `calculate an error signal <ComparatorMechanism_Execution>` for the sequence by comparing the value received by the ComparatorMechanism's *SAMPLE* `InputPort <ComparatorMechanism_Structure>` (from the `output <LearningMechanism_Activation_Output>` of the last Processing Mechanism in the learning sequence) with the value received in the *COMPARATOR_MECHANISM*'s *TARGET* `InputPort <ComparatorMechanism_Structure>` (from the *TARGET_MECHANISM* generated by the method -- see below); this is assigned the `NodeRole` `LEARNING` in the Composition. .. .. _TARGET_MECHANISM: * *TARGET_MECHANISM* -- receives the value to be used by the *COMPARATOR_MECHANISM* as the target in computing the error signal (see above); that value must be specified in the **inputs** argument of the Composition's `run <Composition.run>` method (as the input to the *TARGET_MECHANISM*; this is assigned the `NodeRoles <NodeRole>` `TARGET` and `LEARNING` in the Composition; .. * a MappingProjection that projects from the last ProcessingMechanism in the learning sequence to the *SAMPLE* `InputPort <ComparatorMechanism_Structure>` of the *COMPARATOR_MECHANISM*; .. * a MappingProjection that projects from the *TARGET_MECHANISM* to the *TARGET* `InputPort <ComparatorMechanism_Structure>` of the *COMPARATOR_MECHANISM*; .. .. 
_LEARNING_MECHANISM: * a *LEARNING_MECHANISM* for each MappingProjection in the sequence, each of which calculates the `learning_signal <LearningMechanism.learning_signal>` used to modify the `matrix <MappingProjection.matrix>` parameter for the coresponding MappingProjection, along with a `LearningSignal` and `LearningProjection` that convey the `learning_signal <LearningMechanism.learning_signal>` to the MappingProjection's *MATRIX* `ParameterPort <Mapping_Matrix_ParameterPort>`; depending on learning method, additional MappingProjections may be created to and/or from the LearningMechanism -- see `LearningMechanism_Learning_Configurations` for details); these are assigned the `NodeRole` `LEARNING` in the Composition. The items with names in the list above are returned by the learning method in a dictionary, in which each name is the key of an entry, and the object(s) created of that type are its value. See `LearningMechanism_Single_Layer_Learning` for a more detailed description and figure showing these Components. If the learning sequence involves more than two ProcessingMechanisms (e.g. using `add_backpropagation_learning_pathway` for a multilayered neural network), then additional LearningMechanisms are created, along with MappingProjections that provides them with the `error_signal <LearningMechanism.error_signal>` from the preceding LearningMechanism, and `LearningProjections <LearningProjection>` that modify the additional MappingProjections (*LEARNED_PROJECTION*\\s) in the sequence, as shown for an example in the figure below. These additional learning components are listed in the *LEARNING_MECHANISM* and *LEARNED_PROJECTION* entries of the dictionary returned by the learning method. .. _Composition_MultilayerLearning_Figure: **Learning Components** .. 
figure:: _static/Composition_Multilayer_Learning_fig.svg :alt: Schematic of LearningMechanism and LearningProjections in a Process :scale: 50 % Components for sequence of three Mechanisms generated by a call to a learning method (e.g., ``add_backpropagation_learning_pathway(pathway=[A,B,C])``), with `NodeRole` assigned to each node in the Composition's `graph <Composition.graph>` (in italics below Mechanism type) and the names of the learning components (capitalized in italics) returned by the learning method. .. _Composition_XOR_Example: The following example implements a simple three-layered network that learns the XOR function (see `figure <Composition_Learning_Output_vs_Terminal_Figure>` below):: # Construct Composition: >>> input = TransferMechanism(name='Input', default_variable=np.zeros(2)) >>> hidden = TransferMechanism(name='Hidden', default_variable=np.zeros(10), function=Logistic()) >>> output = TransferMechanism(name='Output', default_variable=np.zeros(1), function=Logistic()) >>> input_weights = MappingProjection(name='Input Weights', matrix=np.random.rand(2,10)) >>> output_weights = MappingProjection(name='Output Weights', matrix=np.random.rand(10,1)) >>> xor_comp = Composition('XOR Composition') >>> learning_components = xor_comp.add_backpropagation_learning_pathway( >>> pathway=[input, input_weights, hidden, output_weights, output]) >>> target = learning_components[TARGET_MECHANISM] # Create inputs: Trial 1 Trial 2 Trial 3 Trial 4 >>> xor_inputs = {'stimuli':[[0, 0], [0, 1], [1, 0], [1, 1]], >>> 'targets':[ [0], [1], [1], [0] ]} >>> xor_comp.run(inputs={input:xor_inputs['stimuli'], >>> target:xor_inputs['targets']}, >>> num_trials=1, >>> animate={'show_learning':True}) The description and example above pertain to simple linear sequences. 
However, more complex configurations, with convergent, divergent and/or intersecting sequences can be built using
multiple calls to the learning method (see `example <BasicsAndSampler_Rumelhart_Model>` in `BasicsAndSampler`).  In
each call, the learning method determines how the sequence to be added relates to any existing ones with which it
abuts or intersects, and automatically creates and configures the relevant learning components so that the error terms
are properly computed and propagated by each LearningMechanism to the next in the configuration.  It is important to
note that, in doing so, the status of a Mechanism in the final configuration takes precedence over its status in any
of the individual sequences specified in the `learning methods <Composition_Learning_Methods>` when building the
Composition.  In particular, whereas ordinarily the last ProcessingMechanism of a sequence specified in a learning
method projects to a *COMPARATOR_MECHANISM*, this may be superseded if multiple sequences are created.  This is the
case if:  i) the Mechanism is in a sequence that is contiguous (i.e., abuts or intersects) with others already in the
Composition, ii) the Mechanism appears in any of those other sequences and, iii) it is not the last Mechanism in
*all* of them; in that case, it will not project to a *COMPARATOR_MECHANISM* (see `figure below
<Composition_Learning_Output_vs_Terminal_Figure>` for an example).  Furthermore, if it *is* the last Mechanism in all
of them (that is, all of the specified pathways converge on that Mechanism), only one *COMPARATOR_MECHANISM* is
created for that Mechanism (i.e., not one for each sequence).
Finally, it should be noted that, by default, learning components are *not* assigned the `NodeRole` of `OUTPUT` even though they may be the `TERMINAL` Mechanism of a Composition; conversely, even though the last Mechanism of a learning sequence projects to a *COMPARATOR_MECHANISM*, and thus is not the `TERMINAL` node of a Composition, if it does not project to any other Mechanisms in the Composition it is nevertheless assigned as an `OUTPUT` of the Composition. That is, Mechanisms that would otherwise have been the `TERMINAL` Mechanism of a Composition preserve their role as an `OUTPUT` of the Composition if they are part of a learning sequence even though they project to another Mechanism (the *COMPARATOR_MECHANISM*) in the Composition. .. _Composition_Learning_Output_vs_Terminal_Figure: **OUTPUT** vs. **TERMINAL** Roles in Learning Configuration .. figure:: _static/Composition_Learning_OUTPUT_vs_TERMINAL_fig.svg :alt: Schematic of Mechanisms and Projections involved in learning :scale: 50 % Configuration of Components generated by the creation of two intersecting learning sequences (e.g., ``add_backpropagation_learning_pathway(pathway=[A,B])`` and ``add_backpropagation_learning_pathway(pathway=[D,B,C])``). Mechanism B is the last Mechanism of the sequence specified for the first pathway, and so would project to a `ComparatorMechanism`, and would be assigned as an `OUTPUT` node of the Composition, if that pathway was created on its own. However, since Mechanims B is also in the middle of the sequence specified for the second pathway, it does not project to a ComparatorMechanism, and is relegated to being an `INTERNAL` node of the Composition Mechanism C is now the one that projects to the ComparatorMechanism and assigned as the `OUTPUT` node. .. 
_Composition_Learning_Execution: *Execution of Learning* ^^^^^^^^^^^^^^^^^^^^^^^ When a Composition is run that contains one or more learning sequences, all of the ProcessingMechanisms for a sequence are executed first, and then its LearningComponents. This is shown in an animation of the XOR network from the `example above <Composition_XOR_Example>`: .. _Composition_Learning_Animation_Figure: **Composition with Learning** .. figure:: _static/Composition_XOR_animation.gif :alt: Animation of Composition with learning :scale: 50 % Animation of XOR Composition in example above when it is executed by calling its `run <Composition.run>` method with the argument ``animate={'show_learning':True}``. Note that, since the `learning components <Composition_Learning_Components>` are not executed until after the processing components, the change to the weights of the MappingProjections in the processing pathway are not made until after it has executed. Thus, as with `execution of a Projection <Projection_Execution>`, those changes will not be observed in the values of their `matrix <MappingProjection.matrix>` parameters until after they are next executed (see :ref:`Lazy Evaluation <LINK>` for an explanation of "lazy" updating). .. _Composition_Learning_AutodiffComposition: *Learning Using AutodiffCompositon* =================================== COMMENT: Change reference to example below to point to Rumelhart Semantic Network Model Script once implemented COMMENT The `AutodiffComposition` can be used to implement a Composition in PsyNeuLink, which is then executed using `PyTorch <https://pytorch.org>`_ (see `example <BasicsAndSampler_Rumelhart_Model>` in `BasicsAndSampler`). The AutodiffComposition constructor provides arguments for configuring the PyTorch implementation in various ways; the Composition is then built using the same methods (e.g., `add_node`, `add_projection`, `add_linear_processing_pathway`, etc.) 
as any other Composition, and it is executed using its `run <AutodiffComposition.run>` method.  Note that there is no
need to use any `learning methods <Composition_Learning_Methods>` — the Composition is translated into PyTorch objects
and functions, which are called when it is run.  It can be run in training mode (during which learning occurs) or test
mode (which runs the Composition without learning).  The advantage of this approach is that it allows the Composition
to be implemented in PsyNeuLink, while exploiting the efficiency of execution in PyTorch (which can yield as much as
three orders of magnitude improvement).  However, a disadvantage is that there are restrictions on the kinds of
Compositions that can be implemented in this way.  First, because it relies on PyTorch, it is best suited for use with
`supervised learning <Composition_Learning_Supervised>`, although it can be used for some forms of `unsupervised
learning <Composition_Learning_Unsupervised>` that are supported in PyTorch (e.g., `self-organized maps
<https://github.com/giannisnik/som>`_).  Second, all of the Components in the Composition are subject to, and must be
compatible with, learning.  This means that it cannot be used with a Composition that contains any `modulatory
components <ModulatorySignal_Anatomy_Figure>` or Components that are subject to modulation, whether by
ControlMechanisms within or outside the Composition;  this includes a `controller <Composition_Controller>` or any
LearningMechanisms.  An AutodiffComposition can be `nested in a Composition <Composition_Nested>` that has such other
Components.  During learning, none of the internal Components of the AutodiffComposition (e.g., intermediate layers of
a neural network model) are accessible to the other Components of the outer Composition (e.g., as sources of
information, or for modulation).
However, when learning is turned off, the AutodiffComposition functions like any other, and all of its internal
Components are accessible to other Components of the outer Composition.  Thus, as long as access to its internal
Components is not needed during learning, an `AutodiffComposition` can be trained, and then used to execute the
trained Composition like any other.

.. _Composition_Learning_UDF:

*Learning Using UserDefinedFunctions*
=====================================

If execution efficiency is critical and the `AutodiffComposition` is too restrictive, a function from any Python
environment that supports learning can be assigned as the `function <Mechanism_Base.function>` of a `Mechanism
<Mechanism>`, in which case it is automatically wrapped as a `UserDefinedFunction`.  For example, the `forward and
backward methods <https://pytorch.org/docs/master/notes/extending.html>`_ of a PyTorch object can be assigned in this
way.  The advantage of this approach is that it can be applied to any Python function that adheres to the requirements
of a `UserDefinedFunction`.  The disadvantage is that it can't be `compiled`, so efficiency may be compromised.  It
must also be carefully coordinated with the execution of other learning-related Components in the Composition, to
ensure that each function is called at the appropriate times during execution.  Furthermore, as with an
`AutodiffComposition`, the internal constituents of the object (e.g., intermediate layers of a neural network model)
are not accessible to other Components in the Composition (e.g., as a source of information or for modulation).

..
_Composition_Visualization: Visualizing a Composition ------------------------- COMMENT: XXX - ADD EXAMPLE OF NESTED COMPOSITION XXX - ADD DISCUSSION OF show_controller AND show_learning COMMENT The `show_graph <Composition.show_graph>` method generates a display of the graph structure of Nodes (Mechanisms and Nested Compositions) and Projections in the Composition (based on the Composition's `processing graph <Composition.processing_graph>`). By default, Nodes are shown as ovals labeled by their `names <Mechanism.name>`, with the Composition's `INPUT <NodeRole.INPUT>` Mechanisms shown in green, its `OUTPUT <NodeRole.OUTPUT>` Mechanisms shown in red, and Projections shown as unlabeled arrows, as illustrated for the Composition in the example below: .. _Composition_show_graph_basic_figure: +-----------------------------------------------------------+----------------------------------------------------------+ | >>> from psyneulink import * | .. figure:: _static/Composition_show_graph_basic_fig.svg | | >>> a = ProcessingMechanism( | | | name='A', | | | ... size=3, | | | ... output_ports=[RESULT, MEAN] | | | ... ) | | | >>> b = ProcessingMechanism( | | | ... name='B', | | | ... size=5 | | | ... ) | | | >>> c = ProcessingMechanism( | | | ... name='C', | | | ... size=2, | | | ... function=Logistic(gain=pnl.CONTROL) | | | ... ) | | | >>> comp = Composition( | | | ... name='Comp', | | | ... enable_controller=True | | | ... ) | | | >>> comp.add_linear_processing_pathway([a,c]) | | | >>> comp.add_linear_processing_pathway([b,c]) | | | >>> ctlr = OptimizationControlMechanism( | | | ... name='Controller', | | | ... monitor_for_control=[(pnl.MEAN, a)], | | | ... control_signals=(GAIN, c), | | | ... agent_rep=comp | | | ... ) | | | >>> comp.add_controller(ctlr) | | +-----------------------------------------------------------+----------------------------------------------------------+ Note that the Composition's `controller <Composition.controller>` is not shown by default. 
However this can be shown, along with other information, using options in the Composition's `show_graph <Composition.show_graph>` method. The figure below shows several examples. .. _Composition_show_graph_options_figure: **Output of show_graph using different options** .. figure:: _static/Composition_show_graph_options_fig.svg :alt: Composition graph examples :scale: 150 % Displays of the Composition in the `example above <Composition_show_graph_basic_figure>`, generated using various options of its `show_graph <Composition.show_graph>` method. **Panel A** shows the graph with its Projections labeled and Component dimensions displayed. **Panel B** shows the `controller <Composition.controller>` for the Composition and its associated `ObjectiveMechanism` using the **show_controller** option (controller-related Components are displayed in blue by default). **Panel C** adds the Composition's `CompositionInterfaceMechanisms <CompositionInterfaceMechanism>` using the **show_cim** option. **Panel D** shows a detailed view of the Mechanisms using the **show_node_structure** option, that includes their `Ports <Port>` and their `roles <NodeRole>` in the Composition. **Panel E** shows an even more detailed view using **show_node_structure** as well as **show_cim**. If a Composition has one ore more Compositions nested as Nodes within it, these can be shown using the **show_nested** option. For example, the pathway in the script below contains a sequence of Mechanisms and nested Compositions in an outer Composition, ``comp``: .. _Composition_show_graph_show_nested_figure: +------------------------------------------------------+---------------------------------------------------------------+ | >>> mech_stim = ProcessingMechanism(name='STIMULUS') |.. 
figure:: _static/Composition_show_graph_show_nested_fig.svg | | >>> mech_A1 = ProcessingMechanism(name='A1') | | | >>> mech_B1 = ProcessingMechanism(name='B1') | | | >>> comp1 = Composition(name='comp1') | | | >>> comp1.add_linear_processing_pathway([mech_A1, | | | ... mech_B1]) | | | >>> mech_A2 = ProcessingMechanism(name='A2') | | | >>> mech_B2 = ProcessingMechanism(name='B2') | | | >>> comp2 = Composition(name='comp2') | | | >>> comp2.add_linear_processing_pathway([mech_A2, | | | ... mech_B2]) | | | >>> mech_resp = ProcessingMechanism(name='RESPONSE') | | | >>> comp = Composition() | | | >>> comp.add_linear_processing_pathway([mech_stim, | | | ... comp1, comp2,| | | ... mech_resp]) | | | >>> comp.show_graph(show_nested=True) | | +------------------------------------------------------+---------------------------------------------------------------+ .. _Composition_Class_Reference: Class Reference --------------- """ import collections import inspect import itertools import logging import warnings import sys import numpy as np import typecheck as tc from PIL import Image from psyneulink.core import llvm as pnlvm from psyneulink.core.components.component import Component, ComponentsMeta from psyneulink.core.components.functions.function import is_function_type from psyneulink.core.components.functions.interfacefunctions import InterfacePortMap from psyneulink.core.components.functions.learningfunctions import \ LearningFunction, Reinforcement, BackPropagation, TDLearning from psyneulink.core.components.functions.combinationfunctions import LinearCombination, PredictionErrorDeltaFunction from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import \ OptimizationControlMechanism from psyneulink.core.components.mechanisms.modulatory.learning.learningmechanism import \ LearningMechanism, ACTIVATION_INPUT_INDEX, ACTIVATION_OUTPUT_INDEX, ERROR_SIGNAL, 
ERROR_SIGNAL_INDEX from psyneulink.core.components.mechanisms.processing.compositioninterfacemechanism import CompositionInterfaceMechanism from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import AGENT_REP from psyneulink.core.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism from psyneulink.core.components.projections.projection import DuplicateProjectionError from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection from psyneulink.core.components.projections.modulatory.modulatoryprojection import ModulatoryProjection_Base from psyneulink.core.components.projections.modulatory.controlprojection import ControlProjection from psyneulink.core.components.projections.modulatory.learningprojection import LearningProjection from psyneulink.core.components.shellclasses import Composition_Base from psyneulink.core.components.shellclasses import Mechanism, Projection from psyneulink.core.components.ports.port import Port from psyneulink.core.components.ports.inputport import InputPort, SHADOW_INPUTS from psyneulink.core.components.ports.parameterport import ParameterPort from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.components.ports.modulatorysignals.controlsignal import ControlSignal from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism from psyneulink.core.globals.context import Context, ContextFlags, handle_external_context from psyneulink.core.globals.keywords import \ AFTER, ALL, BEFORE, BOLD, BOTH, COMPARATOR_MECHANISM, COMPONENT, COMPOSITION, CONDITIONS, \ CONTROL, CONTROLLER, CONTROL_SIGNAL, FUNCTIONS, HARD_CLAMP, IDENTITY_MATRIX, INPUT, \ LABELS, LEARNED_PROJECTION, LEARNING_MECHANISM, MATRIX, MATRIX_KEYWORD_VALUES, MAYBE, MECHANISM, MECHANISMS, \ 
MODEL_SPEC_ID_COMPOSITION, MODEL_SPEC_ID_NODES, MODEL_SPEC_ID_PROJECTIONS, MODEL_SPEC_ID_PSYNEULINK, \ MODEL_SPEC_ID_RECEIVER_MECH, MODEL_SPEC_ID_SENDER_MECH, MONITOR, MONITOR_FOR_CONTROL, MSE, NAME, NO_CLAMP, \ ONLINE, OUTCOME, OUTPUT, OWNER_VALUE, PATHWAY, PROJECTION, PROJECTIONS, PULSE_CLAMP, ROLES, \ SAMPLE, SIMULATIONS, SOFT_CLAMP, SSE, TARGET, TARGET_MECHANISM, VALUES, VARIABLE, WEIGHT from psyneulink.core.globals.log import CompositionLog, LogCondition from psyneulink.core.globals.parameters import Parameter, ParametersBase from psyneulink.core.globals.registry import register_category from psyneulink.core.globals.utilities import ContentAddressableList, NodeRole, call_with_pruned_args, convert_to_list from psyneulink.core.scheduling.condition import All, Always, Condition, EveryNCalls from psyneulink.core.scheduling.scheduler import Scheduler from psyneulink.core.scheduling.time import TimeScale from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel, PreferenceSet, _assign_prefs from psyneulink.core.globals.preferences.basepreferenceset import BasePreferenceSet from psyneulink.library.components.projections.pathway.autoassociativeprojection import AutoAssociativeProjection from psyneulink.library.components.mechanisms.processing.objective.comparatormechanism import ComparatorMechanism, MSE from psyneulink.library.components.mechanisms.processing.objective.predictionerrormechanism import \ PredictionErrorMechanism __all__ = [ 'Composition', 'CompositionError', 'CompositionRegistry', 'MECH_FUNCTION_PARAMS', 'STATE_FUNCTION_PARAMS' ] # show_graph animation options NUM_TRIALS = 'num_trials' NUM_RUNS = 'num_Runs' UNIT = 'unit' DURATION = 'duration' MOVIE_DIR = 'movie_dir' MOVIE_NAME = 'movie_name' SAVE_IMAGES = 'save_images' SHOW = 'show' INITIAL_FRAME = 'INITIAL_FRAME' EXECUTION_SET = 'EXECUTION_SET' SHOW_CIM = 'show_cim' SHOW_CONTROLLER = 'show_controller' SHOW_LEARNING = 'show_learning' logger = logging.getLogger(__name__) 
CompositionRegistry = {} class CompositionError(Exception): def __init__(self, error_value): self.error_value = error_value def __str__(self): return repr(self.error_value) class RunError(Exception): def __init__(self, error_value): self.error_value = error_value def __str__(self): return repr(self.error_value) class Vertex(object): """ Stores a Component for use with a `Graph` Arguments --------- component : Component the `Component <Component>` represented by this Vertex parents : list[Vertex] the `Vertices <Vertex>` corresponding to the incoming edges of this `Vertex` children : list[Vertex] the `Vertices <Vertex>` corresponding to the outgoing edges of this `Vertex` Attributes ---------- component : Component the `Component <Component>` represented by this Vertex parents : list[Vertex] the `Vertices <Vertex>` corresponding to the incoming edges of this `Vertex` children : list[Vertex] the `Vertices <Vertex>` corresponding to the outgoing edges of this `Vertex` """ def __init__(self, component, parents=None, children=None, feedback=None): self.component = component if parents is not None: self.parents = parents else: self.parents = [] if children is not None: self.children = children else: self.children = [] self.feedback = feedback self.backward_sources = set() def __repr__(self): return '(Vertex {0} {1})'.format(id(self), self.component) class Graph(object): """ A Graph of vertices and edges. Attributes ---------- comp_to_vertex : Dict[`Component <Component>` : `Vertex`] maps `Component` in the graph to the `Vertices <Vertex>` that represent them. vertices : List[Vertex] the `Vertices <Vertex>` contained in this Graph. 
dependency_dict : Dict[`Component` : Set(`Compnent`)] maps each Component to those from which it receives Projections """ def __init__(self): self.comp_to_vertex = collections.OrderedDict() # Translate from PNL Mech, Comp or Proj to corresponding vertex self.vertices = [] # List of vertices within graph def copy(self): """ Returns ------- A copy of the Graph. `Vertices <Vertex>` are distinct from their originals, and point to the same `Component <Component>` object : `Graph` """ g = Graph() for vertex in self.vertices: g.add_vertex(Vertex(vertex.component, feedback=vertex.feedback)) for i in range(len(self.vertices)): g.vertices[i].parents = [g.comp_to_vertex[parent_vertex.component] for parent_vertex in self.vertices[i].parents] g.vertices[i].children = [g.comp_to_vertex[parent_vertex.component] for parent_vertex in self.vertices[i].children] return g def add_component(self, component, feedback=False): if component in [vertex.component for vertex in self.vertices]: logger.info('Component {1} is already in graph {0}'.format(component, self)) else: vertex = Vertex(component, feedback=feedback) self.comp_to_vertex[component] = vertex self.add_vertex(vertex) def add_vertex(self, vertex): if vertex in self.vertices: logger.info('Vertex {1} is already in graph {0}'.format(vertex, self)) else: self.vertices.append(vertex) self.comp_to_vertex[vertex.component] = vertex def remove_component(self, component): try: self.remove_vertex(self.comp_to_vertex[component]) except KeyError as e: raise CompositionError('Component {1} not found in graph {2}: {0}'.format(e, component, self)) def remove_vertex(self, vertex): try: for parent in vertex.parents: parent.children.remove(vertex) for child in vertex.children: child.parents.remove(vertex) self.vertices.remove(vertex) del self.comp_to_vertex[vertex.component] # TODO: # check if this removal puts the graph in an inconsistent state except ValueError as e: raise CompositionError('Vertex {1} not found in graph {2}: {0}'.format(e, 
vertex, self)) def connect_components(self, parent, child): try: self.connect_vertices(self.comp_to_vertex[parent], self.comp_to_vertex[child]) except KeyError as e: if parent not in self.comp_to_vertex: raise CompositionError("Sender ({}) of {} ({}) not (yet) assigned". format(repr(parent.name), Projection.__name__, repr(child.name))) elif child not in self.comp_to_vertex: raise CompositionError("{} ({}) to {} not (yet) assigned". format(Projection.__name__, repr(parent.name), repr(child.name))) else: raise KeyError(e) def connect_vertices(self, parent, child): if child not in parent.children: parent.children.append(child) if parent not in child.parents: child.parents.append(parent) def get_parents_from_component(self, component): """ Arguments --------- component : Component the Component whose parents will be returned Returns ------- A list[Vertex] of the parent `Vertices <Vertex>` of the Vertex associated with **component** : list[`Vertex`] """ return self.comp_to_vertex[component].parents def get_children_from_component(self, component): """ Arguments --------- component : Component the Component whose children will be returned Returns ------- A list[Vertex] of the child `Vertices <Vertex>` of the Vertex associated with **component** : list[`Vertex`] """ return self.comp_to_vertex[component].children def get_forward_children_from_component(self, component): """ Arguments --------- component : Component the Component whose parents will be returned Returns ------- # FIX 8/12/19: MODIFIED FEEDBACK - # IS THIS A CORRECT DESCRIPTION? 
(SAME AS get_forward_parents_from_component) A list[Vertex] of the parent `Vertices <Vertex>` of the Vertex associated with **component**: list[`Vertex`] """ forward_children = [] for child in self.comp_to_vertex[component].children: if component not in self.comp_to_vertex[child.component].backward_sources: forward_children.append(child) return forward_children def get_forward_parents_from_component(self, component): """ Arguments --------- component : Component the Component whose parents will be returned Returns ------- # FIX 8/12/19: MODIFIED FEEDBACK - # IS THIS A CORRECT DESCRIPTION? (SAME AS get_forward_children_from_component) A list[Vertex] of the parent `Vertices <Vertex>` of the Vertex associated with **component** : list[`Vertex`] """ forward_parents = [] for parent in self.comp_to_vertex[component].parents: if parent.component not in self.comp_to_vertex[component].backward_sources: forward_parents.append(parent) return forward_parents def get_backward_children_from_component(self, component): """ Arguments --------- component : Component the Component whose children will be returned Returns ------- A list[Vertex] of the child `Vertices <Vertex>` of the Vertex associated with **component** : list[`Vertex`] """ backward_children = [] for child in self.comp_to_vertex[component].children: if component in self.comp_to_vertex[child.component].backward_sources: backward_children.append(child) return backward_children def get_backward_parents_from_component(self, component): """ Arguments --------- component : Component the Component whose children will be returned Returns ------- A list[Vertex] of the child `Vertices <Vertex>` of the Vertex associated with **component** : list[`Vertex`] """ return list(self.comp_to_vertex[component].backward_sources) @property def dependency_dict(self): return dict((v.component,set(d.component for d in v.parents)) for v in self.vertices) # Options for show_node_structure argument of show_graph() MECH_FUNCTION_PARAMS = 
"MECHANISM_FUNCTION_PARAMS" STATE_FUNCTION_PARAMS = "STATE_FUNCTION_PARAMS" class Composition(Composition_Base, metaclass=ComponentsMeta): """ Composition( controller=None, enable_controller=None, controller_mode=AFTER, controller_condition=Always, enable_learning=True, name=None, prefs=Composition.classPreferences context=None) Base class for Composition. Arguments --------- controller: `OptimizationControlmechanism` : default None specifies the `OptimizationControlMechanism` to use as the Composition's `controller <Composition.controller>` (see `Composition_Controller` for details). enable_controller: bool : default None specifies whether the Composition's `controller <Composition.controller>` is executed when the Composition is executed. Set to True by default if **controller** specified; if set to False, the `controller <Composition.controller>` is ignored when the Composition is executed. controller_mode: Enum[BEOFRE|AFTER] : default AFTER specifies whether the controller is executed before or after the rest of the Composition in each trial. Must be either the keyword *BEFORE* or *AFTER*. controller_condition: Condition : default Always specifies when the Composition's `controller <Composition.controller>` is executed in a trial. enable_learning: bool : default True specifies whether `LearningMechanisms <LearningMechanism>` in the Composition are executed when it is executed. name : str : default see `name <Composition.name>` specifies the name of the Composition. prefs : PreferenceSet or specification dict : default Composition.classPreferences specifies the `PreferenceSet` for the Composition; see `prefs <Composition.prefs>` for details. Attributes ---------- graph : `Graph` the full `Graph` associated with this Composition. 
Contains both Nodes (`Mechanisms <Mechanism>` or `Compositions <Composition>`) and `Projections <Projection>` nodes : `list[Mechanisms and Compositions]` a list of all Nodes (`Mechanisms <Mechanism>` and/or `Compositions <Composition>`) contained in this Composition input_CIM : `CompositionInterfaceMechanism` mediates input values for the INPUT nodes of the Composition. If the Composition is nested, then the input_CIM and its InputPorts serve as proxies for the Composition itself in terms of afferent projections. input_CIM_ports : dict a dictionary in which keys are InputPorts of INPUT Nodes in a composition, and values are lists containing two items: the corresponding InputPort and OutputPort on the input_CIM. afferents : ContentAddressableList a list of all of the `Projections <Projection>` to the Composition's `input_CIM`. output_CIM : `CompositionInterfaceMechanism` aggregates output values from the OUTPUT nodes of the Composition. If the Composition is nested, then the output_CIM and its OutputPorts serve as proxies for Composition itself in terms of efferent projections. output_CIM_ports : dict a dictionary in which keys are OutputPorts of OUTPUT Nodes in a composition, and values are lists containing two items: the corresponding InputPort and OutputPort on the input_CIM. efferents : ContentAddressableList a list of all of the `Projections <Projection>` from the Composition's `output_CIM`. env : Gym Forager Environment : default: None stores a Gym Forager Environment so that the Composition may interact with this environment within a single call to `run <Composition.run>`. shadows : dict a dictionary in which the keys are all in the Composition and the values are lists of any Nodes that `shadow <InputPort_Shadow_Inputs>` the original Node's input. controller : OptimizationControlMechanism identifies the `OptimizationControlMechanism` used as the Composition's controller (see `Composition_Controller` for details). 
    enable_controller : bool
        determines whether the Composition's `controller <Composition.controller>` is executed in each trial
        (see `controller_mode <Composition.controller_mode>` for timing of execution).  Set to True by default
        if `controller <Composition.controller>` is specified.  Setting it to False suppresses execution of the
        `controller <Composition.controller>`.

    controller_mode :  BEFORE or AFTER
        determines whether the controller is executed before or after the rest of the `Composition`
        is executed on each trial.

    controller_condition : Condition
        specifies whether the controller is executed in a given trial.  The default is `Always`, which
        executes the controller on every trial.

    default_execution_id
        if no *context* is specified in a call to run, this *context* is used;  by default,
        it is the Composition's `name <Composition.name>`.

    execution_ids : set
        stores all execution_ids used by this Composition.

    enable_learning: bool : default True
        determines whether `LearningMechanisms <LearningMechanism>` in the Composition
        are executed when it is executed.

    learning_components : list
        contains the learning-related components in the Composition, all or many of which may have been
        created automatically in a call to one of its `add_<*learning_type*>_pathway' methods (see
        `Composition_Learning` for details).  This does *not* contain the `ProcessingMechanisms
        <ProcessingMechanism>` or `MappingProjections <MappingProjection>` in the pathway(s) being learned;
        those are contained in `learning_pathways <Composition.learning_pathways>` attribute.

    learned_components : list[list]
        contains a list of the components subject to learning in the Composition (`ProcessingMechanisms
        <ProcessingMechanism>` and `MappingProjections <MappingProjection>`); this does *not* contain the
        components used for learning; those are contained in `learning_components
        <Composition.learning_components>` attribute.
COMMENT: learning_pathways : list[list] contains a list of the learning pathways specified for the Composition; each item contains a list of the `ProcessingMechanisms <ProcessingMechanism>` and `MappingProjection(s) <MappingProjection>` specified a a call to one of the Composition's `add_<*learning_type*>_pathway' methods (see `Composition_Learning` for details). This does *not* contain the components used for learning; those are contained in `learning_components <Composition.learning_components>` attribute. COMMENT results : 3d array stores the `output_values <Mechanism_Base.output_values>` of the `OUTPUT` Mechanisms in the Composition for every `TRIAL <TimeScale.TRIAL>` executed in a call to `run <Composition.run>`. Each item in the outermost dimension (axis 0) of the array corresponds to a trial; each item within a trial corresponds to the `output_values <Mechanism_Base.output_values>` of an `OUTPUT` Mechanism. simulation_results : 3d array stores the `results <Composition.results>` for executions of the Composition when it is executed using its `evaluate <Composition.evaluate>` method. retain_old_simulation_data : bool if True, all Parameter values generated during simulations will be saved for later inspection; if False, simulation values will be deleted unless otherwise specified by individual Parameters name : str the name of the Composition; if it is not specified in the **name** argument of the constructor, a default is assigned by CompositionRegistry (see `Naming` for conventions used for default and duplicate names). prefs : PreferenceSet or specification dict the `PreferenceSet` for the Composition; if it is not specified in the **prefs** argument of the constructor, a default is assigned using `classPreferences` defined in __init__.py (see :doc:`PreferenceSet <LINK>` for details). 
""" # Composition now inherits from Component, so registry inherits name None componentType = 'Composition' classPreferenceLevel = PreferenceLevel.CATEGORY _model_spec_generic_type_name = 'graph' class Parameters(ParametersBase): """ Attributes ---------- results see `results <Composition.results>` :default value: [] :type: list retain_old_simulation_data see `retain_old_simulation_data <Composition.retain_old_simulation_data>` :default value: False :type: bool simulation_results see `simulation_results <Composition.simulation_results>` :default value: [] :type: list """ results = Parameter([], loggable=False, pnl_internal=True) simulation_results = Parameter([], loggable=False, pnl_internal=True) retain_old_simulation_data = Parameter(False, stateful=False, loggable=False) class _CompilationData(ParametersBase): ptx_execution = None parameter_struct = None state_struct = None data_struct = None scheduler_conditions = None def __init__( self, name=None, controller:ControlMechanism=None, enable_controller=None, controller_mode:tc.enum(BEFORE,AFTER)=AFTER, controller_condition:Condition=Always(), enable_learning=False, retain_old_simulation_data=None, prefs=None, **param_defaults ): # also sets name register_category( entry=self, base_class=Composition, registry=CompositionRegistry, name=name, ) # core attribute self.graph = Graph() # Graph of the Composition self._graph_processing = None self.nodes = ContentAddressableList(component_type=Component) self.required_node_roles = [] self.node_ordering = [] # 'env' attr required for dynamic inputs generated by gym forager env self.env = None # Interface Mechanisms self.input_CIM = CompositionInterfaceMechanism(name=self.name + " Input_CIM", composition=self) self.output_CIM = CompositionInterfaceMechanism(name=self.name + " Output_CIM", composition=self) self.parameter_CIM = CompositionInterfaceMechanism(name=self.name + " Parameter_CIM", composition=self) self.input_CIM_ports = {} self.output_CIM_ports = {} 
self.parameter_CIM_ports = {} self.shadows = {} self.default_execution_id = self.name self.execution_ids = {self.default_execution_id} self.projections = ContentAddressableList(component_type=Component) self._scheduler = None self.enable_learning = False # status attributes self.graph_consistent = True # Tracks if Composition is in runnable state (no dangling projections (what else?) self.needs_update_graph = True # Tracks if Composition graph has been analyzed to assign roles to components self.needs_update_graph_processing = True # Tracks if the processing graph is current with the full graph self.needs_update_scheduler = True # Tracks if the scheduler needs to be regenerated self.nodes_to_roles = collections.OrderedDict() self.feedback_senders = set() self.feedback_receivers = set() self._initialize_parameters( **param_defaults, retain_old_simulation_data=retain_old_simulation_data, context=Context(source=ContextFlags.COMPOSITION) ) # Compiled resources self.__generated_node_wrappers = {} self.__generated_run = None self.__generated_simulation = None self.__generated_sim_run = None self._compilation_data = self._CompilationData(owner=self) # If a PreferenceSet was provided, assign to instance _assign_prefs(self, prefs, BasePreferenceSet) self.log = CompositionLog(owner=self) self._terminal_backprop_sequences = {} self.controller = None if controller: self.add_controller(controller) else: self.enable_controller = enable_controller self.controller_mode = controller_mode self.controller_condition = controller_condition self.controller_condition.owner = self.controller self._update_parameter_components() self.initialization_status = ContextFlags.INITIALIZED @property def graph_processing(self): """ The Composition's processing graph (contains only `Mechanisms <Mechanism>`. :getter: Returns the processing graph, and builds the graph if it needs updating since the last access. 
""" if self.needs_update_graph_processing or self._graph_processing is None: self._update_processing_graph() return self._graph_processing @property def scheduler(self): """ A default `Scheduler` automatically generated by the Composition, and used for its execution when it is `run <Composition_Run>`. :getter: Returns the default scheduler, and builds it if it needs updating since the last access. """ if self.needs_update_scheduler or not isinstance(self._scheduler, Scheduler): old_scheduler = self._scheduler self._scheduler = Scheduler(graph=self.graph_processing, default_execution_id=self.default_execution_id) if old_scheduler is not None: self._scheduler.add_condition_set(old_scheduler.conditions) self.needs_update_scheduler = False return self._scheduler @scheduler.setter def scheduler(self, value: Scheduler): warnings.warn( f'If {self} is changed (nodes or projections are added or removed), scheduler ' ' will be rebuilt, and will be different than the Scheduler you are now setting it to.', stacklevel=2 ) self._scheduler = value @property def termination_processing(self): return self.scheduler.termination_conds @termination_processing.setter def termination_processing(self, termination_conds): self.scheduler.termination_conds = termination_conds # ****************************************************************************************************************** # GRAPH # ****************************************************************************************************************** def _analyze_graph(self, scheduler=None, context=None): """ Assigns `NodeRoles <NodeRoles>` to nodes based on the structure of the `Graph`. By default, if _analyze_graph determines that a node is `ORIGIN <NodeRole.ORIGIN>`, it is also given the role `INPUT <NodeRole.INPUT>`. Similarly, if _analyze_graph determines that a node is `TERMINAL <NodeRole.TERMINAL>`, it is also given the role `OUTPUT <NodeRole.OUTPUT>`. 
However, if the **required_roles** argument of `add_node <Composition.add_node>` is used to set any node in the Composition to `INPUT <NodeRole.INPUT>`, then the `ORIGIN <NodeRole.ORIGIN>` nodes are not set to `INPUT <NodeRole.INPUT>` by default. If the **required_roles** argument of `add_node <Composition.add_node>` is used to set any node in the Composition to `OUTPUT <NodeRole.OUTPUT>`, then the `TERMINAL <NodeRole.TERMINAL>` nodes are not set to `OUTPUT <NodeRole.OUTPUT>` by default. """ for n in self.nodes: try: n._analyze_graph(context=context) except AttributeError: pass self._check_feedback(scheduler=scheduler, context=context) self._determine_node_roles(context=context) self._create_CIM_ports(context=context) self._update_shadow_projections(context=context) self._check_for_projection_assignments(context=context) self.needs_update_graph = False def _update_processing_graph(self): """ Constructs the processing graph (the graph that contains only Nodes as vertices) from the composition's full graph """ logger.debug('Updating processing graph') self._graph_processing = self.graph.copy() def remove_vertex(vertex): logger.debug('Removing', vertex) for parent in vertex.parents: for child in vertex.children: if vertex.feedback: child.backward_sources.add(parent.component) self._graph_processing.connect_vertices(parent, child) # ensure that children get handled if len(vertex.parents) == 0: for child in vertex.children: if vertex.feedback: child.backward_sources.add(parent.component) for node in cur_vertex.parents + cur_vertex.children: logger.debug( 'New parents for vertex {0}: \n\t{1}\nchildren: \n\t{2}'.format( node, node.parents, node.children ) ) logger.debug('Removing vertex {0}'.format(cur_vertex)) self._graph_processing.remove_vertex(vertex) # copy to avoid iteration problems when deleting vert_list = self._graph_processing.vertices.copy() for cur_vertex in vert_list: logger.debug('Examining', cur_vertex) if not cur_vertex.component.is_processing: 
remove_vertex(cur_vertex) self.needs_update_graph_processing = False def _analyze_consideration_queue(self, q, objective_mechanism): """Assigns NodeRole.ORIGIN to all nodes in the first entry of the consideration queue and NodeRole.TERMINAL to all nodes in the last entry of the consideration queue. The ObjectiveMechanism of a controller may not be NodeRole.TERMINAL, so if the ObjectiveMechanism is the only node in the last entry of the consideration queue, then the second-to-last entry is NodeRole.TERMINAL instead. """ for node in q[0]: self._add_node_role(node, NodeRole.ORIGIN) for node in list(q)[-1]: if node != objective_mechanism: self._add_node_role(node, NodeRole.TERMINAL) elif len(q[-1]) < 2: for previous_node in q[-2]: self._add_node_role(previous_node, NodeRole.TERMINAL) # ****************************************************************************************************************** # NODES # ****************************************************************************************************************** def add_node(self, node, required_roles=None, context=None): """ Add a Composition Node (`Mechanism <Mechanism>` or `Composition`) to Composition, if it is not already added Arguments --------- node : `Mechanism <Mechanism>` or `Composition` the node to be added to the Composition required_roles : `NodeRole` or list of NodeRoles any NodeRoles roles that this node should have in addition to those determined by analyze graph. 
""" self._update_shadows_dict(node) try: node._analyze_graph() except AttributeError: pass node._check_for_composition(context=context) # Add node to Composition's graph if node not in [vertex.component for vertex in self.graph.vertices]: # Only add if it doesn't already exist in graph node.is_processing = True self.graph.add_component(node) # Set incoming edge list of node to empty self.nodes.append(node) self.node_ordering.append(node) self.nodes_to_roles[node] = set() self.needs_update_graph = True self.needs_update_graph_processing = True self.needs_update_scheduler = True try: # activate any projections the node requires node._activate_projections_for_compositions(self) except AttributeError: pass # Implement any components specified in node's aux_components attribute if hasattr(node, "aux_components"): projections = [] # Add all "nodes" to the composition first (in case projections reference them) for component in node.aux_components: if isinstance(component, (Mechanism, Composition)): if isinstance(component, Composition): component._analyze_graph() self.add_node(component) elif isinstance(component, Projection): projections.append((component, False)) elif isinstance(component, tuple): if isinstance(component[0], Projection): if isinstance(component[1], bool) or component[1]==MAYBE: projections.append(component) else: raise CompositionError("Invalid component specification ({}) in {}'s aux_components. 
If a " "tuple is used to specify a Projection, then the index 0 item must " "be the Projection, and the index 1 item must be the feedback " "specification (True or False).".format(component, node.name)) elif isinstance(component[0], (Mechanism, Composition)): if isinstance(component[1], NodeRole): self.add_node(node=component[0], required_roles=component[1]) elif isinstance(component[1], list): if isinstance(component[1][0], NodeRole): self.add_node(node=component[0], required_roles=component[1]) else: raise CompositionError("Invalid component specification ({}) in {}'s aux_components. " "If a tuple is used to specify a Mechanism or Composition, then " "the index 0 item must be the node, and the index 1 item must " "be the required_roles".format(component, node.name)) else: raise CompositionError("Invalid component specification ({}) in {}'s aux_components. If a " "tuple is used to specify a Mechanism or Composition, then the " "index 0 item must be the node, and the index 1 item must be the " "required_roles".format(component, node.name)) else: raise CompositionError("Invalid component specification ({}) in {}'s aux_components. If a tuple" " is specified, then the index 0 item must be a Projection, Mechanism, " "or Composition.".format(component, node.name)) else: raise CompositionError("Invalid component ({}) in {}'s aux_components. Must be a Mechanism, " "Composition, Projection, or tuple." .format(component.name, node.name)) # Add all Projections to the Composition for proj_spec in projections: # The proj_spec assumes a direct connection between sender and receiver, and is therefore invalid if # either are nested (i.e. projections between them need to be routed through a CIM). In these cases, # we instantiate a new projection between sender and receiver instead of using the original spec. # If the sender or receiver is an AutoAssociativeProjection, then the owner will be another projection # instead of a mechanism, so we need to use owner_mech instead. 
sender_node = proj_spec[0].sender.owner receiver_node = proj_spec[0].receiver.owner if isinstance(sender_node, AutoAssociativeProjection): sender_node = proj_spec[0].sender.owner.owner_mech if isinstance(receiver_node, AutoAssociativeProjection): receiver_node = proj_spec[0].receiver.owner.owner_mech if sender_node in self.nodes and \ receiver_node in self.nodes: self.add_projection(projection=proj_spec[0], feedback=proj_spec[1]) else: self.add_projection(sender=proj_spec[0].sender, receiver=proj_spec[0].receiver, feedback=proj_spec[1]) # Implement required_roles if required_roles: if not isinstance(required_roles, list): required_roles = [required_roles] for required_role in required_roles: self.add_required_node_role(node, required_role) # Add projections to node from sender of any shadowed InputPorts for input_port in node.input_ports: if hasattr(input_port, SHADOW_INPUTS) and input_port.shadow_inputs is not None: for proj in input_port.shadow_inputs.path_afferents: sender = proj.sender if sender.owner != self.input_CIM: self.add_projection(projection=MappingProjection(sender=proj.sender, receiver=input_port), sender=proj.sender.owner, receiver=node) # Add ControlSignals to controller and ControlProjections # to any parameter_ports specified for control in node's constructor if self.controller: deferred_init_control_specs = node._get_parameter_port_deferred_init_control_specs() if deferred_init_control_specs: self.controller._remove_default_control_signal(type=CONTROL_SIGNAL) for ctl_sig_spec in deferred_init_control_specs: # FIX: 9/14/19 - IS THE CONTEXT CORRECT (TRY TRACKING IN SYSTEM TO SEE WHAT CONTEXT IS): control_signal = self.controller._instantiate_control_signal(control_signal=ctl_sig_spec, context=Context(source=ContextFlags.COMPOSITION)) self.controller.control.append(control_signal) self.controller._activate_projections_for_compositions(self) def add_nodes(self, nodes, required_roles=None): """ Add a list of Composition Nodes (`Mechanism <Mechanism>` 
or `Composition`) to the Composition, Arguments --------- nodes : list the nodes to be added to the Composition. Each item of the list must be a `Mechanism <Mechanism>`, a `Composition` or a role-specification tuple with a Mechanism or Composition as the first item, and a `NodeRole` or list of those as the second item; any NodeRoles in a role-specification tuple are applied in addition to those specified in the **required_roles** argument. required_roles : `NodeRole` or list of NodeRoles NodeRoles to assign to the nodes in addition to those determined by analyze graph; these apply to any items in the list of nodes that are not in a tuple; these apply to any specified in any role-specification tuples in the **nodes** argument. """ if not isinstance(nodes, list): raise CompositionError(f"Arg for 'add_nodes' method of '{self.name}' {Composition.__name__} " f"must be a list of nodes or (node, required_roles) tuples") for node in nodes: if isinstance(node, (Mechanism, Composition)): self.add_node(node, required_roles) elif isinstance(node, tuple): node_specific_roles = convert_to_list(node[1]) if required_roles: node_specific_roles.append(required_roles) self.add_node(node=node[0], required_roles=node_specific_roles) else: raise CompositionError(f"Node specified in 'add_nodes' method of '{self.name}' {Composition.__name__} " f"({node}) must be a {Mechanism.__name__}, {Composition.__name__}, " f"or a tuple containing one of those and a {NodeRole.__name__} or list of them") def remove_nodes(self, nodes): if not isinstance(nodes, (list, Mechanism, Composition)): assert False, 'Argument of remove_nodes must be a Mechanism, Composition or list containing either or both' nodes = convert_to_list(nodes) for node in nodes: for proj in node.afferents + node.efferents: try: del self.projections[proj] except ValueError: # why are these not present? pass try: self.graph.remove_component(proj) except CompositionError: # why are these not present? 
pass self.graph.remove_component(node) del self.nodes_to_roles[node] node_role_pairs = [item for item in self.required_node_roles if item[0] is node] for item in node_role_pairs: self.required_node_roles.remove(item) del self.nodes[node] self.node_ordering.remove(node) def add_required_node_role(self, node, role): if role not in NodeRole: raise CompositionError('Invalid NodeRole: {0}'.format(role)) node_role_pair = (node, role) if node_role_pair not in self.required_node_roles: self.required_node_roles.append(node_role_pair) def remove_required_node_role(self, node, role): if role not in NodeRole: raise CompositionError('Invalid NodeRole: {0}'.format(role)) node_role_pair = (node, role) if node_role_pair in self.required_node_roles: self.required_node_roles.remove(node_role_pair) def get_roles_by_node(self, node): try: return self.nodes_to_roles[node] except KeyError: raise CompositionError('Node {0} not found in {1}.nodes_to_roles'.format(node, self)) def get_nodes_by_role(self, role): """ Returns a List of Composition Nodes in this Composition that have the *role* specified Arguments _________ role : NodeRole the List of nodes having this role to return Returns ------- List of Composition Nodes with `NodeRole` *role* : List(`Mechanisms <Mechanism>` and `Compositions <Composition>`) """ if role is None or role not in NodeRole: raise CompositionError('Invalid NodeRole: {0}'.format(role)) try: return [node for node in self.nodes if role in self.nodes_to_roles[node]] except KeyError as e: raise CompositionError('Node missing from {0}.nodes_to_roles: {1}'.format(self, e)) def _get_nested_nodes(self, nested_nodes=NotImplemented, root_composition=NotImplemented, visited_compositions=NotImplemented): """Recursive search that returns all nodes of all nested compositions in a tuple with the composition they are embedded in. :return A list of tuples in format (node, composition) containing all nodes of all nested compositions. 
""" if nested_nodes is NotImplemented: nested_nodes=[] if root_composition is NotImplemented: root_composition=self if visited_compositions is NotImplemented: visited_compositions = [self] for node in self.nodes: if node.componentType == 'Composition' and \ node not in visited_compositions: visited_compositions.append(node) node._get_nested_nodes(nested_nodes, root_composition, visited_compositions) elif root_composition is not self: nested_nodes.append((node,self)) return nested_nodes def _get_nested_compositions(self, nested_compositions=NotImplemented, visited_compositions=NotImplemented): """Recursive search that returns all nested compositions. :return A list of nested compositions. """ if nested_compositions is NotImplemented: nested_compositions=[] if visited_compositions is NotImplemented: visited_compositions = [self] for node in self.nodes: if node.componentType == 'Composition' and \ node not in visited_compositions: nested_compositions.append(node) visited_compositions.append(node) node._get_nested_compositions(nested_compositions, visited_compositions) return nested_compositions def _determine_node_roles(self, context=None): # Clear old roles self.nodes_to_roles.update({k: set() for k in self.nodes_to_roles}) # Required Roles for node_role_pair in self.required_node_roles: self._add_node_role(node_role_pair[0], node_role_pair[1]) objective_mechanism = None # # MODIFIED 10/24/19 OLD: # if self.controller and self.enable_controller and self.controller.objective_mechanism: # MODIFIED 10/24/19 NEW: if self.controller and self.controller.objective_mechanism: # MODIFIED 10/24/19 END objective_mechanism = self.controller.objective_mechanism self._add_node_role(objective_mechanism, NodeRole.CONTROLLER_OBJECTIVE) # Use Scheduler.consideration_queue to check for ORIGIN and TERMINAL Nodes: if self.scheduler.consideration_queue: self._analyze_consideration_queue(self.scheduler.consideration_queue, objective_mechanism) # A ControlMechanism should not be the 
TERMINAL node of a Composition # (unless it is specifed as a required_role, in which case it is reassigned below) for node in self.nodes: if isinstance(node, ControlMechanism): if NodeRole.TERMINAL in self.nodes_to_roles[node]: self.nodes_to_roles[node].remove(NodeRole.TERMINAL) if NodeRole.OUTPUT in self.nodes_to_roles[node]: self.nodes_to_roles[node].remove(NodeRole.OUTPUT) # Cycles for node in self.scheduler.cycle_nodes: self._add_node_role(node, NodeRole.CYCLE) # "Feedback" projections for node in self.feedback_senders: self._add_node_role(node, NodeRole.FEEDBACK_SENDER) for node in self.feedback_receivers: self._add_node_role(node, NodeRole.FEEDBACK_RECEIVER) # Required Roles for node_role_pair in self.required_node_roles: self._add_node_role(node_role_pair[0], node_role_pair[1]) # If INPUT nodes were not specified by user, ORIGIN nodes become INPUT nodes if not self.get_nodes_by_role(NodeRole.INPUT): origin_nodes = self.get_nodes_by_role(NodeRole.ORIGIN) for node in origin_nodes: self._add_node_role(node, NodeRole.INPUT) # If OUTPUT nodes were not specified by user, assign them: # - if there are LearningMechanisms, OUTPUT node is the last non-learning-related node. # - if there are no TERMINAL nodes either, then the last node added to the Composition becomes the OUTPUT node. 
if not self.get_nodes_by_role(NodeRole.OUTPUT): # FIX: 10/24/19: NOW MISSES controller.objective_mechanism in test_controller_objective_mech_not_terminal # if controller_enabled = False def remove_learning_and_control_nodes(nodes): output_nodes_copy = nodes.copy() for node in output_nodes_copy: if (NodeRole.LEARNING in self.nodes_to_roles[node] or NodeRole.AUTOASSOCIATIVE_LEARNING in self.nodes_to_roles[node] or isinstance(node, ControlMechanism) or (isinstance(node, ObjectiveMechanism) and node._role == CONTROL)): nodes.remove(node) if self.get_nodes_by_role(NodeRole.LEARNING) or self.get_nodes_by_role(NodeRole.AUTOASSOCIATIVE_LEARNING): # FIX: ADD COMMENT HERE # terminal_nodes = [[n for n in self.nodes if not NodeRole.LEARNING in self.nodes_to_roles[n]][-1]] output_nodes = list([items for items in self.scheduler.consideration_queue if any([item for item in items if (not NodeRole.LEARNING in self.nodes_to_roles[item] and not NodeRole.AUTOASSOCIATIVE_LEARNING in self.nodes_to_roles[item]) ])])[-1].copy() else: output_nodes = self.get_nodes_by_role(NodeRole.TERMINAL) if output_nodes: remove_learning_and_control_nodes(output_nodes) else: try: # Assign TERMINAL role to nodes that are last in the scheduler's consideration queue that are: # - not used for Learning; # - not ControlMechanisms or ObjectiveMechanisms that project to them; # - do not project to any other nodes. 
# First, find last consideration_set in scheduler that does not contain only # learning-related nodes, ControlMechanism(s) or control-related ObjectiveMechanism(s); # note: get copy of the consideration_set, as don't want to modify one actually used by scheduler output_nodes = list([items for items in self.scheduler.consideration_queue if any([item for item in items if (not NodeRole.LEARNING in self.nodes_to_roles[item] and not NodeRole.AUTOASSOCIATIVE_LEARNING in self.nodes_to_roles[item] and not isinstance(item, ControlMechanism) and not (isinstance(item, ObjectiveMechanism) and item._role == CONTROL)) ])] )[-1].copy() # Next, remove any learning-related nodes, ControlMechanism(s) or control-related # ObjectiveMechanism(s) that may have "snuck in" (i.e., happen to be in the set) remove_learning_and_control_nodes(output_nodes) # Then, add any nodes that are not learning-related or a ControlMechanism, # and that have *no* efferent Projections # IMPLEMENTATION NOTE: # Do this here, as the list considers entire sets in the consideration queue, # and a node with no efferents may be in the same set as one with efferents # if they have the same dependencies. 
for node in self.nodes: if (not node.efferents and not NodeRole.LEARNING in self.nodes_to_roles[node] and not NodeRole.AUTOASSOCIATIVE_LEARNING in self.nodes_to_roles[node] and not isinstance(node, ControlMechanism) and not (isinstance(node, ObjectiveMechanism) and node._role == CONTROL) ): output_nodes.add(node) except IndexError: output_nodes = [] for node in output_nodes: self._add_node_role(node, NodeRole.OUTPUT) # Finally, assign TERMINAL nodes for node in self.nodes: if not node.efferents or NodeRole.FEEDBACK_SENDER in self.nodes_to_roles[node]: self._add_node_role(node, NodeRole.TERMINAL) def _set_node_roles(self, node, roles): self._clear_node_roles(node) for role in roles: self._add_node_role(role) def _clear_node_roles(self, node): if node in self.nodes_to_roles: self.nodes_to_roles[node] = set() def _add_node_role(self, node, role): if role not in NodeRole: raise CompositionError('Invalid NodeRole: {0}'.format(role)) self.nodes_to_roles[node].add(role) def _remove_node_role(self, node, role): if role not in NodeRole: raise CompositionError('Invalid NodeRole: {0}'.format(role)) self.nodes_to_roles[node].remove(role) tc.typecheck def _create_CIM_ports(self, context=None): """ - remove the default InputPort and OutputPort from the CIMs if this is the first time that real InputPorts and OutputPorts are being added to the CIMs - create a corresponding InputPort and OutputPort on the `input_CIM <Composition.input_CIM>` for each InputPort of each INPUT node. Connect the OutputPort on the input_CIM to the INPUT node's corresponding InputPort via a standard MappingProjection. - create a corresponding InputPort and OutputPort on the `output_CIM <Composition.output_CIM>` for each OutputPort of each OUTPUT node. Connect the OUTPUT node's OutputPort to the output_CIM's corresponding InputPort via a standard MappingProjection. 
- build two dictionaries: (1) input_CIM_ports = { INPUT Node InputPort: (InputCIM InputPort, InputCIM OutputPort) } (2) output_CIM_ports = { OUTPUT Node OutputPort: (OutputCIM InputPort, OutputCIM OutputPort) } - if the Node has any shadows, create the appropriate projections as needed. - delete all of the above for any node Ports which were previously, but are no longer, classified as INPUT/OUTPUT - if composition has a controller, remove default InputPort and OutputPort of all nested compositions' `parameter CIMs <Composition.parameter_CIM>` which contain nodes that will be modulated and whose default ports have not already been removed - delete afferents of compositions' parameter CIMs if their sender is no longer the controller of any of the composition's parent compositions - create a corresponding InputPort and ControlSignal on the `parameter_CIM <Composition.parameter_CIM>` for each parameter modulated by the controller - instantiate and activate projections from ControlSignals of controller to corresponding InputPorts of nested compositions' `parameter_CIMs <Composition.parameter_CIM>` """ if not self.input_CIM.connected_to_composition: self.input_CIM.input_ports.remove(self.input_CIM.input_port) self.input_CIM.output_ports.remove(self.input_CIM.output_port) self.input_CIM.connected_to_composition = True if not self.output_CIM.connected_to_composition: self.output_CIM.input_ports.remove(self.output_CIM.input_port) self.output_CIM.output_ports.remove(self.output_CIM.output_port) self.output_CIM.connected_to_composition = True current_input_node_input_ports = set() input_nodes = self.get_nodes_by_role(NodeRole.INPUT) for node in input_nodes: for input_port in node.external_input_ports: # add it to our set of current input ports current_input_node_input_ports.add(input_port) # if there is not a corresponding CIM OutputPort, add one if input_port not in set(self.input_CIM_ports.keys()): interface_input_port = InputPort(owner=self.input_CIM, 
variable=input_port.defaults.value, reference_value=input_port.defaults.value, name="INPUT_CIM_" + node.name + "_" + input_port.name) interface_output_port = OutputPort(owner=self.input_CIM, variable=OWNER_VALUE, default_variable=self.input_CIM.defaults.variable, function=InterfacePortMap( corresponding_input_port=interface_input_port), name="INPUT_CIM_" + node.name + "_" + input_port.name) self.input_CIM_ports[input_port] = [interface_input_port, interface_output_port] projection = MappingProjection(sender=interface_output_port, receiver=input_port, matrix=IDENTITY_MATRIX, name="(" + interface_output_port.name + ") to (" + input_port.owner.name + "-" + input_port.name + ")") projection._activate_for_compositions(self) if isinstance(node, Composition): projection._activate_for_compositions(node) new_shadow_projections = {} # for any entirely new shadow_projections, create a MappingProjection object and add to projections for output_port, input_port in new_shadow_projections: if new_shadow_projections[(output_port, input_port)] is None: shadow_projection = MappingProjection(sender=output_port, receiver=input_port, name="(" + output_port.name + ") to (" + input_port.owner.name + "-" + input_port.name + ")") shadow_projection._activate_for_compositions(self) sends_to_input_ports = set(self.input_CIM_ports.keys()) # For any ports still registered on the CIM that does not map to a corresponding INPUT node I.S.: for input_port in sends_to_input_ports.difference(current_input_node_input_ports): for projection in input_port.path_afferents: if projection.sender == self.input_CIM_ports[input_port][1]: # remove the corresponding projection from the INPUT node's path afferents input_port.path_afferents.remove(projection) # projection.receiver.efferents.remove(projection) # Bug? ^^ projection is not in receiver.efferents?? 
if projection.receiver.owner in self.shadows and len(self.shadows[projection.receiver.owner]) > 0: for shadow in self.shadows[projection.receiver.owner]: for shadow_input_port in shadow.input_ports: for shadow_projection in shadow_input_port.path_afferents: if shadow_projection.sender == self.input_CIM_ports[input_port][1]: shadow_input_port.path_afferents.remove(shadow_projection) # remove the CIM input and output ports associated with this INPUT node InputPort self.input_CIM.input_ports.remove(self.input_CIM_ports[input_port][0]) self.input_CIM.output_ports.remove(self.input_CIM_ports[input_port][1]) # and from the dictionary of CIM OutputPort/InputPort pairs del self.input_CIM_ports[input_port] # OUTPUT CIMS # loop over all OUTPUT nodes current_output_node_output_ports = set() for node in self.get_nodes_by_role(NodeRole.OUTPUT): for output_port in node.output_ports: current_output_node_output_ports.add(output_port) # if there is not a corresponding CIM OutputPort, add one if output_port not in set(self.output_CIM_ports.keys()): interface_input_port = InputPort(owner=self.output_CIM, variable=output_port.defaults.value, reference_value=output_port.defaults.value, name="OUTPUT_CIM_" + node.name + "_" + output_port.name) interface_output_port = OutputPort( owner=self.output_CIM, variable=OWNER_VALUE, function=InterfacePortMap(corresponding_input_port=interface_input_port), reference_value=output_port.defaults.value, name="OUTPUT_CIM_" + node.name + "_" + output_port.name) self.output_CIM_ports[output_port] = [interface_input_port, interface_output_port] proj_name = "(" + output_port.name + ") to (" + interface_input_port.name + ")" proj = MappingProjection( sender=output_port, receiver=interface_input_port, # FIX: This fails if OutputPorts don't all have the same dimensionality (number of axes); # see example in test_output_ports/TestOutputPorts matrix=IDENTITY_MATRIX, name=proj_name ) proj._activate_for_compositions(self) if isinstance(node, Composition): 
proj._activate_for_compositions(node) previous_output_node_output_ports = set(self.output_CIM_ports.keys()) for output_port in previous_output_node_output_ports.difference(current_output_node_output_ports): # remove the CIM input and output ports associated with this Terminal Node OutputPort self.output_CIM.remove_ports(self.output_CIM_ports[output_port][0]) self.output_CIM.remove_ports(self.output_CIM_ports[output_port][1]) del self.output_CIM_ports[output_port] # PARAMETER CIMS if self.controller: controller = self.controller nested_nodes = dict(self._get_nested_nodes()) nested_comps = self._get_nested_compositions() for comp in nested_comps: for port in comp.parameter_CIM.input_ports: for afferent in port.all_afferents: if not comp in afferent.sender.owner.composition._get_nested_compositions(): del port._afferents_info[afferent] if afferent in port.path_afferents: port.path_afferents.remove(afferent) if afferent in port.mod_afferents: port.mod_afferents.remove(afferent) for modulatory_signal in controller.control_signals: for projection in modulatory_signal.projections: receiver = projection.receiver mech = receiver.owner if mech in nested_nodes: comp = nested_nodes[mech] pcim = comp.parameter_CIM pcIM_ports = comp.parameter_CIM_ports if receiver not in pcIM_ports: if not pcim.connected_to_composition: pcim.input_ports.remove(pcim.input_port) pcim.output_ports.remove(pcim.output_port) pcim.connected_to_composition = True modulation = modulatory_signal.owner.modulation input_port = InputPort( owner = pcim, ) control_signal = ControlSignal( owner = pcim, modulation = modulation, variable = OWNER_VALUE, function = InterfacePortMap( corresponding_input_port = input_port ), modulates = receiver, name = 'PARAMETER_CIM_' + mech.name + "_" + receiver.name ) for projection in control_signal.projections: projection._activate_for_compositions(self) projection._activate_for_compositions(comp) for projection in receiver.mod_afferents: if projection.sender.owner == 
controller:
                                    receiver.mod_afferents.remove(projection)
                            pcIM_ports[receiver] = (modulatory_signal, input_port)

            # For any nested composition's parameter_CIM not yet driven by the controller,
            # wire the controller's efferents through to the CIM's corresponding InputPort.
            # NOTE(review): `pcIM_ports` here is the binding left over from the loop above
            # (i.e., the last nested comp visited) — confirm this is intended.
            for comp in nested_comps:
                pcim = comp.parameter_CIM
                connected_to_controller = False
                for afferent in pcim.afferents:
                    if afferent.sender.owner is controller:
                        connected_to_controller = True
                if not connected_to_controller:
                    for efferent in controller.efferents:
                        if efferent.receiver in pcIM_ports:
                            input_projection = MappingProjection(
                                sender = efferent.sender,
                                receiver = pcIM_ports[efferent.receiver][1]
                            )
                            input_projection._activate_for_compositions(self)
                            input_projection._activate_for_compositions(comp)

    def _get_nested_node_CIM_port(self,
                                  node: Mechanism,
                                  node_state: tc.any(InputPort, OutputPort),
                                  role: tc.enum(NodeRole.INPUT, NodeRole.OUTPUT)
                                  ):
        """Check for node in nested Composition

        Returns a 4-tuple (CIM_port, CIM_port, nested_comp, CIM): the relevant port of the relevant CIM
        (returned in both of the first two slots — callers such as _parse_sender_spec and
        _parse_receiver_spec unpack four values), the nested Composition in which the node was found,
        and that Composition's CIM; all items are None if the node is not found.
        """
        nested_comp = CIM_port_for_nested_node = CIM = None
        nested_comps = [c for c in self.nodes if isinstance(c, Composition)]
        for nc in nested_comps:
            if node in nc.nodes:
                # Must be assigned Node.Role of INPUT or OUTPUT (depending on receiver vs sender)
                if role not in nc.nodes_to_roles[node]:
                    raise CompositionError("{} found in nested {} of {} ({}) but without required {} ({})".
                                           format(node.name, Composition.__name__, self.name, nc.name,
                                                  NodeRole.__name__, repr(role)))
                # With the current implementation, there should never be multiple nested compositions that contain the
                # same mechanism -- because all nested compositions are passed the same execution ID
                # if CIM_port_for_nested_node:
                #     warnings.warn("{} found with {} of {} in more than one nested {} of {}; "
                #                   "only first one found (in {}) will be used".
                #                   format(node.name, NodeRole.__name__, repr(role),
                #                          Composition.__name__, self.name, nested_comp.name))
                #     continue
                # Map the node's port to the corresponding port on the nested comp's CIM:
                # input ports map to the input_CIM's InputPort ([0]); output ports to the
                # output_CIM's OutputPort ([1]).
                if isinstance(node_state, InputPort):
                    CIM_port_for_nested_node = nc.input_CIM_ports[node_state][0]
                    CIM = nc.input_CIM
                elif isinstance(node_state, OutputPort):
                    CIM_port_for_nested_node = nc.output_CIM_ports[node_state][1]
                    CIM = nc.output_CIM
                else:
                    # IMPLEMENTATION NOTE: Place marker for future implementation of ParameterPort handling
                    #                      However, typecheck above should have caught this
                    assert False
                nested_comp = nc
                break
        # The CIM port is deliberately returned in both of the first two slots (see docstring)
        return CIM_port_for_nested_node, CIM_port_for_nested_node, nested_comp, CIM

    def _update_shadows_dict(self, node):
        """Register `node` in the Composition's "shadows" dict, both as a (possibly empty)
        key and, if it shadows another node's inputs, as a member of that node's entry."""
        # Create an empty entry for this node in the Composition's "shadows" dict
        # If any other nodes shadow this node, they will be added to the list
        if node not in self.shadows:
            self.shadows[node] = []

        nested_nodes = dict(self._get_nested_nodes())
        # If this node is shadowing another node, then add it to that node's entry in the Composition's "shadows" dict
        # If the node it's shadowing is a nested node, add it to the entry for the composition it's nested in.
        for input_port in node.input_ports:
            if hasattr(input_port, SHADOW_INPUTS) and input_port.shadow_inputs is not None:
                owner = input_port.shadow_inputs.owner
                if owner in nested_nodes:
                    owner = nested_nodes[owner]
                if node not in self.shadows[owner]:
                    self.shadows[owner].append(node)

    # ******************************************************************************************************************
    #                                              PROJECTIONS
    # ******************************************************************************************************************

    def add_projections(self, projections=None):
        """
            Calls `add_projection <Composition.add_projection>` for each Projection in the *projections* list.
            Each Projection must have its `sender <Projection_Base.sender>` and
            `receiver <Projection_Base.receiver>` already specified.  If an item in the list is a list of
            projections, called recursively on that list.
            Arguments
            ---------

            projections : list of Projections
                list of Projections to be added to the Composition
        """

        if isinstance(projections, list):
            for projection in projections:
                # A nested list of Projections is flattened by recursing on it
                if isinstance(projection, list):
                    self.add_projections(projection)
                elif isinstance(projection, Projection) and \
                        hasattr(projection, "sender") and \
                        hasattr(projection, "receiver"):
                    self.add_projection(projection)
                else:
                    raise CompositionError("Invalid projections specification for {}. The add_projections method of "
                                           "Composition requires a list of Projections, each of which must have a "
                                           "sender and a receiver.".format(self.name))
        else:
            raise CompositionError("Invalid projections specification for {}. The add_projections method of "
                                   "Composition requires a list of Projections, each of which must have a "
                                   "sender and a receiver.".format(self.name))

    def add_projection(self,
                       projection=None,
                       sender=None,
                       receiver=None,
                       feedback=False,
                       learning_projection=False,
                       name=None,
                       allow_duplicates=False
                       ):
        """Add **projection** to the Composition, if one with the same sender and receiver doesn't already exist.

        If **projection** is not specified, create a default `MappingProjection` using **sender** and **receiver**.

        If **projection** is specified:

        • if **projection** has already been instantiated, and **sender** and **receiver** are also specified,
          they must match the `sender <MappingProjection.sender>` and `receiver <MappingProjection.receiver>`
          of **projection**.
        • if **sender** and **receiver** are specified and one or more Projections already exists between them:
          - if it is in the Composition:
            - if there is only one, the request is ignored and the existing Projection is returned
            - if there is more than one, an exception is raised as this should never be the case
          - it is NOT in the Composition:
            - if there is only one, that Projection is used;
            - if there is more than one, the last in the list (presumably the most recent) is used;
          in either case, processing continues, to activate it for the Composition, construct any "shadow"
          projections that may be specified, and assign feedback if specified.

        • if the status of **projection** is `deferred_init`:
          - if its `sender <Projection_Base.sender>` and/or `receiver <Projection_Base.receiver>` attributes are not
            specified, then **sender** and/or **receiver** are used.
          - if `sender <Projection_Base.sender>` and/or `receiver <Projection_Base.receiver>` attributes are
            specified, they must match **sender** and/or **receiver** if those have also been specified.
          - if a Projection between the specified sender and receiver does *not* already exist, it is initialized;
            if it *does* already exist, the request to add it is ignored, however requests to shadow it and/or mark
            it as a `feedback` Projection are implemented (in case it has not already been done for the existing
            Projection).

        .. note::
           If **projection** is an instantiated Projection (i.e., not in `deferred_init`) and one already exists
           between its `sender <Projection_Base.sender>` and `receiver <Projection_Base.receiver>` a warning is
           generated.

        COMMENT:
        IMPLEMENTATION NOTE:
            Duplicates are determined by the **Ports** to which they project, not the Mechanisms (to allow
            multiple Projections to exist between the same pair of Mechanisms using different Ports).
- If an already instantiated Projection is passed to add_projection and is a duplicate of an existing one, it is detected and suppresed, with a warning, in Port._instantiate_projections_to_port. - If a Projection with deferred_init status is a duplicate, it is fully suppressed here, as these are generated by add_linear_processing_pathway if the pathway overlaps with an existing one, and so warnings are unnecessary and would be confusing to users. COMMENT Arguments --------- sender : Mechanism, Composition, or OutputPort the sender of **projection** projection : Projection, matrix the projection to add receiver : Mechanism, Composition, or InputPort the receiver of **projection** feedback : bool When False (default) all Nodes within a cycle containing this Projection execute in parallel. This means that each Projections within the cycle actually passes to its `receiver <Projection_Base.receiver>` the `value <Projection_Base.value>` of its `sender <Projection_Base.sender>` from the previous execution. When True, this Projection "breaks" the cycle, such that all Nodes execute in sequence, and only the Projection marked as 'feedback' passes to its `receiver <Projection_Base.receiver>` the `value <Projection_Base.value>` of its `sender <Projection_Base.sender>` from the previous execution. Returns ------- projection if added, else None """ existing_projections = False # If a sender and receiver have been specified but not a projection, # check whether there is *any* projection like that # (i.e., whether it/they are already in the current Composition or not); if so: # - if there is only one, use that; # - if there are several, use the last in the list (on the assumption in that it is the most recent). # Note: Skip this if **projection** was specified, as it might include parameters that are different # than the existing ones, in which case should use that rather than any existing ones; # will handle any existing Projections that are in the current Composition below. 
if sender and receiver and projection is None: existing_projections = self._check_for_existing_projections(sender=sender, receiver=receiver, in_composition=False) if existing_projections: if isinstance(sender, Port): sender_check = sender.owner else: sender_check = sender if isinstance(receiver, Port): receiver_check = receiver.owner else: receiver_check = receiver if ((not isinstance(sender_check, CompositionInterfaceMechanism) and sender_check not in self.nodes) or (not isinstance(receiver_check, CompositionInterfaceMechanism) and receiver_check not in self.nodes)): for proj in existing_projections: self.remove_projection(proj) for port in receiver_check.input_ports + sender_check.output_ports: if proj in port.afferents_info: del port.afferents_info[proj] if proj in port.projections: port.projections.remove(proj) if proj in port.path_afferents: port.path_afferents.remove(proj) if proj in port.mod_afferents: port.mod_afferents.remove(proj) if proj in port.efferents: port.efferents.remove(proj) else: # Need to do stuff at end, so can't just return if self.prefs.verbosePref: warnings.warn(f"Several existing projections were identified between " f"{sender.name} and {receiver.name}: {[p.name for p in existing_projections]}; " f"the last of these will be used in {self.name}.") projection = existing_projections[-1] # FIX: 9/30/19 - Why is this not an else? 
# Because above is only for existing Projections outside of Composition, which should be # used # But existing one could be within, in which case want to use that one # existing Projection might be deferred_init, and want t try: # Note: this does NOT initialize the Projection if it is in deferred_init projection = self._parse_projection_spec(projection, name) except DuplicateProjectionError: # return projection return # Parse sender and receiver specs sender, sender_mechanism, graph_sender, nested_compositions = self._parse_sender_spec(projection, sender) receiver, receiver_mechanism, graph_receiver, receiver_input_port, nested_compositions, learning_projection = \ self._parse_receiver_spec(projection, receiver, sender, learning_projection) # If Deferred init if projection.initialization_status == ContextFlags.DEFERRED_INIT: # If sender or receiver are Port specs, use those; otherwise, use graph node (Mechanism or Composition) if not isinstance(sender, OutputPort): sender = sender_mechanism if not isinstance(receiver, InputPort): receiver = receiver_mechanism # Check if Projection to be initialized already exists in the current Composition; # if so, mark as existing_projections and skip existing_projections = self._check_for_existing_projections(sender=sender, receiver=receiver) if existing_projections: return else: # Initialize Projection projection._init_args['sender'] = sender projection._init_args['receiver'] = receiver try: projection._deferred_init() except DuplicateProjectionError: # return projection return else: existing_projections = self._check_for_existing_projections(projection, sender=sender, receiver=receiver) # KAM HACK 2/13/19 to get hebbian learning working for PSY/NEU 330 # Add autoassociative learning mechanism + related projections to composition as processing components if (sender_mechanism != self.input_CIM and receiver_mechanism != self.output_CIM and projection not in [vertex.component for vertex in self.graph.vertices] and not 
learning_projection): projection.is_processing = False # KDM 5/24/19: removing below rename because it results in several existing_projections # projection.name = f'{sender} to {receiver}' self.graph.add_component(projection, feedback=feedback) try: self.graph.connect_components(graph_sender, projection) self.graph.connect_components(projection, graph_receiver) except CompositionError as c: raise CompositionError(f"{c.args[0]} to {self.name}.") # KAM HACK 2/13/19 to get hebbian learning working for PSY/NEU 330 # Add autoassociative learning mechanism + related projections to composition as processing components if not existing_projections: self._validate_projection(projection, sender, receiver, sender_mechanism, receiver_mechanism, learning_projection) self.needs_update_graph = True self.needs_update_graph_processing = True self.needs_update_scheduler = True projection._activate_for_compositions(self) for comp in nested_compositions: projection._activate_for_compositions(comp) # Note: do all of the following even if Projection is a existing_projections, # as these conditions shoud apply to the exisiting one (and it won't hurt to try again if they do) # Create "shadow" projections to any input ports that are meant to shadow this projection's receiver # (note: do this even if there is a duplciate and they are not allowed, as still want to shadow that projection) if receiver_mechanism in self.shadows and len(self.shadows[receiver_mechanism]) > 0: for shadow in self.shadows[receiver_mechanism]: for input_port in shadow.input_ports: if input_port.shadow_inputs is not None: if input_port.shadow_inputs.owner == receiver: # TBI: Copy the projection type/matrix value of the projection that is being shadowed self.add_projection(MappingProjection(sender=sender, receiver=input_port), sender_mechanism, shadow) if feedback: self.feedback_senders.add(sender_mechanism) self.feedback_receivers.add(receiver_mechanism) return projection def remove_projection(self, projection): # step 
1 - remove Vertex from Graph
        if projection in [vertex.component for vertex in self.graph.vertices]:
            vert = self.graph.comp_to_vertex[projection]
            self.graph.remove_vertex(vert)
        # step 2 - remove Projection from Composition's list
        if projection in self.projections:
            self.projections.remove(projection)

        # step 3 - TBI? remove Projection from afferents & efferents lists of any node
        # NOTE(review): as written, the Projection remains registered on its Ports'
        # afferent/efferent lists after removal — confirm whether that is intended

    def _add_projection(self, projection):
        """Append `projection` to the Composition's list of Projections."""
        self.projections.append(projection)

    def _validate_projection(self,
                             projection,
                             sender, receiver,
                             graph_sender,
                             graph_receiver,
                             learning_projection,
                             ):
        """Verify that `projection`'s own sender/receiver owners match its assigned position in the graph."""
        # FIX: [JDC 6/8/19] SHOULDN'T THERE BE A CHECK FOR THEM LearningProjections? OR ARE THOSE DONE ELSEWHERE?
        # Skip this validation on learning projections because they have non-standard senders and receivers
        if not learning_projection:
            if projection.sender.owner != graph_sender:
                raise CompositionError("{}'s sender assignment [{}] is incompatible with the positions of these "
                                       "Components in the Composition.".format(projection, sender))
            if projection.receiver.owner != graph_receiver:
                raise CompositionError("{}'s receiver assignment [{}] is incompatible with the positions of these "
                                       "Components in the Composition.".format(projection, receiver))

    def _parse_projection_spec(self, projection, sender=None, receiver=None, name=None):
        """Coerce a projection specification (matrix/list, matrix keyword string, Projection instance,
        or None) into a Projection instance; raise CompositionError for anything else."""
        # A matrix or list spec is interpreted as the matrix of a new MappingProjection
        if isinstance(projection, (np.ndarray, np.matrix, list)):
            return MappingProjection(matrix=projection, sender=sender, receiver=receiver, name=name)
        # A string spec must be one of the recognized matrix keywords
        elif isinstance(projection, str):
            if projection in MATRIX_KEYWORD_VALUES:
                return MappingProjection(matrix=projection, sender=sender, receiver=receiver, name=name)
            else:
                raise CompositionError("Invalid projection ({}) specified for {}.".format(projection, self.name))
        elif isinstance(projection, ModulatoryProjection_Base):
            return projection
        # No spec at all: create a default MappingProjection between sender and receiver
        elif projection is None:
            return MappingProjection(sender=sender, receiver=receiver, name=name)
        elif not isinstance(projection, Projection):
            raise CompositionError("Invalid projection ({}) specified for {}.
Must be a Projection." .format(projection, self.name)) return projection def _parse_sender_spec(self, projection, sender): # if a sender was not passed, check for a sender OutputPort stored on the Projection object if sender is None: if hasattr(projection, "sender"): sender = projection.sender.owner else: raise CompositionError(f"{projection.name} is missing a sender specification. " f"For a Projection to be added to a Composition a sender must be specified, " "either on the Projection or in the call to Composition.add_projection(). ") # initialize all receiver-related variables graph_sender = sender_mechanism = sender_output_port = sender nested_compositions = [] if isinstance(sender, Mechanism): # Mechanism spec -- update sender_output_port to reference primary OutputPort sender_output_port = sender.output_port elif isinstance(sender, OutputPort): # InputPort spec -- update sender_mechanism and graph_sender to reference owner Mechanism sender_mechanism = graph_sender = sender.owner elif isinstance(sender, Composition): # Nested Composition Spec -- update sender_mechanism to CIM; sender_output_port to CIM's primary O.S. sender_mechanism = sender.output_CIM sender_output_port = sender_mechanism.output_port nested_compositions.append(sender) else: raise CompositionError("sender arg ({}) of call to add_projection method of {} is not a {}, {} or {}". 
format(sender, self.name, Mechanism.__name__, OutputPort.__name__, Composition.__name__)) if (not isinstance(sender_mechanism, CompositionInterfaceMechanism) and not isinstance(sender, Composition) and sender_mechanism not in self.nodes): if isinstance(sender, Port): sender_name = sender.full_name else: sender_name = sender.name # if the sender is IN a nested Composition AND sender is an OUTPUT Node # then use the corresponding CIM on the nested comp as the sender going forward sender, sender_output_port, graph_sender, sender_mechanism = \ self._get_nested_node_CIM_port(sender_mechanism, sender_output_port, NodeRole.OUTPUT) nested_compositions.append(graph_sender) if sender is None: receiver_name = 'node' if hasattr(projection, 'receiver'): receiver_name = f'{repr(projection.receiver.owner.name)}' raise CompositionError(f"A {Projection.__name__} specified to {receiver_name} in {self.name} " f"has a sender ({repr(sender_name)}) that is not (yet) in it " f"or any of its nested {Composition.__name__}s.") if hasattr(projection, "sender"): if projection.sender.owner != sender and \ projection.sender.owner != graph_sender and \ projection.sender.owner != sender_mechanism: raise CompositionError("The position of {} in {} conflicts with its sender attribute." .format(projection.name, self.name)) return sender, sender_mechanism, graph_sender, nested_compositions def _parse_receiver_spec(self, projection, receiver, sender, learning_projection): receiver_arg = receiver # if a receiver was not passed, check for a receiver InputPort stored on the Projection object if receiver is None: if hasattr(projection, "receiver"): receiver = projection.receiver.owner else: raise CompositionError("For a Projection to be added to a Composition, a receiver must be specified, " "either on the Projection or in the call to Composition.add_projection(). {}" " is missing a receiver specification. 
".format(projection.name)) # initialize all receiver-related variables graph_receiver = receiver_mechanism = receiver_input_port = receiver nested_compositions = [] if isinstance(receiver, Mechanism): # Mechanism spec -- update receiver_input_port to reference primary InputPort receiver_input_port = receiver.input_port elif isinstance(receiver, (InputPort, ParameterPort)): # InputPort spec -- update receiver_mechanism and graph_receiver to reference owner Mechanism receiver_mechanism = graph_receiver = receiver.owner elif isinstance(sender, (ControlSignal, ControlMechanism)) and isinstance(receiver, ParameterPort): # ParameterPort spec -- update receiver_mechanism and graph_receiver to reference owner Mechanism receiver_mechanism = graph_receiver = receiver.owner elif isinstance(receiver, Composition): # Nested Composition Spec -- update receiver_mechanism to CIM; receiver_input_port to CIM's primary I.S. receiver_mechanism = receiver.input_CIM receiver_input_port = receiver_mechanism.input_port nested_compositions.append(receiver) # KAM HACK 2/13/19 to get hebbian learning working for PSY/NEU 330 # Add autoassociative learning mechanism + related projections to composition as processing components elif isinstance(receiver, AutoAssociativeProjection): receiver_mechanism = receiver.owner_mech receiver_input_port = receiver_mechanism.input_port learning_projection = True elif isinstance(sender, LearningMechanism): receiver_mechanism = receiver.receiver.owner receiver_input_port = receiver_mechanism.input_port learning_projection = True else: raise CompositionError(f"receiver arg ({receiver_arg}) of call to add_projection method of {self.name} " f"is not a {Mechanism.__name__}, {InputPort.__name__} or {Composition.__name__}.") if (not isinstance(receiver_mechanism, CompositionInterfaceMechanism) and not isinstance(receiver, Composition) and receiver_mechanism not in self.nodes and not learning_projection): # if the receiver is IN a nested Composition AND receiver is 
an INPUT Node # then use the corresponding CIM on the nested comp as the receiver going forward receiver, receiver_input_port, graph_receiver, receiver_mechanism = \ self._get_nested_node_CIM_port(receiver_mechanism, receiver_input_port, NodeRole.INPUT) nested_compositions.append(graph_receiver) # Otherwise, there was a mistake in the spec if receiver is None: # raise CompositionError(f"receiver arg ({repr(receiver_arg)}) in call to add_projection method of " # f"{self.name} is not in it or any of its nested {Composition.__name__}s.") if isinstance(receiver_arg, Port): receiver_str = f"{receiver_arg} of {receiver_arg.owner}" else: receiver_str = f"{receiver_arg}" raise CompositionError(f"{receiver_str}, specified as receiver of {Projection.__name__} from " f"{sender.name}, is not in {self.name} or any {Composition.__name__}s nested " f"within it.") return receiver, receiver_mechanism, graph_receiver, receiver_input_port, \ nested_compositions, learning_projection def _get_original_senders(self, input_port, projections): original_senders = set() for original_projection in projections: if original_projection in self.projections: original_senders.add(original_projection.sender) correct_sender = original_projection.sender shadow_found = False for shadow_projection in input_port.path_afferents: if shadow_projection.sender == correct_sender: shadow_found = True break if not shadow_found: # TBI - Shadow projection type? Matrix value? 
                    new_projection = MappingProjection(sender=correct_sender,
                                                       receiver=input_port)
                    self.add_projection(new_projection, sender=correct_sender, receiver=input_port)
        return original_senders

    def _update_shadow_projections(self, context=None):
        # Reconcile each shadowing InputPort's afferents with the Projections to the port it shadows:
        # _get_original_senders adds any Projections the shadow is missing, and any afferent whose
        # sender is not among the original senders is pruned from the Composition.
        for node in self.nodes:
            for input_port in node.input_ports:
                if input_port.shadow_inputs:
                    original_senders = self._get_original_senders(input_port,
                                                                  input_port.shadow_inputs.path_afferents)
                    for shadow_projection in input_port.path_afferents:
                        if shadow_projection.sender not in original_senders:
                            self.remove_projection(shadow_projection)

            # If the node does not have any roles, it is internal
            if len(self.get_roles_by_node(node)) == 0:
                self._add_node_role(node, NodeRole.INTERNAL)

    def _check_for_projection_assignments(self, context=None):
        """Check that all Projections and Ports with require_projection_in_composition attribute are configured.

        Validate that all InputPorts with require_projection_in_composition == True have an afferent Projection.
        Validate that all OutputPorts with require_projection_in_composition == True have an efferent Projection.
        Validate that all Projections have senders and receivers.
        """
        projections = self.projections.copy()

        for node in self.nodes:
            # A Projection added directly as a node is itself checked for sender/receiver below
            if isinstance(node, Projection):
                projections.append(node)
                continue

            for input_port in node.input_ports:
                if input_port.require_projection_in_composition and not input_port.path_afferents:
                    warnings.warn(f'{InputPort.__name__} ({input_port.name}) of {node.name} '
                                  f'doesn\'t have any afferent {Projection.__name__}s')
            for output_port in node.output_ports:
                if output_port.require_projection_in_composition and not output_port.efferents:
                    warnings.warn(f'{OutputPort.__name__} ({output_port.name}) of {node.name} '
                                  f'doesn\'t have any efferent {Projection.__name__}s in {self.name}')

        for projection in projections:
            if not projection.sender:
                warnings.warn(f'{Projection.__name__} {projection.name} is missing a sender')
            if not projection.receiver:
                warnings.warn(f'{Projection.__name__} {projection.name} is missing a receiver')

    def _check_for_existing_projections(self,
                                        projection=None,
                                        sender=None,
                                        receiver=None,
                                        in_composition:bool=True):
        """Check for Projection with same sender and receiver
        If **in_composition** is True, return only Projections found in the current Composition
        If **in_composition** is False, return only Projections that are found outside the current Composition

        Return Projection or list of Projections that satisfies the conditions, else False
        """
        assert projection or (sender and receiver), \
            f'_check_for_existing_projection must be passed a projection or a sender and receiver'

        if projection:
            sender = projection.sender
            receiver = projection.receiver
        else:
            # Normalize Mechanism / Composition specs to their primary Ports (CIM Ports for Compositions)
            if isinstance(sender, Mechanism):
                sender = sender.output_port
            elif isinstance(sender, Composition):
                sender = sender.output_CIM.output_port
            if isinstance(receiver, Mechanism):
                receiver = receiver.input_port
            elif isinstance(receiver, Composition):
                receiver = receiver.input_CIM.input_port

        # All Projections from sender's Port to receiver's Port, then the subset in this Composition
        existing_projections = [proj for proj in sender.efferents if proj.receiver is receiver]
        existing_projections_in_composition = [proj for proj in existing_projections if proj in self.projections]
        assert len(existing_projections_in_composition) <= 1, \
            f"PROGRAM ERROR: More than one identical projection found " \
            f"in {self.name}: {existing_projections_in_composition}."

        if in_composition:
            if existing_projections_in_composition:
                return existing_projections_in_composition[0]
        else:
            if existing_projections and not existing_projections_in_composition:
                return existing_projections
        return False

    def _check_feedback(self, scheduler, context=None):
        # FIX: 10/2/19 - SHOULD REALLY HANDLE THIS BY DETECTING LOOPS DIRECTLY
        """Check that feedback specification is required for projections to which it has been assigned
        Rationale:
            if, after removing the feedback designation of a Projection, structural and functional dependencies
            are the same, then the designation is not needed so remove it.
        Note:
        - graph_processing.dependency_dict is used as indication of structural dependencies
        - scheduler.dependency_dict is used as indication of functional (execution) dependencies
        """

        if scheduler:
            # If an external scheduler is provided, update it with current processing graph
            try:
                scheduler._init_consideration_queue_from_graph(self.graph_processing)
            # Ignore any cycles at this point
            except ValueError:
                pass
        else:
            scheduler = self.scheduler

        already_tested = []
        for vertex in [v for v in self.graph.vertices if v.feedback==MAYBE]:
            # projection = vertex.component
            # assert isinstance(projection, Projection), \
            #     f'PROGRAM ERROR: vertex identified with feedback=True that is not a Projection'

            if vertex in already_tested:
                continue

            # All MAYBE-feedback vertices that share this vertex's sender are tested as a group
            v_set = [v for v in self.graph.vertices
                     if (v.feedback==MAYBE
                         and v.component.sender.owner is vertex.component.sender.owner)]
            for v in v_set:
                v.feedback = False

            # Update Composition's graph_processing
            self._update_processing_graph()

            # Update scheduler's consideration_queue based on update of graph_processing to detect any new cycles
            try:
                scheduler._init_consideration_queue_from_graph(self.graph_processing)
            except ValueError:
                # If a cycle is
detected, leave feedback alone feedback = 'leave' # If, when feedback is False, the dependency_dicts for the structural and execution are the same, # then no need for feedback specification, so remove it # and remove assignments of sender and receiver to corresponding feedback entries of Composition if self.graph_processing.dependency_dict == scheduler.dependency_dict: feedback = 'remove' else: feedback = 'leave' # Remove nodes that send and receive feedback Projection from feedback_senders and feedback_receivers lists if feedback == 'remove': self.feedback_senders.remove(v.component.sender.owner) self.feedback_receivers.remove(v.component.receiver.owner) # Otherwise, restore feedback assignment and scheduler's consideration_queue else: for v in v_set: v.feedback = True self._update_processing_graph() scheduler._init_consideration_queue_from_graph(self.graph_processing) already_tested.extend(v_set) # ****************************************************************************************************************** # PATHWAYS # ****************************************************************************************************************** # ----------------------------------------- PROCESSING ----------------------------------------------------------- def add_pathway(self, path): """ Adds an existing Pathway to the current Composition Arguments --------- path: the Pathway (Composition) to be added """ # identify nodes and projections nodes, projections = [], [] for c in path.graph.vertices: if isinstance(c.component, Mechanism): nodes.append(c.component) elif isinstance(c.component, Composition): nodes.append(c.component) elif isinstance(c.component, Projection): projections.append(c.component) # add all nodes first for node in nodes: self.add_node(node) # then projections for p in projections: self.add_projection(p, p.sender.owner, p.receiver.owner) self._analyze_graph() def add_linear_processing_pathway(self, pathway, *args): """Add sequence of Mechanisms or 
Compositions possibly with intercolated Projections A `MappingProjection` is created for each contiguous pair of `Mechanisms <Mechanism>` and/or Compositions in the **pathway** argument, from the `primary OutputPort <OutputPort_Primary>` of the first one to the `primary InputPort <InputPort_Primary>` of the second. Tuples (Mechanism, `NodeRoles <NodeRole>`) can be used to assign `required_roles <Composition.add_node.required_roles>` to Mechanisms. Note that any specifications of a ControlMechanism's **monitor_for_control** `argument <ControlMechanism_Monitor_for_Control_Argument>` or the **monitor** argument specified in the constructor for an ObjectiveMechanism in the **objective_mechanism** `argument <ControlMechanism_ObjectiveMechanism>` supercede any MappingProjections that would otherwise be created for them when specified in the **pathway** argument. """ nodes = [] from psyneulink.core.globals.keywords import PROJECTION, NODE def is_spec(entry, desired_type:tc.enum(NODE, PROJECTION)): """Test whether pathway entry is specified type (NODE or PROJECTION)""" node_specs = (Mechanism, Composition) proj_specs = (Projection, np.ndarray, np.matrix, str, list) if desired_type == NODE: if (isinstance(entry, node_specs) or (isinstance(entry, tuple) and isinstance(entry[0], node_specs) and isinstance(entry[1], NodeRole))): return True elif desired_type == PROJECTION: if (isinstance(entry, proj_specs) or (isinstance(entry, tuple) and isinstance(entry[0], proj_specs) and entry[1] in {True, False, MAYBE})): return True else: return False # First, verify that the pathway begins with a node if not isinstance(pathway, (list, tuple)): raise CompositionError(f"First argument in add_linear_processing_pathway method of '{self.name}' " f"{Composition.__name__} must be a list of nodes") # Then make sure the first item is a node and not a Projection if is_spec(pathway[0], NODE): self.add_nodes([pathway[0]]) # Use add_nodes so that node spec can also be a tuple with required_roles 
nodes.append(pathway[0]) else: # 'MappingProjection has no attribute _name' error is thrown when pathway[0] is passed to the error msg raise CompositionError("The first item in a linear processing pathway must be a Node (Mechanism or " "Composition).") # Then, add all of the remaining nodes in the pathway for c in range(1, len(pathway)): # if the current item is a Mechanism, Composition or (Mechanism, NodeRole(s)) tuple, add it if is_spec(pathway[c], NODE): self.add_nodes([pathway[c]]) nodes.append(pathway[c]) # FIX 8/27/19 [JDC]: GENERALIZE TO ControlMechanism # MODIFIED 8/12/19 NEW: [JDC] - AVOID DUPLCIATE CONTROL_RELATED PROJECTIONS # Then, delete any ControlMechanism that has its monitor_for_control attribute assigned # and any ObjectiveMechanism that projects to a ControlMechanism, # as well as any projections to them specified in the pathway; # this is to avoid instantiating projections to them that might conflict with those # instantiated by their constructors or, for a controller, _add_controller() items_to_delete = [] for i, item in enumerate(pathway): if ((isinstance(item, ControlMechanism) and item.monitor_for_control) or (isinstance(item, ObjectiveMechanism) and item._role == CONTROL)): items_to_delete.append(item) # Delete any projections to the ControlMechanism or ObjectiveMechanism specified in pathway if i>0 and is_spec(pathway[i - 1],PROJECTION): items_to_delete.append(pathway[i - 1]) for item in items_to_delete: if isinstance(item, ControlMechanism): arg_name = f'in the {repr(MONITOR_FOR_CONTROL)} of its constructor' else: arg_name = f'either in the {repr(MONITOR)} arg of its constructor, ' \ f'or in the {repr(MONITOR_FOR_CONTROL)} arg of its associated {ControlMechanism.__name__}' warnings.warn(f'No new {Projection.__name__}s were added to {item.name} that was included in ' f'the {repr(PATHWAY)} arg of add_linear_processing_pathway for {self.name}, ' f'since there were ones already specified {arg_name}.') del pathway[pathway.index(item)] # 
MODIFIED 8/12/19 END # Then, loop through pathway and validate that the Mechanism-Projection relationships make sense # and add MappingProjection(s) where needed projections = [] for c in range(1, len(pathway)): # if the current item is a Node if is_spec(pathway[c], NODE): if is_spec(pathway[c - 1], NODE): # if the previous item was also a node, add a MappingProjection between them if isinstance(pathway[c - 1], tuple): sender = pathway[c - 1][0] else: sender = pathway[c - 1] if isinstance(pathway[c], tuple): receiver = pathway[c][0] else: receiver = pathway[c] proj = self.add_projection(sender=sender, receiver=receiver) if proj: projections.append(proj) # if the current item is a Projection specification elif is_spec(pathway[c], PROJECTION): if c == len(pathway) - 1: raise CompositionError("{} is the last item in the pathway. A projection cannot be the last item in" " a linear processing pathway.".format(pathway[c])) # confirm that it is between two nodes, then add the projection if isinstance(pathway[c], tuple): proj = pathway[c][0] feedback = pathway[c][1] else: proj = pathway[c] feedback = False sender = pathway[c - 1] receiver = pathway[c + 1] if isinstance(sender, (Mechanism, Composition)) \ and isinstance(receiver, (Mechanism, Composition)): try: if isinstance(proj, (np.ndarray, np.matrix, list)): proj = MappingProjection(sender=sender, matrix=proj, receiver=receiver) except DuplicateProjectionError: # FIX: 7/22/19 ADD WARNING HERE?? # FIX: 7/22/19 MAKE THIS A METHOD ON Projection?? duplicate = [p for p in receiver.afferents if p in sender.efferents] assert len(duplicate)==1, \ f"PROGRAM ERROR: Could not identify duplicate on DuplicateProjectionError " \ f"for {Projection.__name__} between {sender.name} and {receiver.name} " \ f"in call to {repr('add_linear_processing_pathway')} for {self.name}." 
duplicate = duplicate[0] warning_msg = f"Projection specified between {sender.name} and {receiver.name} " \ f"in call to 'add_linear_projection' for {self.name} is a duplicate of one" # IMPLEMENTATION NOTE: Version that allows different Projections between same # sender and receiver in different Compositions # if duplicate in self.projections: # warnings.warn(f"{warning_msg} already in the Composition ({duplicate.name}) " # f"and so will be ignored.") # proj=duplicate # else: # if self.prefs.verbosePref: # warnings.warn(f" that already exists between those nodes ({duplicate.name}). The " # f"new one will be used; delete it if you want to use the existing one") # Version that forbids *any* duplicate Projections between same sender and receiver warnings.warn(f"{warning_msg} that already exists between those nodes ({duplicate.name}) " f"and so will be ignored.") proj=duplicate proj = self.add_projection(projection=proj, sender=sender, receiver=receiver, feedback=feedback, allow_duplicates=False) if proj: projections.append(proj) else: raise CompositionError( "{} is not between two Composition Nodes. A Projection in a linear processing pathway must be " "preceded by a Composition Node (Mechanism or Composition) and followed by a Composition Node" .format(pathway[c])) else: raise CompositionError("{} is not a Projection or a Composition node (Mechanism or Composition). A " "linear processing pathway must be made up of Projections and Composition Nodes." 
.format(pathway[c])) # interleave nodes and projections explicit_pathway = [nodes[0]] for i in range(len(projections)): explicit_pathway.append(projections[i]) explicit_pathway.append(nodes[i + 1]) return explicit_pathway # ------------------------------------------ LEARNING ------------------------------------------------------------ def add_linear_learning_pathway(self, pathway, learning_function, loss_function=None, learning_rate=0.05, error_function=LinearCombination(), learning_update:tc.any(bool, tc.enum(ONLINE, AFTER))=ONLINE): """Implement learning pathway (including necessary `learning components <Composition_Learning_Components>`. Generic method for implementing a learning pathway. Calls `add_linear_processing_pathway` to implement the processing components including, if necessary, the MappingProjections between Mechanisms. All of the MappingProjections (whether specified or created) are subject to learning (and are assigned as the `learned_projection <LearningMechanism.learned_projection>` attribute of the `LearningMechanisms <LeaningMechanisms>` created for the pathway. If **learning_function** is a sublcass of `LearningFunction <LearningFunctions>`, a class-specific `learning method <Composition_Learning_Methods>` is called. Some may allow the error_function to be specified, in which case it must be compatible with the class of LearningFunction specified. If **learning_function** an instantiated function, it is assigned to all of the `LearningMechanisms <LearningMechanism>` created for the MappingProjections in the pathway. A `ComparatorMechanism` is created to compute the error for the pathway, and assigned the function specified in **error_function**, which must be compatible with **learning_function**. 
See `Composition_Learning` for for a more detailed description of how learning is implemented in a Composition, including the `learning components` <Composition_Learning_Components>` that are created, as well as other `learning methods <Composition_Learning_Methods>` that can be used to implement specific algorithms. Arguments --------- pathway: List list containing either [Node1, Node2] or [Node1, MappingProjection, Node2]. If a projection is specified, that projection is the learned projection. Otherwise, a default MappingProjection is automatically generated for the learned projection. learning_rate : float : default 0.05 specifies the `learning_rate <LearningMechanism.learning_rate>` used for the **learning_function** of the `LearningMechanism` in the **pathway**. error_function : function : default LinearCombination specifies the function assigned to Mechanism used to compute the error from the target and the output (`value <Mechanism_Base.value>`) of the `TARGET` Mechanism in the **pathway**. .. note:: For most learning algorithms (and by default), a `ComparatorMechanism` is used to compute the error. However, some learning algorithms may use a different Mechanism (e.g., for `TDlearning` a `PredictionErrorMechanism` is used, which uses as its fuction `PredictionErrorDeltaFunction`. learning_update : Optional[bool|ONLINE|AFTER] : default AFTER specifies when the `matrix <MappingProjection.matrix>` parameter of the `learned_projection` is updated in each `TRIAL` when the Composition executes; it is assigned as the default value for the `learning_enabled <LearningMechanism.learning_enabled>` attribute of the `LearningMechanism <LearningMechanism>` in the pathway, and its `LearningProjection` (see `learning_enabled <LearningMechanism.learning_enabled>` for meaning of values). Returns -------- A dictionary of components that were automatically generated and added to the Composition in order to implement ReinforcementLearning in the pathway. 
{LEARNING_MECHANISM: learning_mechanism, COMPARATOR_MECHANISM: comparator, TARGET_MECHANISM: target, LEARNED_PROJECTION: learned_projection} """ if isinstance(learning_function, type) and issubclass(learning_function, BackPropagation): return self._create_backpropagation_learning_pathway(pathway, loss_function, learning_rate, error_function, learning_update) # Processing Components input_source, output_source, learned_projection = \ self._unpack_processing_components_of_learning_pathway(pathway) self.add_linear_processing_pathway([input_source, learned_projection, output_source]) # FIX: CONSOLIDATE LEARNING - WAS SPECIFIC TO RL AND NOT IN TD self.add_required_node_role(output_source, NodeRole.OUTPUT) # Learning Components target, comparator, learning_mechanism = self._create_learning_related_mechanisms(input_source, output_source, error_function, learning_function, learned_projection, learning_rate, learning_update) self.add_nodes([(target, NodeRole.TARGET), comparator, learning_mechanism], required_roles=NodeRole.LEARNING) learning_related_projections = self._create_learning_related_projections(input_source, output_source, target, comparator, learning_mechanism) self.add_projections(learning_related_projections) learning_projection = self._create_learning_projection(learning_mechanism, learned_projection) self.add_projection(learning_projection, learning_projection=True) learning_related_components = {LEARNING_MECHANISM: learning_mechanism, COMPARATOR_MECHANISM: comparator, TARGET_MECHANISM: target, LEARNED_PROJECTION: learned_projection} # Update graph in case method is called again self._analyze_graph() return learning_related_components def add_reinforcement_learning_pathway(self, pathway, learning_rate=0.05, error_function=None, learning_update:tc.any(bool, tc.enum(ONLINE, AFTER))=ONLINE): """Convenience method that calls `add_linear_learning_pathway` with **learning_function**=`Reinforcement` Arguments --------- pathway: List list containing either [Node1, 
Node2] or [Node1, MappingProjection, Node2]. If a projection is specified, that projection is the learned projection. Otherwise, a default MappingProjection is automatically generated for the learned projection. learning_rate : float : default 0.05 specifies the `learning_rate <ReinforcementLearning.learning_rate>` used for the `ReinforcementLearning` function of the `LearningMechanism` in the **pathway**. error_function : function : default LinearCombination specifies the function assigned to `ComparatorMechanism` used to compute the error from the target and the output (`value <Mechanism_Base.value>`) of the `TARGET` Mechanism in the **pathway**). learning_update : Optional[bool|ONLINE|AFTER] : default AFTER specifies when the `matrix <MappingProjection.matrix>` parameter of the `learned_projection` is updated in each `TRIAL` when the Composition executes; it is assigned as the default value for the `learning_enabled <LearningMechanism.learning_enabled>` attribute of the `LearningMechanism <LearningMechanism>` in the pathway, and its `LearningProjection` (see `learning_enabled <LearningMechanism.learning_enabled>` for meaning of values). Returns -------- A dictionary of components that were automatically generated and added to the Composition in order to implement ReinforcementLearning in the pathway. 
{LEARNING_MECHANISM: learning_mechanism, COMPARATOR_MECHANISM: comparator, TARGET_MECHANISM: target, LEARNED_PROJECTION: learned_projection} """ return self.add_linear_learning_pathway(pathway, learning_rate=learning_rate, learning_function=Reinforcement, error_function=error_function, learning_update=learning_update) def add_td_learning_pathway(self, pathway, learning_rate=0.05, error_function=None, learning_update:tc.any(bool, tc.enum(ONLINE, AFTER))=ONLINE): """Convenience method that calls `add_linear_learning_pathway` with **learning_function**=`TDLearning` Arguments --------- pathway: List list containing either [Node1, Node2] or [Node1, MappingProjection, Node2]. If a projection is specified, that projection is the learned projection. Otherwise, a default MappingProjection is automatically generated for the learned projection. learning_rate : float : default 0.05 specifies the `learning_rate <TDLearning.learning_rate>` used for the `TDLearning` function of the `LearningMechanism` in the **pathway**. error_function : function : default LinearCombination specifies the function assigned to `ComparatorMechanism` used to compute the error from the target and the output (`value <Mechanism_Base.value>`) of the `TARGET` Mechanism in the **pathway**). learning_update : Optional[bool|ONLINE|AFTER] : default AFTER specifies when the `matrix <MappingProjection.matrix>` parameter of the `learned_projection` is updated in each `TRIAL` when the Composition executes; it is assigned as the default value for the `learning_enabled <LearningMechanism.learning_enabled>` attribute of the `LearningMechanism <LearningMechanism>` in the pathway, and its `LearningProjection` (see `learning_enabled <LearningMechanism.learning_enabled>` for meaning of values). Returns -------- A dictionary of components that were automatically generated and added to the Composition in order to implement TDLearning in the pathway. 
{LEARNING_MECHANISM: learning_mechanism, COMPARATOR_MECHANISM: comparator, TARGET_MECHANISM: target, LEARNED_PROJECTION: learned_projection} """ return self.add_linear_learning_pathway(pathway, learning_rate=learning_rate, learning_function=TDLearning, learning_update=learning_update) def add_backpropagation_learning_pathway(self, pathway, learning_rate=0.05, error_function=None, loss_function:tc.enum(MSE,SSE)=MSE, learning_update:tc.optional(tc.any(bool, tc.enum(ONLINE, AFTER)))=AFTER): """Convenience method that calls `add_linear_learning_pathway` with **learning_function**=`Backpropagation` Arguments --------- pathway : list specifies list of nodes for the pathway (see `add_linear_processing_pathway` for details of specification). pathway: List specifies nodes of the pathway for the learning sequence (see `add_linear_processing_pathway` for details of specification). Any `MappingProjections <MappingProjection>` specified or constructed for the pathway are assigned as `learned_projections`. learning_rate : float : default 0.05 specifies the `learning_rate <Backpropagation.learning_rate>` used for the `Backpropagation` function of the `LearningMechanisms <LearningMechanism>` in the **pathway**. error_function : function : default LinearCombination specifies the function assigned to `ComparatorMechanism` used to compute the error from the target and the output (`value <Mechanism_Base.value>`) of the `TARGET` (last) Mechanism in the **pathway**). learning_update : Optional[bool|ONLINE|AFTER] : default AFTER specifies when the `matrix <MappingProjection.matrix>` parameters of the `learned_projections` are updated in each `TRIAL` when the Composition executes; it is assigned as the default value for the `learning_enabled <LearningMechanism.learning_enabled>` attribute of the `LearningMechanisms <LearningMechanism>` in the pathway, and their `LearningProjections <LearningProjection>` (see `learning_enabled <LearningMechanism.learning_enabled>` for meaning of values). 
Returns -------- A dictionary of components that were automatically generated and added to the Composition in order to implement Backpropagation along the pathway. {LEARNING_MECHANISM: learning_mechanism, COMPARATOR_MECHANISM: comparator, TARGET_MECHANISM: target, LEARNED_PROJECTION: learned_projection} """ return self.add_linear_learning_pathway(pathway, learning_rate=learning_rate, learning_function=BackPropagation, loss_function=loss_function, error_function=error_function, learning_update=learning_update) # NOTES: # Learning-type-specific creation methods should: # - create ComparatorMechanism and pass in as error_source (for 1st LearningMechanism in sequence in bp) # - Determine and pass error_sources (aka previous_learning_mechanism) (for bp) # - construct and pass in the learning_function # - do the following for last LearningMechanism in sequence: # learning_mechanism.output_ports[ERROR_SIGNAL].parameters.require_projection_in_composition._set(False, # override=True) # # Create_backprop... 
should pass error_function (handled by kwargs below) # Check for existence of Learning mechanism (or do this in creation method?); if one exists, compare its # ERROR_SIGNAL input_ports with error_sources and update/add any needed, as well as corresponding # error_matrices (from their learned_projections) -- do so using LearningMechanism's add_ports method); # create projections from each # Move creation of LearningProjections and learning-related projections (MappingProjections) here # ?Do add_nodes and add_projections here or in Learning-type-specific creation methods def _unpack_processing_components_of_learning_pathway(self, processing_pathway): # unpack processing components and add to composition if len(processing_pathway) == 3: input_source, learned_projection, output_source = processing_pathway elif len(processing_pathway) == 2: input_source, output_source = processing_pathway learned_projection = MappingProjection(sender=input_source, receiver=output_source) else: raise CompositionError(f"Too many components in learning pathway: {pathway}. Only single-layer learning " f"is supported by this method. See AutodiffComposition for other learning models.") return input_source, output_source, learned_projection # FIX: NOT CURRENTLY USED; IMPLEMENTED FOR FUTURE USE IN GENERALIZATION OF LEARNING METHODS def _create_learning_components(self, sender_activity_source, # aka input_source receiver_activity_source, # aka output_source error_sources, # aka comparator/previous_learning_mechanism learning_function, learned_projection, learning_rate, learning_update, target_mech=None, **kwargs # Use of type-specific learning arguments ): # ONLY DO THIS IF ONE DOESN'T ALREADY EXIST (?pass in argument determing this?) 
        # Tail of a method whose definition begins before this chunk — presumably a
        # non-terminal learning-component constructor; TODO confirm against full file.
        learning_mechanism = LearningMechanism(function=learning_function,
                                               default_variable=[sender_activity_source.output_ports[0].value,
                                                                 receiver_activity_source.output_ports[0].value,
                                                                 error_sources.output_ports[0].value],
                                               error_sources=error_sources,
                                               learning_enabled=learning_update,
                                               in_composition=True,
                                               name="Learning Mechanism for " + learned_projection.name,
                                               **kwargs)
        self.enable_learning = True
        return learning_mechanism

    def _create_learning_related_mechanisms(self,
                                            input_source,
                                            output_source,
                                            error_function,
                                            learning_function,
                                            learned_projection,
                                            learning_rate,
                                            learning_update):
        """Creates *TARGET_MECHANISM*, *COMPARATOR_MECHANISM* and *LEARNING_MECHANISM* for RL and TD learning

        If learning_function is a class, dispatch to the TD- or RL-specific creation method;
        if it is a function, build a generic Target/Comparator/LearningMechanism triplet around it.
        Returns (target_mechanism, comparator_mechanism, learning_mechanism).
        """
        if isinstance(learning_function, type):
            if issubclass(learning_function, TDLearning):
                creation_method = self._create_td_related_mechanisms
            elif issubclass(learning_function, Reinforcement):
                creation_method = self._create_rl_related_mechanisms
            else:
                raise CompositionError(f"'learning_function' argument for add_linear_learning_pathway "
                                       f"({learning_function}) must be a class of {LearningFunction.__name__}")

            target_mechanism, comparator_mechanism, learning_mechanism = creation_method(input_source,
                                                                                         output_source,
                                                                                         error_function,
                                                                                         learned_projection,
                                                                                         learning_rate,
                                                                                         learning_update)

        elif is_function_type(learning_function):
            target_mechanism = ProcessingMechanism(name='Target')
            comparator_mechanism = ComparatorMechanism(name='Comparator',
                                                       sample={NAME: SAMPLE,
                                                               VARIABLE: [0.],
                                                               WEIGHT: -1},
                                                       target={NAME: TARGET,
                                                               VARIABLE: [0.]},
                                                       function=error_function,
                                                       output_ports=[OUTCOME, MSE])
            # The learning function is instantiated with the same default_variable passed to the
            # LearningMechanism: [input activity, output activity, error signal]
            learning_mechanism = LearningMechanism(
                function=learning_function(
                    default_variable=[input_source.output_ports[0].value,
                                      output_source.output_ports[0].value,
                                      comparator_mechanism.output_ports[0].value],
                    learning_rate=learning_rate),
                default_variable=[input_source.output_ports[0].value,
                                  output_source.output_ports[0].value,
                                  comparator_mechanism.output_ports[0].value],
                error_sources=comparator_mechanism,
                learning_enabled=learning_update,
                in_composition=True,
                name="Learning Mechanism for " + learned_projection.name)
        else:
            raise CompositionError(f"'learning_function' argument of add_linear_learning_pathway "
                                   f"({learning_function}) must be a class of {LearningFunction.__name__} or a "
                                   f"learning-compatible function")

        # Last LearningMechanism in the sequence: its ERROR_SIGNAL has no downstream consumer,
        # so suppress the "no efferent Projection" warning for it
        learning_mechanism.output_ports[ERROR_SIGNAL].parameters.require_projection_in_composition._set(False,
                                                                                                        override=True)
        self.enable_learning = True
        return target_mechanism, comparator_mechanism, learning_mechanism

    def _create_learning_related_projections(self, input_source, output_source, target, comparator,
                                             learning_mechanism):
        """Construct MappingProjections among `learning components <Composition_Learning_Components>` for pathway"""

        # FIX 5/29/19 [JDC]: INTEGRATE WITH _get_back_prop_error_sources (RIGHT NOW, ONLY CALLED FOR TERMINAL SEQUENCE)
        # If a Projection already exists (overlapping pathway), reuse it rather than duplicating
        try:
            sample_projection = MappingProjection(sender=output_source, receiver=comparator.input_ports[SAMPLE])
        except DuplicateProjectionError:
            sample_projection = [p for p in output_source.efferents
                                 if p in comparator.input_ports[SAMPLE].path_afferents]
        try:
            target_projection = MappingProjection(sender=target, receiver=comparator.input_ports[TARGET])
        except DuplicateProjectionError:
            target_projection = [p for p in target.efferents
                                 if p in comparator.input_ports[TARGET].path_afferents]
        act_in_projection = MappingProjection(sender=input_source.output_ports[0],
                                              receiver=learning_mechanism.input_ports[ACTIVATION_INPUT_INDEX])
        act_out_projection = MappingProjection(sender=output_source.output_ports[0],
                                               receiver=learning_mechanism.input_ports[ACTIVATION_OUTPUT_INDEX])
        # FIX CROSS_PATHWAYS 7/28/19 [JDC]: THIS MAY NEED TO USE add_ports (SINCE ONE MAY EXIST; CONSTRUCT TEST FOR IT)
        error_signal_projection = MappingProjection(sender=comparator.output_ports[OUTCOME],
                                                    receiver=learning_mechanism.input_ports[ERROR_SIGNAL_INDEX])
        return [target_projection, sample_projection, error_signal_projection, act_out_projection,
                act_in_projection]

    def _create_learning_projection(self, learning_mechanism, learned_projection):
        """Construct LearningProjections from LearningMechanisms to learned_projections in processing pathway"""

        learning_projection = LearningProjection(name="Learning Projection",
                                                 sender=learning_mechanism.learning_signals[0],
                                                 receiver=learned_projection.parameter_ports["matrix"])
        # Mark the learned Projection so later passes know it is already under learning
        learned_projection.has_learning_projection = True
        return learning_projection

    def _create_rl_related_mechanisms(self,
                                      input_source,
                                      output_source,
                                      error_function,
                                      learned_projection,
                                      learning_rate,
                                      learning_update):
        """Create Target, Comparator and Reinforcement-based LearningMechanism for an RL pathway."""
        target_mechanism = ProcessingMechanism(name='Target')
        comparator_mechanism = ComparatorMechanism(name='Comparator',
                                                   sample={NAME: SAMPLE,
                                                           VARIABLE: [0.],
                                                           WEIGHT: -1},
                                                   target={NAME: TARGET,
                                                           VARIABLE: [0.]},
                                                   function=error_function,
                                                   output_ports=[OUTCOME, MSE])
        learning_mechanism = \
            LearningMechanism(function=Reinforcement(default_variable=[input_source.output_ports[0].value,
                                                                       output_source.output_ports[0].value,
                                                                       comparator_mechanism.output_ports[0].value],
                                                     learning_rate=learning_rate),
                              default_variable=[input_source.output_ports[0].value,
                                                output_source.output_ports[0].value,
                                                comparator_mechanism.output_ports[0].value],
                              error_sources=comparator_mechanism,
                              learning_enabled=learning_update,
                              in_composition=True,
                              name="Learning Mechanism for " + learned_projection.name)
        return target_mechanism, comparator_mechanism, learning_mechanism

    def _create_td_related_mechanisms(self,
                                      input_source,
                                      output_source,
                                      error_function,
                                      learned_projection,
                                      learning_rate,
                                      learning_update):
        """Create Target, PredictionErrorMechanism and TDLearning LearningMechanism for a TD pathway.

        NOTE: unlike the RL variant, the comparator uses PredictionErrorDeltaFunction and the
        error_function argument is not used here.
        """
        target_mechanism = ProcessingMechanism(name='Target',
                                               default_variable=output_source.defaults.value)
        comparator_mechanism = PredictionErrorMechanism(name='PredictionError',
                                                        sample={NAME: SAMPLE,
                                                                VARIABLE: output_source.defaults.value},
                                                        target={NAME: TARGET,
                                                                VARIABLE: output_source.defaults.value},
                                                        function=PredictionErrorDeltaFunction(gamma=1.0))
        learning_mechanism = \
LearningMechanism(function=TDLearning(learning_rate=learning_rate), default_variable=[input_source.output_ports[0].defaults.value, output_source.output_ports[0].defaults.value, comparator_mechanism.output_ports[0].defaults.value], error_sources=comparator_mechanism, learning_enabled=learning_update, in_composition=True, name="Learning Mechanism for " + learned_projection.name) return target_mechanism, comparator_mechanism, learning_mechanism def _create_backpropagation_learning_pathway(self, pathway, loss_function, learning_rate=0.05, error_function=None, learning_update:tc.optional(tc.any(bool, tc.enum(ONLINE, AFTER)))=AFTER): # FIX: LEARNING CONSOLIDATION - Can get rid of this: if not error_function: error_function = LinearCombination() # Add pathway to graph and get its full specification (includes all ProcessingMechanisms and MappingProjections) processing_pathway = self.add_linear_processing_pathway(pathway) path_length = len(processing_pathway) # Pathway length must be >=3 (Mechanism, Projection, Mechanism if path_length >= 3: # get the "terminal_sequence" -- # the last 2 nodes in the back prop pathway and the projection between them # these components are are processed separately because # they inform the construction of the Target and Comparator mechs terminal_sequence = processing_pathway[path_length - 3: path_length] else: raise CompositionError(f"Backpropagation pathway specification ({pathway}) must not contain " f"at least three components " f"([{Mechanism.__name__}, {Projection.__name__}, {Mechanism.__name__}]).") # Unpack and process terminal_sequence: input_source, learned_projection, output_source = terminal_sequence # If pathway includes existing terminal_sequence for the output_source, use that if output_source in self._terminal_backprop_sequences: # FIX CROSSED_PATHWAYS 7/28/19 [JDC]: # THIS SHOULD BE INTEGRATED WITH CALL TO _create_terminal_backprop_learning_components # ** NEED TO CHECK WHETHER LAST NODE IN THE SEQUENCE IS TERMINAL AND IF SO: 
            #        ASSIGN USING: self.add_required_node_role(output_source, NodeRole.OUTPUT)
            # If learned_projection already has a LearningProjection (due to pathway overlap),
            #    use those terminal sequence components
            if (learned_projection.has_learning_projection
                    and any([lp for lp in learned_projection.parameter_ports[MATRIX].mod_afferents
                             if lp in self.projections])):
                target = self._terminal_backprop_sequences[output_source][TARGET_MECHANISM]
                comparator = self._terminal_backprop_sequences[output_source][COMPARATOR_MECHANISM]
                learning_mechanism = self._terminal_backprop_sequences[output_source][LEARNING_MECHANISM]

            # Otherwise, create new ones
            else:
                target, comparator, learning_mechanism = \
                    self._create_terminal_backprop_learning_components(input_source,
                                                                       output_source,
                                                                       error_function,
                                                                       loss_function,
                                                                       learned_projection,
                                                                       learning_rate,
                                                                       learning_update)
            sequence_end = path_length - 3

        # # FIX: ALTERNATIVE IS TO TEST WHETHER IT PROJECTIONS TO ANY MECHANISMS WITH LEARNING ROLE
        # Otherwise, if output_source already projects to a LearningMechanism, integrate with existing sequence
        elif any(isinstance(p.receiver.owner, LearningMechanism) for p in output_source.efferents):
            # Set learning_mechanism to the one to which output_source projects
            learning_mechanism = next((p.receiver.owner for p in output_source.efferents
                                       if isinstance(p.receiver.owner, LearningMechanism)))
            # # Use existing target and comparator to learning_mechanism for Mechanism to which output_source project
            # target = self._terminal_backprop_sequences[output_source][TARGET_MECHANISM]
            # comparator = self._terminal_backprop_sequences[output_source][COMPARATOR_MECHANISM]
            # No new terminal components are needed in this case
            target = None
            comparator = None
            sequence_end = path_length - 1

        # Otherwise create terminal_sequence for the sequence,
        #    and eliminate existing terminal_sequences previously created for Mechanisms now in the pathway
        else:
            # Eliminate existing comparators and targets for Mechanisms now in the pathway that were output_sources
            #    (i.e., ones that belong to previously-created sequences that overlap with the current one)
            for pathway_mech in [m for m in pathway if isinstance(m, Mechanism)]:

                old_comparator = next((p.receiver.owner for p in pathway_mech.efferents
                                       if (isinstance(p.receiver.owner, ComparatorMechanism)
                                           and p.receiver.owner in self.get_nodes_by_role(NodeRole.LEARNING))),
                                      None)
                if old_comparator:
                    old_target = next((p.sender.owner for p in old_comparator.input_ports[TARGET].path_afferents
                                       if p.sender.owner in self.get_nodes_by_role(NodeRole.TARGET)),
                                      None)
                    self.remove_nodes([old_comparator, old_target])
                    # FIX CROSSING_PATHWAYS [JDC]: MAKE THE FOLLOWING A METHOD?
                    # Collect InputPorts that received error_signal projections from the old_comparator
                    #    and delete after old_comparator has been deleted
                    #    (i.e., after those InputPorts have been vacated)
                    old_error_signal_input_ports = []
                    for error_projection in old_comparator.output_port.efferents:
                        old_error_signal_input_ports.append(error_projection.receiver)
                    Mechanism_Base._delete_mechanism(old_comparator)
                    Mechanism_Base._delete_mechanism(old_target)
                    for input_port in old_error_signal_input_ports:
                        input_port.owner.remove_ports(input_port)
                    del self._terminal_backprop_sequences[pathway_mech]
                    del self.required_node_roles[self.required_node_roles.index((pathway_mech, NodeRole.OUTPUT))]

            # Create terminal_sequence
            target, comparator, learning_mechanism = \
                self._create_terminal_backprop_learning_components(input_source,
                                                                   output_source,
                                                                   error_function,
                                                                   loss_function,
                                                                   learned_projection,
                                                                   learning_rate,
                                                                   learning_update)
            self._terminal_backprop_sequences[output_source] = {LEARNING_MECHANISM: learning_mechanism,
                                                                TARGET_MECHANISM: target,
                                                                COMPARATOR_MECHANISM: comparator}
            self.add_required_node_role(pathway[-1], NodeRole.OUTPUT)

            sequence_end = path_length - 3

        # loop backwards through the rest of the pathway to create and connect
        #    the remaining learning mechanisms
        learning_mechanisms = [learning_mechanism]
        learned_projections = [learned_projection]
        # Step backwards by 2 so each iteration sees (Mechanism, Projection, Mechanism)
        for i in range(sequence_end, 1, -2):
            # set variables for this iteration
            input_source = processing_pathway[i - 2]
            learned_projection = processing_pathway[i - 1]
            output_source = processing_pathway[i]

            learning_mechanism = self._create_non_terminal_backprop_learning_components(input_source,
                                                                                        output_source,
                                                                                        learned_projection,
                                                                                        learning_rate,
                                                                                        learning_update)
            learning_mechanisms.append(learning_mechanism)
            learned_projections.append(learned_projection)

        # Add error_signal projections to any learning_mechanisms that are now dependent on the new one
        for lm in learning_mechanisms:
            if lm.dependent_learning_mechanisms:
                projections = self._add_error_projection_to_dependent_learning_mechs(lm)
                self.add_projections(projections)

        # Suppress "no efferent connections" warning for:
        #    - error_signal OutputPort of last LearningMechanism in sequence
        #    - comparator
        # NOTE(review): `.set` is used here while `_set(..., override=True)` is used elsewhere
        #    in this file for the same parameter — confirm the two are equivalent.
        learning_mechanisms[-1].output_ports[ERROR_SIGNAL].parameters.require_projection_in_composition.set(
            False,
            override=True)
        if comparator:
            for s in comparator.output_ports:
                s.parameters.require_projection_in_composition.set(False,
                                                                   override=True)

        learning_related_components = {LEARNING_MECHANISM: learning_mechanisms,
                                       COMPARATOR_MECHANISM: comparator,
                                       TARGET_MECHANISM: target,
                                       LEARNED_PROJECTION: learned_projections}

        # Update graph in case method is called again
        self._analyze_graph()

        return learning_related_components

    def _create_terminal_backprop_learning_components(self,
                                                      input_source,
                                                      output_source,
                                                      error_function,
                                                      loss_function,
                                                      learned_projection,
                                                      learning_rate,
                                                      learning_update):
        """Create ComparatorMechanism, LearningMechanism and LearningProjection for Component in learning sequence"""

        # target = self._terminal_backprop_sequences[output_source][TARGET_MECHANISM]
        # comparator = self._terminal_backprop_sequences[output_source][COMPARATOR_MECHANISM]
        # learning_mechanism = self._terminal_backprop_sequences[output_source][LEARNING_MECHANISM]

        # If target and comparator already exist (due to overlapping pathway), use those
        try:
            target_mechanism = \
                self._terminal_backprop_sequences[output_source][TARGET_MECHANISM]
            comparator_mechanism = self._terminal_backprop_sequences[output_source][COMPARATOR_MECHANISM]

        # Otherwise, create new ones
        except KeyError:
            target_mechanism = ProcessingMechanism(name='Target',
                                                   default_variable=output_source.output_ports[0].value)
            comparator_mechanism = ComparatorMechanism(name='Comparator',
                                                       target={NAME: TARGET,
                                                               VARIABLE: target_mechanism.output_ports[0].value},
                                                       sample={NAME: SAMPLE,
                                                               VARIABLE: output_source.output_ports[0].value,
                                                               WEIGHT: -1},
                                                       function=error_function,
                                                       output_ports=[OUTCOME, MSE])

        learning_function = BackPropagation(default_variable=[input_source.output_ports[0].value,
                                                              output_source.output_ports[0].value,
                                                              comparator_mechanism.output_ports[0].value],
                                            activation_derivative_fct=output_source.function.derivative,
                                            learning_rate=learning_rate,
                                            loss_function=loss_function)

        learning_mechanism = LearningMechanism(function=learning_function,
                                               default_variable=[input_source.output_ports[0].value,
                                                                 output_source.output_ports[0].value,
                                                                 comparator_mechanism.output_ports[0].value],
                                               error_sources=comparator_mechanism,
                                               learning_enabled=learning_update,
                                               in_composition=True,
                                               name="Learning Mechanism for " + learned_projection.name)

        self.add_nodes(nodes=[(target_mechanism, NodeRole.TARGET),
                              comparator_mechanism,
                              learning_mechanism],
                       required_roles=NodeRole.LEARNING)

        learning_related_projections = self._create_learning_related_projections(input_source,
                                                                                 output_source,
                                                                                 target_mechanism,
                                                                                 comparator_mechanism,
                                                                                 learning_mechanism)
        self.add_projections(learning_related_projections)

        learning_projection = self._create_learning_projection(learning_mechanism, learned_projection)
        # feedback=True: the LearningProjection closes a loop in the graph
        self.add_projection(learning_projection, feedback=True)

        self.enable_learning = True

        return target_mechanism, comparator_mechanism, learning_mechanism

    def _create_non_terminal_backprop_learning_components(self,
                                                          input_source,
                                                          output_source,
                                                          learned_projection,
                                                          learning_rate,
                                                          learning_update):
        """Create (or reuse) the LearningMechanism for a non-terminal step of a backprop pathway."""

        # Get existing LearningMechanism if one exists (i.e., if this is a crossing point with another pathway)
        learning_mechanism = \
            next((lp.receiver.owner for lp in learned_projection.parameter_ports[MATRIX].mod_afferents
                  if isinstance(lp, LearningProjection)),
                 None)

        # If learning_mechanism exists:
        #    error_sources will be empty (as they have been dealt with in self._get_back_prop_error_sources)
        #    error_projections will contain list of any created to be added to the Composition below
        if learning_mechanism:
            error_sources, error_projections = self._get_back_prop_error_sources(output_source, learning_mechanism)
        # If learning_mechanism does not yet exist:
        #    error_sources will contain ones needed to create learning_mechanism
        #    error_projections will be empty since they can't be created until the learning_mechanism is created below;
        #    they will be created (using error_sources) when, and determined after learning_mechanism is created below
        else:
            error_sources, error_projections = self._get_back_prop_error_sources(output_source)
            error_signal_template = [error_source.output_ports[ERROR_SIGNAL].value for error_source in error_sources]
            default_variable = [input_source.output_ports[0].value,
                                output_source.output_ports[0].value] + error_signal_template

            learning_function = BackPropagation(default_variable=[input_source.output_ports[0].value,
                                                                  output_source.output_ports[0].value,
                                                                  error_signal_template[0]],
                                                activation_derivative_fct=output_source.function.derivative,
                                                learning_rate=learning_rate)

            learning_mechanism = LearningMechanism(function=learning_function,
                                                   # default_variable=[input_source.output_ports[0].value,
                                                   #                   output_source.output_ports[0].value,
                                                   #                   error_signal_template],
                                                   default_variable=default_variable,
                                                   error_sources=error_sources,
                                                   learning_enabled=learning_update,
                                                   in_composition=True,
                                                   name="Learning Mechanism for " + learned_projection.name)

            # Create MappingProjections from ERROR_SIGNAL OutputPort of each error_source
            #    to corresponding error_input_ports
            for i, error_source in enumerate(error_sources):
                error_projection = MappingProjection(sender=error_source,
                                                     receiver=learning_mechanism.error_signal_input_ports[i])
                error_projections.append(error_projection)

        self.add_node(learning_mechanism, required_roles=NodeRole.LEARNING)

        act_in_projection = MappingProjection(sender=input_source.output_ports[0],
                                              receiver=learning_mechanism.input_ports[0])
        act_out_projection = MappingProjection(sender=output_source.output_ports[0],
                                               receiver=learning_mechanism.input_ports[1])
        self.add_projections([act_in_projection, act_out_projection] + error_projections)

        learning_projection = self._create_learning_projection(learning_mechanism, learned_projection)
        self.add_projection(learning_projection, feedback=True)

        return learning_mechanism

    def _get_back_prop_error_sources(self, receiver_activity_mech, learning_mech=None):
        # FIX CROSSED_PATHWAYS [JDC]: GENERALIZE THIS TO HANDLE COMPARATOR/TARGET ASSIGNMENTS IN BACKPROP
        #                             AND THEN TO HANDLE ALL FORMS OF LEARNING (AS BELOW)
        #  REFACTOR TO DEAL WITH CROSSING PATHWAYS (?CREATE METHOD ON LearningMechanism TO DO THIS?):
        #  1) Determine whether this is a terminal sequence:
        #     - use arg passed in or determine from context
        #       (see current implementation in add_backpropagation_learning_pathway)
        #     - for terminal sequence, handle target and sample projections as below
        #  2) For non-terminal sequences, determine # of error_signals coming from LearningMechanisms associated with
        #     all efferent projections of ProcessingMechanism that projects to ACTIVATION_OUTPUT of LearningMechanism
        #     - check validity of existing error_signal projections with respect to those and, if possible,
        #       their correspondence with error_matrices
        #     - check if any ERROR_SIGNAL input_ports are empty (vacated by terminal sequence elements deleted in
        #       add_projection)
        #     - call add_ports method on LearningMechanism to add new ERROR_SIGNAL input_port to its input_ports
        #       and error_matrix to its self.error_matrices attribute
        #     - add new error_signal projection
        """Add any LearningMechanisms \
associated with efferent projection from receiver_activity_mech"""
        error_sources = []
        error_projections = []

        # First get all efferents of receiver_activity_mech with a LearningProjection that are in current Composition
        for efferent in [p for p in receiver_activity_mech.efferents
                         if (hasattr(p, 'has_learning_projection')
                             and p.has_learning_projection
                             and p in self.projections)]:
            # Then get any LearningProjections to that efferent that are in current Composition
            for learning_projection in [mod_aff for mod_aff in efferent.parameter_ports[MATRIX].mod_afferents
                                        if (isinstance(mod_aff, LearningProjection) and mod_aff in self.projections)]:
                error_source = learning_projection.sender.owner
                if (error_source not in self.nodes  # error_source is not in the Composition
                        or (learning_mech  # learning_mech passed in
                            # the error_source is already associated with learning_mech
                            and (error_source in learning_mech.error_sources)
                            # and the error_source already sends a Projection to learning_mech
                            and (learning_mech in [p.receiver.owner for p in error_source.efferents]))):
                    continue  # ignore the error_source
                error_sources.append(error_source)

                # If learning_mech was passed in, add error_source to its list of error_signal_input_ports
                if learning_mech:
                    # FIX: REPLACE WITH learning_mech._add_error_signal_input_port ONCE IMPLEMENTED
                    # Reuse a vacated ERROR_SIGNAL InputPort if one exists; otherwise add a new one
                    error_signal_input_port = next((e for e in learning_mech.error_signal_input_ports
                                                    if not e.path_afferents), None)
                    if error_signal_input_port is None:
                        error_signal_input_port = learning_mech.add_ports(
                            InputPort(projections=error_source.output_ports[ERROR_SIGNAL],
                                      name=ERROR_SIGNAL,
                                      context=Context(source=ContextFlags.METHOD)),
                            context=Context(source=ContextFlags.METHOD))
                    # Create Projection here so that don't have to worry about determining correct
                    #    error_signal_input_port of learning_mech in _create_non_terminal_backprop_learning_components
                    error_projections.append(MappingProjection(sender=error_source.output_ports[ERROR_SIGNAL],
                                                               receiver=error_signal_input_port))

        # Return error_sources so they can be used to create a new LearningMechanism if needed
        # Return error_projections created to existing learning_mech
        #    so they can be added to the Composition by _create_non_terminal_backprop_learning_components
        return error_sources, error_projections

    def _get_backprop_error_projections(self, learning_mech, receiver_activity_mech):
        """Create and return error-signal MappingProjections to learning_mech from LearningMechanisms
        associated with the efferents of receiver_activity_mech (backprop crossing-pathway support)."""
        error_sources = []
        error_projections = []
        # for error_source in learning_mech.error_sources:
        #     if error_source in self.nodes:
        #         error_sources.append(error_source)
        # Add any LearningMechanisms associated with efferent projection from receiver_activity_mech
        # First get all efferents of receiver_activity_mech with a LearningProjection that are in current Composition
        for efferent in [p for p in receiver_activity_mech.efferents
                         if (hasattr(p, 'has_learning_projection')
                             and p.has_learning_projection
                             and p in self.projections)]:
            # Then any LearningProjections to that efferent that are in current Composition
            for learning_projection in [mod_aff for mod_aff in efferent.parameter_ports[MATRIX].mod_afferents
                                        if (isinstance(mod_aff, LearningProjection) and mod_aff in self.projections)]:
                error_source = learning_projection.sender.owner
                # Skip error_sources already fully wired to learning_mech
                if (error_source in learning_mech.error_sources
                        and error_source in self.nodes
                        and learning_mech in [p.receiver.owner for p in error_source.efferents]):
                    continue
                error_sources.append(error_source)
                # FIX: REPLACE WITH learning_mech._add_error_signal_input_port ONCE IMPLEMENTED
                error_signal_input_port = next((e for e in learning_mech.error_signal_input_ports
                                                if not e.path_afferents), None)
                if error_signal_input_port is None:
                    error_signal_input_port = learning_mech.add_ports(
                        InputPort(projections=error_source.output_ports[ERROR_SIGNAL],
                                  name=ERROR_SIGNAL,
                                  context=Context(source=ContextFlags.METHOD)),
                        context=Context(source=ContextFlags.METHOD))
                # DOES THE ABOVE GENERATE A PROJECTION?  IF SO, JUST GET AND RETURN THAT; ELSE DO THE FOLLOWING:
                error_projections.append(MappingProjection(sender=error_source.output_ports[ERROR_SIGNAL],
                                                           receiver=error_signal_input_port))
        return error_projections
        #  2) For non-terminal sequences, determine # of error_signals coming from LearningMechanisms associated with
        #     all efferent projections of ProcessingMechanism that projects to ACTIVATION_OUTPUT of LearningMechanism
        #     - check validity of existing error_signal projections with respect to those and, if possible,
        #       their correspondence with error_matrices
        #     - check if any ERROR_SIGNAL input_ports are empty (vacated by terminal sequence elements deleted in
        #       add_projection)
        #     - call add_ports method on LearningMechanism to add new ERROR_SIGNAL input_port to its input_ports
        #       and error_matrix to its self.error_matrices attribute
        #     - add new error_signal projection

    def _add_error_projection_to_dependent_learning_mechs(self, error_source):
        """Add ERROR_SIGNAL Projections from error_source to LearningMechanisms that depend on it;
        returns the list of Projections created (each via the added InputPort's path_afferents)."""
        projections = []
        # Get all afferents to receiver_activity_mech in Composition that have LearningProjections
        for afferent in [p for p in error_source.input_source.path_afferents
                         if (p in self.projections
                             and hasattr(p, 'has_learning_projection')
                             and p.has_learning_projection)]:
            # For each LearningProjection to that afferent, if its LearningMechanism doesn't already receive one
            for learning_projection in [lp for lp in afferent.parameter_ports[MATRIX].mod_afferents
                                        if (isinstance(lp, LearningProjection)
                                            and error_source not in lp.sender.owner.error_sources)]:
                dependent_learning_mech = learning_projection.sender.owner
                error_signal_input_port = dependent_learning_mech.add_ports(
                    InputPort(projections=error_source.output_ports[ERROR_SIGNAL],
                              name=ERROR_SIGNAL,
                              context=Context(source=ContextFlags.METHOD)),
                    context=Context(source=ContextFlags.METHOD))
                # add_ports created the Projection; retrieve it from the new InputPort
                projections.append(error_signal_input_port[0].path_afferents[0])
                # projections.append(MappingProjection(sender=error_source.output_ports[ERROR_SIGNAL],
                #                                      receiver=error_signal_input_port[0]))
        return \
projections # ****************************************************************************************************************** # CONTROL # ****************************************************************************************************************** def add_controller(self, controller:ControlMechanism): """ Add an `OptimizationControlMechanism` as the `controller <Composition.controller>` of the Composition, which gives the OCM access to the `Composition`'s `evaluate <Composition.evaluate>` method. This allows the OCM to use simulations to determine an optimal Control policy. """ if not isinstance(controller, ControlMechanism): raise CompositionError(f"Specification of {repr(CONTROLLER)} arg for {self.name} " f"must be a {repr(ControlMechanism.__name__)} ") # VALIDATE AND ADD CONTROLLER # Warn for request to assign the ControlMechanism already assigned and ignore if controller is self.controller: warnings.warn(f"{controller.name} has already been assigned as the {CONTROLLER} " f"for {self.name}; assignment ignored.") return # Warn for request to assign ControlMechanism that is already the controller of another Composition if hasattr(controller, COMPOSITION) and controller.composition is not self: warnings.warn(f"{controller} has already been assigned as the {CONTROLLER} " f"for another {COMPOSITION} ({controller.composition.name}); assignment ignored.") return # Warn if current one is being replaced if self.controller and self.prefs.verbosePref: warnings.warn(f"The existing {CONTROLLER} for {self.name} ({self.controller.name}) " f"is being replaced by {controller.name}.") controller.composition = self self.controller = controller self.node_ordering.append(controller) if self.controller.objective_mechanism: self.add_node(self.controller.objective_mechanism) self.enable_controller = True controller._activate_projections_for_compositions(self) self._analyze_graph() self._update_shadows_dict(controller) # INSTANTIATE SHADOW_INPUT PROJECTIONS # Skip controller's 
first (OUTCOME) input_port (that receives the Projection from its objective_mechanism input_cims=[self.input_CIM] + [comp.input_CIM for comp in self._get_nested_compositions()] # For the rest of the controller's input_ports if they are marked as receiving SHADOW_INPUTS, # instantiate the shadowing Projection to them from the sender to the shadowed InputPort for input_port in controller.input_ports[1:]: if hasattr(input_port, SHADOW_INPUTS) and input_port.shadow_inputs is not None: for proj in input_port.shadow_inputs.path_afferents: sender = proj.sender if sender.owner not in input_cims: self.add_projection(projection=MappingProjection(sender=sender, receiver=input_port), sender=sender.owner, receiver=controller) shadow_proj._activate_for_compositions(self) else: try: shadow_proj = MappingProjection(sender=proj.sender, receiver=input_port) shadow_proj._activate_for_compositions(self) except DuplicateProjectionError: pass for proj in input_port.path_afferents: proj._activate_for_compositions(self) # Check whether controller has input, and if not then disable if not (isinstance(self.controller.input_ports, ContentAddressableList) and self.controller.input_ports): # If controller was enabled, warn that it has been disabled if self.enable_controller: warnings.warn(f"{self.controller.name} for {self.name} has no input_ports, " f"so controller will be disabled.") self.enable_controller = False return # ADD ANY ControlSignals SPECIFIED BY NODES IN COMPOSITION # Get rid of default ControlSignal if it has no ControlProjections controller._remove_default_control_signal(type=CONTROL_SIGNAL) # Add any ControlSignals specified for ParameterPorts of nodes already in the Composition control_signal_specs = self._get_control_signals_for_composition() for ctl_sig_spec in control_signal_specs: # FIX: 9/14/19: THIS SHOULD BE HANDLED IN _instantiate_projection_to_port # CALLED FROM _instantiate_control_signal # SHOULD TRAP THAT ERROR AND GENERATE CONTEXT-APPROPRIATE ERROR MESSAGE # 
Don't add any that are already on the ControlMechanism # FIX: 9/14/19 - IS THE CONTEXT CORRECT (TRY TRACKING IN SYSTEM TO SEE WHAT CONTEXT IS): new_signal = controller._instantiate_control_signal(control_signal=ctl_sig_spec, context=Context(source=ContextFlags.COMPOSITION)) controller.control.append(new_signal) # FIX: 9/15/19 - WHAT IF NODE THAT RECEIVES ControlProjection IS NOT YET IN COMPOSITON: # ?DON'T ASSIGN ControlProjection? # ?JUST DON'T ACTIVATE IT FOR COMPOSITON? # ?PUT IT IN aux_components FOR NODE? # ! TRACE THROUGH _activate_projections_for_compositions TO SEE WHAT IT CURRENTLY DOES controller._activate_projections_for_compositions(self) def _get_control_signals_for_composition(self): """Return list of ControlSignals specified by nodes in the Composition Generate list of control signal specifications from ParameterPorts of Mechanisms that have been specified for control. The specifications can be: ControlProjections (with deferred_init()) # FIX: 9/14/19 - THIS SHOULD ALREADY HAVE BEEN PARSED INTO ControlProjection WITH DEFFERRED_INIT: # OTHERWISE, NEED TO ADD HANDLING OF IT BELOW ControlSignals (e.g., in a 2-item tuple specification for the parameter); Note: The initialization of the ControlProjection and, if specified, the ControlSignal are completed in the call to controller_instantiate_control_signal() in add_controller. Mechanism can be in the Compositon itself, or in a nested Composition that does not have its own controller. 
""" control_signal_specs = [] for node in self.nodes: if isinstance(node, Composition): # Get control signal specifications for nested composition if it does not have its own controller if node.controller: control_signal_specs.append(node._get_control_signals_for_composition()) elif isinstance(node, Mechanism): control_signal_specs.extend(node._get_parameter_port_deferred_init_control_specs()) return control_signal_specs def _build_predicted_inputs_dict(self, predicted_input): inputs = {} # ASSUMPTION: input_ports[0] is NOT a feature and input_ports[1:] are features # If this is not a good assumption, we need another way to look up the feature InputPorts # of the OCM and know which InputPort maps to which predicted_input value nested_nodes = dict(self._get_nested_nodes()) for j in range(len(self.controller.input_ports) - 1): input_port = self.controller.input_ports[j + 1] if hasattr(input_port, SHADOW_INPUTS) and input_port.shadow_inputs is not None: owner = input_port.shadow_inputs.owner if not owner in nested_nodes: inputs[input_port.shadow_inputs.owner] = predicted_input[j] else: comp = nested_nodes[owner] if not comp in inputs: inputs[comp]=[[predicted_input[j]]] else: inputs[comp]=np.concatenate([[predicted_input[j]],inputs[comp][0]]) return inputs def reshape_control_signal(self, arr): current_shape = np.shape(arr) if len(current_shape) > 2: newshape = (current_shape[0], current_shape[1]) newarr = np.reshape(arr, newshape) arr = tuple(newarr[i].item() for i in range(len(newarr))) return np.array(arr) def _get_total_cost_of_control_allocation(self, control_allocation, context, runtime_params): total_cost = 0. if control_allocation is not None: # using "is not None" in case the control allocation is 0. base_control_allocation = self.reshape_control_signal(self.controller.parameters.value._get(context)) candidate_control_allocation = self.reshape_control_signal(control_allocation) # Get reconfiguration cost for candidate control signal reconfiguration_cost = 0. 
if callable(self.controller.compute_reconfiguration_cost): reconfiguration_cost = self.controller.compute_reconfiguration_cost([candidate_control_allocation, base_control_allocation]) self.controller.reconfiguration_cost.set(reconfiguration_cost, context) # Apply candidate control signal self.controller._apply_control_allocation(candidate_control_allocation, context=context, runtime_params=runtime_params, ) # Get control signal costs all_costs = self.controller.parameters.costs._get(context) + [reconfiguration_cost] # Compute a total for the candidate control signal(s) total_cost = self.controller.combine_costs(all_costs) return total_cost def evaluate( self, predicted_input=None, control_allocation=None, num_simulation_trials=None, runtime_params=None, base_context=Context(execution_id=None), context=None, execution_mode=False, return_results=False, ): """Runs a simulation of the `Composition`, with the specified control_allocation, excluding its `controller <Composition.controller>` in order to return the `net_outcome <ControlMechanism.net_outcome>` of the Composition, according to its `controller <Composition.controller>` under that control_allocation. All values are reset to pre-simulation values at the end of the simulation. 
""" # Apply candidate control to signal(s) for the upcoming simulation and determine its cost total_cost = self._get_total_cost_of_control_allocation(control_allocation, context, runtime_params) # Build input dictionary for simulation inputs = self._build_predicted_inputs_dict(predicted_input) # Run Composition in "SIMULATION" context if self._animate is not False and self._animate_simulations is not False: animate = self._animate buffer_animate_state = None else: animate = False buffer_animate_state = self._animate context.add_flag(ContextFlags.SIMULATION) context.remove_flag(ContextFlags.CONTROL) results = self.run(inputs=inputs, context=context, runtime_params=runtime_params, num_trials=num_simulation_trials, animate=animate, bin_execute=execution_mode, skip_initialization=True, ) context.remove_flag(ContextFlags.SIMULATION) context.add_flag(ContextFlags.CONTROL) if buffer_animate_state: self._animate = buffer_animate_state # Store simulation results on "base" composition if self.initialization_status != ContextFlags.INITIALIZING: try: self.parameters.simulation_results._get(base_context).append( self.get_output_values(context)) except AttributeError: self.parameters.simulation_results._set([self.get_output_values(context)], base_context) # Update input ports in order to get correct value for "outcome" (from objective mech) self.controller._update_input_ports(context, runtime_params) outcome = self.controller.input_port.parameters.value._get(context) if outcome is None: net_outcome = 0.0 else: # Compute net outcome based on the cost of the simulated control allocation (usually, net = outcome - cost) net_outcome = self.controller.compute_net_outcome(outcome, total_cost) if return_results: return net_outcome, results else: return net_outcome # ****************************************************************************************************************** # SHOW_GRAPH # 
****************************************************************************************************************** @tc.typecheck @handle_external_context(execution_id=NotImplemented) def show_graph(self, show_node_structure:tc.any(bool, tc.enum(VALUES, LABELS, FUNCTIONS, MECH_FUNCTION_PARAMS, STATE_FUNCTION_PARAMS, ROLES, ALL))=False, show_nested:tc.optional(tc.any(bool,dict,tc.enum(ALL)))=ALL, show_controller:tc.any(bool, tc.enum(AGENT_REP))=False, show_cim:bool=False, show_learning:bool=False, show_headers:bool=True, show_types:bool=False, show_dimensions:bool=False, show_projection_labels:bool=False, direction:tc.enum('BT', 'TB', 'LR', 'RL')='BT', # active_items:tc.optional(list)=None, active_items=None, active_color=BOLD, input_color='green', output_color='red', input_and_output_color='brown', # feedback_color='yellow', controller_color='blue', learning_color='orange', composition_color='pink', control_projection_arrow='box', feedback_shape = 'septagon', cim_shape='square', output_fmt:tc.enum('pdf','gv','jupyter','gif')='pdf', context=None, **kwargs): """ show_graph( \ show_node_structure=False, \ show_nested=True, \ show_controller=False, \ show_cim=False, \ show_learning=False, \ show_headers=True, \ show_types=False, \ show_dimensions=False, \ show_projection_labels=False, \ direction='BT', \ active_items=None, \ active_color=BOLD, \ input_color='green', \ output_color='red', \ input_and_output_color='brown', \ controller_color='blue', \ composition_color='pink', \ feedback_shape = 'septagon', \ cim_shape='square', \ output_fmt='pdf', \ context=None) Show graphical display of Components in a Composition's graph. .. note:: This method relies on `graphviz <http://www.graphviz.org>`_, which must be installed and imported (standard with PsyNeuLink pip install) See `Visualizing a Composition <Composition_Visualization>` for details and examples. 
Arguments --------- show_node_structure : bool, VALUES, LABELS, FUNCTIONS, MECH_FUNCTION_PARAMS, STATE_FUNCTION_PARAMS, ROLES, \ or ALL : default False show a detailed representation of each `Mechanism <Mechanism>` in the graph, including its `Ports <Port>`; can have any of the following settings alone or in a list: * `True` -- show Ports of Mechanism, but not information about the `value <Component.value>` or `function <Component.function>` of the Mechanism or its Ports. * *VALUES* -- show the `value <Mechanism_Base.value>` of the Mechanism and the `value <Port_Base.value>` of each of its Ports. * *LABELS* -- show the `value <Mechanism_Base.value>` of the Mechanism and the `value <Port_Base.value>` of each of its Ports, using any labels for the values of InputPorts and OutputPorts specified in the Mechanism's `input_labels_dict <Mechanism.input_labels_dict>` and `output_labels_dict <Mechanism.output_labels_dict>`, respectively. * *FUNCTIONS* -- show the `function <Mechanism_Base.function>` of the Mechanism and the `function <Port_Base.function>` of its InputPorts and OutputPorts. * *MECH_FUNCTION_PARAMS_* -- show the parameters of the `function <Mechanism_Base.function>` for each Mechanism in the Composition (only applies if *FUNCTIONS* is True). * *STATE_FUNCTION_PARAMS_* -- show the parameters of the `function <Mechanism_Base.function>` for each Port of each Mechanism in the Composition (only applies if *FUNCTIONS* is True). * *ROLES* -- show the `role <Composition.NodeRoles>` of the Mechanism in the Composition (but not any of the other information; use *ALL* to show ROLES with other information). * *ALL* -- shows the role, `function <Component.function>`, and `value <Component.value>` of the Mechanisms in the `Composition` and their `Ports <Port>` (using labels for the values, if specified -- see above), including parameters for all functions. 
show_nested : bool | dict : default ALL specifies whether any nested Composition(s) are shown in details as inset graphs. A dict can be used to specify any of the arguments allowed for show_graph to be used for the nested Composition(s); *ALL* passes all arguments specified for the main Composition to the nested one(s); True uses the default values of show_graph args for the nested Composition(s). show_controller : bool or AGENT_REP : default False specifies whether or not to show the Composition's `controller <Composition.controller>` and associated `objective_mechanism <ControlMechanism.objective_mechanism>` if it has one. If the controller is an OptimizationControlMechanism and it has an `agent_rep <OptimizationControlMechanism>`, then specifying *AGENT_REP* will also show that. All of these items are displayed in the color specified for **controller_color**. show_cim : bool : default False specifies whether or not to show the Composition's input and out CompositionInterfaceMechanisms (CIMs) show_learning : bool or ALL : default False specifies whether or not to show the learning components of the Compositoin; they will all be displayed in the color specified for **learning_color**. Projections that receive a `LearningProjection` will be shown as a diamond-shaped node. If set to *ALL*, all Projections associated with learning will be shown: the LearningProjections as well as from `ProcessingMechanisms <ProcessingMechanism>` to `LearningMechanisms <LearningMechanism>` that convey error and activation information; if set to `True`, only the LearningPojections are shown. show_projection_labels : bool : default False specifies whether or not to show names of projections. show_headers : bool : default True specifies whether or not to show headers in the subfields of a Mechanism's node; only takes effect if **show_node_structure** is specified (see above). 
show_types : bool : default False specifies whether or not to show type (class) of `Mechanism <Mechanism>` in each node label. show_dimensions : bool : default False specifies whether or not to show dimensions for the `variable <Component.variable>` and `value <Component.value>` of each Component in the graph (and/or MappingProjections when show_learning is `True`); can have the following settings: * *MECHANISMS* -- shows `Mechanism <Mechanism>` input and output dimensions. Input dimensions are shown in parentheses below the name of the Mechanism; each number represents the dimension of the `variable <InputPort.variable>` for each `InputPort` of the Mechanism; Output dimensions are shown above the name of the Mechanism; each number represents the dimension for `value <OutputPort.value>` of each of `OutputPort` of the Mechanism. * *PROJECTIONS* -- shows `MappingProjection` `matrix <MappingProjection.matrix>` dimensions. Each is shown in (<dim>x<dim>...) format; for standard 2x2 "weight" matrix, the first entry is the number of rows (input dimension) and the second the number of columns (output dimension). * *ALL* -- eqivalent to `True`; shows dimensions for both Mechanisms and Projections (see above for formats). direction : keyword : default 'BT' 'BT': bottom to top; 'TB': top to bottom; 'LR': left to right; and 'RL`: right to left. active_items : List[Component] : default None specifies one or more items in the graph to display in the color specified by *active_color**. active_color : keyword : default 'yellow' specifies how to highlight the item(s) specified in *active_items**: either a color recognized by GraphViz, or the keyword *BOLD*. 
input_color : keyword : default 'green', specifies the display color for `INPUT <NodeRole.INPUT>` Nodes in the Composition output_color : keyword : default 'red', specifies the display color for `OUTPUT` Nodes in the Composition input_and_output_color : keyword : default 'brown' specifies the display color of nodes that are both an `INPUT <NodeRole.INPUT>` and an `OUTPUT <NodeRole.OUTPUT>` Node in the Composition COMMENT: feedback_color : keyword : default 'yellow' specifies the display color of nodes that are assigned the `NodeRole` `FEEDBACK_SENDER`. COMMENT controller_color : keyword : default 'blue' specifies the color in which the controller components are displayed learning_color : keyword : default 'orange' specifies the color in which the learning components are displayed composition_color : keyword : default 'brown' specifies the display color of nodes that represent nested Compositions. feedback_shape : keyword : default 'septagon' specifies the display shape of nodes that are assigned the `NodeRole` `FEEDBACK_SENDER`. cim_shape : default 'square' specifies the display color input_CIM and output_CIM nodes output_fmt : keyword : default 'pdf' 'pdf': generate and open a pdf with the visualization; 'jupyter': return the object (for working in jupyter/ipython notebooks); 'gv': return graphviz object 'gif': return gif used for animation Returns ------- display of Composition : `pdf` or Graphviz graph object PDF: (placed in current directory) if :keyword:`output_fmt` arg is 'pdf'; Graphviz graph object if :keyword:`output_fmt` arg is 'gv' or 'jupyter'; gif if :keyword:`output_fmt` arg is 'gif'. 
""" # HELPER METHODS ---------------------------------------------------------------------- tc.typecheck _locals = locals().copy() def _assign_processing_components(g, rcvr, show_nested): """Assign nodes to graph""" if isinstance(rcvr, Composition) and show_nested: # User passed args for nested Composition output_fmt_arg = {'output_fmt':'gv'} if isinstance(show_nested, dict): args = show_nested args.update(output_fmt_arg) elif show_nested is ALL: # Pass args from main call to show_graph to call for nested Composition args = dict({k:_locals[k] for k in list(inspect.signature(self.show_graph).parameters)}) args.update(output_fmt_arg) if kwargs: args['kwargs'] = kwargs else: del args['kwargs'] else: # Use default args for nested Composition args = output_fmt_arg nested_comp_graph = rcvr.show_graph(**args) nested_comp_graph.name = "cluster_" + rcvr.name rcvr_label = rcvr.name # if rcvr in self.get_nodes_by_role(NodeRole.FEEDBACK_SENDER): # nested_comp_graph.attr(color=feedback_color) if rcvr in self.get_nodes_by_role(NodeRole.INPUT) and \ rcvr in self.get_nodes_by_role(NodeRole.OUTPUT): nested_comp_graph.attr(color=input_and_output_color) elif rcvr in self.get_nodes_by_role(NodeRole.INPUT): nested_comp_graph.attr(color=input_color) elif rcvr in self.get_nodes_by_role(NodeRole.OUTPUT): nested_comp_graph.attr(color=output_color) nested_comp_graph.attr(label=rcvr_label) g.subgraph(nested_comp_graph) # If rcvr is a learning component and not an INPUT node, # break and handle in _assign_learning_components() # (node: this allows TARGET node for learning to remain marked as an INPUT node) if ((NodeRole.LEARNING in self.nodes_to_roles[rcvr] or NodeRole.AUTOASSOCIATIVE_LEARNING in self.nodes_to_roles[rcvr]) and not NodeRole.INPUT in self.nodes_to_roles[rcvr]): return # If rcvr is ObjectiveMechanism for Composition's controller, # break and handle in _assign_control_components() if (isinstance(rcvr, ObjectiveMechanism) and self.controller and rcvr is 
self.controller.objective_mechanism): return # Implement rcvr node else: # Set rcvr shape, color, and penwidth based on node type rcvr_rank = 'same' # Feedback Node if rcvr in self.get_nodes_by_role(NodeRole.FEEDBACK_SENDER): node_shape = feedback_shape else: node_shape = mechanism_shape # Get condition if any associated with rcvr if rcvr in self.scheduler.conditions: condition = self.scheduler.conditions[rcvr] else: condition = None # # Feedback Node # if rcvr in self.get_nodes_by_role(NodeRole.FEEDBACK_SENDER): # if rcvr in active_items: # if active_color is BOLD: # rcvr_color = feedback_color # else: # rcvr_color = active_color # rcvr_penwidth = str(bold_width + active_thicker_by) # self.active_item_rendered = True # else: # rcvr_color = feedback_color # rcvr_penwidth = str(bold_width) # Input and Output Node if rcvr in self.get_nodes_by_role(NodeRole.INPUT) and \ rcvr in self.get_nodes_by_role(NodeRole.OUTPUT): if rcvr in active_items: if active_color is BOLD: rcvr_color = input_and_output_color else: rcvr_color = active_color rcvr_penwidth = str(bold_width + active_thicker_by) self.active_item_rendered = True else: rcvr_color = input_and_output_color rcvr_penwidth = str(bold_width) # Input Node elif rcvr in self.get_nodes_by_role(NodeRole.INPUT): if rcvr in active_items: if active_color is BOLD: rcvr_color = input_color else: rcvr_color = active_color rcvr_penwidth = str(bold_width + active_thicker_by) self.active_item_rendered = True else: rcvr_color = input_color rcvr_penwidth = str(bold_width) rcvr_rank = input_rank # Output Node elif rcvr in self.get_nodes_by_role(NodeRole.OUTPUT): if rcvr in active_items: if active_color is BOLD: rcvr_color = output_color else: rcvr_color = active_color rcvr_penwidth = str(bold_width + active_thicker_by) self.active_item_rendered = True else: rcvr_color = output_color rcvr_penwidth = str(bold_width) rcvr_rank = output_rank # Composition elif isinstance(rcvr, Composition): node_shape = composition_shape if rcvr in 
active_items: if active_color is BOLD: rcvr_color = composition_color else: rcvr_color = active_color rcvr_penwidth = str(bold_width + active_thicker_by) self.active_item_rendered = True else: rcvr_color = composition_color rcvr_penwidth = str(bold_width) elif rcvr in active_items: if active_color is BOLD: rcvr_color = default_node_color else: rcvr_color = active_color rcvr_penwidth = str(default_width + active_thicker_by) self.active_item_rendered = True else: rcvr_color = default_node_color rcvr_penwidth = str(default_width) # Implement rcvr node rcvr_label = self._get_graph_node_label(rcvr, show_types, show_dimensions) if show_node_structure and isinstance(rcvr, Mechanism): g.node(rcvr_label, rcvr._show_structure(**node_struct_args, node_border=rcvr_penwidth, condition=condition), shape=struct_shape, color=rcvr_color, rank=rcvr_rank, penwidth=rcvr_penwidth) else: g.node(rcvr_label, shape=node_shape, color=rcvr_color, rank=rcvr_rank, penwidth=rcvr_penwidth) # Implement sender edges sndrs = processing_graph[rcvr] _assign_incoming_edges(g, rcvr, rcvr_label, sndrs) def _assign_cim_components(g, cims): cim_rank = 'same' for cim in cims: cim_penwidth = str(default_width) # ASSIGN CIM NODE **************************************************************** # Assign color # Also take opportunity to verify that cim is either input_CIM or output_CIM if cim is self.input_CIM: if cim in active_items: if active_color is BOLD: cim_color = input_color else: cim_color = active_color cim_penwidth = str(default_width + active_thicker_by) self.active_item_rendered = True else: cim_color = input_color elif cim is self.output_CIM: if cim in active_items: if active_color is BOLD: cim_color = output_color else: cim_color = active_color cim_penwidth = str(default_width + active_thicker_by) self.active_item_rendered = True else: cim_color = output_color else: assert False, '_assignm_cim_components called with node that is not input_CIM or output_CIM' # Assign lablel cim_label = 
self._get_graph_node_label(cim, show_types, show_dimensions) if show_node_structure: g.node(cim_label, cim._show_structure(**node_struct_args, node_border=cim_penwidth, compact_cim=True), shape=struct_shape, color=cim_color, rank=cim_rank, penwidth=cim_penwidth) else: g.node(cim_label, shape=cim_shape, color=cim_color, rank=cim_rank, penwidth=cim_penwidth) # ASSIGN CIM PROJECTIONS **************************************************************** # Projections from input_CIM to INPUT nodes if cim is self.input_CIM: for output_port in self.input_CIM.output_ports: projs = output_port.efferents for proj in projs: input_mech = proj.receiver.owner if input_mech is self.controller: # Projections to contoller are handled under _assign_controller_components continue # Validate the Projection is to an INPUT node or a node that is shadowing one if ((input_mech in self.nodes_to_roles and not NodeRole.INPUT in self.nodes_to_roles[input_mech]) and (proj.receiver.shadow_inputs in self.nodes_to_roles and not NodeRole.INPUT in self.nodes_to_roles[proj.receiver.shadow_inputs])): raise CompositionError("Projection from input_CIM of {} to node {} " "that is not an {} node or shadowing its {}". format(self.name, input_mech, NodeRole.INPUT.name, NodeRole.INPUT.name.lower())) # Construct edge name input_mech_label = self._get_graph_node_label(input_mech, show_types, show_dimensions) if show_node_structure: cim_proj_label = '{}:{}-{}'. \ format(cim_label, OutputPort.__name__, proj.sender.name) proc_mech_rcvr_label = '{}:{}-{}'. 
\ format(input_mech_label, InputPort.__name__, proj.receiver.name) else: cim_proj_label = cim_label proc_mech_rcvr_label = input_mech_label # Render Projection if any(item in active_items for item in {proj, proj.receiver.owner}): if active_color is BOLD: proj_color = default_node_color else: proj_color = active_color proj_width = str(default_width + active_thicker_by) self.active_item_rendered = True else: proj_color = default_node_color proj_width = str(default_width) if show_projection_labels: label = self._get_graph_node_label(proj, show_types, show_dimensions) else: label = '' g.edge(cim_proj_label, proc_mech_rcvr_label, label=label, color=proj_color, penwidth=proj_width) # Projections from OUTPUT nodes to output_CIM if cim is self.output_CIM: # Construct edge name for input_port in self.output_CIM.input_ports: projs = input_port.path_afferents for proj in projs: # Validate the Projection is from an OUTPUT node output_mech = proj.sender.owner if not NodeRole.OUTPUT in self.nodes_to_roles[output_mech]: raise CompositionError("Projection to output_CIM of {} from node {} " "that is not an {} node". format(self.name, output_mech, NodeRole.OUTPUT.name, NodeRole.OUTPUT.name.lower())) # Construct edge name output_mech_label = self._get_graph_node_label(output_mech, show_types, show_dimensions) if show_node_structure: cim_proj_label = '{}:{}'. 
\ format(cim_label, cim._get_port_name(proj.receiver)) proc_mech_sndr_label = '{}:{}'.\ format(output_mech_label, output_mech._get_port_name(proj.sender)) # format(output_mech_label, OutputPort.__name__, proj.sender.name) else: cim_proj_label = cim_label proc_mech_sndr_label = output_mech_label # Render Projection if any(item in active_items for item in {proj, proj.receiver.owner}): if active_color is BOLD: proj_color = default_node_color else: proj_color = active_color proj_width = str(default_width + active_thicker_by) self.active_item_rendered = True else: proj_color = default_node_color proj_width = str(default_width) if show_projection_labels: label = self._get_graph_node_label(proj, show_types, show_dimensions) else: label = '' g.edge(proc_mech_sndr_label, cim_proj_label, label=label, color=proj_color, penwidth=proj_width) def _assign_controller_components(g): """Assign control nodes and edges to graph""" controller = self.controller if controller is None: warnings.warn(f"{self.name} has not been assigned a \'controller\', " f"so \'show_controller\' option in call to its show_graph() method will be ignored.") return if controller in active_items: if active_color is BOLD: ctlr_color = controller_color else: ctlr_color = active_color ctlr_width = str(default_width + active_thicker_by) self.active_item_rendered = True else: ctlr_color = controller_color ctlr_width = str(default_width) # Assign controller node node_shape = mechanism_shape ctlr_label = self._get_graph_node_label(controller, show_types, show_dimensions) if show_node_structure: g.node(ctlr_label, controller._show_structure(**node_struct_args, node_border=ctlr_width, condition=self.controller_condition), shape=struct_shape, color=ctlr_color, penwidth=ctlr_width, rank=control_rank ) else: g.node(ctlr_label, color=ctlr_color, penwidth=ctlr_width, shape=node_shape, rank=control_rank) # outgoing edges (from controller to ProcessingMechanisms) for control_signal in controller.control_signals: for ctl_proj 
in control_signal.efferents: proc_mech_label = self._get_graph_node_label(ctl_proj.receiver.owner, show_types, show_dimensions) if controller in active_items: if active_color is BOLD: ctl_proj_color = controller_color else: ctl_proj_color = active_color ctl_proj_width = str(default_width + active_thicker_by) self.active_item_rendered = True else: ctl_proj_color = controller_color ctl_proj_width = str(default_width) if show_projection_labels: edge_label = ctl_proj.name else: edge_label = '' if show_node_structure: ctl_sndr_label = ctlr_label + ':' + controller._get_port_name(control_signal) proc_mech_rcvr_label = \ proc_mech_label + ':' + controller._get_port_name(ctl_proj.receiver) else: ctl_sndr_label = ctlr_label proc_mech_rcvr_label = proc_mech_label g.edge(ctl_sndr_label, proc_mech_rcvr_label, label=edge_label, color=ctl_proj_color, penwidth=ctl_proj_width ) # If controller has objective_mechanism, assign its node and Projections if controller.objective_mechanism: # get projection from ObjectiveMechanism to ControlMechanism objmech_ctlr_proj = controller.input_port.path_afferents[0] if controller in active_items: if active_color is BOLD: objmech_ctlr_proj_color = controller_color else: objmech_ctlr_proj_color = active_color objmech_ctlr_proj_width = str(default_width + active_thicker_by) self.active_item_rendered = True else: objmech_ctlr_proj_color = controller_color objmech_ctlr_proj_width = str(default_width) # get ObjectiveMechanism objmech = objmech_ctlr_proj.sender.owner if objmech in active_items: if active_color is BOLD: objmech_color = controller_color else: objmech_color = active_color objmech_width = str(default_width + active_thicker_by) self.active_item_rendered = True else: objmech_color = controller_color objmech_width = str(default_width) objmech_label = self._get_graph_node_label(objmech, show_types, show_dimensions) if show_node_structure: if objmech in self.scheduler.conditions: condition = self.scheduler.conditions[objmech] else: condition = 
None g.node(objmech_label, objmech._show_structure(**node_struct_args, node_border=ctlr_width, condition=condition), shape=struct_shape, color=objmech_color, penwidth=ctlr_width, rank=control_rank ) else: g.node(objmech_label, color=objmech_color, penwidth=objmech_width, shape=node_shape, rank=control_rank) # objmech to controller edge if show_projection_labels: edge_label = objmech_ctlr_proj.name else: edge_label = '' if show_node_structure: obj_to_ctrl_label = objmech_label + ':' + objmech._get_port_name(objmech_ctlr_proj.sender) ctlr_from_obj_label = ctlr_label + ':' + objmech._get_port_name(objmech_ctlr_proj.receiver) else: obj_to_ctrl_label = objmech_label ctlr_from_obj_label = ctlr_label g.edge(obj_to_ctrl_label, ctlr_from_obj_label, label=edge_label, color=objmech_ctlr_proj_color, penwidth=objmech_ctlr_proj_width) # incoming edges (from monitored mechs to objective mechanism) for input_port in objmech.input_ports: for projection in input_port.path_afferents: if objmech in active_items: if active_color is BOLD: proj_color = controller_color else: proj_color = active_color proj_width = str(default_width + active_thicker_by) self.active_item_rendered = True else: proj_color = controller_color proj_width = str(default_width) if show_node_structure: sndr_proj_label = self._get_graph_node_label(projection.sender.owner, show_types, show_dimensions) + \ ':' + objmech._get_port_name(projection.sender) objmech_proj_label = objmech_label + ':' + objmech._get_port_name(input_port) else: sndr_proj_label = self._get_graph_node_label(projection.sender.owner, show_types, show_dimensions) objmech_proj_label = self._get_graph_node_label(objmech, show_types, show_dimensions) if show_projection_labels: edge_label = projection.name else: edge_label = '' g.edge(sndr_proj_label, objmech_proj_label, label=edge_label, color=proj_color, penwidth=proj_width) # If controller has an agent_rep, assign its node and edges (not Projections per se) if hasattr(controller, 'agent_rep') and 
controller.agent_rep and show_controller==AGENT_REP : # get agent_rep agent_rep = controller.agent_rep # controller is active, treat if controller in active_items: if active_color is BOLD: agent_rep_color = controller_color else: agent_rep_color = active_color agent_rep_width = str(default_width + active_thicker_by) self.active_item_rendered = True else: agent_rep_color = controller_color agent_rep_width = str(default_width) # agent_rep node agent_rep_label = self._get_graph_node_label(agent_rep, show_types, show_dimensions) g.node(agent_rep_label, color=agent_rep_color, penwidth=agent_rep_width, shape=agent_rep_shape, rank=control_rank) # agent_rep <-> controller edges g.edge(agent_rep_label, ctlr_label, color=agent_rep_color, penwidth=agent_rep_width) g.edge(ctlr_label, agent_rep_label, color=agent_rep_color, penwidth=agent_rep_width) # get any other incoming edges to controller (i.e., other than from ObjectiveMechanism) senders = set() for i in controller.input_ports[1:]: for p in i.path_afferents: senders.add(p.sender.owner) _assign_incoming_edges(g, controller, ctlr_label, senders, proj_color=ctl_proj_color) def _assign_learning_components(g): """Assign learning nodes and edges to graph""" # Get learning_components, with exception of INPUT (i.e. 
TARGET) nodes # (i.e., allow TARGET node to continue to be marked as an INPUT node) learning_components = [node for node in self.learning_components if not NodeRole.INPUT in self.nodes_to_roles[node]] # learning_components.extend([node for node in self.nodes if # NodeRole.AUTOASSOCIATIVE_LEARNING in # self.nodes_to_roles[node]]) for rcvr in learning_components: # if rcvr is Projection, skip (handled in _assign_processing_components) if isinstance(rcvr, MappingProjection): return # Get rcvr info rcvr_label = self._get_graph_node_label(rcvr, show_types, show_dimensions) if rcvr in active_items: if active_color is BOLD: rcvr_color = learning_color else: rcvr_color = active_color rcvr_width = str(default_width + active_thicker_by) self.active_item_rendered = True else: rcvr_color = learning_color rcvr_width = str(default_width) # rcvr is a LearningMechanism or ObjectiveMechanism (ComparatorMechanism) # Implement node for Mechanism if show_node_structure: g.node(rcvr_label, rcvr._show_structure(**node_struct_args), rank=learning_rank, color=rcvr_color, penwidth=rcvr_width) else: g.node(rcvr_label, color=rcvr_color, penwidth=rcvr_width, rank=learning_rank, shape=mechanism_shape) # Implement sender edges sndrs = processing_graph[rcvr] _assign_incoming_edges(g, rcvr, rcvr_label, sndrs) def render_projection_as_node(g, proj, label, proj_color, proj_width, sndr_label=None, rcvr_label=None): proj_receiver = proj.receiver.owner # Node for Projection g.node(label, shape=learning_projection_shape, color=proj_color, penwidth=proj_width) # FIX: ?? 
if proj_receiver in active_items: # edge_color = proj_color # edge_width = str(proj_width) if active_color is BOLD: edge_color = proj_color else: edge_color = active_color edge_width = str(default_width + active_thicker_by) else: edge_color = default_node_color edge_width = str(default_width) # Edges to and from Projection node if sndr_label: G.edge(sndr_label, label, arrowhead='none', color=edge_color, penwidth=edge_width) if rcvr_label: G.edge(label, rcvr_label, color=edge_color, penwidth=edge_width) # LearningProjection(s) to node # if proj in active_items or (proj_learning_in_execution_phase and proj_receiver in active_items): if proj in active_items: if active_color is BOLD: learning_proj_color = learning_color else: learning_proj_color = active_color learning_proj_width = str(default_width + active_thicker_by) self.active_item_rendered = True else: learning_proj_color = learning_color learning_proj_width = str(default_width) sndrs = proj._parameter_ports['matrix'].mod_afferents # GET ALL LearningProjections to proj for sndr in sndrs: sndr_label = self._get_graph_node_label(sndr.sender.owner, show_types, show_dimensions) rcvr_label = self._get_graph_node_label(proj, show_types, show_dimensions) if show_projection_labels: edge_label = proj._parameter_ports['matrix'].mod_afferents[0].name else: edge_label = '' if show_node_structure: G.edge(sndr_label + ':' + OutputPort.__name__ + '-' + 'LearningSignal', rcvr_label, label=edge_label, color=learning_proj_color, penwidth=learning_proj_width) else: G.edge(sndr_label, rcvr_label, label = edge_label, color=learning_proj_color, penwidth=learning_proj_width) return True @tc.typecheck def _assign_incoming_edges(g, rcvr, rcvr_label, senders, proj_color=None, proj_arrow=None): proj_color = proj_color or default_node_color proj_arrow = default_projection_arrow for sndr in senders: # Set sndr info sndr_label = self._get_graph_node_label(sndr, show_types, show_dimensions) # Iterate through all Projections from all 
OutputPorts of sndr for output_port in sndr.output_ports: for proj in output_port.efferents: # Skip any projections to ObjectiveMechanism for controller # (those are handled in _assign_control_components) if (self.controller and proj.receiver.owner in {self.controller, self.controller.objective_mechanism}): continue # Only consider Projections to the rcvr if ((isinstance(rcvr, (Mechanism, Projection)) and proj.receiver.owner == rcvr) or (isinstance(rcvr, Composition) and proj.receiver.owner is rcvr.input_CIM)): if show_node_structure and isinstance(sndr, Mechanism) and isinstance(rcvr, Mechanism): sndr_proj_label = f'{sndr_label}:{sndr._get_port_name(proj.sender)}' proc_mech_rcvr_label = f'{rcvr_label}:{rcvr._get_port_name(proj.receiver)}' else: sndr_proj_label = sndr_label proc_mech_rcvr_label = rcvr_label try: has_learning = proj.has_learning_projection is not None except AttributeError: has_learning = None edge_label = self._get_graph_node_label(proj, show_types, show_dimensions) is_learning_component = rcvr in self.learning_components or sndr in self.learning_components # Check if Projection or its receiver is active if any(item in active_items for item in {proj, proj.receiver.owner}): if active_color is BOLD: # if (isinstance(rcvr, LearningMechanism) or isinstance(sndr, LearningMechanism)): if is_learning_component: proj_color = learning_color else: pass else: proj_color = active_color proj_width = str(default_width + active_thicker_by) self.active_item_rendered = True # Projection to or from a LearningMechanism elif (NodeRole.LEARNING in self.nodes_to_roles[rcvr] or NodeRole.AUTOASSOCIATIVE_LEARNING in self.nodes_to_roles[rcvr]): proj_color = learning_color proj_width = str(default_width) else: proj_width = str(default_width) proc_mech_label = edge_label # Render Projection as edge if show_learning and has_learning: # Render Projection as node # (do it here rather than in _assign_learning_components, # as it needs afferent and efferent edges to other nodes) # 
IMPLEMENTATION NOTE: Projections can't yet use structured nodes: deferred = not render_projection_as_node(g=g, proj=proj, label=proc_mech_label, rcvr_label=proc_mech_rcvr_label, sndr_label=sndr_proj_label, proj_color=proj_color, proj_width=proj_width) # Deferred if it is the last Mechanism in a learning sequence # (see _render_projection_as_node) if deferred: continue else: from psyneulink.core.components.projections.modulatory.controlprojection import ControlProjection if isinstance(proj, ControlProjection): arrowhead=control_projection_arrow else: arrowhead=proj_arrow if show_projection_labels: label = proc_mech_label else: label = '' g.edge(sndr_proj_label, proc_mech_rcvr_label, label=label, color=proj_color, penwidth=proj_width, arrowhead=arrowhead) # SETUP AND CONSTANTS ----------------------------------------------------------------- INITIAL_FRAME = "INITIAL_FRAME" if context.execution_id is NotImplemented: context.execution_id = self.default_execution_id # For backward compatibility if 'show_model_based_optimizer' in kwargs: show_controller = kwargs['show_model_based_optimizer'] del kwargs['show_model_based_optimizer'] if kwargs: raise CompositionError(f'Unrecognized argument(s) in call to show_graph method ' f'of {Composition.__name__} {repr(self.name)}: {", ".join(kwargs.keys())}') if show_dimensions == True: show_dimensions = ALL active_items = active_items or [] if active_items: active_items = convert_to_list(active_items) if (self.scheduler.get_clock(context).time.run >= self._animate_num_runs or self.scheduler.get_clock(context).time.trial >= self._animate_num_trials): return for item in active_items: if not isinstance(item, Component) and item is not INITIAL_FRAME: raise CompositionError( "PROGRAM ERROR: Item ({}) specified in {} argument for {} method of {} is not a {}". 
format(item, repr('active_items'), repr('show_graph'), self.name, Component.__name__)) self.active_item_rendered = False # Argument values used to call Mechanism._show_structure() if isinstance(show_node_structure, (list, tuple, set)): node_struct_args = {'composition': self, 'show_roles': any(key in show_node_structure for key in {ROLES, ALL}), 'show_conditions': any(key in show_node_structure for key in {CONDITIONS, ALL}), 'show_functions': any(key in show_node_structure for key in {FUNCTIONS, ALL}), 'show_mech_function_params': any(key in show_node_structure for key in {MECH_FUNCTION_PARAMS, ALL}), 'show_port_function_params': any(key in show_node_structure for key in {STATE_FUNCTION_PARAMS, ALL}), 'show_values': any(key in show_node_structure for key in {VALUES, ALL}), 'use_labels': any(key in show_node_structure for key in {LABELS, ALL}), 'show_headers': show_headers, 'output_fmt': 'struct', 'context':context} else: node_struct_args = {'composition': self, 'show_roles': show_node_structure in {ROLES, ALL}, 'show_conditions': show_node_structure in {CONDITIONS, ALL}, 'show_functions': show_node_structure in {FUNCTIONS, ALL}, 'show_mech_function_params': show_node_structure in {MECH_FUNCTION_PARAMS, ALL}, 'show_port_function_params': show_node_structure in {STATE_FUNCTION_PARAMS, ALL}, 'show_values': show_node_structure in {VALUES, LABELS, ALL}, 'use_labels': show_node_structure in {LABELS, ALL}, 'show_headers': show_headers, 'output_fmt': 'struct', 'context': context} # DEFAULT ATTRIBUTES ---------------------------------------------------------------- default_node_color = 'black' mechanism_shape = 'oval' learning_projection_shape = 'diamond' struct_shape = 'plaintext' # assumes use of html cim_shape = 'rectangle' composition_shape = 'rectangle' agent_rep_shape = 'egg' default_projection_arrow = 'normal' bold_width = 3 default_width = 1 active_thicker_by = 2 input_rank = 'source' control_rank = 'min' learning_rank = 'min' output_rank = 'max' # BUILD GRAPH 
------------------------------------------------------------------------ import graphviz as gv G = gv.Digraph( name=self.name, engine="dot", node_attr={ 'fontsize': '12', 'fontname': 'arial', 'shape': 'record', 'color': default_node_color, 'penwidth': str(default_width), }, edge_attr={ 'fontsize': '10', 'fontname': 'arial' }, graph_attr={ "rankdir": direction, 'overlap': "False" }, ) # get all Nodes # FIX: call to _analyze_graph in nested calls to show_graph cause trouble if output_fmt != 'gv': self._analyze_graph(context=context) processing_graph = self.graph_processing.dependency_dict rcvrs = list(processing_graph.keys()) for r in rcvrs: _assign_processing_components(G, r, show_nested) # Add cim Components to graph if show_cim if show_cim: _assign_cim_components(G, [self.input_CIM, self.output_CIM]) # Add controller-related Components to graph if show_controller if show_controller: _assign_controller_components(G) # Add learning-related Components to graph if show_learning if show_learning: _assign_learning_components(G) # Sort nodes for display def get_index_of_node_in_G_body(node, node_type:tc.enum(MECHANISM, PROJECTION, BOTH)): """Get index of node in G.body""" for i, item in enumerate(G.body): if node.name in item: if node_type in {MECHANISM, BOTH}: if not '->' in item: return i elif node_type in {PROJECTION, BOTH}: if '->' in item: return i else: assert False, f'PROGRAM ERROR: node_type not specified or illegal ({node_type})' for node in self.nodes: roles = self.get_roles_by_node(node) # Put INPUT node(s) first if NodeRole.INPUT in roles: i = get_index_of_node_in_G_body(node, MECHANISM) if i is not None: G.body.insert(0,G.body.pop(i)) # Put OUTPUT node(s) last (except for ControlMechanisms) if NodeRole.OUTPUT in roles: i = get_index_of_node_in_G_body(node, MECHANISM) if i is not None: G.body.insert(len(G.body),G.body.pop(i)) # Put ControlMechanism(s) last if isinstance(node, ControlMechanism): i = get_index_of_node_in_G_body(node, MECHANISM) if i is not 
None: G.body.insert(len(G.body),G.body.pop(i)) for proj in self.projections: # Put ControlProjection(s) last (along with ControlMechanis(s)) if isinstance(proj, ControlProjection): i = get_index_of_node_in_G_body(node, PROJECTION) if i is not None: G.body.insert(len(G.body),G.body.pop(i)) if self.controller and show_controller: i = get_index_of_node_in_G_body(self.controller, MECHANISM) G.body.insert(len(G.body),G.body.pop(i)) # GENERATE OUTPUT --------------------------------------------------------------------- # Show as pdf try: if output_fmt == 'pdf': # G.format = 'svg' G.view(self.name.replace(" ", "-"), cleanup=True, directory='show_graph OUTPUT/PDFS') # Generate images for animation elif output_fmt == 'gif': if self.active_item_rendered or INITIAL_FRAME in active_items: self._generate_gifs(G, active_items, context) # Return graph to show in jupyter elif output_fmt == 'jupyter': return G elif output_fmt == 'gv': return G except: raise CompositionError(f"Problem displaying graph for {self.name}") @tc.typecheck def _show_structure(self, # direction = 'BT', show_functions:bool=False, show_values:bool=False, use_labels:bool=False, show_headers:bool=False, show_roles:bool=False, show_conditions:bool=False, system=None, composition=None, condition:tc.optional(Condition)=None, compact_cim:tc.optional(tc.enum(INPUT, OUTPUT))=None, output_fmt:tc.enum('pdf','struct')='pdf', context=None ): """Generate a detailed display of a the structure of a Mechanism. .. note:: This method relies on `graphviz <http://www.graphviz.org>`_, which must be installed and imported (standard with PsyNeuLink pip install) Displays the structure of a Mechanism using the GraphViz `record <http://graphviz.readthedocs.io/en/stable/examples.html#structs-revisited-py>`_ shape. This method is called by `System.show_graph` if its **show_mechanism_structure** argument is specified as `True` when it is called. 
Arguments --------- show_functions : bool : default False show the `function <Component.function>` of the Mechanism and each of its Ports. show_mech_function_params : bool : default False show the parameters of the Mechanism's `function <Component.function>` if **show_functions** is True. show_port_function_params : bool : default False show parameters for the `function <Component.function>` of the Mechanism's Ports if **show_functions** is True). show_values : bool : default False show the `value <Component.value>` of the Mechanism and each of its Ports (prefixed by "="). use_labels : bool : default False use labels for values if **show_values** is `True`; labels must be specified in the `input_labels_dict <Mechanism.input_labels_dict>` (for InputPort values) and `output_labels_dict <Mechanism.output_labels_dict>` (for OutputPort values); otherwise it is ignored. show_headers : bool : default False show the Mechanism, InputPort, ParameterPort and OutputPort headers. show_roles : bool : default False show the `roles <Composition.NodeRoles>` of each Mechanism in the `Composition`. show_conditions : bool : default False show the `conditions <Condition>` used by `Composition` to determine whether/when to execute each Mechanism. system : System : default None specifies the `System` (to which the Mechanism must belong) for which to show its role (see **roles**); if this is not specified, the **show_roles** argument is ignored. composition : Composition : default None specifies the `Composition` (to which the Mechanism must belong) for which to show its role (see **roles**); if this is not specified, the **show_roles** argument is ignored. compact_cim : *INPUT* or *OUTUPT* : default None specifies whether to suppress InputPort fields for input_CIM and OutputPort fields for output_CIM. 
output_fmt : keyword : default 'pdf' 'pdf': generate and open a pdf with the visualization;\n 'jupyter': return the object (ideal for working in jupyter/ipython notebooks)\n 'struct': return a string that specifies the structure of a mechanism, for use in a GraphViz node specification. """ if composition: system = composition open_bracket = r'{' pipe = r' | ' close_bracket = r'}' mechanism_header = r'COMPOSITION:\n' input_ports_header = r'______CIMInputPortS______\n' \ r'/\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ ' \ r'\ \ \ \ \ \ \ \ \ \ \\' output_ports_header = r'\\______\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ ______/' \ r'\nCIMOutputPortS' def mech_string(mech): """Return string with name of mechanism possibly with function and/or value Inclusion of role, function and/or value is determined by arguments of call to _show_structure """ if show_headers: mech_header = mechanism_header else: mech_header = '' mech_name = r' <{0}> {1}{0}'.format(mech.name, mech_header) mech_role = '' if system and show_roles: try: mech_role = r'\n[{}]'.format(self.systems[system]) except KeyError: # # mech_role = r'\n[{}]'.format(self.system) # mech_role = r'\n[CONTROLLER]' from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import \ ControlMechanism from psyneulink.core.components.mechanisms.processing.objectivemechanism import \ ObjectiveMechanism if isinstance(mech, ControlMechanism) and hasattr(mech, 'system'): mech_role = r'\n[CONTROLLER]' elif isinstance(mech, ObjectiveMechanism) and hasattr(mech, '_role'): mech_role = r'\n[{}]'.format(mech._role) else: mech_role = "" mech_function = '' if show_functions: mech_function = r'\n({})'.format(mech.function.__class__.__name__) mech_value = '' if show_values: mech_value = r'\n={}'.format(mech.value) return mech_name + mech_role + mech_function + mech_value from psyneulink.core.globals.utilities import ContentAddressableList def states_string(port_list: 
ContentAddressableList, port_type, include_function: bool = False, include_value: bool = False, use_label: bool = False): """Return string with name of ports in ContentAddressableList with functions and/or values as specified""" states = open_bracket for i, port in enumerate(port_list): if i: states += pipe function = '' if include_function: function = r'\n({})'.format(port.function.__class__.__name__) value = '' if include_value: if use_label: value = r'\n={}'.format(port.label) else: value = r'\n={}'.format(port.value) states += r'<{0}-{1}> {1}{2}{3}'.format(port_type.__name__, port.name, function, value) states += close_bracket return states # Construct Mechanism specification mech = mech_string(self) # Construct InputPorts specification if len(self.input_ports) and compact_cim is not INPUT: if show_headers: input_ports = input_ports_header + pipe + states_string(self.input_ports, InputPort, include_function=show_functions, include_value=show_values, use_label=use_labels) else: input_ports = states_string(self.input_ports, InputPort, include_function=show_functions, include_value=show_values, use_label=use_labels) input_ports = pipe + input_ports else: input_ports = '' # Construct OutputPorts specification if len(self.output_ports) and compact_cim is not OUTPUT: if show_headers: output_ports = states_string(self.output_ports, OutputPort, include_function=show_functions, include_value=show_values, use_label=use_labels) + pipe + output_ports_header else: output_ports = states_string(self.output_ports, OutputPort, include_function=show_functions, include_value=show_values, use_label=use_labels) output_ports = output_ports + pipe else: output_ports = '' m_node_struct = open_bracket + \ output_ports + \ open_bracket + mech + close_bracket + \ input_ports + \ close_bracket if output_fmt == 'struct': # return m.node return m_node_struct # Make node import graphviz as gv m = gv.Digraph( # 'mechanisms', # filename='mechanisms_revisited.gv', node_attr={'shape': 'record'}, 
) m.node(self.name, m_node_struct, shape='record') if output_fmt == 'pdf': m.view(self.name.replace(" ", "-"), cleanup=True) elif output_fmt == 'jupyter': return m def _get_graph_node_label(self, item, show_types=None, show_dimensions=None): if not isinstance(item, (Mechanism, Composition, Projection)): raise CompositionError("Unrecognized node type ({}) in graph for {}".format(item, self.name)) # TBI Show Dimensions name = item.name if show_types: name = item.name + '\n(' + item.__class__.__name__ + ')' if show_dimensions in {ALL, MECHANISMS} and isinstance(item, Mechanism): input_str = "in ({})".format(",".join(str(input_port.socket_width) for input_port in item.input_ports)) output_str = "out ({})".format(",".join(str(len(np.atleast_1d(output_port.value))) for output_port in item.output_ports)) return f"{output_str}\n{name}\n{input_str}" if show_dimensions in {ALL, PROJECTIONS} and isinstance(item, Projection): # MappingProjections use matrix if isinstance(item, MappingProjection): value = np.array(item.matrix) dim_string = "({})".format("x".join([str(i) for i in value.shape])) return "{}\n{}".format(item.name, dim_string) # ModulatoryProjections use value else: value = np.array(item.value) dim_string = "({})".format(len(value)) return "{}\n{}".format(item.name, dim_string) if isinstance(item, CompositionInterfaceMechanism): name = name.replace('Input_CIM','INPUT') name = name.replace('Output_CIM', 'OUTPUT') return name def _set_up_animation(self, context): self._component_animation_execution_count = None if isinstance(self._animate, dict): # Assign directory for animation files from psyneulink._version import root_dir default_dir = root_dir + '/../show_graph output/GIFs/' + self.name # + " gifs" # try: # rmtree(self._animate_directory) # except: # pass self._animate_unit = self._animate.pop(UNIT, EXECUTION_SET) self._image_duration = self._animate.pop(DURATION, 0.75) self._animate_num_runs = self._animate.pop(NUM_RUNS, 1) self._animate_num_trials = 
self._animate.pop(NUM_TRIALS, 1) self._animate_simulations = self._animate.pop(SIMULATIONS, False) self._movie_filename = self._animate.pop(MOVIE_NAME, self.name + ' movie') + '.gif' self._animation_directory = self._animate.pop(MOVIE_DIR, default_dir) self._save_images = self._animate.pop(SAVE_IMAGES, False) self._show_animation = self._animate.pop(SHOW, False) if not self._animate_unit in {COMPONENT, EXECUTION_SET}: raise SystemError(f"{repr(UNIT)} entry of {repr('animate')} argument for {self.name} method " f"of {repr('run')} ({self._animate_unit}) " f"must be {repr(COMPONENT)} or {repr(EXECUTION_SET)}.") if not isinstance(self._image_duration, (int, float)): raise SystemError(f"{repr(DURATION)} entry of {repr('animate')} argument for {repr('run')} method of " f"{self.name} ({self._image_duration}) must be an int or a float.") if not isinstance(self._animate_num_runs, int): raise SystemError(f"{repr(NUM_RUNS)} entry of {repr('animate')} argument for {repr('show_graph')} " f"method of {self.name} ({self._animate_num_runs}) must an integer.") if not isinstance(self._animate_num_trials, int): raise SystemError(f"{repr(NUM_TRIALS)} entry of {repr('animate')} argument for {repr('show_graph')} " f"method of {self.name} ({self._animate_num_trials}) must an integer.") if not isinstance(self._animate_simulations, bool): raise SystemError(f"{repr(SIMULATIONS)} entry of {repr('animate')} argument for {repr('show_graph')} " f"method of {self.name} ({self._animate_num_trials}) must a boolean.") if not isinstance(self._animation_directory, str): raise SystemError(f"{repr(MOVIE_DIR)} entry of {repr('animate')} argument for {repr('run')} " f"method of {self.name} ({self._animation_directory}) must be a string.") if not isinstance(self._movie_filename, str): raise SystemError(f"{repr(MOVIE_NAME)} entry of {repr('animate')} argument for {repr('run')} " f"method of {self.name} ({self._movie_filename}) must be a string.") if not isinstance(self._save_images, bool): raise 
SystemError(f"{repr(SAVE_IMAGES)} entry of {repr('animate')} argument for {repr('run')} method " f"of {self.name} ({self._save_images}) must be a boolean") if not isinstance(self._show_animation, bool): raise SystemError(f"{repr(SHOW)} entry of {repr('animate')} argument for {repr('run')} " f"method of {self.name} ({self._show_animation}) must be a boolean.") elif self._animate: # self._animate should now be False or a dict raise SystemError("{} argument for {} method of {} ({}) must be a boolean or " "a dictionary of argument specifications for its {} method". format(repr('animate'), repr('run'), self.name, self._animate, repr('show_graph'))) def _animate_execution(self, active_items, context): if self._component_animation_execution_count is None: self._component_animation_execution_count = 0 else: self._component_animation_execution_count += 1 self.show_graph(active_items=active_items, **self._animate, output_fmt='gif', context=context, ) def _generate_gifs(self, G, active_items, context): def create_phase_string(phase): return f'%16s' % phase + ' - ' def create_time_string(time, spec): if spec == 'TIME': r = time.run t = time.trial p = time.pass_ ts = time.time_step else: r = t = p = ts = '__' return f"Time(run: %2s, " % r + f"trial: %2s, " % t + f"pass: %2s, " % p + f"time_step: %2s)" % ts G.format = 'gif' execution_phase = context.execution_phase time = self.scheduler.get_clock(context).time run_num = time.run trial_num = time.trial if INITIAL_FRAME in active_items: phase_string = create_phase_string('Initializing') time_string = create_time_string(time, 'BLANKS') elif ContextFlags.PROCESSING in execution_phase: phase_string = create_phase_string('Processing Phase') time_string = create_time_string(time, 'TIME') # elif ContextFlags.LEARNING in execution_phase: # time = self.scheduler_learning.get_clock(context).time # time_string = "Time(run: {}, trial: {}, pass: {}, time_step: {}". 
\ # format(run_num, time.trial, time.pass_, time.time_step) # phase_string = 'Learning Phase - ' elif ContextFlags.CONTROL in execution_phase: phase_string = create_phase_string('Control Phase') time_string = create_time_string(time, 'TIME') else: raise CompositionError( f"PROGRAM ERROR: Unrecognized phase during execution of {self.name}: {execution_phase.name}") label = f'\n{self.name}\n{phase_string}{time_string}\n' G.attr(label=label) G.attr(labelloc='b') G.attr(fontname='Monaco') G.attr(fontsize='14') index = repr(self._component_animation_execution_count) image_filename = '-'.join([repr(run_num), repr(trial_num), index]) image_file = self._animation_directory + '/' + image_filename + '.gif' G.render(filename=image_filename, directory=self._animation_directory, cleanup=True, # view=True ) # Append gif to self._animation image = Image.open(image_file) # TBI? # if not self._save_images: # remove(image_file) if not hasattr(self, '_animation'): self._animation = [image] else: self._animation.append(image) # ****************************************************************************************************************** # EXECUTION # ****************************************************************************************************************** @handle_external_context() def run( self, inputs=None, scheduler=None, termination_processing=None, num_trials=None, call_before_time_step=None, call_after_time_step=None, call_before_pass=None, call_after_pass=None, call_before_trial=None, call_after_trial=None, clamp_input=SOFT_CLAMP, bin_execute=False, log=False, initial_values=None, reinitialize_values=None, runtime_params=None, skip_initialization=False, animate=False, context=None, base_context=Context(execution_id=None), ): """Pass inputs to Composition, then execute sets of nodes that are eligible to run until termination conditions are met. See `Run` for details of formatting input specifications. See `Run` for details of formatting input specifications. 
Use **animate** to generate a gif of the execution sequence. Arguments --------- inputs: { `Mechanism <Mechanism>` : list } or { `Composition <Composition>` : list } a dictionary containing a key-value pair for each Node in the composition that receives inputs from the user. For each pair, the key is the Node and the value is a list of inputs. Each input in the list corresponds to a certain `TRIAL`. scheduler : Scheduler the scheduler object that owns the conditions that will instruct the execution of the Composition. If not specified, the Composition will use its automatically generated scheduler. context context will be set to self.default_execution_id if unspecified base_context the context corresponding to the execution context from which this execution will be initialized, if values currently do not exist for **context** num_trials : int typically, the composition will infer the number of trials from the length of its input specification. To reuse the same inputs across many trials, you may specify an input dictionary with lists of length 1, or use default inputs, and select a number of trials with num_trials. call_before_time_step : callable will be called before each `TIME_STEP` is executed. call_after_time_step : callable will be called after each `TIME_STEP` is executed. call_before_pass : callable will be called before each `PASS` is executed. call_after_pass : callable will be called after each `PASS` is executed. call_before_trial : callable will be called before each `TRIAL` is executed. call_after_trial : callable will be called after each `TRIAL` is executed. initial_values : Dict[Node: Node Value] sets the values of nodes before the start of the run. This is useful in cases where a node's value is used before that node executes for the first time (usually due to recurrence or control). 
runtime_params : Dict[Node: Dict[Parameter: Tuple(Value, Condition)]] nested dictionary of (value, `Condition`) tuples for parameters of Nodes (`Mechanisms <Mechanism>` or `Compositions <Composition>` of the Composition; specifies alternate parameter values to be used only during this `Run` when the specified `Condition` is met. Outer dictionary: - *key* - Node - *value* - Runtime Parameter Specification Dictionary Runtime Parameter Specification Dictionary: - *key* - keyword corresponding to a parameter of the Node - *value* - tuple in which the index 0 item is the runtime parameter value, and the index 1 item is a `Condition` See `Run_Runtime_Parameters` for more details and examples of valid dictionaries. animate : dict or bool : False specifies use of the `show_graph <Composition.show_graph>` method to generate a gif movie showing the sequence of Components executed in a run. A dict can be specified containing options to pass to the `show_graph <Composition.show_graph>` method; each key must be a legal argument for the `show_graph <Composition.show_graph>` method, and its value a specification for that argument. The entries listed below can also be included in the dict to specify parameters of the animation. If the **animate** argument is specified simply as `True`, defaults are used for all arguments of `show_graph <Composition.show_graph>` and the options below: * *UNIT*: *EXECUTION_SET* or *COMPONENT* (default=\\ *EXECUTION_SET*\\ ) -- specifies which Components to treat as active in each call to `show_graph <Composition.show_graph>`. *COMPONENT* generates an image for the execution of each Component. *EXECUTION_SET* generates an image for each `execution_set <Component.execution_sets>`, showing all of the Components in that set as active. * *DURATION*: float (default=0.75) -- specifies the duration (in seconds) of each image in the movie. * *NUM_RUNS*: int (default=1) -- specifies the number of runs to animate; by default, this is 1. 
If the number specified is less than the total number of runs executed, only the number specified are animated; if it is greater than the number of runs being executed, only the number being run are animated. * *NUM_TRIALS*: int (default=1) -- specifies the number of trials to animate; by default, this is 1. If the number specified is less than the total number of trials being run, only the number specified are animated; if it is greater than the number of trials being run, only the number being run are animated. * *MOVIE_DIR*: str (default=project root dir) -- specifies the directdory to be used for the movie file; by default a subdirectory of <root_dir>/show_graph_OUTPUT/GIFS is created using the `name <Composition.name>` of the `Composition`, and the gif files are stored there. * *MOVIE_NAME*: str (default=\\ `name <System.name>` + 'movie') -- specifies the name to be used for the movie file; it is automatically appended with '.gif'. * *SAVE_IMAGES*: bool (default=\\ `False`\\ ) -- specifies whether to save each of the images used to construct the animation in separate gif files, in addition to the file containing the animation. * *SHOW*: bool (default=\\ `False`\\ ) -- specifies whether to show the animation after it is constructed, using the OS's default viewer. log : bool, LogCondition Sets the `log_condition <Parameter.log_condition>` for every primary `node <Composition.nodes>` and `projection <Composition.projections>` in this Composition, if it is not already set. .. note:: as when setting the `log_condition <Parameter.log_condition>` directly, a value of `True` will correspond to the `EXECUTION LogCondition <LogCondition.EXECUTION>`. COMMENT: REPLACE WITH EVC/OCM EXAMPLE Examples -------- This figure shows an animation of the Composition in the XXX example script, with the show_graph **show_learning** argument specified as *ALL*: .. _Composition_XXX_movie: .. 
figure:: _static/XXX_movie.gif :alt: Animation of Composition in XXX example script :scale: 50 % This figure shows an animation of the Composition in the XXX example script, with the show_graph **show_control** argument specified as *ALL* and *UNIT* specified as *EXECUTION_SET*: .. _Composition_XXX_movie: .. figure:: _static/XXX_movie.gif :alt: Animation of Composition in XXX example script :scale: 150 % COMMENT Returns --------- output value of the final Node executed in the composition : various """ if scheduler is None: scheduler = self.scheduler if termination_processing is None: termination_processing = self.termination_processing else: new_conds = self.termination_processing.copy() new_conds.update(termination_processing) termination_processing = new_conds if initial_values is not None: for node in initial_values: if node not in self.nodes: raise CompositionError("{} (entry in initial_values arg) is not a node in \'{}\'". format(node.name, self.name)) if reinitialize_values is None: reinitialize_values = {} for node in reinitialize_values: node.reinitialize(*reinitialize_values[node], context=context) # MODIFIED 8/27/19 OLD: # try: # if ContextFlags.SIMULATION not in context.execution_phase: # self._analyze_graph() # except AttributeError: # # if context is None, it has not been created for this context yet, so it is not # # in a simulation # self._analyze_graph() # MODIFIED 8/27/19 NEW: # FIX: MODIFIED FEEDBACK - # THIS IS NEEDED HERE (AND NO LATER) TO WORK WITH test_3_mechanisms_2_origins_1_additive_control_1_terminal # If a scheduler was passed in, first call _analyze_graph with default scheduler if scheduler is not self.scheduler: self._analyze_graph(context=context) # Then call _analyze graph with scheduler actually being used (passed in or default) try: if ContextFlags.SIMULATION not in context.execution_phase: self._analyze_graph(scheduler=scheduler, context=context) except AttributeError: # if context is None, it has not been created for this context 
yet, # so it is not in a simulation self._analyze_graph(scheduler=scheduler, context=context) # MODIFIED 8/27/19 END # set auto logging if it's not already set, and if log argument is True if log: for item in self.nodes + self.projections: if not isinstance(item, CompositionInterfaceMechanism): for param in item.parameters: if param.loggable and param.log_condition is LogCondition.OFF: param.log_condition = LogCondition.EXECUTION # Set animation attributes if animate is True: animate = {} self._animate = animate if self._animate is not False: self._set_up_animation(context) # SET UP EXECUTION ----------------------------------------------- results = [] self._assign_execution_ids(context) scheduler._init_counts(execution_id=context.execution_id) input_nodes = self.get_nodes_by_role(NodeRole.INPUT) # if there is only one INPUT Node, allow inputs to be specified in a list # if there is only one INPUT Node, allow inputs to be specified in a list if isinstance(inputs, (list, np.ndarray)): if len(input_nodes) == 1: inputs = {next(iter(input_nodes)): inputs} else: raise CompositionError( f"Inputs to {self.name} must be specified in a dictionary with a key for each of its " f"{len(input_nodes)} INPUT nodes ({[n.name for n in input_nodes]}).") elif callable(inputs): num_inputs_sets = 1 autodiff_stimuli = {} elif hasattr(inputs, '__next__'): num_inputs_sets = sys.maxsize autodiff_stimuli = {} elif not isinstance(inputs, dict): if len(input_nodes) == 1: raise CompositionError( "Inputs to {} must be specified in a list or in a dictionary " "with the INPUT node ({}) as its only key". format(self.name, next(iter(input_nodes)).name)) else: input_node_names = ", ".join([i.name for i in input_nodes]) raise CompositionError( "Inputs to {} must be specified in a dictionary " "with its {} INPUT nodes ({}) as the keys and their inputs as the values". 
                                   format(self.name, len(input_nodes), input_node_names))

        # NOTE(review): tail of Composition.run() — the method signature is above this chunk.
        # Parse dict-style inputs (functions/generators are handled per-trial below).
        if not callable(inputs) \
                and not hasattr(inputs, '__next__'):
            # Currently, no validation if 'inputs' arg is a function
            ad_tmp = {}
            if hasattr(self, 'learning_enabled') and self.learning_enabled is True:
                # Autodiff composition: stash full spec, pass only the "inputs" entry to parsing
                ad_tmp = inputs
                inputs = inputs["inputs"]
            inputs, num_inputs_sets, autodiff_stimuli = self._adjust_stimulus_dict(inputs, bin_execute=bin_execute)
            # HACK: basically checks to see if we retrieved info from the _adjust_stimulus_dict call,
            # and replaces it with our own parsed version if learning is enabled
            if hasattr(self, 'learning_enabled') and self.learning_enabled is True:
                autodiff_stimuli = ad_tmp

        # NOTE(review): the "is not None" branch is a no-op (num_trials = num_trials);
        # kept as-is — only the else branch has effect (default to one pass over the input sets).
        if num_trials is not None:
            num_trials = num_trials
        else:
            num_trials = num_inputs_sets

        scheduler._reset_counts_total(TimeScale.RUN, context.execution_id)

        # KDM 3/29/19: run the following not only during LLVM Run compilation, due to bug where TimeScale.RUN
        # termination condition is checked and no data yet exists. Adds slight overhead as long as run is not
        # called repeatedly (this init is repeated in Composition.execute)
        # initialize from base context but don't overwrite any values already set for this context
        if (not skip_initialization
                and (context is None or ContextFlags.SIMULATION not in context.execution_phase)):
            self._initialize_from_context(context, base_context, override=False)
        context.composition = self

        is_simulation = (context is not None and
                         ContextFlags.SIMULATION in context.execution_phase)
        # Compiled whole-run path (LLVM/PTX); falls through to the Python loop on failure.
        if (bin_execute is True or str(bin_execute).endswith('Run')):
            # There's no mode to run simulations.
            # Simulations are run as part of the controller node wrapper.
            assert not is_simulation
            try:
                if bin_execute is True or bin_execute.startswith('LLVM'):
                    _comp_ex = pnlvm.CompExecution(self, [context.execution_id])
                    results += _comp_ex.run(inputs, num_trials, num_inputs_sets, autodiff_stimuli=autodiff_stimuli)
                elif bin_execute.startswith('PTX'):
                    self.__ptx_initialize(context)
                    EX = self._compilation_data.ptx_execution._get(context)
                    results += EX.cuda_run(inputs, num_trials, num_inputs_sets)

                full_results = self.parameters.results._get(context)
                if full_results is None:
                    full_results = results
                else:
                    full_results.extend(results)

                self.parameters.results._set(full_results, context)
                # KAM added the [-1] index after changing Composition run()
                # behavior to return only last trial of run (11/7/18)
                self.most_recent_context = context
                return full_results[-1]

            except Exception as e:
                # Only swallow the error when bin_execute was an explicit mode string;
                # bin_execute is True means "best effort", so fall back to Python below.
                if bin_execute is not True:
                    raise e

                print("WARNING: Failed to Run execution `{}': {}".format(
                      self.name, str(e)))

        # Reset gym forager environment for the current trial
        if self.env:
            trial_output = np.atleast_2d(self.env.reset())

        # Loop over the length of the list of inputs - each input represents a TRIAL
        for trial_num in range(num_trials):

            # Execute call before trial "hook" (user defined function)
            if call_before_trial:
                call_with_pruned_args(call_before_trial, context=context)

            if termination_processing[TimeScale.RUN].is_satisfied(
                scheduler=scheduler,
                context=context
            ):
                break

            # PROCESSING ------------------------------------------------------------------------
            # Prepare stimuli from the outside world -- collect the inputs for this TRIAL and store them in a dict
            if callable(inputs):
                # If 'inputs' argument is a function, call the function here with results from last trial
                execution_stimuli = inputs(self.env, trial_output)
                if not isinstance(execution_stimuli, dict):
                    return trial_output
            elif hasattr(inputs, '__next__'):
                try:
                    execution_stimuli = inputs.__next__()
                except StopIteration:
                    break
            else:
                execution_stimuli = {}
                stimulus_index = trial_num % num_inputs_sets
                for node in inputs:
                    if len(inputs[node]) == 1:
                        execution_stimuli[node] = inputs[node][0]
                        continue
                    execution_stimuli[node] = inputs[node][stimulus_index]

            # NOTE(review): stimulus_index is only bound in the dict branch above; if inputs is a
            # callable/generator AND autodiff_stimuli contains lists, this raises NameError — TODO confirm.
            execution_autodiff_stimuli = {}
            for node in autodiff_stimuli:
                if isinstance(autodiff_stimuli[node], list):
                    execution_autodiff_stimuli[node] = autodiff_stimuli[node][stimulus_index]
                else:
                    execution_autodiff_stimuli[node] = autodiff_stimuli[node]

            # Reinitialize any stateful node whose reinitialize_when condition is met
            for node in self.nodes:
                if hasattr(node, "reinitialize_when") and node.parameters.has_initializers._get(context):
                    if node.reinitialize_when.is_satisfied(scheduler=self.scheduler, context=context):
                        node.reinitialize(None, context=context)

            # execute processing
            # pass along the stimuli for this trial
            trial_output = self.execute(inputs=execution_stimuli,
                                        autodiff_stimuli=execution_autodiff_stimuli,
                                        scheduler=scheduler,
                                        termination_processing=termination_processing,
                                        call_before_time_step=call_before_time_step,
                                        call_before_pass=call_before_pass,
                                        call_after_time_step=call_after_time_step,
                                        call_after_pass=call_after_pass,
                                        context=context,
                                        base_context=base_context,
                                        clamp_input=clamp_input,
                                        runtime_params=runtime_params,
                                        skip_initialization=True,
                                        bin_execute=bin_execute,
                                        )

            # ---------------------------------------------------------------------------------
            # store the result of this execute in case it will be the final result

            # object.results.append(result)
            if isinstance(trial_output, collections.abc.Iterable):
                result_copy = trial_output.copy()
            else:
                result_copy = trial_output

            if ContextFlags.SIMULATION not in context.execution_phase:
                results.append(result_copy)

                if not self.parameters.retain_old_simulation_data._get():
                    if self.controller is not None:
                        # if any other special parameters store simulation info that needs to be cleaned up
                        # consider dedicating a function to it here
                        # this will not be caught above because it resides in the base context (context)
                        if not self.parameters.simulation_results.retain_old_simulation_data:
                            self.parameters.simulation_results._get(context).clear()

                        if not self.controller.parameters.simulation_ids.retain_old_simulation_data:
                            self.controller.parameters.simulation_ids._get(context).clear()

            if call_after_trial:
                call_with_pruned_args(call_after_trial, context=context)

        scheduler.get_clock(context)._increment_time(TimeScale.RUN)

        # Append this run's results to any previously stored results for this context
        full_results = self.parameters.results._get(context)
        if full_results is None:
            full_results = results
        else:
            full_results.extend(results)

        self.parameters.results._set(full_results, context)

        self.most_recent_context = context

        if self._animate is not False:
            # Save list of gifs in self._animation as movie file
            movie_path = self._animation_directory + '/' + self._movie_filename
            self._animation[0].save(fp=movie_path,
                                    format='GIF',
                                    save_all=True,
                                    append_images=self._animation[1:],
                                    duration=self._image_duration * 1000,
                                    loop=0)
            # print(f'\nSaved movie for {self.name} in {self._animation_directory}/{self._movie_filename}')
            print(f"\nSaved movie for '{self.name}' in '{self._movie_filename}'")
            if self._show_animation:
                movie = Image.open(movie_path)
                movie.show()

        return trial_output

    @handle_external_context(execution_phase=ContextFlags.PROCESSING)
    def execute(
        self,
        inputs=None,
        autodiff_stimuli=None,
        scheduler=None,
        termination_processing=None,
        call_before_time_step=None,
        call_before_pass=None,
        call_after_time_step=None,
        call_after_pass=None,
        context=None,
        base_context=Context(execution_id=None),
        clamp_input=SOFT_CLAMP,
        runtime_params=None,
        skip_initialization=False,
        bin_execute=False,
    ):
        """
            Passes inputs to any Nodes receiving inputs directly from the user (via the "inputs" argument) then
            coordinates with the Scheduler to execute sets of nodes that are eligible to execute until termination
            conditions are met.

            Arguments
            ---------

            inputs: { `Mechanism <Mechanism>` or `Composition <Composition>` : list }
                a dictionary containing a key-value pair for each node in the composition that receives inputs from
                the user.
                For each pair, the key is the node (Mechanism or Composition) and the value is an input, the shape
                of which must match the node's default variable.

            scheduler : Scheduler
                the scheduler object that owns the conditions that will instruct the execution of this Composition
                If not specified, the Composition will use its automatically generated scheduler.

            context
                context will be set to self.default_execution_id if unspecified

            base_context
                the context corresponding to the execution context from which this execution will be initialized,
                if values currently do not exist for **context**

            call_before_time_step : callable
                called before each `TIME_STEP` is executed
                passed the current *context* (but it is not necessary for your callable to take)

            call_after_time_step : callable
                called after each `TIME_STEP` is executed
                passed the current *context* (but it is not necessary for your callable to take)

            call_before_pass : callable
                called before each `PASS` is executed
                passed the current *context* (but it is not necessary for your callable to take)

            call_after_pass : callable
                called after each `PASS` is executed
                passed the current *context* (but it is not necessary for your callable to take)

            Returns
            ---------

            output value of the final Mechanism executed in the Composition : various
        """

        # ASSIGNMENTS **************************************************************************************************

        if bin_execute == 'Python':
            bin_execute = False

        if not hasattr(self, '_animate'):
            # These are meant to be assigned in run method; needed here for direct call to execute method
            self._animate = False

        # KAM Note 4/29/19
        # The nested var is set to True if this Composition is nested in another Composition, otherwise False
        # Later on, this is used to determine:
        #   (1) whether to initialize from context
        #   (2) whether to assign values to CIM from input dict (if not nested) or simply execute CIM (if nested)
        nested = False
        if len(self.input_CIM.path_afferents) > 0:
            nested = True

        runtime_params = self._parse_runtime_params(runtime_params)

        # Assign the same execution_ids to all nodes in the Composition and get it (if it was None)
        self._assign_execution_ids(context)
        context.composition = self

        input_nodes = self.get_nodes_by_role(NodeRole.INPUT)

        execution_scheduler = scheduler or self.scheduler

        context.source = ContextFlags.COMPOSITION

        if termination_processing is None:
            termination_processing = self.termination_processing

        # Skip initialization if possible (for efficiency):
        # - and(context has not changed
        # -     structure of the graph has not changed
        # -     not a nested composition
        # -     its not a simulation)
        # - or(gym forage env is being used)
        # (e.g., when run is called externally repeated for the same environment)
        # KDM added HACK below "or self.env is None" in order to merge in interactive inputs fix for speed improvement
        # TBI: Clean way to call _initialize_from_context if context has not changed, BUT composition has changed
        # for example:
        # comp.run()
        # comp.add_node(new_node)
        # comp.run().
        # context has not changed on the comp, BUT new_node's execution id needs to be set from None --> ID
        if self.most_recent_context != context or self.env is None:
            # initialize from base context but don't overwrite any values already set for this context
            # NOTE(review): mixed and/or without parentheses — precedence is
            # (A and B) or (C and D); looks intentional but verify against callers.
            if (
                not skip_initialization
                and not nested
                or context is None
                and context.execution_phase is not ContextFlags.SIMULATION
            ):
                self._initialize_from_context(context, base_context, override=False)
                context.composition = self

        # Generate first frame of animation without any active_items
        if self._animate is not False:
            # If context fails, the scheduler has no data for it yet.
            # It also may be the first, so fall back to default execution_id
            try:
                self._animate_execution(INITIAL_FRAME, context)
            except KeyError:
                old_eid = context.execution_id
                context.execution_id = self.default_execution_id
                self._animate_execution(INITIAL_FRAME, context)
                context.execution_id = old_eid

        # EXECUTE INPUT CIM ********************************************************************************************

        # FIX: 6/12/19 MOVE TO EXECUTE BELOW? (i.e., with bin_execute / _comp_ex.execute_node(self.input_CIM, inputs))
        # Handles Input CIM and Parameter CIM execution.
        #
        # FIX: 8/21/19
        # If self is a nested composition, its input CIM will obtain its value in one of two ways,
        # depending on whether or not it is being executed within a simulation.
        # If it is a simulation, then we need to use the _assign_values_to_input_CIM method, which parses the inputs
        # argument of the execute method into a suitable shape for the input ports of the input_CIM.
        # If it is not a simulation, we can simply execute the input CIM.
        #
        # If self is an unnested composition, we must update the input ports for any input nodes that are Compositions.
        # This is done to update the variable for their input CIMs, which allows the _adjust_execution_stimuli
        # method to properly validate input for those nodes.
        # -DS

        context.add_flag(ContextFlags.PROCESSING)
        if nested:
            # check that inputs are specified - autodiff does not in some cases
            if ContextFlags.SIMULATION in context.execution_phase and inputs is not None:
                inputs = self._adjust_execution_stimuli(inputs)
                self._assign_values_to_input_CIM(inputs, context=context)
            else:
                self.input_CIM.execute(context=context)
                self.parameter_CIM.execute(context=context)
        else:
            inputs = self._adjust_execution_stimuli(inputs)
            self._assign_values_to_input_CIM(inputs, context=context)

        # FIX: 6/12/19 Deprecate?
        # Manage input clamping
        next_pass_before = 1
        next_pass_after = 1
        if clamp_input:
            soft_clamp_inputs = self._identify_clamp_inputs(SOFT_CLAMP, clamp_input, input_nodes)
            hard_clamp_inputs = self._identify_clamp_inputs(HARD_CLAMP, clamp_input, input_nodes)
            pulse_clamp_inputs = self._identify_clamp_inputs(PULSE_CLAMP, clamp_input, input_nodes)
            no_clamp_inputs = self._identify_clamp_inputs(NO_CLAMP, clamp_input, input_nodes)

        # Animate input_CIM
        # FIX: COORDINATE WITH REFACTORING OF PROCESSING/CONTROL CONTEXT
        #      (NOT SURE WHETHER IT CAN BE LEFT IN PROCESSING AFTER THAT)
        if self._animate is not False and SHOW_CIM in self._animate and self._animate[SHOW_CIM]:
            self._animate_execution(self.input_CIM, context)
        # FIX: END
        context.remove_flag(ContextFlags.PROCESSING)

        # EXECUTE CONTROLLER (if specified for BEFORE) *****************************************************************

        # Compile controller execution (if compilation is specified) --------------------------------

        if bin_execute:
            is_simulation = (context is not None and
                             ContextFlags.SIMULATION in context.execution_phase)
            # Try running in Exec mode first
            if (bin_execute is True or str(bin_execute).endswith('Exec')):
                # There's no mode to execute simulations.
                # Simulations are run as part of the controller node wrapper.
                assert not is_simulation
                try:
                    if bin_execute is True or bin_execute.startswith('LLVM'):
                        _comp_ex = pnlvm.CompExecution(self, [context.execution_id])
                        _comp_ex.execute(inputs)
                        return _comp_ex.extract_node_output(self.output_CIM)
                    elif bin_execute.startswith('PTX'):
                        self.__ptx_initialize(context)
                        __execution = self._compilation_data.ptx_execution._get(context)
                        __execution.cuda_execute(inputs)
                        return __execution.extract_node_output(self.output_CIM)
                except Exception as e:
                    if bin_execute is not True:
                        raise e

                    string = "Failed to execute `{}': {}".format(self.name, str(e))
                    print("WARNING: {}".format(string))

            # Exec failed for some reason, we can still try node level bin_execute
            try:
                # Filter out mechanisms. Nested compositions are not executed in this mode
                # Filter out controller. Compilation of controllers is not supported yet
                mechanisms = [n for n in self._all_nodes
                              if isinstance(n, Mechanism) and (n is not self.controller or not is_simulation)]
                # Generate all mechanism wrappers
                for m in mechanisms:
                    self._get_node_wrapper(m)

                _comp_ex = pnlvm.CompExecution(self, [context.execution_id])
                # Compile all mechanism wrappers
                for m in mechanisms:
                    _comp_ex._set_bin_node(m)

                bin_execute = True
            except Exception as e:
                if bin_execute is not True:
                    raise e

                string = "Failed to compile wrapper for `{}' in `{}': {}".format(m.name, self.name, str(e))
                print("WARNING: {}".format(string))
                bin_execute = False

        # Execute controller --------------------------------------------------------

        if (self.enable_controller and
                self.controller_mode is BEFORE and
                self.controller_condition.is_satisfied(scheduler=execution_scheduler,
                                                       context=context)):

            # control phase
            # FIX: SHOULD SET CONTEXT AS CONTROL HERE AND RESET AT END (AS DONE FOR animation BELOW)
            if (
                    self.initialization_status != ContextFlags.INITIALIZING
                    and ContextFlags.SIMULATION not in context.execution_phase
            ):
                if self.controller and not bin_execute:
                    # FIX: REMOVE ONCE context IS SET TO CONTROL ABOVE
                    # FIX: END REMOVE
                    context.add_flag(ContextFlags.PROCESSING)
                    self.controller.execute(context=context)

                if bin_execute:
                    _comp_ex.execute_node(self.controller)

                context.remove_flag(ContextFlags.PROCESSING)

                # Animate controller (before execution)
                context.add_flag(ContextFlags.CONTROL)
                if self._animate != False and SHOW_CONTROLLER in self._animate and self._animate[SHOW_CONTROLLER]:
                    self._animate_execution(self.controller, context)
                context.remove_flag(ContextFlags.CONTROL)

        # EXECUTE (each execution_set) *********************************************************************************

        # PREPROCESS (get inputs, call_before_pass, animate first frame) ----------------------------------

        context.add_flag(ContextFlags.PROCESSING)

        if bin_execute:
            _comp_ex.execute_node(self.input_CIM, inputs)
        # WHY DO BOTH?  WHY NOT if-else?

        if call_before_pass:
            call_with_pruned_args(call_before_pass, context=context)

        # GET execution_set -------------------------------------------------------------------------
        # run scheduler to receive sets of nodes that may be executed at this time step in any order
        for next_execution_set in execution_scheduler.run(termination_conds=termination_processing,
                                                          context=context,
                                                          skip_trial_time_increment=True,
                                                          ):

            # SETUP EXECUTION ----------------------------------------------------------------------------

            # FIX: 6/12/19 WHY IS call_*after*_pass BEING CALLED BEFORE THE PASS?
            if call_after_pass:
                if next_pass_after == \
                        execution_scheduler.get_clock(context).get_total_times_relative(TimeScale.PASS,
                                                                                        TimeScale.TRIAL):
                    logger.debug('next_pass_after {0}\tscheduler pass {1}'.
                                 format(next_pass_after,
                                        execution_scheduler.get_clock(context).get_total_times_relative(
                                                TimeScale.PASS, TimeScale.TRIAL)))
                    call_with_pruned_args(call_after_pass, context=context)
                    next_pass_after += 1

            if call_before_pass:
                if next_pass_before == \
                        execution_scheduler.get_clock(context).get_total_times_relative(TimeScale.PASS,
                                                                                        TimeScale.TRIAL):
                    call_with_pruned_args(call_before_pass, context=context)
                    logger.debug('next_pass_before {0}\tscheduler pass {1}'.
                                 format(next_pass_before,
                                        execution_scheduler.get_clock(context).get_total_times_relative(
                                                TimeScale.PASS, TimeScale.TRIAL)))
                    next_pass_before += 1

            if call_before_time_step:
                call_with_pruned_args(call_before_time_step, context=context)

            # MANAGE EXECUTION OF FEEDBACK / CYCLIC GRAPHS ------------------------------------------------
            # Set up storage of all node values *before* the start of each timestep
            # If nodes within a timestep are connected by projections, those projections must pass their senders'
            #       values from the beginning of the timestep (i.e. their "frozen values")
            # This ensures that the order in which nodes execute does not affect the results of this timestep
            frozen_values = {}
            new_values = {}
            if bin_execute:
                _comp_ex.freeze_values()

            # PURGE LEARNING IF NOT ENABLED ----------------------------------------------------------------
            # If learning is turned off, check for any learning related nodes and remove them from the execution set
            if not self.enable_learning:
                next_execution_set = next_execution_set - set(self.get_nodes_by_role(NodeRole.LEARNING))

            # ANIMATE execution_set ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            if self._animate is not False and self._animate_unit is EXECUTION_SET:
                self._animate_execution(next_execution_set, context)

            # EXECUTE (each node) --------------------------------------------------------------------------

            # execute each node with EXECUTING in context
            for node in next_execution_set:

                # Store values of all nodes in this execution_set for use by other nodes in the execution set
                #    throughout this timestep (e.g., for recurrent Projections)
                frozen_values[node] = node.get_output_values(context)

                # FIX: 6/12/19 Deprecate?
                # Handle input clamping
                if node in input_nodes:
                    if clamp_input:
                        if node in hard_clamp_inputs:
                            # clamp = HARD_CLAMP --> "turn off" recurrent projection
                            if hasattr(node, "recurrent_projection"):
                                node.recurrent_projection.sender.parameters.value._set([0.0], context)
                        elif node in no_clamp_inputs:
                            for input_port in node.input_ports:
                                self.input_CIM_ports[input_port][1].parameters.value._set(0.0, context)

                # EXECUTE A MECHANISM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

                if isinstance(node, Mechanism):

                    execution_runtime_params = {}

                    if node in runtime_params:
                        for param in runtime_params[node]:
                            if runtime_params[node][param][1].is_satisfied(scheduler=execution_scheduler,
                                                                           # KAM 5/15/18 - not sure if this will always be the correct execution id:
                                                                           context=context):
                                execution_runtime_params[param] = runtime_params[node][param][0]

                    # Set context.execution_phase

                    # Set to PROCESSING by default
                    context.add_flag(ContextFlags.PROCESSING)

                    # Set to LEARNING if Mechanism receives any PathwayProjections that are being learned
                    #    for which learning_enabled == True or ONLINE (i.e., not False or AFTER)
                    if self.enable_learning:
                        projections = set(self.projections).intersection(set(node.path_afferents))
                        if any([p for p in projections if
                                any([a for a in p.parameter_ports[MATRIX].mod_afferents
                                     if (hasattr(a, 'learning_enabled')
                                         and a.learning_enabled in {True, ONLINE})])]):
                            context.replace_flag(ContextFlags.PROCESSING, ContextFlags.LEARNING)

                    # Execute node
                    if bin_execute:
                        _comp_ex.execute_node(node)
                    else:
                        if node is not self.controller:
                            if nested and node in self.get_nodes_by_role(NodeRole.INPUT):
                                for port in node.input_ports:
                                    port._update(context=context)
                            node.execute(
                                context=context,
                                runtime_params=execution_runtime_params,
                            )

                    # Reset runtime_params for node and its function if specified
                    if context.execution_id in node._runtime_params_reset:
                        for key in node._runtime_params_reset[context.execution_id]:
                            node._set_parameter_value(key,
                                                      node._runtime_params_reset[context.execution_id][key],
                                                      context)
                    node._runtime_params_reset[context.execution_id] = {}

                    if context.execution_id in node.function._runtime_params_reset:
                        for key in node.function._runtime_params_reset[context.execution_id]:
                            node.function._set_parameter_value(
                                    key,
                                    node.function._runtime_params_reset[context.execution_id][key],
                                    context)
                    node.function._runtime_params_reset[context.execution_id] = {}

                    # Set execution_phase for node's context back to IDLE
                    if self.enable_learning:
                        context.replace_flag(ContextFlags.LEARNING, ContextFlags.PROCESSING)
                    context.remove_flag(ContextFlags.PROCESSING)

                # EXECUTE A NESTED COMPOSITION ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

                elif isinstance(node, Composition):

                    # Set up compilation
                    if bin_execute:
                        # Values of node with compiled wrappers are in binary data structure
                        srcs = (proj.sender.owner for proj in node.input_CIM.afferents
                                if proj.sender.owner in self.__generated_node_wrappers)
                        for srnode in srcs:
                            assert srnode in self.nodes or srnode is self.input_CIM
                            data = _comp_ex.extract_frozen_node_output(srnode)
                            for i, v in enumerate(data):
                                # This sets frozen values
                                srnode.output_ports[i].parameters.value._set(v, context, skip_history=True,
                                                                             skip_log=True)

                    # Pass outer context to nested Composition
                    context.composition = node
                    if ContextFlags.SIMULATION in context.execution_phase:
                        is_simulating = True
                        context.remove_flag(ContextFlags.SIMULATION)
                    else:
                        is_simulating = False

                    # Execute Composition
                    # FIX: 6/12/19 WHERE IS COMPILED EXECUTION OF NESTED NODE?
                    # autodiff compositions must be passed extra inputs
                    pytorch_enabled = False
                    if hasattr(node, "pytorch_representation"):
                        if node.learning_enabled:
                            pytorch_enabled = True
                    # Autodiff execution
                    if pytorch_enabled:
                        ret = node.execute(inputs=autodiff_stimuli[node],
                                           context=context)
                    # Standard execution
                    else:
                        ret = node.execute(context=context)

                    if is_simulating:
                        context.add_flag(ContextFlags.SIMULATION)

                    context.composition = self

                    # Get output info from compiled execution
                    if bin_execute:
                        # Update result in binary data structure
                        _comp_ex.insert_node_output(node, ret)
                        for i, v in enumerate(ret):
                            # Set current output. This will be stored to "new_values" below
                            node.output_CIM.output_ports[i].parameters.value._set(v, context, skip_history=True,
                                                                                  skip_log=True)

                # ANIMATE node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                if self._animate is not False and self._animate_unit is COMPONENT:
                    self._animate_execution(node, context)

                # MANAGE INPUTS (for next execution_set)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

                # FIX: 6/12/19 Deprecate?
                # Handle input clamping
                if node in input_nodes:
                    if clamp_input:
                        if node in pulse_clamp_inputs:
                            for input_port in node.input_ports:
                                # clamp = None --> "turn off" input node
                                self.input_CIM_ports[input_port][1].parameters.value._set(0, context)

                # Store new value generated by node,
                #    then set back to frozen value for use by other nodes in execution_set
                new_values[node] = node.get_output_values(context)
                for i in range(len(node.output_ports)):
                    node.output_ports[i].parameters.value._set(frozen_values[node][i], context,
                                                               skip_history=True, skip_log=True)

            # Set all nodes to new values
            for node in next_execution_set:
                for i in range(len(node.output_ports)):
                    node.output_ports[i].parameters.value._set(new_values[node][i], context,
                                                               skip_history=True, skip_log=True)

            if call_after_time_step:
                call_with_pruned_args(call_after_time_step, context=context)

        context.remove_flag(ContextFlags.PROCESSING)

        # Update matrix parameter of PathwayProjections being learned with learning_enabled==AFTER
        if self.enable_learning:
            context.add_flag(ContextFlags.LEARNING)
            for projection in [p for p in self.projections if
                               hasattr(p, 'has_learning_projection') and p.has_learning_projection]:
                matrix_parameter_port = projection.parameter_ports[MATRIX]
                if any([lp for lp in matrix_parameter_port.mod_afferents if lp.learning_enabled == AFTER]):
                    matrix_parameter_port._update(context=context)
            context.remove_flag(ContextFlags.LEARNING)

        if call_after_pass:
            call_with_pruned_args(call_after_pass, context=context)

        # Animate output_CIM
        # FIX: NOT SURE WHETHER IT CAN BE LEFT IN PROCESSING AFTER THIS -
        #      COORDINATE WITH REFACTORING OF PROCESSING/CONTROL CONTEXT
        if self._animate is not False and SHOW_CIM in self._animate and self._animate[SHOW_CIM]:
            self._animate_execution(self.output_CIM, context)
        # FIX: END

        # EXECUTE CONTROLLER (if controller_mode == AFTER) ************************************************************
        if (self.enable_controller and
                self.controller_mode == AFTER and
self.controller_condition.is_satisfied(scheduler=execution_scheduler, context=context)): # control phase if ( self.initialization_status != ContextFlags.INITIALIZING and ContextFlags.SIMULATION not in context.execution_phase ): context.add_flag(ContextFlags.CONTROL) if self.controller and not bin_execute: self.controller.execute(context=context) if bin_execute: _comp_ex.freeze_values() _comp_ex.execute_node(self.controller) # Animate controller (after execution) if self._animate is not False and SHOW_CONTROLLER in self._animate and self._animate[SHOW_CONTROLLER]: self._animate_execution(self.controller, context) context.remove_flag(ContextFlags.CONTROL) execution_scheduler.get_clock(context)._increment_time(TimeScale.TRIAL) # REPORT RESULTS *********************************************************************************************** # Extract result here if bin_execute: _comp_ex.freeze_values() _comp_ex.execute_node(self.output_CIM) return _comp_ex.extract_node_output(self.output_CIM) context.add_flag(ContextFlags.PROCESSING) self.output_CIM.execute(context=context) context.remove_flag(ContextFlags.PROCESSING) output_values = [] for port in self.output_CIM.output_ports: output_values.append(port.parameters.value._get(context)) return output_values @handle_external_context(execution_id=NotImplemented) def reinitialize(self, values, context=NotImplemented): if context.execution_id is NotImplemented: context.execution_id = self.most_recent_context.execution_id for i in range(self.stateful_nodes): self.stateful_nodes[i].reinitialize(values[i], context=context) def disable_all_history(self): """ When run, disables history tracking for all Parameters of all Components used in this Composition """ self._set_all_parameter_properties_recursively(history_max_length=0) def _get_processing_condition_set(self, node): dep_group = [] for group in self.scheduler.consideration_queue: if node in group: break dep_group = group # NOTE: This is not ideal we don't need to depend on # 
the entire previous group. Only our dependencies cond = [EveryNCalls(dep, 1) for dep in dep_group] if node not in self.scheduler.conditions: cond.append(Always()) else: node_conds = self.scheduler.conditions[node] cond.append(node_conds) return All(*cond) def _input_matches_variable(self, input_value, var): # input_value ports are uniform if np.shape(np.atleast_2d(input_value)) == np.shape(var): return "homogeneous" # input_value ports have different lengths elif len(np.shape(var)) == 1 and isinstance(var[0], (list, np.ndarray)): for i in range(len(input_value)): if len(input_value[i]) != len(var[i]): return False return "heterogeneous" return False def _adjust_stimulus_dict(self, stimuli,bin_execute=False): autodiff_stimuli = {} all_stimuli_keys = list(stimuli.keys()) for node in all_stimuli_keys: if hasattr(node, "pytorch_representation"): if node.learning_enabled: autodiff_stimuli[node] = stimuli[node] del stimuli[node] # STEP 1A: Check that all of the nodes listed in the inputs dict are INPUT nodes in the composition input_nodes = self.get_nodes_by_role(NodeRole.INPUT) for node in stimuli.keys(): if not node in input_nodes: if not isinstance(node, (Mechanism, Composition)): raise CompositionError(f'{node} in "inputs" dict for {self.name} is not a ' f'{Mechanism.__name__} or {Composition.__name__}.') else: raise CompositionError(f"{node.name} in inputs dict for {self.name} is not one of its INPUT nodes.") # STEP 1B: Check that all of the INPUT nodes are represented - if not, use default_external_input_values for node in input_nodes: if not node in stimuli: stimuli[node] = node.default_external_input_values # STEP 2: Loop over all dictionary entries to validate their content and adjust any convenience notations: # (1) Replace any user provided convenience notations with values that match the following specs: # a - all dictionary values are lists containing an input value for each trial (even if only one trial) # b - each input value is a 2d array that matches 
        #     variable
        # example: { Mech1: [Fully_specified_input_for_mech1_on_trial_1, Fully_specified_input_for_mech1_on_trial_2 … ],
        #            Mech2: [Fully_specified_input_for_mech2_on_trial_1, Fully_specified_input_for_mech2_on_trial_2 … ]}
        # (2) Verify that all nodes provide the same number of inputs (check length of each dictionary value)

        adjusted_stimuli = {}
        nums_input_sets = set()
        for node, stim_list in stimuli.items():
            if isinstance(node, Composition):
                if isinstance(stim_list, dict):

                    # Nested Composition given a dict: recursively normalize, then translate the
                    # nested nodes' inputs into the nested Composition's input-CIM port order.
                    adjusted_stimulus_dict, num_trials, autodiff_stimuli = node._adjust_stimulus_dict(stim_list)
                    translated_stimulus_dict = {}

                    # first time through the stimulus dictionary, assemble a dictionary in which the keys are input CIM
                    # InputPorts and the values are lists containing the first input value
                    for nested_input_node, values in adjusted_stimulus_dict.items():
                        first_value = values[0]
                        for i in range(len(first_value)):
                            input_port = nested_input_node.external_input_ports[i]
                            input_cim_input_port = node.input_CIM_ports[input_port][0]
                            translated_stimulus_dict[input_cim_input_port] = [first_value[i]]
                            # then loop through the stimulus dictionary again for each remaining trial
                            for trial in range(1, num_trials):
                                translated_stimulus_dict[input_cim_input_port].append(values[trial][i])

                    adjusted_stimulus_list = []
                    for trial in range(num_trials):
                        trial_adjusted_stimulus_list = []
                        for port in node.external_input_ports:
                            trial_adjusted_stimulus_list.append(translated_stimulus_dict[port][trial])
                        adjusted_stimulus_list.append(trial_adjusted_stimulus_list)
                    stimuli[node] = adjusted_stimulus_list
                    stim_list = adjusted_stimulus_list  # ADDED CW 12/21/18: This line fixed a bug, but it might be a hack

            # excludes any input ports marked "internal_only" (usually recurrent)
            # KDM 3/29/19: changed to use defaults equivalent of node.external_input_values
            input_must_match = [input_port.defaults.value for input_port in node.input_ports
                                if not input_port.internal_only]

            if input_must_match == []:
                # all input ports are internal_only
                continue

            check_spec_type = self._input_matches_variable(stim_list, input_must_match)

            # If a node provided a single input, wrap it in one more list in order to represent trials
            if check_spec_type == "homogeneous" or check_spec_type == "heterogeneous":
                if check_spec_type == "homogeneous":
                    # np.atleast_2d will catch any single-input ports specified without an outer list
                    # e.g. [2.0, 2.0] --> [[2.0, 2.0]]
                    adjusted_stimuli[node] = [np.atleast_2d(stim_list)]
                else:
                    adjusted_stimuli[node] = [stim_list]
                nums_input_sets.add(1)

            else:
                adjusted_stimuli[node] = []
                for stim in stimuli[node]:
                    check_spec_type = self._input_matches_variable(stim, input_must_match)
                    # loop over each input to verify that it matches variable
                    if check_spec_type == False:
                        err_msg = "Input stimulus ({}) for {} is incompatible with its external_input_values ({}).".\
                            format(stim, node.name, input_must_match)
                        # 8/3/17 CW: I admit the error message implementation here is very hacky; but it's at least
                        # not a hack for "functionality" but rather a hack for user clarity
                        if "KWTA" in str(type(node)):
                            err_msg = err_msg + " For KWTA mechanisms, remember to append an array of zeros (or other values)" \
                                                " to represent the outside stimulus for the inhibition InputPort, and " \
                                                "for systems, put your inputs"
                        raise RunError(err_msg)
                    elif check_spec_type == "homogeneous":
                        # np.atleast_2d will catch any single-input ports specified without an outer list
                        # e.g. [2.0, 2.0] --> [[2.0, 2.0]]
                        adjusted_stimuli[node].append(np.atleast_2d(stim))
                    else:
                        adjusted_stimuli[node].append(stim)

                nums_input_sets.add(len(stimuli[node]))

        # Single-trial specs (length 1) are broadcast, so a set of {1, N} is acceptable
        if len(nums_input_sets) > 1:
            if 1 in nums_input_sets:
                nums_input_sets.remove(1)
                if len(nums_input_sets) > 1:
                    raise CompositionError("The input dictionary for {} contains input specifications of different "
                                           "lengths ({}). The same number of inputs must be provided for each node "
                                           "in a Composition.".format(self.name, nums_input_sets))
            else:
                raise CompositionError("The input dictionary for {} contains input specifications of different "
                                       "lengths ({}). The same number of inputs must be provided for each node "
                                       "in a Composition.".format(self.name, nums_input_sets))

        num_input_sets = nums_input_sets.pop()
        return adjusted_stimuli, num_input_sets, autodiff_stimuli

    def _adjust_execution_stimuli(self, stimuli):
        """Validate/normalize a single trial's stimuli for execute(); returns a node->value dict."""
        adjusted_stimuli = {}
        for node, stimulus in stimuli.items():
            if isinstance(node, Composition):
                input_must_match = node.default_external_input_values
                if isinstance(stimulus, dict):
                    # NOTE(review): _adjust_stimulus_dict returns a 3-tuple
                    # (adjusted_stimuli, num_input_sets, autodiff_stimuli), yet the whole tuple is
                    # stored as the node's stimulus here — looks suspicious; TODO confirm consumers.
                    adjusted_stimulus_dict = node._adjust_stimulus_dict(stimulus)
                    adjusted_stimuli[node] = adjusted_stimulus_dict
                    continue
            else:
                input_must_match = node.default_external_input_values

            check_spec_type = self._input_matches_variable(stimulus, input_must_match)
            # If a node provided a single input, wrap it in one more list in order to represent trials
            if check_spec_type == "homogeneous" or check_spec_type == "heterogeneous":
                if check_spec_type == "homogeneous":
                    # np.atleast_2d will catch any single-input ports specified without an outer list
                    # e.g. [2.0, 2.0] --> [[2.0, 2.0]]
                    adjusted_stimuli[node] = np.atleast_2d(stimulus)
                else:
                    adjusted_stimuli[node] = stimulus

            else:
                raise CompositionError("Input stimulus ({}) for {} is incompatible with its variable ({})."
.format(stimulus, node.name, input_must_match)) return adjusted_stimuli def _assign_values_to_input_CIM(self, inputs, context=None): """ Assign values from input dictionary to the InputPorts of the Input CIM, then execute the Input CIM """ build_CIM_input = [] for input_port in self.input_CIM.input_ports: # "input_port" is an InputPort on the input CIM for key in self.input_CIM_ports: # "key" is an InputPort on an origin Node of the Composition if self.input_CIM_ports[key][0] == input_port: origin_input_port = key origin_node = key.owner index = origin_node.input_ports.index(origin_input_port) if isinstance(origin_node, CompositionInterfaceMechanism): index = origin_node.input_ports.index(origin_input_port) origin_node = origin_node.composition if origin_node in inputs: value = inputs[origin_node][index] else: value = origin_node.defaults.variable[index] build_CIM_input.append(value) self.input_CIM.execute(build_CIM_input, context=context) def _assign_execution_ids(self, context=None): """ assigns the same execution id to each Node in the composition's processing graph as well as the CIMs. 
he execution id is either specified in the user's call to run(), or from the Composition's **default_execution_id** """ # Traverse processing graph and assign one execution_id to all of its nodes if context.execution_id is None: context.execution_id = self.default_execution_id if context.execution_id not in self.execution_ids: self.execution_ids.add(context.execution_id) def _identify_clamp_inputs(self, list_type, input_type, origins): # clamp type of this list is same as the one the user set for the whole composition; return all nodes if list_type == input_type: return origins # the user specified different types of clamps for each origin node; generate a list accordingly elif isinstance(input_type, dict): return [k for k, v in input_type.items() if list_type == v] # clamp type of this list is NOT same as the one the user set for the whole composition; return empty list else: return [] def _parse_runtime_params(self, runtime_params): if runtime_params is None: return {} for node in runtime_params: for param in runtime_params[node]: if isinstance(runtime_params[node][param], tuple): if len(runtime_params[node][param]) == 1: runtime_params[node][param] = (runtime_params[node][param], Always()) elif len(runtime_params[node][param]) != 2: raise CompositionError( "Invalid runtime parameter specification ({}) for {}'s {} parameter in {}. " "Must be a tuple of the form (parameter value, condition), or simply the " "parameter value. 
".format(runtime_params[node][param], node.name, param, self.name)) else: runtime_params[node][param] = (runtime_params[node][param], Always()) return runtime_params def _after_agent_rep_execution(self, context=None): pass # ****************************************************************************************************************** # LLVM # ****************************************************************************************************************** def _get_param_struct_type(self, ctx): mech_param_type_list = (ctx.get_param_struct_type(m) for m in self._all_nodes) proj_param_type_list = (ctx.get_param_struct_type(p) for p in self.projections) return pnlvm.ir.LiteralStructType(( pnlvm.ir.LiteralStructType(mech_param_type_list), pnlvm.ir.LiteralStructType(proj_param_type_list))) def _get_state_struct_type(self, ctx): mech_ctx_type_list = (ctx.get_state_struct_type(m) for m in self._all_nodes) proj_ctx_type_list = (ctx.get_state_struct_type(p) for p in self.projections) return pnlvm.ir.LiteralStructType(( pnlvm.ir.LiteralStructType(mech_ctx_type_list), pnlvm.ir.LiteralStructType(proj_ctx_type_list))) def _get_input_struct_type(self, ctx): pathway = ctx.get_input_struct_type(self.input_CIM) if not self.parameter_CIM.afferents: return pathway modulatory = ctx.get_input_struct_type(self.parameter_CIM) return pnlvm.ir.LiteralStructType((pathway, modulatory)) def _get_output_struct_type(self, ctx): return ctx.get_output_struct_type(self.output_CIM) def _get_data_struct_type(self, ctx): output_type_list = (ctx.get_output_struct_type(m) for m in self._all_nodes) data = [pnlvm.ir.LiteralStructType(output_type_list)] for node in self.nodes: nested_data = ctx.get_data_struct_type(node) data.append(nested_data) return pnlvm.ir.LiteralStructType(data) def _get_state_initializer(self, context=None, simulation=False): mech_contexts = (tuple(m._get_state_initializer(context=context)) for m in self._all_nodes if m is not self.controller or not simulation) proj_contexts = 
(tuple(p._get_state_initializer(context=context)) for p in self.projections) return (tuple(mech_contexts), tuple(proj_contexts)) def _get_param_initializer(self, context, simulation=False): mech_params = (tuple(m._get_param_initializer(context)) for m in self._all_nodes if m is not self.controller or not simulation) proj_params = (tuple(p._get_param_initializer(context)) for p in self.projections) return (tuple(mech_params), tuple(proj_params)) def _get_data_initializer(self, context=None): output = [(os.parameters.value.get(context) for os in m.output_ports) for m in self._all_nodes] data = [output] for node in self.nodes: nested_data = node._get_data_initializer(context=context) \ if hasattr(node,'_get_data_initializer') else [] data.append(nested_data) return pnlvm._tupleize(data) def _get_node_index(self, node): node_list = list(self._all_nodes) return node_list.index(node) def _get_node_wrapper(self, node): if node not in self.__generated_node_wrappers: class node_wrapper(): def __init__(self, node, gen_f): self._node = node self._gen_f = gen_f def _gen_llvm_function(self): return self._gen_f(self._node) wrapper = node_wrapper(node, self.__gen_node_wrapper) self.__generated_node_wrappers[node] = wrapper return wrapper return self.__generated_node_wrappers[node] def _gen_llvm_function(self): with pnlvm.LLVMBuilderContext.get_global() as ctx: return ctx.gen_composition_exec(self) @property def _llvm_run(self): if self.__generated_run is None: with pnlvm.LLVMBuilderContext.get_global() as ctx: self.__generated_run = ctx.gen_composition_run(self) return self.__generated_run @property def _llvm_simulation(self): if self.__generated_simulation is None: with pnlvm.LLVMBuilderContext.get_global() as ctx: self.__generated_simulation = ctx.gen_composition_exec(self, True) return self.__generated_simulation @property def _llvm_sim_run(self): if self.__generated_sim_run is None: with pnlvm.LLVMBuilderContext.get_global() as ctx: self.__generated_sim_run = 
ctx.gen_composition_run(self, True) return self.__generated_sim_run @handle_external_context(execution_id=NotImplemented) def reinitialize(self, context=None): if context.execution_id is NotImplemented: context.execution_id = self.most_recent_context.execution_id self._compilation_data.ptx_execution.set(None, context) self._compilation_data.parameter_struct.set(None, context) self._compilation_data.state_struct.set(None, context) self._compilation_data.data_struct.set(None, context) self._compilation_data.scheduler_conditions.set(None, context) def __ptx_initialize(self, context=None): if self._compilation_data.ptx_execution._get(context) is None: self._compilation_data.ptx_execution._set(pnlvm.CompExecution(self, [context.execution_id]), context) def __gen_node_wrapper(self, node): name = 'comp_wrap_' is_mech = isinstance(node, Mechanism) with pnlvm.LLVMBuilderContext.get_global() as ctx: data_struct_ptr = ctx.get_data_struct_type(self).as_pointer() args = [ ctx.get_state_struct_type(self).as_pointer(), ctx.get_param_struct_type(self).as_pointer(), ctx.get_input_struct_type(self).as_pointer(), data_struct_ptr, data_struct_ptr] if not is_mech: # Add condition struct cond_gen = pnlvm.helpers.ConditionGenerator(ctx, self) cond_ty = cond_gen.get_condition_struct_type().as_pointer() args.append(cond_ty) builder = ctx.create_llvm_function(args, node, name + node.name) llvm_func = builder.function llvm_func.attributes.add('alwaysinline') for a in llvm_func.args: a.attributes.add('nonnull') context, params, comp_in, data_in, data_out = llvm_func.args[:5] cond_ptr = llvm_func.args[-1] m_function = ctx.import_llvm_function(node) if node is self.input_CIM: # if there are incoming modulatory projections, # the input structure is shared if self.parameter_CIM.afferents: m_in = builder.gep(comp_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) else: m_in = comp_in incoming_projections = [] elif node is self.parameter_CIM and node.afferents: # if parameter_CIM has afferent projections, # 
their values are in comp_in[1] m_in = builder.gep(comp_in, [ctx.int32_ty(0), ctx.int32_ty(1)]) # And we run no further projection incoming_projections = [] elif not is_mech: m_in = builder.alloca(m_function.args[2].type.pointee) incoming_projections = node.input_CIM.afferents + node.parameter_CIM.afferents else: # this path also handles parameter_CIM with no afferent # projections. 'comp_in' does not include any extra values, # and the entire call should be optimized out. m_in = builder.alloca(m_function.args[2].type.pointee) incoming_projections = node.afferents # Execute all incoming projections # TODO: This should filter out projections with different execution ID for proj in incoming_projections: # Skip autoassociative projections if proj.sender.owner is proj.receiver.owner: continue # Get location of projection input data par_mech = proj.sender.owner if par_mech in self._all_nodes: par_idx = self._get_node_index(par_mech) else: comp = par_mech.composition assert par_mech is comp.output_CIM par_idx = self.nodes.index(comp) output_s = proj.sender assert output_s in par_mech.output_ports output_port_idx = par_mech.output_ports.index(output_s) proj_in = builder.gep(data_in, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(par_idx), ctx.int32_ty(output_port_idx)]) # Get location of projection output (in mechanism's input structure rec_port = proj.receiver assert rec_port.owner is node or rec_port.owner is node.input_CIM or rec_port.owner is node.parameter_CIM indices = [0] if proj in rec_port.owner.path_afferents: rec_port_idx = rec_port.owner.input_ports.index(rec_port) assert proj in rec_port.pathway_projections projection_idx = rec_port.pathway_projections.index(proj) # Adjust for AutoAssociative projections for i in range(projection_idx): if isinstance(rec_port.pathway_projections[i], AutoAssociativeProjection): projection_idx -= 1 if not is_mech and node.parameter_CIM.afferents: # If there are afferent projections to parameter_CIM # the input structure is split 
between input_CIM # and parameter_CIM if proj in node.parameter_CIM.afferents: # modulatory projection indices.append(1) else: # pathway projection indices.append(0) indices.extend([rec_port_idx, projection_idx]) elif proj in rec_port.owner.mod_afferents: # Only mechanism ports list mod projections in mod_afferents assert is_mech projection_idx = rec_port.owner.mod_afferents.index(proj) indices.extend([len(rec_port.owner.input_ports), projection_idx]) else: assert False, "Projection neither pathway nor modulatory" proj_out = builder.gep(m_in, [ctx.int32_ty(i) for i in indices]) # Get projection parameters and state proj_idx = self.projections.index(proj) # Projections are listed second in param and state structure proj_params = builder.gep(params, [ctx.int32_ty(0), ctx.int32_ty(1), ctx.int32_ty(proj_idx)]) proj_context = builder.gep(context, [ctx.int32_ty(0), ctx.int32_ty(1), ctx.int32_ty(proj_idx)]) proj_function = ctx.import_llvm_function(proj) if proj_out.type != proj_function.args[3].type: warnings.warn("Shape mismatch: Projection ({}) results does not match the receiver state({}) input: {} vs. 
{}".format(proj, proj.receiver, proj.defaults.value, proj.receiver.defaults.variable)) proj_out = builder.bitcast(proj_out, proj_function.args[3].type) builder.call(proj_function, [proj_params, proj_context, proj_in, proj_out]) idx = ctx.int32_ty(self._get_node_index(node)) zero = ctx.int32_ty(0) m_params = builder.gep(params, [zero, zero, idx]) m_context = builder.gep(context, [zero, zero, idx]) m_out = builder.gep(data_out, [zero, zero, idx]) if is_mech: call_args = [m_params, m_context, m_in, m_out] if len(m_function.args) > 4: assert node is self.controller call_args += [params, context, data_in] builder.call(m_function, call_args) else: # Condition and data structures includes parent first nested_idx = ctx.int32_ty(self._get_node_index(node) + 1) m_data = builder.gep(data_in, [zero, nested_idx]) m_cond = builder.gep(cond_ptr, [zero, nested_idx]) builder.call(m_function, [m_context, m_params, m_in, m_data, m_cond]) # Copy output of the nested composition to its output place output_idx = node._get_node_index(node.output_CIM) result = builder.gep(m_data, [zero, zero, ctx.int32_ty(output_idx)]) builder.store(builder.load(result), m_out) builder.ret_void() return llvm_func @property def _dict_summary(self): scheduler_dict = { str(ContextFlags.PROCESSING): self.scheduler._dict_summary } super_summary = super()._dict_summary try: super_summary[self._model_spec_id_parameters][MODEL_SPEC_ID_PSYNEULINK]['schedulers'] = scheduler_dict except KeyError: super_summary[self._model_spec_id_parameters][MODEL_SPEC_ID_PSYNEULINK] = {} super_summary[self._model_spec_id_parameters][MODEL_SPEC_ID_PSYNEULINK]['schedulers'] = scheduler_dict nodes_dict = {MODEL_SPEC_ID_PSYNEULINK: {}} projections_dict = {MODEL_SPEC_ID_PSYNEULINK: {}} additional_projections = [] additional_nodes = ( [self.controller] if self.controller is not None else [] ) for n in list(self.nodes) + additional_nodes: if not isinstance(n, CompositionInterfaceMechanism): nodes_dict[n.name] = n._dict_summary # consider 
making this more general in the future try: additional_projections.extend(n.control_projections) except AttributeError: pass for p in list(self.projections) + additional_projections: has_cim_sender = isinstance( p.sender.owner, CompositionInterfaceMechanism ) has_cim_receiver = isinstance( p.receiver.owner, CompositionInterfaceMechanism ) # filter projections to/from CIMs, unless they are to embedded # compositions (any others should be automatically generated) if ( (not has_cim_sender or p.sender.owner.composition in self.nodes) and ( not has_cim_receiver or p.receiver.owner.composition in self.nodes ) ): p_summary = p._dict_summary if has_cim_sender: p_summary[MODEL_SPEC_ID_SENDER_MECH] = p.sender.owner.composition.name if has_cim_receiver: p_summary[MODEL_SPEC_ID_RECEIVER_MECH] = p.receiver.owner.composition.name projections_dict[p.name] = p_summary if len(nodes_dict[MODEL_SPEC_ID_PSYNEULINK]) == 0: del nodes_dict[MODEL_SPEC_ID_PSYNEULINK] if len(projections_dict[MODEL_SPEC_ID_PSYNEULINK]) == 0: del projections_dict[MODEL_SPEC_ID_PSYNEULINK] return { MODEL_SPEC_ID_COMPOSITION: [{ **super_summary, **{ MODEL_SPEC_ID_NODES: nodes_dict, MODEL_SPEC_ID_PROJECTIONS: projections_dict, 'controller': self.controller, } }] } # ****************************************************************************************************************** # PROPERTIES # ****************************************************************************************************************** @property def input_ports(self): """Returns all InputPorts that belong to the Input CompositionInterfaceMechanism""" return self.input_CIM.input_ports @property def output_ports(self): """Returns all OutputPorts that belong to the Output CompositionInterfaceMechanism""" return self.output_CIM.output_ports @property def output_values(self): """Returns values of all OutputPorts that belong to the Output CompositionInterfaceMechanism""" return self.get_output_values() def get_output_values(self, context=None): 
return [output_port.parameters.value.get(context) for output_port in self.output_CIM.output_ports] @property def input_port(self): """Returns the index 0 InputPort that belongs to the Input CompositionInterfaceMechanism""" return self.input_CIM.input_ports[0] @property def input_values(self): """Returns values of all InputPorts that belong to the Input CompositionInterfaceMechanism""" return self.get_input_values() def get_input_values(self, context=None): return [input_port.parameters.value.get(context) for input_port in self.input_CIM.input_ports] @property def runs_simulations(self): return True @property def simulation_results(self): return self.parameters.simulation_results.get(self.default_execution_id) # For now, external_input_ports == input_ports and external_input_values == input_values # They could be different in the future depending on new features (ex. if we introduce recurrent compositions) # Useful to have this property for treating Compositions the same as Mechanisms in run & execute @property def external_input_ports(self): """Returns all external InputPorts that belong to the Input CompositionInterfaceMechanism""" try: return [input_port for input_port in self.input_CIM.input_ports if not input_port.internal_only] except (TypeError, AttributeError): return None @property def external_input_values(self): """Returns values of all external InputPorts that belong to the Input CompositionInterfaceMechanism""" try: return [input_port.value for input_port in self.input_CIM.input_ports if not input_port.internal_only] except (TypeError, AttributeError): return None @property def default_external_input_values(self): """Returns the default values of all external InputPorts that belong to the Input CompositionInterfaceMechanism""" try: return [input_port.defaults.value for input_port in self.input_CIM.input_ports if not input_port.internal_only] except (TypeError, AttributeError): return None @property def stateful_nodes(self): """ List of all nodes in the 
system that are currently marked as stateful. For Mechanisms, statefulness is determined by checking whether node.has_initializers is True. For Compositions, statefulness is determined by checking whether any of its nodes are stateful. Returns ------- all stateful nodes in the system : List[Nodes] """ stateful_nodes = [] for node in self.nodes: if isinstance(node, Composition): if len(node.stateful_nodes) > 0: stateful_nodes.append(node) elif node.has_initializers: stateful_nodes.append(node) return stateful_nodes @property def output_port(self): """Returns the index 0 OutputPort that belongs to the Output CompositionInterfaceMechanism""" return self.output_CIM.output_ports[0] @property def class_parameters(self): return self.__class__.parameters @property def stateful_parameters(self): return [param for param in self.parameters if param.stateful] @property def _dependent_components(self): return list(itertools.chain( super()._dependent_components, self.nodes, self.projections, [self.input_CIM, self.output_CIM, self.parameter_CIM], [self.controller] if self.controller is not None else [] )) @property def learning_components(self): return [node for node in self.nodes if (NodeRole.LEARNING in self.nodes_to_roles[node] or NodeRole.AUTOASSOCIATIVE_LEARNING in self.nodes_to_roles[node])] @property def learned_components(self): learned_projections = [proj for proj in self.projections if hasattr(proj, 'has_learning_projection') and proj.has_learning_projection] related_processing_mechanisms = [mech for mech in self.nodes if (isinstance(mech, Mechanism) and (any([mech in learned_projections for mech in mech.afferents]) or any([mech in learned_projections for mech in mech.efferents])))] return related_processing_mechanisms + learned_projections @property def afferents(self): return ContentAddressableList(component_type=Projection, list=[proj for proj in self.input_CIM.afferents]) @property def efferents(self): return ContentAddressableList(component_type=Projection, 
list=[proj for proj in self.output_CIM.efferents]) @property def _all_nodes(self): for n in self.nodes: yield n yield self.input_CIM yield self.output_CIM yield self.parameter_CIM if self.controller: yield self.controller
nilq/baby-python
python
'''
File name: GIN.py
Discription: Learning Hidden Causal Representation with GIN condition
Author: ZhiyiHuang@DMIRLab, RuichuCai@DMIRLab
From DMIRLab: https://dmir.gdut.edu.cn/
'''

from collections import deque
from itertools import combinations

import numpy as np

from causallearn.graph.GeneralGraph import GeneralGraph
from causallearn.graph.GraphNode import GraphNode
from causallearn.graph.NodeType import NodeType
from causallearn.search.FCMBased.lingam.hsic import hsic_test_gamma


def GIN(data):
    '''
    Learning causal structure of Latent Variables for Linear Non-Gaussian Latent
    Variable Model with Generalized Independent Noise Condition

    Parameters
    ----------
    data : numpy ndarray
        data set

    Returns
    -------
    G : general graph
        causal graph
    K : list
        causal order
    '''
    v_labels = list(range(data.shape[1]))
    v_set = set(v_labels)
    cov = np.cov(data.T)

    # Step 1: Finding Causal Clusters
    # For each pair of observed variables, measure how dependent the GIN residual
    # is on the remaining variables; each variable keeps the pair with the
    # smallest dependence score as its candidate cluster.
    cluster_list = []
    min_cluster = {i: set() for i in v_set}
    min_dep_score = {i: 1e9 for i in v_set}  # sentinel larger than any expected statistic
    for (x1, x2) in combinations(v_set, 2):
        x_set = {x1, x2}
        z_set = v_set - x_set
        dep_statistic = cal_dep_for_gin(data, cov, list(x_set), list(z_set))
        for i in x_set:
            if min_dep_score[i] > dep_statistic:
                min_dep_score[i] = dep_statistic
                min_cluster[i] = x_set
    for i in v_labels:
        cluster_list.append(list(min_cluster[i]))
    cluster_list = merge_overlaping_cluster(cluster_list)

    # Step 2: Learning the Causal Order of Latent Variables
    # Repeatedly extract the current root cluster until none remain.
    K = []
    while cluster_list:
        root = find_root(data, cov, cluster_list, K)
        K.append(root)
        cluster_list.remove(root)

    # Build the output graph: one latent variable per cluster, with every earlier
    # latent pointing at each later one, and each latent pointing at its
    # observed children (1-based X labels).
    latent_id = 1
    l_nodes = []
    G = GeneralGraph([])
    for cluster in K:
        l_node = GraphNode(f"L{latent_id}")
        l_node.set_node_type(NodeType.LATENT)
        l_nodes.append(l_node)
        G.add_node(l_node)
        for l in l_nodes:
            if l != l_node:
                G.add_directed_edge(l, l_node)
        for o in cluster:
            o_node = GraphNode(f"X{o + 1}")
            G.add_node(o_node)
            G.add_directed_edge(l_node, o_node)
        latent_id += 1
    return G, K


def cal_dep_for_gin(data, cov, X, Z):
    '''
    Calculate the statistics of dependence via Generalized Independent Noise Condition

    Parameters
    ----------
    data : data set (numpy ndarray)
    cov : covariance matrix
    X : test set variables
    Z : condition set variables

    Returns
    -------
    sta : test statistic
    '''
    # omega spans the (approximate) null space of cov(Z, X); the GIN residual is
    # e_xz = omega . X
    cov_m = cov[np.ix_(Z, X)]
    _, _, v = np.linalg.svd(cov_m)
    omega = v.T[:, -1]
    e_xz = np.dot(omega, data[:, X].T)

    # average HSIC statistic between the residual and each conditioning variable
    sta = 0
    for i in Z:
        sta += hsic_test_gamma(e_xz, data[:, i])[0]
    sta /= len(Z)
    return sta


def find_root(data, cov, clusters, K):
    '''
    Find the causal order by statistics of dependence

    Parameters
    ----------
    data : data set (numpy ndarray)
    cov : covariance matrix
    clusters : clusters of observed variables
    K : causal order

    Returns
    -------
    root : latent root cause
    '''
    if len(clusters) == 1:
        return clusters[0]
    root = clusters[0]
    dep_statistic_score = 1e30  # sentinel larger than any expected statistic
    for i in clusters:
        for j in clusters:
            if i == j:
                continue
            # test cluster i as root against cluster j, conditioning on the
            # remaining members of i and one member of each already-ordered cluster
            X = [i[0], j[0]]
            Z = []
            for k in range(1, len(i)):
                Z.append(i[k])
            if K:
                for k in K:
                    # NOTE(review): assumes every cluster in K has at least two
                    # elements (k[0] and k[1]) — confirm upstream guarantees this.
                    X.append(k[0])
                    Z.append(k[1])

            dep_statistic = cal_dep_for_gin(data, cov, X, Z)
            if dep_statistic < dep_statistic_score:
                dep_statistic_score = dep_statistic
                root = i
    return root


def _get_all_elements(S):
    '''Return the set of all elements appearing in any member of S.'''
    result = set()
    for i in S:
        result.update(i)
    return result


# merging cluster
def merge_overlaping_cluster(cluster_list):
    '''Merge clusters that share elements (misspelled name kept for API compatibility).

    Treats clusters as graph nodes connected when they overlap, and returns one
    merged cluster (list of variable labels) per connected component, found by BFS.
    '''
    v_labels = _get_all_elements(cluster_list)
    cluster_dict = {i: -1 for i in v_labels}  # label -> component id
    cluster_b = {i: [] for i in v_labels}     # label -> indices of clusters containing it
    cluster_len = 0
    for i in range(len(cluster_list)):
        for j in cluster_list[i]:
            cluster_b[j].append(i)

    visited = [False] * len(cluster_list)
    cont = True
    while cont:
        cont = False
        q = deque()
        # seed the queue with the first unvisited cluster
        for i, val in enumerate(visited):
            if not val:
                q.append(i)
                visited[i] = True
                break
        # BFS: clusters are adjacent when they share an element
        while q:
            top = q.popleft()
            for i in cluster_list[top]:
                cluster_dict[i] = cluster_len
                for j in cluster_b[i]:
                    if not visited[j]:
                        q.append(j)
                        visited[j] = True

        for i in visited:
            if not i:
                cont = True
                break
        cluster_len += 1

    cluster = [[] for _ in range(cluster_len)]
    for i in v_labels:
        cluster[cluster_dict[i]].append(i)
    return cluster
nilq/baby-python
python
#!/usr/bin/python2.5
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Rich Text 2 Test Definitions."""

__author__ = 'rolandsteiner@google.com (Roland Steiner)'

# Selection specifications used in test files:
#
# Caret/collapsed selections:
#
# SC: 'caret'    caret/collapsed selection
# SB: 'before'   caret/collapsed selection before element
# SA: 'after'    caret/collapsed selection after element
# SS: 'start'    caret/collapsed selection at the start of the element (before first child/at text pos. 0)
# SE: 'end'      caret/collapsed selection at the end of the element (after last child/at text pos. n)
# SX: 'betwixt'  collapsed selection between elements
#
# Range selections:
#
# SO: 'outside'  selection wraps element in question
# SI: 'inside'   selection is inside of element in question
# SW: 'wrap'     as SI, but also wraps all children of element
# SL: 'left'     oblique selection - starts outside element and ends inside
# SR: 'right'    oblique selection - starts inside element and ends outside
# SM: 'mixed'    selection starts and ends in different elements
#
# SxR: selection is reversed
#
# Sxn or SxRn    selection applies to element #n of several identical

import logging

from categories import test_set_base

# common to the RichText2 suite
from categories.richtext2 import common

# tests
from categories.richtext2.tests.apply import APPLY_TESTS
from categories.richtext2.tests.applyCSS import APPLY_TESTS_CSS
from categories.richtext2.tests.change import CHANGE_TESTS
from categories.richtext2.tests.changeCSS import CHANGE_TESTS_CSS
from categories.richtext2.tests.delete import DELETE_TESTS
from categories.richtext2.tests.forwarddelete import FORWARDDELETE_TESTS
from categories.richtext2.tests.insert import INSERT_TESTS
from categories.richtext2.tests.selection import SELECTION_TESTS
from categories.richtext2.tests.unapply import UNAPPLY_TESTS
from categories.richtext2.tests.unapplyCSS import UNAPPLY_TESTS_CSS

from categories.richtext2.tests.querySupported import QUERYSUPPORTED_TESTS
from categories.richtext2.tests.queryEnabled import QUERYENABLED_TESTS
from categories.richtext2.tests.queryIndeterm import QUERYINDETERM_TESTS
from categories.richtext2.tests.queryState import QUERYSTATE_TESTS, QUERYSTATE_TESTS_CSS
from categories.richtext2.tests.queryValue import QUERYVALUE_TESTS, QUERYVALUE_TESTS_CSS

# Per-category test counts: each suite maps a class to groups, each group has a
# 'tests' list; the count is the total number of test ids over all classes.
_SELECTION_TESTS_COUNT = len([t['id'] for c in common.CLASSES for g in SELECTION_TESTS.get(c, []) for t in g['tests']])

_APPLY_TESTS_COUNT = len([t['id'] for c in common.CLASSES for g in APPLY_TESTS.get(c, []) for t in g['tests']])
_APPLY_TESTS_CSS_COUNT = len([t['id'] for c in common.CLASSES for g in APPLY_TESTS_CSS.get(c, []) for t in g['tests']])
_CHANGE_TESTS_COUNT = len([t['id'] for c in common.CLASSES for g in CHANGE_TESTS.get(c, []) for t in g['tests']])
_CHANGE_TESTS_CSS_COUNT = len([t['id'] for c in common.CLASSES for g in CHANGE_TESTS_CSS.get(c, []) for t in g['tests']])
_UNAPPLY_TESTS_COUNT = len([t['id'] for c in common.CLASSES for g in UNAPPLY_TESTS.get(c, []) for t in g['tests']])
_UNAPPLY_TESTS_CSS_COUNT = len([t['id'] for c in common.CLASSES for g in UNAPPLY_TESTS_CSS.get(c, []) for t in g['tests']])
_DELETE_TESTS_COUNT = len([t['id'] for c in common.CLASSES for g in DELETE_TESTS.get(c, []) for t in g['tests']])
_FORWARDDELETE_TESTS_COUNT = len([t['id'] for c in common.CLASSES for g in FORWARDDELETE_TESTS.get(c, []) for t in g['tests']])
_INSERT_TESTS_COUNT = len([t['id'] for c in common.CLASSES for g in INSERT_TESTS.get(c, []) for t in g['tests']])

# One selection result is recorded per HTML-manipulating test.
_SELECTION_RESULTS_COUNT = _APPLY_TESTS_COUNT + \
                           _APPLY_TESTS_CSS_COUNT + \
                           _CHANGE_TESTS_COUNT + \
                           _CHANGE_TESTS_CSS_COUNT + \
                           _UNAPPLY_TESTS_COUNT + \
                           _UNAPPLY_TESTS_CSS_COUNT + \
                           _DELETE_TESTS_COUNT + \
                           _FORWARDDELETE_TESTS_COUNT + \
                           _INSERT_TESTS_COUNT

_QUERYSUPPORTED_TESTS_COUNT = len([t['id'] for c in common.CLASSES for g in QUERYSUPPORTED_TESTS.get(c, []) for t in g['tests']])
_QUERYENABLED_TESTS_COUNT = len([t['id'] for c in common.CLASSES for g in QUERYENABLED_TESTS.get(c, []) for t in g['tests']])
_QUERYINDETERM_TESTS_COUNT = len([t['id'] for c in common.CLASSES for g in QUERYINDETERM_TESTS.get(c, []) for t in g['tests']])
_QUERYSTATE_TESTS_COUNT = len([t['id'] for c in common.CLASSES for g in QUERYSTATE_TESTS.get(c, []) for t in g['tests']])
_QUERYSTATE_TESTS_CSS_COUNT = len([t['id'] for c in common.CLASSES for g in QUERYSTATE_TESTS_CSS.get(c, []) for t in g['tests']])
_QUERYVALUE_TESTS_COUNT = len([t['id'] for c in common.CLASSES for g in QUERYVALUE_TESTS.get(c, []) for t in g['tests']])
_QUERYVALUE_TESTS_CSS_COUNT = len([t['id'] for c in common.CLASSES for g in QUERYVALUE_TESTS_CSS.get(c, []) for t in g['tests']])

# Category metadata: per category key, the test count plus short/long descriptions
# shown in the results UI.
TEST_CATEGORIES = {
  'selection': {
    'count': _SELECTION_TESTS_COUNT,
    'short': 'Selection',
    'long': '''These tests verify that selection commands are honored correctly. The expected and actual outputs are shown.'''},
  'apply': {
    'count': _APPLY_TESTS_COUNT,
    'short': 'Apply Format',
    'long': '''These tests use execCommand to apply formatting to plain text, with styleWithCSS being set to false. The expected and actual outputs are shown.'''},
  'applyCSS': {
    'count': _APPLY_TESTS_CSS_COUNT,
    'short': 'Apply Format, styleWithCSS',
    'long': '''These tests use execCommand to apply formatting to plain text, with styleWithCSS being set to true. The expected and actual outputs are shown.'''},
  'change': {
    'count': _CHANGE_TESTS_COUNT,
    'short': 'Change Format',
    'long': '''These tests are similar to the unapply tests, except that they're for execCommands which take an argument (fontname, fontsize, etc.). They apply the execCommand to text which already has some formatting, in order to change it. styleWithCSS is being set to false. The expected and actual outputs are shown.'''},
  'changeCSS': {
    'count': _CHANGE_TESTS_CSS_COUNT,
    'short': 'Change Format, styleWithCSS',
    'long': '''These tests are similar to the unapply tests, except that they're for execCommands which take an argument (fontname, fontsize, etc.). They apply the execCommand to text which already has some formatting, in order to change it. styleWithCSS is being set to true. The expected and actual outputs are shown.'''},
  'unapply': {
    'count': _UNAPPLY_TESTS_COUNT,
    'short': 'Unapply Format',
    'long': '''These tests put different combinations of HTML into a contenteditable iframe, and then run an execCommand to attempt to remove the formatting the HTML applies. For example, there are tests to check if bold styling from &lt;b&gt;, &lt;strong&gt;, and &lt;span style="font-weight:normal"&gt; are all removed by the bold execCommand. It is important that browsers can remove all variations of a style, not just the variation the browser applies on its own, because it's quite possible that a web application could allow editing with multiple browsers, or that users could paste content into the contenteditable region. For these tests, styleWithCSS is set to false. The expected and actual outputs are shown.'''},
  'unapplyCSS': {
    'count': _UNAPPLY_TESTS_CSS_COUNT,
    'short': 'Unapply Format, styleWithCSS',
    'long': '''These tests put different combinations of HTML into a contenteditable iframe, and then run an execCommand to attempt to remove the formatting the HTML applies. For example, there are tests to check if bold styling from &lt;b&gt;, &lt;strong&gt;, and &lt;span style="font-weight:normal"&gt; are all removed by the bold execCommand. It is important that browsers can remove all variations of a style, not just the variation the browser applies on its own, because it's quite possible that a web application could allow editing with multiple browsers, or that users could paste content into the contenteditable region. For these tests, styleWithCSS is set to true. The expected and actual outputs are shown.'''},
  'delete': {
    'count': _DELETE_TESTS_COUNT,
    'short': 'Delete Content',
    'long': '''These tests verify that 'delete' commands are executed correctly. Note that 'delete' commands are supposed to have the same result as if the user had hit the 'BackSpace' (NOT 'Delete'!) key. The expected and actual outputs are shown.'''},
  'forwarddelete': {
    'count': _FORWARDDELETE_TESTS_COUNT,
    'short': 'Forward-Delete Content',
    'long': '''These tests verify that 'forwarddelete' commands are executed correctly. Note that 'forwarddelete' commands are supposed to have the same result as if the user had hit the 'Delete' key. The expected and actual outputs are shown.'''},
  'insert': {
    'count': _INSERT_TESTS_COUNT,
    'short': 'Insert Content',
    'long': '''These tests verify that the various 'insert' and 'create' commands, that create a single HTML element, rather than wrapping existing content, are executed correctly. (Commands that wrap existing HTML are part of the 'apply' and 'applyCSS' categories.) The expected and actual outputs are shown.'''},
  'selectionResult': {
    'count': _SELECTION_RESULTS_COUNT,
    'short': 'Selection Results',
    'long': '''Number of cases within those tests that manipulate HTML (categories 'Apply', 'Change', 'Unapply', 'Delete', 'ForwardDelete', 'Insert') where the result selection matched the expectation.'''},
  'querySupported': {
    'count': _QUERYSUPPORTED_TESTS_COUNT,
    'short': 'q.C.Supported Function',
    'long': '''These tests verify that the 'queryCommandSupported()' function return a correct result given a certain set-up. styleWithCSS is being set to false. The expected and actual results are shown.'''},
  'queryEnabled': {
    'count': _QUERYENABLED_TESTS_COUNT,
    'short': 'q.C.Enabled Function',
    'long': '''These tests verify that the 'queryCommandEnabled()' function return a correct result given a certain set-up. styleWithCSS is being set to false. The expected and actual results are shown.'''},
  'queryIndeterm': {
    'count': _QUERYINDETERM_TESTS_COUNT,
    'short': 'q.C.Indeterm Function',
    'long': '''These tests verify that the 'queryCommandIndeterm()' function return a correct result given a certain set-up. styleWithCSS is being set to false. The expected and actual results are shown.'''},
  'queryState': {
    'count': _QUERYSTATE_TESTS_COUNT,
    'short': 'q.C.State Function',
    'long': '''These tests verify that the 'queryCommandState()' function return a correct result given a certain set-up. styleWithCSS is being set to false. The expected and actual results are shown.'''},
  'queryStateCSS': {
    'count': _QUERYSTATE_TESTS_CSS_COUNT,
    'short': 'q.C.State Function, styleWithCSS',
    'long': '''These tests verify that the 'queryCommandState()' function return a correct result given a certain set-up. styleWithCSS is being set to true. The expected and actual results are shown.'''},
  'queryValue': {
    'count': _QUERYVALUE_TESTS_COUNT,
    'short': 'q.C.Value Function',
    'long': '''These tests verify that the 'queryCommandValue()' function return a correct result given a certain set-up. styleWithCSS is being set to false. The expected and actual results are shown.'''},
  'queryValueCSS': {
    'count': _QUERYVALUE_TESTS_CSS_COUNT,
    'short': 'q.C.Value Function, styleWithCSS',
    'long': '''These tests verify that the 'queryCommandValue()' function return a correct result given a certain set-up. styleWithCSS is being set to true. The expected and actual results are shown.'''}
}

# Category tests:
# key, short description, documentation, # of tests

class RichText2TestCategory(test_set_base.TestBase):
  """One scored category of the RichText2 suite (metadata from TEST_CATEGORIES)."""
  TESTS_URL_PATH = '/%s/test' % common.CATEGORY

  def __init__(self, key):
    test_set_base.TestBase.__init__(
        self,
        key = key,
        name = TEST_CATEGORIES[key]['short'],
        url = self.TESTS_URL_PATH,
        doc = TEST_CATEGORIES[key]['long'],
        min_value = 0,
        max_value = TEST_CATEGORIES[key]['count'],
        cell_align = 'center')

# Explicitly list categories rather than using a list comprehension, to preserve order
_CATEGORIES_SET = [
    RichText2TestCategory('selection'),
    RichText2TestCategory('apply'),
    RichText2TestCategory('applyCSS'),
    RichText2TestCategory('change'),
    RichText2TestCategory('changeCSS'),
    RichText2TestCategory('unapply'),
    RichText2TestCategory('unapplyCSS'),
    RichText2TestCategory('delete'),
    RichText2TestCategory('forwarddelete'),
    RichText2TestCategory('insert'),
    RichText2TestCategory('selectionResult'),
    RichText2TestCategory('querySupported'),
    RichText2TestCategory('queryEnabled'),
    RichText2TestCategory('queryIndeterm'),
RichText2TestCategory('queryState'), RichText2TestCategory('queryStateCSS'), RichText2TestCategory('queryValue'), RichText2TestCategory('queryValueCSS'), ] class RichText2TestSet(test_set_base.TestSet): def GetTestScoreAndDisplayValue(self, test_key, raw_scores): """Get a score and a text string to output to the display. Args: test_key: a key for a test_set sub-category. raw_scores: a dict of raw_scores indexed by test keys. Returns: score, display_value # score is from 0 to 100. # display_value is the text for the cell. """ score = raw_scores.get(test_key) category = TEST_CATEGORIES[test_key] if score is None or category is None: return 0, '' count = category['count'] percent = int(round(100.0 * score / count)) display = '%s/%s' % (score, count) return percent, display def GetRowScoreAndDisplayValue(self, results): """Get the overall score and text string for this row of results data. Args: results: { 'test_key_1': {'score': score_1, 'raw_score': raw_score_1, ...}, 'test_key_2': {'score': score_2, 'raw_score': raw_score_2, ...}, ... } Returns: score, display_value # score is from 0 to 100. # display_value is the text for the cell. """ total_passed = 0 total_tests = 0 for test_key, test_results in results.items(): display_test = test_results['display'] if display_test == '': # If we ever see display_test == '', we know we can just walk away. return 0, '' passed, total = display_test.split('/') total_passed += int(passed) total_tests += int(total) display = '%s/%s' % (total_passed, total_tests) score = int(round(100.0 * total_passed / total_tests)) return score, display TEST_SET = RichText2TestSet( category = common.CATEGORY, category_name = 'Rich Text', summary_doc = 'New suite of tests to see how well editor controls work with a variety of HTML.', tests = _CATEGORIES_SET, test_page = "richtext2/run", )
nilq/baby-python
python
#!/bin/env python3.9
"""
Cyclic backup creating a tar file.

Newer files are written first (incremental part); the remaining space of
the target volume is filled with the oldest files saved in earlier volumes
(cyclic part).  A sqlite database records which file went into which volume.
"""
import datetime
import getopt
import logging
import os
import re
import sqlite3
import stat
import sys
import tarfile
import time
import traceback
from builtins import bool

import jinja2
import yaml

# Directory prefixes known to contain a flag file; everything below them is skipped.
blocked = set()
config = {
    'db': 'cycbackup.db',
    'exclude': [],
    'flag': '.bkstop',
    'min_age': 300,
    'size': '50M',
    'target': '/tmp/backup.tar',
}
""" default config settings """
# Run statistics, rendered through the resultT template at the end of main().
counts = {
    'backed_up': 0,
    'blocked': 0,
    'cyclic': 0,
    'device': 0,
    'excluded': 0,
    'incremental': 0,
    'permissions': 0,
    'removed': 0,
    'same_old': 0,
    'too_big': 0,
    'too_recent': 0,
}
db_conn: sqlite3.Connection  # module-wide database connection, bound in main()
done = False
exclude = []  # compiled regex patterns built from config['exclude']
file_size = 0  # current size of the tar archive in bytes
max_age = 0  # files modified after this timestamp are considered too recent
start_device: int  # device id of the backup root; other devices are skipped
tar_file: tarfile.TarFile  # the open tar archive, bound in main()
target_size = 0  # maximum size of one backup volume in bytes
vol_num = 0  # number of the volume currently being written
resultT = """
The counts are:
backed up files:{{ "%7d" | format(backed_up) }}
incremental:{{ "%7d" | format(incremental) }}
cyclic:{{ "%7d" | format(cyclic) }}
skipped 2 recent:{{ "%7d" | format(too_recent) }}
skipped as same:{{ "%7d" | format(same_old) }}
skipped flag:{{ "%7d" | format(excluded) }}
skipped perm.:{{ "%7d" | format(permissions) }}
removed from db:{{ "%7d" | format(removed) }}
"""
""" template for the results """


def prep_database():
    """ prepares the database, creates it if not exists """
    global db_conn, vol_num
    version: int = 0
    # Determine the schema version; a missing dbv table means a fresh database.
    try:
        row = db_conn.execute('select max(version) from dbv').fetchone()
        if row is not None:
            version = row[0]
    except sqlite3.DatabaseError:
        logging.info('db has no version')
    if version == 0:
        logging.info("creating db from scratch")
        schema_stmts = [
            'CREATE TABLE files (name TEXT NOT NULL, mtime REAL NOT NULL,volume INTEGER)',
            'CREATE UNIQUE INDEX "prime" on files (name ASC)',
            'CREATE INDEX vols on files (volume ASC)',
            'CREATE TABLE backup (num INTEGER NOT NULL, date TEXT NOT NULL)',
            'CREATE INDEX bknum on backup (num ASC)',
            'CREATE TABLE dbv(version INTEGER NOT NULL)',
            'insert into dbv values(1)'
        ]
        for stmt in schema_stmts:
            db_conn.execute(stmt)
        db_conn.commit()
        logging.debug("upgraded from scratch")
    # WAL mode keeps the database usable while the long-running backup writes to it.
    db_conn.execute('pragma journal_mode=wal')
    db_conn.execute('pragma synchronous=normal')
    # Continue with the next volume number after the highest one recorded.
    row = db_conn.execute('select max(volume) from files').fetchone()
    if row is not None and row[0] is not None:
        vol_num = row[0] + 1
    logging.debug(f"the current volume is {vol_num}")


def archive(fullname, inc) -> bool:
    """ archives one file if conditions are met
    :param fullname: full name of the file
    :param inc: apply rules for incremental backup
    """
    global exclude, counts, config, blocked, file_size, db_conn, vol_num
    # Fast path: skip files below a directory already known to contain a flag file.
    for item in blocked:
        if fullname.startswith(item):
            counts['blocked'] += 1
            # logging.debug(f"blocked: {fullname}")
            return False
    # Walk up the parent directories looking for a flag file.
    path = fullname
    while True:
        path, tail = os.path.split(path)
        if len(path) <= 1:
            break
        try:
            if os.lstat(os.path.join(path, config['flag'])):
                logging.debug("found flag in path")
                blocked.add(path)
                return False
        except FileNotFoundError as fnfe:
            # No flag file at this level - keep searching upwards.
            pass
    try:
        stat_buf = os.lstat(fullname)
    except Exception as ex:
        logging.error(f"lstat({fullname}): {ex}")
        exc_type, exc_value, exc_traceback = sys.exc_info()
        for l in traceback.format_exception(exc_type, exc_value, exc_traceback):
            logging.warning(f" {l.strip()}")
        return False
    # Directories are matched against the exclude patterns with a trailing '/'.
    if stat.S_ISDIR(stat_buf.st_mode):
        ext_filename = fullname + '/'
    else:
        ext_filename = fullname
    for pt in exclude:
        if pt.search(ext_filename) is not None:
            counts['excluded'] += 1
            # logging.debug(f"excluded: {fullname}")
            return False
    # Never back up the backup database itself.
    if fullname == config['db']:
        return False
    # Stay on the file system the walk started on.
    if stat_buf.st_dev != start_device:
        counts['device'] += 1
        logging.debug(f"device: {fullname}")
        return False
    # sockets are created by running programs
    if stat.S_ISSOCK(stat_buf.st_mode):
        return False
    mtime = int(stat_buf.st_mtime)
    if mtime > max_age:
        counts['too_recent'] += 1
        logging.debug(f"too recent: {fullname}")
        return False
    # checking age against database
    if inc:
        row = db_conn.execute('select mtime from files where name=?', (fullname,)).fetchone()
        if row is not None:
            if row[0] == mtime:
                counts['same_old'] += 1
                # logging.debug(f"same old: {fullname}")
                return False
    if not os.access(fullname, os.R_OK):
        logging.warning('missing permissions: ' + fullname)
        counts['permissions'] += 1
        logging.debug(f"permissions: {fullname}")
        return False
    # Would adding this file (plus tar header overhead) exceed the volume size?
    nfs = file_size + 1536 + stat_buf.st_size
    if nfs >= target_size:
        counts['too_big'] += 1
        # logging.debug(f"too big: {fullname}")
        return False
    if inc:
        counts['incremental'] += 1
        # logging.debug(f"incremental: {fullname}")
    else:
        counts['cyclic'] += 1
        # logging.debug(f"cyclic: {fullname}")
    try:
        tar_file.add(fullname, recursive=False)
        counts['backed_up'] += 1
        # Record (or refresh) the file's entry for the current volume.
        db_conn.execute('replace into files(name,mtime,volume) values(?,?,?)', (fullname, mtime, vol_num))
        db_conn.commit()
        file_size = tar_file.fileobj.tell()
    except Exception as ex:
        logging.error(f"tar archive {ex}")
    return True


def incremental():
    """ incremental part - saving newer files """
    global config, blocked, start_device
    for entry in config['backup']:
        try:
            stat_buf = os.lstat(entry)
            # Remember the device so archive() can skip other file systems.
            start_device = stat_buf.st_dev
            for path, dirs, files in os.walk(entry):
                for item in files:
                    # A flag file blocks its whole directory.
                    if item == config['flag']:
                        blocked.add(path)
                        continue
                    fullname = os.path.join(path, item)
                    archive(fullname, True)
                    # Stop when the volume is (nearly) full.
                    if file_size + 8096 > target_size:
                        return
                for item in dirs:
                    fullname = os.path.join(path, item)
                    archive(fullname, True)
                    if file_size + 8096 > target_size:
                        return
        except FileNotFoundError as fnfe:
            logging.error(f"backup entry {entry} not found:\n {fnfe}")


def cyclic():
    """ cyclic part - saving old files """
    global config, db_conn
    # Oldest volumes first: re-save their files into the current volume.
    rs = db_conn.execute('select name, volume from files where volume < ?
order by volume ASC', (vol_num,)) while True: row = rs.fetchone() if row is None: return if not archive(row[0], False): db_conn.execute('delete from files where name=?',(row[0],)) db_conn.commit() if file_size + 8096 > target_size: return def main(): """ use cycbackup {options} """ global config, db_conn, tar_file, exclude, max_age, target_size, vol_num opts, arg = getopt.getopt(sys.argv[1:], 'c:it:h?') for opt, opt_arg in opts: if opt == '-c': with open(opt_arg) as cf: config.update(yaml.safe_load(cf)) elif opt == '-i': yaml.safe_dump(config, sys.stderr) elif opt == '-t': config['target'] = opt_arg else: print(main.__doc__) sys.exit(0) config['db'] = os.path.abspath(config['db']) for pattern in config['exclude']: cp = re.compile(pattern) exclude.append(cp) max_age = time.time() - config['min_age'] size_pat = re.compile('(\\d+)([kmgGM])') m = size_pat.search(config['size']) target_size = 50 * 1024 * 1024 if m is not None: target_size = int(m.group(1)) unit = m.group(2) if unit == 'k': target_size *= 1000 elif unit == 'K': target_size *= 1024 elif unit == 'm': target_size *= 1000000 elif unit == 'M': target_size *= 1024 * 1024 elif unit == 'g': target_size *= 1000 * 1000 * 1000 elif unit == 'G': target_size *= 1024 * 1024 * 1024 logging.debug(f"target size is {target_size}") with sqlite3.connect(config['db']) as db_conn: prep_database() now = datetime.datetime.now().strftime('%y-%m-%d_%H-%M-%S') db_conn.execute('insert into backup(num,date) values(?,?)', (vol_num, now)) db_conn.commit() with tarfile.open(config['target'], 'w:') as tar_file: incremental() db_conn.commit() cyclic() db_conn.commit() for row in db_conn.execute('select b.num,b.date, count(f.name) from backup as b left join' + ' files as f on b.num=f.volume group by b.num'): if int(row[2]) == 0: msg = f"tarfile {row[1]} from backup {row[0]} can be deleted" logging.info(msg) print(msg) db_conn.execute('delete from backup where num=?', (row[0],)) db_conn.commit() templ = jinja2.Template(resultT) result_txt 
= templ.render(counts) logging.debug(result_txt) print(result_txt) if __name__ == '__main__': try: print(f"{sys.argv[0]} running") logging.basicConfig(filename='cycbackup.log', level=logging.DEBUG, filemode='w', format='%(asctime)s [%(levelname)s] %(filename)s:%(lineno)d %(funcName)s:\t%(message)s') main() except Exception as ex: logging.error(f"main exception {ex}") traceback.print_exc() finally: print("all done")
nilq/baby-python
python
# -*- coding: utf-8 -*- from os.path import dirname, abspath from math import sqrt import numpy as np from scipy.stats import spearmanr, pearsonr from scipy.spatial.distance import cosine import matplotlib matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! Needed for calculations on server. import matplotlib.pyplot as plt import matplotlib.colors as mcolors import seaborn as sb from pandas import DataFrame sb.set_style("ticks") sb.set_color_codes() def read_values(path): """ Read the similarity values. """ pairs, edit_sim, wl0, wl1, wl2 = [], [], [], [], [] with open(path, "r") as f: next(f) # skip header for line in f: c1, c2, e, w0, w1, w2 = line.strip("\n").split("\t") pairs.append((c1,c2)) edit_sim.append(float(e)) wl0.append(float(w0)) wl1.append(float(w1)) wl2.append(float(w2)) return (pairs, np.array(edit_sim), np.array(wl0), np.array(wl1), np.array(wl2)) def cos_similarity(a1, a2): """ Return cosine similarity = 1-cosine distance of arrays a1 and a2. """ return 1-cosine(a1, a2) def plot_wl_vs_edit(w, wl_sim, edit_sim, output): """ Plot the joint plot of wl similarity vs. edit similarity as scatter plot with histograms on marginal axes for given weight w. Calculate and return pearson and spearman correlation as well as cosine similarity. """ g = sb.jointplot(wl_sim, edit_sim, kind="reg", stat_func=None, joint_kws={"scatter_kws":{"alpha":0.5, "s":20, "edgecolors":"none"}}) g.set_axis_labels("WL similarity", "Edit similarity", fontsize=22) #g.fig.suptitle("w={}, {} pairs".format(w, len(wl_sim)), fontsize=21) sb.despine() plt.xlim(0, 1.01) plt.ylim(0,1.01) plt.plot(list(range(0,100)),list(range(0,100)), c="orange") plt.tick_params(axis="both", labelsize=18) plt.tight_layout() plt.savefig(output, dpi=300) plt.close() def plot_correlations(c, weights, correlations, output): """ Plot the correlation values for all weights. 
""" fig, ax = plt.subplots() plt.plot(weights, correlations, marker="o", ls="") plt.xlabel("Weight $\mathregular{w_1}$ = 1-$\mathregular{w_0}$", fontsize=22) if "sqrt" in c: plt.ylabel("{}".format(c), fontsize=18) else: plt.ylabel("{}".format(c), fontsize=22) m = max(correlations) m_i = list(reversed(correlations)).index(m)/len(weights) ax.axvline(x=m_i, ymax=m, c="grey") zed = [tick.label.set_fontsize(18) for tick in ax.xaxis.get_major_ticks()] zed = [tick.label.set_fontsize(18) for tick in ax.yaxis.get_major_ticks()] sb.despine() plt.text(m_i,m+0.002, str(round(m, 3)), fontsize=15, transform=ax.transData) plt.text(m_i+0.05,min(correlations), "$\mathregular{w_0}$ = "+str(round((1-m_i), 2))+"\n$\mathregular{w_1}$ = "+str(round(m_i, 2)), fontsize=18, transform=ax.transData) plt.tight_layout() plt.savefig(output, dpi=300) plt.close() def plot_correlations_3_iter(c, weight_triple, weights, correlations, output): """ Plot the correlation values for all weights. """ n = len(weights) wt = np.array(weight_triple)*100 data = np.ones((n,n)) mask = np.zeros((n,n)) for index, (w0, w1, w2) in enumerate(wt): data[int(round(w2))][int(round(w1))] = correlations[index] for i in range(n): for j in range(n): if data[i][j] == 1: mask[i][j] = True else: mask[i][j] = False data= DataFrame((data)) fig, ax = plt.subplots() ax = sb.heatmap(data, mask=mask,vmin=0.7, vmax=1, xticklabels=100, yticklabels=100, cmap="YlGnBu", square=True, annot=False, fmt='g',cbar=True, cbar_kws={}, rasterized=True) # "log":True ax.invert_yaxis() plt.xlabel("$\mathregular{w_1}$", fontsize=22) plt.ylabel("$\mathregular{w_2}$", fontsize=22) plt.tight_layout() plt.savefig(output, dpi=300) plt.close() def evaluate_2_iter(weights, wl0, wl1, edit_sim, output_prefix3): """ """ correlations_pearson, correlations_spearman, correlations_cosine = [], [], [] mean_correlation = [] rev_w = [] for w in weights: wl_sim = w*wl0 + (1-w)*wl1 plot_wl_vs_edit((1-w), wl_sim, edit_sim, 
output_prefix+"{0:.2f}_wl_vs_edit.pdf".format((1-w))) c_pearson, p = pearsonr(wl_sim, edit_sim) c_spearman, p = spearmanr(wl_sim, edit_sim) c_cos = 1-cosine(wl_sim, edit_sim) correlations_pearson.append(c_pearson) correlations_spearman.append(c_spearman) correlations_cosine.append(c_cos) mean_correlation.append(sqrt(c_cos * c_pearson)) rev_w.append(1-w) plot_correlations("Pearson correlation", rev_w, correlations_pearson, output_prefix+"correlations_pearson.pdf") plot_correlations("Spearmanr correlation", rev_w, correlations_spearman, output_prefix+"correlations_spearman.pdf") plot_correlations("Cosine similarity", rev_w, correlations_cosine, output_prefix+"correlations_cosine.pdf") plot_correlations("sqrt(cosine_sim * pearson_cor)", rev_w, mean_correlation, output_prefix+"correlations_mean.pdf") return (correlations_pearson, correlations_spearman, correlations_cosine, mean_correlation) def evaluate_3_iter(weights, wl0, wl1, wl2, edit_sim, output_prefix): """ """ correlations_pearson, correlations_spearman, correlations_cosine = [], [], [] mean_correlation = [] weight_triple = [] for w1 in weights: for w2 in weights: if w1+w2 > 1: continue w0 = 1-(w1+w2) weight_triple.append((w0,w1,w2)) wl_sim = w0*wl0 + w1*wl1 + w2*wl2 #((1-w0), wl_sim, edit_sim, output_prefix+"{0:.2f}_{1:.2f}_{2:.2f}_wl_vs_edit.pdf".format(w0, w1, w2)) c_pearson, p = pearsonr(wl_sim, edit_sim) c_spearman, p = spearmanr(wl_sim, edit_sim) c_cos = 1-cosine(wl_sim, edit_sim) correlations_pearson.append(c_pearson) correlations_spearman.append(c_spearman) correlations_cosine.append(c_cos) mean_correlation.append(sqrt(c_cos * c_pearson)) plot_correlations_3_iter("Pearson correlation", weight_triple, weights, correlations_pearson, output_prefix+"correlations_pearson3.pdf") plot_correlations_3_iter("Spearmanr correlation", weight_triple, weights, correlations_spearman, output_prefix+"correlations_spearman3.pdf") plot_correlations_3_iter("Cosine similarity", weight_triple, weights, correlations_cosine, 
output_prefix+"correlations_cosine3.pdf") plot_correlations_3_iter("sqrt(cosine_sim * pearson_cor)", weight_triple, weights, mean_correlation, output_prefix+"correlations_mean3.pdf") return (weight_triple, correlations_pearson, correlations_spearman, correlations_cosine, mean_correlation) if __name__ == "__main__": distance_file = snakemake.input[0] output_prefix = dirname(abspath(snakemake.output[1]))+"/" output_prefix3 = dirname(abspath(snakemake.output[4]))+"/" (pairs, edit_sim, wl0, wl1, wl2) = read_values(distance_file) print(len(pairs), "pairs") weights = snakemake.params["w"] corr_pearson_2, corr_spearman_2, corr_cosine_2, mean_correlation_2 = evaluate_2_iter(weights, wl0, wl1, edit_sim, output_prefix) weight_triple, corr_pearson_3, corr_spearman_3, corr_cosine_3, mean_correlation_3 = evaluate_3_iter(weights, wl0, wl1, wl2, edit_sim, output_prefix3) with open(snakemake.output[0], "w") as f: print("stat function", "max value", "index of maximum", "weight of maximum", sep="\t", file=f) print("pearsonr 2iter", max(corr_pearson_2), corr_pearson_2.index(max(corr_pearson_2)), weights[corr_pearson_2.index(max(corr_pearson_2))], sep="\t", file=f) print("spearmanr 2iter", max(corr_spearman_2), corr_spearman_2.index(max(corr_spearman_2)), weights[corr_spearman_2.index(max(corr_spearman_2))], sep="\t", file=f) print("cosine similarity 2iter", max(corr_cosine_2), corr_cosine_2.index(max(corr_cosine_2)), weights[corr_cosine_2.index(max(corr_cosine_2))], sep="\t", file=f) print("sqrt(cosine_sim * pearson_cor) 2iter", max(mean_correlation_2), mean_correlation_2.index(max(mean_correlation_2)), weights[mean_correlation_2.index(max(mean_correlation_2))], sep="\t", file=f) print("pearsonr 3iter", max(corr_pearson_3), corr_pearson_3.index(max(corr_pearson_3)), weight_triple[corr_pearson_3.index(max(corr_pearson_3))], sep="\t", file=f) print("spearmanr 3iter", max(corr_spearman_3), corr_spearman_3.index(max(corr_spearman_3)), 
weight_triple[corr_spearman_3.index(max(corr_spearman_3))], sep="\t", file=f) print("cosine similarity 3iter", max(corr_cosine_3), corr_cosine_3.index(max(corr_cosine_3)), weight_triple[corr_cosine_3.index(max(corr_cosine_3))], sep="\t", file=f) print("sqrt(cosine_sim * pearson_cor) 3iter", max(mean_correlation_3), mean_correlation_3.index(max(mean_correlation_3)), weight_triple[mean_correlation_3.index(max(mean_correlation_3))], sep="\t", file=f)
nilq/baby-python
python
from base import *
import time  # explicit import instead of relying on base's star export
import numpy as np
from typing import List


def numpy_heavy_create_dot(number, base, dims=3000):
    """Benchmark worker: time the creation and multiplication of two
    random ``dims`` x ``dims`` matrices.

    :param number: run index supplied by run_test (unused here)
    :param base: reference timestamp; start/stop are reported relative to it
    :param dims: matrix dimension; defaults to 3000 (the previously
                 hard-coded value), smaller values allow quick runs
    :return: (start, stop) timestamps relative to ``base``
    """
    start = time.time() - base
    a = np.random.rand(dims, dims)
    b = np.random.rand(dims, dims)
    np.dot(a, b)
    stop = time.time() - base
    return start, stop


nums = range(1, 8)
run_test(numpy_heavy_create_dot, nums)
nilq/baby-python
python
from ..encoding import wif_to_secret_exponent from ..convention import tx_fee from .Spendable import Spendable from .Tx import Tx from .TxOut import TxOut, standard_tx_out_script from .pay_to import build_hash160_lookup class SecretExponentMissing(Exception): pass class LazySecretExponentDB(object): """ The pycoin pure python implementation that converts secret exponents into public pairs is very slow, so this class does the conversion lazily and caches the results to optimize for the case of a large number of secret exponents. """ def __init__(self, wif_iterable, secret_exponent_db_cache): self.wif_iterable = iter(wif_iterable) self.secret_exponent_db_cache = secret_exponent_db_cache def get(self, v): if v in self.secret_exponent_db_cache: return self.secret_exponent_db_cache[v] for wif in self.wif_iterable: secret_exponent = wif_to_secret_exponent(wif) d = build_hash160_lookup([secret_exponent]) self.secret_exponent_db_cache.update(d) if v in self.secret_exponent_db_cache: return self.secret_exponent_db_cache[v] self.wif_iterable = [] return None def create_tx(spendables, payables, fee="standard", lock_time=0, version=1): """ This function provides the easiest way to create an unsigned transaction. All coin values are in satoshis. spendables: a list of Spendable objects, which act as inputs. These can be either a Spendable or a Spendable.as_text or a Spendable.as_dict if you prefer a non-object-based input (which might be easier for airgapped transactions, for example). payables: a list where each entry is a bitcoin address, or a tuple of (bitcoin address, coin_value). If the coin_value is missing or zero, this address is thrown into the "split pool". Funds not explicitly claimed by the fee or a bitcoin address are shared as equally as possible among the split pool. [Minor point: if the amount to be split does not divide evenly, some of the earlier bitcoin addresses will get an extra satoshi.] fee: a value, or "standard" for it to be calculated. 
version: the version to use in the transaction. Normally 1. lock_time: the lock_time to use in the transaction. Normally 0. Returns the unsigned Tx transaction. Note that unspents are set, so the transaction can be immediately signed. Example: tx = create_tx( spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH"), ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"], fee=0) This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might take a while to confirm, possibly never). """ def _fix_spendable(s): if isinstance(s, Spendable): return s if not hasattr(s, "keys"): return Spendable.from_text(s) return Spendable.from_dict(s) spendables = [_fix_spendable(s) for s in spendables] txs_in = [spendable.tx_in() for spendable in spendables] txs_out = [] for payable in payables: if len(payable) == 2: bitcoin_address, coin_value = payable else: bitcoin_address = payable coin_value = 0 script = standard_tx_out_script(bitcoin_address) txs_out.append(TxOut(coin_value, script)) tx = Tx(version=version, txs_in=txs_in, txs_out=txs_out, lock_time=lock_time) tx.set_unspents(spendables) distribute_from_split_pool(tx, fee) return tx def distribute_from_split_pool(tx, fee): """ This function looks at TxOut items of a transaction tx and and puts TxOut items with a coin value of zero into a "split pool". Funds not explicitly claimed by the fee or other TxOut items are shared as equally as possible among the split pool. If the amount to be split does not divide evenly, some of the earlier TxOut items will get an extra satoshi. 
tx: the transaction fee: the reserved fee set aside """ # calculate fees if fee == 'standard': # TODO: improve this # 1: the tx is not fully built out, so it will actually be larger than implied at this point # 2: recommended_fee_for_tx gives estimates that are too high fee = tx_fee.recommended_fee_for_tx(tx) zero_count = sum(1 for tx_out in tx.txs_out if tx_out.coin_value == 0) if zero_count > 0: total_coin_value = sum(spendable.coin_value for spendable in tx.txs_in_as_spendable()) coins_allocated = sum(tx_out.coin_value for tx_out in tx.txs_out) + fee remaining_coins = total_coin_value - coins_allocated if remaining_coins < 0: raise ValueError("insufficient inputs for outputs") value_each, extra_count = divmod(remaining_coins, zero_count) if value_each < 1: raise ValueError("not enough to pay nonzero amounts to at least one of the unspecified outputs") for tx_out in tx.txs_out: if tx_out.coin_value == 0: tx_out.coin_value = value_each + (1 if extra_count > 0 else 0) extra_count -= 1 return zero_count def sign_tx(tx, wifs=[], secret_exponent_db={}, **kwargs): """ This function provides an convenience method to sign a transaction. The transaction must have "unspents" set by, for example, calling tx.unspents_from_db. wifs: the list of WIFs required to sign this transaction. secret_exponent_db: an optional dictionary (or any object with a .get method) that contains a bitcoin address => (secret_exponent, public_pair, is_compressed) tuple. This will be built automatically lazily with the list of WIFs. You can pass in an empty dictionary and as WIFs are processed, they will be cached here. If you have multiple transactions to sign, each with the same WIF list, passing a cache dictionary in may speed things up a bit. Returns the signed Tx transaction, or raises an exception. At least one of "wifs" and "secret_exponent_db" must be included for there to be any hope of signing the transaction. 
Example: sign_tx(wifs=["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn"]) """ tx.sign(LazySecretExponentDB(wifs, secret_exponent_db), **kwargs) def create_signed_tx(spendables, payables, wifs=[], fee="standard", lock_time=0, version=1, secret_exponent_db={}, **kwargs): """ This function provides an easy way to create and sign a transaction. All coin values are in satoshis. spendables, payables, fee, lock_time, version are as in create_tx, above. wifs, secret_exponent_db are as in sign_tx, above. Returns the signed Tx transaction, or raises an exception. At least one of "wifs" and "secret_exponent_db" must be included for there to be any hope of signing the transaction. Example: tx = create_signed_tx( spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH"), ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"], wifs=["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn"], fee=0) This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might take a while to confirm, possibly never). """ tx = create_tx(spendables, payables, fee=fee, lock_time=lock_time, version=version) sign_tx(tx, wifs=wifs, secret_exponent_db=secret_exponent_db, **kwargs) for idx, tx_out in enumerate(tx.txs_in): if not tx.is_signature_ok(idx): raise SecretExponentMissing("failed to sign spendable for %s" % tx.unspents[idx].bitcoin_address()) return tx
nilq/baby-python
python
from calendar import timegm
from datetime import datetime
import logging

from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.core.urlresolvers import reverse
from django.conf import settings

from rest_framework import exceptions
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework_jwt.settings import api_settings


class UserJwtTokenMiddleware:
    """
    Custom middleware to set the User on the request when using
    Jwt Token authentication.
    """

    def process_request(self, request):
        """ Override only the request to add the user """
        # If another middleware already set request.user, leave it alone.
        # FIX: this used to `return request.user`; in old-style Django
        # middleware any non-None return from process_request is treated as
        # the HTTP response, which would break the request cycle. Return
        # None instead.
        try:
            request.user
            return
        except AttributeError:
            pass

        obj = JSONWebTokenAuthentication()
        try:
            user_auth_tuple = obj.authenticate(request)
        except exceptions.APIException:
            # invalid/expired token: fall through with no user set
            user_auth_tuple = None

        if user_auth_tuple is not None:
            request.user, _auth = user_auth_tuple
        return


class SlidingJwtTokenMiddleware:
    """
    Custom middleware to set a sliding window for the jwt auth token expiration.
    """

    def process_response(self, request, response):
        """ Override only the request to add the new token """
        obj = JSONWebTokenAuthentication()
        try:
            user_auth_tuple = obj.authenticate(request)
        except exceptions.APIException:
            user_auth_tuple = None

        # Check if request includes valid token
        if user_auth_tuple is None:
            # No authenticated user - just return existing response
            logging.debug('No JWT authenticated user: returning response unchanged.')
            return response

        user, _auth = user_auth_tuple

        # Get the payload details
        jwt_decode_handler = api_settings.JWT_DECODE_HANDLER
        payload = jwt_decode_handler(_auth)
        logging.debug('JWT payload found: {0}'.format(payload))

        # Check whether we need to renew the token. This will happen if the
        # token hasn't been renewed in JWT_TOKEN_RENEWAL_DELTA.
        # Creation time is reconstructed from exp minus the configured
        # expiration delta (the token itself does not carry a created field).
        exp = payload.get('exp')
        created_timestamp = exp - int(api_settings.JWT_EXPIRATION_DELTA.total_seconds())
        renewal_timestamp = created_timestamp + int(settings.JWT_TOKEN_RENEWAL_DELTA.total_seconds())
        now_timestamp = timegm(datetime.utcnow().utctimetuple())

        # If it has been less than JWT_TOKEN_RENEWAL_DELTA time since the token
        # was created then we will pass on creating a renewed token and just
        # return the response unchanged.
        if now_timestamp < renewal_timestamp:
            logging.debug('JWT_TOKEN_RENEWAL_DELTA not exceeded: returning response unchanged.')
            return response

        # Get and check orig_iat (set when the token was first issued)
        orig_iat = payload.get('orig_iat')
        if not orig_iat:
            # orig_iat field is required - just return existing response
            logging.debug('JWT token orig_iat field not defined: returning response unchanged.')
            return response

        # verify expiration of the overall renewal window
        expiration_timestamp = orig_iat + int(api_settings.JWT_TOKEN_RENEWAL_LIMIT.total_seconds())
        if now_timestamp > expiration_timestamp:
            # Token has passed renew time limit - just return existing
            # response (it has probably already been set to an unauthorized
            # status by the view layer).
            logging.debug('JWT token has expired: returning response unchanged.')
            return response

        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        new_payload = jwt_payload_handler(user)
        new_payload['orig_iat'] = orig_iat

        # Attach the renewed token to the response
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        response['Refresh-Token'] = "JWT {0}".format(jwt_encode_handler(new_payload))

        logging.debug('JWT token has been renewed.')
        return response


class AdminOnlySessionMiddleware(SessionMiddleware):
    """
    Only do the session stuff for admin urls.
    The frontend relies on auth tokens.
    """

    def process_request(self, request):
        if request.path.startswith(reverse('admin:index')):
            super(AdminOnlySessionMiddleware, self).process_request(request)
        else:
            return

    def process_response(self, request, response):
        if request.path.startswith(reverse('admin:index')):
            return super(AdminOnlySessionMiddleware, self).process_response(request, response)
        else:
            return response


class AdminOnlyAuthenticationMiddleware(AuthenticationMiddleware):
    """
    Only do the session authentication stuff for admin urls.
    The frontend relies on auth tokens so we clear the user.
    """

    def process_request(self, request):
        if request.path.startswith(reverse('admin:index')):
            super(AdminOnlyAuthenticationMiddleware, self).process_request(request)


class AdminOnlyCsrf(object):
    """
    Disable csrf for non-Admin requests, eg API
    """

    def process_request(self, request):
        if not request.path.startswith(reverse('admin:index')):
            setattr(request, '_dont_enforce_csrf_checks', True)
nilq/baby-python
python
import json
import uuid

from moto.awslambda.exceptions import (
    PreconditionFailedException,
    UnknownPolicyException,
)


class Policy:
    """In-memory model of a Lambda resource-based policy.

    Holds a list of statement dicts plus a RevisionId; the revision is
    regenerated on every mutation and is used for optimistic-concurrency
    checks by add_statement/del_statement.
    """

    def __init__(self, parent):
        # `parent` is the backend object owning this policy; its
        # function_arn is used to default the statement Resource.
        self.revision = str(uuid.uuid4())
        self.statements = []
        self.parent = parent

    def wire_format(self):
        """Serialize the policy the way the Lambda API returns it:
        the inner "Policy" document is itself a JSON-encoded string."""
        p = self.get_policy()
        p["Policy"] = json.dumps(p["Policy"])
        return json.dumps(p)

    def get_policy(self):
        """Return the policy document and its current RevisionId as a dict."""
        return {
            "Policy": {
                "Version": "2012-10-17",
                "Id": "default",
                "Statement": self.statements,
            },
            "RevisionId": self.revision,
        }

    # adds the raw JSON statement to the policy
    def add_statement(self, raw):
        # decode_policy (used as the json object_hook) turns the raw
        # AddPermission request into a one-statement Policy that carries the
        # caller-supplied RevisionId (empty string when not supplied).
        policy = json.loads(raw, object_hook=self.decode_policy)
        if len(policy.revision) > 0 and self.revision != policy.revision:
            raise PreconditionFailedException(
                "The RevisionId provided does not match the latest RevisionId"
                " for the Lambda function or alias. Call the GetFunction or the GetAlias API to retrieve"
                " the latest RevisionId for your resource."
            )
        self.statements.append(policy.statements[0])
        # every successful mutation produces a fresh revision
        self.revision = str(uuid.uuid4())

    # removes the statement that matches 'sid' from the policy
    def del_statement(self, sid, revision=""):
        # an empty `revision` skips the optimistic-concurrency check
        if len(revision) > 0 and self.revision != revision:
            raise PreconditionFailedException(
                "The RevisionId provided does not match the latest RevisionId"
                " for the Lambda function or alias. Call the GetFunction or the GetAlias API to retrieve"
                " the latest RevisionId for your resource."
            )
        for statement in self.statements:
            if "Sid" in statement and statement["Sid"] == sid:
                self.statements.remove(statement)
                break
        else:
            # no statement carried the requested Sid
            raise UnknownPolicyException()

    # converts AddPermission request to PolicyStatement
    # https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html
    def decode_policy(self, obj):
        # NOTE(review): as a json object_hook this is invoked for every JSON
        # object in `raw`; the request body is assumed to be a flat object --
        # confirm against callers if nested payloads are possible.
        policy = Policy(self.parent)
        policy.revision = obj.get("RevisionId", "")

        # set some default values if these keys are not set
        self.ensure_set(obj, "Effect", "Allow")
        self.ensure_set(obj, "Resource", self.parent.function_arn + ":$LATEST")
        self.ensure_set(obj, "StatementId", str(uuid.uuid4()))

        # transform field names and values
        self.transform_property(obj, "StatementId", "Sid", self.nop_formatter)
        self.transform_property(obj, "Principal", "Principal", self.principal_formatter)
        self.transform_property(
            obj, "SourceArn", "SourceArn", self.source_arn_formatter
        )
        self.transform_property(
            obj, "SourceAccount", "SourceAccount", self.source_account_formatter
        )

        # remove RevisionId and EventSourceToken if they are set
        self.remove_if_set(obj, ["RevisionId", "EventSourceToken"])

        # merge conditional statements into a single map under the Condition key
        self.condition_merge(obj)

        # append resulting statement to policy.statements
        policy.statements.append(obj)
        return policy

    def nop_formatter(self, obj):
        # identity transform, used when only the key name changes
        return obj

    def ensure_set(self, obj, key, value):
        # set a default only when the key is absent
        if key not in obj:
            obj[key] = value

    def principal_formatter(self, obj):
        # "x.amazonaws.com" -> service principal; "...:root" -> account principal
        if isinstance(obj, str):
            if obj.endswith(".amazonaws.com"):
                return {"Service": obj}
            if obj.endswith(":root"):
                return {"AWS": obj}
        return obj

    def source_account_formatter(self, obj):
        return {"StringEquals": {"AWS:SourceAccount": obj}}

    def source_arn_formatter(self, obj):
        return {"ArnLike": {"AWS:SourceArn": obj}}

    def transform_property(self, obj, old_name, new_name, formatter):
        # rename obj[old_name] to obj[new_name], applying `formatter` to the value
        if old_name in obj:
            obj[new_name] = formatter(obj[old_name])
            if new_name != old_name:
                del obj[old_name]

    def remove_if_set(self, obj, keys):
        for key in keys:
            if key in obj:
                del obj[key]

    def condition_merge(self, obj):
        # fold SourceArn / SourceAccount constraints into a single Condition map
        if "SourceArn" in obj:
            if "Condition" not in obj:
                obj["Condition"] = {}
            obj["Condition"].update(obj["SourceArn"])
            del obj["SourceArn"]

        if "SourceAccount" in obj:
            if "Condition" not in obj:
                obj["Condition"] = {}
            obj["Condition"].update(obj["SourceAccount"])
            del obj["SourceAccount"]
nilq/baby-python
python
""" TensorFlow 基础概念 """ #%% 导入 TensorFlow import tensorflow as tf #%% 什么是Tensor # Tensor 是 TensorFlow 的基本对象 # 说白了就是多维向量 t0 = tf.constant(1) # 0阶 tensor t1 = tf.constant([1, 2]) # 1阶 tensor t2 = tf.constant([[1, 2], [3, 4]]) # 2阶 tensor t3 = tf.constant([[[1., 2., 3.]], [[7., 8., 9.]]]) # 3阶 tensor print(t0) print(t1) print(t2) print(t3) #%% Session # TensorFlow 的基本对象是 graph node 需要依赖于 Session 进行求值 sess = tf.Session() print(sess.run([t0, t1, t2, t3])) #%% 基本运算也是一个 graph node, 并且这些运算是向量化的 add = tf.add(t0, t1) print(sess.run(add)) #%% placeholder # 用来表示一个输入数据的占位符,其值在执行时给定 a = tf.placeholder(tf.float32) b = tf.placeholder(tf.float32) add_node = a + b print(sess.run(add_node, {a: 2, b: 3})) print(sess.run(add_node, {a: [1, 2], b: [3, 4]})) #%% Variable W = tf.Variable([1.], tf.float32) b = tf.Variable([1.], tf.float32) x = tf.placeholder(tf.float32) linear_model = W * x + b sess.run(tf.global_variables_initializer()) # 必须显式声明初始化 print(sess.run(linear_model, {x: [1, 2, 3, 4]})) #%% 定义损失函数 y = tf.placeholder(tf.float32) loss = tf.reduce_sum(tf.square(linear_model - y)) print(sess.run(loss, {x: [1, 2, 3], y: [2, 4, 8]})) #%% 赋值 sess.run([tf.assign(W, [2]), tf.assign(b, [-1])]) print(sess.run(loss, {x: [1, 2, 3], y: [1, 3, 5]})) #%% 训练模型 optimizer = tf.train.GradientDescentOptimizer(0.01) train = optimizer.minimize(loss) sess.run(tf.global_variables_initializer()) # 重置为错误的值 # 训练 for i in range(1000): sess.run(train, {x: [1, 2, 3], y: [1, 3, 5]}) print(sess.run([W, b]))
nilq/baby-python
python
import unittest

import pytest

from api.utils import encode
from api.utils import decode


class UtilsTest(unittest.TestCase):
    """Tests for the base62 encode/decode helpers in api.utils."""

    def test_base62_encode_zero(self):
        # zero must encode to the single digit "0", not an empty string
        assert encode(0) == "0"

    def test_base62_encode_digit(self):
        n = 4
        # single decimal digits map to themselves
        assert encode(n) == str(n)

    def test_base62_encode_small_number(self):
        # 10 is the first value that needs a letter
        assert encode(10) == "a"

    def test_base62_encode_large_number(self):
        assert encode(3213213) == "dtU1"

    def test_base62_decode_zero(self):
        assert decode("0") == 0

    def test_base62_decode_digit(self):
        assert decode("4") == 4

    def test_base62_decode_a(self):
        # fixed local-name typo (was `dencoded_n`)
        assert decode("a") == 10

    def test_base62_decode_large_number(self):
        assert decode("dtU1") == 3213213

    def test_base62_round_trip(self):
        # encode and decode must be inverses across digit boundaries
        for n in (0, 1, 61, 62, 3213213):
            assert decode(encode(n)) == n
nilq/baby-python
python
#!/usr/bin/env python import subprocess import os import sys import glob import json import traceback import re import logging log = logging.getLogger('run-ci') import time import threading from benchmark import framework_test from benchmark.utils import gather_tests from benchmark.utils import header # Cross-platform colored text from colorama import Fore, Back, Style # Needed for various imports sys.path.append('.') sys.path.append('toolset/setup/linux') sys.path.append('toolset/benchmark') from setup.linux import setup_util class CIRunnner: ''' Manages running TFB on the Travis Continuous Integration system. Makes a best effort to avoid wasting time and resources by running useless jobs. Only verifies the first test in each directory ''' SUPPORTED_DATABASES = "mysql postgres mongodb cassandra elasticsearch sqlite redis none".split() def __init__(self, mode, testdir=None): ''' mode = [verify] for what we want to do testdir = framework directory we are running ''' self.directory = testdir self.mode = mode logging.basicConfig(level=logging.INFO) try: # NOTE: THIS IS VERY TRICKY TO GET RIGHT! # # Our goal: Look at the files changed and determine if we need to # run a verification for this folder. For a pull request, we want to # see the list of files changed by any commit in that PR. For a # push to master, we want to see a list of files changed by the pushed # commits. If this list of files contains the current directory, or # contains the toolset/ directory, then we need to run a verification # # If modifying, please consider: # - the commit range for a pull request is the first PR commit to # the github auto-merge commit # - the commits in the commit range may include merge commits # other than the auto-merge commit. 
An git log with -m # will know that *all* the files in the merge were changed, # but that is not the changeset that we care about # - git diff shows differences, but we care about git log, which # shows information on what was changed during commits # - master can (and will!) move during a build. This is one # of the biggest problems with using git diff - master will # be updated, and those updates will include changes to toolset, # and suddenly every job in the build will start to run instead # of fast-failing # - commit_range is not set if there was only one commit pushed, # so be sure to test for that on both master and PR # - commit_range and commit are set very differently for pushes # to an owned branch versus pushes to a pull request, test # - For merge commits, the TRAVIS_COMMIT and TRAVIS_COMMIT_RANGE # will become invalid if additional commits are pushed while a job is # building. See https://github.com/travis-ci/travis-ci/issues/2666 # - If you're really insane, consider that the last commit in a # pull request could have been a merge commit. This means that # the github auto-merge commit could have more than two parents # - Travis cannot really support rebasing onto an owned branch, the # commit_range they provide will include commits that are non-existant # in the repo cloned on the workers. See https://github.com/travis-ci/travis-ci/issues/2668 # # - TEST ALL THESE OPTIONS: # - On a branch you own (e.g. your fork's master) # - single commit # - multiple commits pushed at once # - commit+push, then commit+push again before the first # build has finished. Verify all jobs in the first build # used the correct commit range # - multiple commits, including a merge commit. 
Verify that # the unrelated merge commit changes are not counted as # changes the user made # - On a pull request # - repeat all above variations # # # ==== CURRENT SOLUTION FOR PRs ==== # # For pull requests, we will examine Github's automerge commit to see # what files would be touched if we merged this into the current master. # You can't trust the travis variables here, as the automerge commit can # be different for jobs on the same build. See https://github.com/travis-ci/travis-ci/issues/2666 # We instead use the FETCH_HEAD, which will always point to the SHA of # the lastest merge commit. However, if we only used FETCH_HEAD than any # new commits to a pull request would instantly start affecting currently # running jobs and the the list of changed files may become incorrect for # those affected jobs. The solution is to walk backward from the FETCH_HEAD # to the last commit in the pull request. Based on how github currently # does the automerge, this is the second parent of FETCH_HEAD, and # therefore we use FETCH_HEAD^2 below # # This may not work perfectly in situations where the user had advanced # merging happening in their PR. We correctly handle them merging in # from upstream, but if they do wild stuff then this will likely break # on that. However, it will also likely break by seeing a change in # toolset and triggering a full run when a partial run would be # acceptable # # ==== CURRENT SOLUTION FOR OWNED BRANCHES (e.g. master) ==== # # This one is fairly simple. Find the commit or commit range, and # examine the log of files changes. If you encounter any merges, # then fully explode the two parent commits that made the merge # and look for the files changed there. 
This is an aggressive # strategy to ensure that commits to master are always tested # well log.debug("TRAVIS_COMMIT_RANGE: %s", os.environ['TRAVIS_COMMIT_RANGE']) log.debug("TRAVIS_COMMIT : %s", os.environ['TRAVIS_COMMIT']) is_PR = (os.environ['TRAVIS_PULL_REQUEST'] != "false") if is_PR: log.debug('I am testing a pull request') first_commit = os.environ['TRAVIS_COMMIT_RANGE'].split('...')[0] last_commit = subprocess.check_output("git rev-list -n 1 FETCH_HEAD^2", shell=True).rstrip('\n') log.debug("Guessing that first commit in PR is : %s", first_commit) log.debug("Guessing that final commit in PR is : %s", last_commit) if first_commit == "": # Travis-CI is not yet passing a commit range for pull requests # so we must use the automerge's changed file list. This has the # negative effect that new pushes to the PR will immediately # start affecting any new jobs, regardless of the build they are on log.debug("No first commit, using Github's automerge commit") self.commit_range = "--first-parent -1 -m FETCH_HEAD" elif first_commit == last_commit: # There is only one commit in the pull request so far, # or Travis-CI is not yet passing the commit range properly # for pull requests. We examine just the one commit using -1 # # On the oddball chance that it's a merge commit, we pray # it's a merge from upstream and also pass --first-parent log.debug("Only one commit in range, examining %s", last_commit) self.commit_range = "-m --first-parent -1 %s" % last_commit else: # In case they merged in upstream, we only care about the first # parent. For crazier merges, we hope self.commit_range = "--first-parent %s...%s" % (first_commit, last_commit) if not is_PR: log.debug('I am not testing a pull request') # Three main scenarios to consider # - 1 One non-merge commit pushed to master # - 2 One merge commit pushed to master (e.g. a PR was merged). 
# This is an example of merging a topic branch # - 3 Multiple commits pushed to master # # 1 and 2 are actually handled the same way, by showing the # changes being brought into to master when that one commit # was merged. Fairly simple, `git log -1 COMMIT`. To handle # the potential merge of a topic branch you also include # `--first-parent -m`. # # 3 needs to be handled by comparing all merge children for # the entire commit range. The best solution here would *not* # use --first-parent because there is no guarantee that it # reflects changes brought into master. Unfortunately we have # no good method inside Travis-CI to easily differentiate # scenario 1/2 from scenario 3, so I cannot handle them all # separately. 1/2 are the most common cases, 3 with a range # of non-merge commits is the next most common, and 3 with # a range including merge commits is the least common, so I # am choosing to make our Travis-CI setup potential not work # properly on the least common case by always using # --first-parent # Handle 3 # Note: Also handles 2 because Travis-CI sets COMMIT_RANGE for # merged PR commits self.commit_range = "--first-parent -m %s" % os.environ['TRAVIS_COMMIT_RANGE'] # Handle 1 if self.commit_range == "": self.commit_range = "--first-parent -m -1 %s" % os.environ['TRAVIS_COMMIT'] except KeyError: log.warning("I should only be used for automated integration tests e.g. 
Travis-CI") log.warning("Were you looking for run-tests.py?") self.commit_range = "-m HEAD^...HEAD" # # Find the one test from benchmark_config.json that we are going to run # tests = gather_tests() self.fwroot = setup_util.get_fwroot() target_dir = self.fwroot + '/frameworks/' + testdir log.debug("Target directory is %s", target_dir) dirtests = [t for t in tests if t.directory == target_dir] # Travis-CI is linux only osvalidtests = [t for t in dirtests if t.os.lower() == "linux" and (t.database_os.lower() == "linux" or t.database_os.lower() == "none")] # Our Travis-CI only has some databases supported validtests = [t for t in osvalidtests if t.database.lower() in self.SUPPORTED_DATABASES] supported_databases = ','.join(self.SUPPORTED_DATABASES) log.info("Found %s usable tests (%s valid for linux, %s valid for linux and {%s}) in directory '%s'", len(dirtests), len(osvalidtests), len(validtests), supported_databases, '$FWROOT/frameworks/' + testdir) if len(validtests) == 0: log.critical("Found no test that is possible to run in Travis-CI! Aborting!") if len(osvalidtests) != 0: log.critical("Note: Found these tests that could run in Travis-CI if more databases were supported") log.critical("Note: %s", osvalidtests) databases_needed = [t.database for t in osvalidtests] databases_needed = list(set(databases_needed)) log.critical("Note: Here are the needed databases:") log.critical("Note: %s", databases_needed) sys.exit(1) self.names = [t.name for t in validtests] log.info("Using tests %s to verify directory %s", self.names, '$FWROOT/frameworks/' + testdir) def _should_run(self): ''' Decides if the current framework test should be tested. Examines git commits included in the latest push to see if any files relevant to this framework were changed. If you do rewrite history (e.g. rebase) then it's up to you to ensure that both old and new (e.g. old...new) are available in the public repository. 
For simple rebase onto the public master this is not a problem, only more complex rebases may have issues ''' # Don't use git diff multiple times, it's mega slow sometimes\ # Put flag on filesystem so that future calls to run-ci see it too if os.path.isfile('.run-ci.should_run'): return True if os.path.isfile('.run-ci.should_not_run'): return False def touch(fname): open(fname, 'a').close() log.debug("Using commit range `%s`", self.commit_range) log.debug("Running `git log --name-only --pretty=\"format:\" %s`" % self.commit_range) changes = "" try: changes = subprocess.check_output("git log --name-only --pretty=\"format:\" %s" % self.commit_range, shell=True) except subprocess.CalledProcessError, e: log.error("Got errors when using git to detect your changes, assuming that we must run this verification!") log.error("Error was: %s", e.output) log.error("Did you rebase a branch? If so, you can safely disregard this error, it's a Travis limitation") return True changes = os.linesep.join([s for s in changes.splitlines() if s]) # drop empty lines if len(changes.splitlines()) > 1000: log.debug("Change list is >1000 lines, uploading to sprunge.us instead of printing to console") url = subprocess.check_output("git log --name-only %s | curl -F 'sprunge=<-' http://sprunge.us" % self.commit_range, shell=True) log.debug("Uploaded to %s", url) else: log.debug("Result:\n%s", changes) # Look for changes to core TFB framework code if re.search(r'^toolset/', changes, re.M) is not None: log.info("Found changes to core framework code") touch('.run-ci.should_run') return True # Look for changes relevant to this test if re.search("^frameworks/%s/" % re.escape(self.directory), changes, re.M) is None: log.info("No changes found for directory %s", self.directory) touch('.run-ci.should_not_run') return False log.info("Changes found for directory %s", self.directory) touch('.run-ci.should_run') return True def run(self): ''' Do the requested command using TFB ''' if not self._should_run(): 
log.info("I found no changes to `%s` or `toolset/`, aborting verification", self.directory) return 0 # Do full setup now that we've verified that there's work to do try: p = subprocess.Popen("config/travis_setup.sh", shell=True) p.wait() except subprocess.CalledProcessError: log.critical("Subprocess Error") print trackback.format_exc() return 1 except Exception as err: log.critical("Exception from running and waiting on subprocess to set up Travis environment") log.error(err.child_traceback) return 1 names = ' '.join(self.names) # Assume mode is verify command = "toolset/run-tests.py --mode verify --test %s" % names # Run the command log.info("Running mode %s with commmand %s", self.mode, command) try: p = subprocess.Popen(command, shell=True) p.wait() return p.returncode except subprocess.CalledProcessError: log.critical("Subprocess Error") print traceback.format_exc() return 1 except Exception as err: log.critical("Exception from running+wait on subprocess") log.error(err.child_traceback) return 1 if __name__ == "__main__": args = sys.argv[1:] usage = '''Usage: toolset/run-ci.py [verify] <framework-directory> run-ci.py selects one test from <framework-directory>/benchark_config, and automates a number of calls into run-tests.py specific to the selected test. It is guaranteed to always select the same test from the benchark_config, so multiple runs with the same <framework-directory> reference the same test. The name of the selected test will be printed to standard output. 
  verify - run a verification on the selected test using
           `--mode verify`

  run-ci.py expects to be run inside the Travis-CI build environment, and
  will expect environment variables such as $TRAVIS_BUILD'''

  # exactly two positional arguments are required: <mode> <framework-directory>
  if len(args) != 2:
    print usage
    sys.exit(1)

  mode = args[0]
  testdir = args[1]
  # only 'verify' is a supported mode; anything else prints usage and exits
  if len(args) == 2 and (mode == 'verify'):
    runner = CIRunnner(mode, testdir)
  else:
    print usage
    sys.exit(1)

  retcode = 0
  try:
    retcode = runner.run()
  except KeyError as ke:
    # raised when expected Travis environment variables are absent
    log.warning("Environment key missing, are you running inside Travis-CI?")
    print traceback.format_exc()
    retcode = 1
  except Exception:
    log.critical("Unknown error")
    print traceback.format_exc()
    retcode = 1
  finally:  # Ensure that logs are printed
    sys.exit(retcode)

# vim: set sw=2 ts=2 expandtab
nilq/baby-python
python
"""Package interface: re-export the DNS server classes for convenient import."""
from .dns_server import DnsServer, DnsServerNotRunningException
from .dns_demo_server import DnsDemoServer
nilq/baby-python
python
import tempfile
import unittest
from pathlib import Path

import numpy as np
import pandas as pd

from tests.fixtures.algorithms import DeviatingFromMean, DeviatingFromMedian
from tests.fixtures.dataset_fixtures import CUSTOM_DATASET_PATH
from timeeval import TimeEval, Algorithm, AlgorithmParameter, DatasetManager, InputDimensionality


def generates_results(dataset, from_file: bool = False) -> pd.DataFrame:
    """Run both baseline detectors on `dataset` and return the TimeEval results frame."""

    def preprocess(x: AlgorithmParameter, args: dict) -> np.ndarray:
        # A path is loaded from disk, dropping the index and label columns;
        # an in-memory array is passed through untouched.
        if not isinstance(x, np.ndarray):
            return pd.read_csv(x).values[:, 1:-1]
        return x

    detectors = [
        Algorithm(name=algo_name, main=algo_main, preprocess=preprocess, data_as_file=from_file)
        for algo_name, algo_main in (
            ("deviating_from_mean", DeviatingFromMean()),
            ("deviating_from_median", DeviatingFromMedian()),
        )
    ]
    dmgr = DatasetManager("./tests/example_data", custom_datasets_file=CUSTOM_DATASET_PATH)

    with tempfile.TemporaryDirectory() as tmp_path:
        evaluator = TimeEval(dmgr, [dataset], detectors, results_path=Path(tmp_path))
        evaluator.run()
    return evaluator.results


def generates_results_multi(dataset) -> pd.DataFrame:
    """Run both multivariate baseline detectors on `dataset` and return the results frame."""
    detectors = [
        Algorithm(name=algo_name, main=algo_main, data_as_file=False,
                  input_dimensionality=InputDimensionality.MULTIVARIATE)
        for algo_name, algo_main in (
            ("deviating_from_mean", DeviatingFromMean()),
            ("deviating_from_median", DeviatingFromMedian()),
        )
    ]
    dmgr = DatasetManager("./tests/example_data", custom_datasets_file=CUSTOM_DATASET_PATH)

    with tempfile.TemporaryDirectory() as tmp_path:
        evaluator = TimeEval(dmgr, [dataset], detectors, results_path=Path(tmp_path))
        evaluator.run()
    return evaluator.results


class TestImportData(unittest.TestCase):
    def setUp(self) -> None:
        # Only the "algorithm", "collection", "dataset" and "ROC_AUC" columns
        # are compared; timing, status, and error-message columns are
        # nondeterministic and therefore excluded.
        self.results = pd.read_csv("./tests/example_data/results.csv")
        self.multi_results = pd.read_csv("./tests/example_data/results_multi.csv")
        self.KEYS = ['algorithm', 'collection', 'dataset', 'ROC_AUC']

    def test_generates_correct_results(self):
        dataset_id = ("custom", "dataset.1")
        actual = generates_results(dataset_id)
        expected = self.results[self.results.dataset == dataset_id[1]]
        np.testing.assert_array_equal(actual[self.KEYS].values, expected[self.KEYS].values)

    def test_generates_correct_results_from_multi_file(self):
        dataset_id = ("custom", "dataset.4")
        actual = generates_results_multi(dataset_id)
        expected = self.multi_results[self.multi_results.dataset == dataset_id[1]]
        np.testing.assert_array_equal(actual[self.KEYS].values, expected[self.KEYS].values)

    def test_algorithm_with_filename(self):
        dataset_id = ("custom", "dataset.1")
        actual = generates_results(dataset_id, from_file=True)
        expected = self.results[self.results.dataset == dataset_id[1]]
        np.testing.assert_array_equal(actual[self.KEYS].values, expected[self.KEYS].values)


if __name__ == "__main__":
    unittest.main()
nilq/baby-python
python
"""Kata: Binary Addition - Return the opposite of the input number. #1 Best Practices Solution by arzyk and 7 others def add_binary(a,b): return bin(a+b)[2:] """ def add_binary(a, b): return bin(a + b)[2:]
nilq/baby-python
python
# -*- coding: utf-8 -*-
import dataiku
import pandas as pd, numpy as np
from dataiku import pandasutils as pdu
import requests
import urllib
import json
import time
from datetime import datetime
from dataiku.customrecipe import *
import dataiku_esri_utils
import common
from dataiku_esri_utils import recipe_config_get_str_or_none


def append_item_features(df_values, results_values, ii, P_OPTION_DATA_AS_TRANSACTIONS):
    """Append the attributes of feature `ii` from an Esri FeatureSet response
    to `df_values` and return the enlarged frame.

    When P_OPTION_DATA_AS_TRANSACTIONS is True the attributes are emitted in
    long (name, value) form; otherwise as one wide row per feature.
    """
    features = results_values[u'FeatureSet'][0][u'features'][ii]['attributes']
    if P_OPTION_DATA_AS_TRANSACTIONS is True:
        # long format: one (name, value) row per attribute
        df_values_tmp = pd.DataFrame.from_dict(features, orient='index')
        df_values_tmp = df_values_tmp.reset_index()
        df_values_tmp.columns = ['name', 'value']
    else:
        # wide format: a single row keyed by attribute name
        df_values_tmp = pd.DataFrame.from_dict(features, orient='index').T
    return df_values.append(df_values_tmp)


def update_batch_metadata(df_metadata, results_values, country):
    """Append the field definitions of an Esri FeatureSet response to
    `df_metadata`, tagged with `country`, and return the result.

    Rows without a 'component' value are dropped.
    """
    fd = results_values[u'FeatureSet'][0][u'fields']
    df_fields_definition_tmp_master = pd.DataFrame()
    for fd_tmp in fd:
        # one wide row per field definition dict
        df_fields_definition_tmp = pd.DataFrame.from_dict(fd_tmp, orient='index').T
        df_fields_definition_tmp_master = pd.concat((df_fields_definition_tmp_master, df_fields_definition_tmp), axis=0)
    df_fields_definition_tmp_master['Country'] = country
    df_metadata = pd.concat((df_metadata, df_fields_definition_tmp_master), axis=0)
    # NOTE(review): assumes each field definition carries a 'component' key --
    # confirm against the Esri GeoEnrichment response schema
    df_metadata = df_metadata[df_metadata['component'].notnull()]
    return df_metadata


def write_outputs(
        result_dataset, df_values,
        metadata_dataset, df_metadata,
        geometry_dataset, df_geometry_result,
        log_api_dataset, df_api_log,
        P_ACTIVATE_BACKUP, backup_basename, P_OPTION_DATA_AS_TRANSACTIONS, date
):
    """Persist the recipe outputs to their Dataiku datasets.

    The metadata/geometry/API-log datasets are optional (skipped when None);
    an optional CSV backup of `df_values` can be written first.
    """
    # UGLY Temporary
    if P_ACTIVATE_BACKUP is True:
        # backup goes under the DSS data dir's tmp folder, pipe-separated
        backup_path = dataiku.get_custom_variables()["dip.home"] + '/tmp/'
        filename = 'dataiku_plugin_esri_' + backup_basename + '_data_backup_' + date + '.csv'
        f = backup_path + filename
        print 'Exporting backup of your data with (key,value) format: %s' % (P_OPTION_DATA_AS_TRANSACTIONS)
        df_values.to_csv(f, sep='|', index='none')
        print 'Backup stored into: %s ' % (f)

    result_dataset.write_with_schema(df_values)

    if metadata_dataset is not None and df_metadata.shape[0] > 0:
        print "Writing metdata: %s" % df_metadata
        # deduplicate metadata rows, keeping the most recent occurrence
        df_metadata = df_metadata.reset_index()
        df_metadata = df_metadata.drop('index', axis=1)
        df_metadata = df_metadata.drop_duplicates(take_last=True)
        metadata_dataset.write_with_schema(df_metadata)

    if geometry_dataset is not None:
        geometry_dataset.write_with_schema(df_geometry_result)

    if log_api_dataset is not None:
        log_api_dataset.write_with_schema(df_api_log)
nilq/baby-python
python
""" This file is needed as 1.6 only finds tests in files labelled test_*, and ignores tests/__init__.py. """ from south.tests import *
nilq/baby-python
python
""" Copyright (c) 2018-2021 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import numpy as np from ..config import BoolField, NumberField, ConfigError from ..representation import ( SegmentationAnnotation, SegmentationPrediction, BrainTumorSegmentationAnnotation, BrainTumorSegmentationPrediction, OAR3DTilingSegmentationAnnotation, ) from .metric import PerImageEvaluationMetric from ..utils import finalize_metric_result class SegmentationMetric(PerImageEvaluationMetric): annotation_types = (SegmentationAnnotation, ) prediction_types = (SegmentationPrediction, ) CONFUSION_MATRIX_KEY = 'segmentation_confusion_matrix' @classmethod def parameters(cls): parameters = super().parameters() parameters.update({ 'use_argmax': BoolField( optional=True, default=True, description="Allows to use argmax for prediction mask." 
), 'ignore_label': NumberField( optional=True, value_type=int, min_value=0, description='Ignore prediction and annotation of specified class during metric calculation' ) }) return parameters def evaluate(self, annotations, predictions): raise NotImplementedError def configure(self): self.use_argmax = self.get_value_from_config('use_argmax') if not self.dataset.labels: raise ConfigError('semantic segmentation metrics require label_map providing in dataset_meta' 'Please provide dataset meta file or regenerated annotation') self.ignore_label = self.get_value_from_config('ignore_label') if self.profiler: self.profiler.names = self.dataset.labels def update(self, annotation, prediction): n_classes = len(self.dataset.labels) prediction_mask = np.argmax(prediction.mask, axis=0) if self.use_argmax else prediction.mask.astype('int64') def confusion_matrix(): label_true = annotation.mask.flatten() label_pred = prediction_mask.flatten() mask = (label_true >= 0) & (label_true < n_classes) & (label_pred < n_classes) & (label_pred >= 0) hist = np.bincount(n_classes * label_true[mask].astype(int) + label_pred[mask], minlength=n_classes ** 2) hist = hist.reshape(n_classes, n_classes) if self.ignore_label is not None: hist[self.ignore_label, :] = 0 hist[:, self.ignore_label] = 0 return hist def accumulate(confusion_matrixs): return confusion_matrixs + cm cm = confusion_matrix() self._update_state(accumulate, self.CONFUSION_MATRIX_KEY, lambda: np.zeros((n_classes, n_classes))) return cm def reset(self): self.state = {} self._update_iter = 0 if self.profiler: self.profiler.reset() class SegmentationAccuracy(SegmentationMetric): __provider__ = 'segmentation_accuracy' def update(self, annotation, prediction): cm = super().update(annotation, prediction) result = np.diag(cm).sum() / cm.sum() if self.profiler: self.profiler.update(annotation.identifier, self.name, cm, result, prediction.mask) return result def evaluate(self, annotations, predictions): confusion_matrix = 
self.state[self.CONFUSION_MATRIX_KEY] if self.profiler: self.profiler.finish() return np.diag(confusion_matrix).sum() / confusion_matrix.sum() class SegmentationIOU(SegmentationMetric): __provider__ = 'mean_iou' def update(self, annotation, prediction): cm = super().update(annotation, prediction) diagonal = np.diag(cm).astype(float) union = cm.sum(axis=1) + cm.sum(axis=0) - diagonal iou = np.divide(diagonal, union, out=np.full_like(diagonal, np.nan), where=union != 0) if self.ignore_label is not None: iou = np.delete(iou, self.ignore_label) if self.profiler: self.profiler.update(annotation.identifier, self.name, cm, iou, prediction.mask) return iou def evaluate(self, annotations, predictions): confusion_matrix = self.state[self.CONFUSION_MATRIX_KEY] diagonal = np.diag(confusion_matrix) union = confusion_matrix.sum(axis=1) + confusion_matrix.sum(axis=0) - diagonal iou = np.divide(diagonal, union, out=np.full_like(diagonal, np.nan), where=union != 0) cls_names = list(self.dataset.labels.values()) if self.ignore_label is not None: iou = np.delete(iou, self.ignore_label) cls_names = [cls_name for cls_id, cls_name in self.dataset.labels.items() if cls_id != self.ignore_label] values, names = finalize_metric_result(iou, cls_names) self.meta['names'] = names if self.profiler: self.profiler.finish() return values class SegmentationMeanAccuracy(SegmentationMetric): __provider__ = 'mean_accuracy' def update(self, annotation, prediction): cm = super().update(annotation, prediction) diagonal = np.diag(cm).astype(float) per_class_count = cm.sum(axis=1) acc_cls = np.divide(diagonal, per_class_count, out=np.full_like(diagonal, np.nan), where=per_class_count != 0) if self.profiler: self.profiler.update(annotation.identifier, self.name, cm, acc_cls, prediction.mask) return acc_cls def evaluate(self, annotations, predictions): confusion_matrix = self.state[self.CONFUSION_MATRIX_KEY] diagonal = np.diag(confusion_matrix) per_class_count = confusion_matrix.sum(axis=1) acc_cls = 
np.divide(diagonal, per_class_count, out=np.full_like(diagonal, np.nan), where=per_class_count != 0) values, names = finalize_metric_result(acc_cls, list(self.dataset.labels.values())) self.meta['names'] = names if self.profiler: self.profiler.finish() return values class SegmentationFWAcc(SegmentationMetric): __provider__ = 'frequency_weighted_accuracy' def update(self, annotation, prediction): cm = super().update(annotation, prediction) diagonal = np.diag(cm).astype(float) union = cm.sum(axis=1) + cm.sum(axis=0) - diagonal iou = np.divide(diagonal, union, out=np.zeros_like(diagonal), where=union != 0) freq = cm.sum(axis=1) / cm.sum() result = (freq[freq > 0] * iou[freq > 0]).sum() if self.profiler: self.profiler.update(annotation.identifier, self.name, cm, result, prediction.mask) return result def evaluate(self, annotations, predictions): confusion_matrix = self.state[self.CONFUSION_MATRIX_KEY] diagonal = np.diag(confusion_matrix) union = confusion_matrix.sum(axis=1) + confusion_matrix.sum(axis=0) - diagonal iou = np.divide(diagonal, union, out=np.zeros_like(diagonal), where=union != 0) freq = confusion_matrix.sum(axis=1) / confusion_matrix.sum() if self.profiler: self.profiler.finish() return (freq[freq > 0] * iou[freq > 0]).sum() class SegmentationDSCAcc(PerImageEvaluationMetric): __provider__ = 'dice' annotation_types = (BrainTumorSegmentationAnnotation,) prediction_types = (BrainTumorSegmentationPrediction,) overall_metric = [] def update(self, annotation, prediction): result = [] for prediction_mask, annotation_mask in zip(prediction.mask, annotation.mask): annotation_mask = np.transpose(annotation_mask, (2, 0, 1)) annotation_mask = np.expand_dims(annotation_mask, 0) numerator = np.sum(prediction_mask * annotation_mask) * 2.0 + 1.0 denominator = np.sum(annotation_mask) + np.sum(prediction_mask) + 1.0 result.append(numerator / denominator) self.overall_metric.extend(result) return np.mean(result) def evaluate(self, annotations, predictions): return 
sum(self.overall_metric) / len(self.overall_metric) def reset(self): self.overall_metric = [] class SegmentationDIAcc(PerImageEvaluationMetric): __provider__ = 'dice_index' annotation_types = (BrainTumorSegmentationAnnotation, SegmentationAnnotation, OAR3DTilingSegmentationAnnotation) prediction_types = (BrainTumorSegmentationPrediction, SegmentationPrediction, ) overall_metric = [] @classmethod def parameters(cls): parameters = super().parameters() parameters.update({ 'mean': BoolField(optional=True, default=True, description='Allows calculation mean value.'), 'median': BoolField(optional=True, default=False, description='Allows calculation median value.'), }) return parameters def configure(self): self.mean = self.get_value_from_config('mean') self.median = self.get_value_from_config('median') self.output_order = self.get_value_from_config('output_order') labels = list(self.dataset.labels.values()) if self.dataset.metadata else ['overall'] self.classes = len(labels) names_mean = ['mean@{}'.format(name) for name in labels] if self.mean else [] names_median = ['median@{}'.format(name) for name in labels] if self.median else [] self.meta['names'] = names_mean + names_median self.meta['calculate_mean'] = False self.overall_metric = [] def update(self, annotation, prediction): result = np.zeros(shape=self.classes) annotation_data = annotation.mask prediction_data = prediction.mask if prediction_data.shape[0] != 1 and len(prediction_data.shape) != 3: raise RuntimeError("For '{}' metric prediction mask should has only 1 channel, but more found. " "Specify 'make_argmax' option in adapter or postprocessor." 
.format(self.__provider__)) label_order = getattr(prediction, 'label_order', [0, 1, 2, 3]) for c, p in enumerate(label_order, 1): annotation_data_ = (annotation_data == c) prediction_data_ = (prediction_data == p) intersection_count = np.logical_and(annotation_data_, prediction_data_).sum() union_count = annotation_data_.sum() + prediction_data_.sum() if union_count > 0: result[c] += 2.0*intersection_count / union_count annotation_data_ = (annotation_data > 0) prediction_data_ = (prediction_data > 0) intersection_count = np.logical_and(annotation_data_, prediction_data_).sum() union_count = annotation_data_.sum() + prediction_data_.sum() if union_count > 0: result[0] += 2.0 * intersection_count / union_count self.overall_metric.append(result) return result def evaluate(self, annotations, predictions): mean = np.mean(self.overall_metric, axis=0) if self.mean else [] median = np.median(self.overall_metric, axis=0) if self.median else [] result = np.concatenate((mean, median)) return result def reset(self): labels = self.dataset.labels.values() if self.dataset.metadata else ['overall'] self.classes = len(labels) names_mean = ['mean@{}'.format(name) for name in labels] if self.mean else [] names_median = ['median@{}'.format(name) for name in labels] if self.median else [] self.meta['names'] = names_mean + names_median self.meta['calculate_mean'] = False self.overall_metric = [] class SegmentationUnet3D(PerImageEvaluationMetric): __provider__ = 'dice_unet3d' annotation_types = (BrainTumorSegmentationAnnotation, SegmentationAnnotation, OAR3DTilingSegmentationAnnotation) prediction_types = (BrainTumorSegmentationPrediction, SegmentationPrediction, ) overall_metric = [] @classmethod def parameters(cls): parameters = super().parameters() parameters.update({ 'mean': BoolField(optional=True, default=True, description='Allows calculation mean value.'), 'median': BoolField(optional=True, default=False, description='Allows calculation median value.'), }) return parameters def 
configure(self): self.mean = self.get_value_from_config('mean') self.median = self.get_value_from_config('median') self.output_order = self.get_value_from_config('output_order') labels = ['whole tumor', 'tumor core', 'enhancing tumor'] self.classes = len(labels) names_mean = ['mean@{}'.format(name) for name in labels] if self.mean else [] names_median = ['median@{}'.format(name) for name in labels] if self.median else [] self.meta['names'] = names_mean + names_median self.meta['calculate_mean'] = False self.overall_metric = [] def update(self, annotation, prediction): result = np.zeros(shape=self.classes) annotation_data = annotation.mask prediction_data = prediction.mask for c in range(self.classes): annotation_data_ = (annotation_data > c) prediction_data_ = (prediction_data > c) intersection_count = np.logical_and(annotation_data_, prediction_data_).sum() union_count = annotation_data_.sum() + prediction_data_.sum() if union_count > 0: result[c] = 2.0*intersection_count / union_count else: result[c] = np.nan self.overall_metric.append(result) return result def evaluate(self, annotations, predictions): mean = np.nanmean(self.overall_metric, axis=0) if self.mean else [] median = np.nanmedian(self.overall_metric, axis=0) if self.median else [] result = np.concatenate((mean, median)) return result def reset(self): self.overall_metric = [] class SegmentationOAR3DTiling(PerImageEvaluationMetric): __provider__ = 'dice_oar3d' annotation_types = (OAR3DTilingSegmentationAnnotation,) prediction_types = (SegmentationPrediction,) overall_metric = [] def configure(self): self.overall_metric = [] def update(self, annotation, prediction): eps = 1e-6 numerator = 2.0 * np.sum(annotation.mask * prediction.mask) denominator = np.sum(annotation.mask) + np.sum(prediction.mask) result = (numerator + eps) / (denominator + eps) self.overall_metric.append(result) return result def evaluate(self, annotations, predictions): result = np.mean(self.overall_metric, axis=0) return result def 
reset(self): self.overall_metric = []
nilq/baby-python
python
""" The ``agar.json`` module contains classes to assist with creating json web service handlers. """ import datetime import logging from google.appengine.ext.db import BadRequestError, BadValueError from agar.config import Config from agar.models import ModelException from pytz.gae import pytz from restler.serializers import json_response as restler_json_response from webapp2 import RequestHandler, HTTPException INVALID_CURSOR = 'INVALID_CURSOR' class JsonConfig(Config): """ :py:class:`~agar.config.Config` settings for the ``agar.json`` library. Settings are under the ``agar_json`` namespace. The following settings (and defaults) are provided:: agar_url_DEFAULT_PAGE_SIZE = 10 agar_url_MAX_PAGE_SIZE = 100 agar_url_USE_DATA_ROOT_NODE = True agar_url_ADD_SUCCESS_FLAG = False To override ``agar.json`` settings, define values in the ``appengine_config.py`` file in the root of your project. """ _prefix = 'agar_json' DEFAULT_PAGE_SIZE = 10 MAX_PAGE_SIZE = 100 USE_DATA_ROOT_NODE = True ADD_SUCCESS_FLAG = False config = JsonConfig.get_config() def string_to_int(s, default=10): try: return int(s) except: return default class JsonRequestHandler(RequestHandler): """ A `webapp2.RequestHandler`_ implementation to help with json web service handlers, including error handling. 
""" def _setup_context(self, context): if not context: context = {} context['request'] = self.request return context def _setup_data(self, model_or_query, status_code, status_text, errors=None): data = dict() data['status_code'] = status_code data['status_text'] = status_text data['timestamp'] = datetime.datetime.now(pytz.utc) if config.ADD_SUCCESS_FLAG: if status_code < 400: data['sucess'] = True else: data['sucess'] = False if errors is not None: data['errors'] = errors if config.USE_DATA_ROOT_NODE: data['data'] = model_or_query else: data.update(model_or_query) return data def json_response(self, model_or_query, strategy=None, status_code=200, status_text='OK', errors=None, context=None): """ Fills in the `webapp2.Response`_ with the contents of the passed model or query serialized using the :py:mod:`restler` library. :param model_or_query: The `Model`_ or `Query`_ to serialize. :param strategy: The :py:class:`~restler.serializers.SerializationStrategy` to use to serialize. :param status_code: The HTTP status code to set in the `webapp2.Response`_. :param status_text: A text description of the status code. :param errors: A dictionary of errors to add to the response. :param context: The context to be used when serializing. :return: The serialized text to be used as the HTTP response data. """ context = self._setup_context(context) data = self._setup_data(model_or_query, status_code, status_text, errors=errors) return restler_json_response(self.response, data, strategy=strategy, status_code=status_code, context=context) def handle_exception(self, exception, debug_mode): """ The `webapp2.RequestHandler`_ exception handler. Sets the `webapp2.Response`_ with appropriate settings. :param exception: The uncaught exception. :param debug_mode: Whether we're running in debug mode. 
""" errors = None status_text = exception.message if isinstance(exception, HTTPException): code = exception.code status_text = "BAD_REQUEST" errors = exception.message elif isinstance(exception, ModelException): code = 400 status_text = "BAD_REQUEST" errors = exception.message else: code = 500 status_text = "INTERNAL_SERVER_ERROR" errors = exception.message logging.error("API 500 ERROR: %s" % exception) if code == 401: status_text = 'UNAUTHORIZED' if code == 403: status_text = 'FORBIDDEN' if code == 404: status_text = 'NOT_FOUND' if code == 405: status_text = 'METHOD_NOT_ALLOWED' self.json_response({}, status_code=code, status_text=status_text, errors=errors) class MultiPageHandler(JsonRequestHandler): """ A :py:class:`~agar.json.JsonRequestHandler` class to help with ``page_size`` and ``cursor`` parsing and logic. """ @property def page_size(self): """ The requested ``page_size`` constrained between ``1`` and the configuration value ``agar_json_MAX_PAGE_SIZE``. If ``page_size`` isn't passed in, it will default to the configuration value ``agar_json_DEFAULT_PAGE_SIZE``. :return: The requested page size for fetching. """ page_size = string_to_int(self.request.get('page_size', str(config.DEFAULT_PAGE_SIZE))) page_size = min(max(page_size, 1), config.MAX_PAGE_SIZE) return page_size def fetch_page(self, query): """ Fetches a page of the passed ``query`` using the :py:attr:`~agar.json.MultiPageHandler.page_size` and the ``cursor`` request parameter. :param query: The `Query`_ to fetch from. :return: A two-tuple containing results of the paged fetch and the next page's cursor if there's more results. 
""" cursor = self.request.get('cursor', None) if cursor is not None: try: query = query.with_cursor(cursor) except (BadValueError, BadRequestError): self.abort(400, INVALID_CURSOR) results = [] try: results = query.fetch(self.page_size) except (BadValueError, BadRequestError): self.abort(400, INVALID_CURSOR) next_cursor = None if len(results) == self.page_size: next_cursor = query.cursor() return results, next_cursor class CorsMultiPageHandler(MultiPageHandler): """ A :py:class:`~agar.json.MultiPageHandler` to help with Cross-Origin Resource sharing . """ def options(self): origin = self.request.headers.get('Origin', 'unknown origin') self.response.headers['Access-Control-Allow-Methods'] = 'POST, GET, PUT, DELETE, OPTIONS' self.response.headers['Access-Control-Max-Age'] = 1728000 self.response.headers['Access-Control-Allow-Credentials'] = \ self.request.headers.get('Access-Credentials', 'true') self.response.headers['Access-Control-Allow-Origin']= ':'.join(origin.split(':')[0:2]) self.response.headers['Access-Control-Allow-Origin']= origin.strip() self.response.headers['Access-Control-Allow-Headers'] = \ self.request.headers.get('Access-Control-Request-Headers', '') def json_response(self, model_or_query, strategy=None, status_code=200, status_text='OK', errors=None, context=None): context = self._setup_context(context) data = self._setup_data(model_or_query, status_code, status_text, errors=errors) origin = self.request.headers.get('Origin', '') if origin: self.response.headers['Access-Control-Allow-Origin'] = origin else: self.response.headers['Access-Control-Allow-Origin'] = "/".join(self.request.headers.get("Referer", "").split("/")[0:3]) self.response.headers['Access-Control-Allow-Headers'] = "true" self.response.headers['Access-Control-Allow-Credentials'] = "true" return restler_json_response(self.response, data, strategy=strategy, status_code=status_code, context=context)
nilq/baby-python
python
#!/usr/bin/python from math import floor def find_largest_factor(n): """ Return the largest prime factor of n: 1. Find i such that i is the smallest number that i * j = n 2. Therefore the largest prime factor of n is also the largest prime factor of j 3. Repeat until j is a prime number """ for i in range(2,floor(n/2)): if n % i == 0: return find_largest_factor(int(n/i)) return n if __name__ == '__main__': print("The largest prime factor of 600851475143 is " + str(find_largest_factor(600851475143)))
nilq/baby-python
python
# Generated by Django 3.2.6 on 2021-09-04 16:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('telegrambot', '0011_telegramuserbot'), ] operations = [ migrations.AddField( model_name='telegramuserbot', name='session_file', field=models.FileField(blank=True, null=True, upload_to='userbot-sessions/', verbose_name='telethon session file'), ), ]
nilq/baby-python
python
'''
    File name: utilities.py
    Author: Simonas Laurinavicius
    Email: simonas.laurinavicius@mif.stud.vu.lt
    Python Version: 3.7.6
    Purpose:
        Utilities module defines various helper functions used by different modules
'''

# Local modules
import formats


def return_shorter_str(str1, str2):
    """Return the shorter of the two strings; ties go to str1."""
    return str2 if len(str2) < len(str1) else str1


# Reference: [https://stackoverflow.com/questions/12173774/how-to-modify-bits-in-an-integer]
def set_bit(byte, value, idx):
    """Return `byte` with bit `idx` (0 = least significant) forced to `value`."""
    mask = 1 << idx
    byte &= ~mask  # clear the bit first, then set it if requested
    if value:
        byte |= mask
    return byte


def match(question, record):
    """Check whether a DNS question matches a zone record.

    Names must be equal; a question of type '*' (ANY) matches any record type.
    """
    if question.q_name != record["name"]:
        return False
    return question.q_type == formats.Type['*'] or question.q_type == formats.Type[record["type"]]


def check_for_name_error(answer_rr):
    """An empty answer section signals a name error (NXDOMAIN)."""
    return len(answer_rr) == 0


def encode_domain_name(name):
    """Encode a dotted domain name into DNS wire format: one length octet
    followed by the label bytes, for every label.

    NOTE(review): no terminating zero-length root label is appended here;
    confirm the message-assembly code adds it (or relies on compression).
    """
    return b''.join(
        len(label).to_bytes(1, byteorder="big") + label.encode()
        for label in name.split('.')
    )


def encode_record_addr(addr):
    """Encode a dotted-quad IPv4 address string into its 4-byte wire form."""
    return b''.join(int(octet).to_bytes(1, byteorder="big") for octet in addr.split('.'))


def record_to_bytes(record, class_):
    """Serialize a zone record dict into a formats.RR resource record.

    SOA records are always distributed with a zero TTL to prohibit caching.
    """
    name = encode_domain_name(record["name"])
    type_ = formats.Type[record["type"]]
    class_ = formats.Class[class_]

    if record["type"] != "SOA":
        ttl = record["ttl"].to_bytes(4, byteorder="big")
    else:
        ttl = (0).to_bytes(4, byteorder="big")

    r_data = get_rdata(record, class_)
    rd_length = len(r_data).to_bytes(2, byteorder="big")

    return formats.RR(name, type_, class_, ttl, rd_length, r_data)


def get_soa_rdata(record):
    """RDATA for an SOA record: MNAME, RNAME, then five 32-bit timer fields."""
    domain_entries = ["name_server", "maintainer"]
    time_fields = ["serial", "refresh", "retry", "expire", "minimum"]

    r_data = b''.join(encode_domain_name(record[entry]) for entry in domain_entries)
    r_data += b''.join(record[field].to_bytes(4, byteorder="big") for field in time_fields)
    return r_data


def get_ns_rdata(record):
    """RDATA for an NS record: the authoritative host name."""
    return encode_domain_name(record["host_name"])


def get_mx_rdata(record):
    """RDATA for an MX record: 16-bit preference followed by the exchange name."""
    return record["preference"].to_bytes(2, byteorder="big") + encode_domain_name(record["exchange"])


def get_a_rdata(record):
    """RDATA for an A record: the IPv4 address."""
    return encode_record_addr(record["address"])


def get_rdata(record, class_):
    """Dispatch RDATA encoding by record type (IN class only).

    Unknown types fall back to A-record encoding, matching the original design;
    non-IN classes encode as empty RDATA.
    """
    if class_ != formats.Class["IN"]:
        return b''
    rdata_encoders = {
        "SOA": get_soa_rdata,
        "NS": get_ns_rdata,
        "MX": get_mx_rdata,
    }
    return rdata_encoders.get(record["type"], get_a_rdata)(record)
nilq/baby-python
python
CLAUSE_LIST = [(1,), (0,)]
N = 3
A = 65


class SAT:
    """
    This class is an SAT solver.
    Create an instance by passing in a list of clauses and the number of variables.

    Uses notation of 2N + 1 to input tuples of clauses.
    Ex: (A+B)(~B+C) -> (0, 2)(3, 4)
    There are 3 variables so:
        A = 0 = 2*(0)       ~A = 1 = 2*(0) + 1
        B = 2 = 2*(1)       ~B = 3 = 2*(1) + 1
        C = 4 = 2*(2)       ~C = 5 = 2*(2) + 1
    """

    def __init__(self, clauseList, numOfVar):
        self.clauseList = clauseList
        self.numOfVar = numOfVar
        self.vars = {}           # "Xi" -> True/False/None (None = unassigned)
        self.solutions = {}      # per-variable partial solution lists
        self.masterSolutionSet = []
        self.final = []          # list of complete satisfying assignments
        self.makeDict()
        self.makeSolutions()
        self.neg = False

    def makeSolutions(self):
        """Initialize an empty solution list for every variable key."""
        for key in self.vars.keys():
            self.solutions[key] = []

    def makeDict(self):
        """
        Auto-generate a dict with one entry per variable as a look-up reference,
        each initialized to None (True=1, False=0, None=not yet assigned).
        Ex: 3 variables -> {"X0": None, "X1": None, "X2": None}
        """
        i = 0
        while i < self.numOfVar:
            temp = {"X{}".format(i): None}
            self.vars.update(temp)
            i += 1

    def getBool(self, val, vars):
        """
        Return the truth value of literal `val` under assignment `vars`:
        True, False, or None if the underlying variable is unassigned.
        Odd literals are negations, so their variable's value is inverted.
        """
        key, isNot = self.getKeyForBool(val=val)
        boolVal = vars.get(key)
        if boolVal is None:
            return boolVal
        if isNot:
            boolVal = not boolVal
        return boolVal

    def getKeyForBool(self, val):
        """
        Decode literal `val` into (variable key, negated?).
        Literal 2n is Xn; literal 2n+1 is ~Xn — integer division by 2 recovers n
        for both cases. (A dead no-op statement `val - 1` was removed here.)
        """
        isNot = (val % 2) != 0
        n = val // 2
        key = "X{}".format(n)
        return key, isNot

    def testClause(self, pair, vars):
        """
        Evaluate one clause (tuple of literals) under `vars`.
        True  = some literal is True, so the disjunction is satisfied.
        None  = undecided; further branching is needed.
        False = every literal is assigned and False.
        """
        boolList = self.getBoolList(pair=pair, vars=vars)
        if True in boolList:
            return True
        if None in boolList:
            return None
        return False

    def getBoolList(self, pair, vars):
        """Return the per-literal truth values of a clause as a list."""
        return [self.getBool(val=item, vars=vars) for item in pair]

    def checkClauses(self, vars, clauses):
        """Evaluate every clause under `vars`; returns a parallel list of results."""
        return [self.testClause(pair=clause, vars=vars) for clause in clauses]

    def preBranch(self, clauses, vars):
        """
        Inspect the current branch: drop clauses already satisfied by `vars`
        (recording which assignments satisfied them) and detect contradictions.

        Returns (remaining clauses, satisfying assignments as "Xi=0/1" strings),
        or (None, None) if some clause is unsatisfiable under `vars`.

        Ex: clauses=[(0, 2), (2, 5)], vars={"X0": True, "X1": None, "X2": None}
            -> ([(2, 5)], ["X0=1"])
        """
        tempClauses = clauses.copy()
        tempVar = []
        results = self.checkClauses(vars=vars, clauses=clauses)
        for result, val in zip(results, clauses):
            if result:
                varVals = self.getVarValues(vars=vars, val=val)
                boolList = self.getBoolList(vars=vars, pair=val)
                while None in boolList:
                    boolList.remove(None)
                for solution, status in zip(varVals, boolList):
                    if status:
                        # Record the assignment of the literal that satisfied the clause.
                        temp = solution.split("=")[0]
                        tempVar.append(f"{temp}=1" if vars.get(temp) else f"{temp}=0")
                tempClauses.remove(val)
            if result is False:
                # A fully-assigned clause evaluated False: this branch is dead.
                return None, None
        return tempClauses, tempVar

    def getVarValues(self, vars, val):
        """
        Return "Xi=0/1" strings for every assigned variable appearing in clause
        `val`; unassigned (None) variables are skipped.
        Ex: vars={"X0": True, ...}, val=(0, 2) -> ["X0=1"]
        """
        results = []
        for item in val:
            value = self.getBool(vars=vars, val=item)
            if value is not None:
                key, isNot = self.getKeyForBool(val=item)
                results.append(f"{key}={1 if value else 0}")
        return results

    def starter(self):
        """Kick off the search tree and print every satisfying assignment found."""
        vars = self.vars
        clauses = self.clauseList
        keyList = [*self.vars]
        self.tree(key=keyList[0], vars=vars, clauses=clauses, keyList=keyList)
        print("\n____Solution Set____")
        if self.final:
            for count, thing in enumerate(self.final, 1):
                val = f"Solution {count}: "
                for item in thing:
                    val += f"{item}, "
                print(val[:-2])  # strip trailing ", "
        else:
            print("No solutions exist.")

    def formatSolutions(self):
        """Flatten per-variable solution lists into masterSolutionSet."""
        for key in self.solutions.keys():
            solutionList = self.solutions.get(key)
            for item in solutionList:
                self.masterSolutionSet.append(item)

    def tree(self, key, vars, clauses, keyList, solutionSet=None):
        """Branch on `key` = True, reset state, then branch on `key` = False."""
        keyListA = keyList.copy()
        keyListB = keyList.copy()
        keyCopyA = key
        keyCopyB = key
        self.posSolver(vars=vars, clauses=clauses, key=keyCopyA, keyList=keyListA, solutionSet=self.solutions)
        vars = self.varReset(vars=vars)
        for key in self.solutions:
            self.solutions[key] = []
        self.negSolver(vars=vars, clauses=clauses, key=keyCopyB, keyList=keyListB, solutionSet=self.solutions)

    def solutionReset(self):
        """Clear every recorded solution dict in masterSolutionSet."""
        for item in self.masterSolutionSet:
            tempVal = item
            for key in item.keys():
                tempVal[key] = []

    def varReset(self, vars):
        """Unassign every variable and return the (mutated) dict."""
        for key in vars.keys():
            vars[key] = None
        return vars

    def posSolver(self, vars, clauses, key, keyList, solutionSet):
        """Try `key` = True and continue solving."""
        vars[key] = True
        self.solver(vars=vars, clauses=clauses, key=key, keyList=keyList, solutionSet=solutionSet)

    def negSolver(self, vars, clauses, key, keyList, solutionSet):
        """Try `key` = False and continue solving."""
        vars[key] = False
        self.solver(vars=vars, clauses=clauses, key=key, keyList=keyList, solutionSet=solutionSet)

    def solver(self, vars, clauses, key, keyList, solutionSet):
        """
        Core recursive step: clear assignments made after `key`, prune satisfied
        clauses, record partial solutions, and either recurse on the next
        variable or — when no clauses remain — emit a complete solution.
        """
        # Unassign every variable that comes after `key` in insertion order.
        reset = False
        for tempKey in vars.keys():
            if reset:
                vars[tempKey] = None
            if tempKey == key:
                reset = True
        # Bug fix: was `run.preBranch(...)`, reaching for the module-level
        # instance instead of `self` — a NameError when used as a library.
        remainingClauses, currentSolutionSet = self.preBranch(vars=vars, clauses=clauses)
        if remainingClauses is None:
            return
        if currentSolutionSet:
            if currentSolutionSet not in solutionSet.get(key):
                solutionSet[key].append(currentSolutionSet)
        if remainingClauses:
            keyList.remove(key)
            key = keyList[0]
            self.posSolver(key=key, vars=vars, clauses=remainingClauses.copy(), keyList=keyList,
                           solutionSet={k: v.copy() for k, v in solutionSet.items()})
            self.negSolver(key=key, vars=vars, clauses=remainingClauses.copy(), keyList=keyList,
                           solutionSet={k: v.copy() for k, v in solutionSet.items()})
        else:
            # All clauses satisfied: flatten the recorded assignments into one solution.
            tempSolutions = []
            for key in solutionSet.keys():
                tempList = solutionSet.get(key)
                for solutionList in tempList:
                    for item in solutionList:
                        tempSolutions.append(item)
            if tempSolutions not in self.final and tempSolutions:
                self.final.append(tempSolutions)


if __name__ == '__main__':
    run = SAT(clauseList=CLAUSE_LIST, numOfVar=N)
    run.starter()
python