# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
from contextlib import contextmanager
import datetime
from dateutil import parser as date_parser
import itertools
import numpy
import os
from six.moves import configparser as ConfigParser
from six.moves.urllib import parse
import tempfile
import threading
import dogpile.cache
from feedgen import feed
import flask
from flask import abort
from flask import make_response
from flask import request
from flask_jsonpify import jsonify
from operator import itemgetter
from pbr import version
import pyelasticsearch
import pytz
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from subunit2sql.db import api
from openstack_health import distributed_dbm
from openstack_health import run_aggregator
from openstack_health import test_run_aggregator
try:
from elastic_recheck import config as er_config
from elastic_recheck import elasticRecheck as er
except ImportError:
er = None
app = flask.Flask(__name__)
app.config['PROPAGATE_EXCEPTIONS'] = True
config = None
engine = None
Session = None
query_dir = None
classifier = None
rss_opts = {}
feeds = {'last runs': {}}
region = None
es_url = None
def get_app():
return app
def _config_get(config_func, section, option, default_val=False):
retval = default_val
if default_val is not False:
try:
retval = config_func(section, option)
except ConfigParser.NoOptionError:
pass
else:
retval = config_func(section, option)
return retval
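# Illustrative usage of the helper above (values hypothetical): leaving
# default_val as False marks the option as required, so a missing option
# raises ConfigParser.NoOptionError; any other default is returned instead:
#
#   db_uri = _config_get(config.get, 'default', 'db_uri')            # required
#   pool = _config_get(config.getint, 'default', 'pool_size', 20)    # optional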
@app.before_first_request
def _setup():
setup()
def setup():
global config
if not config:
args = parse_command_line_args()
config = ConfigParser.ConfigParser()
config.read(args.config_file)
# Database Configuration
global engine
db_uri = _config_get(config.get, 'default', 'db_uri')
pool_size = _config_get(config.getint, 'default', 'pool_size', 20)
pool_recycle = _config_get(config.getint, 'default', 'pool_recycle', 3600)
engine = create_engine(db_uri,
pool_size=pool_size,
pool_recycle=pool_recycle)
global Session
Session = sessionmaker(bind=engine)
# RSS Configuration
rss_opts['frontend_url'] = _config_get(
config.get, 'default', 'frontend_url',
'http://status.openstack.org/openstack-health')
# Elastic-recheck Configuration
global query_dir
query_dir = _config_get(config.get, 'default', 'query_dir', None)
global es_url
es_url = _config_get(config.get, 'default', 'es_url', None)
if query_dir and er:
elastic_config = er_config.Config(es_url=es_url)
global classifier
classifier = er.Classifier(query_dir, config=elastic_config)
# Cache Configuration
backend = _config_get(config.get, 'default', 'cache_backend',
'dogpile.cache.dbm')
expire = _config_get(config.getint, 'default', 'cache_expiration',
datetime.timedelta(minutes=30))
cache_file = _config_get(config.get, 'default', 'cache_file',
os.path.join(tempfile.gettempdir(),
'openstack-health.dbm'))
cache_url = _config_get(config.get, 'default', 'cache_url', None)
global region
if backend == 'dogpile.cache.dbm':
args = {'filename': cache_file}
if cache_url:
def _key_generator(namespace, fn, **kw):
namespace = fn.__name__ + (namespace or '')
def generate_key(*arg):
return namespace + "_".join(
str(s).replace(' ', '_') for s in arg)
return generate_key
memcache_proxy = distributed_dbm.MemcachedLockedDBMProxy(
cache_url)
region = dogpile.cache.make_region(
async_creation_runner=_periodic_refresh_cache,
function_key_generator=_key_generator).configure(
backend, expiration_time=expire, arguments=args,
wrap=[memcache_proxy])
else:
region = dogpile.cache.make_region().configure(
backend, expiration_time=expire, arguments=args)
else:
args = {'distributed_lock': True}
if cache_url:
args['url'] = cache_url
region = dogpile.cache.make_region(
async_creation_runner=_periodic_refresh_cache).configure(
backend, expiration_time=expire, arguments=args)
def get_session():
global Session
if not Session:
setup()
return Session()
@contextmanager
def session_scope():
    # Create the session before entering the try block so the rollback and
    # close handlers never touch an unbound name if creation itself fails.
    session = get_session()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
def elastic_recheck_cached(change_num, patch_num, short_uuid):
global region
if not region:
setup()
@region.cache_on_arguments()
def _elastic_recheck_cached(change_num, patch_num, short_uuid):
return classifier.classify(change_num, patch_num,
short_uuid, recent=False)
return _elastic_recheck_cached(change_num, patch_num, short_uuid)
@app.route('/', methods=['GET'])
def list_routes():
output = []
for rule in app.url_map.iter_rules():
options = {}
for arg in rule.arguments:
options[arg] = "[{0}]".format(arg)
url = flask.url_for(rule.endpoint, **options)
out_dict = {
'name': rule.endpoint,
'methods': sorted(rule.methods),
'url': parse.unquote(url),
}
output.append(out_dict)
return jsonify({'routes': output})
@app.route('/build_name/<string:build_name>/runs', methods=['GET'])
def get_runs_from_build_name(build_name):
with session_scope() as session:
build_name = parse.unquote(build_name)
db_runs = api.get_runs_by_key_value('build_name', build_name, session)
runs = [run.to_dict() for run in db_runs]
return jsonify({'runs': runs})
@app.route('/runs/metadata/keys', methods=['GET'])
def get_run_metadata_keys():
global config
try:
if config:
ignored_keys = (config
.get('default', 'ignored_run_metadata_keys')
.splitlines())
else:
ignored_keys = []
except ConfigParser.NoOptionError:
ignored_keys = []
with session_scope() as session:
existing_keys = set(api.get_all_run_metadata_keys(session))
allowed_keys = existing_keys.difference(ignored_keys)
return jsonify(list(allowed_keys))
def _parse_datetimes(datetime_str):
if datetime_str:
return date_parser.parse(datetime_str)
else:
return datetime_str
@app.route('/runs/group_by/<string:key>', methods=['GET'])
def get_runs_grouped_by_metadata_per_datetime(key):
key = parse.unquote(key)
start_date = _parse_datetimes(flask.request.args.get('start_date', None))
stop_date = _parse_datetimes(flask.request.args.get('stop_date', None))
datetime_resolution = flask.request.args.get('datetime_resolution', 'sec')
with session_scope() as session:
sec_runs = api.get_all_runs_time_series_by_key(key, start_date,
stop_date, session)
if datetime_resolution not in ['sec', 'min', 'hour', 'day']:
return ('Datetime resolution: %s, is not a valid'
' choice' % datetime_resolution), 400
runs = run_aggregator.RunAggregator(sec_runs).aggregate(
datetime_resolution)
return jsonify({'runs': runs})
def _group_runs_by_key(runs_by_time, groupby_key):
"""
Groups runs by a key.
This function assumes that your runs are already grouped by time.
"""
keyfunc = lambda c: c['metadata'].get(groupby_key)
grouped_runs_by = {}
for timestamp, run_by_time in runs_by_time.items():
if timestamp not in grouped_runs_by:
grouped_runs_by[timestamp] = {}
for key, val in itertools.groupby(run_by_time, keyfunc):
if val:
grouped_runs_by[timestamp][key] = list(val)
return grouped_runs_by
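# Shape sketch for the grouping helper above (data hypothetical). Note that
# itertools.groupby only merges *consecutive* items, so runs sharing a key
# must already be adjacent within each timestamp bucket:
#
#   runs_by_time = {'2015-01-01': [{'metadata': {'build_name': 'gate-a'}},
#                                  {'metadata': {'build_name': 'gate-a'}}]}
#   _group_runs_by_key(runs_by_time, 'build_name')
#   # => {'2015-01-01': {'gate-a': [<both runs>]}}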
@app.route('/build_name/<path:build_name>/test_runs', methods=['GET'])
def get_test_runs_by_build_name(build_name):
value = parse.unquote(build_name)
if not value:
return 'A build name must be specified', 400
start_date = _parse_datetimes(flask.request.args.get('start_date', None))
stop_date = _parse_datetimes(flask.request.args.get('stop_date', None))
datetime_resolution = flask.request.args.get('datetime_resolution', 'sec')
if datetime_resolution not in ['sec', 'min', 'hour', 'day']:
return ('Datetime resolution: %s, is not a valid'
' choice' % datetime_resolution), 400
@region.cache_on_arguments()
def _query_test_runs_by_build_name(name, start_date, stop_date):
with session_scope() as session:
tests = api.get_test_run_dict_by_run_meta_key_value('build_name',
name,
start_date,
stop_date,
session)
tests = test_run_aggregator.TestRunAggregator(tests).aggregate(
datetime_resolution=datetime_resolution)
return tests
output = _query_test_runs_by_build_name(value, start_date, stop_date)
return jsonify({'tests': output})
@app.route('/runs', methods=['GET'])
def get_runs():
start_date = _parse_datetimes(flask.request.args.get('start_date', None))
stop_date = _parse_datetimes(flask.request.args.get('stop_date', None))
with session_scope() as session:
db_runs = api.get_all_runs_by_date(start_date, stop_date, session)
runs = [run.to_dict() for run in db_runs]
return jsonify({'runs': runs})
def _calc_amount_of_successful_runs(runs):
"""
    A run succeeded if it had at least one pass and no failures.
    A run with no fails and no passes did not succeed.
"""
was_run_successful = lambda x: 1 if x['fail'] == 0 and x['pass'] > 0 else 0
successful_runs = map(was_run_successful, runs)
return sum(successful_runs)
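# For example (hypothetical runs), only the first entry below is counted:
#
#   _calc_amount_of_successful_runs([
#       {'pass': 3, 'fail': 0},   # success
#       {'pass': 2, 'fail': 1},   # had a failure
#       {'pass': 0, 'fail': 0},   # nothing ran, not a success
#   ])  # == 1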
def _calc_amount_of_failed_runs(runs):
"""
    If there was any failure, then the whole run failed.
"""
return sum((1 for r in runs if r['fail'] > 0))
def _aggregate_runs(runs_by_time_delta):
aggregated_runs = []
for time in runs_by_time_delta:
runs_by_job_name = runs_by_time_delta[time]
job_data = []
for job_name in runs_by_job_name:
runs = runs_by_job_name[job_name]
amount_of_success = _calc_amount_of_successful_runs(runs)
amount_of_failures = _calc_amount_of_failed_runs(runs)
avg_runtime = sum(map(itemgetter('run_time'), runs)) / len(runs)
job_data.append({'fail': amount_of_failures,
'pass': amount_of_success,
'mean_run_time': avg_runtime,
'job_name': job_name})
runs_by_time = dict(datetime=time)
runs_by_time['job_data'] = sorted(job_data, key=itemgetter('job_name'))
aggregated_runs.append(runs_by_time)
aggregated_runs.sort(key=itemgetter('datetime'))
return dict(timedelta=aggregated_runs)
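# Resulting shape (illustrative):
#   {'timedelta': [{'datetime': <time>,
#                   'job_data': [{'job_name': ..., 'pass': p, 'fail': f,
#                                 'mean_run_time': t}, ...]}, ...]}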
@app.route('/runs/key/<path:run_metadata_key>/<path:value>', methods=['GET'])
def get_runs_by_run_metadata_key(run_metadata_key, value):
run_metadata_key = parse.unquote(run_metadata_key)
value = parse.unquote(value)
start_date = _parse_datetimes(flask.request.args.get('start_date', None))
stop_date = _parse_datetimes(flask.request.args.get('stop_date', None))
datetime_resolution = flask.request.args.get('datetime_resolution', 'day')
if datetime_resolution not in ['sec', 'min', 'hour', 'day']:
message = ('Datetime resolution: %s, is not a valid'
' choice' % datetime_resolution)
status_code = 400
return abort(make_response(message, status_code))
with session_scope() as session:
runs = (api.get_time_series_runs_by_key_value(run_metadata_key,
value,
start_date,
stop_date,
session))
# prepare run_times to be consumed for producing 'numeric' data.
run_times = {}
for run_at, run_data in runs.items():
for run in run_data:
if run['fail'] > 0 or run['pass'] == 0:
continue
build_name = run['metadata']['build_name']
if run_at in run_times:
if build_name in run_times[run_at]:
run_times[run_at][build_name].append(run['run_time'])
else:
run_times[run_at][build_name] = [run['run_time']]
else:
run_times[run_at] = {build_name: [run['run_time']]}
# if there is more than one run with the same run_at time
# and build_name just average the results.
for run_at, run_time_data in run_times.items():
for build_name, times in run_time_data.items():
run_times[run_at][build_name] = numpy.mean(times)
numeric = run_aggregator.get_numeric_data(
run_times, datetime_resolution)
# Groups runs by metadata
group_by = "build_name"
runs_by_build_name = _group_runs_by_key(runs, group_by)
# Group runs by the chosen data_range.
# That does not apply when you choose 'sec' since runs are already
# grouped by it.
aggregated_runs = run_aggregator.RunAggregator(
runs_by_build_name).aggregate(datetime_resolution)
data = _aggregate_runs(aggregated_runs)
return jsonify({'numeric': numeric, 'data': data})
@app.route('/runs/key/<path:run_metadata_key>/<path:value>/recent',
methods=['GET'])
def get_recent_runs(run_metadata_key, value):
run_metadata_key = parse.unquote(run_metadata_key)
value = parse.unquote(value)
runs = _get_recent_runs_data(run_metadata_key, value)
return jsonify(runs)
@app.route('/runs/key/<path:run_metadata_key>/<path:value>/recent/detail',
methods=['GET'])
def get_recent_runs_detail(run_metadata_key, value):
run_metadata_key = parse.unquote(run_metadata_key)
value = parse.unquote(value)
runs = _get_recent_runs_data(run_metadata_key, value, detail=True)
return jsonify(runs)
def _get_recent_runs_data(run_metadata_key, value, detail=False):
num_runs = flask.request.args.get('num_runs', 10)
with session_scope() as session:
results = api.get_recent_runs_by_key_value_metadata(
run_metadata_key, value, num_runs, session)
runs = []
for result in results:
if detail:
run = result.to_dict()
else:
if result.passes > 0 and result.fails == 0:
status = 'success'
elif result.fails > 0:
status = 'fail'
else:
continue
run = {
'id': result.uuid,
'status': status,
'start_date': result.run_at.isoformat(),
'link': result.artifacts,
}
run_meta = api.get_run_metadata(result.uuid, session)
for meta in run_meta:
if meta.key == 'build_name':
run['build_name'] = meta.value
break
runs.append(run)
return runs
def _gen_feed(url, key, value):
title = 'Failures for %s: %s' % (key, value)
fg = feed.FeedGenerator()
fg.title(title)
fg.id(url)
fg.link(href=url, rel='self')
fg.description("The failed %s: %s tests feed" % (key, value))
fg.language('en')
return fg
@app.route('/runs/key/<path:run_metadata_key>/<path:value>/recent/rss',
methods=['GET'])
def get_recent_failed_runs_rss(run_metadata_key, value):
run_metadata_key = parse.unquote(run_metadata_key)
value = parse.unquote(value)
url = request.url
if run_metadata_key not in feeds:
feeds[run_metadata_key] = {value: _gen_feed(url,
run_metadata_key,
value)}
feeds["last runs"][run_metadata_key] = {value: None}
elif value not in feeds[run_metadata_key]:
feeds[run_metadata_key][value] = _gen_feed(url,
run_metadata_key,
value)
feeds["last runs"][run_metadata_key][value] = None
fg = feeds[run_metadata_key][value]
with session_scope() as session:
failed_runs = api.get_recent_failed_runs_by_run_metadata(
run_metadata_key, value,
start_date=feeds["last runs"][run_metadata_key][value],
session=session)
if failed_runs:
last_run = sorted([x.run_at for x in failed_runs])[-1]
if feeds["last runs"][run_metadata_key][value] == last_run:
return feeds[run_metadata_key][value].rss_str()
feeds["last runs"][run_metadata_key][value] = last_run
else:
count = api.get_runs_counts_by_run_metadata(
run_metadata_key, value, session=session)
if count == 0:
msg = 'No matching runs found with %s=%s' % (
run_metadata_key, value)
return abort(make_response(msg, 404))
for run in failed_runs:
meta = api.get_run_metadata(run.uuid, session=session)
failing_test_runs = api.get_failing_from_run(run.id,
session=session)
uuid = [x.value for x in meta if x.key == 'build_uuid'][0]
build_name = [x.value for x in meta if x.key == 'build_name'][0]
entry = fg.add_entry()
entry.id(uuid)
entry.title('Failed Run %s/%s' % (build_name, uuid[:7]))
entry.published(pytz.utc.localize(run.run_at))
entry.link({'href': run.artifacts, 'rel': 'alternate'})
metadata_url = rss_opts['frontend_url'] + '/#/' + parse.quote(
'g/%s/%s' % (run_metadata_key, value))
job_url = rss_opts['frontend_url'] + '/#/' + parse.quote(
'job/%s' % build_name)
content = '<ul>'
content += '<li><a href="%s">Metadata page</a></li>\n' % (
metadata_url)
content += '<li><a href="%s">Job Page</a></li>' % (job_url)
content += '</ul>'
content += '<h3>Failed tests</h3>'
content += '<ul>'
for failing_test_run in failing_test_runs:
content += '<li><a href="%s">%s</a></li>' % (
rss_opts['frontend_url'] + '/#/test/' +
failing_test_run.test.test_id,
failing_test_run.test.test_id)
content += '</ul>'
entry.description(content)
response = make_response(feeds[run_metadata_key][value].rss_str())
response.headers['Content-Type'] = 'application/xml; charset=utf-8'
return response
@app.route('/tests/recent/<string:status>', methods=['GET'])
def get_recent_test_status(status):
global region
if not region:
setup()
status = parse.unquote(status)
num_runs = flask.request.args.get('num_runs', 10)
bug_dict = {}
query_threads = []
def _populate_bug_dict(change_num, patch_num, short_uuid, run):
bug_dict[run] = elastic_recheck_cached(change_num, patch_num,
short_uuid)
@region.cache_on_arguments()
def _get_recent(status):
with session_scope() as session:
failed_runs = api.get_recent_failed_runs(num_runs, session)
job_names = {}
for run in failed_runs:
metadata = api.get_run_metadata(run, session=session)
short_uuid = None
change_num = None
patch_num = None
for meta in metadata:
if meta.key == 'build_short_uuid':
short_uuid = meta.value
elif meta.key == 'build_change':
change_num = meta.value
elif meta.key == 'build_patchset':
patch_num = meta.value
elif meta.key == 'build_name':
job_names[run] = meta.value
global classifier
if classifier:
# NOTE(mtreinish): If the required metadata fields
# aren't present skip ES lookup
if not short_uuid or not change_num or not patch_num:
continue
query_thread = threading.Thread(
target=_populate_bug_dict, args=(change_num,
patch_num,
short_uuid, run))
query_threads.append(query_thread)
query_thread.start()
test_runs = api.get_test_runs_by_status_for_run_ids(
status, failed_runs, session=session, include_run_id=True)
output = []
for run in test_runs:
run['start_time'] = run['start_time'].isoformat()
run['stop_time'] = run['stop_time'].isoformat()
run['job_name'] = job_names.get(run['uuid'])
output.append(run)
for thread in query_threads:
thread.join()
return {'test_runs': output, 'bugs': bug_dict}
results = _get_recent(status)
return jsonify(results)
def _periodic_refresh_cache(cache, status, creator, mutex):
def runner():
try:
value = creator()
cache.set(status, value)
finally:
mutex.release()
thread = threading.Thread(target=runner)
thread.start()
@app.route('/run/<string:run_id>/tests', methods=['GET'])
def get_tests_from_run(run_id):
run_id = parse.unquote(run_id)
with session_scope() as session:
db_tests = api.get_tests_from_run_id(run_id, session)
tests = [test.to_dict() for test in db_tests]
return jsonify({'tests': tests})
@app.route('/run/<string:run_id>/test_runs', methods=['GET'])
def get_run_test_runs(run_id):
run_id = parse.unquote(run_id)
with session_scope() as session:
db_test_runs = api.get_tests_run_dicts_from_run_id(run_id, session)
return jsonify(db_test_runs)
@app.route('/tests', methods=['GET'])
def get_tests():
with session_scope() as session:
db_tests = api.get_all_tests(session)
tests = [test.to_dict() for test in db_tests]
return jsonify({'tests': tests})
@app.route('/tests/prefix', methods=['GET'])
def get_test_prefixes():
with session_scope() as session:
return jsonify(api.get_test_prefixes(session))
@app.route('/tests/prefix/<path:prefix>', methods=['GET'])
def get_tests_by_prefix(prefix):
prefix = parse.unquote(prefix)
limit = flask.request.args.get('limit', 100)
offset = flask.request.args.get('offset', 0)
with session_scope() as session:
db_tests = api.get_tests_by_prefix(prefix, session,
limit=limit, offset=offset)
tests = [test.to_dict() for test in db_tests]
return jsonify({'tests': tests})
def _check_db_availability():
try:
global engine
result = engine.execute('SELECT now()').first()
if result is None:
return False
return True
except Exception:
return False
def _check_er_availability():
global es_url
global query_dir
if not classifier:
if not er:
health = 'NotInstalled'
elif not es_url or not query_dir:
health = 'NotConfigured'
else:
url = classifier.config.es_url
es = pyelasticsearch.ElasticSearch(url)
health = {'Configured': {'elastic-search': es.health()['status']}}
return health
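# Possible values produced above: 'NotInstalled', 'NotConfigured', or
# {'Configured': {'elastic-search': '<cluster status>'}}.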
@app.route('/status', methods=['GET'])
def get_status():
is_db_available = _check_db_availability()
is_er_available = _check_er_availability()
status = {'status': {
'availability': {
'database': is_db_available,
'elastic-recheck': is_er_available,
},
'version': version.VersionInfo(
'openstack_health').version_string_with_vcs()
}}
response = jsonify(status)
if not is_db_available:
response.status_code = 500
return response
def parse_command_line_args():
description = 'Starts the API service for openstack-health'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('config_file', type=str, nargs='?',
default='/etc/openstack-health.conf',
help='the path for the config file to be read.')
return parser.parse_args()
@app.route('/test_runs/<path:test_id>', methods=['GET'])
def get_test_runs_for_test(test_id):
test_id = parse.unquote(test_id)
start_date = _parse_datetimes(flask.request.args.get('start_date', None))
stop_date = _parse_datetimes(flask.request.args.get('stop_date', None))
datetime_resolution = flask.request.args.get('datetime_resolution', 'min')
if datetime_resolution not in ['sec', 'min', 'hour', 'day']:
message = ('Datetime resolution: %s, is not a valid'
' choice' % datetime_resolution)
status_code = 400
return abort(make_response(message, status_code))
bug_dict = {}
query_threads = []
def _populate_bug_dict(change_dict):
for run in change_dict:
change_num = change_dict[run]['change_num']
patch_num = change_dict[run]['patch_num']
short_uuid = change_dict[run]['short_uuid']
result = elastic_recheck_cached(change_num, patch_num,
short_uuid)
bug_dict[run] = result
@region.cache_on_arguments()
def _get_data(test_id, start_date, stop_date):
with session_scope() as session:
db_test_runs = api.get_test_runs_by_test_test_id(
test_id, session=session, start_date=start_date,
stop_date=stop_date)
if not db_test_runs:
# NOTE(mtreinish) if no data is returned from the DB just
# return an empty set response, the test_run_aggregator
# function assumes data is present.
return {'numeric': {}, 'data': {}, 'failed_runs': {}}
test_runs =\
test_run_aggregator.convert_test_runs_list_to_time_series_dict(
db_test_runs, datetime_resolution)
failed_run_ids = [
x.run_id for x in db_test_runs if x.status == 'fail']
failed_runs = api.get_runs_by_ids(failed_run_ids, session=session)
job_names = {}
providers = {}
failed_uuids = [x.uuid for x in failed_runs]
split_uuids = []
if len(failed_uuids) <= 10:
split_uuids = [[x] for x in failed_uuids]
else:
for i in range(0, len(failed_uuids), 10):
end = i + 10
split_uuids.append(failed_uuids[i:end])
for uuids in split_uuids:
change_dict = {}
for uuid in uuids:
metadata = api.get_run_metadata(uuid, session=session)
short_uuid = None
change_num = None
patch_num = None
for meta in metadata:
if meta.key == 'build_short_uuid':
short_uuid = meta.value
elif meta.key == 'build_change':
change_num = meta.value
elif meta.key == 'build_patchset':
patch_num = meta.value
elif meta.key == 'build_name':
job_names[uuid] = meta.value
elif meta.key == 'node_provider':
providers[uuid] = meta.value
# NOTE(mtreinish): If the required metadata fields
# aren't present skip ES lookup
if not short_uuid or not change_num or not patch_num:
continue
global classifier
if classifier:
change_dict[uuid] = {
'change_num': change_num,
'patch_num': patch_num,
'short_uuid': short_uuid,
}
query_thread = threading.Thread(
target=_populate_bug_dict, args=[change_dict])
query_threads.append(query_thread)
query_thread.start()
output = []
for thread in query_threads:
thread.join()
for run in failed_runs:
temp_run = {}
temp_run['provider'] = providers.get(run.uuid)
temp_run['job_name'] = job_names.get(run.uuid)
temp_run['run_at'] = run.run_at.isoformat()
temp_run['artifacts'] = run.artifacts
temp_run['bugs'] = bug_dict.get(run.uuid, [])
output.append(temp_run)
test_runs['failed_runs'] = output
return test_runs
results = _get_data(test_id, start_date, stop_date)
return jsonify(results)
def main():
global config
args = parse_command_line_args()
config = ConfigParser.ConfigParser()
config.read(args.config_file)
try:
host = config.get('default', 'host')
except ConfigParser.NoOptionError:
host = '127.0.0.1'
try:
port = config.getint('default', 'port')
except ConfigParser.NoOptionError:
port = 5000
app.run(debug=True, host=host, port=port)
if __name__ == '__main__':
main()
|
{"hexsha": "1909ab5da465dab7b98ac67379f30a34fce705ec", "size": 31556, "ext": "py", "lang": "Python", "max_stars_repo_path": "openstack_health/api.py", "max_stars_repo_name": "MountakBernotas/https-github.com-openstack-openstack-health", "max_stars_repo_head_hexsha": "9073777993eac43a2a9bfd9e341d6fd48ab15955", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openstack_health/api.py", "max_issues_repo_name": "MountakBernotas/https-github.com-openstack-openstack-health", "max_issues_repo_head_hexsha": "9073777993eac43a2a9bfd9e341d6fd48ab15955", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-27T15:13:24.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-19T05:58:16.000Z", "max_forks_repo_path": "openstack_health/api.py", "max_forks_repo_name": "afrittoli/openstack-health", "max_forks_repo_head_hexsha": "3ea4787c88792c5d130471e841750adfd5f4d32e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8369304556, "max_line_length": 79, "alphanum_fraction": 0.5927874255, "include": true, "reason": "import numpy", "num_tokens": 6575}
|
!! Check to see if the value exceeds the threshold
integer function user_exceeds_th(blockno,&
qval,qmin,qmax,quad, &
dx,dy,dz,xc,yc,zc,threshold, &
init_flag, is_ghost)
implicit none
double precision :: qval,qmin,qmax,threshold
double precision :: quad(-1:1,-1:1,-1:1)
double precision :: dx,dy, dz, xc, yc, zc
integer :: blockno, init_flag
logical(kind=4) :: is_ghost
integer :: refine
!! Ghost cells from patch in right half are in region x < 0.5, and so
!! would fail tagging criteria
if (is_ghost) then
!! Don't use this result
user_exceeds_th = -1
return
endif
!! Only refine in right half of domain
if (xc .gt. 0.5) then
refine = 1
else
refine = 0
endif
user_exceeds_th = refine
end function user_exceeds_th
|
{"hexsha": "85c0e847de7fb11652847eaeb2bdf3271476367f", "size": 927, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "applications/clawpack/advection/3d/swirl/user_exceeds_th.f90", "max_stars_repo_name": "ECLAIRWaveS/ForestClaw", "max_stars_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "applications/clawpack/advection/3d/swirl/user_exceeds_th.f90", "max_issues_repo_name": "ECLAIRWaveS/ForestClaw", "max_issues_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-08-02T09:52:43.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-02T14:16:23.000Z", "max_forks_repo_path": "applications/clawpack/advection/3d/swirl/user_exceeds_th.f90", "max_forks_repo_name": "ECLAIRWaveS/ForestClaw", "max_forks_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2647058824, "max_line_length": 74, "alphanum_fraction": 0.5631067961, "num_tokens": 252}
|
import numpy as np
import megengine as mge
from scipy.spatial.transform import Rotation
def np_dcm2euler(mats: np.ndarray, seq: str = "zyx", degrees: bool = True):
"""Converts rotation matrix to euler angles
Args:
mats: (B, 3, 3) containing the B rotation matricecs
seq: Sequence of euler rotations (default: "zyx")
degrees (bool): If true (default), will return in degrees instead of radians
Returns:
"""
eulers = []
for i in range(mats.shape[0]):
r = Rotation.from_matrix(mats[i])
eulers.append(r.as_euler(seq, degrees=degrees))
return np.stack(eulers)
def np_transform(g: np.ndarray, pts: np.ndarray):
""" Applies the SO3 transform
Args:
g: SO3 transformation matrix of size (B, 3, 3)
pts: Points to be transformed (B, N, 3)
Returns:
transformed points of size (B, N, 3)
"""
rot = g[..., :3, :3] # (3, 3)
transformed = pts[..., :3] @ np.swapaxes(rot, -1, -2)
return transformed
def np_inverse(g: np.ndarray):
"""Returns the inverse of the SE3 transform
Args:
g: ([B,] 3/4, 4) transform
Returns:
([B,] 3/4, 4) matrix containing the inverse
"""
rot = g[..., :3, :3] # (3, 3)
inv_rot = np.swapaxes(rot, -1, -2)
return inv_rot
def mge_dcm2euler(mats, seq, degrees=True):
mats = mats.numpy()
eulers = []
for i in range(mats.shape[0]):
r = Rotation.from_matrix(mats[i])
eulers.append(r.as_euler(seq, degrees=degrees))
return mge.tensor(np.stack(eulers))
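# Minimal usage sketch (shapes illustrative): identity rotations map to zero
# euler angles and leave points unchanged.
if __name__ == "__main__":
    mats = np.tile(np.eye(3), (2, 1, 1))  # batch of two identity rotations
    print(np_dcm2euler(mats))  # approximately [[0, 0, 0], [0, 0, 0]]
    pts = np.random.randn(2, 5, 3)
    print(np.allclose(np_transform(mats, pts), pts))  # True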
|
{"hexsha": "0119cfe56fefad520741136d2975f7c5f2480640", "size": 1569, "ext": "py", "lang": "Python", "max_stars_repo_path": "common/so3.py", "max_stars_repo_name": "megvii-research/OMNet", "max_stars_repo_head_hexsha": "3585d4d63da3606c6433ec34714df74ef7ad74d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-09-29T09:19:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T04:40:35.000Z", "max_issues_repo_path": "common/so3.py", "max_issues_repo_name": "megvii-research/FINet", "max_issues_repo_head_hexsha": "b78896f5afa1442c9963b1d8a6c4fde0f9ecc38a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2021-10-08T01:53:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-12T08:48:18.000Z", "max_forks_repo_path": "common/so3.py", "max_forks_repo_name": "megvii-research/FINet", "max_forks_repo_head_hexsha": "b78896f5afa1442c9963b1d8a6c4fde0f9ecc38a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-09-29T09:19:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-12T07:51:34.000Z", "avg_line_length": 24.1384615385, "max_line_length": 84, "alphanum_fraction": 0.6029318037, "include": true, "reason": "import numpy,from scipy", "num_tokens": 454}
|
# Copyright (c) 2022 Dai HBG
"""
This script tests the return of going long the top n stocks of a signal matrix.
Log
2022-01-21
- Limit-up (涨停板) stocks need to be filtered out
"""
import numpy as np
def top_n_tester(signal: np.array, ret: np.array, top: np.array, zdt_top: np.array, position_date_dic: dict,
order_code_dic: dict, s: int, e: int, n: int = 10):
    abs_ret = []  # absolute returns
    alpha_ret = []  # excess (alpha) returns
    log_dic = {}  # trade log
stocks = np.array([order_code_dic[i] for i in range(len(order_code_dic))])
for i in range(s, e):
date = position_date_dic[i]
se = top[i] & (~np.isnan(signal[i])) & zdt_top[i]
arg_sig = np.argsort(signal[i, se])
abs_ret.append(np.nanmean(ret[i, se][arg_sig[-n:]]))
alpha_ret.append(np.nanmean(ret[i, se][arg_sig[-n:]]) - np.nanmean(ret[i, se]))
log_dic[date] = {'stocks': stocks[se][arg_sig[-n:]], 'ret': ret[i, se][arg_sig[-n:]],
'sig': signal[i, se][arg_sig[-n:]]}
return log_dic, abs_ret, alpha_ret
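# Synthetic smoke test (all data hypothetical): 3 dates x 4 stocks, going long
# the top-2 signals among tradable names.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    sig = rng.standard_normal((3, 4))
    ret = rng.standard_normal((3, 4)) * 0.01
    top = np.ones((3, 4), dtype=bool)   # investable-universe mask
    zdt = np.ones((3, 4), dtype=bool)   # limit-up filter mask
    dates = {i: '2022-01-2%d' % (1 + i) for i in range(3)}
    codes = {i: 'stock_%d' % i for i in range(4)}
    log_dic, abs_ret, alpha_ret = top_n_tester(sig, ret, top, zdt, dates,
                                               codes, s=0, e=3, n=2)
    print(abs_ret, alpha_ret)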
|
{"hexsha": "01bcfdbd2732575e454e2d717a044e9904e66020", "size": 956, "ext": "py", "lang": "Python", "max_stars_repo_path": "QBG/Tester/tools/top_n_tester.py", "max_stars_repo_name": "GYMS-PKU/Daily-Frequency-Quant", "max_stars_repo_head_hexsha": "808eda9930efecff04ecf98abf617404cadd0003", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-11-21T04:35:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T09:19:53.000Z", "max_issues_repo_path": "QBG/Tester/tools/top_n_tester.py", "max_issues_repo_name": "GYMS-PKU/Daily-Frequency-Quant", "max_issues_repo_head_hexsha": "808eda9930efecff04ecf98abf617404cadd0003", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "QBG/Tester/tools/top_n_tester.py", "max_forks_repo_name": "GYMS-PKU/Daily-Frequency-Quant", "max_forks_repo_head_hexsha": "808eda9930efecff04ecf98abf617404cadd0003", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-10-03T00:00:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T09:02:00.000Z", "avg_line_length": 29.875, "max_line_length": 108, "alphanum_fraction": 0.5847280335, "include": true, "reason": "import numpy", "num_tokens": 344}
|
using OVERTVerify
using LazySets
using Dates
using JLD2
function run_query(query_number, avoid_set, controller_name)
controller = "nnet_files/jmlr/tora_"*controller_name*"_controller.nnet"
println("Controller: ", controller_name)
query = OvertQuery(
Tora, # problem
controller, # network file
Id(), # last layer activation layer Id()=linear, or ReLU()=relu
"MIP", # query solver, "MIP" or "ReluPlex"
15, # ntime
0.1, # dt
-1, # N_overt
)
input_set = Hyperrectangle(low=[0.6, -0.7, -0.4, 0.5], high=[0.7, -0.6, -0.3, 0.6])
    # The avoid_set argument encodes a single half-space constraint;
    # for query 1 below that constraint is x1 <= -2.
t1 = Dates.time()
SATus, vals, stats = symbolic_satisfiability(query, input_set, avoid_set)
t2 = Dates.time()
dt = (t2-t1)
println("dt is $dt")
JLD2.@save "src/examples/jmlr/data/tora_satisfiability_"*string(controller_name)*"_controller_data_q"*string(query_number)*".jld2" query input_set avoid_set SATus vals stats dt query_number controller_name
return SATus
end
function run_tora_satisfiability(;controller_name="smallest")
# query 1
avoid_set1 = HalfSpace([1., 0., 0., 0.], -2.) # checks if x1 <= -2
SATus1 = run_query(1, avoid_set1, controller_name)
if SATus1 == "unsat" # early stopping, potentially, if first query is sat
# query 2
avoid_set2 = HalfSpace([-1., 0., 0., 0.], -2.)# checks if x1 >= 2
SATus2 = run_query(2, avoid_set2, controller_name)
open("src/examples/jmlr/data/tora_satisfiability_"*string(controller_name)*".txt", "w") do io
write(io, "SATus1 = $SATus1 \n SATus2 = $SATus2")
end;
end
end
run_tora_satisfiability(controller_name=ARGS[1])
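# Invocation sketch (the controller name comes from ARGS[1] above; "smallest"
# is the default used by run_tora_satisfiability), e.g.:
#   julia src/examples/jmlr/tora_satisfiability.jl smallest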
|
{"hexsha": "878af8360ee0e5c8cdc52b1272347dc2b744a6ea", "size": 1717, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/examples/jmlr/tora_satisfiability.jl", "max_stars_repo_name": "sisl/OvertVerify.jl", "max_stars_repo_head_hexsha": "c76fe7703b5068cbdc91e058d815f1fcbda44d70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-12T04:51:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-12T04:51:14.000Z", "max_issues_repo_path": "src/examples/jmlr/tora_satisfiability.jl", "max_issues_repo_name": "sisl/OVERTVerify.jl", "max_issues_repo_head_hexsha": "c76fe7703b5068cbdc91e058d815f1fcbda44d70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-24T22:02:21.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-26T15:18:27.000Z", "max_forks_repo_path": "src/examples/jmlr/tora_satisfiability.jl", "max_forks_repo_name": "sisl/OvertVerify.jl", "max_forks_repo_head_hexsha": "c76fe7703b5068cbdc91e058d815f1fcbda44d70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0192307692, "max_line_length": 206, "alphanum_fraction": 0.6983110076, "num_tokens": 562}
|
[STATEMENT]
lemma Says_Nonce_not_used_guard: "[| Says A' B \<lbrace>A'',r,I,L\<rbrace> \<in> set evs;
Nonce n \<notin> used evs |] ==> L \<in> guard n Ks"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Says A' B \<lbrace>A'', r, I, L\<rbrace> \<in> set evs; Nonce n \<notin> used evs\<rbrakk> \<Longrightarrow> L \<in> guard n Ks
[PROOF STEP]
by (drule not_used_not_parts, auto)
|
{"llama_tokens": 175, "file": null, "length": 1}
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Reshape Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.bijectors import bijector_test_util
class _ReshapeBijectorTest(object):
"""Base class for testing the reshape transformation.
Methods defined in this class call a method self.build_shapes() that
is implemented by subclasses defined below, returning respectively
ReshapeBijectorTestStatic: static shapes,
ReshapeBijectorTestDynamic: shape placeholders of known ndims, and
ReshapeBijectorTestDynamicNdims: shape placeholders of unspecified ndims,
so that each test in this base class is automatically run over all
three cases. The subclasses also implement assertRaisesError to test
for either Python exceptions (in the case of static shapes) or
TensorFlow op errors (dynamic shapes).
"""
def setUp(self):
self._rng = np.random.RandomState(42)
def testBijector(self):
"""Do a basic sanity check of forward, inverse, jacobian."""
expected_x = np.random.randn(4, 3, 2)
expected_y = np.reshape(expected_x, [4, 6])
with self.test_session() as sess:
shape_in, shape_out = self.build_shapes([3, 2], [6,])
bijector = tfb.Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
fldj_,
ildj_,
fest_,
iest_) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.forward_log_det_jacobian(expected_x, event_ndims=2),
bijector.inverse_log_det_jacobian(expected_y, event_ndims=2),
bijector.forward_event_shape_tensor(expected_x.shape),
bijector.inverse_event_shape_tensor(expected_y.shape),
))
self.assertEqual("reshape", bijector.name)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
self.assertAllClose(0., fldj_, rtol=1e-6, atol=0)
self.assertAllClose(0., ildj_, rtol=1e-6, atol=0)
# Test that event_shape_tensors match fwd/inv result shapes.
self.assertAllEqual(y_.shape, fest_)
self.assertAllEqual(x_.shape, iest_)
def testEventShapeTensor(self):
"""Test event_shape_tensor methods when even ndims may be dynamic."""
shape_in_static = [2, 3]
shape_out_static = [6,]
shape_in, shape_out = self.build_shapes(shape_in_static, shape_out_static)
bijector = tfb.Reshape(
event_shape_out=shape_out, event_shape_in=shape_in, validate_args=True)
# using the _tensor methods, we should always get a fully-specified
# result since these are evaluated at graph runtime.
with self.test_session() as sess:
(shape_out_,
shape_in_) = sess.run((
bijector.forward_event_shape_tensor(shape_in),
bijector.inverse_event_shape_tensor(shape_out),
))
self.assertAllEqual(shape_out_static, shape_out_)
self.assertAllEqual(shape_in_static, shape_in_)
def testScalarReshape(self):
"""Test reshaping to and from a scalar shape ()."""
expected_x = np.random.randn(4, 3, 1)
expected_y = np.reshape(expected_x, [4, 3])
expected_x_scalar = np.random.randn(1,)
expected_y_scalar = expected_x_scalar[0]
shape_in, shape_out = self.build_shapes([], [1,])
with self.test_session() as sess:
bijector = tfb.Reshape(
event_shape_out=shape_in,
event_shape_in=shape_out,
validate_args=True)
(x_,
y_,
x_scalar_,
y_scalar_
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.inverse(expected_y_scalar),
bijector.forward(expected_x_scalar),
))
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
self.assertAllClose(expected_y_scalar, y_scalar_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x_scalar, x_scalar_, rtol=1e-6, atol=0)
def testValidButNonMatchingInputOpError(self):
x = np.random.randn(4, 3, 2)
with self.test_session() as sess:
shape_in, shape_out = self.build_shapes([2, 3], [1, 6, 1,])
bijector = tfb.Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
      # Here we pass in a tensor (x) whose shape is compatible with
      # the output shape, so tf.reshape will throw no error, but which
      # doesn't match the expected input shape.
with self.assertRaisesError("Input `event_shape` does not match"):
sess.run(bijector.forward(x))
def testValidButNonMatchingInputPartiallySpecifiedOpError(self):
x = np.random.randn(4, 3, 2)
with self.test_session() as sess:
shape_in, shape_out = self.build_shapes([2, -1], [1, 6, 1,])
bijector = tfb.Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError("Input `event_shape` does not match"):
sess.run(bijector.forward(x))
# pylint: disable=invalid-name
def _testInputOutputMismatchOpError(self, expected_error_message):
x1 = np.random.randn(4, 2, 3)
x2 = np.random.randn(4, 1, 1, 5)
with self.test_session() as sess:
shape_in, shape_out = self.build_shapes([2, 3], [1, 1, 5])
bijector = tfb.Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(expected_error_message):
sess.run(bijector.forward(x1))
with self.assertRaisesError(expected_error_message):
sess.run(bijector.inverse(x2))
# pylint: enable=invalid-name
def testOneShapePartiallySpecified(self):
expected_x = np.random.randn(4, 6)
expected_y = np.reshape(expected_x, [4, 2, 3])
with self.test_session() as sess:
# one of input/output shapes is partially specified
shape_in, shape_out = self.build_shapes([-1,], [2, 3])
bijector = tfb.Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
))
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def testBothShapesPartiallySpecified(self):
expected_x = np.random.randn(4, 2, 3)
expected_y = np.reshape(expected_x, [4, 3, 2])
with self.test_session() as sess:
shape_in, shape_out = self.build_shapes([-1, 3], [-1, 2])
bijector = tfb.Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
))
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def testDefaultVectorShape(self):
expected_x = np.random.randn(4, 4)
expected_y = np.reshape(expected_x, [4, 2, 2])
with self.test_session() as sess:
_, shape_out = self.build_shapes([-1,], [-1, 2])
bijector = tfb.Reshape(shape_out, validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
))
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def build_shapes(self, *args, **kwargs):
raise NotImplementedError("Subclass failed to implement `build_shapes`.")
class ReshapeBijectorTestStatic(tf.test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
return shape_in, shape_out
def assertRaisesError(self, msg):
return self.assertRaisesRegexp(Exception, msg)
def testEventShape(self):
shape_in_static = tf.TensorShape([2, 3])
shape_out_static = tf.TensorShape([6])
bijector = tfb.Reshape(
event_shape_out=shape_out_static,
event_shape_in=shape_in_static,
validate_args=True)
# Test that forward_ and inverse_event_shape are correct when
# event_shape_in/_out are statically known, even when the input shapes
# are only partially specified.
self.assertEqual(
bijector.forward_event_shape(tf.TensorShape([4, 2, 3])).as_list(),
[4, 6])
self.assertEqual(
bijector.forward_event_shape(tf.TensorShape([None, 2, 3])).as_list(),
[None, 6])
self.assertEqual(
bijector.inverse_event_shape(tf.TensorShape([4, 6])).as_list(),
[4, 2, 3])
self.assertEqual(
bijector.inverse_event_shape(tf.TensorShape([None, 6])).as_list(),
[None, 2, 3])
# If the input shape is totally unknown, there's nothing we can do!
self.assertIsNone(
bijector.forward_event_shape(tf.TensorShape(None)).ndims)
def testBijectiveAndFinite(self):
x = np.random.randn(4, 2, 3)
y = np.reshape(x, [4, 1, 2, 3])
bijector = tfb.Reshape(
event_shape_in=[2, 3], event_shape_out=[1, 2, 3], validate_args=True)
bijector_test_util.assert_bijective_and_finite(
bijector, x, y, eval_func=self.evaluate, event_ndims=2, rtol=1e-6,
atol=0)
def testMultipleUnspecifiedDimensionsOpError(self):
shape_in, shape_out = self.build_shapes([2, 3], [4, -1, -1,])
with self.assertRaises(ValueError):
tfb.Reshape(event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
def testInvalidDimensionsOpError(self):
shape_in, shape_out = self.build_shapes([2, 3], [1, 2, -2,])
with self.assertRaises(ValueError):
tfb.Reshape(event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Cannot reshape a tensor with")
class ReshapeBijectorTestDynamic(tf.test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in = np.array(shape_in, np.int32)
shape_out = np.array(shape_out, np.int32)
return (tf.placeholder_with_default(shape_in, shape=[len(shape_in)]),
tf.placeholder_with_default(shape_out, shape=[len(shape_out)]))
def assertRaisesError(self, msg):
return self.assertRaisesOpError(msg)
def testEventShape(self):
event_shape_in, event_shape_out = self.build_shapes([2, 3], [6])
bijector = tfb.Reshape(
event_shape_out=event_shape_out,
event_shape_in=event_shape_in,
validate_args=True)
self.assertEqual(
bijector.forward_event_shape(tf.TensorShape([4, 2, 3])).as_list(),
[4, None])
self.assertEqual(
bijector.forward_event_shape(tf.TensorShape([None, 2, 3])).as_list(),
[None, None])
self.assertEqual(
bijector.inverse_event_shape(tf.TensorShape([4, 6])).as_list(),
[4, None, None])
self.assertEqual(
bijector.inverse_event_shape(tf.TensorShape([None, 6])).as_list(),
[None, None, None])
# If the input shape is totally unknown, there's nothing we can do!
self.assertIsNone(
bijector.forward_event_shape(tf.TensorShape(None)).ndims)
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Input to reshape is a tensor with")
def testMultipleUnspecifiedDimensionsOpError(self):
shape_in, shape_out = self.build_shapes([2, 3], [4, -1, -1,])
bijector = tfb.Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.test_session() as sess:
with self.assertRaisesError(
"elements must have at most one `-1`."):
sess.run(bijector.forward_event_shape_tensor(shape_in))
def testInvalidDimensionsOpError(self):
shape_in, shape_out = self.build_shapes([2, 3], [1, 2, -2,])
bijector = tfb.Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.test_session() as sess:
with self.assertRaisesError(
"elements must be either positive integers or `-1`."):
sess.run(bijector.forward_event_shape_tensor(shape_in))
class ReshapeBijectorTestDynamicNdims(tf.test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in = np.array(shape_in, np.int32)
shape_out = np.array(shape_out, np.int32)
return (tf.placeholder_with_default(shape_in, shape=None),
tf.placeholder_with_default(shape_out, shape=None))
def assertRaisesError(self, msg):
return self.assertRaisesOpError(msg)
def testEventShape(self):
event_shape_in, event_shape_out = self.build_shapes([2, 3], [6])
bijector = tfb.Reshape(
event_shape_out=event_shape_out,
event_shape_in=event_shape_in,
validate_args=True)
# forward_ and inverse_event_shape can only be totally unknown in this case.
self.assertIsNone(
bijector.forward_event_shape(tf.TensorShape([4, 2, 3])).ndims)
self.assertIsNone(
bijector.forward_event_shape(tf.TensorShape([None, 2, 3])).ndims)
self.assertIsNone(
bijector.inverse_event_shape(tf.TensorShape([4, 6])).ndims)
self.assertIsNone(
bijector.inverse_event_shape(tf.TensorShape([None, 6])).ndims)
self.assertIsNone(
bijector.forward_event_shape(tf.TensorShape(None)).ndims)
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Input to reshape is a tensor with")
def testMultipleUnspecifiedDimensionsOpError(self):
shape_in, shape_out = self.build_shapes([2, 3], [4, -1, -1,])
bijector = tfb.Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.test_session() as sess:
with self.assertRaisesError(
"elements must have at most one `-1`."):
sess.run(bijector.forward_event_shape_tensor(shape_in))
def testInvalidDimensionsOpError(self):
shape_in, shape_out = self.build_shapes([2, 3], [1, 2, -2,])
bijector = tfb.Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.test_session() as sess:
with self.assertRaisesError(
"elements must be either positive integers or `-1`."):
sess.run(bijector.forward_event_shape_tensor(shape_in))
if __name__ == "__main__":
tf.test.main()
|
{"hexsha": "f47353ee0b1fe8aec6d1c17bc99825bf21284925", "size": 15461, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow_probability/python/bijectors/reshape_test.py", "max_stars_repo_name": "souravsingh/probability", "max_stars_repo_head_hexsha": "0519b63094fdaa4e326357a0cdff056d5ef76cd8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-17T14:48:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-17T14:48:47.000Z", "max_issues_repo_path": "tensorflow_probability/python/bijectors/reshape_test.py", "max_issues_repo_name": "souravsingh/probability", "max_issues_repo_head_hexsha": "0519b63094fdaa4e326357a0cdff056d5ef76cd8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow_probability/python/bijectors/reshape_test.py", "max_forks_repo_name": "souravsingh/probability", "max_forks_repo_head_hexsha": "0519b63094fdaa4e326357a0cdff056d5ef76cd8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9880382775, "max_line_length": 80, "alphanum_fraction": 0.6818446414, "include": true, "reason": "import numpy", "num_tokens": 3952}
|
program t
! from Tom Henderson
implicit none
character (len=100) :: lawyers
integer :: x, y, zzz
x = 2
y = 1
zzz = x +
ay
print *, 'zzz = ',zzz
zz
az = x *
ay
print *, 'zzz = ',zzz
zzz = x -
ay
print *, 'zzz = ',zzz
zzz = x -
ay
print *, 'zzz = ',zzz
lawyers = 'Jones & Clay &
aDavis'
print *,'LAWYERS_1 = <',trim(lawyers),'>'
lawyers = 'Jones! &! Clay! &!
aDavis!'
print *,'LAWYERS_2 = <',trim(lawyers),'>'
lawyers = 'Jones & Clay &
aDavis'
print *,'LAWYERS_4 = <',trim(lawyers),'>'
lawyers = 'Jones & Clay &
aDavis
a'
print *,'LAWYERS_5 = <',trim(lawyers),'>'
lawyers = 'Jones & ''Clay'' &
aDavis'
print *,'LAWYERS_6 = <',trim(lawyers),'>'
lawyers = 'Jones & ""Clay"" &
aDavis'
print *,'LAWYERS_7 = <',trim(lawyers),'>'
lawyers = "Jones & ""Clay"" &
aDavis"
print *,'LAWYERS_8 = <',trim(lawyers),'>'
lawyers = "Jones & ''Clay'' &
aDavis"
print *,'LAWYERS_9 = <',trim(lawyers),'>'
lawyers = 'Jones & Clay &
a
aDavis'
print *,'LAWYERS_10 = <',trim(lawyers),'>'
lawyers = 'Jones & Clay &
a
aDavis'
print *,'LAWYERS_11 = <',trim(lawyers),'>'
lawyers = ! a comment
a'Jones & Clay & Da
avis'
print *,'LAWYERS_12 = <',trim(lawyers),'>'
lawyers = 'Jones & Clay & Davis'
print *,'LAWYERS_13 = <<',trim(lawyers),">
a>"
end program t
|
{"hexsha": "5f191e4fba827684a2a75acb734df1d718cdc3e8", "size": 1705, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "tests/t0212x/t.f", "max_stars_repo_name": "maddenp/ppp", "max_stars_repo_head_hexsha": "81956c0fc66ff742531817ac9028c4df940cc13e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-08-13T16:32:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-21T12:37:58.000Z", "max_issues_repo_path": "tests/t0212x/t.f", "max_issues_repo_name": "maddenp/ppp", "max_issues_repo_head_hexsha": "81956c0fc66ff742531817ac9028c4df940cc13e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/t0212x/t.f", "max_forks_repo_name": "maddenp/ppp", "max_forks_repo_head_hexsha": "81956c0fc66ff742531817ac9028c4df940cc13e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2015-07-30T17:02:27.000Z", "max_forks_repo_forks_event_max_datetime": "2015-08-03T16:29:41.000Z", "avg_line_length": 21.3125, "max_line_length": 48, "alphanum_fraction": 0.4304985337, "num_tokens": 574}
|
'''
This file is designed to determine how often the market index is in a relative bear or bull market state.
This file runs through every daily price of the S&P 500 Index and applies the following steps:
1- Look back over the past 180 days
2- Set two thresholds: one slightly below the max of the last 180 days' prices, and one
slightly above the min of the last 180 days' prices
3- For each of these 180 days, add to the following counts based on that day's price relative to the thresholds:
stat0- price is under the bottom threshold (bear state)
stat1- price is between thresholds after coming from below the bottom threshold (moderate bull state)
stat2- price is above the top threshold (bull state)
statN1- price is between thresholds after coming from above the top threshold (moderate bear state)
Finally, the file plots the price series along with the associated thresholds, and prints the number of times
prices were in each respective status, along with the percentage of the total that each status accounted for.
'''
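# Concretely, as implemented below: upper = 0.95 * max(price[t-180:t]) and
# lower = 1.05 * min(price[t-180:t]); a day is in state 2 above `upper`,
# state 0 below `lower`, and state 1 or -1 between the two depending on
# which side the price last crossed from.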
import numpy as np
import pandas as pd
import matplotlib.pyplot as pyplot
data= pd.read_csv("C:/Users/Ben/iCloudDrive/Workspace/XiaResearch/TrendTrading/ProbModel/Data/SPY19872017.csv")
prices= data['Price']
uppers= []
lowers= []
stat1=0
stat2=0
statN1=0
stat0=0
limit= 180
prevStat=1
for i in range(0, len(prices)-limit):
if i < limit:
uppers.append(prices[i])
lowers.append(prices[i])
else:
upper=(np.max(prices[(i-limit+1):i]))*0.95
lower=(np.min(prices[(i-limit+1):i]))*1.05
current= prices[i]
uppers.append(upper)
lowers.append(lower)
for j in range(0, limit):
if prices[i+j] > upper:
stat2 += 1
prevStat=2
elif prices[i+j] < lower:
stat0 += 1
prevStat=0
elif lower <= prices[i+j] <= upper:
if prevStat==0 or prevStat==1:
stat1 += 1
prevStat=1
elif prevStat==2 or prevStat==-1:
statN1 += 1
prevStat=-1
val= len(prices)-limit
totalChecks= stat0+stat1+statN1+stat2
prices= prices[0:val]
print('Instances where t was in 0 status:',stat0, " ", 100*(stat0/totalChecks), "%")
print('Instances where t was in 1 status:',stat1, " ", 100*(stat1/totalChecks), "%")
print('Instances where t was in 2 status:',stat2, " ", 100*(stat2/totalChecks), "%")
print('Instances where t was in -1 status:',statN1, " ", 100*(statN1/totalChecks), "%")
print(totalChecks)
pyplot.plot(prices.index, prices.values, label="prices")
pyplot.plot(prices.index, uppers, label="upper")
pyplot.plot(prices.index, lowers, label="lower")
pyplot.legend(loc="best")
pyplot.show()
|
{"hexsha": "47db8df46bfc97e0faa5a1523f7811c4ca25fa26", "size": 2894, "ext": "py", "lang": "Python", "max_stars_repo_path": "TrendTrading/ProbModel/CheckScripts/comprehensiveStatusChecks.py", "max_stars_repo_name": "benjabee10/WKUResearch", "max_stars_repo_head_hexsha": "5cc384c0e0c1afc82c38a9e6eb63b80c85af7c97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TrendTrading/ProbModel/CheckScripts/comprehensiveStatusChecks.py", "max_issues_repo_name": "benjabee10/WKUResearch", "max_issues_repo_head_hexsha": "5cc384c0e0c1afc82c38a9e6eb63b80c85af7c97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TrendTrading/ProbModel/CheckScripts/comprehensiveStatusChecks.py", "max_forks_repo_name": "benjabee10/WKUResearch", "max_forks_repo_head_hexsha": "5cc384c0e0c1afc82c38a9e6eb63b80c85af7c97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1182795699, "max_line_length": 125, "alphanum_fraction": 0.6351071182, "include": true, "reason": "import numpy", "num_tokens": 747}
|
/***********************************************************************
* created: Sun May 25 2014
* author: Timotei Dolean <timotei21@gmail.com>
*************************************************************************/
/***************************************************************************
* Copyright (C) 2004 - 2014 Paul D Turner & The CEGUI Development Team
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
***************************************************************************/
#include <boost/test/unit_test.hpp>
#include "ItemModelStub.h"
#include "CEGUI/Font.h"
#include "CEGUI/WindowManager.h"
#include "CEGUI/widgets/Scrollbar.h"
// Yup. We need this in order to easily inject/call event handlers without having
// to go through GUIContext, or inherit from widgets in order to test them.
#define protected public
#include "CEGUI/views/ListView.h"
using namespace CEGUI;
//----------------------------------------------------------------------------//
static const String ITEM1 = "ITEM 1";
static const String ITEM2 = "ITEM 2";
static const String ITEM3 = "ITEM 3";
static const String ITEM_WITH_6LINES = "THIS\nIS\nA\nMULTILINE\nLINE\n";
//----------------------------------------------------------------------------//
struct ListViewFixture
{
ListViewFixture()
{
System::getSingleton().notifyDisplaySizeChanged(Sizef(100, 100));
view = static_cast<ListView*>(WindowManager::getSingleton().createWindow("TaharezLook/ListView", "lv"));
view->setWindowRenderer("Core/ListView");
view->setModel(&model);
font_height = view->getFont()->getFontHeight();
}
ListView* view;
ItemModelStub model;
float font_height;
};
BOOST_FIXTURE_TEST_SUITE(ListViewTestSuite, ListViewFixture)
//----------------------------------------------------------------------------//
BOOST_AUTO_TEST_CASE(IndexAt_NoItems_ReturnsInvalidIndex)
{
ModelIndex index = view->indexAt(glm::vec2(1, 0));
BOOST_REQUIRE(index.d_modelData == nullptr);
}
//----------------------------------------------------------------------------//
BOOST_AUTO_TEST_CASE(IndexAt_PositionInsideSingleObject_ReturnsCorrectIndex)
{
model.d_items.push_back(ITEM1);
ModelIndex index = view->indexAt(glm::vec2(1, font_height / 2.0f));
BOOST_REQUIRE(index.d_modelData != nullptr);
BOOST_REQUIRE_EQUAL(ITEM1, *(static_cast<String*>(index.d_modelData)));
}
//----------------------------------------------------------------------------//
BOOST_AUTO_TEST_CASE(IndexAt_PositionInsideSingleObjectListWithOffset_ReturnsCorrectIndex)
{
float x_offset = 500;
float y_offset = 354;
view->setPosition(UVector2(cegui_absdim(x_offset), cegui_absdim(y_offset)));
model.d_items.push_back(ITEM1);
ModelIndex index = view->indexAt(glm::vec2(
x_offset + 1,
y_offset + font_height / 2.0f));
BOOST_REQUIRE(index.d_modelData != nullptr);
BOOST_REQUIRE_EQUAL(ITEM1, *(static_cast<String*>(index.d_modelData)));
}
// Test fails so it's disabled. This should be fixed!
#if 0
//----------------------------------------------------------------------------//
BOOST_AUTO_TEST_CASE(IndexAt_PositionInsideSingleObjectListWithScrollbar_ReturnsCorrectIndex)
{
for (std::int32_t i = 0; i < 100; ++i)
model.d_items.push_back(" item .." + PropertyHelper<std::int32_t>::toString(i));
model.d_items.push_back(ITEM1);
view->setSize(USize(cegui_absdim(100), cegui_absdim(font_height * 10)));
view->prepareForRender();
view->getVertScrollbar()->setUnitIntervalScrollPosition(1.0f);
ModelIndex index = view->indexAt(glm::vec2(1, 9 * font_height + font_height / 2.0f));
BOOST_REQUIRE(index.d_modelData != nullptr);
BOOST_REQUIRE_EQUAL(ITEM1, *(static_cast<String*>(index.d_modelData)));
}
#endif
//----------------------------------------------------------------------------//
BOOST_AUTO_TEST_CASE(IndexAt_PositionOutsideSingleObject_ReturnsInvalidIndex)
{
model.d_items.push_back(ITEM1);
ModelIndex index = view->indexAt(glm::vec2(1, font_height * 2));
BOOST_REQUIRE(index.d_modelData == nullptr);
}
//----------------------------------------------------------------------------//
BOOST_AUTO_TEST_CASE(IndexAt_PositionInsideSecondObject_ReturnsCorrectIndex)
{
model.d_items.push_back(ITEM1);
model.d_items.push_back(ITEM2);
ModelIndex index = view->indexAt(glm::vec2(1, font_height * 2));
BOOST_REQUIRE(index.d_modelData != nullptr);
BOOST_REQUIRE_EQUAL(ITEM2, *(static_cast<String*>(index.d_modelData)));
}
//----------------------------------------------------------------------------//
BOOST_AUTO_TEST_CASE(SetSelectedItem_InitialSelection_SelectsFirstObject)
{
model.d_items.push_back(ITEM1);
model.d_items.push_back(ITEM2);
bool selected = view->setSelectedIndex(ModelIndex(&model.d_items.at(0)));
view->prepareForRender();
BOOST_REQUIRE(selected);
BOOST_REQUIRE(view->getItems().at(0)->d_isSelected);
}
//----------------------------------------------------------------------------//
BOOST_AUTO_TEST_CASE(SetSelectedItem_SecondSelection_SelectsSecondObject)
{
model.d_items.push_back(ITEM1);
model.d_items.push_back(ITEM2);
view->setSelectedIndex(ModelIndex(&model.d_items.at(0)));
view->prepareForRender();
bool selected = view->setSelectedIndex(ModelIndex(&model.d_items.at(1)));
view->prepareForRender();
BOOST_REQUIRE(selected);
BOOST_REQUIRE(!view->getItems().at(0)->d_isSelected);
BOOST_REQUIRE(view->getItems().at(1)->d_isSelected);
}
//----------------------------------------------------------------------------//
BOOST_AUTO_TEST_CASE(ItemAdded_ProperSelectionIsPersisted)
{
model.d_items.push_back(ITEM1);
model.d_items.push_back(ITEM2);
view->setSelectedIndex(ModelIndex(&model.d_items.at(1)));
view->prepareForRender();
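    // Inserting a new item before the selected one shifts the selected item
    // from index 1 to index 2; the selection must follow the item, not the index.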
model.d_items.insert(model.d_items.begin() + 1, 1, ITEM3);
model.notifyChildrenAdded(model.getRootIndex(), 1, 1);
view->prepareForRender();
BOOST_REQUIRE(!view->getItems().at(0)->d_isSelected);
BOOST_REQUIRE(!view->getItems().at(1)->d_isSelected);
BOOST_REQUIRE(view->getItems().at(2)->d_isSelected);
}
//----------------------------------------------------------------------------//
BOOST_AUTO_TEST_CASE(ItemRemoved_RenderingHeightIsUpdated)
{
model.d_items.push_back(ITEM1);
model.d_items.push_back(ITEM2);
model.d_items.push_back(ITEM3);
view->prepareForRender();
model.notifyChildrenWillBeRemoved(model.getRootIndex(), 0, 3);
model.d_items.clear();
model.notifyChildrenRemoved(model.getRootIndex(), 0, 3);
view->prepareForRender();
BOOST_REQUIRE(view->getRenderedTotalHeight() <= 0.01f);
}
//----------------------------------------------------------------------------//
BOOST_AUTO_TEST_CASE(ItemRemoved_NothingIsSelectedAndRenderingHeightIsUpdated)
{
model.d_items.push_back(ITEM1);
model.d_items.push_back(ITEM2);
model.d_items.push_back(ITEM3);
view->setSelectedIndex(ModelIndex(&model.d_items.at(1)));
view->prepareForRender();
float itemSize = view->getRenderedTotalHeight() / 3;
model.notifyChildrenWillBeRemoved(model.getRootIndex(), 1, 1);
model.d_items.erase(model.d_items.begin() + 1);
model.notifyChildrenRemoved(model.getRootIndex(), 1, 1);
view->prepareForRender();
BOOST_REQUIRE(view->getItems().size() == 2);
BOOST_REQUIRE(!view->getItems().at(0)->d_isSelected);
BOOST_REQUIRE(!view->getItems().at(1)->d_isSelected);
BOOST_REQUIRE(view->getRenderedTotalHeight() <= (itemSize * 2 + 0.01f));
}
//----------------------------------------------------------------------------//
BOOST_AUTO_TEST_CASE(ItemNameChanged_UpdatesRenderedString)
{
model.d_items.push_back(ITEM1);
view->prepareForRender();
BOOST_CHECK_EQUAL(1, view->getItems().at(0)->d_string.getLineCount());
model.notifyChildrenDataWillChange(model.getRootIndex(), 0, 1);
model.d_items.at(0) = ITEM_WITH_6LINES;
model.notifyChildrenDataChanged(model.getRootIndex(), 0, 1);
view->prepareForRender();
BOOST_REQUIRE_EQUAL(6, view->getItems().at(0)->d_string.getLineCount());
}
//----------------------------------------------------------------------------//
void triggerSelectRangeEvent(glm::vec2 position, ItemView* view)
{
SemanticEventArgs args(view);
args.d_semanticValue = SemanticValue::SelectRange;
view->getGUIContext().getCursor().setPosition(position);
view->onSemanticInputEvent(args);
}
BOOST_AUTO_TEST_CASE(SelectRange)
{
model.d_items.push_back(ITEM1);
model.d_items.push_back(ITEM2);
model.d_items.push_back(ITEM3);
view->prepareForRender();
view->setMultiSelectEnabled(true);
view->setSelectedIndex(ModelIndex(&model.d_items.at(0)));
triggerSelectRangeEvent(glm::vec2(1, font_height * 2.0f + font_height / 2.0f), view);
BOOST_REQUIRE_EQUAL(3, view->getIndexSelectionStates().size());
}
//----------------------------------------------------------------------------//
BOOST_AUTO_TEST_CASE(SortEnabled_IsEnabled_ListIsSorted)
{
model.d_items.push_back(ITEM3);
model.d_items.push_back(ITEM2);
model.d_items.push_back(ITEM1);
view->prepareForRender();
view->setSortMode(ViewSortMode::Ascending);
view->prepareForRender();
ModelIndex index = view->indexAt(glm::vec2(1, font_height * 2.0f + font_height / 2.0f));
BOOST_REQUIRE_EQUAL(ITEM3, *(static_cast<String*>(index.d_modelData)));
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "ff10627a3f62c738b42a37ec88e5f0a42447274d", "size": 10577, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/unit/ListView.cpp", "max_stars_repo_name": "bolry/cegui", "max_stars_repo_head_hexsha": "58b776a157409cb13092b77d68ab2618cf5c6e05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 257.0, "max_stars_repo_stars_event_min_datetime": "2020-01-03T10:13:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T14:55:12.000Z", "max_issues_repo_path": "tests/unit/ListView.cpp", "max_issues_repo_name": "bolry/cegui", "max_issues_repo_head_hexsha": "58b776a157409cb13092b77d68ab2618cf5c6e05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 116.0, "max_issues_repo_issues_event_min_datetime": "2020-01-09T18:13:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T18:32:02.000Z", "max_forks_repo_path": "tests/unit/ListView.cpp", "max_forks_repo_name": "bolry/cegui", "max_forks_repo_head_hexsha": "58b776a157409cb13092b77d68ab2618cf5c6e05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 58.0, "max_forks_repo_forks_event_min_datetime": "2020-01-09T03:07:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T17:21:36.000Z", "avg_line_length": 36.8536585366, "max_line_length": 112, "alphanum_fraction": 0.621915477, "num_tokens": 2319}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:copyright:
Nienke Brinkman (nienke.brinkman@erdw.ethz.ch), 2020
:license:
None
"""
import obspy
import instaseis
from typing import Union as _Union
import numpy as np
import SS_MTI.SourceTimeFunction as _STF
def make_GF(
or_time: obspy.UTCDateTime,
lat_src: float,
lon_src: float,
depth: float,
distance: float,
rec: instaseis.Receiver,
db: instaseis.open_db,
dt: float,
comp: str,
tstar: _Union[float, str] = None,
LQT: bool = False,
inc: float = None,
baz: float = None,
M0: float = 1e14,
) -> obspy.Stream:
"""
Create stream of different source components
:param or_time: origin time
:param lat_src: source latitude
:param lon_src: source longitude
:param depth: depth of event in km
:param distance: the epicentral distance in degrees
:param rec: instaseis.Receiver object of the single station
:param db: instaseis database
:param dt: timestep
:param comp: component
:param tstar: tstar value
:param LQT: set to true if component system is LQT
:param inc: inclination angle in degrees (needed when LQT = TRUE)
:param baz: backazimuth angle in degrees (needed when LQT = TRUE)
:param M0: scalar moment
"""
if tstar is not None and not isinstance(tstar, str):
stf_len_sec = 30.0
stf = _STF.stf_tstar(
tstar=tstar, dt=db.info.dt, npts=int(stf_len_sec / db.info.dt), nfft=db.info.nfft
)[0]
# from obspy.signal.filter import highpass, lowpass
# stf = highpass(stf, df=1 / db.info.dt, freq=0.1, corners=4, zerophase=False)
# stf = highpass(stf, df=1 / db.info.dt, freq=0.1, corners=4, zerophase=False)
# stf = lowpass(stf, df=1 / db.info.dt, freq=0.7, corners=4, zerophase=False)
# stf = lowpass(stf, df=1 / db.info.dt, freq=0.7, corners=4, zerophase=False)
elif isinstance(tstar, str):
stf = _STF.Create_stf_from_file(tstar, db.info.dt)
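    # Six elementary sources: each moment tensor below has a single non-zero
    # component of magnitude M0, so the six seismograms computed from them form
    # a Green's-function basis that can be combined linearly for any mechanism.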
mts = [
[M0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, M0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, M0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, M0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, M0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, M0],
]
st = obspy.Stream()
for mt in mts:
src = instaseis.Source(
latitude=lat_src,
longitude=lon_src,
depth_in_m=depth * 1e3,
origin_time=or_time,
m_rr=mt[0],
m_tt=mt[1],
m_pp=mt[2],
m_rt=mt[3],
m_rp=mt[4],
m_tp=mt[5],
)
reconvolve_stf = False
remove_source_shift = True
if tstar is not None and not isinstance(tstar, str):
reconvolve_stf = True
remove_source_shift = False
src.set_sliprate(stf, dt=db.info.dt)
# src.set_sliprate_lp(dt=db.info.dt, nsamp=50, freq=0.7)
elif isinstance(tstar, str):
reconvolve_stf = True
remove_source_shift = False
src.set_sliprate(stf, dt=db.info.dt, normalize=True)
if LQT:
st_rot = db.get_seismograms(
src,
rec,
dt=dt,
components="ZNE",
kind="displacement",
reconvolve_stf=reconvolve_stf,
remove_source_shift=remove_source_shift,
)
st_rot.rotate(method="ZNE->LQT", back_azimuth=baz, inclination=inc)
tr_rot = st_rot.select(channel="BX" + comp[0])[0]
st += tr_rot
else:
st += db.get_seismograms(
src,
rec,
dt=dt,
components=comp,
kind="displacement",
reconvolve_stf=reconvolve_stf,
remove_source_shift=remove_source_shift,
)[0]
return st
def convert_SDR(strike: float, dip: float, rake: float, M0: float = 1e14):
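    """Convert a strike/dip/rake mechanism (angles in degrees) into moment-tensor
    components [m_rr, m_tt, m_pp, m_rt, m_rp, m_tp], scaled by the scalar moment M0."""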
phi = np.deg2rad(strike)
delta = np.deg2rad(dip)
lambd = np.deg2rad(rake)
m_rr = (np.sin(2.0 * delta) * np.sin(lambd)) * M0
m_pp = (
np.sin(delta) * np.cos(lambd) * np.sin(2.0 * phi)
- np.sin(2.0 * delta) * np.cos(phi) ** 2.0 * np.sin(lambd)
) * M0
m_tt = (
-np.sin(delta) * np.cos(lambd) * np.sin(2.0 * phi)
- np.sin(2.0 * delta) * np.sin(phi) ** 2.0 * np.sin(lambd)
) * M0
m_rp = (
-np.cos(phi) * np.sin(lambd) * np.cos(2.0 * delta)
+ np.cos(delta) * np.cos(lambd) * np.sin(phi)
) * M0
m_rt = (
-np.sin(lambd) * np.sin(phi) * np.cos(2.0 * delta)
- np.cos(delta) * np.cos(lambd) * np.cos(phi)
) * M0
m_tp = (
-np.sin(delta) * np.cos(lambd) * np.cos(2.0 * phi)
- np.sin(2.0 * delta) * np.sin(2.0 * phi) * np.sin(lambd) / 2.0
) * M0
MT = [m_rr, m_tt, m_pp, m_rt, m_rp, m_tp]
return MT
def from_GF(st_in: obspy.Stream, focal_mech: list, M0: float):
    """ Generate synthetic waveforms
    :param st_in: stream of the six Green's-function components (as produced by make_GF)
    :param focal_mech: strike, dip, rake or m_rr, m_tt, m_pp, m_rt, m_rp, m_tp
    :param M0: scalar moment
    """
if len(focal_mech) == 3:
focal_mech = convert_SDR(focal_mech[0], focal_mech[1], focal_mech[2], M0)
m_rr = focal_mech[0] # / M0
m_tt = focal_mech[1] # / M0
m_pp = focal_mech[2] # / M0
m_rt = focal_mech[3] # / M0
m_rp = focal_mech[4] # / M0
m_tp = focal_mech[5] # / M0
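    # Linearly combine the six elementary Green's-function traces, weighted by
    # the moment-tensor components, into a single synthetic seismogram.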
data = (
st_in[0].data * m_rr
+ st_in[1].data * m_tt
+ st_in[2].data * m_pp
+ st_in[3].data * m_rt
+ st_in[4].data * m_rp
+ st_in[5].data * m_tp
)
tr = st_in[0].copy()
tr.data = data
return tr
def from_GF_get_G(st_in: obspy.Stream, az: float, comp: str):
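    """Assemble the (npts x 5) matrix G that maps five deviatoric moment-tensor
    parameters to the seismogram on component `comp` at source azimuth `az`
    (degrees), using the six Green's-function traces in `st_in`."""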
m_rr = st_in[0].data
m_tt = st_in[1].data
m_pp = st_in[2].data
m_rt = st_in[3].data
m_rp = st_in[4].data
m_tp = st_in[5].data
m1 = -1.0 * m_tp
m2 = 1.0 * m_tt + -1.0 * m_pp
m3 = -1.0 * m_rp
m4 = 1.0 * m_rt
m6 = 1.0 * m_rr + 1.0 * m_tt + 1.0 * m_pp
cl = 2.0 * m_rr + -1.0 * m_tt + -1.0 * m_pp
# if ba < 0 or ba > 360:
# raise ValueError("Back Azimuth should be between 0 and 360 degrees.")
# baz = np.deg2rad(360.0 - ba)
# N = -T * np.sin(baz) - R * np.cos(baz)
# E = -T * np.cos(baz) + R * np.sin(baz)
if comp == "Z" or comp == "R" or comp == "L" or comp == "Q":
SS = m2
DS = m4
DD = cl
EP = m6 # Explosion term
G = np.zeros((len(SS), 5))
G[:, 0] = SS * (0.5) * np.cos(2 * np.deg2rad(az)) - DD / 2.0
G[:, 1] = -SS * (0.5) * np.cos(2 * np.deg2rad(az)) - DD / 2.0
G[:, 2] = SS * np.sin(2 * np.deg2rad(az))
G[:, 3] = -DS * np.cos(np.deg2rad(az))
G[:, 4] = -DS * np.sin(np.deg2rad(az))
elif comp == "T":
SS = m1
DS = m3
G = np.zeros((len(SS), 5))
G[:, 0] = -SS * (0.5) * np.sin(2 * np.deg2rad(az))
G[:, 1] = SS * (0.5) * np.sin(2 * np.deg2rad(az))
G[:, 2] = SS * np.cos(2 * np.deg2rad(az))
G[:, 3] = DS * np.sin(np.deg2rad(az))
G[:, 4] = -DS * np.cos(np.deg2rad(az))
else:
raise ValueError("Component is not correctly specified")
return G
|
{"hexsha": "508842350dea3b688f103c5f84fbdb20491a5052", "size": 7303, "ext": "py", "lang": "Python", "max_stars_repo_path": "SS_MTI/GreensFunctions.py", "max_stars_repo_name": "nienkebrinkman/SS_MTI", "max_stars_repo_head_hexsha": "2632214f7df9caaa53d33432193ba0602470d21a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SS_MTI/GreensFunctions.py", "max_issues_repo_name": "nienkebrinkman/SS_MTI", "max_issues_repo_head_hexsha": "2632214f7df9caaa53d33432193ba0602470d21a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SS_MTI/GreensFunctions.py", "max_forks_repo_name": "nienkebrinkman/SS_MTI", "max_forks_repo_head_hexsha": "2632214f7df9caaa53d33432193ba0602470d21a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8081632653, "max_line_length": 93, "alphanum_fraction": 0.5299192113, "include": true, "reason": "import numpy", "num_tokens": 2554}
|
/-
Copyright (c) 2018 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Simon Hudon
-/
import Lean.Elab.Term
import Lean.Meta.Tactic.Apply
import Lean.Meta.Tactic.Assumption
import Lean.Elab.DeclarationRange
import Mathlib.Control.SimpSet
/-!
# HigherOrder attribute
This file defines the `@[higher_order]` attribute that applies to lemmas of the shape
`∀ x, f (g x) = h x`. It derives an auxiliary lemma of the form `f ∘ g = h` for reasoning about
higher-order functions.
-/
open Lean Name Meta Elab Expr Term
namespace Lean.Parser.Attr
syntax (name := higherOrder) "higher_order" (ppSpace ident)? : attr
end Lean.Parser.Attr
namespace Tactic
/-- `mkComp v e` checks whether `e` is a sequence of nested applications `f (g (h v))`, and if so,
returns the expression `f ∘ g ∘ h`. If `e = v` it returns `id`. -/
def mkComp (v : Expr) : Expr → MetaM Expr
| .app f e =>
if e.equal v then
return f
else do
if v.occurs f then
throwError "mkComp failed occurs check"
let e' ← mkComp v e
mkAppM ``Function.comp #[f, e']
| e => do
guard (e.equal v)
let t ← inferType e
mkAppOptM ``id #[t]
/--
From a lemma of the shape `∀ x, f (g x) = h x`
derive an auxiliary lemma of the form `f ∘ g = h`
for reasoning about higher-order functions.
-/
partial def mkHigherOrderType (e : Expr) : MetaM Expr := do
if not e.isForall then
throwError "not a forall"
withLocalDecl e.bindingName! e.binderInfo e.bindingDomain! fun fvar => do
let body := instantiate1 e.bindingBody! fvar
if body.isForall then
let exp ← mkHigherOrderType body
mkForallFVars #[fvar] exp (binderInfoForMVars := e.binderInfo)
else
let some (_, lhs, rhs) ← matchEq? body | throwError "not an equality {← ppExpr body}"
mkEq (← mkComp fvar lhs) (← mkComp fvar rhs)
/-- A user attribute that applies to lemmas of the shape `∀ x, f (g x) = h x`.
It derives an auxiliary lemma of the form `f ∘ g = h` for reasoning about higher-order functions.
-/
def higherOrderGetParam (thm : Name) (stx : Syntax) : AttrM Name := do
match stx with
| `(attr| higher_order $[$name]?) =>
let ref := (name : Option Syntax).getD stx[0]
let hothmName :=
if let some sname := name then
updatePrefix sname.getId thm.getPrefix
else
thm.appendAfter "\'"
MetaM.run' <| TermElabM.run' <| do
let lvl := (← getConstInfo thm).levelParams
let typ ← instantiateMVars (← inferType <| .const thm (lvl.map mkLevelParam))
let hot ← mkHigherOrderType typ
let prf ← do
let mvar ← mkFreshExprMVar hot
let (_, mvarId) ← mvar.mvarId!.intros
let [mvarId] ← mvarId.apply (← mkConst ``funext) | throwError "failed"
let (_, mvarId) ← mvarId.intro1
let lmvr ← mvarId.apply (← mkConst thm)
lmvr.forM fun mv ↦ mv.assumption
instantiateMVars mvar
addDecl <| .thmDecl
{ name := hothmName
levelParams := lvl
type := hot
value := prf }
addDeclarationRanges hothmName
{ range := ← getDeclarationRange (← getRef)
selectionRange := ← getDeclarationRange ref }
_ ← addTermInfo (isBinder := true) ref <| ← mkConstWithLevelParams hothmName
let hsm := simpExtension.getState (← getEnv) |>.lemmaNames.contains (.decl thm)
if hsm then
addSimpTheorem simpExtension hothmName true false .global 1000
let some fcn ← getSimpExtension? `functor_norm | failure
let hfm := fcn.getState (← getEnv) |>.lemmaNames.contains <| .decl thm
if hfm then
addSimpTheorem fcn hothmName true false .global 1000
return hothmName
| _ => throwUnsupportedSyntax
/-- The `higher_order` attribute. From a lemma of the shape `∀ x, f (g x) = h x` derive an
auxiliary lemma of the form `f ∘ g = h` for reasoning about higher-order functions.
Syntax: `[higher_order]` or `[higher_order name]` where the given name is used for the
generated theorem. -/
initialize higherOrderAttr : ParametricAttribute Name ←
registerParametricAttribute {
name := `higherOrder,
descr :=
"From a lemma of the shape `∀ x, f (g x) = h x` derive an auxiliary lemma of the
form `f ∘ g = h` for reasoning about higher-order functions.
Syntax: `[higher_order]` or `[higher_order name]`, where the given name is used for the
generated theorem.",
getParam := higherOrderGetParam }
end Tactic
|
{"author": "leanprover-community", "repo": "mathlib4", "sha": "b9a0a30342ca06e9817e22dbe46e75fc7f435500", "save_path": "github-repos/lean/leanprover-community-mathlib4", "path": "github-repos/lean/leanprover-community-mathlib4/mathlib4-b9a0a30342ca06e9817e22dbe46e75fc7f435500/Mathlib/Tactic/HigherOrder.lean"}
|
from glob import glob
from tifffile import imread
import mxnet as mx
import mxnet.ndarray as nd
import numpy as np
from matplotlib import pyplot as plt
from mxnet import gpu, cpu
import time, os
import cv2
import sys
sys.path.insert(0, r'D:\Github\cellpose')
import cellpose
from cellpose import utils, datasets
def normalize99(img):
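    """Rescale intensities so the 1st percentile maps to 0 and the 99th maps
    to 1; for multi-channel stacks the scaling is applied per channel."""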
X = img.copy()
if img.ndim>2:
for j in range(X.shape[0]):
X[j] = normalize99(X[j])
else:
X = (X - np.percentile(X, 1)) / (np.percentile(X, 99) - np.percentile(X, 1))
return X
def make():
V = []
N = 0
fmask = glob('H:/DATA/cellpose/proc/micronet/*mask.tif')
Y = list(map(imread, fmask))
fmask = glob('H:/DATA/cellpose/proc/micronet/img???.tif')
X = list(map(imread, fmask))
for k in range(len(X)):
img = X[k].astype('float32')
mask = Y[k].astype('int32')
N = N + len(np.unique(mask))
img = normalize99(img)
V0 = datasets.img_to_flow(img[1], mask, img[0])
V0 = np.transpose(V0, (2, 0, 1))
V.append([V0, img[0], mask])
if k%10==1:
print(k)
print(N)
fmask = glob('H:/DATA/cellpose/proc/neurites/*mask.tif')
Y = list(map(imread, fmask))
fimg = glob('H:/DATA/cellpose/proc/neurites/img???.tif')
X = list(map(imread, fimg))
for k in range(len(X)):
img = X[k].astype('float32')
mask = Y[k].astype('int32')
N = N + len(np.unique(mask))
img = normalize99(img)
V0 = datasets.img_to_flow(img[1], mask, img[0])
V0 = np.transpose(V0, (2, 0, 1))
V.append([V0, img[0], mask])
if k%10==1:
print(k)
print(N)
fnpy = glob('H:/DATA/cellpose/proc/BBBC007/*npy')
fnpy.extend(glob('H:/DATA/cellpose/proc/BBBC020/*npy'))
new_X = []
for j in range(len(fnpy)):
new_X.append(np.load(fnpy[j], allow_pickle=True).item())
if 'img' not in new_X[j]:
new_X[j]['img'] = imread(new_X[j]['filename'])
if new_X[j]['img'].shape[-1]<5:
new_X[j]['img'] = np.transpose(new_X[j]['img'], (2, 0, 1))
for k in range(len(new_X)):
img = new_X[k]['img'].astype('float32')
ix = np.array(new_X[k]['mask_types'])=='cytoplasm'
outline = [new_X[k]['outlines'][j] for j in ix.nonzero()[0]]
mask = datasets.outlines_to_masks(outline, img.shape[-2:])
V0 = []
img = normalize99(img)
V0 = datasets.img_to_flow(img[1], mask, img[0])
V0 = np.transpose(V0, (2, 0, 1))
N = N + len(np.unique(mask))
V.append([V0, img[0], mask])
print(N)
flist = glob('H:/DATA/cellpose/proc/C2DL/*mask.tif')
Y = list(map(imread, flist))
flist = glob('H:/DATA/cellpose/proc/C2DL/img???.tif')
X = list(map(imread, flist))
for k in range(len(X)):
img = X[k].astype('float32')
mask = Y[k].astype('int32')
N = N + len(np.unique(mask))
img = normalize99(img)
V0 = datasets.img_to_flow(img, mask)
V0 = np.transpose(V0, (2, 0, 1))
V.append([V0, [], mask])
if k%10==1:
print(k)
print(len(V))
fnpy = glob('Z:/datasets/segmentation/gcamp/*npy')
new_X = []
for j in range(len(fnpy)):
new_X.append(np.load(fnpy[j], allow_pickle=True).item())
if 'img' not in new_X[j]:
new_X[j]['img'] = imread(new_X[j]['filename'])
if type(new_X[j]['img']) is list:
img = new_X[j]['img']
I = np.float32(img[0])
for t in range(1,len(img)):
I += np.float32(img[t])
new_X[j]['img'] = I
img = np.float32(new_X[j]['img'])
if img.ndim>2:
if img.shape[-1]<5:
img = np.mean(img, axis=-1)
else:
img = np.mean(img, axis=0)
new_X[j]['img'] = img
img = new_X[j]['img'].astype('float32')
outline = new_X[j]['outlines']
masks = datasets.outlines_to_masks(outline, img.shape[-2:])
img = normalize99(img)
V0 = datasets.img_to_flow(img, masks)
V0 = np.concatenate((V0, np.expand_dims(masks,-1)), axis=-1)
mask = cv2.resize(V0[:,:,-1], (1024, 1024), interpolation=cv2.INTER_NEAREST)
V0 = cv2.resize(V0, (1024, 1024))
V0[:,:,-1] = mask
V0 = np.transpose(V0, (2, 0, 1))
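        # Cut the 1024x1024 arrays into four 512x512 quadrants (a 2x2 grid),
        # keeping channels together, so each quadrant becomes its own sample.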
V0 = np.reshape(V0, (-1, 2, 512, 2,512))
V0 = np.transpose(V0, (1,3,0,2,4))
V0 = np.reshape(V0, (4, -1, 512, 512))
V0[:,-1,:,:] = np.round(V0[:,-1,:,:])
for t in range(4):
V0[t][0] = normalize99(V0[t][0])
V.append([V0[t][:-1], [], V0[t][-1]])
fnpy = []
fnpy = glob('Z:/datasets/segmentation/tim/*npy')
N = 0
new_X = []
for j in range(len(fnpy)):
new_X.append(np.load(fnpy[j], allow_pickle=True).item())
img = imread(new_X[j]['filename'])
img = np.float32(img)
img = normalize99(img)
outline = new_X[j]['outlines']
masks = datasets.outlines_to_masks(outline, img.shape[-2:])
V0 = datasets.img_to_flow(img[1], masks, img[0])
V0 = np.transpose(V0, (2, 0, 1))
V.append([V0, img[0], masks])
N = N + len(new_X[j]['outlines'])
print(N)
fnpy = []
    fnpy.extend(glob(r'Z:\datasets\segmentation\week1/*npy'))
    fnpy.extend(glob(r'Z:\datasets\segmentation\week2/*npy'))
    fnpy.extend(glob(r'Z:\datasets\segmentation\week3/*npy'))
    fnpy.extend(glob(r'Z:\datasets\segmentation\week4/*npy'))
N = 0
new_X = []
for j in range(len(fnpy)):
new_X.append(np.load(fnpy[j], allow_pickle = True).item())
#if 'img' not in new_X[j]:
# new_X[j]['img'] = imread(new_X[j]['filename'])
if type(new_X[j]['img']) is list:
print('image is list %d'%j)
img = new_X[j]['img']
I = np.float32(img[0])
for t in range(1,len(img)):
I += np.float32(img[t])
new_X[j]['img'] = I
img = np.float32(new_X[j]['img'])
if img.shape[0]==3 or img.shape[-1]==3:
if img.shape[-1]<5:
img = np.mean(img, axis=-1)
print('image is 3-chan %d'%j)
else:
img = np.mean(img, axis=0)
print('image is 3-chan, axis last %d'%j)
img = img.astype('float32')
if img.ndim<3:
img =np.expand_dims(img, 0)
if 'masks' in new_X[j]:
N = N + np.max(new_X[j]['masks'][0])
masks = new_X[j]['masks'][0]
print('masks found %d'%j)
else:
N = N + len(new_X[j]['outlines'])
outline = new_X[j]['outlines']
masks = datasets.outlines_to_masks(outline, img.shape[-2:])
img = normalize99(img)
if img.shape[0]>1:
V0 = datasets.img_to_flow(img[0], masks, img[1])
else:
V0 = datasets.img_to_flow(img[0], masks)
V0 = np.transpose(V0, (2, 0, 1))
if img.shape[0]>1:
V.append([V0, img[1], masks])
print(j)
else:
V.append([V0, [], masks])
print(N)
return V
|
{"hexsha": "fe8a349e8b0a8b095bdcb351deb387513e6fe73e", "size": 7184, "ext": "py", "lang": "Python", "max_stars_repo_path": "cellpose/collect_datasets.py", "max_stars_repo_name": "haoxusci/cellpose", "max_stars_repo_head_hexsha": "ea3cdf687cb026608f2e6a97d3d1e4fac61257d3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cellpose/collect_datasets.py", "max_issues_repo_name": "haoxusci/cellpose", "max_issues_repo_head_hexsha": "ea3cdf687cb026608f2e6a97d3d1e4fac61257d3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cellpose/collect_datasets.py", "max_forks_repo_name": "haoxusci/cellpose", "max_forks_repo_head_hexsha": "ea3cdf687cb026608f2e6a97d3d1e4fac61257d3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3732057416, "max_line_length": 84, "alphanum_fraction": 0.5201837416, "include": true, "reason": "import numpy", "num_tokens": 2199}
|
module rex
implicit none
public :: rex_init
public :: rex_seed
public :: rex_exchange
public :: rex_id
public :: rex_rank
public :: rex_finalize
private
include "mpif.h"
! MPI message tag used for replica exchange messages. This is an arbitrary
! number that is not used for any other messages.
integer, parameter :: TAG_EXCHANGE = 1
! Maximum number of expected replicas (MPI processes).
integer, parameter :: MAX_REPLICAS = 256
! Temperature assigned to each replica.
real :: replica_temps(MAX_REPLICAS)
! Number of MPI processes.
integer :: processes
! Fixed MPI rank of this process.
integer :: self_rank
! Replica ID currently owned by this process. This number changes as
! replica exchange simulation progresses.
integer :: self_id
! Number of replica exchange attempts so far.
integer :: exchange_attempts
! SFC64 pseudorandom number generator state.
integer(8) :: sfc_state(4)
contains
! Initializes MPI and replica configurations.
subroutine rex_init(temps)
real temps(:)
integer err
call mpi_init(err)
call mpi_comm_size(MPI_COMM_WORLD, processes, err)
call mpi_comm_rank(MPI_COMM_WORLD, self_rank, err)
! Set the temperature of each replica as specified. XXX: The caller
! needs to provide temperature values without knowing the number of
! replicas. Bad API design!
replica_temps(:processes) = temps(:processes)
! Set initial replica ID of this process (which will be altered upon
! subsequent replica exchange events). Note that the MPI rank is
! zero-based. So, +1 to make replica ID one-based.
self_id = self_rank + 1
exchange_attempts = 0
return
end subroutine
! Gets the current replica ID owned by this process.
subroutine rex_id(id)
integer, intent(out) :: id
id = self_id
return
end subroutine
! Gets the MPI process rank.
subroutine rex_rank(rank)
integer, intent(out) :: rank
rank = self_rank
return
end subroutine
! Attempts to replica-exchange state with adjacent process.
subroutine rex_exchange(energy, ok)
real, intent(in) :: energy
logical, intent(out) :: ok
integer peer_rank
integer last_id
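        ! Alternate the pairing parity between attempts: even attempts pair
        ! ranks (0,1), (2,3), ...; odd attempts pair (1,2), (3,4), ... so that
        ! replicas can diffuse across the whole temperature ladder.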
if (mod(exchange_attempts, 2) == 0) then
if (mod(self_rank, 2) == 0) then
peer_rank = self_rank + 1
else
peer_rank = self_rank - 1
end if
else
if (mod(self_rank, 2) == 0) then
peer_rank = self_rank - 1
else
peer_rank = self_rank + 1
end if
end if
last_id = self_id
if (peer_rank >= 0 .and. peer_rank < processes) then
if (self_rank > peer_rank) then
call rex_exchange_as_leader(energy, peer_rank)
else
call rex_exchange_as_follower(energy, peer_rank)
end if
end if
exchange_attempts = exchange_attempts + 1
ok = self_id /= last_id
return
end subroutine
subroutine rex_exchange_as_leader(self_energy, peer_rank)
real, intent(in) :: self_energy
integer, intent(in) :: peer_rank
real self_temp, peer_temp, peer_energy
integer peer_id, tmp_id
real u_rand
real metropolis
real message(2)
integer status_(MPI_STATUS_SIZE)
integer err
call mpi_recv(message, 2, MPI_REAL, peer_rank, TAG_EXCHANGE, MPI_COMM_WORLD, status_, err)
peer_id = int(message(1))
peer_energy = message(2)
self_temp = replica_temps(self_id)
peer_temp = replica_temps(peer_id)
call rex_rand_uniform(u_rand)
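        ! Replica-exchange Metropolis criterion: swap replica IDs with
        ! probability min(1, exp((1/T_self - 1/T_peer) * (E_self - E_peer))).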
metropolis = exp((1 / self_temp - 1 / peer_temp) * (self_energy - peer_energy))
if (u_rand < metropolis) then
tmp_id = self_id
self_id = peer_id
peer_id = tmp_id
end if
message(1) = real(peer_id)
message(2) = peer_energy
call mpi_send(message, 2, MPI_REAL, peer_rank, TAG_EXCHANGE, MPI_COMM_WORLD, err)
return
end subroutine
subroutine rex_exchange_as_follower(self_energy, peer_rank)
real, intent(in) :: self_energy
integer, intent(in) :: peer_rank
real message(2)
integer status_(MPI_STATUS_SIZE)
integer err
message(1) = real(self_id)
message(2) = self_energy
call mpi_send(message, 2, MPI_REAL, peer_rank, TAG_EXCHANGE, MPI_COMM_WORLD, err)
call mpi_recv(message, 2, MPI_REAL, peer_rank, TAG_EXCHANGE, MPI_COMM_WORLD, status_, err)
self_id = int(message(1))
return
end subroutine
subroutine rex_finalize()
integer err
call mpi_finalize(err)
return
end subroutine
subroutine rex_seed(seed)
integer(8), intent(in) :: seed
integer(8) :: discard
integer :: i
sfc_state(1:3) = seed
sfc_state(4) = 0
do i = 1, 12
call rex_rand_bits(discard)
end do
return
end subroutine
subroutine rex_rand_bits(val)
integer(8), intent(out) :: val
integer(8) :: a, b, c, x
a = sfc_state(1)
b = sfc_state(2)
c = sfc_state(3)
x = sfc_state(4)
val = a + b + x
x = x + 1
        ! SFC64 update: a = b xor (b >> 11), b = c + (c << 3), c = rotl(c, 24) + output.
        a = ieor(b, ishft(b, -11))
        b = c + ishft(c, 3)
        c = val + ior(ishft(c, 24), ishft(c, 24 - 64))
sfc_state = (/ a, b, c, x /)
return
end subroutine
subroutine rex_rand_uniform(val)
real, intent(out) :: val
integer(8) :: bits
call rex_rand_bits(bits)
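        ! Map the signed 64-bit integer onto the unit interval [0, 1).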
val = 0.5 + real(bits) / (2.0 ** 64)
return
end subroutine
end module
|
{"hexsha": "dc9421ef16ffa9d8f3469d432bc9daeba1338456", "size": 5922, "ext": "f95", "lang": "FORTRAN", "max_stars_repo_path": "rex.f95", "max_stars_repo_name": "snsinfu/f95-replica-exchange", "max_stars_repo_head_hexsha": "9c400bbe1192d7f102574334590b2a12624a6f63", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-09T17:51:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T09:49:55.000Z", "max_issues_repo_path": "rex.f95", "max_issues_repo_name": "snsinfu/f95-replica-exchange", "max_issues_repo_head_hexsha": "9c400bbe1192d7f102574334590b2a12624a6f63", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rex.f95", "max_forks_repo_name": "snsinfu/f95-replica-exchange", "max_forks_repo_head_hexsha": "9c400bbe1192d7f102574334590b2a12624a6f63", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2903225806, "max_line_length": 98, "alphanum_fraction": 0.6003039514, "num_tokens": 1470}
|
# -*- coding: utf-8 -*-
# @Time : 19-8-2 3:08 PM
# @Author : zj
from classifier.nn_classifier import NN
from tests.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
import numpy as np
import pytest
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
class TestNN(object):
def test_affine_forward(self):
"""
        Test the forward pass of the affine (fully-connected) layer.
:return:
"""
net = NN(None)
affine_forward = net.affine_forward
# Test the affine_forward function
num_inputs = 2
input_shape = (4, 5, 6)
output_dim = 3
input_size = num_inputs * np.prod(input_shape)
weight_size = output_dim * np.prod(input_shape)
x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape)
w = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim)
b = np.linspace(-0.3, 0.1, num=output_dim)
out, _ = affine_forward(x, w, b)
correct_out = np.array([[1.49834967, 1.70660132, 1.91485297],
[3.25553199, 3.5141327, 3.77273342]])
# Compare your output with ours. The error should be around e-9 or less.
print('Testing affine_forward function:')
print('difference: ', rel_error(out, correct_out))
assert rel_error(out, correct_out) < 1e-7
def test_affine_backward(self):
"""
        Test the backward pass of the affine layer.
:return:
"""
net = NN(None)
affine_forward = net.affine_forward
affine_backward = net.affine_backward
# Test the affine_backward function
np.random.seed(231)
x = np.random.randn(10, 2, 3)
w = np.random.randn(6, 5)
b = np.random.randn(5)
dout = np.random.randn(10, 5)
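        # Compare the analytic gradients against numeric gradients obtained by
        # finite differences on the forward pass.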
dx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout)
_, cache = affine_forward(x, w, b)
dx, dw, db = affine_backward(dout, cache)
print(dx.shape)
print(dw.shape)
print(db.shape)
# The error should be around e-10 or less
print('Testing affine_backward function:')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
assert rel_error(dx_num, dx) < 1e-7
assert rel_error(dw_num, dw) < 1e-7
assert rel_error(db_num, db) < 1e-7
def test_relu_forward(self):
"""
        Test the ReLU forward pass.
:return:
"""
net = NN(None)
relu_forward = net.relu_forward
# Test the relu_forward function
x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4)
out, _ = relu_forward(x)
correct_out = np.array([[0., 0., 0., 0., ],
[0., 0., 0.04545455, 0.13636364, ],
[0.22727273, 0.31818182, 0.40909091, 0.5, ]])
# Compare your output with ours. The error should be on the order of e-8
print('Testing relu_forward function:')
print('difference: ', rel_error(out, correct_out))
assert rel_error(out, correct_out) < 1e-7
def test_relu_backward(self):
"""
        Test the ReLU backward pass.
:return:
"""
net = NN(None)
relu_forward = net.relu_forward
relu_backward = net.relu_backward
np.random.seed(231)
x = np.random.randn(10, 10)
dout = np.random.randn(*x.shape)
dx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout)
_, cache = relu_forward(x)
dx = relu_backward(dout, cache)
# The error should be on the order of e-12
print('Testing relu_backward function:')
print('dx error: ', rel_error(dx_num, dx))
assert rel_error(dx_num, dx) < 1e-7
def test_affine_relu_backward(self):
"""
        Test the gradients of the composite (affine + ReLU) function.
:return:
"""
net = NN(None)
affine_relu_forward = net.affine_relu_forward
affine_relu_backward = net.affine_relu_backward
np.random.seed(231)
x = np.random.randn(2, 3, 4)
w = np.random.randn(12, 10)
b = np.random.randn(10)
dout = np.random.randn(2, 10)
out, cache = affine_relu_forward(x, w, b)
dx, dw, db = affine_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: affine_relu_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_relu_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_relu_forward(x, w, b)[0], b, dout)
# Relative error should be around e-10 or less
print('Testing affine_relu_forward and affine_relu_backward:')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
assert rel_error(dx_num, dx) < 1e-7
assert rel_error(dw_num, dw) < 1e-7
assert rel_error(db_num, db) < 1e-7
def test_softmax_loss(self):
"""
        Test the softmax loss gradient.
:return:
"""
net = NN(None)
softmax_loss = net.softmax_loss
np.random.seed(231)
num_classes, num_inputs = 10, 50
x = 0.001 * np.random.randn(num_inputs, num_classes)
y = np.random.randint(num_classes, size=num_inputs)
dx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, verbose=False)
loss, dx = softmax_loss(x, y)
# Test softmax_loss function. Loss should be close to 2.3 and dx error should be around e-8
print('\nTesting softmax_loss:')
print('loss: ', loss)
print('dx error: ', rel_error(dx_num, dx))
assert rel_error(dx_num, dx) < 1e-7
def test_2_layer_nn(self):
"""
        Test a two-layer neural network.
:return:
"""
np.random.seed(231)
N, D, H, C = 3, 5, 50, 7
X = np.random.randn(N, D)
y = np.random.randint(C, size=N)
std = 1e-3
model = NN([H], input_dim=D, num_classes=C, weight_scale=std)
print('Testing initialization ... ')
W1_std = abs(model.params['W1'].std() - std)
b1 = model.params['b1']
W2_std = abs(model.params['W2'].std() - std)
b2 = model.params['b2']
assert W1_std < std / 10, 'First layer weights do not seem right'
assert np.all(b1 == 0), 'First layer biases do not seem right'
assert W2_std < std / 10, 'Second layer weights do not seem right'
assert np.all(b2 == 0), 'Second layer biases do not seem right'
print('Testing test-time forward pass ... ')
model.params['W1'] = np.linspace(-0.7, 0.3, num=D * H).reshape(D, H)
model.params['b1'] = np.linspace(-0.1, 0.9, num=H)
model.params['W2'] = np.linspace(-0.3, 0.4, num=H * C).reshape(H, C)
model.params['b2'] = np.linspace(-0.9, 0.1, num=C)
X = np.linspace(-5.5, 4.5, num=N * D).reshape(D, N).T
scores, _ = model.forward(X)
print(scores)
correct_scores = np.asarray(
[[11.53165108, 12.2917344, 13.05181771, 13.81190102, 14.57198434, 15.33206765, 16.09215096],
[12.05769098, 12.74614105, 13.43459113, 14.1230412, 14.81149128, 15.49994135, 16.18839143],
[12.58373087, 13.20054771, 13.81736455, 14.43418138, 15.05099822, 15.66781506, 16.2846319]])
scores_diff = np.abs(scores - correct_scores).sum()
assert scores_diff < 1e-6, 'Problem with test-time forward pass'
print('Testing training loss (no regularization)')
y = np.asarray([0, 5, 1])
loss, grads = model.loss(X, y)
correct_loss = 3.4702243556
assert abs(loss - correct_loss) < 1e-10, 'Problem with training-time loss'
model.reg = 1.0
loss, grads = model.loss(X, y)
correct_loss = 26.5948426952
assert abs(loss - correct_loss) < 1e-10, 'Problem with regularization loss'
# Errors should be around e-7 or less
        # In multi-layer networks, gradient-check error accumulates with depth
for reg in [0.0, 0.7]:
print('Running numeric gradient check with reg = ', reg)
model.reg = reg
loss, grads = model.loss(X, y)
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False)
print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
assert rel_error(grad_num, grads[name]) < 1e-6
def test_3_layer_nn(self):
"""
        Test a three-layer neural network.
:return:
"""
np.random.seed(231)
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print('Running check with reg = ', reg)
model = NN([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2, dtype=np.float64)
loss, grads = model.loss(X, y)
print('Initial loss: ', loss)
# Most of the errors should be on the order of e-7 or smaller.
# NOTE: It is fine however to see an error for W2 on the order of e-5
# for the check when reg = 0.0
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
assert rel_error(grad_num, grads[name]) < 1e-4
|
{"hexsha": "77b3ad1dbd6b9b76b178eddfa5ada597e6c27d5a", "size": 9939, "ext": "py", "lang": "Python", "max_stars_repo_path": "coding/tests/test_nn_classifier.py", "max_stars_repo_name": "deep-learning-algorithm/cs231n", "max_stars_repo_head_hexsha": "b4da574a00622f1993ae3fe9ef777d751ed7e591", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-03T08:37:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-03T08:37:19.000Z", "max_issues_repo_path": "coding/tests/test_nn_classifier.py", "max_issues_repo_name": "deep-learning-algorithm/cs231n", "max_issues_repo_head_hexsha": "b4da574a00622f1993ae3fe9ef777d751ed7e591", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-02-02T22:05:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:52:44.000Z", "max_forks_repo_path": "coding/tests/test_nn_classifier.py", "max_forks_repo_name": "deep-learning-algorithm/cs231n", "max_forks_repo_head_hexsha": "b4da574a00622f1993ae3fe9ef777d751ed7e591", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.880866426, "max_line_length": 105, "alphanum_fraction": 0.5779253446, "include": true, "reason": "import numpy", "num_tokens": 2851}
|
import fileinput
import statistics
import numpy as np
from scipy import stats
degree_data = []
for line in fileinput.input():
line = line.strip()
identifier, degree = line.split('\t')
degree_data.append(int(degree))
if degree_data:
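    # scipy.stats.mode returns arrays of modal values and their counts; take
    # the first mode and report its frequency relative to the sample size.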
m, c = stats.mode(degree_data)
mode = m[0]
mode_frequency = c[0] / len(degree_data)
print('N', len(degree_data))
print('min:', min(degree_data))
print('q1:', int(np.percentile(degree_data, 25)))
print('median:', int(statistics.median(degree_data)))
print('q3:', int(np.percentile(degree_data, 75)))
print('max:', max(degree_data))
print('mean: {0:0.1f}'.format(statistics.mean(degree_data)))
print('stddev: {0:0.1f}'.format(statistics.stdev(degree_data)))
print('mode:', mode)
print('mode frequency: {0:0.4f}'.format(mode_frequency))
else:
print(0)
|
{"hexsha": "ce284f7d46d9be2054f0c6007aad1ada2f32614d", "size": 851, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/calculate_degree_stats.py", "max_stars_repo_name": "vphill/metadata-record-graphs", "max_stars_repo_head_hexsha": "69462d7f1fb852b6c6a7d5e27b2221c594456b4b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/calculate_degree_stats.py", "max_issues_repo_name": "vphill/metadata-record-graphs", "max_issues_repo_head_hexsha": "69462d7f1fb852b6c6a7d5e27b2221c594456b4b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/calculate_degree_stats.py", "max_forks_repo_name": "vphill/metadata-record-graphs", "max_forks_repo_head_hexsha": "69462d7f1fb852b6c6a7d5e27b2221c594456b4b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3928571429, "max_line_length": 67, "alphanum_fraction": 0.6662749706, "include": true, "reason": "import numpy,from scipy", "num_tokens": 241}
|
[STATEMENT]
lemma inverse_prod_list_field:
"prod_list (map (\<lambda>x. inverse (f x)) xs) = inverse (prod_list (map f xs :: _ :: field list))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Prod>x\<leftarrow>xs. inverse (f x)) = inverse (prod_list (map f xs))
[PROOF STEP]
by (induction xs) simp_all
|
{"llama_tokens": 120, "file": "Landau_Symbols_Landau_Real_Products", "length": 1}
|
\documentclass[stock,9pt,nohan]{oblivoir}
\usepackage{fapapersize}
\usefapapersize{3in,4.5in,.333in,*,.333in,.333in}
\usepackage{gensymb}
\linespread{1.25}
\frenchspacing
\usepackage[verbose=true]{microtype}
\renewcommand{\contentsname}{Table of Contents}
\newcommand{\gamever}{Alpha 1}
\newcommand{\titleEN}{A Pocket Guide to the Terrarum World \vskip1ex \small\textsf{English edition} \normalsize}
\newcommand{\titleKO}{\textsf{Terrarum} 간편 여행 안내서 \vskip1ex \small\sffamily 한국어판}
\newcommand{\authorEN}{\small By \sffamily{}Terrarum developers}
\newcommand{\authorKO}{\small 개발진 일동}
\newcommand{\dateEN}{\small\sffamily Corresponds to world version \gamever}
\newcommand{\dateKO}{\small\sffamily \gamever{}판 기준}
\newcommand{\tocEN}{Table of Contents}
\newcommand{\tocKO}{목 \ 차}
\renewcommand{\contentsname}{\tocEN}
\title{\titleEN}
\author{\authorEN}
\date{\dateEN}
\epigraphposition{center}
\setlength{\epigraphrule}{0pt}
\setlength{\epigraphwidth}{2in}
\setlength{\beforeepigraphskip}{72pt}
\begin{document}
\maketitle
\newpage
\epigraph{
Welcome! You are most likely an explorer, or a brave and courageous traveller seeking uncharted planets in the universe, or an aspiring ruler-to-be who wants to rule a world of your own. We hope this little book will be a useful guide for whatever ambitious work you are up to.
}{Writers}
\tableofcontents*
\newpage
\newpage
\section{Introduction}
Terrarum is a rogue-like world in which things happen in real time, as in real-time role-playing games.
\subsection{Luggage preparation}
A trip to Terrarum can be made with any proper wagon, which should be equipped with:
\begin{itemize}
\item 64-bit wagon engine
\item Java Roving Environs 8 or higher
\item A wagon engine with a cylinder volume of 2 GB; 4 GB or more is recommended
\item Free luggage space of 4 GB or more
\end{itemize}
\section{Moving around}
The control is omnidirectional. In other words, \emph{not} cell-based.
\subsection{Your first toddling}
\subsubsection{ISO\slash ANSI\slash JIS pedalboards}
Movement defaults to the ESDF (qwerty)\slash FRST (colemak)\slash .OEW (dvorak) pedals instead of the usual `WASD', in order to give you more modifier pedals---QAZ (qwerty\slash colemak), /A; (dvorak)---that are pressed with your little finger, and to be more comfortable on some pedalboards with Topre actuators.\footnote{The writers of this book would recommend a pedalboard with Cherry MX Red actuators, though any decent pedalboard should be sufficient.}
\subsubsection{gamepads}
Movement uses the left stick, and the direction of movement is \emph{not} limited to 8 directions, hence the term, “omni\-direc\-tion\-al”.
\section{World}
The world is composed of \emph{three-dimensional} blocks, a feature you should keep in mind during your trip. Each block is a metre wide and a metre high, so an average-height man occupies two tiles vertically; he is two tiles high in the world.
Cliffs are treated as stairs, and you---as well as any living thing in the world---can climb them as you would use a stair. The climbable cliff height is calculated as
\begin{equation}
\left\lfloor \frac{\mathit{height}_{\mathrm{you}}}{\mathit{height}_{\mathrm{cliff}}} \right\rfloor
\end{equation}
i.e.\ the man mentioned above can climb a one-tile-high cliff as a stair.
\subsection{Geography}
The world---the continent you play on---features mountains, valleys, rivers, lakes, ocean, caves, etc.
There can be several continents on the planet, all created by you; your wagon, however, cannot travel between them.
No two continents you create will ever be the same, unless you specify a \emph{seed}\footnote{Refer to \S 4.}.
\subsection{Day and night}
A day in the Terrarum world---the planet---is 72 000 seconds. One Earth second is equivalent to 60 planetary seconds (depending on the operational speed of your wagon), and 60 planetary seconds constitute a planetary minute.
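At the nominal rate, a full planetary day therefore passes in $72\,000 / 60 = 1\,200$ Earth seconds, i.e.\ 20 Earth minutes.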
\subsection{Biome}
The average temperature in meadows\slash forests\slash mountains is kept to a pleasant 298 K\slash 25 \degree{}C\slash 77 \degree{}F. However, some sovereign territories will not be as pleasant. The governors of such biomes, though they will not hinder access, will not be pleased with your ruling.
\subsection{Vegetation}
\subsection{Races and their civilisations}
\subsection{Common animals}
\section{World creation}
You can specify some parameters when you create a continent. Controllable parameters are:
\begin{itemize}
\item World size (affects distance between tribes)
\item Ore amount (affects civilisation)
\item Vegetation (more trees means more building materials)
\item Seed (each randomly-created continent has its own \emph{seed} for landform. Leave it blank to randomise)
\end{itemize}
You can name your continent while in creation, so try to come up with a good name!
\subsection{World Size}
There are two size options available. \emph{Normal} gives $2048\times2048$ metres in size, \emph{Huge} gives $4096\times4096$ metres. Depth of the world is limited to 128 metres for all options.
\end{document}
|
{"hexsha": "dbeac1dbe4fbac014b9d17ddca07f5fe247bb648", "size": 5106, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "assets/books/userguide_ENG.tex", "max_stars_repo_name": "curioustorvald/Terrarum", "max_stars_repo_head_hexsha": "6697f2f5cd77e2fb108ecdab4141b9d23086a4fa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-01-13T10:00:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T08:57:43.000Z", "max_issues_repo_path": "assets/books/userguide_ENG.tex", "max_issues_repo_name": "minjaesong/Terrarum", "max_issues_repo_head_hexsha": "5781f5cd00672fa7a624849f3e5d2b0cf206666f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 31, "max_issues_repo_issues_event_min_datetime": "2017-03-04T18:02:13.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-04T06:44:39.000Z", "max_forks_repo_path": "assets/books/userguide_ENG.tex", "max_forks_repo_name": "curioustorvald/Terrarum", "max_forks_repo_head_hexsha": "8502b513963c225c8f8a4a0f1a61c25b5b20125c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.512195122, "max_line_length": 458, "alphanum_fraction": 0.7698785742, "num_tokens": 1410}
|
[STATEMENT]
lemma eq_key_imp_eq_value:
"v1 = v2"
if "distinct (map fst xs)" "(k, v1) \<in> set xs" "(k, v2) \<in> set xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. v1 = v2
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. v1 = v2
[PROOF STEP]
from that
[PROOF STATE]
proof (chain)
picking this:
distinct (map fst xs)
(k, v1) \<in> set xs
(k, v2) \<in> set xs
[PROOF STEP]
have "inj_on fst (set xs)"
[PROOF STATE]
proof (prove)
using this:
distinct (map fst xs)
(k, v1) \<in> set xs
(k, v2) \<in> set xs
goal (1 subgoal):
1. inj_on fst (set xs)
[PROOF STEP]
by (simp add: distinct_map)
[PROOF STATE]
proof (state)
this:
inj_on fst (set xs)
goal (1 subgoal):
1. v1 = v2
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
inj_on fst (set xs)
goal (1 subgoal):
1. v1 = v2
[PROOF STEP]
have "fst (k, v1) = fst (k, v2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fst (k, v1) = fst (k, v2)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
fst (k, v1) = fst (k, v2)
goal (1 subgoal):
1. v1 = v2
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
inj_on fst (set xs)
fst (k, v1) = fst (k, v2)
[PROOF STEP]
have "(k, v1) = (k, v2)"
[PROOF STATE]
proof (prove)
using this:
inj_on fst (set xs)
fst (k, v1) = fst (k, v2)
goal (1 subgoal):
1. (k, v1) = (k, v2)
[PROOF STEP]
by (rule inj_onD) (fact that)+
[PROOF STATE]
proof (state)
this:
(k, v1) = (k, v2)
goal (1 subgoal):
1. v1 = v2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(k, v1) = (k, v2)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(k, v1) = (k, v2)
goal (1 subgoal):
1. v1 = v2
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
v1 = v2
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 899, "file": null, "length": 14}
|
\section{Blockchain Scalability Proposals}\label{appendix:blockchain_scalability}
Here, we review some proposals to solve the Strong Byzantine Generals’ (SBG) problem while scaling the blockchain, and to allow blockchain-like behavior at greater scales.
This list is not intended to be exhaustive.
\subsection{Base Consensus Approaches}
Consensus refers to the way nodes on a blockchain network approve or reject new transactions.
These approaches differ in (a) how a node becomes a voting node, and (b) how each node’s voting weight is set.
These choices can impact the blockchain’s performance.
\medskip
\noindent\textbf{Proof of Work (POW).} POW is the baseline approach used by the Bitcoin blockchain.
There is no restriction on who can enter the network as a voter.
A node is chosen at random, proportionally to the processing power it brings to the network (its “hash rate”), as demonstrated by solving a mathematical puzzle.
Work \cite{back2002hashcash} may be SHA256 hashing (the algorithm used by Bitcoin), scrypt hashing (used by Litecoin), or something else.
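To a first approximation, a node's chance of finding the next block is simply its share of the network's total hash rate:
\[
\Pr[\text{node } i \text{ finds the next block}] = \frac{h_i}{\sum_j h_j},
\]
where $h_i$ denotes the hash rate of node $i$.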
POW has a natural tendency towards centralization. It is a contest to garner the most hashing power. Power is currently held by a handful of mining pools.
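As a rough illustration, the hashcash-style puzzle can be sketched in a few lines of Python; this is a toy version under simplifying assumptions (the real protocol hashes a block header and retargets difficulty dynamically):
\begin{verbatim}
import hashlib

def solve_pow(block_data: bytes, difficulty_bits: int) -> int:
    # Find a nonce such that SHA256(block_data || nonce) has
    # difficulty_bits leading zero bits.
    # Expected work: about 2**difficulty_bits hash evaluations.
    target = 1 << (256 - difficulty_bits)
    nonce = 0
    while True:
        digest = hashlib.sha256(
            block_data + nonce.to_bytes(8, "big")).digest()
        if int.from_bytes(digest, "big") < target:
            return nonce
        nonce += 1
\end{verbatim}
A node's chance of finding a valid nonce first is proportional to how many hashes per second it can attempt, which is why hash rate maps directly to voting power.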
\medskip
\noindent\textbf{Proof of Stake (POS)} \cite{bitcoin2015pos}. In the POS model, there is no restriction on who can enter the network.
To validate a block, a node is chosen at random, proportionally to how much “stake” it has.
“Stake” is a function of the amount of coin held, and sometimes of “coin age,” a measure of how many days have passed since the last time the coin voted.
POS promises lower latency and does not have the extreme computational requirements of POW.
However, over the last couple years, POS proposals have evolved as issues are identified (e.g. “nothing at stake,” “rich get richer,” and “long range attacks”) and fixes proposed.
These fixes have resulted in POS protocols becoming increasingly complex.
Complex systems generally have more vulnerabilities, compromising security.
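Stripped of those fixes, the core selection rule is simple; a minimal sketch that assumes only a node-to-stake mapping, and ignores coin age and the mitigations above:
\begin{verbatim}
import random

def pick_validator(stakes: dict) -> str:
    # Choose a node at random, with probability
    # proportional to its stake.
    nodes = list(stakes)
    weights = [stakes[n] for n in nodes]
    return random.choices(nodes, weights=weights, k=1)[0]
\end{verbatim}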
\medskip
\noindent\textbf{Federation.} A federated blockchain is composed of a number of nodes operating under rules set for or by the group.
Each member of a federation typically has an equal vote, and each federation has its own rules about who can join as a voting node.
In most designs, a majority or 2/3 of the voting nodes must agree for a transaction or block to be accepted (a “quorum”).
Federations may have any number of voting nodes.
More nodes mean higher latency, and fewer nodes mean the federation is not as decentralized as many would like.
Besides voting nodes, other nodes may have permission to issue assets, transfer assets, read, and so on (super-peer P2P network).
Membership rules for voting nodes can vary widely between models.
In the (pre-acquisition) Hyperledger model, the requirement was having a TLD and SSL certificate \cite{bitsmith2014hyperledger}.
In the original Stellar model, membership was based on a social network, until it was forked \cite{kim2014safety}.
In Tendermint \cite{kwon2014tendermint}, Slasher \cite{buterin2014slasher, buterin2014slasher_ghost}, and Casper \cite{zamfir2015casper}, anyone could join by posting a fixed bond as a security deposit, which would be lost if the voting node were to act maliciously\footnote{These are arguably proof of stake. It depends whether one’s definition of “proof of stake” means “has a stake, anywhere” versus “degree of voting power is a function of amount at stake”.}.
Membership rules can directly affect the size of the federation.
For example, in Tendermint, the lower the security deposit (bond), the more voting nodes there are likely to be.
Federations imply that, to be a voting node, one must reveal one’s identity.
This makes them less suitable where censorship resistance is a key design goal.
This is different from POW and POS.
\subsection{Consensus Mashups}
The base consensus approaches described above can be creatively combined.
\medskip
\noindent\textbf{Hierarchy of POW—Centralized.} Big Bitcoin exchanges operate their own internal DBs of transactions, then synchronize a summary of transactions with the Bitcoin blockchain periodically.
This is similar to how stock exchange “dark pools” operate—financial institutions make trades outside the public stock exchange, and periodically synchronize with the public exchange.
\medskip
\noindent\textbf{Hierarchy of Small Federation—Big Federation.} An example is AI Coin \cite{aicoin}.
The top level has 5 power nodes with greater influence, and the bottom level has 50 nodes with less influence.
\medskip
\noindent\textbf{Hierarchy of POW—Federation.} An example is Factom \cite{factom}.
The bottom level is a document store; document hashes are then grouped together in higher and higher levels, Merkle-tree style; and at the top level the Merkle tree root is stored on the Bitcoin blockchain.
\medskip
\noindent\textbf{POW then POS.} An example is the Ethereum rollout plan.
The Ethereum team realized that if only a few coins were in circulation in a POS model, it would be easy for a bad actor to dominate by buying all the coins, and that they needed more time to develop an efficient yet trustworthy POS algorithm.
Therefore, Ethereum started with POW mining to build the network and get coins into wider circulation, and plans to switch once there are sufficient coins and the POS approach is ready.
\medskip
\noindent\textbf{X then Centralized then X’.} This model is applied when the consensus algorithm being used gets broken.
Voting is temporarily handled by the project’s managing entity until a fixed version of the algorithm is developed and released.
This happened with Stellar. Stellar started as a federation but the project was split in a fork \cite{kim2014safety}.
Stellar ran on a single server in early 2015 while a new consensus protocol \cite{mazieres2015stellar} was developed and released in April 2015 \cite{kim2014stellar}.
The new version is like a federation, but each node chooses which other nodes to trust for validation \cite{mazieres2015stellar}.
Another example of this model is Peercoin, one of the first POS variants. After a fork in early 2015, the developer had to sign transactions until a fix was released \cite{peercoin}.
\subsection{Engineering Optimizations}
This section reviews some of the possible steps to improve the efficiency and throughput of existing blockchain models.
\medskip
\noindent\textbf{Shrink Problem Scope.} This range of optimizations aims to make the blockchain itself smaller.
One trick to minimize the size of a blockchain is to record only unspent outputs.
This works if the history of transactions is not important, but in many blockchain applications, from art provenance to supply chain tracking, history is crucial.
Another trick, called Simple Payment Verification (SPV), is to store only block headers rather than the full block.
It allows a node to check whether a given transaction is in a block without actually holding the transactions.
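The mechanism behind SPV is the Merkle proof: given only the Merkle root stored in a block header, a node can verify that a transaction is included using a logarithmic-size branch of sibling hashes. A minimal sketch, simplified from Bitcoin (which actually uses double SHA256):
\begin{verbatim}
import hashlib

def verify_merkle_proof(tx_hash, branch, root):
    # branch: list of (sibling_hash, sibling_is_left)
    # pairs, ordered from leaf to root.
    h = tx_hash
    for sibling, sibling_is_left in branch:
        pair = sibling + h if sibling_is_left else h + sibling
        h = hashlib.sha256(pair).digest()
    return h == root
\end{verbatim}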
Mobile devices typically use Bitcoin SPV wallets. Cryptonite is an example that combines several of these tricks \cite{cryptonite}.
These optimizations make it easier for nodes to participate in the network, but ultimately do not solve the core consensus problem.
\medskip
\noindent\textbf{Different POW hashing algorithm.} This kind of optimization seeks to make the hashing work performed by the network more efficient.
Litecoin is one of several models using scrypt hashing instead of Bitcoin’s SHA256 hashing, requiring about 2/3 less computational effort than SHA256.
This efficiency gain does not improve scalability, because it still creates a hash power arms race between miners.
\medskip
\noindent\textbf{Compression.} Data on a blockchain has a particular structure, so it is not out of the question that the right compression algorithm could reduce size by one or more orders of magnitude.
For a simple transaction ledger this is a nice trick without much compromise, although compression typically hinders the ability to efficiently query a database.
\medskip
\noindent\textbf{Better BFT Algorithm.} The first solution to the Byzantine Generals problem was published in 1980 \cite{pease1980reaching}, and since that time many proposals have been published at distributed computing conferences and other venues.
Modern examples include Aardvark \cite{clement2009making} and Redundant Byzantine Fault Tolerance (RBFT) \cite{aublin2013rbft}.
These proposals are certainly useful, but on their own they do not address the need for Sybil tolerance (the “attack of the clones” problem).
\medskip
\noindent\textbf{Multiple Independent Chains.} Here, the idea is to have multiple blockchains, with each chain focusing on a particular set of users or use cases and implementing a model best suited to those use cases.
The countless centralized DBs in active use operate on this principle right now; each has a specific use case.
We should expect the same to happen with blockchains, especially privately deployed ones but also public ones.
It is the blockchain version of the Internet’s Rule 34: “If it exists, there is a blockchain of it.”
For public examples, you could use Ethereum if you want decentralized processing, Primecoin if you want POW to be slightly more helpful to the world, and Dogecoin if you want much cute, very meme.
For private examples, organizations and consortiums will simply deploy blockchains according to their specific needs, just as they currently deploy DBs and other compute infrastructure.
A challenge lies in security: if the computational power in a POW blockchain or coin value in a POS blockchain is too low, they can be overwhelmed by malicious actors.
However, in a federation model, this could be workable, assuming that an individual blockchain can meet the specific use case’s performance goals, in particular throughput and latency.
\medskip
\noindent\textbf{Multiple Independent Chains with Shared Resources for Security.} Pegged sidechains are the most famous example, where mining among chains is effectively merged \cite{back2002hashcash}.
SuperNET \cite{galt2014supernet} and Ethereum’s hypercubes and multichain proposals \cite{buterin2014hypercubes} fit in this category.
However, if the goal is simply to get a DB to run at scale, breaking the DB into many heterogeneous sub-chains adds cognitive and engineering complexity and introduces risk.
\medskip
\noindent\textbf{$\dots$and more}. The models described above are just a sampling.
There continue to be innovations (and controversy \cite{bitcoin-blocksize, popper2016bitcoin_crisis}).
For example, a proposed change to the Bitcoin blockchain called Bitcoin-NG \cite{eyal2015bitcoin} aims to reduce the time to first confirmation while minimizing all other changes to the Bitcoin blockchain design.
The Bitcoin roadmap \cite{bitcoin2015capacity, maxwell2015capacity} contains many other ideas, most notably segregated witness \cite{wuille_segregated_witness}.
|
{"hexsha": "9444dada0e1c3bfd3a78c3078bedc256b6dc8ecd", "size": 11041, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/appendix02_blockchain_scalability.tex", "max_stars_repo_name": "bigchaindb/whitepaper", "max_stars_repo_head_hexsha": "ef42daf8c06bea0bb04747963b3ae23f667ea338", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2016-02-19T15:36:39.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-29T03:12:55.000Z", "max_issues_repo_path": "src/appendix02_blockchain_scalability.tex", "max_issues_repo_name": "bigchaindb/whitepaper", "max_issues_repo_head_hexsha": "ef42daf8c06bea0bb04747963b3ae23f667ea338", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2016-02-22T16:16:56.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-16T09:50:51.000Z", "max_forks_repo_path": "src/appendix02_blockchain_scalability.tex", "max_forks_repo_name": "bigchaindb/whitepaper", "max_forks_repo_head_hexsha": "ef42daf8c06bea0bb04747963b3ae23f667ea338", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2016-02-23T16:25:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-19T00:08:45.000Z", "avg_line_length": 86.2578125, "max_line_length": 463, "alphanum_fraction": 0.8067204058, "num_tokens": 2421}
|
[STATEMENT]
lemma isCont_If_ge:
fixes a :: "'a :: linorder_topology"
assumes "continuous (at_left a) g" and f: "(f \<longlongrightarrow> g a) (at_right a)"
shows "isCont (\<lambda>x. if x \<le> a then g x else f x) a" (is "isCont ?gf a")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. isCont (\<lambda>x. if x \<le> a then g x else f x) a
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. isCont (\<lambda>x. if x \<le> a then g x else f x) a
[PROOF STEP]
have g: "(g \<longlongrightarrow> g a) (at_left a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (g \<longlongrightarrow> g a) (at_left a)
[PROOF STEP]
using assms continuous_within
[PROOF STATE]
proof (prove)
using this:
continuous (at_left a) g
(f \<longlongrightarrow> g a) (at_right a)
continuous (at ?x within ?s) ?f = (?f \<longlongrightarrow> ?f ?x) (at ?x within ?s)
goal (1 subgoal):
1. (g \<longlongrightarrow> g a) (at_left a)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
(g \<longlongrightarrow> g a) (at_left a)
goal (1 subgoal):
1. isCont (\<lambda>x. if x \<le> a then g x else f x) a
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. isCont (\<lambda>x. if x \<le> a then g x else f x) a
[PROOF STEP]
unfolding isCont_def continuous_within
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. if x \<le> a then g x else f x) \<midarrow>a\<rightarrow> (if a \<le> a then g a else f a)
[PROOF STEP]
proof (intro filterlim_split_at; simp)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. ((\<lambda>x. if x \<le> a then g x else f x) \<longlongrightarrow> g a) (at_left a)
2. ((\<lambda>x. if x \<le> a then g x else f x) \<longlongrightarrow> g a) (at_right a)
[PROOF STEP]
show "(?gf \<longlongrightarrow> g a) (at_left a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>x. if x \<le> a then g x else f x) \<longlongrightarrow> g a) (at_left a)
[PROOF STEP]
by (subst filterlim_cong[OF refl refl, where g=g]) (simp_all add: eventually_at_filter less_le g)
[PROOF STATE]
proof (state)
this:
((\<lambda>x. if x \<le> a then g x else f x) \<longlongrightarrow> g a) (at_left a)
goal (1 subgoal):
1. ((\<lambda>x. if x \<le> a then g x else f x) \<longlongrightarrow> g a) (at_right a)
[PROOF STEP]
show "(?gf \<longlongrightarrow> g a) (at_right a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>x. if x \<le> a then g x else f x) \<longlongrightarrow> g a) (at_right a)
[PROOF STEP]
by (subst filterlim_cong[OF refl refl, where g=f]) (simp_all add: eventually_at_filter less_le f)
[PROOF STATE]
proof (state)
this:
((\<lambda>x. if x \<le> a then g x else f x) \<longlongrightarrow> g a) (at_right a)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
isCont (\<lambda>x. if x \<le> a then g x else f x) a
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1190, "file": null, "length": 13}
|
import Base: show, string, typejoin
abstract type AbstractLogicTerm end
abstract type SententialTerm{T<:AbstractLogicTerm} <: AbstractLogicTerm end
abstract type JunctionTerm{T<:AbstractLogicTerm} <: SententialTerm{T} end
abstract type QuantifierTerm{T<:AbstractLogicTerm} <: SententialTerm{T} end
# we make Variable <: SententialTerm to enable FOOL extensions
struct Variable <: SententialTerm{AbstractLogicTerm}
name::String
end
Variable(name::AbstractString) = Variable(String(name))
Variable(v::Variable) = Variable(v.name)
Variable(name::AbstractString, t::Symbol) = Variable(String(name))
Variable(v::Variable, t::Symbol) = Variable(v.name)
string(v::Variable) = v.name
struct Functor <: AbstractLogicTerm
name::String
end
string(f::Functor) = f.name
show(io::IO, q::AbstractLogicTerm) = print(io, string(q))
struct FunctionTerm <: AbstractLogicTerm
name::Functor
args::Vector{Union{FunctionTerm, Variable}}
end
FunctionTerm(n::AbstractString, args) = FunctionTerm(Functor(string(n)), args)
ConstantTerm(n::Functor) = FunctionTerm(n, Variable[])
ConstantTerm(n) = FunctionTerm(Functor(string(n)), Variable[])
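# Render a nonempty vector xs as head * xs[1] * sep * xs[2] * ... * tail;
# callers must not pass an empty vector.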
function listString(xs::AbstractVector, head::AbstractString,
sep::AbstractString, tail::AbstractString)
foldl((x,y) -> x*sep*string(y), xs[2:end]; init=head*string(xs[1]))*tail
end
struct PredicateTerm{T<:AbstractLogicTerm} <: SententialTerm{T}
name::Functor
args::Vector{T}
end
PredicateTerm(n::String, args) = PredicateTerm(Functor(n), args)
equalTerm(t1::Union{PredicateTerm,FunctionTerm,Variable},
t2::Union{PredicateTerm,FunctionTerm,Variable}) =
PredicateTerm(Functor("="), [t1,t2])
string(f::Union{FunctionTerm,PredicateTerm}) =
isempty(f.args) ? string(f.name) : listString(f.args, string(f.name)*"(", ",", ")")
struct NegationTerm{T<:AbstractLogicTerm} <: SententialTerm{T}
scope::T
end
string(n::NegationTerm) = "¬(" * string(n.scope) * ")"
const LiteralTerm = Union{PredicateTerm{T}, NegationTerm{PredicateTerm{T}}} where {T<:Union{Variable,FunctionTerm}}
Base.typejoin(::Type{PredicateTerm},::Type{NegationTerm{PredicateTerm}}) = LiteralTerm
struct AndTerm{T<:AbstractLogicTerm} <: JunctionTerm{T}
juncts::Vector{T}
end
AndTerm(s::T, ss...) where {T<:SententialTerm} =
AndTerm{foldr((si,T)->typejoin(T,typeof(si)), ss; init=T)}([s; collect(ss)])
string(a::AndTerm) = listString(a.juncts, "(", ")∧(", ")")
struct OrTerm{T<:AbstractLogicTerm} <: JunctionTerm{T}
juncts::Vector{T}
end
OrTerm(s::T, ss...) where {T<:SententialTerm} =
OrTerm{foldr((si,T)->typejoin(T,typeof(si)), ss; init=T)}([s; collect(ss)])
string(a::OrTerm) = listString(a.juncts, "(", ")∨(", ")")
struct EQuantifierTerm{T<:AbstractLogicTerm} <: QuantifierTerm{T}
variables::Vector{Variable}
scope::T
end
function EQuantifierTerm(args...)
x = collect(args); EQuantifierTerm{typeof(x[end])}(x[1:end-1], x[end])
end
string(q::EQuantifierTerm) = "∃" * listString(q.variables, "", ",", "") * " " * string(q.scope)
struct AQuantifierTerm{T<:AbstractLogicTerm} <: QuantifierTerm{T}
variables::Vector{Variable}
scope::T
end
function AQuantifierTerm(args...)
x = collect(args); AQuantifierTerm{typeof(x[end])}(x[1:end-1], x[end])
end
string(q::AQuantifierTerm) = "∀" * listString(q.variables, "", ",", "") * " " * string(q.scope)
for T in (Variable, FunctionTerm, PredicateTerm, NegationTerm, AndTerm, OrTerm,
AQuantifierTerm,EQuantifierTerm)
@eval rootType(_::$T) = $T
end
pairedQuantifierType(_::AndTerm) = AQuantifierTerm
pairedQuantifierType(_::OrTerm) = EQuantifierTerm
"""
freeVar(a::AbstractLogicTerm)
Return a Vector{Variable} of the free variables in a.
"""
freeVar(v::Variable) = [v]
freeVar(fp::Union{FunctionTerm,PredicateTerm}) =
foldl((u,x) -> union!(u, freeVar(x)), fp.args; init=Variable[])
freeVar(n::NegationTerm) = freeVar(n.scope)
freeVar(j::JunctionTerm) = foldl((u,x) -> union!(u, freeVar(x)), j.juncts; init=Variable[])
freeVar(q::QuantifierTerm) = setdiff(freeVar(q.scope), q.variables)
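# A small usage sketch (assuming the constructors above):
#   string(AQuantifierTerm(Variable("x"), PredicateTerm("p", [Variable("x")]))) == "∀x p(x)"
#   freeVar(PredicateTerm("p", [Variable("x")])) returns [Variable("x")]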
|
{"hexsha": "0e93bd26278d728c0b58f1ebd93abecd549c0238", "size": 4003, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/types.jl", "max_stars_repo_name": "william-macready/FirstOrderLogic.jl", "max_stars_repo_head_hexsha": "6a1a7ee0c5ad4a6fcfa34e907d39ba9f61ab3b57", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/types.jl", "max_issues_repo_name": "william-macready/FirstOrderLogic.jl", "max_issues_repo_head_hexsha": "6a1a7ee0c5ad4a6fcfa34e907d39ba9f61ab3b57", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/types.jl", "max_forks_repo_name": "william-macready/FirstOrderLogic.jl", "max_forks_repo_head_hexsha": "6a1a7ee0c5ad4a6fcfa34e907d39ba9f61ab3b57", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4247787611, "max_line_length": 115, "alphanum_fraction": 0.7112165876, "num_tokens": 1160}
|
using Interp1d
using Documenter
DocMeta.setdocmeta!(Interp1d, :DocTestSetup, :(using Interp1d); recursive=true)
makedocs(;
modules=[Interp1d],
authors="Atsushi Sakai <asakai.amsl+github@gmail.com> and contributors",
repo="https://github.com/AtsushiSakai/Interp1d.jl/blob/{commit}{path}#{line}",
sitename="Interp1d.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://AtsushiSakai.github.io/Interp1d.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
"API Reference" => "api_reference.md",
],
)
deploydocs(;
repo="github.com/AtsushiSakai/Interp1d.jl",
)
|
{"hexsha": "86c079ee4a9d0842d0ea30e4ec60e9934010be5c", "size": 678, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "AtsushiSakai/Interp1d.jl", "max_stars_repo_head_hexsha": "0253a2e5669a2b65af0456d8ff161644efae37eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-29T06:08:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-21T07:56:59.000Z", "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "AtsushiSakai/Interp1d.jl", "max_issues_repo_head_hexsha": "0253a2e5669a2b65af0456d8ff161644efae37eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "AtsushiSakai/Interp1d.jl", "max_forks_repo_head_hexsha": "0253a2e5669a2b65af0456d8ff161644efae37eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.12, "max_line_length": 82, "alphanum_fraction": 0.6474926254, "num_tokens": 216}
|
def rdm_concat_df(path, outfile=None, prefix=None, addata=None, columns=None):
    '''Collect all RDMs found in a given directory into one
    dictionary and pickle it to disk.'''
global dict_rdms
global DefaultListOrderedDict
from os.path import join as opj
import sys
from glob import glob
from scipy.io.matlab import loadmat
import pandas as pd
from collections import OrderedDict
import pickle
    if prefix is None:
        list_rdms = glob(opj(path, '*.csv'))
    else:
        list_rdms = glob(opj(path, prefix + '*.csv'))
class DefaultListOrderedDict(OrderedDict):
def __missing__(self,k):
self[k] = []
return self[k]
    keys = ['id', 'rdm']
    ids = []
    rdms = []
    for file in list_rdms:
        ids.append(file[(file.rfind('/') + 1):file.rfind('.')])
        rdms.append(pd.read_csv(file))
    dict_rdms = DefaultListOrderedDict()
    for key in keys:
        for id_rdm in enumerate(ids):
            if key == 'id':
                dict_rdms[key].append(ids[id_rdm[0]])
            elif key == 'rdm':
                dict_rdms[key].append(rdms[id_rdm[0]])
if addata is None:
print('no additional data added')
else:
if columns is None:
sys.exit('adding additional data requires the definition of columns to include as additional data')
else:
addata_df=pd.read_csv(addata)
addata_df=addata_df[columns]
for value in addata_df:
for index, row_value in addata_df.iterrows():
dict_rdms[value].append(row_value[value])
    if outfile is None:
        outfile = 'rdm'
    print('output file is saved as `%s.pkl` in your current directory' % outfile)
    with open('%s.pkl' % outfile, 'wb') as pkl_rdm:
        pickle.dump(dict_rdms, pkl_rdm)
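# Example usage (hypothetical paths; the listed columns must exist in the addata CSV):
# rdm_concat_df('/data/rdms/', outfile='group_rdms', prefix='sub-',
#               addata='/data/participants.csv', columns=['age', 'group'])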
|
{"hexsha": "e6bd038807f88a0fab7de019838865578527aebc", "size": 2062, "ext": "py", "lang": "Python", "max_stars_repo_path": "urial/utils/rdm_concat_df.py", "max_stars_repo_name": "MirjamSchneider/URIAL", "max_stars_repo_head_hexsha": "be3edb5299dc812f4e4fa75bcb9d71c853209c8a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "urial/utils/rdm_concat_df.py", "max_issues_repo_name": "MirjamSchneider/URIAL", "max_issues_repo_head_hexsha": "be3edb5299dc812f4e4fa75bcb9d71c853209c8a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "urial/utils/rdm_concat_df.py", "max_forks_repo_name": "MirjamSchneider/URIAL", "max_forks_repo_head_hexsha": "be3edb5299dc812f4e4fa75bcb9d71c853209c8a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.884057971, "max_line_length": 111, "alphanum_fraction": 0.5974781765, "include": true, "reason": "from scipy", "num_tokens": 509}
|
using Bootstrap
## COPY
function Base.copy(d::NamedTuple{(:X, :Y), Tuple{FreqTab, FreqTab}})
return (X = copy(d.X), Y = copy(d.Y))
end
function Base.copy(t::FreqTab)
FreqTab(
copy(t.table),
copy(t.raw),
copy(t.interval),
merge(t.stats)
)
end
function Base.copy(x::SGEquateResult)
SGEquateResult(
x.method,
copy(x.table),
isnothing(x.estimates) ? nothing : merge(x.estimates),
copy(x.data)
)
end
function Base.copy(d::NamedTuple{(:X, :Y), Tuple{NEATFreqTab, NEATFreqTab}})
return (X = copy(d.X), Y = copy(d.Y))
end
function Base.copy(t::NEATFreqTab)
NEATFreqTab(
copy(t.tableX),
copy(t.tableV),
copy(t.rawX),
copy(t.rawV),
copy(t.intervalX),
copy(t.intervalV),
copy(t.marginal),
merge(t.statsX),
merge(t.statsV),
)
end
function Base.copy(x::NEATEquateResult)
NEATEquateResult(
x.method,
copy(x.table),
copy(x.synthetic),
isnothing(x.estimates) ? nothing : merge(x.estimates),
copy(x.data)
)
end
##
function initialize!(t::FreqTab)
t.table.freq .= zero(eltype(t.table.freq))
t.table.cumfreq .= zero(eltype(t.table.cumfreq))
t.table.prob .= zero(eltype(t.table.prob))
t.table.cumprob .= zero(eltype(t.table.cumprob))
end
function initialize!(t::NEATFreqTab)
# X
t.tableX.freq .= zero(eltype(t.tableX.freq))
t.tableX.cumfreq .= zero(eltype(t.tableX.cumfreq))
t.tableX.prob .= zero(eltype(t.tableX.prob))
t.tableX.cumprob .= zero(eltype(t.tableX.cumprob))
# V
t.tableV.freq .= zero(eltype(t.tableV.freq))
t.tableV.cumfreq .= zero(eltype(t.tableV.cumfreq))
t.tableV.prob .= zero(eltype(t.tableV.prob))
t.tableV.cumprob .= zero(eltype(t.tableV.cumprob))
end
"""
recalculate!(t::FreqTab)
Re-calculate `FreqTab` using the renewed raw vector. `interval` and the scale in the table will not be changed.
"""
function recalculate!(t::FreqTab)
t.stats = basicstats(t.raw)
initialize!(t)
freq = map(j -> count(i -> i == j, t.raw), t.table.scale)
    cumfreq = cumsum(freq)
    prob = freq ./ sum(freq)
    cumprob = cumfreq ./ sum(freq)
for (i, s) in enumerate(freq)
loc = [i]
t.table[loc, :freq] .= freq[i]
t.table[loc, :cumfreq] .= cumfreq[i]
t.table[loc, :prob] .= prob[i]
t.table[loc, :cumprob] .= cumprob[i]
end
return
end
function recalculate!(t::NEATFreqTab, form = ["X", "V"])
t.statsX = basicstats(t.rawX)
t.statsV = basicstats(t.rawV)
initialize!(t)
if "X" ∈ form
# X
freq = map(j -> count(i -> i == j, t.rawX), t.tableX.scale)
        cumfreq = cumsum(freq)
        prob = freq ./ sum(freq)
        cumprob = cumfreq ./ sum(freq)
for (i, s) in enumerate(freq)
loc = [i]
t.tableX[loc, :freq] .= freq[i]
t.tableX[loc, :cumfreq] .= cumfreq[i]
t.tableX[loc, :prob] .= prob[i]
t.tableX[loc, :cumprob] .= cumprob[i]
end
end
if "V" ∈ form
# V
freq = map(j -> count(i -> i == j, t.rawV), t.tableV.scale)
        cumfreq = cumsum(freq)
        prob = freq ./ sum(freq)
        cumprob = cumfreq ./ sum(freq)
for (i, s) in enumerate(freq)
loc = [i]
t.tableV[loc, :freq] .= freq[i]
t.tableV[loc, :cumfreq] .= cumfreq[i]
t.tableV[loc, :prob] .= prob[i]
t.tableV[loc, :cumprob] .= cumprob[i]
end
end
return
end
# Attach methods to functions in Bootstrap.jl
function Bootstrap.draw!(x::NamedTuple{(:X, :Y), Tuple{T, T}}, o::NamedTuple{(:X, :Y), Tuple{T, T}}) where {T <: FreqTab}
idx = sample(examineeID(x.X.raw), length(x.X.raw))
idy = sample(examineeID(x.Y.raw), length(x.Y.raw))
for (to, from) in enumerate(idx)
o.X.raw[to] = x.X.raw[from]
end
for (to, from) in enumerate(idy)
o.Y.raw[to] = x.Y.raw[from]
end
# Re-evaluate freqtab
recalculate!(o.X)
recalculate!(o.Y)
end
function Bootstrap.draw!(x::NamedTuple{(:X, :Y), Tuple{T, T}}, o::NamedTuple{(:X, :Y), Tuple{T, T}}) where {T <: NEATFreqTab}
idx = sample(examineeID(x.X.rawX), length(x.X.rawX))
idy = sample(examineeID(x.Y.rawX), length(x.Y.rawX))
for (to, from) in enumerate(idx)
o.X.rawX[to] = x.X.rawX[from]
o.X.rawV[to] = x.X.rawV[from]
end
for (to, from) in enumerate(idy)
o.Y.rawX[to] = x.Y.rawX[from]
o.Y.rawV[to] = x.Y.rawV[from]
end
# Re-evaluate freqtab
recalculate!(o.X)
recalculate!(o.Y)
end
function Base.size(x::NamedTuple{(:X, :Y), Tuple{T, T}}) where {T <: FreqTab}
"X $(x.X.stats.N)", "Y $(x.Y.stats.N)"
end
function Base.size(x::NamedTuple{(:X, :Y), Tuple{T, T}}) where {T <: NEATFreqTab}
"X.Main $(x.X.statsX.N)", "X.Common $(x.X.statsV.N)", "Y.Main $(x.Y.statsX.N)", "Y.Main $(x.Y.statsV.N)"
end
# Standard Equating method
examineeID(X) = range(1, length = length(X))
"""
bootSE()
Estimate the bootstrap standard error of equating (SEE).
The SEE is defined as the standard deviation of the equating results (e.g. in linear equating, of the slope and intercept).
The SEE arises from random error in the data; its magnitude is determined by the data collection design, the equating method and the sample size.
"""
function bootSE()
end
|
{"hexsha": "ba3f075a09695d17ab2529e5e15238a98b287a0a", "size": 5511, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/SEE.jl", "max_stars_repo_name": "takuizum/Equate.jl", "max_stars_repo_head_hexsha": "8ba0055ef69fc086fd4b39cff545a623d5a793fe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-10T14:48:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-10T14:48:51.000Z", "max_issues_repo_path": "src/SEE.jl", "max_issues_repo_name": "takuizum/Equate.jl", "max_issues_repo_head_hexsha": "8ba0055ef69fc086fd4b39cff545a623d5a793fe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-01-06T00:14:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-27T19:11:22.000Z", "max_forks_repo_path": "src/SEE.jl", "max_forks_repo_name": "takuizum/Equate.jl", "max_forks_repo_head_hexsha": "8ba0055ef69fc086fd4b39cff545a623d5a793fe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5544041451, "max_line_length": 171, "alphanum_fraction": 0.5831972419, "num_tokens": 1643}
|
from utils import *
from sample_generator import *
from cvxopt import matrix
from l1 import l1
import numpy as np
import math
import os
def denoise_step(sample, H=3, dn1=1., dn2=1.):
def get_denoise_value(idx):
start_idx, end_idx = get_neighbor_idx(len(sample), idx, H)
idxs = np.arange(start_idx, end_idx)
weight_sample = sample[idxs]
weights = np.array(list(map(lambda j: bilateral_filter(j, idx, sample[j], sample[idx], dn1, dn2), idxs)))
return np.sum(weight_sample * weights)/np.sum(weights)
idx_list = np.arange(len(sample))
denoise_sample = np.array(list(map(get_denoise_value, idx_list)))
return denoise_sample
def trend_extraction(sample, season_len, reg1=10., reg2=0.5):
sample_len = len(sample)
season_diff = sample[season_len:] - sample[:-season_len]
assert len(season_diff) == (sample_len - season_len)
q = np.concatenate([season_diff, np.zeros([sample_len*2-3])])
q = np.reshape(q, [len(q),1])
q = matrix(q)
M = get_toeplitz([sample_len-season_len, sample_len-1], np.ones([season_len]))
D = get_toeplitz([sample_len-2, sample_len-1], np.array([1,-1]))
P = np.concatenate([M, reg1*np.eye(sample_len-1), reg2*D], axis=0)
P = matrix(P)
delta_trends = l1(P,q)
relative_trends = get_relative_trends(delta_trends)
return sample-relative_trends, relative_trends
def seasonality_extraction(sample, season_len=10, K=2, H=5, ds1=50., ds2=1.):
sample_len = len(sample)
idx_list = np.arange(sample_len)
def get_season_value(idx):
idxs = get_season_idx(sample_len, idx, season_len, K, H)
if idxs.size == 0:
return sample[idx]
weight_sample = sample[idxs]
#t_idxs = [idx - (int((idx -j)/season_len)+1)*season_len for j in idxs]
#weights = np.array(list(map(lambda j, t: bilateral_filter(j, t, sample[j], sample[t], ds1, ds2), idxs, t_idxs)))
weights = np.array(list(map(lambda j: bilateral_filter(j, idx, sample[j], sample[idx], ds1, ds2), idxs)))
season_value = np.sum(weight_sample * weights)/np.sum(weights)
return season_value
seasons_tilda = np.array(list(map(get_season_value, idx_list)))
return seasons_tilda
def adjustment(sample, relative_trends, seasons_tilda, season_len):
num_season = int(len(sample)/season_len)
trend_init = np.mean(seasons_tilda[:season_len*num_season])
trends_hat = relative_trends + trend_init
seasons_hat = seasons_tilda - trend_init
remainders_hat = sample - trends_hat - seasons_hat
return [trends_hat, seasons_hat, remainders_hat]
def check_converge_criteria(prev_remainders, remainders):
diff = np.sqrt(np.mean(np.square(remainders-prev_remainders)))
if diff < 1e-10:
return True
else:
return False
def _RobustSTL(input, season_len, reg1=10.0, reg2= 0.5, K=2, H=5, dn1=1., dn2=1., ds1=50., ds2=1.):
    '''
    args:
    - input: 1-dimensional input series
    - season_len: length of one seasonal period, in samples
    - reg1: first order regularization parameter for trend extraction
    - reg2: second order regularization parameter for trend extraction
    - K: number of past season samples in seasonality extraction
    - H: number of neighbors in seasonality extraction
    - dn1, dn2 : hyperparameters of the bilateral filter in the denoising step.
    - ds1, ds2 : hyperparameters of the bilateral filter in the seasonality extraction step.
    '''
sample = input
    trial = 1
    while True:
#step1: remove noise in input via bilateral filtering
denoise_sample =\
denoise_step(sample, H, dn1, dn2)
#step2: trend extraction via LAD loss regression
detrend_sample, relative_trends =\
trend_extraction(denoise_sample, season_len, reg1, reg2)
#step3: seasonality extraction via non-local seasonal filtering
seasons_tilda =\
seasonality_extraction(detrend_sample, season_len, K, H, ds1, ds2)
#step4: adjustment of trend and season
trends_hat, seasons_hat, remainders_hat =\
adjustment(sample, relative_trends, seasons_tilda, season_len)
#step5: repreat step1 - step4 until remainders are converged
if trial != 1:
converge = check_converge_criteria(previous_remainders, remainders_hat)
if converge:
return [input, trends_hat, seasons_hat, remainders_hat]
trial+=1
print("[!] ", trial, "iteration will strat")
previous_remainders = remainders_hat[:]
sample = trends_hat + seasons_hat + remainders_hat
return [input, trends_hat, seasons_hat, remainders_hat]
def RobustSTL(input, season_len, reg1=10.0, reg2= 0.5, K=2, H=5, dn1=1., dn2=1., ds1=50., ds2=1.):
if np.ndim(input) < 2:
return _RobustSTL(input, season_len, reg1, reg2, K, H, dn1, dn2, ds1, ds2)
elif np.ndim(input)==2 and np.shape(input)[1] ==1:
return _RobustSTL(input[:,0], season_len, reg1, reg2, K, H, dn1, dn2, ds1, ds2)
elif np.ndim(input)==2 or np.ndim(input)==3:
        if np.ndim(input)==3 and np.shape(input)[2] > 1:
            raise ValueError("[!] Valid input series shape: [# of Series, # of Time Steps] or [# of series, # of Time Steps, 1]")
elif np.ndim(input)==3:
input = input[:,:,0]
num_series = np.shape(input)[0]
input_list = [input[i,:] for i in range(num_series)]
from pathos.multiprocessing import ProcessingPool as Pool
p = Pool(num_series)
def run_RobustSTL(_input):
return _RobustSTL(_input, season_len, reg1, reg2, K, H, dn1, dn2, ds1, ds2)
result = p.map(run_RobustSTL, input_list)
return result
    else:
        raise ValueError("[!] input series error")
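# Example usage (hypothetical synthetic series; season_len must match the data):
# import numpy as np
# t = np.arange(400)
# series = 0.01 * t + np.sin(2 * np.pi * t / 50) + 0.1 * np.random.randn(400)
# sample, trend, season, remainder = RobustSTL(series, season_len=50)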
|
{"hexsha": "86a9b57311f32b13c80f2e042ca2e90ec2edc63e", "size": 5809, "ext": "py", "lang": "Python", "max_stars_repo_path": "RobustSTL.py", "max_stars_repo_name": "leezhi403/LeeDoYup-RobustSTL", "max_stars_repo_head_hexsha": "69ca042ab53ea204a5c17571eb460afcced3937a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 195, "max_stars_repo_stars_event_min_datetime": "2019-02-15T14:11:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T08:02:54.000Z", "max_issues_repo_path": "RobustSTL.py", "max_issues_repo_name": "leezhi403/LeeDoYup-RobustSTL", "max_issues_repo_head_hexsha": "69ca042ab53ea204a5c17571eb460afcced3937a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2019-03-04T01:35:40.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-28T13:35:08.000Z", "max_forks_repo_path": "RobustSTL.py", "max_forks_repo_name": "leezhi403/LeeDoYup-RobustSTL", "max_forks_repo_head_hexsha": "69ca042ab53ea204a5c17571eb460afcced3937a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 46, "max_forks_repo_forks_event_min_datetime": "2019-02-15T15:43:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T08:02:58.000Z", "avg_line_length": 39.7876712329, "max_line_length": 121, "alphanum_fraction": 0.6522637287, "include": true, "reason": "import numpy", "num_tokens": 1579}
|
import numpy as np
import matplotlib.pyplot as plt
xs = np.fromfile('utcs.dat', dtype=np.uint32)
ys = np.fromfile('values.dat', dtype=np.uint64)
print('Loaded')
# sample matching (time, balance) pairs so each point keeps its x-y correspondence
idx = np.random.choice(len(xs), size=1000000)
xs = xs[idx]
ys = ys[idx] / 1e8
xmin = xs.min()
ymin = ys.min()
xmax = xs.max()
ymax = ys.max()
print('Sampled')
plt.scatter(xs, ys, cmap=plt.cm.YlOrRd_r)
plt.axis([xmin, xmax, ymin, ymax])
plt.title('Bitcoin Balances Over Time')
print('Ready')
plt.show()
|
{"hexsha": "3df94df13b78860764d893dd3f797accc90c4b8f", "size": 478, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/heatmap.py", "max_stars_repo_name": "NewProggie/fast-dat-parser", "max_stars_repo_head_hexsha": "70c28f16cb5ee33f9cb56c2db81069d27eaca0c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2018-05-15T15:04:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T14:29:11.000Z", "max_issues_repo_path": "scripts/heatmap.py", "max_issues_repo_name": "NewProggie/fast-dat-parser", "max_issues_repo_head_hexsha": "70c28f16cb5ee33f9cb56c2db81069d27eaca0c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2018-05-21T15:16:04.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-09T06:47:09.000Z", "max_forks_repo_path": "scripts/heatmap.py", "max_forks_repo_name": "NewProggie/fast-dat-parser", "max_forks_repo_head_hexsha": "70c28f16cb5ee33f9cb56c2db81069d27eaca0c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2018-06-01T13:40:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T20:04:49.000Z", "avg_line_length": 19.12, "max_line_length": 47, "alphanum_fraction": 0.690376569, "include": true, "reason": "import numpy", "num_tokens": 146}
|
import os
import h5py
import numpy as np
import vigra
import json
from math import sqrt
from cremi.evaluation import NeuronIds
from cremi import Volume
from skunkworks.postprocessing.watershed.dam_ws import DamWatershed
def cremi_scores(seg, gt):
if 0 in gt:
gt[gt == 0] = -1
seg = Volume(seg)
metric = NeuronIds(Volume(gt))
vis, vim = metric.voi(seg)
are = metric.adapted_rand(seg)
# cremi uses the geometric mean of rand and vi !
cs = sqrt(are * (vis + vim))
return {'cremi-score': cs, 'vi-merge': vim, 'vi-split': vis, 'adapted-rand': are}
def check_prediction(sample):
bb = np.s_[95:]
gt_path = '/groups/saalfeld/home/papec/Work/neurodata_hdd/cremi/sample%s/gt/sample%s_neurongt_automatically_realignedV2.h5' % (sample, sample)
with h5py.File(gt_path, 'r') as f:
gt = f['data'][bb].astype('int64')
    valid_mask = gt != 0  # keep only voxels with a real ground-truth label
project_directory = '/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws'
pred_path = os.path.join(project_directory,
'Predictions',
'prediction_sample%s.h5' % sample)
prediction = vigra.readHDF5(pred_path, 'data')
stride = [2, 10, 10]
with open('./mws_offsets.json', 'r') as f:
affinity_offsets = json.load(f)
mws = DamWatershed(affinity_offsets, stride, randomize_bounds=False)
print('Predicting mws.. %s' % sample)
mws_seg = mws(prediction).astype('int64')
print('.. done, sample %s' % sample)
    gt = gt[valid_mask]
    mws_seg = mws_seg[valid_mask]
assert gt.shape == mws_seg.shape
print(cremi_scores(mws_seg, gt))
if __name__ == '__main__':
check_prediction("A")
|
{"hexsha": "9cf5b79e4a5682d62564640c4ecaba761f70f9e3", "size": 1702, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/cremi/offset-experiments/no_ignore_bb.py", "max_stars_repo_name": "constantinpape/inferno-experiments", "max_stars_repo_head_hexsha": "7eb034c330a69b58406ca25f35981b01ca0fdc2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiments/cremi/offset-experiments/no_ignore_bb.py", "max_issues_repo_name": "constantinpape/inferno-experiments", "max_issues_repo_head_hexsha": "7eb034c330a69b58406ca25f35981b01ca0fdc2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/cremi/offset-experiments/no_ignore_bb.py", "max_forks_repo_name": "constantinpape/inferno-experiments", "max_forks_repo_head_hexsha": "7eb034c330a69b58406ca25f35981b01ca0fdc2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5185185185, "max_line_length": 146, "alphanum_fraction": 0.6574618096, "include": true, "reason": "import numpy", "num_tokens": 500}
|
from src.utils import softmax
import numpy as np
from copy import deepcopy
class NaiveBayes:
"""
A Naive Bayes classifier for binary data.
"""
def __init__(self, smoothing=1):
"""
Args:
smoothing: controls the smoothing behavior when computing p(x|y).
If the word "jackpot" appears `k` times across all documents with
label y=1, we will instead record `k + self.smoothing`. Then
`p("jackpot" | y=1) = (k + self.smoothing) / Z`, where Z is a
normalization constant that accounts for adding smoothing to
all words.
"""
self.smoothing = smoothing
def predict(self, X):
"""
Return the most probable label for each row x of X.
You should not need to edit this function.
"""
probs = self.predict_proba(X)
return np.argmax(probs, axis=1)
def predict_proba(self, X):
"""
Using self.p_y and self.p_x_y, compute the probability p(y | x) for each row x of X.
While you will have used log probabilities internally, the returned array should be
probabilities, not log probabilities. You may use src.utils.softmax to transform log
probabilities to probabilities.
Args:
X: a data matrix of shape `[n_documents, vocab_size]` on which to predict p(y | x)
Returns
probs: an array of shape `[n_documents, n_labels]` where probs[i, j] contains
the probability `p(y=j | X[i, :])`. Thus, for a given row of this array,
sum(probs[i, :]) == 1.
"""
n_docs, vocab_size = X.shape
n_labels = 2
assert hasattr(self, "p_y") and hasattr(self, "p_x_y"), "Model not fit!"
assert vocab_size == self.vocab_size, "Vocab size mismatch"
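        # X.dot(p_x_y) accumulates count(word_j) * log p(word_j | y) for each class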
row_probs = X.dot(self.p_x_y)
p_y = np.array([np.log(1-np.exp(self.p_y[0])), self.p_y[0]])
probs = (row_probs + p_y)
probs = softmax(probs, axis=1)
return probs
def fit(self, X, y):
"""
Compute self.p_y and self.p_x_y using the training data.
You should store log probabilities to avoid underflow.
This function *should not* use unlabeled data. Wherever y is NaN, that
label and the corresponding row of X should be ignored.
self.p_y should contain the marginal probability of each class label.
Because we are doing binary classification, you may choose
to represent p_y as a single value representing p(y=1)
self.p_x_y should contain the conditional probability of each word
given the class label: p(x | y). This should be an array of shape
[n_vocab, n_labels]. Remember to use `self.smoothing` to smooth word counts!
See __init__ for details. If we see M total words across all N documents with
label y=1, have a vocabulary size of V words, and see the word "jackpot" `k`
times, then: `p("jackpot" | y=1) = (k + self.smoothing) / (M + self.smoothing *
V)` Note that `p("jackpot" | y=1) + p("jackpot" | y=0)` will not sum to 1;
instead, `sum_j p(word_j | y=1)` will sum to 1.
Args: X, a sparse matrix of word counts; Y, an array of labels
Returns: None
"""
n_docs, vocab_size = X.shape
n_labels = 2
self.vocab_size = vocab_size
X = X[np.invert(np.isnan(y))]
y = y[np.invert(np.isnan(y))]
X_one = X[y==1]
y_one = y[y==1]
X_zero = X[y==0]
p_y = np.array([np.size(y_one)/len(y)])
self.p_y = np.log(p_y)
        # ravel so this works for both dense arrays and scipy sparse matrices
        k_one = np.asarray(np.sum(X_one, axis=0)).ravel()
        k_zero = np.asarray(np.sum(X_zero, axis=0)).ravel()
total_one = np.sum(k_one)
total_zero = np.sum(k_zero)
self.p_x_y = np.zeros((self.vocab_size, n_labels))
self.p_x_y[:,0] = (k_zero + self.smoothing) / (total_zero + self.smoothing * self.vocab_size)
self.p_x_y[:,1] = (k_one + self.smoothing) / (total_one + self.smoothing * self.vocab_size)
self.p_x_y = np.log(self.p_x_y)
def likelihood(self, X, y):
"""
Using fit self.p_y and self.p_x_y, compute the log likelihood of the data.
You should use logs to avoid underflow.
This function should not use unlabeled data. Wherever y is NaN,
that label and the corresponding row of X should be ignored.
Recall that the log likelihood of the data can be written:
`sum_i (log p(y_i) + sum_j log p(x_j | y_i))`
Note: If the word w appears `k` times in a document, the term
`p(w | y)` should appear `k` times in the likelihood for that document!
Args: X, a sparse matrix of word counts; Y, an array of labels
Returns: the (log) likelihood of the data.
"""
assert hasattr(self, "p_y") and hasattr(self, "p_x_y"), "Model not fit!"
n_docs, vocab_size = X.shape
n_labels = 2
X_one = X[y==1]
X_zero = X[y==0]
        p_y_zero = 1 - np.exp(self.p_y[0])
        # each labeled document contributes its own log p(y_i) term
        likelihood1 = X_zero.shape[0] * np.log(p_y_zero) + np.nansum(X_zero.dot(self.p_x_y[:, 0]))
        likelihood2 = X_one.shape[0] * self.p_y[0] + np.nansum(X_one.dot(self.p_x_y[:, 1]))
likelihood = likelihood1 + likelihood2
return likelihood
class NaiveBayesEM(NaiveBayes):
"""
A NaiveBayes classifier for binary data,
that uses unlabeled data in the Expectation-Maximization algorithm
"""
def __init__(self, max_iter=10, smoothing=1):
"""
Args:
max_iter: the maximum number of iterations in the EM algorithm,
where each iteration contains both an E step and M step.
You should check for convergence after each iterations,
e.g. with `np.isclose(prev_likelihood, likelihood)`, but
should terminate after `max_iter` iterations regardless of
convergence.
smoothing: controls the smoothing behavior when computing p(x|y).
If the word "jackpot" appears `k` times across all documents with
label y=1, we will instead record `k + self.smoothing`. Then
`p("jackpot" | y=1) = (k + self.smoothing) / Z`, where Z is a
normalization constant that accounts for adding smoothing to
all words.
"""
self.max_iter = max_iter
self.smoothing = smoothing
def fit(self, X, y):
"""
Compute self.p_y and self.p_x_y using the training data.
You should store log probabilities to avoid underflow.
This function *should* use unlabeled data within the EM algorithm.
During the E-step, use the superclass self.predict_proba to
infer a distribution over the labels for the unlabeled examples.
Note: you should *NOT* replace the true labels with your predicted
labels. You can use a `np.where` statement to only update the
labels where `np.isnan(y)` is True.
During the M-step, update self.p_y and self.p_x_y, similar to the
`fit()` call from the NaiveBayes superclass. However, when counting
words in an unlabeled example to compute p(x | y), instead of the
binary label y you should use p(y | x).
For help understanding the EM algorithm, refer to the lectures and
http://www.cs.columbia.edu/~mcollins/em.pdf
This PDF is also uploaded to the course website under readings.
While Figure 1 of this PDF suggests randomly initializing
p(y) and p(x | y) before your first E-step, please initialize
all probabilities equally; e.g. if your vocab size is 4, p(x | y=1)
would be 1/4 for all values of x. This will make it easier to
debug your code without random variation, and will checked
in the `test_em_initialization` test case.
self.p_y should contain the marginal probability of each class label.
Because we are doing binary classification, you may choose
to represent p_y as a single value representing p(y=1)
self.p_x_y should contain the conditional probability of each word
given the class label: p(x | y). This should be an array of shape
[n_vocab, n_labels]. Remember to use `self.smoothing` to smooth word counts!
See __init__ for details. If we have a vocab size of V and we look
at i=1...N documents with label probability delta(y = 1 | i) where "jackpot"
appears in each document k_i times and each document has M_i total words,
then p("jackpot" | y=1) = (sum_i k_i * delta(y=1 | i) + self.smoothing) /
(sum_i M_i * delta(y=1 | i ) + V * self.smoothing
Args: X, a sparse matrix of word counts; Y, an array of labels
Returns: None
"""
n_docs, vocab_size = X.shape
n_labels = 2
self.vocab_size = vocab_size
self.p_x_y = np.zeros((self.vocab_size, n_labels))
self.p_x_y.fill(np.log(1/vocab_size))
p_y = np.array([1/2])
self.p_y = np.log(p_y)
self.delta = np.zeros((n_docs, n_labels))
iter = 0
prev_likelihood = 0
likelihood = 100
        while iter < self.max_iter and not np.isclose(prev_likelihood, likelihood):
prev_likelihood = deepcopy(likelihood)
# E-step
X_unlabeled = X[np.isnan(y)]
probs = self.predict_proba(X_unlabeled)
i = 0
j = 0
for value in y:
if value == 1:
self.delta[i][0] = 0
self.delta[i][1] = 1
elif value == 0:
self.delta[i][0] = 1
self.delta[i][1] = 0
                elif np.isnan(value):
self.delta[i][0] = probs[j][0]
self.delta[i][1] = probs[j][1]
j += 1
i += 1
# M-step
p_y = np.array([np.sum(self.delta[:,1])/len(y)])
self.p_y = np.log(p_y)
delta_one = np.array([self.delta[:,1]]).T
delta_zero = np.array([self.delta[:,0]]).T
k_one = np.sum(X.toarray() * delta_one, axis=0)
k_zero = np.sum(X.toarray() * delta_zero,axis=0)
M_one = np.sum(k_one)
M_zero = np.sum(k_zero)
self.p_x_y = np.zeros((self.vocab_size, n_labels))
self.p_x_y[:,0] = (k_zero + self.smoothing) / (M_zero + self.smoothing * self.vocab_size)
self.p_x_y[:,1] = (k_one + self.smoothing) / (M_one + self.smoothing * self.vocab_size)
self.p_x_y = np.log(self.p_x_y)
likelihood = self.likelihood(X,y)
iter += 1
def likelihood(self, X, y):
"""
Using fit self.p_y and self.p_x_y, compute the likelihood of the data.
You should use logs to avoid underflow.
This function *should* use unlabeled data.
For unlabeled data, we define `delta(y | i) = p(y | x_i)` using the
previously-learned p(x|y) and p(y) necessary to compute
that probability. For labeled data, we define `delta(y | i)`
as 1 if `y_i = y` and 0 otherwise; this is because for labeled data,
the probability that the ith example has label y_i is 1.
Following http://www.cs.columbia.edu/~mcollins/em.pdf,
the log likelihood of the data can be written as:
`sum_i sum_y (delta(y | i) * (log p(y) + sum_j log p(x_{i,j} | y)))`
Note: If the word w appears `k` times in a document, the term
`p(w | y)` should appear `k` times in the likelihood for that document!
Args: X, a sparse matrix of word counts; Y, an array of labels
Returns: the (log) likelihood of the data.
"""
assert hasattr(self, "p_y") and hasattr(self, "p_x_y"), "Model not fit!"
n_docs, vocab_size = X.shape
n_labels = 2
p_y_zero = 1 - np.exp(self.p_y[0])
likelihood1 = np.sum(self.delta[:,0] * (np.log(p_y_zero) + X.dot(self.p_x_y)[:,0]))
likelihood2 = np.sum(self.delta[:,1] * (self.p_y[0] + X.dot(self.p_x_y)[:,1]))
likelihood = likelihood2 + likelihood1
return likelihood
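# Example usage (hypothetical data; X is a sparse count matrix and y may contain
# NaN for unlabeled documents):
# import numpy as np
# from scipy.sparse import csr_matrix
# X = csr_matrix(np.array([[2, 0, 1], [0, 3, 0], [1, 1, 1]]))
# y = np.array([1.0, 0.0, np.nan])
# model = NaiveBayesEM(max_iter=5)
# model.fit(X, y)
# print(model.predict(X))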
|
{"hexsha": "7feb0ccecab96550e3c833d55cce88809de54e5e", "size": 12550, "ext": "py", "lang": "Python", "max_stars_repo_path": "naive_bayes_and_expectation_maximization/src/naive_bayes.py", "max_stars_repo_name": "WallabyLester/Machine_Learning_From_Scratch", "max_stars_repo_head_hexsha": "6042cf421f5de2db61fb570b7c4de64dc03453f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "naive_bayes_and_expectation_maximization/src/naive_bayes.py", "max_issues_repo_name": "WallabyLester/Machine_Learning_From_Scratch", "max_issues_repo_head_hexsha": "6042cf421f5de2db61fb570b7c4de64dc03453f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "naive_bayes_and_expectation_maximization/src/naive_bayes.py", "max_forks_repo_name": "WallabyLester/Machine_Learning_From_Scratch", "max_forks_repo_head_hexsha": "6042cf421f5de2db61fb570b7c4de64dc03453f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.9732441472, "max_line_length": 101, "alphanum_fraction": 0.5857370518, "include": true, "reason": "import numpy,from numpy", "num_tokens": 3104}
|
from collections import OrderedDict
import pytest
import pandas as pd
import numpy as np
from prepnet.category.ordinal_converter import OrdinalConverter
from prepnet.category.onehot_converter import OnehotConverter
def test_ordinal_converter():
input_series = pd.Series(['frame', 'frame', 'old', 'test', 'old', 'frame'])
expected_series = pd.Series([0, 0, 1, 2, 1, 0])
converter = OrdinalConverter()
output_series = converter.encode(input_series)
pd.testing.assert_series_equal(expected_series, output_series)
reconstruct_series = converter.decode(output_series)
pd.testing.assert_series_equal(reconstruct_series, input_series)
def test_ordinal_converter_same_object():
input_df = pd.DataFrame({
'col1': ['one', 'one', 'two', 'three', 'three', 'one'],
'col2': ['two', 'one', 'two', 'three', 'one', 'three'],
})
expected_df = pd.DataFrame({
'col1': [1, 1, 0, 2, 2, 1],
'col2': [0, 1, 0, 2, 1, 2],
})
converter = OrdinalConverter()
converters = OrderedDict()
converters['col2'] = converter
converters['col1'] = converter
for col, converter in converters.items():
output_series = converter.encode(input_df[col])
pd.testing.assert_series_equal(
expected_df[col], output_series)
reconstruct_series = converter.decode(output_series)
pd.testing.assert_series_equal(
reconstruct_series, input_df[col])
def test_onehot_converter():
input_df = pd.DataFrame({
'col1': ['one', 'one', 'two', 'three', 'three', 'one'],
'col2': ['two', 'one', 'two', 'three', 'one', 'three'],
})
expected_df = pd.DataFrame({
'col1_one': [1, 1, 0, 0, 0, 1],
'col1_two': [0, 0, 1, 0, 0, 0],
'col1_three': [0, 0, 0, 1, 1, 0],
'col2_one': [0, 1, 0, 0, 1, 0],
'col2_two': [1, 0, 1, 0, 0, 0],
'col2_three': [0, 0, 0, 1, 0, 1],
}, dtype=np.uint8)
converter = OnehotConverter()
output_df = converter.encode(input_df)
pd.testing.assert_frame_equal(expected_df, output_df[expected_df.columns])
reconstruct_df = converter.decode(output_df)
pd.testing.assert_frame_equal(reconstruct_df[input_df.columns], input_df)
|
{"hexsha": "0866d174d9ea4170867da5d340f976b09f50e539", "size": 2245, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_category.py", "max_stars_repo_name": "elda27/prepnet", "max_stars_repo_head_hexsha": "0f05018969496321aaa770b7e22bda858dab0ad6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_category.py", "max_issues_repo_name": "elda27/prepnet", "max_issues_repo_head_hexsha": "0f05018969496321aaa770b7e22bda858dab0ad6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-08-03T15:44:18.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-15T17:39:45.000Z", "max_forks_repo_path": "tests/test_category.py", "max_forks_repo_name": "elda27/prepnet", "max_forks_repo_head_hexsha": "0f05018969496321aaa770b7e22bda858dab0ad6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5074626866, "max_line_length": 79, "alphanum_fraction": 0.634298441, "include": true, "reason": "import numpy", "num_tokens": 628}
|
// Boost.GIL (Generic Image Library)
//
// Copyright 2020 Olzhas Zhumabek <anonymous.from.applecity@gmail.com>
//
// Use, modification and distribution are subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_GIL_IMAGE_PROCESSING_HOUGH_PARAMETER_HPP
#define BOOST_GIL_IMAGE_PROCESSING_HOUGH_PARAMETER_HPP
#include <boost/gil/point.hpp>
#include <cmath>
#include <cstddef>
namespace boost
{
namespace gil
{
/// \ingroup HoughTransform
/// \brief A type to encapsulate Hough transform parameter range
///
/// This type provides a way to express value range for a parameter
/// as well as some factory functions to simplify initialization
template <typename T>
struct hough_parameter
{
T start_point;
T step_size;
std::size_t step_count;
/// \ingroup HoughTransform
/// \brief Create Hough parameter from value neighborhood and step count
///
/// This function will take start_point as middle point, and in both
/// directions will try to walk half_step_count times until distance of
/// neighborhood is reached
static hough_parameter<T> from_step_count(T start_point, T neighborhood,
std::size_t half_step_count)
{
T step_size = neighborhood / half_step_count;
std::size_t step_count = half_step_count * 2 + 1;
// explicitly fill out members, as aggregate init will error out with narrowing
hough_parameter<T> parameter;
parameter.start_point = start_point - neighborhood;
parameter.step_size = step_size;
parameter.step_count = step_count;
return parameter;
}
/// \ingroup HoughTransform
/// \brief Create Hough parameter from value neighborhood and step size
///
/// This function will take start_point as middle point, and in both
/// directions will try to walk step_size at a time until distance of
/// neighborhood is reached
static hough_parameter<T> from_step_size(T start_point, T neighborhood, T step_size)
{
std::size_t step_count =
2 * static_cast<std::size_t>(std::floor(neighborhood / step_size)) + 1;
// do not use step_size - neighborhood, as step_size might not allow
// landing exactly on that value when starting from start_point
// also use parentheses on step_count / 2 because flooring is exactly
// what we want
// explicitly fill out members, as aggregate init will error out with narrowing
hough_parameter<T> parameter;
parameter.start_point = start_point - step_size * (step_count / 2);
parameter.step_size = step_size;
parameter.step_count = step_count;
return parameter;
}
};
/// \ingroup HoughTransform
/// \brief Calculate minimum angle which would be observable if walked on a circle
///
/// When drawing a circle or moving around a point in circular motion, it is
/// important to not do too many steps, but also to not have disconnected
/// trajectory. This function will calculate the minimum angle that is observable
/// when walking on a circle or tilting a line.
/// WARNING: do keep in mind IEEE 754 quirks, e.g. no-associativity,
/// no-commutativity and precision. Do not expect expressions that are
/// mathematically the same to produce the same values
inline double minimum_angle_step(point_t dimensions)
{
auto longer_dimension = dimensions.x > dimensions.y ? dimensions.x : dimensions.y;
return std::atan2(1, longer_dimension);
}
/// \ingroup HoughTransform
/// \brief Create a Hough transform parameter with optimal angle step
///
/// Due to computational intensity and noise sensitivity of Hough transform,
/// having any candidates missed or computed again is problematic. This function
/// will properly encapsulate optimal value range around approx_angle with amplitude of
/// neighborhood in each direction.
/// WARNING: do keep in mind IEEE 754 quirks, e.g. no-associativity,
/// no-commutativity and precision. Do not expect expressions that are
/// mathematically the same to produce the same values
inline hough_parameter<double> make_theta_parameter(double approx_angle, double neighborhood,
point_t dimensions)
{
auto angle_step = minimum_angle_step(dimensions);
// std::size_t step_count =
// 2 * static_cast<std::size_t>(std::floor(neighborhood / angle_step)) + 1;
// return {approx_angle - angle_step * (step_count / 2), angle_step, step_count};
return hough_parameter<double>::from_step_size(approx_angle, neighborhood, angle_step);
}
}} // namespace boost::gil
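// A minimal usage sketch (values are illustrative, not mandated by the library):
//
//   boost::gil::point_t dim{640, 480};
//   auto theta = boost::gil::make_theta_parameter(0.0, 0.1, dim);
//   for (std::size_t i = 0; i < theta.step_count; ++i)
//   {
//       double angle = theta.start_point + i * theta.step_size;
//       // accumulate Hough votes for this angle
//   }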
#endif
|
{"hexsha": "97fcd8cde5d2e33b8894d0629fde1ffe7f079e69", "size": 4732, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/gil/image_processing/hough_parameter.hpp", "max_stars_repo_name": "harsh-4/gil", "max_stars_repo_head_hexsha": "6da59cc3351e5657275d3a536e0b6e7a1b6ac738", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 153.0, "max_stars_repo_stars_event_min_datetime": "2015-02-03T06:03:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T15:06:34.000Z", "max_issues_repo_path": "include/boost/gil/image_processing/hough_parameter.hpp", "max_issues_repo_name": "harsh-4/gil", "max_issues_repo_head_hexsha": "6da59cc3351e5657275d3a536e0b6e7a1b6ac738", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 429.0, "max_issues_repo_issues_event_min_datetime": "2015-03-22T09:49:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T08:32:08.000Z", "max_forks_repo_path": "include/boost/gil/image_processing/hough_parameter.hpp", "max_forks_repo_name": "harsh-4/gil", "max_forks_repo_head_hexsha": "6da59cc3351e5657275d3a536e0b6e7a1b6ac738", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 215.0, "max_forks_repo_forks_event_min_datetime": "2015-03-15T09:20:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T12:40:07.000Z", "avg_line_length": 41.8761061947, "max_line_length": 93, "alphanum_fraction": 0.71386306, "num_tokens": 1031}
|
import h5py
from bisect import bisect_left
import matplotlib.pyplot as plt
import numpy as np
class brianPlotter:
def __init__(self, hdf5name , permision = "w"):
self.hdf = h5py.File(hdf5name, permision)
def saveData(self, dataName , dataArray , downsample=True):
self.grp = self.hdf.create_group(dataName)
self.grp.create_dataset("1", data=dataArray)
if (downsample):
level = 1
step = 10
while ( dataArray.shape[0]/level > 1000 ):
self.minMaxDownsampling(dataName, level , step)
level = level * step
        print(dataName + ' successfully saved')
def __del__ (self):
self.hdf.close()
def minMaxDownsampling(self, group, baseLevel , step):
data = self.hdf['/'+group+'/'+str(baseLevel)]
shape = list(data.shape)
        shape[0] = shape[0] // step  # integer length so np.empty gets an int shape
downsample = np.empty(shape)
for bin in range(0,data.shape[0],step):
            index = bin // step
if (index % 2 == 0):
downsample[index,:] = data[bin:(bin+step)].max(axis=0)
else:
downsample[index,:] = data[bin:(bin+step)].min(axis=0)
level = int(baseLevel)*step
self.grp.create_dataset(str(level), data=downsample)
def plotLine(self, group, start, end):
        # because the timestep is 0.1 ms and start/end are expressed in seconds
start = int(start * 10000)
end = int(end * 10000)
data = self.getDownsampleData(group, start, end)
length = self.getGroupLength(group)
nrows = data.shape[1]
fig, axes = plt.subplots(ncols=1, nrows=nrows, figsize=(16,nrows*2.5),sharey=True,sharex = True)
fig.suptitle(group, fontsize=20)
for index in range(nrows):
if (group == 'voltage') :
legend = ['neuron '+str(index)+ ' voltage','ge','gi']
elif (group == 'excitatory' or group == 'inhibitory'):
legend = []
for l in range(data.shape[2]):
legend.append('Synapse '+str(index)+ '-'+str(l))
axes[index].plot(data[:,index])
axes[index].legend(legend,framealpha=0.5)
def plotScatter(self, start, end):
spikes = self.getData('spikes',1)
start = self.binary_search(spikes[:,1], start)
end = self.binary_search(spikes[:,1], end)
        fig, ax = plt.subplots(subplot_kw=dict(facecolor='#EEEEEE'), figsize=(16,4))  # axisbg was removed in Matplotlib 2.x
points = plt.scatter(spikes[start:end,1],spikes[start:end,0], s=100)
plt.yticks(np.arange(0,4, 1.0))
plt.ylabel("Neuron number")
plt.xlabel("Time(seconds)")
plt.grid(True)
def getData(self, group, level ):
return self.hdf['/'+group+'/'+str(level)]
def getDownsampleData(self, group, start, end , screenSize = 1024):
bestLevel = self.getBestLevel(group, start , end, screenSize)
data = self.getData(group,bestLevel)
print group + " has been downsampled "+ str(bestLevel) + " times"
start = int(start/bestLevel)
end = int(end/bestLevel)
if (end > data.shape[0]):
            print(data.shape)
            print("WARNING: you requested end = " + str(float(end)) + " seconds but data length is " + str(float(data.shape[0] * bestLevel / 10000)) + " seconds")
end=data.shape[0] -1
return data[start:end]
def getLevels(self, group):
group = self.hdf['/'+group]
        ilist = list(group.items())  # h5py returns a view in Python 3; make it indexable
levels = list()
for index in range(len(ilist)):
levels.append(int(ilist[index][0]))
return levels
def getBestLevel(self, group, start, end , screenSize = 1024):
        requireLevel = (end - start) / screenSize
possibleLevels = self.getLevels(group)
min = abs(requireLevel - possibleLevels[0])
bestFit = 0
for i in range(1,len(possibleLevels)):
if (min > abs(requireLevel - possibleLevels[i])):
min = abs(requireLevel - possibleLevels[i])
bestFit = i
return possibleLevels[bestFit]
def getGroupLength(self,group): #not yet implemented
return 1200000
def binary_search(self, a, x, lo=0, hi=None):
hi = hi if hi is not None else len(a)
return bisect_left(a,x,lo,hi)
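# A minimal usage sketch (assumptions: 'run.h5' and the group name 'voltage'
# are placeholders; shapes and the 0.1 ms timestep follow the comments above):
#
#   import numpy as np
#   plotter = brianPlotter('run.h5')
#   plotter.saveData('voltage', np.random.rand(100000, 3))  # writes levels 1, 10, 100
#   plotter.plotLine('voltage', start=0.0, end=1.0)         # picks the best level for ~1024 px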
|
{"hexsha": "efaadb1e2212b77ba68e0b43cf39c261c3999fa4", "size": 4514, "ext": "py", "lang": "Python", "max_stars_repo_path": "utilities/brianPlotter.py", "max_stars_repo_name": "Jbwasse2/snn-rl", "max_stars_repo_head_hexsha": "29b040655f432bd390bc9d835b86cbfdf1a622e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 68, "max_stars_repo_stars_event_min_datetime": "2015-04-16T11:14:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T07:43:51.000Z", "max_issues_repo_path": "utilities/brianPlotter.py", "max_issues_repo_name": "Jbwasse2/snn-rl", "max_issues_repo_head_hexsha": "29b040655f432bd390bc9d835b86cbfdf1a622e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2015-11-24T04:53:57.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-21T02:00:15.000Z", "max_forks_repo_path": "utilities/brianPlotter.py", "max_forks_repo_name": "Jbwasse2/snn-rl", "max_forks_repo_head_hexsha": "29b040655f432bd390bc9d835b86cbfdf1a622e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2015-12-27T10:04:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-03T03:25:18.000Z", "avg_line_length": 34.196969697, "max_line_length": 150, "alphanum_fraction": 0.5582631812, "include": true, "reason": "import numpy", "num_tokens": 1123}
|
import os
import numpy as np
from bmtk.builder import NetworkBuilder
from bmtk.utils.io.spike_trains import PoissonSpikesGenerator
build_virtual_net = True
cell_models = [
{
'model_name': 'Scnn1a', 'ei': 'e', 'morphology': 'Scnn1a_473845048_m.swc',
'model_template': 'ctdb:Biophys1.hoc',
'dynamics_params': '472363762_fit.json',
},
{
'model_name': 'Rorb', 'ei': 'e', 'morphology': 'Rorb_325404214_m.swc',
'model_template': 'ctdb:Biophys1.hoc',
'dynamics_params': '473863510_fit.json',
},
{
'model_name': 'Nr5a1', 'ei': 'e', 'morphology': 'Nr5a1_471087815_m.swc',
'model_template': 'ctdb:Biophys1.hoc',
'dynamics_params': '473863035_fit.json',
},
{
'model_name': 'PV1', 'ei': 'i', 'morphology': 'Pvalb_470522102_m.swc',
'model_template': 'ctdb:Biophys1.hoc',
'dynamics_params': '472912177_fit.json',
},
{
'model_name': 'PV2', 'ei': 'i', 'morphology': 'Pvalb_469628681_m.swc',
'model_template': 'ctdb:Biophys1.hoc',
'dynamics_params': '473862421_fit.json',
}
]
bio_net = NetworkBuilder("bio")
radius = 100.0
dx = 2*np.pi/float(len(cell_models))
for i, model_props in enumerate(cell_models):
positions = [(radius*np.cos(i*dx), radius*np.sin(i*dx), 0.0)] # place cells in wheel around origin
bio_net.add_nodes(model_type='biophysical', model_processing='aibs_perisomatic', positions=positions,
**model_props)
bio_net.build()
bio_net.save_nodes(output_dir='network')
if build_virtual_net:
# Build a separate network of virtual cells to synapse onto the biophysical network
virt_net = NetworkBuilder('virt')
virt_net.add_nodes(N=10, model_type='virtual', ei='e') # 10 excitatory virtual cells
virt_net.add_edges(target=bio_net.nodes(), # Connect every virt cells onto every bio cell
                       connection_rule=lambda *_: np.random.randint(4, 12),  # 4 to 11 synapses (randint's upper bound is exclusive)
dynamics_params='AMPA_ExcToExc.json',
model_template='Exp2Syn',
syn_weight=3.4e-4,
delay=2.0,
target_sections=['soma', 'basal', 'apical'], # target soma and all dendritic sections
distance_range=[0.0, 1.0e20])
virt_net.build()
virt_net.save(output_dir='network')
# Create spike trains to use for our virtual cells
if not os.path.exists('inputs'):
os.mkdir('inputs')
psg = PoissonSpikesGenerator(range(10), 10.0, tstop=4000.0)
psg.to_hdf5('inputs/exc_spike_trains.h5')
|
{"hexsha": "37998fc2914f987a5b78a8cb62559fc4ecb4cc1e", "size": 2650, "ext": "py", "lang": "Python", "max_stars_repo_path": "docs/examples/bio_basic_features/build_network.py", "max_stars_repo_name": "tjbanks/bmtk", "max_stars_repo_head_hexsha": "52fee3b230ceb14a666c46f57f2031c38f1ac5b1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 216, "max_stars_repo_stars_event_min_datetime": "2017-10-03T17:02:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T03:35:48.000Z", "max_issues_repo_path": "docs/examples/bio_basic_features/build_network.py", "max_issues_repo_name": "tjbanks/bmtk", "max_issues_repo_head_hexsha": "52fee3b230ceb14a666c46f57f2031c38f1ac5b1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 70, "max_issues_repo_issues_event_min_datetime": "2017-10-05T00:50:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T18:55:01.000Z", "max_forks_repo_path": "docs/examples/bio_basic_features/build_network.py", "max_forks_repo_name": "tjbanks/bmtk", "max_forks_repo_head_hexsha": "52fee3b230ceb14a666c46f57f2031c38f1ac5b1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 97, "max_forks_repo_forks_event_min_datetime": "2017-10-03T22:15:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T21:03:26.000Z", "avg_line_length": 36.8055555556, "max_line_length": 113, "alphanum_fraction": 0.6294339623, "include": true, "reason": "import numpy", "num_tokens": 746}
|
import torch
import numpy as np
class BeamSearcher():
def __init__(self, width, eos):
self.width = width
self.eos = eos
self.hypos = [{'seq': [], 'score': 0} for _ in range(width)]
self.parent_hypos = [i for i in range(width)]
self.end_hypos = []
def step(self, step, log_p):
scores, top_seqs = log_p.topk(self.width, dim=1)
for score, parent_hypo in zip(scores, self.parent_hypos):
score += self.hypos[parent_hypo]['score']
# get top k scores
if step == 0:
idxs = [i for i in range(self.width)]
else:
            # argsort on a detached copy; tensors that require grad can't pass through numpy
            flattened_axis = np.argsort(-scores.detach().cpu().numpy(), axis=None)[:self.width]
            self.parent_hypos = flattened_axis // scores.size(1)  # integer row index, not float division
            idxs = flattened_axis % scores.size(1)
# select new hypotheses
new_hypos = []
new_parent_hypos = []
old_parent_hypos = []
next_tokens = []
for parent_hypo, idx in zip(self.parent_hypos, idxs):
next_token = int(top_seqs[parent_hypo][idx])
if next_token == self.eos:
self.end_hypos.append({'score': scores[parent_hypo][idx],
'seq': self.hypos[parent_hypo]['seq']})
self.width -= 1
else:
next_tokens.append(next_token)
old_parent_hypos.append(int(parent_hypo))
new_parent_hypos.append(len(new_hypos))
new_hypos.append({'score': scores[parent_hypo][idx],
'seq': self.hypos[parent_hypo]['seq']+[next_token]})
next_tokens = log_p.new_tensor(next_tokens, dtype=torch.long)
self.hypos = new_hypos
self.parent_hypos = new_parent_hypos
return next_tokens, old_parent_hypos
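# A minimal usage sketch (shapes are assumptions; `model_step` is a hypothetical
# callable returning per-hypothesis log-probabilities of shape (width, vocab_size)):
#
#   searcher = BeamSearcher(width=3, eos=2)
#   for step in range(max_len):
#       log_p = model_step(...)                            # torch.Tensor (width, vocab)
#       next_tokens, parents = searcher.step(step, log_p)
#       if searcher.width == 0:                            # all beams reached <eos>
#           break
#   best = max(searcher.end_hypos, key=lambda h: h['score'])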
|
{"hexsha": "8a61ef2140b20f378b6475e8a6070eb6b37fd3a3", "size": 1810, "ext": "py", "lang": "Python", "max_stars_repo_path": "torch_models/models/beam_searcher.py", "max_stars_repo_name": "Ryou0634/pytorch_models", "max_stars_repo_head_hexsha": "cd48f9b3797839df5dbf4e51bed81de44e7b962e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "torch_models/models/beam_searcher.py", "max_issues_repo_name": "Ryou0634/pytorch_models", "max_issues_repo_head_hexsha": "cd48f9b3797839df5dbf4e51bed81de44e7b962e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "torch_models/models/beam_searcher.py", "max_forks_repo_name": "Ryou0634/pytorch_models", "max_forks_repo_head_hexsha": "cd48f9b3797839df5dbf4e51bed81de44e7b962e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1363636364, "max_line_length": 86, "alphanum_fraction": 0.5662983425, "include": true, "reason": "import numpy", "num_tokens": 439}
|
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden
import numpy as np
numbers = np.array([[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]])
print(np.flip(numbers, 1))
numbers2 = np.array([[1, 1, 1, 1],
[2, 2, 2, 2],
[3, 3, 3, 3]])
print(np.flip(numbers2, 0))
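# Expected output: np.flip(numbers, 1) reverses each row, giving
# [[4 3 2 1], [4 3 2 1], [4 3 2 1]]; np.flip(numbers2, 0) reverses the row
# order, giving [[3 3 3 3], [2 2 2 2], [1 1 1 1]].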
|
{"hexsha": "2d942ad3b37cf341e23474a2ef2bc9d0cea6d0ed", "size": 372, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/zzz_training_challenge/Python_Challenge/solutions/ch06_arrays/intro/intro_numpy_flip.py", "max_stars_repo_name": "Kreijeck/learning", "max_stars_repo_head_hexsha": "eaffee08e61f2a34e01eb8f9f04519aac633f48c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python/zzz_training_challenge/Python_Challenge/solutions/ch06_arrays/intro/intro_numpy_flip.py", "max_issues_repo_name": "Kreijeck/learning", "max_issues_repo_head_hexsha": "eaffee08e61f2a34e01eb8f9f04519aac633f48c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/zzz_training_challenge/Python_Challenge/solutions/ch06_arrays/intro/intro_numpy_flip.py", "max_forks_repo_name": "Kreijeck/learning", "max_forks_repo_head_hexsha": "eaffee08e61f2a34e01eb8f9f04519aac633f48c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.8823529412, "max_line_length": 50, "alphanum_fraction": 0.4704301075, "include": true, "reason": "import numpy", "num_tokens": 129}
|
using LightGraphs, Plots, StatsPlots, Distributions, GraphRecipes, GraphIO, ParserCombinator, JSON
using GraphIO: GML
gr()
theme(:juno)
N = 50
k = 1
function gen_graph(N, k)
G = barabasi_albert(N, k)
path = a_star(G, 1, N)
uni = Set{Int}()
for e ∈ path
push!(uni, e.src)
push!(uni, e.dst)
end
labels = Dict(i => i in uni ? 1 : 0 for i in 1:N)
return G, labels
end
function plot_syn_graph(g, labels)
graphplot(g, nodecolor=map((x) -> labels[x] == 0 ? :green : :blue, 1:length(labels)))
end
function write_to_file(g, labels, n)
open("/gpfs_home/spate116/singhlab/GCN_Integration/scripts/BI/path/data/syn_graph_labels_$(n).json", "w") do io
write(io, JSON.json(labels))
end;
open("/gpfs_home/spate116/singhlab/GCN_Integration/scripts/BI/path/data/syn_graph_$(n).gml", "w") do io
GML.savegml(io, g)
end;
end
G, labels = gen_graph(N, k)
write_to_file(G, labels, 0)
for i in 1:length(labels)
    if labels[i] == 1
        println(i)
    end
end
|
{"hexsha": "838ceeefe12ff6cb7c9c56e962c767f37be261a1", "size": 1046, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/BI/examples/path/gen.jl", "max_stars_repo_name": "shalinkpatel/GCN_Integration", "max_stars_repo_head_hexsha": "253fa4321606acf0ee0a98667bf6e5eb8ec96cf1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/BI/examples/path/gen.jl", "max_issues_repo_name": "shalinkpatel/GCN_Integration", "max_issues_repo_head_hexsha": "253fa4321606acf0ee0a98667bf6e5eb8ec96cf1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-10T06:32:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T06:32:42.000Z", "max_forks_repo_path": "scripts/BI/examples/path/gen.jl", "max_forks_repo_name": "shalinkpatel/GCN_Integration", "max_forks_repo_head_hexsha": "253fa4321606acf0ee0a98667bf6e5eb8ec96cf1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.7391304348, "max_line_length": 115, "alphanum_fraction": 0.6309751434, "num_tokens": 348}
|
function readSP3(gpsWeek, day)
# Construct string
#sp3File = datadir("igs") * "\\igs" * string(gpsWeek) * string(day) * ".sp3"
sp3File = datadir("igs", "igs" * string(gpsWeek) * string(day) * ".sp3")
if !isfile(sp3File)
throw(ErrorException("The file " * sp3File * " does not exist and must be downloaded from CDDIS.\n" *
"The file can be found at 'https://cddis.nasa.gov/archive/gnss/products/'"))
end
# Instantiate Data Frame
df = DataFrame(
MJD = Float64[],
Year = Int[],
Month = Int[],
Day = Int[],
Hour = Int[],
Min = Int[],
Sec = Float64[],
Sat = Symbol[],
X = Float64[],
Y = Float64[],
Z = Float64[],
Clk = Float64[],
Xstd = Float64[],
Ystd = Float64[],
Zstd = Float64[],
Clkstd = Float64[]
)
# Open file
f = open(sp3File)
# Base std errors
basePos = 0.0
baseClk = 0.0
# Date and time
MJD = 0.0
Year = 0
Month = 0
Day = 0
Hour = 0
Min = 0
Sec = 0.0
# Begin looping through lines in file
lineNum = 0
for line in readlines(f)
lineNum += 1
if lineNum > 22
if line[1] == '*' # Line designates time
Year = parse(Int, line[4:7])
Month = parse(Int, line[9:10])
Day = parse(Int, line[12:13])
Hour = parse(Int, line[15:16])
Min = parse(Int, line[18:19])
Sec = parse(Float64, line[21:31])
elseif line[1] == 'P'
Sat = Symbol(line[2:4])
X = parse(Float64, line[5:18])
Y = parse(Float64, line[19:32])
Z = parse(Float64, line[33:46])
Clk = parse(Float64, line[47:60])
if length(line) > 60
XstdExp = tryparse(Int, line[62:63])
XstdExp === nothing ? XstdExp = NaN : ()
YstdExp = tryparse(Int, line[65:66])
YstdExp === nothing ? YstdExp = NaN : ()
ZstdExp = tryparse(Int, line[68:69])
ZstdExp === nothing ? ZstdExp = NaN : ()
ClkstdExp = tryparse(Int, line[71:73])
ClkstdExp === nothing ? ClkstdExp = NaN : ()
else
XstdExp = NaN
YstdExp = NaN
ZstdExp = NaN
ClkstdExp = NaN
end
# Compute MJD
dayWithFrac = day + ((Sec/60.0 + Min)/60.0 + Hour)/24.0
MJD = gps2MJD(gpsWeek, dayWithFrac)
# Push to Data Frame
push!(df, Dict(
"MJD" => MJD,
"Year" => Year,
"Month" => Month,
"Day" => Day,
"Hour" => Hour,
"Min" => Min,
"Sec" => Sec,
"Sat" => Sat,
"X" => X,
"Y" => Y,
"Z" => Z,
"Clk" => Clk,
"Xstd" => basePos^XstdExp,
"Ystd" => basePos^YstdExp,
"Zstd" => basePos^ZstdExp,
"Clkstd" => baseClk^ClkstdExp
))
end
else
if lineNum == 15
str = split(line, " "; keepempty = false)[2:3]
basePos = parse(Float64, str[1])
baseClk = parse(Float64, str[2])
end
end
end
close(f)
return df
end
function readSP3s(gpsWeekStart, weekDayStart, gpsWeekEnd, weekDayEnd)
# Handle gps weeks and week days
gpsWeeks = gpsWeekStart:gpsWeekEnd
startWeekDays = weekDayStart:6
endWeekDays = 0:weekDayEnd
# Instantiate Data Frame
df = DataFrame(
MJD = Float64[],
Year = Int[],
Month = Int[],
Day = Int[],
Hour = Int[],
Min = Int[],
Sec = Float64[],
Sat = Symbol[],
X = Float64[],
Y = Float64[],
Z = Float64[],
Clk = Float64[],
Xstd = Float64[],
Ystd = Float64[],
Zstd = Float64[],
Clkstd = Float64[]
)
@showprogress "Reading in sp3 files..." for week in gpsWeeks
if week == gpsWeekStart
for day in startWeekDays
df = vcat(df, readSP3(week, day))
end
elseif week == gpsWeekEnd
for day in endWeekDays
df = vcat(df, readSP3(week, day))
end
else
            for day in 0:6 # full middle weeks span GPS days 0 (Sun) through 6 (Sat)
df = vcat(df, readSP3(week, day))
end
end
end
return df
end
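# A minimal usage sketch (week/day numbers are placeholders; assumes DataFrames,
# ProgressMeter and the project's datadir/gps2MJD helpers are loaded, as in the
# enclosing package):
#
#   df_day  = readSP3(2100, 3)            # single file: GPS week 2100, day 3
#   df_span = readSP3s(2100, 3, 2101, 2)  # GPS week 2100 day 3 .. week 2101 day 2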
|
{"hexsha": "e48afee58a2540b7798a0024eb4ebdb07102be1d", "size": 5299, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/GPS/readSP3.jl", "max_stars_repo_name": "GrantHecht/OptimalEstimationProject", "max_stars_repo_head_hexsha": "42e595d1991a8f81cbfb36856528d572b45cc598", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-15T00:42:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T00:42:37.000Z", "max_issues_repo_path": "src/GPS/readSP3.jl", "max_issues_repo_name": "GrantHecht/OptimalEstimationProject.jl", "max_issues_repo_head_hexsha": "42e595d1991a8f81cbfb36856528d572b45cc598", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GPS/readSP3.jl", "max_forks_repo_name": "GrantHecht/OptimalEstimationProject.jl", "max_forks_repo_head_hexsha": "42e595d1991a8f81cbfb36856528d572b45cc598", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.509202454, "max_line_length": 111, "alphanum_fraction": 0.3912058879, "num_tokens": 1402}
|
################################################################################
# Copyright (C) 2011-2012,2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Module for the Dirichlet distribution node.
"""
import numpy as np
from scipy import special
from bayespy.utils import random
from bayespy.utils import misc
from bayespy.utils import linalg
from .stochastic import Stochastic
from .expfamily import ExponentialFamily, ExponentialFamilyDistribution
from .constant import Constant
from .node import Node, Moments, ensureparents
class ConcentrationMoments(Moments):
"""
Class for the moments of Dirichlet conjugate-prior variables.
"""
def __init__(self, categories):
self.categories = categories
self.dims = ( (categories,), () )
return
def compute_fixed_moments(self, alpha):
"""
Compute the moments for a fixed value
"""
alpha = np.asanyarray(alpha)
if np.ndim(alpha) < 1:
raise ValueError("The prior sample sizes must be a vector")
if np.any(alpha < 0):
raise ValueError("The prior sample sizes must be non-negative")
gammaln_sum = special.gammaln(np.sum(alpha, axis=-1))
sum_gammaln = np.sum(special.gammaln(alpha), axis=-1)
z = gammaln_sum - sum_gammaln
return [alpha, z]
@classmethod
def from_values(cls, alpha):
"""
Return the shape of the moments for a fixed value.
"""
if np.ndim(alpha) < 1:
raise ValueError("The array must be at least 1-dimensional array.")
categories = np.shape(alpha)[-1]
return cls(categories)
class DirichletMoments(Moments):
"""
Class for the moments of Dirichlet variables.
"""
def __init__(self, categories):
self.categories = categories
self.dims = ( (categories,), )
def compute_fixed_moments(self, p):
"""
Compute the moments for a fixed value
"""
# Check that probabilities are non-negative
p = np.asanyarray(p)
if np.ndim(p) < 1:
raise ValueError("Probabilities must be given as a vector")
if np.any(p < 0) or np.any(p > 1):
raise ValueError("Probabilities must be in range [0,1]")
if not np.allclose(np.sum(p, axis=-1), 1.0):
raise ValueError("Probabilities must sum to one")
# Normalize probabilities
p = p / np.sum(p, axis=-1, keepdims=True)
# Message is log-probabilities
logp = np.log(p)
u = [logp]
return u
@classmethod
def from_values(cls, x):
"""
Return the shape of the moments for a fixed value.
"""
if np.ndim(x) < 1:
raise ValueError("Probabilities must be given as a vector")
categories = np.shape(x)[-1]
return cls(categories)
class DirichletDistribution(ExponentialFamilyDistribution):
"""
Class for the VMP formulas of Dirichlet variables.
"""
def compute_message_to_parent(self, parent, index, u_self, u_alpha):
r"""
Compute the message to a parent node.
"""
logp = u_self[0]
m0 = logp
m1 = 1
return [m0, m1]
def compute_phi_from_parents(self, u_alpha, mask=True):
r"""
Compute the natural parameter vector given parent moments.
"""
return [u_alpha[0]]
def compute_moments_and_cgf(self, phi, mask=True):
r"""
Compute the moments and :math:`g(\phi)`.
.. math::
\overline{\mathbf{u}} (\boldsymbol{\phi})
&=
\begin{bmatrix}
\psi(\phi_1) - \psi(\sum_d \phi_{1,d})
\end{bmatrix}
\\
g_{\boldsymbol{\phi}} (\boldsymbol{\phi})
&=
TODO
"""
if np.any(np.asanyarray(phi) <= 0):
raise ValueError("Natural parameters should be positive")
sum_gammaln = np.sum(special.gammaln(phi[0]), axis=-1)
gammaln_sum = special.gammaln(np.sum(phi[0], axis=-1))
psi_sum = special.psi(np.sum(phi[0], axis=-1, keepdims=True))
# Moments <log x>
u0 = special.psi(phi[0]) - psi_sum
u = [u0]
# G
g = gammaln_sum - sum_gammaln
return (u, g)
def compute_cgf_from_parents(self, u_alpha):
r"""
Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
"""
return u_alpha[1]
def compute_fixed_moments_and_f(self, p, mask=True):
r"""
Compute the moments and :math:`f(x)` for a fixed value.
.. math::
u(p) =
\begin{bmatrix}
\log(p_1)
\\
\vdots
\\
\log(p_D)
\end{bmatrix}
.. math::
f(p) = - \sum_d \log(p_d)
"""
# Check that probabilities are non-negative
p = np.asanyarray(p)
if np.ndim(p) < 1:
raise ValueError("Probabilities must be given as a vector")
if np.any(p < 0) or np.any(p > 1):
raise ValueError("Probabilities must be in range [0,1]")
if not np.allclose(np.sum(p, axis=-1), 1.0):
raise ValueError("Probabilities must sum to one")
# Normalize probabilities
p = p / np.sum(p, axis=-1, keepdims=True)
# Message is log-probabilities
logp = np.log(p)
u = [logp]
f = - np.sum(logp, axis=-1)
return (u, f)
def random(self, *phi, plates=None):
r"""
Draw a random sample from the distribution.
"""
return random.dirichlet(phi[0], size=plates)
def compute_gradient(self, g, u, phi):
r"""
Compute the moments and :math:`g(\phi)`.
\psi(\phi_1) - \psi(\sum_d \phi_{1,d})
Standard gradient given the gradient with respect to the moments, that
is, given the Riemannian gradient :math:`\tilde{\nabla}`:
.. math::
\nabla &=
\begin{bmatrix}
(\psi^{(1)}(\phi_1) - \psi^{(1)}(\sum_d \phi_{1,d}) \nabla_1
\end{bmatrix}
"""
sum_phi = np.sum(phi[0], axis=-1, keepdims=True)
d0 = g[0] * (special.polygamma(1, phi[0]) - special.polygamma(1, sum_phi))
return [d0]
class Concentration(Stochastic):
_parent_moments = ()
def __init__(self, D, regularization=True, **kwargs):
"""
ML estimation node for concentration parameters.
Parameters
----------
D : int
Number of categories
regularization : 2-tuple of arrays (optional)
"Prior" log-probability and "prior" sample number
"""
self.D = D
self.dims = ( (D,), () )
self._moments = ConcentrationMoments(D)
super().__init__(dims=self.dims, initialize=False, **kwargs)
self.u = self._moments.compute_fixed_moments(np.ones(D))
if regularization is None or regularization is False:
regularization = [0, 0]
elif regularization is True:
# Decent default regularization?
regularization = [np.log(1/D), 1]
self.regularization = regularization
return
@property
def regularization(self):
return self.__regularization
@regularization.setter
def regularization(self, regularization):
if len(regularization) != 2:
raise ValueError("Regularization must 2-tuple")
if not misc.is_shape_subset(np.shape(regularization[0]), self.get_shape(0)):
raise ValueError("Wrong shape")
if not misc.is_shape_subset(np.shape(regularization[1]), self.get_shape(1)):
raise ValueError("Wrong shape")
self.__regularization = regularization
return
def _update_distribution_and_lowerbound(self, m):
r"""
Find maximum likelihood estimate for the concentration parameter
"""
a = np.ones(self.D)
da = np.inf
logp = m[0] + self.regularization[0]
N = m[1] + self.regularization[1]
# Compute sufficient statistic
mean_logp = logp / N[...,None]
# It is difficult to estimate values lower than 0.02 because the
# Dirichlet distributed probability vector starts to give numerically
# zero random samples for lower values.
if np.any(np.isinf(mean_logp)):
raise ValueError(
"Cannot estimate DirichletConcentration because of infs. This "
"means that there are numerically zero probabilities in the "
"child Dirichlet node."
)
# Fixed-point iteration
while np.any(np.abs(da / a) > 1e-5):
a_new = misc.invpsi(
special.psi(np.sum(a, axis=-1, keepdims=True))
+ mean_logp
)
da = a_new - a
a = a_new
self.u = self._moments.compute_fixed_moments(a)
return
def initialize_from_value(self, x):
self.u = self._moments.compute_fixed_moments(x)
return
def lower_bound_contribution(self):
return (
linalg.inner(self.u[0], self.regularization[0], ndim=1)
+ self.u[1] * self.regularization[1]
)
class Dirichlet(ExponentialFamily):
r"""
Node for Dirichlet random variables.
The node models a set of probabilities :math:`\{\pi_0, \ldots, \pi_{K-1}\}`
which satisfy :math:`\sum_{k=0}^{K-1} \pi_k = 1` and :math:`\pi_k \in [0,1]
\ \forall k=0,\ldots,K-1`.
.. math::
p(\pi_0, \ldots, \pi_{K-1}) = \mathrm{Dirichlet}(\alpha_0, \ldots,
\alpha_{K-1})
where :math:`\alpha_k` are concentration parameters.
The posterior approximation has the same functional form but with different
concentration parameters.
Parameters
----------
alpha : (...,K)-shaped array
Prior counts :math:`\alpha_k`
See also
--------
Beta, Categorical, Multinomial, CategoricalMarkovChain
"""
_distribution = DirichletDistribution()
@classmethod
def _constructor(cls, alpha, **kwargs):
"""
Constructs distribution and moments objects.
"""
# Number of categories
alpha = cls._ensure_moments(alpha, ConcentrationMoments)
parent_moments = (alpha._moments,)
parents = [alpha]
categories = alpha.dims[0][0]
moments = DirichletMoments(categories)
return (
parents,
kwargs,
moments.dims,
cls._total_plates(kwargs.get('plates'), alpha.plates),
cls._distribution,
moments,
parent_moments
)
def __str__(self):
"""
Show distribution as a string
"""
alpha = self.phi[0]
return ("%s ~ Dirichlet(alpha)\n"
" alpha =\n"
"%s" % (self.name, alpha))
|
{"hexsha": "8825136a848c5f197c7281a130f07dd49eb9a65f", "size": 11102, "ext": "py", "lang": "Python", "max_stars_repo_path": "bayespy/inference/vmp/nodes/dirichlet.py", "max_stars_repo_name": "dungvtdev/upsbayescpm", "max_stars_repo_head_hexsha": "f6ee877c689046d3c57a2ac06742cfe4a0b6550e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 622, "max_stars_repo_stars_event_min_datetime": "2015-01-15T19:46:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T13:40:22.000Z", "max_issues_repo_path": "bayespy/inference/vmp/nodes/dirichlet.py", "max_issues_repo_name": "dungvtdev/upsbayescpm", "max_issues_repo_head_hexsha": "f6ee877c689046d3c57a2ac06742cfe4a0b6550e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 118, "max_issues_repo_issues_event_min_datetime": "2015-01-04T06:38:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-05T17:28:02.000Z", "max_forks_repo_path": "bayespy/inference/vmp/nodes/dirichlet.py", "max_forks_repo_name": "dungvtdev/upsbayescpm", "max_forks_repo_head_hexsha": "f6ee877c689046d3c57a2ac06742cfe4a0b6550e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 160, "max_forks_repo_forks_event_min_datetime": "2015-02-16T15:30:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T00:52:36.000Z", "avg_line_length": 27.755, "max_line_length": 84, "alphanum_fraction": 0.5589983787, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2720}
|
import warnings
warnings.filterwarnings("ignore")
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import numpy as np
import pandas as pd
from tensorflow.python.keras.preprocessing.text import Tokenizer
from sklearn.metrics import precision_recall_fscore_support
from termcolor import colored
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model
from keras.layers import Dense
from keras.layers import Embedding, Input, Conv1D, GlobalMaxPooling1D, concatenate
# define documents
df = pd.read_csv('../../data/triple_with_stv.csv')
sentence_lines = list()
lines = df['triple'].values.tolist()
stv = df['stv'].values.tolist()
for line in lines:
tokens = word_tokenize(line)
tokens = [w.lower() for w in tokens]
table = str.maketrans('','',string.punctuation)
stripped = [w.translate(table) for w in tokens]
words = [word for word in stripped if word.isalpha()]
stop_words = set(stopwords.words('english'))
words = [w for w in words if not w in stop_words]
sentence_lines.append(words)
print('Number of lines', len(sentence_lines))
EMBEDDING_DIM = 200
#Vectorize the text samples into a S2 integer tensor
tokenizer_obj = Tokenizer()
tokenizer_obj.fit_on_texts(sentence_lines)
sequences = tokenizer_obj.texts_to_sequences(sentence_lines)
print(colored(sequences,'green'))
#define vocabulary size
vocab_size = len(tokenizer_obj.word_index) + 1
# print(colored(sequences,'green'))
#pad sequences
word_index = tokenizer_obj.word_index
max_length = 5
triple_pad = pad_sequences(sequences, maxlen=max_length)
truth = df['truth'].values
print('Shape of triple tensor: ', triple_pad.shape)
print('Shape of truth tensor: ', truth.shape)
#map embeddings from loaded word2vec model for each word to the tokenizer_obj.word_index vocabulary & create a wordvector matrix
num_words = len(word_index)+1
print(colored(num_words,'cyan'))
# first input model: token ids -> embedding -> Conv1D over the sequence
# (Input shapes must exclude the batch dimension; the original passed the full
# data shapes here and never applied the Embedding layer, so the graph could
# not be built.)
emb = Embedding(vocab_size, EMBEDDING_DIM, input_length=max_length)
visible1 = Input(shape=(max_length,))
embedded1 = emb(visible1)
conv11 = Conv1D(128, 4, activation='relu')(embedded1)
pool11 = GlobalMaxPooling1D()(conv11)
den1 = Dense(10, activation='relu')(pool11)
# second input layer: one scalar stv feature per triple
visible2 = Input(shape=(1,))
den2 = Dense(10, activation='relu')(visible2)
# # merge input models
merge = concatenate([den1, den2])
# interpretation model
hidden1 = Dense(10, activation='relu')(merge)
hidden2 = Dense(10, activation='relu')(hidden1)
output = Dense(1, activation='sigmoid')(hidden2)
model = Model(inputs=[visible1, visible2], outputs=output)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
print(model.summary())
#Split the data into training set and validation set
VALIDATION_SPLIT = 0.3
indices = np.arange(triple_pad.shape[0])
np.random.shuffle(indices)
triple_pad = triple_pad[indices]
truth = truth[indices]
stv = np.array(stv).reshape(-1, 1)[indices]  # keep the stv feature aligned with the shuffled rows
num_validation_samples = int(VALIDATION_SPLIT * triple_pad.shape[0])
X_train_pad = triple_pad[:-num_validation_samples]
y_train = truth[:-num_validation_samples]
X_test_pad = triple_pad[-num_validation_samples:]
y_test = truth[-num_validation_samples:]
stv_train = stv[:-num_validation_samples]
stv_test = stv[-num_validation_samples:]
print('Shape of X_train_pad tensor: ',X_train_pad.shape)
print('Shape of y_train tensor: ',y_train.shape)
print('Shape of X_test_pad tensor: ',X_test_pad.shape)
print('Shape of y_test tensor: ',y_test.shape)
print(colored('Training...','green'))
history = model.fit([X_train_pad, stv_train], y_train, batch_size=128, epochs=25,
                    validation_data=([X_test_pad, stv_test], y_test), verbose=2)
# Functional-API models have no predict_classes; threshold the sigmoid output instead
y_pred = (model.predict([X_test_pad, stv_test]) > 0.5).astype(int)
metrics = precision_recall_fscore_support(y_test, y_pred, average='weighted')
print()
print(colored("Precision: ",'green'),colored(metrics[0],'blue'))
print(colored("Recall: ",'green'),colored(metrics[1],'blue'))
print(colored("F1: ",'green'),colored(metrics[2],'blue'))
|
{"hexsha": "6fc7d531590683a81b34eb3cce49787fd7210845", "size": 4237, "ext": "py", "lang": "Python", "max_stars_repo_path": "impl/withPSL/RNN/NELL_cnn_random_multiple_features.py", "max_stars_repo_name": "wso2-incubator/knowledge-graph-optimizations", "max_stars_repo_head_hexsha": "4740c27eb5facf6e3ed594400bfb8533ac38de9c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "impl/withPSL/RNN/NELL_cnn_random_multiple_features.py", "max_issues_repo_name": "wso2-incubator/knowledge-graph-optimizations", "max_issues_repo_head_hexsha": "4740c27eb5facf6e3ed594400bfb8533ac38de9c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "impl/withPSL/RNN/NELL_cnn_random_multiple_features.py", "max_forks_repo_name": "wso2-incubator/knowledge-graph-optimizations", "max_forks_repo_head_hexsha": "4740c27eb5facf6e3ed594400bfb8533ac38de9c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-07-08T03:52:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T14:17:04.000Z", "avg_line_length": 33.896, "max_line_length": 201, "alphanum_fraction": 0.7682322398, "include": true, "reason": "import numpy", "num_tokens": 1039}
|
using Distributions, SpecialFunctions, FastGaussQuadrature
struct PositiveStable <: ContinuousUnivariateDistribution
α::Float64
β::Float64
θ::Float64
ρ::Float64
PositiveStable(α::Real,β::Real) = ( β < -1 || β > 1 ||
(β == -1 && α <= 1) || α <= 0 || α > 2 ) ?
error("Parameters' requirements unmet:\n (α,β)∈(0,2]×[-1,1]-(0,1]×{-1}") :
α == 2 ? new(2,0,0,.5) :
new(α,β,β*(α <= 1 ? 1 :
(α-2)/α), (1 + β*(α <= 1 ? 1 : (α-2)/α))/2)
end
import Distributions.params
function params(d::PositiveStable)
return (d.α,d.β,d.θ,d.ρ)
end
import Distributions.minimum
function minimum(d::PositiveStable)
  return d.β == 1 && d.α == 1 ? 1 : 0
end
import Distributions.maximum
function maximum(d::PositiveStable)
  return d.β == 1 && d.α == 1 ? 1 : Inf
end
import Distributions.insupport
function insupport(d::PositiveStable,x::Real)
  return d.β == 1 && d.α == 1 ? x==1 : x >= 0
end
########################
# BASIC SIMULATION TOOLS
########################
import Distributions.rand
function rand(d::PositiveStable) # Chambers, Mallows, and Stuck
U = rand(Uniform(-(pi/2)*d.θ,pi/2))
return rand(Exponential())^(1-1/d.α) * sin(d.α * (U+(pi/2)*d.θ)) /
(cos(U) ^ (1/d.α) * cos(U - d.α * (U+(pi/2)*d.θ)) ^ (1-1/d.α))
end
function rand(d::PositiveStable,n::Integer) # Chambers, Mallows, and Stuck
U = rand(Uniform(-(pi/2)*d.θ,pi/2),n)
return rand(Exponential(),n).^(1-1/d.α) .* sin.(d.α .* (U .+ (pi/2)*d.θ)) ./
(cos.(U) .^ (1/d.α) .* cos.(U .- d.α .* (U .+ (pi/2)*d.θ)) .^ (1-1/d.α))
end
function cond_rand(d::PositiveStable,lo::Real,hi::Real)
if hi == Inf
while true
x = rand(d)
if lo < x
return x
end
end
elseif lo == 0
while true
x = rand(d)
if x < hi
return x
end
end
end
mid = d.α <= 1 ? (d.β <= 0 ? 0 : (1+d.α)/2) : (d.β >= 0 ? 0 : (1.6+d.α)/2)
p = pdf(d,[lo,hi])
q = cdf(d,[lo,hi])
if lo > mid && (p[2] / p[1] > q[1] - q[2]) # More likely to be accepted with this methodology
ux = Uniform(lo,hi)
u = Uniform()
while true
x = rand(ux)
px = pdf(d,x)
if rand(u) < px / p[1]
return x
end
end
else
while true
x = rand(d)
if lo < x < hi
return x
end
end
end
end
########################
# PDF, CDF
########################
function auxV2(x::AbstractArray,a::Real,b::Real)
y = (pi/2).*x
t = (pi/2)*a*b
return ((sin.(a .* y .+ t).^a) ./ cos.(y)) .^ (1 /(1-a)) .* cos.((a-1) .* y .+ t)
end
import Distributions.pdf
function pdf(d::PositiveStable,x::Real)
if d.α == 1 && d.β == 1
return x == 1 ? 1. : 0.
end
m = Int(1e4)
pir = pi*d.ρ
if d.α > 1
delta = .3
if x > 0
if x < delta
l = Int(1e2)
v = 1:Int(floor(l*d.α))
w = (-1) .^ ((v .- 1) .% 2) .* gamma.(v ./ d.α .+ 1) .* sin.(pir .* v) ./
((d.ρ*pi) .* gamma.(v .+ 1))
return sum(w .* abs(x) .^ (v .- 1))
else
a0 = d.α/((d.α-1)*2)
raa = 1 /(d.α-1)
a1 = abs(x) ^ raa
a2 = - a1 ^ d.α
a1 /= d.ρ
nodes, weights = gausslegendre(m)
s1 = d.ρ
s2 = 1 - d.ρ
seq1 = auxV2(nodes .* s1 .+ s2,d.α,d.θ)
imin = 1
for k=1:length(seq1)
if isfinite(seq1[k])
imin = k
break
end
end
weights = (a0*s1) .* weights
return a1*(sum(seq1[imin:end] .* exp.(a2 .* seq1[imin:end]) .* weights[imin:end]))
end
elseif x < 0
return 0.
else
return gamma(1 + 1 / d.α)*sin(pir)/pir
end
elseif d.α < 1
delta = 1.
if x > delta
l = Int(1.5e2)
v = 1:Int(floor(l*d.α))
w = (-1) .^ ((v .- 1) .% 2) .* gamma.(v .* d.α .+ 1) .*
sin.((pir*d.α) .* v) ./ (gamma.(v .+ 1) .* pir)
return sum(w .* x .^ (-d.α .* v .- 1))
elseif x > 0
a0 = d.α/((d.α-1)*2)
raa = 1 /(d.α-1)
a1 = abs(x) ^ raa
a2 = - a1 ^ d.α
a1 /= -d.ρ
nodes, weights = gausslegendre(m)
s1 = d.ρ
s2 = 1 - d.ρ
seq1 = auxV2(nodes .* s1 .+ s2,d.α,d.θ)
imin = 1
for k=1:length(seq1)
if isfinite(seq1[k])
imin = k
break
end
end
weights = (a0*s1) .* weights
return a1*(sum(seq1[imin:end] .* exp.(a2 .* seq1[imin:end]) .* weights[imin:end]))
elseif x < 0
return 0.
else
return gamma(1 + 1 / d.α)*sin(pir)/pir
end
else #d.α = 1
return x >= 0 ? pdf(Cauchy(-cos(pir),sin(pir)),x)/d.ρ : 0.
end
end
function pdf(d::PositiveStable,X::AbstractArray{<:Real})
if d.α == 1 && d.β == 1
return convert(Array{Float64},X .== 1)
end
m = Int(1e4)
x = vec(X)
res = Float64[]
pir = pi*d.ρ
pir1 = pi*(1 - d.ρ)
aux = gamma(1 + 1 / d.α)*sin(pir)/pir
if d.α > 1
l = Int(1e2)
v = 1:Int(floor(l*d.α))
delta = .3
w = (-1) .^ ((v .- 1) .% 2) .* gamma.(v ./ d.α .+ 1) .* sin.(pir .* v) ./
((d.ρ*pi) .* gamma.(v .+ 1))
w1 = (-1) .^ ((v .- 1) .% 2) .* gamma.(v ./ d.α .+ 1) .* sin.(pir1 .* v) ./
((d.ρ*pi) .* gamma.(v .+ 1))
a0 = d.α/((d.α-1)*2)
raa = 1 /(d.α-1)
a1 = abs.(x) .^ raa
a2 = - a1 .^ d.α
a1 ./= d.ρ
nodes, weights = gausslegendre(m)
s1 = d.ρ
s2 = 1 - d.ρ
seq1 = auxV2(nodes .* s1 .+ s2,d.α,d.θ)
imin = 1
for k=1:length(seq1)
if isfinite(seq1[k])
imin = k
break
end
end
weights = (a0*s1) .* weights
for i = 1:length(x)
if x[i] > 0
if x[i] < delta
push!(res,sum(w .* abs(x[i]) .^ (v .- 1)))
else
push!(res,a1[i]*(sum(seq1[imin:end] .* exp.(a2[i] .* seq1[imin:end]) .* weights[imin:end])))
end
elseif x[i] < 0
push!(res,0.)
else
push!(res,aux)
end
end
elseif d.α<1
l = Int(1.5e2)
v = 1:Int(floor(l*d.α))
delta = (1+d.α)/2
v = 1:Int(floor(l*d.α))
w = (-1) .^ ((v .- 1) .% 2) .* gamma.(v .* d.α .+ 1) .*
sin.((pir*d.α) .* v) ./ (gamma.(v .+ 1) .* pir)
a0 = d.α/((d.α-1)*2)
raa = 1 /(d.α-1)
a1 = abs.(x) .^ raa
a2 = - a1 .^ d.α
a1 ./= -d.ρ
nodes, weights = gausslegendre(m)
s1 = d.ρ
s2 = 1 - d.ρ
seq1 = auxV2(nodes .* s1 .+ s2,d.α,d.θ)
a1 .*= a0*s1
imin = 1
for k=1:length(seq1)
if isfinite(seq1[k])
imin = k
break
end
end
for i = 1:length(x)
if x[i] > delta
push!(res,sum(w .* x[i] .^ (-d.α .* v .- 1)))
elseif x[i] > 0
push!(res,a1[i]*(sum(seq1[imin:end] .* exp.(a2[i] .* seq1[imin:end]) .* weights[imin:end])))
elseif x[i] < 0
push!(res,0.)
else
push!(res,aux)
end
end
else #d.α = 1
Cd = Cauchy(-cos(pir),sin(pir))
res = [x[i] >= 0 ? pdf(Cd,x[i])/d.ρ : 0 for i=1:length(x)]
end
return reshape(res,size(X))
end
import Distributions.cdf
function cdf(d::PositiveStable,x::Real)
if d.α == 1 && d.β == 1
return x <= 1 ? 1. : 0.
end
pir = pi*d.ρ
if d.α > 1
delta = .3
if x > 0
if x < delta
l = Int(1e2)
v = 1:Int(floor(l*d.α))
w = (-1) .^ ((v .- 1) .% 2) .* gamma.(v ./ d.α .+ 1) .*
sin.(pir .* v) ./ ((pi*d.ρ) .* v .* gamma.(v .+ 1))
return sum(w .* (x .^ v))
else
m = Int(1e4)
raa = d.α/(d.α-1)
a2 = -abs(x) ^ raa
nodes, weights = gausslegendre(m)
s1 = d.ρ
s2 = 1 - d.ρ
seq1 = auxV2(nodes .* s1 .+ s2,d.α,d.θ)
s1 = s1/(2 * d.ρ)
return 1 - s1*sum(exp.(a2 .* seq1) .* weights)
end
else
return 0.
end
elseif d.α < 1
delta = (1+d.α)/2
    if x > delta
      l = Int(1.5e2)
      v = 1:Int(floor(l*d.α))
      w = (-1) .^ ((v .- 1) .% 2) .* gamma.(v .* d.α .+ 1) .*
        sin.((pir*d.α) .* v) ./ (v .* gamma.(v .+ 1))
      pira = d.α*pir
      return 1 - sum(w .* x .^ (-d.α .* v))/pira
    elseif x > 0
      m = Int(1e4)
      raa = d.α /(d.α-1)
      a2 = -abs(x) ^ raa
      a0 = 1/2
      nodes, weights = gausslegendre(m)
      s1 = d.ρ
      s2 = 1 - d.ρ
      seq1 = auxV2(nodes .* s1 .+ s2,d.α,d.θ)
      imin = 1
      for k=1:length(seq1)
        if isfinite(seq1[k])
          imin = k
          break
        end
      end
      weights = (a0*s1) .* weights ./ d.ρ
      return sum(exp.(a2 .* seq1[imin:end]) .* weights[imin:end])
    else
      return 0.
    end
else #d.α = 1
return x >= 0 ? (cdf(Cauchy(-cos(pir),sin(pir)),x)-1)/d.ρ+1 : 0.
end
end
function cdf(d::PositiveStable,X::AbstractArray{<:Real})
if d.α == 1 && d.β == 1
return convert(Array{Float64},X .<= 1)
end
m = Int(1e4)
x = vec(X)
res = Float64[]
pir = pi*d.ρ
if d.α > 1
delta = .3
l = Int(1e2)
v = 1:Int(floor(l*d.α))
w = (-1) .^ ((v .- 1) .% 2) .* gamma.(v ./ d.α .+ 1) .*
sin.(pir .* v) ./ ((pi*d.ρ) .* v .* gamma.(v .+ 1))
raa = d.α/(d.α-1)
a2 = -abs.(x) .^ raa
nodes, weights = gausslegendre(m)
s1 = d.ρ
s2 = 1 - d.ρ
seq1 = auxV2(nodes .* s1 .+ s2,d.α,d.θ)
s1 = s1/(2 * d.ρ)
for i = 1:length(x)
if x[i] > 0
if x[i] < delta
push!(res,sum(w .* (x[i] .^ v)))
else
push!(res,1 - s1*(sum(exp.(a2[i] .* seq1) .* weights)))
end
else
push!(res,0.)
end
end
elseif d.α < 1
delta = (1+d.α)/2
l = Int(1.5e2)
v = 1:Int(floor(l*d.α))
w = (-1) .^ ((v .- 1) .% 2) .* gamma.(v .* d.α .+ 1) .*
sin.((pir*d.α) .* v) ./ (v .* gamma.(v .+ 1))
pira = d.α*pir
raa = d.α /(d.α-1)
a2 = -abs.(x) .^ raa
a0 = 1/2
nodes, weights = gausslegendre(m)
s1 = d.ρ
s2 = 1 - d.ρ
seq1 = auxV2(nodes .* s1 .+ s2,d.α,d.θ)
imin = 1
for k=1:length(seq1)
if isfinite(seq1[k])
imin = k
break
end
end
weights = (a0*s1) .* weights ./ d.ρ
for i = 1:length(x)
if x[i] > delta
push!(res, 1 - sum(w .* x[i] .^ (-d.α .* v))/pira)
elseif x[i] > 0
push!(res,sum(exp.(a2[i] .* seq1[imin:end]) .* weights[imin:end]))
else
push!(res,0.)
end
end
else #d.α = 1
Cd = Cauchy(-cos(pir),sin(pir))
res = [x[i] >= 0 ? (cdf(Cd,x[i])-1)/d.ρ + 1 : 0 for i=1:length(x)]
end
return reshape(res,size(X))
end
import Distributions.mgf
function mgf(d::PositiveStable,x::Real)
  if x == 0
    return 1.
  end
  if d.α == 2
    return 2 * exp(x^2/4.)*cdf(Normal(0,sqrt(2)),x/2)
  end
  if d.β == -1 && x >= -1
    # series E[e^{xS}] = sum_n x^n E[S^n]/n! with E[S^n] = Γ(1+n)/Γ(1+n/α)
    # (see mellin below); the original coefficient 1/Γ(n+1) just gives e^x
    l = 12
    v = 0:Int(floor(l*d.α))
    w = 1 ./ gamma.(1 .+ v ./ d.α)
    return sum(w .* x .^ v)
  end
  m = Int(1e4) # quadrature size, matching pdf/cdf above (was undefined here)
  nodes, weights = gausslegendre(m)
  nodes = nodes ./ 2 .+ .5
  weights = weights ./ (2 * d.ρ)
  nodes2 = 1 ./ (1 .- nodes) .^ 2
  nodes = nodes ./ (1 .- nodes)
  pir = pi*d.ρ
  fC = nodes2 .* pdf.(Cauchy(-cos(pir),sin(pir)),nodes)
  if d.β == -1 || x < 0
    mat = exp.(-(abs(x) .* nodes) .^ d.α)
    return sum(fC .* mat .* weights)
  else
    return Inf
  end
end
function mgf(d::PositiveStable,X::AbstractArray{<:Real})
x = vec(X)
l = 12
if d.α==2
return 2 .* exp.(x.^2 ./ 4.) .* cdf(Normal(0,sqrt(2)),x ./ 2)
end
nodes, weights = gausslegendre(m)
nodes = nodes ./ 2 .+ .5
weights = weights ./ (2 * d.ρ)
nodes2 = 1 ./ (1 .- nodes) .^ 2
nodes = nodes ./ (1 .- nodes)
pir = pi*d.ρ
fC = nodes2 .* pdf(Cauchy(-cos(pir),sin(pir)),nodes)
res = Float64[]
mat = exp.(-(abs.(x)*transpose(nodes)) .^ d.α)
if d.β == -1
v = 0:Int(floor(18*d.α))
w = 1 ./ gamma.(v .+ 1)
if x[i] == 0
push!(res,1.)
elseif x[i] >= -1
push!(res,sum(w .* x[i] .^ v))
else
push!(res,sum(fC .* mat[i,:] .* weights))
end
else
for i = 1:length(x)
if x[i] > 0
push!(res,Inf)
elseif x[i] < 0
push!(res,sum(fC .* mat[i,:] .* weights))
else
push!(res,1.)
end
end
end
return reshape(res,size(X))
end
import Distributions.mean
function mean(d::PositiveStable)
if d.α <= 1
return Inf
end
return sin(pi*d.ρ)/(d.α*d.ρ*sin(pi/d.α)*gamma(1 + 1 / d.α))
end
import Distributions.var
function var(d::PositiveStable)
if d.α < 2 && d.β != -1
return Inf
elseif d.α == 1
if abs(d.β) != 1
return Inf
else
return 0.
end
else
return 2 / gamma(1 + 2 / d.α) - 1 / gamma(1 + 1 / d.α) ^ 2
end
end
function mellin(d::PositiveStable,x::Complex)
if (real(x) >= d.α && (d.α <= 1 || (d.α < 2 && d.β != -1))) || real(x) <= -1
return Inf
end
if (d.α > 1 && d.β == -1) || d.α == 2
return gamma(1 + x) / gamma(1 + x / d.α)
end
return (sin(pi * d.ρ * x) * gamma(1 + x)) /
(d.α * d.ρ * sin(pi * x / d.α) * gamma(1 + x / d.α))
end
function mellin(d::PositiveStable,X::AbstractArray{<:Complex})
if (d.α > 1 && d.β == -1) || d.α == 2
return gamma.(1 .+ X) ./ gamma.(1 .+ X ./ d.α)
end
res = Complex{Float64}[]
for x = X
push!(res,
(real(x) >= d.α && (d.α <= 1 || (d.α < 2 && d.β != -1))) || real(x) <= -1 ?
Inf : (sin(pi * d.ρ * x) * gamma(1 + x)) / (d.α * d.ρ * sin(pi * x / d.α) * gamma(1 + x / d.α))
)
end
return reshape(res,size(X))
end
function mellin(d::PositiveStable,x::Real)
if (real(x) >= d.α && (d.α <= 1 || (d.α < 2 && d.β != -1))) || real(x) <= -1
return Inf
end
if (d.α > 1 && d.β == -1) || d.α == 2
return gamma(1 + x) / gamma(1 + x / d.α)
end
return (sin(pi * d.ρ * x) * gamma(1 + x)) /
(d.α * d.ρ * sin(pi * x / d.α) * gamma(1 + x / d.α))
end
function mellin(d::PositiveStable,X::AbstractArray{<:Real})
if (d.α > 1 && d.β == -1) || d.α == 2
return gamma.(1 .+ X) ./ gamma.(1 .+ X ./ d.α)
end
res = Float64[]
for x = X
push!(res,
(real(x) >= d.α && (d.α <= 1 || (d.α < 2 && d.β != -1))) || real(x) <= -1 ?
Inf : (sin(pi * d.ρ * x) * gamma(1 + x)) / (d.α * d.ρ * sin(pi * x / d.α) * gamma(1 + x / d.α))
)
end
return reshape(res,size(X))
end
export PositiveStable, rand, minimum, maximum, insupport, pdf, cdf, mgf, mean, var, mellin, params
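# A minimal usage sketch (parameter values are illustrative):
#
#   d = PositiveStable(1.5, 0.0)
#   xs = rand(d, 1000)           # Chambers-Mallows-Stuck sampling
#   pdf(d, 1.0), cdf(d, 1.0)     # series/quadrature evaluation
#   mean(d)                      # finite here since α > 1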
|
{"hexsha": "ec533aee3ca4a55304b27fcc24320ac513fb8254", "size": 14125, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/positivestable.jl", "max_stars_repo_name": "jorgeignaciogc/StableMeander.jl", "max_stars_repo_head_hexsha": "5e7d9be7ce032445a4c8d7e98a59fbc1d04169ed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/positivestable.jl", "max_issues_repo_name": "jorgeignaciogc/StableMeander.jl", "max_issues_repo_head_hexsha": "5e7d9be7ce032445a4c8d7e98a59fbc1d04169ed", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/positivestable.jl", "max_forks_repo_name": "jorgeignaciogc/StableMeander.jl", "max_forks_repo_head_hexsha": "5e7d9be7ce032445a4c8d7e98a59fbc1d04169ed", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8242530756, "max_line_length": 102, "alphanum_fraction": 0.4579823009, "num_tokens": 5870}
|
# EGEDA TPES plots for each economy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from openpyxl import Workbook
import xlsxwriter
import pandas.io.formats.excel
# Import the recently created data frame that joins OSeMOSYS results to EGEDA historical
EGEDA_years = pd.read_csv('./data/4_Joined/OSeMOSYS_to_EGEDA.csv')
# Define unique values for economy, fuels, and items columns
Economy_codes = EGEDA_years.economy.unique()
Fuels = EGEDA_years.fuel_code.unique()
Items = EGEDA_years.item_code_new.unique()
# Colours for charting (to be amended later)
colours = pd.read_excel('./data/2_Mapping_and_other/colour_template_7th.xlsx')
colours_hex = colours['hex']
# Define month and year to create folder for saving charts/tables
month_year = pd.to_datetime('today').strftime('%B_%Y')
# Subsets for impending df builds
First_level_fuels = list(Fuels[[0, 9, 17, 24, 45, 49, 50, 51, 60, 76, 77, 78, 79]])
Required_fuels = list(Fuels[[0, 9, 17, 24, 45, 49, 50, 51, 61, 62, 63, 64, 65, 66, 68, 69, 70, 75, 76, 77, 78, 79]])
# Aggregates of different fuels for TPES/prod charting
Coal_fuels = list(Fuels[[0, 9]])
Oil_fuels = list(Fuels[[17, 24]])
Other_fuels = list(Fuels[[66, 69, 75]])
Renewables_fuels = list(Fuels[[49, 51, 61, 62, 63, 64, 65, 68, 70]])
tpes_items = list(Items[[0, 1, 2, 3, 4, 5, 6, 7, 8]])
Prod_items = tpes_items[:3]
Petroleum_fuels = list(Fuels[[24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44]]) # 39 was duplicated as 29 in the original index list
# Make space for charts (before data/tables)
chart_height = 18 # number of excel rows before the data is written
# Define column chart years
col_chart_years = ['2000', '2010', '2017', '2020', '2030', '2040', '2050']
TPES_agg_fuels = ['Coal', 'Oil', 'Gas', 'Nuclear', 'Renewables', 'Other fuels']
TPES_agg_trade = ['Coal', 'Crude oil & NGL', 'Petroleum products', 'Gas', 'Nuclear', 'Renewables', 'Other fuels']
# Total Primary Energy Supply fuel breakdown for each economy
########### Build TPES dataframes for each economy providing various breakdowns (by fuel, TPES component, etc)
for economy in Economy_codes:
################################################################### DATAFRAMES ###################################################################
# First data frame: TPES by fuels (and also fourth and sixth dataframe with slight tweaks)
tpes_df = EGEDA_years[(EGEDA_years['economy'] == economy) &
(EGEDA_years['item_code_new'] == '6_total_primary_energy_supply') &
(EGEDA_years['fuel_code'].isin(Required_fuels))].loc[:, 'fuel_code':]
#nrows1 = tpes_df.shape[0]
#ncols1 = tpes_df.shape[1]
coal = tpes_df[tpes_df['fuel_code'].isin(Coal_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Coal',
item_code_new = '6_total_primary_energy_supply')
oil = tpes_df[tpes_df['fuel_code'].isin(Oil_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Oil',
item_code_new = '6_total_primary_energy_supply')
renewables = tpes_df[tpes_df['fuel_code'].isin(Renewables_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Renewables',
item_code_new = '6_total_primary_energy_supply')
others = tpes_df[tpes_df['fuel_code'].isin(Other_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Other fuels',
item_code_new = '6_total_primary_energy_supply')
tpes_df1 = tpes_df.append([coal, oil, renewables, others])[['fuel_code',
'item_code_new'] + list(tpes_df.loc[:, '2000':])].reset_index(drop = True)
tpes_df1.loc[tpes_df1['fuel_code'] == '5_gas', 'fuel_code'] = 'Gas'
tpes_df1.loc[tpes_df1['fuel_code'] == '7_nuclear', 'fuel_code'] = 'Nuclear'
tpes_df1 = tpes_df1[tpes_df1['fuel_code'].isin(TPES_agg_fuels)].set_index('fuel_code').loc[TPES_agg_fuels].reset_index()
nrows4 = tpes_df1.shape[0]
ncols4 = tpes_df1.shape[1]
tpes_df2 = tpes_df1[['fuel_code', 'item_code_new'] + col_chart_years]
nrows6 = tpes_df2.shape[0]
ncols6 = tpes_df2.shape[1]
# Second data frame: production (and also fifth and seventh data frames with slight tweaks)
prod_df = EGEDA_years[(EGEDA_years['economy'] == economy) &
(EGEDA_years['item_code_new'] == '1_indigenous_production') &
(EGEDA_years['fuel_code'].isin(Required_fuels))].loc[:, 'fuel_code':]
#nrows2 = prod_df.shape[0]
#ncols2 = prod_df.shape[1]
coal = prod_df[prod_df['fuel_code'].isin(Coal_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Coal',
item_code_new = '1_indigenous_production')
oil = prod_df[prod_df['fuel_code'].isin(Oil_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Oil',
item_code_new = '1_indigenous_production')
renewables = prod_df[prod_df['fuel_code'].isin(Renewables_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Renewables',
item_code_new = '1_indigenous_production')
others = prod_df[prod_df['fuel_code'].isin(Other_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Other fuels',
item_code_new = '1_indigenous_production')
prod_df1 = prod_df.append([coal, oil, renewables, others])[['fuel_code',
'item_code_new'] + list(prod_df.loc[:, '2000':])].reset_index(drop = True)
prod_df1.loc[prod_df1['fuel_code'] == '5_gas', 'fuel_code'] = 'Gas'
prod_df1.loc[prod_df1['fuel_code'] == '7_nuclear', 'fuel_code'] = 'Nuclear'
prod_df1 = prod_df1[prod_df1['fuel_code'].isin(TPES_agg_fuels)].set_index('fuel_code').loc[TPES_agg_fuels].reset_index()
nrows5 = prod_df1.shape[0]
ncols5 = prod_df1.shape[1]
prod_df2 = prod_df1[['fuel_code', 'item_code_new'] + col_chart_years]
nrows7 = prod_df2.shape[0]
ncols7 = prod_df2.shape[1]
# Third data frame: production; net exports; bunkers; stock changes
tpes_comp_df1 = EGEDA_years[(EGEDA_years['economy'] == economy) &
(EGEDA_years['item_code_new'].isin(tpes_items)) &
(EGEDA_years['fuel_code'] == '12_total')]
net_trade = tpes_comp_df1[tpes_comp_df1['item_code_new'].isin(['2_imports',
'3_exports'])].groupby(['economy',
'fuel_code']).sum().assign(fuel_code = '12_total',
item_code_new = 'Net trade')
bunkers = tpes_comp_df1[tpes_comp_df1['item_code_new'].isin(['4_1_international_marine_bunkers',
'4_2_international_aviation_bunkers'])].groupby(['economy',
'fuel_code']).sum().assign(fuel_code = '12_total',
item_code_new = 'Bunkers')
tpes_comp_df1 = tpes_comp_df1.append([net_trade, bunkers])[['fuel_code', 'item_code_new'] + col_chart_years].reset_index(drop = True)
tpes_comp_df1.loc[tpes_comp_df1['item_code_new'] == '1_indigenous_production', 'item_code_new'] = 'Production'
tpes_comp_df1.loc[tpes_comp_df1['item_code_new'] == '5_stock_changes', 'item_code_new'] = 'Stock changes'
tpes_comp_df1 = tpes_comp_df1.loc[tpes_comp_df1['item_code_new'].isin(['Production',
'Net trade',
'Bunkers',
'Stock changes'])].reset_index(drop = True)
nrows3 = tpes_comp_df1.shape[0]
ncols3 = tpes_comp_df1.shape[1]
# Imports/exports data frame
imports_df1 = EGEDA_years[(EGEDA_years['economy'] == economy) &
(EGEDA_years['item_code_new'] == '2_imports') &
(EGEDA_years['fuel_code'].isin(Required_fuels))]
coal = imports_df1[imports_df1['fuel_code'].isin(Coal_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Coal',
item_code_new = '2_imports')
# oil = imports_df1[imports_df1['fuel_code'].isin(Oil_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Oil',
# item_code_new = '2_imports')
renewables = imports_df1[imports_df1['fuel_code'].isin(Renewables_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Renewables',
item_code_new = '2_imports')
others = imports_df1[imports_df1['fuel_code'].isin(Other_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Other fuels',
item_code_new = '2_imports')
    # No 'Oil' aggregate for trade: crude oil & NGL and petroleum products are kept separate
    imports_df1 = imports_df1.append([coal, renewables, others]).reset_index(drop = True)
imports_df1.loc[imports_df1['fuel_code'] == '3_crude_oil_and_ngl', 'fuel_code'] = 'Crude oil & NGL'
imports_df1.loc[imports_df1['fuel_code'] == '4_petroleum_products', 'fuel_code'] = 'Petroleum products'
imports_df1.loc[imports_df1['fuel_code'] == '5_gas', 'fuel_code'] = 'Gas'
imports_df1.loc[imports_df1['fuel_code'] == '7_nuclear', 'fuel_code'] = 'Nuclear'
imports_df1 = imports_df1[imports_df1['fuel_code'].isin(TPES_agg_trade)]\
.set_index('fuel_code').loc[TPES_agg_trade].reset_index()\
[['fuel_code', 'item_code_new'] + list(imports_df1.loc[:, '2000':])]
nrows8 = imports_df1.shape[0]
ncols8 = imports_df1.shape[1]
imports_df2 = imports_df1[['fuel_code', 'item_code_new'] + col_chart_years]
nrows12 = imports_df2.shape[0]
ncols12 = imports_df2.shape[1]
exports_df1 = EGEDA_years[(EGEDA_years['economy'] == economy) &
(EGEDA_years['item_code_new'] == '3_exports') &
(EGEDA_years['fuel_code'].isin(Required_fuels))].copy()
# Change export values to positive rather than negative
exports_df1[list(exports_df1.columns[3:])] = exports_df1[list(exports_df1.columns[3:])].apply(lambda x: x * -1)
coal = exports_df1[exports_df1['fuel_code'].isin(Coal_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Coal',
item_code_new = '3_exports')
# oil = exports_df1[exports_df1['fuel_code'].isin(Oil_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Oil',
# item_code_new = '3_exports')
renewables = exports_df1[exports_df1['fuel_code'].isin(Renewables_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Renewables',
item_code_new = '3_exports')
others = exports_df1[exports_df1['fuel_code'].isin(Other_fuels)].groupby(['item_code_new']).sum().assign(fuel_code = 'Other fuels',
item_code_new = '3_exports')
    # As with imports, no 'Oil' aggregate is appended for exports
    exports_df1 = exports_df1.append([coal, renewables, others]).reset_index(drop = True)
exports_df1.loc[exports_df1['fuel_code'] == '3_crude_oil_and_ngl', 'fuel_code'] = 'Crude oil & NGL'
exports_df1.loc[exports_df1['fuel_code'] == '4_petroleum_products', 'fuel_code'] = 'Petroleum products'
exports_df1.loc[exports_df1['fuel_code'] == '5_gas', 'fuel_code'] = 'Gas'
exports_df1.loc[exports_df1['fuel_code'] == '7_nuclear', 'fuel_code'] = 'Nuclear'
exports_df1 = exports_df1[exports_df1['fuel_code'].isin(TPES_agg_trade)]\
.set_index('fuel_code').loc[TPES_agg_trade].reset_index()\
[['fuel_code', 'item_code_new'] + list(exports_df1.loc[:, '2000':])]
nrows9 = exports_df1.shape[0]
ncols9 = exports_df1.shape[1]
exports_df2 = exports_df1[['fuel_code', 'item_code_new'] + col_chart_years]
nrows13 = exports_df2.shape[0]
ncols13 = exports_df2.shape[1]
# Bunkers dataframe
bunkers_df1 = EGEDA_years[(EGEDA_years['economy'] == economy) &
(EGEDA_years['item_code_new'] == '4_1_international_marine_bunkers') &
(EGEDA_years['fuel_code'].isin(['4_5_gas_diesel_oil', '4_6_fuel_oil']))]
bunkers_df1 = bunkers_df1[['fuel_code', 'item_code_new'] + list(bunkers_df1.loc[:, '2000':])]
bunkers_df1.loc[bunkers_df1['fuel_code'] == '4_5_gas_diesel_oil', 'fuel_code'] = 'Gas diesel oil'
bunkers_df1.loc[bunkers_df1['fuel_code'] == '4_6_fuel_oil', 'fuel_code'] = 'Fuel oil'
nrows10 = bunkers_df1.shape[0]
ncols10 = bunkers_df1.shape[1]
bunkers_df2 = EGEDA_years[(EGEDA_years['economy'] == economy) &
(EGEDA_years['item_code_new'] == '4_2_international_aviation_bunkers') &
(EGEDA_years['fuel_code'].isin(['4_3_jet_fuel', '4_1_2_aviation_gasoline']))]
bunkers_df2 = bunkers_df2[['fuel_code', 'item_code_new'] + list(bunkers_df2.loc[:, '2000':])]
bunkers_df2.loc[bunkers_df2['fuel_code'] == '4_1_2_aviation_gasoline', 'fuel_code'] = 'Aviation gasoline'
bunkers_df2.loc[bunkers_df2['fuel_code'] == '4_3_jet_fuel', 'fuel_code'] = 'Jet fuel'
nrows11 = bunkers_df2.shape[0]
ncols11 = bunkers_df2.shape[1]
# Define directory
script_dir = './results/' + month_year + '/TPES/'
results_dir = os.path.join(script_dir, 'economy_breakdown/', economy)
if not os.path.isdir(results_dir):
os.makedirs(results_dir)
# Create a Pandas excel writer workbook using xlsxwriter as the engine and save it in the directory created above
writer = pd.ExcelWriter(results_dir + '/' + economy + '_tpes.xlsx', engine = 'xlsxwriter')
pandas.io.formats.excel.ExcelFormatter.header_style = None
tpes_df1.to_excel(writer, sheet_name = economy + '_TPES', index = False, startrow = chart_height)
tpes_df2.to_excel(writer, sheet_name = economy + '_TPES', index = False, startrow = chart_height + nrows4 + 3)
prod_df1.to_excel(writer, sheet_name = economy + '_prod', index = False, startrow = chart_height)
prod_df2.to_excel(writer, sheet_name = economy + '_prod', index = False, startrow = chart_height + nrows5 + 3)
tpes_comp_df1.to_excel(writer, sheet_name = economy + '_TPES_components_I', index = False, startrow = chart_height)
imports_df1.to_excel(writer, sheet_name = economy + '_TPES_components_I', index = False, startrow = chart_height + nrows3 + 3)
imports_df2.to_excel(writer, sheet_name = economy + '_TPES_components_I', index = False, startrow = chart_height + nrows3 + nrows8 + 6)
exports_df1.to_excel(writer, sheet_name = economy + '_TPES_components_I', index = False, startrow = chart_height + nrows3 + nrows8 + nrows12 + 9)
exports_df2.to_excel(writer, sheet_name = economy + '_TPES_components_I', index = False, startrow = chart_height + nrows3 + nrows8 + nrows12 + nrows9 + 12)
bunkers_df1.to_excel(writer, sheet_name = economy + '_TPES_components_II', index = False, startrow = chart_height)
bunkers_df2.to_excel(writer, sheet_name = economy + '_TPES_components_II', index = False, startrow = chart_height + nrows10 + 3)
#ImEx_df1.to_excel(writer, sheet_name = economy + '_TPES_components', index = False, startrow = chart_height + nrows3 + 3)
# Access the workbook
workbook = writer.book
# Comma format and header format
comma_format = workbook.add_format({'num_format': '#,##0'})
header_format = workbook.add_format({'font_name': 'Calibri', 'font_size': 11, 'bold': True})
cell_format1 = workbook.add_format({'bold': True})
# Access the sheet created using writer above
worksheet1 = writer.sheets[economy + '_TPES']
# Apply comma format and header format to relevant data rows
worksheet1.set_column(2, ncols4 + 1, None, comma_format)
worksheet1.set_row(chart_height, None, header_format)
worksheet1.set_row(chart_height + nrows4 + 3, None, header_format)
worksheet1.write(0, 0, economy + ' TPES fuel', cell_format1)
################################################################### CHARTS ###################################################################
# Create a TPES chart
tpes_chart2 = workbook.add_chart({'type': 'area', 'subtype': 'stacked'})
tpes_chart2.set_size({
'width': 500,
'height': 300
})
tpes_chart2.set_chartarea({
'border': {'none': True}
})
tpes_chart2.set_x_axis({
'name': 'Year',
'label_position': 'low',
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232', 'rotation': -45},
'position_axis': 'on_tick',
'interval_unit': 4,
'line': {'color': '#bebebe'}
})
tpes_chart2.set_y_axis({
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'name': 'PJ',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232'},
'major_gridlines': {
'visible': True,
'line': {'color': '#bebebe'}
},
'line': {'color': '#bebebe'}
})
tpes_chart2.set_legend({
'font': {'font': 'Segoe UI', 'size': 10}
#'none': True
})
tpes_chart2.set_title({
'none': True
})
# Configure the series of the chart from the dataframe data.
for i in range(nrows4):
tpes_chart2.add_series({
'name': [economy + '_TPES', chart_height + i + 1, 0],
'categories': [economy + '_TPES', chart_height, 2, chart_height, ncols4 - 1],
'values': [economy + '_TPES', chart_height + i + 1, 2, chart_height + i + 1, ncols4 - 1],
'fill': {'color': colours_hex[i]},
'border': {'none': True}
})
worksheet1.insert_chart('B3', tpes_chart2)
######## same chart as above but line
# TPES line chart
tpes_chart4 = workbook.add_chart({'type': 'line'})
tpes_chart4.set_size({
'width': 500,
'height': 300
})
tpes_chart4.set_chartarea({
'border': {'none': True}
})
tpes_chart4.set_x_axis({
'name': 'Year',
'label_position': 'low',
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232', 'rotation': -45},
'position_axis': 'on_tick',
'interval_unit': 4,
'line': {'color': '#bebebe'}
})
tpes_chart4.set_y_axis({
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'name': 'PJ',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232'},
'major_gridlines': {
'visible': True,
'line': {'color': '#bebebe'}
},
'line': {'color': '#bebebe'}
})
tpes_chart4.set_legend({
'font': {'font': 'Segoe UI', 'size': 10}
#'none': True
})
tpes_chart4.set_title({
'none': True
})
# Configure the series of the chart from the dataframe data.
for i in range(nrows4):
tpes_chart4.add_series({
'name': [economy + '_TPES', chart_height + i + 1, 0],
'categories': [economy + '_TPES', chart_height, 2, chart_height, ncols4 - 1],
'values': [economy + '_TPES', chart_height + i + 1, 2, chart_height + i + 1, ncols4 - 1],
'line': {'color': colours_hex[i],
'width': 1}
})
worksheet1.insert_chart('R3', tpes_chart4)
###################### Create another TPES chart showing proportional share #################################
# Create a TPES chart
tpes_chart3 = workbook.add_chart({'type': 'column', 'subtype': 'percent_stacked'})
tpes_chart3.set_size({
'width': 500,
'height': 300
})
tpes_chart3.set_chartarea({
'border': {'none': True}
})
tpes_chart3.set_x_axis({
'name': 'Year',
'label_position': 'low',
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232', 'rotation': -45},
'interval_unit': 1,
'line': {'color': '#bebebe'}
})
tpes_chart3.set_y_axis({
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'name': 'PJ',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232'},
'major_gridlines': {
'visible': True,
'line': {'color': '#bebebe'}
},
'line': {'color': '#bebebe'}
})
tpes_chart3.set_legend({
'font': {'font': 'Segoe UI', 'size': 10}
#'none': True
})
tpes_chart3.set_title({
'none': True
})
# Configure the series of the chart from the dataframe data.
for component in TPES_agg_fuels:
i = tpes_df2[tpes_df2['fuel_code'] == component].index[0]
tpes_chart3.add_series({
'name': [economy + '_TPES', chart_height + nrows4 + i + 4, 0],
'categories': [economy + '_TPES', chart_height + nrows4 + 3, 2, chart_height + nrows4 + 3, ncols6 - 1],
'values': [economy + '_TPES', chart_height + nrows4 + i + 4, 2, chart_height + nrows4 + i + 4, ncols6 - 1],
'fill': {'color': colours_hex[i]},
'border': {'none': True}
})
worksheet1.insert_chart('J3', tpes_chart3)
########################################### PRODUCTION CHARTS #############################################
    # Access the production sheet created above
worksheet2 = writer.sheets[economy + '_prod']
# Apply comma format and header format to relevant data rows
worksheet2.set_column(2, ncols5 + 1, None, comma_format)
worksheet2.set_row(chart_height, None, header_format)
worksheet2.set_row(chart_height + nrows5 + 3, None, header_format)
worksheet2.write(0, 0, economy + ' prod fuel', cell_format1)
    ###################### Create a PRODUCTION area chart with the 6 aggregate fuel categories #################################
# Create a PROD chart
prod_chart2 = workbook.add_chart({'type': 'area', 'subtype': 'stacked'})
prod_chart2.set_size({
'width': 500,
'height': 300
})
prod_chart2.set_chartarea({
'border': {'none': True}
})
prod_chart2.set_x_axis({
'name': 'Year',
'label_position': 'low',
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232', 'rotation': -45},
'position_axis': 'on_tick',
'interval_unit': 4,
'line': {'color': '#bebebe'}
})
prod_chart2.set_y_axis({
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'name': 'PJ',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232'},
'major_gridlines': {
'visible': True,
'line': {'color': '#bebebe'}
},
'line': {'color': '#bebebe'}
})
prod_chart2.set_legend({
'font': {'font': 'Segoe UI', 'size': 10}
#'none': True
})
prod_chart2.set_title({
'none': True
})
# Configure the series of the chart from the dataframe data.
for i in range(nrows5):
prod_chart2.add_series({
'name': [economy + '_prod', chart_height + i + 1, 0],
'categories': [economy + '_prod', chart_height, 2, chart_height, ncols5 - 1],
'values': [economy + '_prod', chart_height + i + 1, 2, chart_height + i + 1, ncols5 - 1],
'fill': {'color': colours_hex[i]},
'border': {'none': True}
})
worksheet2.insert_chart('B3', prod_chart2)
############ Same as above but with a line ###########
# Create a PROD chart
prod_chart2 = workbook.add_chart({'type': 'line'})
prod_chart2.set_size({
'width': 500,
'height': 300
})
prod_chart2.set_chartarea({
'border': {'none': True}
})
prod_chart2.set_x_axis({
'name': 'Year',
'label_position': 'low',
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232', 'rotation': -45},
'position_axis': 'on_tick',
'interval_unit': 4,
'line': {'color': '#bebebe'}
})
prod_chart2.set_y_axis({
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'name': 'PJ',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232'},
'major_gridlines': {
'visible': True,
'line': {'color': '#bebebe'}
},
'line': {'color': '#bebebe'}
})
prod_chart2.set_legend({
'font': {'font': 'Segoe UI', 'size': 10}
#'none': True
})
prod_chart2.set_title({
'none': True
})
# Configure the series of the chart from the dataframe data.
for i in range(nrows5):
prod_chart2.add_series({
'name': [economy + '_prod', chart_height + i + 1, 0],
'categories': [economy + '_prod', chart_height, 2, chart_height, ncols5 - 1],
'values': [economy + '_prod', chart_height + i + 1, 2, chart_height + i + 1, ncols5 - 1],
'line': {'color': colours_hex[i],
'width': 1}
})
worksheet2.insert_chart('R3', prod_chart2)
###################### Create another PRODUCTION chart showing proportional share #################################
# Create a production chart
prod_chart3 = workbook.add_chart({'type': 'column',
'subtype': 'percent_stacked'})
prod_chart3.set_size({
'width': 500,
'height': 300
})
prod_chart3.set_chartarea({
'border': {'none': True}
})
prod_chart3.set_x_axis({
'name': 'Year',
'label_position': 'low',
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232', 'rotation': -45},
'interval_unit': 1,
'line': {'color': '#bebebe'}
})
prod_chart3.set_y_axis({
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'name': 'PJ',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232'},
'major_gridlines': {
'visible': True,
'line': {'color': '#bebebe'}
},
'line': {'color': '#bebebe'}
})
prod_chart3.set_legend({
'font': {'font': 'Segoe UI', 'size': 10}
#'none': True
})
prod_chart3.set_title({
'none': True
})
# Configure the series of the chart from the dataframe data.
for component in TPES_agg_fuels:
i = prod_df2[prod_df2['fuel_code'] == component].index[0]
prod_chart3.add_series({
'name': [economy + '_prod', chart_height + nrows5 + i + 4, 0],
'categories': [economy + '_prod', chart_height + nrows5 + 3, 2, chart_height + nrows5 + 3, ncols7 - 1],
'values': [economy + '_prod', chart_height + nrows5 + i + 4, 2, chart_height + nrows5 + i + 4, ncols7 - 1],
'fill': {'color': colours_hex[i]},
'border': {'none': True}
})
worksheet2.insert_chart('J3', prod_chart3)
###################################### TPES components I ###########################################
    # Access the TPES components I sheet created above
worksheet3 = writer.sheets[economy + '_TPES_components_I']
# Apply comma format and header format to relevant data rows
worksheet3.set_column(2, ncols8 + 1, None, comma_format)
worksheet3.set_row(chart_height, None, header_format)
worksheet3.set_row(chart_height + nrows3 + 3, None, header_format)
worksheet3.set_row(chart_height + nrows3 + nrows8 + 6, None, header_format)
worksheet3.set_row(chart_height + nrows3 + nrows8 + nrows12 + 9, None, header_format)
worksheet3.set_row(chart_height + nrows3 + nrows8 + nrows12 + nrows9 + 12, None, header_format)
worksheet3.write(0, 0, economy + ' TPES components I', cell_format1)
# Create a TPES components chart
tpes_comp_chart1 = workbook.add_chart({'type': 'column', 'subtype': 'stacked'})
tpes_comp_chart1.set_size({
'width': 500,
'height': 300
})
tpes_comp_chart1.set_chartarea({
'border': {'none': True}
})
tpes_comp_chart1.set_x_axis({
'name': 'Year',
'label_position': 'low',
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232', 'rotation': -45},
'line': {'color': '#bebebe'}
})
tpes_comp_chart1.set_y_axis({
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'name': 'PJ',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232'},
'major_gridlines': {
'visible': True,
'line': {'color': '#bebebe'}
},
'line': {'color': '#bebebe'}
})
tpes_comp_chart1.set_legend({
'font': {'font': 'Segoe UI', 'size': 10}
#'none': True
})
tpes_comp_chart1.set_title({
'none': True
})
# Configure the series of the chart from the dataframe data.
for component in ['Production', 'Net trade', 'Bunkers', 'Stock changes']:
i = tpes_comp_df1[tpes_comp_df1['item_code_new'] == component].index[0]
tpes_comp_chart1.add_series({
'name': [economy + '_TPES_components_I', chart_height + i + 1, 1],
'categories': [economy + '_TPES_components_I', chart_height, 2, chart_height, ncols3 - 1],
'values': [economy + '_TPES_components_I', chart_height + i + 1, 2, chart_height + i + 1, ncols3 - 1],
'fill': {'color': colours_hex[i + 5]},
'border': {'none': True}
})
worksheet3.insert_chart('B3', tpes_comp_chart1)
# IMPORTS: Create a line chart subset by fuel
imports_line = workbook.add_chart({'type': 'line'})
imports_line.set_size({
'width': 500,
'height': 300
})
imports_line.set_chartarea({
'border': {'none': True}
})
imports_line.set_x_axis({
'name': 'Year',
'label_position': 'low',
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232', 'rotation': -45},
'position_axis': 'on_tick',
'interval_unit': 4,
'line': {'color': '#bebebe'}
})
imports_line.set_y_axis({
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'name': 'Imports (PJ)',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232'},
'major_gridlines': {
'visible': True,
'line': {'color': '#bebebe'}
},
'line': {'color': '#bebebe'}
})
imports_line.set_legend({
'font': {'font': 'Segoe UI', 'size': 10}
#'none': True
})
imports_line.set_title({
'none': True
})
# Configure the series of the chart from the dataframe data.
for fuel in ['Coal', 'Crude oil & NGL', 'Petroleum products', 'Gas', 'Nuclear', 'Renewables', 'Other fuels']:
i = imports_df1[imports_df1['fuel_code'] == fuel].index[0]
imports_line.add_series({
'name': [economy + '_TPES_components_I', chart_height + nrows3 + i + 4, 0],
'categories': [economy + '_TPES_components_I', chart_height + nrows3 + 3, 2, chart_height + nrows3 + 3, ncols8 - 1],
'values': [economy + '_TPES_components_I', chart_height + nrows3 + i + 4, 2, chart_height + nrows3 + i + 4, ncols8 - 1],
'line': {'color': colours_hex[i], 'width': 1.25},
})
worksheet3.insert_chart('J3', imports_line)
    # Create an imports-by-fuel stacked column chart
imports_column = workbook.add_chart({'type': 'column', 'subtype': 'stacked'})
imports_column.set_size({
'width': 500,
'height': 300
})
imports_column.set_chartarea({
'border': {'none': True}
})
imports_column.set_x_axis({
'name': 'Year',
'label_position': 'low',
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232', 'rotation': -45},
'line': {'color': '#bebebe'}
})
imports_column.set_y_axis({
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'name': 'Imports by fuel (PJ)',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232'},
'major_gridlines': {
'visible': True,
'line': {'color': '#bebebe'}
},
'line': {'color': '#bebebe'}
})
imports_column.set_legend({
'font': {'font': 'Segoe UI', 'size': 10}
#'none': True
})
imports_column.set_title({
'none': True
})
# Configure the series of the chart from the dataframe data.
for i in range(nrows12):
imports_column.add_series({
'name': [economy + '_TPES_components_I', chart_height + nrows3 + nrows8 + i + 7, 0],
'categories': [economy + '_TPES_components_I', chart_height + nrows3 + nrows8 + 6, 2, chart_height + nrows3 + nrows8 + 6, ncols12 - 1],
'values': [economy + '_TPES_components_I', chart_height + nrows3 + nrows8 + i + 7, 2, chart_height + nrows3 + nrows8 + i + 7, ncols12 - 1],
'fill': {'color': colours_hex[i + 5]},
'border': {'none': True}
})
worksheet3.insert_chart('R3', imports_column)
# EXPORTS: Create a line chart subset by fuel
exports_line = workbook.add_chart({'type': 'line'})
exports_line.set_size({
'width': 500,
'height': 300
})
exports_line.set_chartarea({
'border': {'none': True}
})
exports_line.set_x_axis({
'name': 'Year',
'label_position': 'low',
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232', 'rotation': -45},
'position_axis': 'on_tick',
'interval_unit': 4,
'line': {'color': '#bebebe'}
})
exports_line.set_y_axis({
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'name': 'Exports (PJ)',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232'},
'major_gridlines': {
'visible': True,
'line': {'color': '#bebebe'}
},
'line': {'color': '#bebebe'}
})
exports_line.set_legend({
'font': {'font': 'Segoe UI', 'size': 10}
#'none': True
})
exports_line.set_title({
'none': True
})
# Configure the series of the chart from the dataframe data.
for fuel in ['Coal', 'Crude oil & NGL', 'Petroleum products', 'Gas', 'Nuclear', 'Renewables', 'Other fuels']:
i = exports_df1[exports_df1['fuel_code'] == fuel].index[0]
exports_line.add_series({
'name': [economy + '_TPES_components_I', chart_height + nrows3 + nrows8 + nrows12 + i + 10, 0],
'categories': [economy + '_TPES_components_I', chart_height + nrows3 + nrows8 + nrows12 + 9, 2, chart_height + nrows3 + nrows8 + nrows12 + 9, ncols8 - 1],
'values': [economy + '_TPES_components_I', chart_height + nrows3 + nrows8 + nrows12 + i + 10, 2, chart_height + nrows3 + nrows8 + nrows12 + i + 10, ncols8 - 1],
'line': {'color': colours_hex[i], 'width': 1.25},
})
worksheet3.insert_chart('Z3', exports_line)
    # Create an exports-by-fuel stacked column chart
exports_column = workbook.add_chart({'type': 'column', 'subtype': 'stacked'})
exports_column.set_size({
'width': 500,
'height': 300
})
exports_column.set_chartarea({
'border': {'none': True}
})
exports_column.set_x_axis({
'name': 'Year',
'label_position': 'low',
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232', 'rotation': -45},
'line': {'color': '#bebebe'}
})
exports_column.set_y_axis({
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'name': 'Exports by fuel (PJ)',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232'},
'major_gridlines': {
'visible': True,
'line': {'color': '#bebebe'}
},
'line': {'color': '#bebebe'}
})
exports_column.set_legend({
'font': {'font': 'Segoe UI', 'size': 10}
#'none': True
})
exports_column.set_title({
'none': True
})
# Configure the series of the chart from the dataframe data.
for i in range(nrows13):
exports_column.add_series({
'name': [economy + '_TPES_components_I', chart_height + nrows3 + nrows8 + nrows12 + nrows9 + i + 13, 0],
'categories': [economy + '_TPES_components_I', chart_height + nrows3 + nrows8 + nrows12 + nrows9 + 12, 2, chart_height + nrows3 + nrows8 + nrows12 + nrows9 + 12, ncols13 - 1],
'values': [economy + '_TPES_components_I', chart_height + nrows3 + nrows8 + nrows12 + nrows9 + i + 13, 2, chart_height + nrows3 + nrows8 + nrows12 + nrows9 + i + 13, ncols13 - 1],
'fill': {'color': colours_hex[i + 5]},
'border': {'none': True}
})
worksheet3.insert_chart('AH3', exports_column)
###################################### TPES components II ###########################################
    # Access the TPES components II sheet created above
worksheet4 = writer.sheets[economy + '_TPES_components_II']
# Apply comma format and header format to relevant data rows
worksheet4.set_column(2, ncols10 + 1, None, comma_format)
worksheet4.set_row(chart_height, None, header_format)
worksheet4.set_row(chart_height + nrows10 + 3, None, header_format)
worksheet4.write(0, 0, economy + ' TPES components II', cell_format1)
# MARINE BUNKER: Create a line chart subset by fuel
marine_line = workbook.add_chart({'type': 'line'})
marine_line.set_size({
'width': 500,
'height': 300
})
marine_line.set_chartarea({
'border': {'none': True}
})
marine_line.set_x_axis({
'name': 'Year',
'label_position': 'low',
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232', 'rotation': -45},
'position_axis': 'on_tick',
'interval_unit': 4,
'line': {'color': '#bebebe'}
})
marine_line.set_y_axis({
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'name': 'Marine bunkers (PJ)',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232'},
'major_gridlines': {
'visible': True,
'line': {'color': '#bebebe'}
},
'line': {'color': '#bebebe'}
})
marine_line.set_legend({
'font': {'font': 'Segoe UI', 'size': 10}
#'none': True
})
marine_line.set_title({
'none': True
})
# Configure the series of the chart from the dataframe data.
for i in range(nrows10):
marine_line.add_series({
'name': [economy + '_TPES_components_II', chart_height + i + 1, 0],
'categories': [economy + '_TPES_components_II', chart_height, 2, chart_height, ncols10 - 1],
'values': [economy + '_TPES_components_II', chart_height + i + 1, 2, chart_height + i + 1, ncols10 - 1],
'line': {'color': colours_hex[i], 'width': 1.25},
})
worksheet4.insert_chart('B3', marine_line)
# AVIATION BUNKER: Create a line chart subset by fuel
aviation_line = workbook.add_chart({'type': 'line'})
aviation_line.set_size({
'width': 500,
'height': 300
})
aviation_line.set_chartarea({
'border': {'none': True}
})
aviation_line.set_x_axis({
'name': 'Year',
'label_position': 'low',
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232', 'rotation': -45},
'position_axis': 'on_tick',
'interval_unit': 4,
'line': {'color': '#bebebe'}
})
aviation_line.set_y_axis({
'major_tick_mark': 'none',
'minor_tick_mark': 'none',
'name': 'Aviation bunkers (PJ)',
'num_font': {'font': 'Segoe UI', 'size': 10, 'color': '#323232'},
'major_gridlines': {
'visible': True,
'line': {'color': '#bebebe'}
},
'line': {'color': '#bebebe'}
})
aviation_line.set_legend({
'font': {'font': 'Segoe UI', 'size': 10}
#'none': True
})
aviation_line.set_title({
'none': True
})
# Configure the series of the chart from the dataframe data.
for i in range(nrows11):
aviation_line.add_series({
'name': [economy + '_TPES_components_II', chart_height + nrows10 + i + 4, 0],
'categories': [economy + '_TPES_components_II', chart_height + nrows10 + 3, 2, chart_height + nrows10 + 3, ncols11 - 1],
'values': [economy + '_TPES_components_II', chart_height + nrows10 + i + 4, 2, chart_height + nrows10 + i + 4, ncols11 - 1],
'line': {'color': colours_hex[i], 'width': 1.25},
})
worksheet4.insert_chart('J3', aviation_line)
writer.save()
print('Bling blang blaow, you have some TPES charts now')
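# The startrow values passed to to_excel above follow one rule: the first table
# sits chart_height rows down, and each later table is pushed down by the
# previous table's row count plus a 3-row gap. A small sketch of that
# bookkeeping (hypothetical helper, not used by the script above):
def stacked_startrows(chart_height, table_lengths, gap = 3):
    # Yield the Excel startrow for each table stacked on one sheet
    row = chart_height
    for length in table_lengths:
        yield row
        row += length + gap
# For example, with nrows3 = 4 and nrows8 = 7 as in a typical economy,
# list(stacked_startrows(18, [4, 7, 7])) gives [18, 25, 35], matching
# chart_height, chart_height + nrows3 + 3 and chart_height + nrows3 + nrows8 + 6.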
|
{"hexsha": "815b1e8fa4b6faef72aafdc3781df041f970bec0", "size": 44287, "ext": "py", "lang": "Python", "max_stars_repo_path": "workflow/scripts/2_charts_tables/2017_egeda/TPES_economy.py", "max_stars_repo_name": "asia-pacific-energy-research-centre/8th_outlook_visualisations", "max_stars_repo_head_hexsha": "a8fed78db955f9dfd785e6b515fbe5e6d7fb7665", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-16T23:53:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T23:53:11.000Z", "max_issues_repo_path": "workflow/scripts/2_charts_tables/2017_egeda/TPES_economy.py", "max_issues_repo_name": "asia-pacific-energy-research-centre/8th_outlook_visualisations", "max_issues_repo_head_hexsha": "a8fed78db955f9dfd785e6b515fbe5e6d7fb7665", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "workflow/scripts/2_charts_tables/2017_egeda/TPES_economy.py", "max_forks_repo_name": "asia-pacific-energy-research-centre/8th_outlook_visualisations", "max_forks_repo_head_hexsha": "a8fed78db955f9dfd785e6b515fbe5e6d7fb7665", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2355679702, "max_line_length": 195, "alphanum_fraction": 0.541197191, "include": true, "reason": "import numpy", "num_tokens": 11685}
|
C (C) Copyright 1996-2016 ECMWF.
C
C This software is licensed under the terms of the Apache Licence Version 2.0
C which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
C In applying this licence, ECMWF does not waive the privileges and immunities
C granted to it by virtue of its status as an intergovernmental organisation nor
C does it submit to any jurisdiction.
C
PROGRAM CONTAUTO
C This program demonstrates magics contouring facilities.
C Set to 'AUTOMATIC', Magics++ attempts to find the 'best'
C contouring parameters for the trade-off between quality and speed.
PARAMETER (NLEV=8)
DIMENSION RLEV (NLEV)
DATA RLEV /-7., -5., -3., -1., 1., 3., 5., 9./
C Open MAGICS and set the output device
CALL POPEN
call psetc ('device','ps')
call psetc ('ps_device','ps_a4')
CALL PSETC ('PS_FILE_NAME', 'toto1.ps')
C Set up the coastline attributes
c CALL PSETC ('MAP_COASTLINE_RESOLUTION', 'HIGH')
CALL PSETC ('MAP_COASTLINE_COLOUR', 'BLACK')
CALL PSETC ('MAP_GRID', 'OFF')
CALL PSETC ('MAP_LABEL', 'OFF')
C Pass the data to MAGICS
CALL PSETC ('GRIB_INPUT_TYPE', 'FILE')
CALL PSETC ('GRIB_INPUT_FILE_NAME', '../data/mark.grb')
c x '/tmp/cgk/data/MetErrors/MarkRodwell/s.grb')
CALL PSETC ('grib_scaling', 'off')
CALL PGRIB
C Define the contour
CALL PSETC ('CONTOUR', 'OFF')
CALL PSETC ('contour_level_selection_type','level_list')
CALL PSET1R ('contour_level_list', RLEV, NLEV)
CALL PSETC ('contour_shade', 'on')
CALL PSETC ('contour_shade_method', 'area_fill')
c CALL PSETC ('contour_shade_colour_method', 'list')
c CALL PSETC ('contour_shade_colour_list', shade_colours)
CALL PSETC ('contour_shade_label_blanking', 'on')
CALL PSETC ('contour_label', 'off')
CALL PSETC ('contour_highlight', 'off')
CALL PSETC ('contour_hilo', 'off')
CALL PCONT
C Plot the coastlines
CALL PCOAST
CALL PCLOSE
STOP
END
|
{"hexsha": "9def760b15ff1daee865d882ff97bde179443ed5", "size": 2092, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "test/old/fortran/mark.f", "max_stars_repo_name": "dtip/magics", "max_stars_repo_head_hexsha": "3247535760ca962f859c203295b508d442aca4ed", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/old/fortran/mark.f", "max_issues_repo_name": "dtip/magics", "max_issues_repo_head_hexsha": "3247535760ca962f859c203295b508d442aca4ed", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/old/fortran/mark.f", "max_forks_repo_name": "dtip/magics", "max_forks_repo_head_hexsha": "3247535760ca962f859c203295b508d442aca4ed", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2950819672, "max_line_length": 80, "alphanum_fraction": 0.6520076482, "num_tokens": 602}
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
from data.data_hour import *
import pandas as pd
import argparse
from gcn_model.hyparameter import parameter
file='/Users/guojianzou/Traffic-speed-prediction/data/data_hour/'
class HA():
def __init__(self,
site_id=0,
is_training=True,
time_size=3,
prediction_size=1,
data_divide=0.9,
window_step=1,
normalize=False,
hp=None):
        '''
        :param site_id: site ID
        :param is_training: True while the model is in the training state
        :param time_size: time series length of the input
        :param prediction_size: the length of the prediction
        :param data_divide: the train/test split ratio
        :param window_step: the sliding-window step
        '''
        self.site_id=site_id                    # site ID
        self.time_size=time_size                # time series length of input
        self.prediction_size=prediction_size   # the length of prediction
        self.is_training=is_training            # true or false
        self.data_divide=data_divide            # the train/test split ratio
        self.window_step=window_step            # sliding-window step
self.para=hp
self.data=self.get_source_data(file+'train.csv')
self.length=self.data.values.shape[0] #data length
self.normalize=normalize
def get_source_data(self,file_path):
        '''
        :param file_path: path to the csv data file
        :return: a pandas DataFrame with the raw data
        '''
data = pd.read_csv(file_path, encoding='utf-8')
return data
def accuracy(self,label,predict):
        '''
        :param label: the observed values
        :param predict: the predicted values
        :return: mae, rmse, correlation coefficient and R^2
        '''
error = label - predict
average_error = np.mean(np.fabs(error.astype(float)))
print("mae is : %.6f" % (average_error))
rmse_error = np.sqrt(np.mean(np.square(label - predict)))
print("rmse is : %.6f" % (rmse_error))
cor = np.mean(np.multiply((label - np.mean(label)),
(predict - np.mean(predict)))) / (np.std(predict) * np.std(label))
print('correlation coefficient is: %.6f' % (cor))
sse = np.sum((label - predict) ** 2)
sst = np.sum((label - np.mean(label)) ** 2)
R2 = 1 - sse / sst
print('r^2 is: %.6f' % (R2))
return average_error,rmse_error,cor,R2
def model(self):
self.dictionary_label = []
self.dictionary_predict = []
for site in range(self.para.site_num):
data1=self.data[(self.data['in_id']==self.data.values[site][0]) & (self.data['out_id']==self.data.values[site][1])]
# print(data1.shape)
for h in range(24):
data2=data1.loc[data1['hour']==h]
print(data2.shape)
label=np.mean(data2.values[ : 110,-2])
# predict = np.mean(data3.values[25:26, -1])
predict=np.reshape(data2.values[110: 123,-2],newshape=[-1])
# print(predict,predict.shape[-1])
self.dictionary_label.append([label]*predict.shape[-1])
# self.dictionary_label.append(label)
# self.dictionary_predict.append(predict)
self.dictionary_predict.append(list(predict))
#
if __name__=='__main__':
para = parameter(argparse.ArgumentParser())
para = para.get_para()
ha=HA(site_id=0,normalize=False,hp=para)
print(ha.data.keys())
print(ha.data)
ha.model()
ha.accuracy(np.reshape(np.array(ha.dictionary_label),newshape=[-1]),np.reshape(np.array(ha.dictionary_predict),newshape=[-1]))
# print(iter.data.loc[iter.data['ZoneID']==0])
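# A quick self-contained check of the error metrics defined in HA.accuracy,
# using made-up numbers (illustrative only; these values are not from the
# traffic dataset):
def _check_metrics():
    label = np.array([1.0, 2.0, 3.0, 4.0])
    predict = np.array([1.1, 1.9, 3.2, 3.8])
    mae = np.mean(np.fabs(label - predict))              # 0.15
    rmse = np.sqrt(np.mean(np.square(label - predict)))  # ~0.158
    sse = np.sum((label - predict) ** 2)
    sst = np.sum((label - np.mean(label)) ** 2)
    r2 = 1 - sse / sst                                   # 0.98
    return mae, rmse, r2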
|
{"hexsha": "90fda9e1d06dcc01444060245f73cfa476e4a5d5", "size": 3737, "ext": "py", "lang": "Python", "max_stars_repo_path": "MT-STFLN /comparison_model/ha.py", "max_stars_repo_name": "zouguojian/Traffic-speed-prediction", "max_stars_repo_head_hexsha": "4b9917a9e1147c37b64e51be3c060af4bdb9544d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-08T00:08:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T07:15:17.000Z", "max_issues_repo_path": "MT-STFLN /comparison_model/ha.py", "max_issues_repo_name": "zouguojian/Traffic-speed-prediction", "max_issues_repo_head_hexsha": "4b9917a9e1147c37b64e51be3c060af4bdb9544d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MT-STFLN /comparison_model/ha.py", "max_forks_repo_name": "zouguojian/Traffic-speed-prediction", "max_forks_repo_head_hexsha": "4b9917a9e1147c37b64e51be3c060af4bdb9544d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2815533981, "max_line_length": 130, "alphanum_fraction": 0.5742574257, "include": true, "reason": "import numpy", "num_tokens": 865}
|
#coverage:ignore
""" Determine costs for DF decomposition in QC """
from typing import Tuple
import numpy as np
from numpy.lib.scimath import arccos, arcsin # has analytc continuation to cplx
from openfermion.resource_estimates.utils import QR, QI, power_two
def compute_cost(n: int,
lam: float,
dE: float,
L: int,
Lxi: int,
chi: int,
beta: int,
stps: int,
verbose: bool = False) -> Tuple[int, int, int]:
""" Determine fault-tolerant costs using DF decomposition in quantum chem
Args:
n (int) - the number of spin-orbitals
lam (float) - the lambda-value for the Hamiltonian
dE (float) - allowable error in phase estimation
L (int) - the rank of the first decomposition
Lxi (int) - the total number of eigenvectors
chi (int) - equivalent to aleph_1 and aleph_2 in the document, the
number of bits for the representation of the coefficients
beta (int) - equivalent to beth in the document, the number of bits
for the rotations
        stps (int) - an approximate number of steps, used to choose the
            precision of single-qubit rotations when preparing the equal
            superposition state
verbose (bool) - do additional printing of intermediates?
Returns:
step_cost (int) - Toffolis per step
total_cost (int) - Total number of Toffolis
ancilla_cost (int) - Total ancilla cost
"""
# The number of bits used for the second register.
nxi = np.ceil(np.log2(n // 2))
# The number of bits for the contiguous register.
nLxi = np.ceil(np.log2(Lxi + n // 2))
# The number of bits used for the first register.
nL = np.ceil(np.log2(L + 1))
# The power of 2 that is a factor of L + 1
eta = power_two(L + 1)
oh = [0] * 20
for p in range(20):
# JJG note: arccos arg may be > 1
v = np.round(np.power(2,p+1) / (2 * np.pi) * arccos(np.power(2,nL) /\
np.sqrt((L + 1)/2**eta)/2))
oh[p] = np.real(stps * (1 / (np.sin(3 * arcsin(np.cos(v * 2 * np.pi / \
np.power(2,p+1)) * \
np.sqrt((L + 1)/2**eta) / np.power(2,nL)))**2) - 1) + 4 * (p + 1))
# Bits of precision for rotation
br = int(np.argmin(oh) + 1)
# The following costs are from the list starting on page 50.
# The cost for preparing an equal superposition for preparing the first
# register in step 1 (a). We double this cost to account for the inverse.
cost1a = 2 * (3 * nL + 2 * br - 3 * eta - 9)
# The output size for the QROM for the first state preparation in Eq. (C27)
bp1 = nL + chi
# The cost of the QROM for the first state preparation in step 1 (b) and
# its inverse.
cost1b = QR(L + 1, bp1)[1] + QI(L + 1)[1]
# The cost for the inequality test, controlled swap and their inverse in
# steps 1 (c) and (d)
cost1cd = 2 * (chi + nL)
# The total cost for preparing the first register in step 1.
cost1 = cost1a + cost1b + cost1cd
# The output size for the QROM for the data to prepare the equal
# superposition on the second register, as given in Eq. (C29).
bo = nxi + nLxi + br + 1
# This is step 2. This is the cost of outputting the data to prepare the
# equal superposition on the second register. We will assume it is not
# uncomputed, because we want to keep the offset for applying the QROM for
# outputting the rotations.
cost2 = QR(L + 1, bo)[1] + QI(L + 1)[1]
# The number of bits for rotating the ancilla for the second preparation.
# We are just entering this manually because it is a typical value.
br = 7
# The cost of preparing an equal superposition over the second register in
# a controlled way. We pay this cost 4 times.
cost3a = 4 * (7 * nxi + 2 * br - 6)
# The cost of the offset to apply the QROM for state preparation on the
# second register.
cost3b = 4 * (nLxi - 1)
bp2 = nxi + chi + 2
# The cost of the QROMs and inverse QROMs for the state preparation, where
# in the first one we need + n/2 to account for the one-electron terms.
cost3c = QR(Lxi + n // 2, bp2)[1] + QI(Lxi + n // 2)[1] + QR(
Lxi, bp2)[1] + QI(Lxi)[1]
# The inequality test and state preparations.
cost3d = 4 * (nxi + chi)
# The total costs for state preparations on register 2.
cost3 = cost3a + cost3b + cost3c + cost3d
# The cost of adding offsets in steps 4 (a) and (h).
cost4ah = 4 * (nLxi - 1)
# The costs of the QROMs and their inverses in steps 4 (b) and (g).
cost4bg = QR(Lxi + n // 2, n * beta // 2)[1] + QI(Lxi + n // 2)[1] + QR(
Lxi, n * beta // 2)[1] + QI(Lxi)[1]
# The cost of the controlled swaps based on the spin qubit in steps 4c and f
cost4cf = 2 * n
# The controlled rotations in steps 4 (d) and (f).
cost4df = 4 * n * (beta - 2)
# The controlled Z operations in the middle for step 4 (e).
cost4e = 3
# This is the cost of the controlled rotations for step 4.
cost4 = cost4ah + cost4bg + cost4cf + cost4df + cost4e
# This is the cost of the reflection on the second register from step 6.
cost6 = nxi + chi + 2
    # The cost of the final reflection required to construct the step of the
    # quantum walk from step 9.
    cost9 = nL + nxi + chi + 1
    # The extra two qubits for unary iteration and for making the reflection
    # controlled.
cost10 = 2
# The Toffoli cost for a single step
cost = cost1 + cost2 + cost3 + cost4 + cost6 + cost9 + cost10
# The number of steps needed
iters = np.ceil(np.pi * lam / (2 * dE))
# Now the number of qubits from the list on page 54.
k1 = np.power(2, QR(Lxi + n // 2, n * beta // 2)[0])
# The control register for phase estimation and iteration on it.
ac1 = np.ceil(np.log2(iters + 1)) * 2 - 1
# The system qubits
ac2 = n
# The first register prepared, a rotated qubit and a flag qubit.
ac3 = nL + 2
# The output of the QROM, the equal superposition state and a flag qubit.
ac4 = nL + chi * 2 + 1
# The data used for preparing the equal superposition state on the 2nd reg
ac5 = bo
# The second register, a rotated qubit and a flag qubit.
ac6 = nxi + 2
# The second preparation QROM output.
ac8 = bp2
# The equal superposition state and the result of the inequality test.
ac9 = chi + 1
# The angles for rotations.
ac10 = k1 * n * beta // 2
# The phase gradient state.
ac11 = beta
# A control qubit for the spin.
ac12 = 1
# A T state.
ac13 = 1
if verbose:
print("[*] Top of routine")
print(" [+] nxi = ", nxi)
print(" [+] nLxi = ", nLxi)
print(" [+] nL = ", nL)
print(" [+] eta = ", eta)
print(" [+] cost3 = ", cost3)
print(" [+] cost4 = ", cost4)
print(" [+] cost = ", cost)
print(" [+] iters = ", iters)
ancilla_cost = ac1 + ac2 + ac3 + ac4 + ac5 + ac6 + ac8 + ac9 + ac10 + ac11\
+ ac12 + ac13
# Sanity checks before returning as int
assert cost.is_integer()
assert iters.is_integer()
assert ancilla_cost.is_integer()
step_cost = int(cost)
total_cost = int(cost * iters)
ancilla_cost = int(ancilla_cost)
return step_cost, total_cost, ancilla_cost
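# Illustrative invocation with placeholder problem sizes; the numbers below are
# made up for demonstration and do not correspond to any real molecule. A
# common pattern (assumed here, not shown in this file) is to call the function
# twice, feeding the returned per-step Toffoli count back in as stps so the
# rotation precision is chosen self-consistently:
if __name__ == "__main__":
    rough = compute_cost(n=100, lam=300.0, dE=0.001, L=200, Lxi=8000,
                         chi=10, beta=16, stps=20000)
    refined = compute_cost(n=100, lam=300.0, dE=0.001, L=200, Lxi=8000,
                           chi=10, beta=16, stps=rough[0])
    print(refined)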
|
{"hexsha": "53c778a44e560f36094ab6f924ab1881bd85a6c5", "size": 7438, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/openfermion/resource_estimates/df/compute_cost_df.py", "max_stars_repo_name": "cvmxn1/OpenFermion", "max_stars_repo_head_hexsha": "cf53c063d0f124a02ff8776bb7f8afb110d4bde6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/openfermion/resource_estimates/df/compute_cost_df.py", "max_issues_repo_name": "cvmxn1/OpenFermion", "max_issues_repo_head_hexsha": "cf53c063d0f124a02ff8776bb7f8afb110d4bde6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/openfermion/resource_estimates/df/compute_cost_df.py", "max_forks_repo_name": "cvmxn1/OpenFermion", "max_forks_repo_head_hexsha": "cf53c063d0f124a02ff8776bb7f8afb110d4bde6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.119266055, "max_line_length": 80, "alphanum_fraction": 0.6020435601, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2252}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from matplotlib import pyplot as plt
from astropy.stats import mad_std
from astropy.io import fits
from photutils import CircularAperture
from astropy.convolution import convolve_fft, Tophat2DKernel
from pyfftw.interfaces.scipy_fftpack import fft2, ifft2
__all__ = ['init_centroids']
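# convolve_fft accepts drop-in FFT implementations through its fftn/ifftn
# arguments; the pyfftw wrappers imported above are used that way inside
# init_centroids. A minimal self-contained sketch on synthetic data (the helper
# name and toy image are made up for illustration):
def _demo_fftw_convolution(shape=(128, 128)):
    # Smooth a noise frame with the same Tophat kernel and FFT backend
    image = np.random.RandomState(0).normal(size=shape)
    kernel = Tophat2DKernel(5)
    return convolve_fft(image, kernel, fftn=fft2, ifftn=ifft2)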
def init_centroids(first_image_path, master_flat, master_dark, target_centroid,
max_number_stars=10, min_flux=0.2, plots=False):
first_image = np.median([(fits.getdata(path) - master_dark)/master_flat
for path in first_image_path], axis=0)
tophat_kernel = Tophat2DKernel(5)
convolution = convolve_fft(first_image, tophat_kernel, fftn=fft2, ifftn=ifft2)
convolution -= np.median(convolution)
mad = mad_std(convolution)
convolution[convolution < -5*mad] = 0.0
from skimage.filters import threshold_yen
from skimage.measure import label, regionprops
thresh = threshold_yen(convolution)/2 # Use /4 for planet c, /2 for planet b
#thresh = threshold_otsu(convolution)/15
masked = np.ones_like(convolution)
masked[convolution <= thresh] = 0
label_image = label(masked)
    if plots:
        plt.figure()
        plt.imshow(label_image, origin='lower', cmap=plt.cm.viridis)
        plt.show()
# regions = regionprops(label_image, convolution)
regions = regionprops(label_image, first_image)
    # reject regions near the edge of the detector
buffer_pixels = 100
regions = [region for region in regions
if ((region.weighted_centroid[0] > buffer_pixels and
region.weighted_centroid[0] < label_image.shape[0] - buffer_pixels)
and (region.weighted_centroid[1] > buffer_pixels and
region.weighted_centroid[1] < label_image.shape[1] - buffer_pixels))]
centroids = [region.weighted_centroid for region in regions]
#intensities = [region.mean_intensity for region in regions]
# target_intensity = regions[0].mean_intensity
# target_diameter = regions[0].equivalent_diameter
# and region.equivalent_diameter > 0.8 * target_diameter
# centroids = [region.weighted_centroid for region in regions
# if min_flux * target_intensity < region.mean_intensity]
# intensities = [region.mean_intensity for region in regions
# if min_flux * target_intensity < region.mean_intensity]
# centroids = np.array(centroids)[np.argsort(intensities)[::-1]]
distances = [np.sqrt((target_centroid[0] - d[0])**2 +
(target_centroid[1] - d[1])**2) for d in centroids]
centroids = np.array(centroids)[np.argsort(distances)]
positions = np.vstack([[y for x, y in centroids], [x for x, y in centroids]])
if plots:
apertures = CircularAperture(positions, r=12.)
apertures.plot(color='r', lw=2, alpha=1)
plt.imshow(first_image, vmin=np.percentile(first_image, 0.01),
vmax=np.percentile(first_image, 99.9), cmap=plt.cm.viridis,
origin='lower')
plt.scatter(positions[1, 0], positions[0, 0], s=150, marker='x')
plt.show()
return positions
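# Minimal sketch of the threshold -> label -> regionprops sequence used in
# init_centroids, on a synthetic frame (assumes scikit-image is available; the
# helper name and toy values are made up for illustration):
def _demo_segmentation():
    from skimage.filters import threshold_yen
    from skimage.measure import label, regionprops
    image = np.zeros((64, 64))
    image[20:25, 30:35] = 100.0  # one bright, star-like blob
    mask = np.zeros_like(image, dtype=int)
    mask[image > threshold_yen(image)] = 1
    regions = regionprops(label(mask), image)
    return [region.weighted_centroid for region in regions]  # [(22.0, 32.0)]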
# target_index = np.argmin(np.abs(target_centroid - positions), axis=1)[0]
# flux_threshold = sources['flux'] > min_flux * sources['flux'].data[target_index]
#
# fluxes = sources['flux'][flux_threshold]
# positions = positions[:, flux_threshold]
#
# brightest_positions = positions[:, np.argsort(fluxes)[::-1][:max_number_stars]]
# target_index = np.argmin(np.abs(target_centroid - brightest_positions),
# axis=1)[0]
#
# apertures = CircularAperture(positions, r=12.)
# brightest_apertures = CircularAperture(brightest_positions, r=12.)
# apertures.plot(color='b', lw=1, alpha=0.2)
# brightest_apertures.plot(color='r', lw=2, alpha=0.8)
#
# if plots:
# plt.imshow(first_image, vmin=np.percentile(first_image, 0.01),
# vmax=np.percentile(first_image, 99.9), cmap=plt.cm.viridis,
# origin='lower')
# plt.plot(target_centroid[0, 0], target_centroid[1, 0], 's')
#
# plt.show()
#
# # Reorder brightest positions array so that the target comes first
# indices = list(range(brightest_positions.shape[1]))
# indices.pop(target_index)
# indices = [target_index] + indices
# brightest_positions = brightest_positions[:, indices]
#
# return brightest_positions
|
{"hexsha": "4625cdeae578d6bef6b0e725565f406597180b11", "size": 4608, "ext": "py", "lang": "Python", "max_stars_repo_path": "toolkit/star_selection.py", "max_stars_repo_name": "bmorris3/rem", "max_stars_repo_head_hexsha": "be758a989fba62eac2a24bc201e855ee08173b47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "toolkit/star_selection.py", "max_issues_repo_name": "bmorris3/rem", "max_issues_repo_head_hexsha": "be758a989fba62eac2a24bc201e855ee08173b47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "toolkit/star_selection.py", "max_forks_repo_name": "bmorris3/rem", "max_forks_repo_head_hexsha": "be758a989fba62eac2a24bc201e855ee08173b47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0508474576, "max_line_length": 89, "alphanum_fraction": 0.6640625, "include": true, "reason": "import numpy,from astropy", "num_tokens": 1163}
|
import os.path
import scipy.io as io
import numpy as np
_folder_path = os.path.abspath("./CVACaseStudy/CVACaseStudy/")
FILE_NAMES = (
('Training Data', 'Training.mat'),
('Faulty Case 1', 'FaultyCase1.mat'),
('Faulty Case 2', 'FaultyCase2.mat'),
('Faulty Case 3', 'FaultyCase3.mat'),
('Faulty Case 4', 'FaultyCase4.mat'),
('Faulty Case 5', 'FaultyCase5.mat'),
('Faulty Case 6', 'FaultyCase6.mat'),
)
FAULT_START_STOP = (
((0, 0), (0, 0), (0, 0)),
((1565, 5180), (656, 3776), (690, 3690)),
((2243, 6615), (475, 2655), (330, 2466)),
((1135, 8351), (332, 5870), (595, 9565)),
((952, 6293), (850, 3850), (240, 3240)),
((685, 1171, 1171, 2252), (1632, 2954, 7030, 7552, 8056, 10607)),
((1722, 2799), (1036, 4829))
)
def set_folder_path(path: str) -> None:
global _folder_path
abs_path = os.path.abspath(path)
if not os.path.isdir(abs_path):
raise NotADirectoryError
_folder_path = abs_path
def import_data_set(file_name: str) -> list:
file_path = os.path.join(_folder_path, file_name)
file_data = io.loadmat(file_path)
# Don't import these sets
ignored_sets = ['__header__', '__version__', '__globals__']
data = [[None, None],
[None, None],
[None, None]]
for key, item in file_data.items():
if key in ignored_sets:
continue
if item.shape[0] > item.shape[1]:
            # Enforce samples in columns and variables in rows
item = item.T
set_number = int(key[-1])
if 'EvoFault' in key:
data[set_number - 1][1] = item
elif ('Set' in key) or (key == f'T{set_number}'):
data[set_number - 1][0] = item
return(data)
def import_sets() -> list:
data_sets = []
for i, (name, file_name) in enumerate(FILE_NAMES):
data = import_data_set(file_name)
if i > 4: # Sets 5 and 6 have only 2 data sets
data.pop()
data_sets.append((name, data, FAULT_START_STOP[i]))
return(data_sets)
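# Example of walking the structure returned by import_sets(); illustrative
# only, and assumes the .mat files are present under the configured folder:
def print_set_shapes():
    for name, data, faults in import_sets():
        for i, (measurements, evolution) in enumerate(data):
            shape = None if measurements is None else measurements.shape
            print(f"{name}, set {i + 1}: shape {shape}, faults {faults[i]}")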
if __name__ == "__main__":
print("This file cannot be run directly, import this module to obtain the",
"datasets of the Multiphase Flow Facility process")
|
{"hexsha": "33b093461f193c3ae15e4a049496f4ed0ec413bc", "size": 2197, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sfamanopt/load_cva.py", "max_stars_repo_name": "hsaafan/SSFA", "max_stars_repo_head_hexsha": "61c093c323c324b0eb7d2c93ef62252d71db6f16", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/sfamanopt/load_cva.py", "max_issues_repo_name": "hsaafan/SSFA", "max_issues_repo_head_hexsha": "61c093c323c324b0eb7d2c93ef62252d71db6f16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sfamanopt/load_cva.py", "max_forks_repo_name": "hsaafan/SSFA", "max_forks_repo_head_hexsha": "61c093c323c324b0eb7d2c93ef62252d71db6f16", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9436619718, "max_line_length": 79, "alphanum_fraction": 0.593991807, "include": true, "reason": "import numpy,import scipy", "num_tokens": 676}
|
C
C
C Support routines for PGPLOT tcl binding
C
C+ pgInitStream
C
subroutine pgInitStream( device, psize, paspect, nx, ny, id, s )
C ----------------------------------------------------------------
C
C Select and open a new stream for graphical output
C
C Given:
C device specification
character device*128
C device size and aspect ratio
real*4 psize, paspect
C       number of sub-divisions of the frame
integer nx, ny
C stream ID
integer id
C
C Updated:
C error status
integer s, si, pgindev
C
C-
integer l, chr_lenb
s = 0
l = chr_lenb(device)
si = pgindev( 1, device(1:l), nx, ny, id )
if (si.eq.1) then
if (psize .lt. 0.0) then
psize = 0.0
endif
if (paspect.lt.0.05) then
paspect = 0.75
endif
call pgask( .false. )
call pgpap(psize,paspect)
call pgadvance
call pgpap(psize,paspect)
s = 0
else
s = 1
endif
end
C
C
C+ pgClearScreen
C
subroutine pgClearScreen
C ------------------------
C
C Clear the current device
C
C-
real*4 x1,x2,y1,y2,u1,u2,v1,v2
call pgsave
call pgqwin(x1,x2,y1,y2)
call pgqvp(0,u1,u2,v1,v2)
call pgvport(0.0,1.0,0.0,1.0)
call pgwindow(0.0,1.0,0.0,1.0)
call pgsfs( 1 )
call pgsci( 0 )
call pgrect( 0.0,1.0,0.0,1.0 )
call pgunsa
call pgvport(u1,u2,v1,v2)
call pgwindow(x1,x2,y1,y2)
end
C
C
C+ pgClearVport
subroutine pgClearVport
C -----------------------
C
C Clear the current viewport
C-
real*4 x1,x2,y1,y2
call pgsave
call pgqwin(x1,x2,y1,y2)
call pgwindow(0.0,1.0,0.0,1.0)
call pgsfs( 1 )
call pgsci( 0 )
call pgrect( 0.0,1.0,0.0,1.0 )
call pgunsa
call pgwindow(x1,x2,y1,y2)
end
C
C
C+ pgTextString
subroutine pgTextString( text, x, y, r, j )
C -------------------------------------------
C
C Draw a text string in world coordinate space
C
C Given:
C text
character*128 text
C position
real*4 x, y
C rotation angle
real*4 r
C justification
real*4 j
C
C-
integer chr_lenb
call pgptext( x, y, r, j, text(1:chr_lenb(text)) )
end
C
C
C+ pgTextLabel
subroutine pgTextLabel( side, text, x, y, j )
C ---------------------------------------------
C
C Draw a text string in world coordinate space
C
C Given:
C side specified as a character option
character*2 side
C text
character*128 text
C displacement and coordinate
real*4 x, y
C justification
real*4 j
C
C-
integer l
integer chr_lenb
      l = chr_lenb(text)
      call pgmtext( side(1:chr_lenb(side)),
     *              x, y, j, text(1:l) )
end
C
C
C+ pgDrawBox
subroutine pgDrawBox( type, xopt, xtick, nx, yopt, ytick, ny )
C --------------------------------------------------------------
C
C Draw a labelled box
C
C Given:
C type of box (0=normal ; 1=ra/dec (time))
integer type
C options
character xopt*10, yopt*10
C tick interval
real*4 xtick, ytick
C number of ticks
integer nx, ny
C
C-
integer lx, ly
integer chr_lenb
lx = chr_lenb(xopt)
ly = chr_lenb(yopt)
if (type.eq.0) then
call pgbox( xopt(1:lx),xtick,nx,yopt(1:ly),ytick,ny)
else
call pgtbox( xopt(1:lx),xtick,nx,yopt(1:ly),ytick,ny)
endif
end
C
C
C+ pgReadCursor
subroutine pgReadCursor( x, y, c )
C ----------------------------------
C
C Read the cursor position and character typed
C
C Returned:
C cursor position
real*4 x, y
C character typed
character c*1
C
C-
call pgcurse( x, y, c )
end
C
C
C+ pgDrawBinned
subroutine pgDrawBinned ( n, x, y )
C -----------------------------------
C
C Plot binned data
C
C Given:
C number of data points
integer n
C data arrays
real*4 x(*), y(*)
C
C-
call pgbin( n, x, y, .true. )
end
C
C
C+ pgOpenSpectrum
subroutine pgOpenSpectrum( file, id, ndata )
C --------------------------------------------
C
C Open and parse a standard format spectral file
C
C Given:
C file name
character*128 file
C
C Returned:
C file id
integer id
C number of data points
integer ndata
C-
C local variables
integer status
C functions
integer chr_lenb
C open file
status = 0
call spec_allocate( file(1:chr_lenb(file)),
* 'read', id, status)
call spec_hd_enqi( id, 'ndata', ndata, status )
if (status.ne.0) ndata = 0
end
C
C
C+ pgReadSpectrum
subroutine pgReadSpectrum( id, ix, iy, iex, iex2, iey, iey2,
* ndata, x, y, ex, ex2, ey, ey2 )
C ---------------------------------------------------------------
C
C Read a standard format spectral file
C
C Given:
C file id
integer id
C column flags
integer ix, iy, iex, iex2, iey, iey2
C
C Returned:
C data points
integer ndata
C data arrays
real*4 x(*), y(*), ex(*), ex2(*), ey(*), ey2(*)
C
C-
C local variables
integer status, nc
C open file
call spec_hd_enqi( id, 'ncols', nc, status )
C check status and continue
if (status.eq.0 .and. ndata.gt.0) then
if (ix.le.nc .and. ndata.gt.0) then
call spec_get_data( id, ix, x, ndata, status)
else
ndata = 0
endif
if (iy.le.nc .and. ndata.gt.0) then
call spec_get_data( id, iy, y, ndata, status)
else
ndata = 0
endif
if (iex.gt.0) then
if (iex.le.nc .and. ndata.gt.0) then
call spec_get_data( id, iex, ex, ndata, status)
else
ndata = 0
endif
endif
if (iey.gt.0) then
if (iey.le.nc .and. ndata.gt.0) then
call spec_get_data( id, iey, ey, ndata, status)
else
ndata = 0
endif
endif
if (iex2.gt.0) then
if (iex2.le.nc .and. ndata.gt.0) then
call spec_get_data( id, iex2, ex2, ndata, status)
else
ndata = 0
endif
endif
if (iey2.gt.0) then
if (iey2.le.nc .and. ndata.gt.0) then
call spec_get_data( id, iey2, ey2, ndata, status)
else
ndata = 0
endif
endif
else
ndata = 0
endif
call spec_deallocate( id, status )
if (status.ne.0) ndata = 0
end
|
{"hexsha": "3938611b2051816bc8a424a9647b7ae397044747", "size": 7084, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "anm3d/pgplot_util.f", "max_stars_repo_name": "CavendishAstrophysics/anmap", "max_stars_repo_head_hexsha": "efb611d7f80a3d14dc55e46cd01e8a622f6fd294", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-09-01T12:40:45.000Z", "max_stars_repo_stars_event_max_datetime": "2015-09-01T12:40:45.000Z", "max_issues_repo_path": "anm3d/pgplot_util.f", "max_issues_repo_name": "CavendishAstrophysics/anmap", "max_issues_repo_head_hexsha": "efb611d7f80a3d14dc55e46cd01e8a622f6fd294", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "anm3d/pgplot_util.f", "max_forks_repo_name": "CavendishAstrophysics/anmap", "max_forks_repo_head_hexsha": "efb611d7f80a3d14dc55e46cd01e8a622f6fd294", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.7781350482, "max_line_length": 71, "alphanum_fraction": 0.4875776398, "num_tokens": 2172}
|
"""
sepp_base
~~~~~~~~~
A more abstract approach to SEPP type algorithms.
"""
from . import predictors
from . import logger as _ocp_logger
from . import data as _ocp_data
import numpy as _np
import datetime as _datetime
import logging as _logging
_logger = _logging.getLogger(__name__)
class ModelBase():
"""Interface for a "model".
We use the convention that the data is always an array of shape `(3,N)`
formed of `[times, x, y]` where `times` is an increasing sequence of
numbers from 0.
"""
    def background(self, points):
        """Evaluate the background kernel at `points`. If `points` is of
        shape `(3,N)` then it should return an array of shape `(N,)`.
:return: Array of shape `(N,)`
"""
raise NotImplementedError()
def trigger(self, trigger_point, delta_points):
"""We allow quite general trigger risk kernels which can depend on the
"trigger point" as well as the delta between the trigger and triggered
events.
:param trigger_point: Array of shape `(3,)` specifying the `(t, x, y)`
coords of the (single) trigger event.
:param delta_points: Array of shape `(3,m)` specifying the deltas to
the triggered events. Add to `trigger_point` to get the absolute
location of the triggered events.
:return: Array of shape `(m,)`
"""
raise NotImplementedError()
    def log_likelihood_base(self, points):
        r"""Computes the non-normalised log likelihood,
:math:`\sum_{i=1}^n \log \lambda^*(t_i,x_i,y_i)`.
The normalisation requires integrating which is best left to a concrete
subclass.
"""
points = _np.asarray(points)
out = 0.0
for i in range(points.shape[1]):
pt = points[:,i]
ptt = pt[:,None]
li = self.background(ptt)[0]
deltas = ptt - points[:,:i]
li += _np.sum(self.trigger(pt, deltas))
out += _np.log(li)
return out
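# A minimal sketch of a concrete model (illustrative only; the constant
# background rate and the exponential time-decay trigger are assumptions,
# not part of this module):
#
#   class ToyModel(ModelBase):
#       def background(self, points):
#           return _np.full(points.shape[-1], 0.1)
#       def trigger(self, trigger_point, delta_points):
#           return 0.5 * _np.exp(-delta_points[0])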
class FastModel():
"""An interface for a "fast" model."""
def time_trigger(self, times):
"""Return the time kernel (and, by convention, the overall rate as
well).
:param times: Array of shape `(n,)` of times into the past.
:return: Array of shape `(n,)` giving intensity at these times.
"""
raise NotImplementedError()
def space_trigger(self, space_points):
"""Return the space kernel (by convention, is a probability kernel).
:param space_points: Array of shape `(2,n)` of space locations.
:return: Array of shape `(n,)` giving intensity at these places.
"""
raise NotImplementedError()
def background_in_space(self, space_points):
"""Return the background risk, which is assumed not to vary in time.
:param space_points: Array of shape `(2,n)` of space locations.
:return: Array of shape `(n,)` giving intensity at these places.
"""
raise NotImplementedError()
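# A matching sketch for the "fast" interface (kernels are assumptions; note
# the convention that time_trigger carries the overall rate while
# space_trigger is a probability kernel):
#
#   class ToyFastModel(FastModel):
#       def time_trigger(self, times):
#           return 0.5 * _np.exp(-times)
#       def space_trigger(self, space_points):
#           rr = (space_points ** 2).sum(axis=0)
#           return _np.exp(-rr / 2) / (2 * _np.pi)
#       def background_in_space(self, space_points):
#           return _np.full(space_points.shape[-1], 0.1)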
class PredictorBase():
"""Base class which can perform "predictions". Predictions are formed by
evaluating the intensity (background and triggers) at one or more time
points and averaging.
:param model: The :class:`ModelBase` object to get the trigger and
background from.
:param points: Usual array of shape `(3,N)`
"""
def __init__(self, model, points):
self._model = model
self._points = _np.asarray(points)
@property
def model(self):
return self._model
@property
def points(self):
return self._points
def background_predict(self, time, space_points):
"""Find a point prediction at one time and one or more locations.
Ignores triggers, and only uses the background intensity.
:param time: Time point to evaluate at
:param space_points: Array of shape `(2,n)`
:return: Array of shape `(n,)`
"""
space_points = _np.asarray(space_points)
if len(space_points.shape) == 1:
space_points = space_points[:,None]
eval_points = _np.asarray([[time] * space_points.shape[1],
space_points[0], space_points[1]])
out = self._model.background(eval_points)
return out
def point_predict(self, time, space_points):
"""Find a point prediction at one time and one or more locations.
The data the class holds will be clipped to be before `time` and
the used as the trigger events.
:param time: Time point to evaluate at
:param space_points: Array of shape `(2,n)`
:return: Array of shape `(n,)`
"""
space_points = _np.asarray(space_points)
if len(space_points.shape) == 1:
space_points = space_points[:,None]
eval_points = _np.asarray([[time] * space_points.shape[1],
space_points[0], space_points[1]])
out = self._model.background(eval_points)
data = self._points[:,self._points[0] < time]
for i, pt in enumerate(eval_points.T):
out[i] += _np.sum(self._model.trigger(pt, pt[:,None] - data))
return out
def range_predict(self, time_start, time_end, space_points, samples=20):
if not time_start < time_end:
            raise ValueError("time_start must be before time_end")
out = self.point_predict(time_start, space_points)
for i in range(1, samples):
t = time_start + (time_end - time_start) * i / (samples - 1)
n = self.point_predict(t, space_points)
out = out + n
return out / samples
    def to_fast_split_predictor(self):
        """Return a new instance of a "predictor" which offers better
        performance if the model conforms to the interface :class:`FastModel`.
"""
return FastPredictorBase(self._model)
def to_fast_split_predictor_histogram(self, grid, time_bin_size=1, space_bin_size=25):
"""Return a new instance of a "predictor" which offers faster
predictions by using approximations.
        Currently we assume that the trigger intensity does not vary with
starting position, and that it "factors" into a product of a time
kernel and a space kernel. The model must conform to the
:class:`FastModel` interface. We also assume that the background
intensity does not vary in time.
:param time_bin_size: Size of bins for the histogram we use to
approximate the time kernel.
:param space_bin_size: Size of bins for the two dimensional histogram
we use to approximate the space kernel.
:param grid: The grid to base the background estimate on: for best
results, this should be the same grid you will eventually make
predictions for.
"""
return FastPredictorHist(self._model,
self._to_time_hist(time_bin_size), time_bin_size,
self._to_space_grid(space_bin_size), self._to_background(grid))
def _to_background(self, grid):
cts_pred = predictors.KernelRiskPredictor(self._model.background_in_space)
cts_pred.samples = -5
return predictors.grid_prediction(cts_pred, grid)
def _to_space_grid(self, space_bin_size):
size = 5
while True:
d = size * space_bin_size
region = _ocp_data.RectangularRegion(xmin=-d, ymin=-d, xmax=d, ymax=d)
pred = predictors.grid_prediction_from_kernel(self._model.space_trigger,
region, space_bin_size, samples=-5)
mat = pred.intensity_matrix
sorted_mat = _np.sort(mat.flatten())
cs = _np.cumsum(sorted_mat)
if not _np.any(cs <= cs[-1]*.001):
size += size
continue
sorted_index = _np.max(_np.where(cs <= cs[-1]*.001))
cutoff = sorted_mat[sorted_index]
mask = (pred.intensity_matrix <= cutoff)
r = int(size*80/100)
x = _np.broadcast_to(_np.arange(size*2)[:,None], (size*2, size*2))
y = _np.broadcast_to(_np.arange(size*2)[None,:], (size*2, size*2))
disc = _np.sqrt((x-size)**2 + (y-size)**2) >= r
if _np.all(mask[disc]):
return pred
size += size
def _to_time_hist(self, time_bin_size):
size = 100
while True:
hist = self._model.time_trigger(_np.arange(size) * time_bin_size)
sorted_hist = _np.sort(hist)
cs = _np.cumsum(sorted_hist)
if not _np.any(cs <= cs[-1]*.001):
size += size
continue
sorted_index = _np.max(_np.where(cs <= cs[-1]*.001))
cutoff = sorted_hist[sorted_index]
mask = (hist <= cutoff)
index_start = int(size * 80 / 100)
if _np.all(mask[index_start:]):
index_end = _np.max(_np.where(~mask))
return hist[:index_end+1]
size += size
class FastPredictorBase():
"""Base class which can perform fast "predictions" by assuming that the
background rate does not vary in time, and that the trigger kernel factors.
:param model: The :class:`FastModel` object to get the trigger and
background from.
"""
def __init__(self, model):
        self._model = model
        self._points = None
@property
def model(self):
"""The model we base predictions on."""
return self._model
@property
def points(self):
"""Points in the past we use as triggers."""
return self._points
@points.setter
def points(self, v):
self._points = v
def time_kernel(self, times):
return self._model.time_trigger(times)
def space_kernel(self, pts):
return self._model.space_trigger(pts)
def background_kernel(self, pts):
return self._model.background_in_space(pts)
def range_predict(self, time_start, time_end, space_points, time_samples=5):
space_points = _np.asarray(space_points)
if len(space_points.shape) == 1:
space_points = space_points[:,None]
data = self._points[:,self._points[0] < time_start]
tl = space_points.shape[-1] * data.shape[-1]
pts = (space_points[:,:,None] - data[1:,None,:]).reshape((2,tl))
space_triggers = self.space_kernel(pts).reshape(space_points.shape[-1], data.shape[-1])
times = _np.linspace(time_start, time_end, time_samples)
dtimes = (times[None,:] - data[0][:,None])
time_triggers = self.time_kernel(dtimes.flatten()).reshape(dtimes.shape)
time_triggers = _np.mean(time_triggers, axis=1)
return self.background_kernel(space_points) + _np.sum(space_triggers * time_triggers[None,:], axis=1)
class FastPredictorHist(FastPredictorBase):
"""Base class which can perform fast "predictions", based on using
histograms to approximate the kernels.
:param model: The :class:`FastModel` object to get the trigger and
background from.
:param time_hist: Array of shape `(k,)` giving the time kernel.
:param time_bandwidth: Width of each bin in the time histogram.
:param space_grid: Instance of :class:`GridPredictionArray` to use as an
approximation to the space kernel.
:param background_grid: Instance of :class:`GridPredictionArray` to use as an
approximation to the (time-invariant) background rate.
"""
def __init__(self, model, time_hist, time_bandwidth, space_grid, background_grid):
super().__init__(model)
self._time = (time_hist, time_bandwidth)
self._space_grid = space_grid
self._background_grid = background_grid
@property
def time_histogram_width(self):
"""The width of each bar in the time histogram."""
return self._time[1]
@property
def time_histogram(self):
"""An array giving the height of each bar in the time histogram."""
return self._time[0]
@property
def space_grid(self):
"""The grid array we use for approximating the space kernel."""
return self._space_grid
def time_kernel(self, times):
times = _np.atleast_1d(times)
        indices = _np.floor_divide(times, self._time[1]).astype(int)
m = indices < self._time[0].shape[0]
out = _np.empty(times.shape)
out[m] = self._time[0][indices[m]]
out[~m] = 0
return out
def space_kernel(self, pts):
return self._space_grid.risk(*pts)
def background_kernel(self, pts):
return self._background_grid.risk(*pts)
def non_normalised_p_matrix(model, points):
d = points.shape[1]
p = _np.zeros((d,d))
progress = _ocp_logger.ProgressLogger(d * (d+1) / 2, _datetime.timedelta(seconds=10), _logger)
p[_np.diag_indices(d)] = model.background(points)
progress.add_to_count(d)
for i in range(d):
trigger_point = points[:,i]
delta_points = trigger_point[:,None] - points[:, :i]
m = delta_points[0] > 0
p[:i, i][m] = model.trigger(trigger_point, delta_points[:,m])
p[:i, i][~m] = 0
progress.add_to_count(i)
return p
def normalise_p(p):
norm = _np.sum(p, axis=0)[None,:]
if _np.any(norm==0):
raise ValueError("Zero column in p matrix", p)
return p / norm
def p_matrix(model, points):
"""Compute the normalised "p" matrix.
:param model: Instance of :class:`ModelBase`
:param points: Data
"""
p = non_normalised_p_matrix(model, points)
return normalise_p(p)
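# Illustrative call (using a toy model such as the sketch above; the exact
# values depend on the chosen kernels):
#
#   pts = _np.array([[0.0, 1.0, 2.0], [0.0, 0.1, 0.2], [0.0, 0.0, 0.1]])
#   p = p_matrix(ToyModel(), pts)   # (3, 3), upper triangular, columns sum to 1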
def clamp_p(p, cutoff = 99.9):
"""For each column, set entries beyond the `cutoff` percentile to 0.
"""
pp = _np.array(p)
for j in range(1, p.shape[1]):
x = pp[:j+1,j]
lookup = _np.argsort(x)
s = x[lookup]
c = _np.sum(_np.cumsum(s) < 1 - cutoff / 100)
x[lookup[:c]] = 0
return pp
class Optimiser():
"""We cannot know all models and how to optimise them, but we provide some
helper routines."""
def __init__(self, model, points, make_p=True):
self._logger = _logging.getLogger(__name__)
self._model = model
self._points = points
if make_p:
self._p = _np.asarray( p_matrix(model, points) )
if _np.any(self._p < 0):
                raise ValueError("p should be non-negative")
@property
def p(self):
"""The p matrix"""
return self._p
@property
def model(self):
return self._model
@property
def points(self):
return self._points
@property
def num_points(self):
return self._points.shape[1]
@property
def p_diag(self):
"""The diagonal of the p matrix."""
d = self._points.shape[1]
return self._p[_np.diag_indices(d)]
@property
def p_diag_sum(self):
return _np.sum(self.p_diag)
@property
def p_upper_tri_sum(self):
out = 0.0
for i in range(1, self._p.shape[0]):
out += _np.sum(self._p[:i, i])
if abs(out) < 1e-10:
#raise ValueError()
            self._logger.warning("p-matrix has become diagonal -- no repeat behaviour!")
return out
def upper_tri_col(self, col):
return self._p[:col, col]
def diff_col_times(self, col):
"""`times[col] - times[:col]`"""
return self._points[0, col] - self._points[0, :col]
def diff_col_points(self, col):
"""`xypoints[col] - xypoints[:col]`"""
return self._points[1:, col][:,None] - self._points[1:, :col]
def sample(self):
"""Use the p-matrix to take a "sample", returning background events
and triggered events.
:return: Pair `(bk_indices, trigger_pairs)` where `bk_indices` are
indices into :attr:`points` giving the sampled background events,
and `trigger_pairs` is a list of pairs `(trigger, triggered)` where
`trigger` is the trigger index, and `triggered` if the (later) index
of the event which is triggered.
"""
bk, tr = [], []
for i in range(self.num_points):
j = _np.random.choice(i+1, p=self.p[:i+1,i])
if i==j:
bk.append(i)
else:
tr.append((j,i))
return bk, tr
def sample_to_points(self):
"""Use the p-matrix to take a "sample", returning background events
and triggered events.
:return: Pair `(bk_points, trigger_deltas)` both arrays of points,
`bk_points` being the background events, and `trigger_deltas` being
the "jumps" from the triggering to the triggered events.
"""
bk, tr = self.sample()
        bk = _np.array(bk, dtype=int)
bk_points = self._points[:, bk]
trigger_deltas = [self._points[:,end] - self._points[:,start]
for start, end in tr]
return bk_points, _np.asarray(trigger_deltas).T
def iterate(self):
"""Abstract method to be over-riden. Should return a new `model`."""
raise NotImplementedError()
class _BaseTrainer(predictors.DataTrainer):
def __init__(self):
self.time_unit = _np.timedelta64(1, "D")
self._logger = _logging.getLogger(__name__)
@property
def time_unit(self):
"""The unit of time to use to convert real timestamps into abstract
timestamps."""
return self._time_unit
@time_unit.setter
def time_unit(self, v):
self._time_unit = _np.timedelta64(v)
def make_data(self, predict_time=None):
"""Internal method, and for testing. Returns the data in the format
expected by the base classes.
:param predict_time: Crop the data to before this time, and use this time
as the end point. If `None` then use the final timestamp in the
data, rounded up by the currently in use time unit.
:return: `predict_time, for_fixed, data`
"""
if predict_time is None:
offset = _np.datetime64("2000-01-01T00:00")
x = self.data.timestamps[-1] - offset
x = _np.ceil(x / self.time_unit) * self.time_unit
predict_time = offset + x
else:
predict_time = _np.datetime64(predict_time)
data = self.data[self.data.timestamps <= predict_time]
times = (data.timestamps - data.timestamps[0]) / self.time_unit
for_fixed = (predict_time - data.timestamps) / self.time_unit
data = _np.asarray([times, data.xcoords, data.ycoords])
return predict_time, for_fixed, data
class Trainer(_BaseTrainer):
"""Base class for a standard "trainer". It is not assumed that this will
always be used; but it may prove helpful often.
"""
def __init__(self):
super().__init__()
def make_data(self, predict_time=None):
"""Internal method, and for testing. Returns the data in the format
expected by the base classes.
:param predict_time: As in :meth:`train`.
:return: `(fixed, data)` where `fixed` is a class describing any
"fixed" parameters of the model (meta-parameters if you like) and
`data` is an array of shape `(3,N)`.
"""
predict_time, for_fixed, data = super().make_data(predict_time)
return self.make_fixed(for_fixed), data
def make_fixed(self, times):
"""Abstract method to return the "fixed" model.
:param times: An array of the timestamps, converted to units of time
before the "predict point".
"""
raise NotImplementedError()
def initial_model(self, fixed, data):
"""Abstract method to return the initial model from which optimisation
is performed. The pair `(fixed, data)` is as returned by
:meth:`make_data`.
"""
raise NotImplementedError()
@property
def _optimiser(self):
"""The class to be used as the optimiser"""
raise NotImplementedError()
def train(self, predict_time=None, iterations=1):
"""Optimise the model.
        :param predict_time: Crop the data to before this time, and use this time
as the end point. If `None` then use the final timestamp in the
data, rounded up by the currently in use time unit.
:return: Instances of :class:`Model`.
"""
fixed, data = self.make_data(predict_time)
model = self.initial_model(fixed, data)
for _ in range(iterations):
opt = self._optimiser(model, data)
model = opt.iterate()
self._logger.debug(model)
return model
class Predictor(_BaseTrainer):
"""A :class:`DataTrainer` which uses a model to make predictions.
:param grid: The Grid object to make predictions against.
:param model: The model object to use.
"""
def __init__(self, grid, model):
super().__init__()
self._grid = grid
self._model = model
def to_fast_split_predictor_histogram(self, time_bin_size=1, space_bin_size=25):
"""Return a new instance of a "predictor" which offers faster
predictions by using approximations.
        Currently we assume that the trigger intensity does not vary with
starting position, and that it "factors" into a product of a time
kernel and a space kernel. The model must conform to the
:class:`FastModel` interface. We also assume that the background
intensity does not vary in time.
:param time_bin_size: Size of bins for the histogram we use to
approximate the time kernel. In units of :attr:`time_unit`.
:param space_bin_size: Size of bins for the two dimensional histogram
we use to approximate the space kernel.
"""
pred = PredictorBase(self._model, [])
fsp = pred.to_fast_split_predictor_histogram(self._grid, time_bin_size, space_bin_size)
return FastPredictor(self._grid, fsp)
def to_fast_split_predictor(self):
"""Return a new instance of a "predictor" which offers faster
predictions, assuming that the model conforms to the interface
:class:`FastModel`.
"""
pred = PredictorBase(self._model, [])
return FastPredictor(self._grid, pred.to_fast_split_predictor())
def background_continuous_predict(self, predict_time, space_samples=20):
"""Make a prediction at this time, returning a continuous prediction.
:param predict_time: Limit to data before this time, and
use this as the predict time.
        :param end_time: If not `None`, then approximately integrate
over this time range.
:return: A continuous prediction.
"""
predict_time, for_fixed, data = self.make_data(predict_time)
time = _np.max(for_fixed)
pred = PredictorBase(self._model, data)
def kernel(pts):
return pred.background_predict(time, pts)
return predictors.KernelRiskPredictor(kernel,
xoffset=self._grid.xoffset, yoffset=self._grid.yoffset,
cell_width=self._grid.xsize, cell_height=self._grid.ysize,
samples=space_samples)
def background_predict(self, predict_time, space_samples=20):
"""Make a prediction at this time.
:param predict_time: Limit to data before this time, and
use this as the predict time.
        :param end_time: If not `None`, then approximately integrate
over this time range.
:return: A grid prediction, masked if possible with the grid, and
normalised.
"""
cts_predictor = self.background_continuous_predict(predict_time, space_samples)
return self._to_grid_pred(cts_predictor)
def continuous_predict(self, predict_time, end_time=None, time_samples=20, space_samples=20):
"""Make a prediction at this time, returning a continuous prediction.
:param predict_time: Limit to data before this time, and
use this as the predict time.
        :param end_time: If not `None`, then approximately integrate
over this time range.
:return: A continuous prediction.
"""
predict_time, for_fixed, data = self.make_data(predict_time)
time = _np.max(for_fixed)
pred = PredictorBase(self._model, data)
if end_time is None:
def kernel(pts):
return pred.point_predict(time, pts)
else:
end_time = _np.datetime64(end_time)
time_end = time + (end_time - predict_time) / self.time_unit
def kernel(pts):
return pred.range_predict(time, time_end, pts, samples=time_samples)
return predictors.KernelRiskPredictor(kernel,
xoffset=self._grid.xoffset, yoffset=self._grid.yoffset,
cell_width=self._grid.xsize, cell_height=self._grid.ysize,
samples=space_samples)
def predict(self, predict_time, end_time=None, time_samples=20, space_samples=20):
"""Make a prediction at this time.
:param predict_time: Limit to data before this time, and
use this as the predict time.
        :param end_time: If not `None`, then approximately integrate
over this time range.
:return: A grid prediction, masked if possible with the grid, and
normalised.
"""
cts_predictor = self.continuous_predict(predict_time, end_time, time_samples, space_samples)
return self._to_grid_pred(cts_predictor)
def _to_grid_pred(self, cts_predictor):
grid_pred = predictors.GridPredictionArray.from_continuous_prediction_grid(
cts_predictor, self._grid)
try:
grid_pred.mask_with(self._grid)
        except Exception:
pass
return grid_pred.renormalise()
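# A hedged end-to-end sketch (the grid, model and `timed_points` objects are
# assumptions standing in for whatever the surrounding package provides):
#
#   predictor = Predictor(grid, model)
#   predictor.data = timed_points
#   risk = predictor.predict(_np.datetime64("2017-03-01T00:00"))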
class FastPredictor(_BaseTrainer):
"""A :class:`DataTrainer` which uses a model to make predictions.
Is optimised for certain classes of models and can optionally also
approximate kernels by histograms.
    Currently we assume that the trigger intensity does not vary with
starting position, and that it "factors" into a product of a time
kernel and a space kernel. The model must conform to the
:class:`FastModel` interface.
:param grid: The Grid object to make predictions against.
:param fast_pred_base: The instance of :class:`FastPredictorBase`
we'll use internally.
"""
def __init__(self, grid, fast_pred_base):
super().__init__()
self._grid = grid
self._fast_pred_base = fast_pred_base
@property
def fast_predictor_base(self):
"""The underlying :class:`FastPredictorBase` which is used."""
return self._fast_pred_base
def background_predict(self, space_samples=-5):
cts_predictor = predictors.KernelRiskPredictor(self._fast_pred_base.background_kernel,
xoffset=self._grid.xoffset, yoffset=self._grid.yoffset,
cell_width=self._grid.xsize, cell_height=self._grid.ysize,
samples=space_samples)
return self._to_grid_pred(cts_predictor)
def continuous_predict(self, predict_time, end_time, time_samples=5, space_samples=-5):
"""Make a prediction at this time, returning a continuous prediction.
:param predict_time: Limit to data before this time, and use this as
the predict time.
        :param end_time: Approximately integrate over this time range.
:param time_samples: The number of samples to use in approximating the
integral over time.
        :param space_samples: The number of samples to use in the Monte Carlo
          integration over space.
:return: A continuous prediction.
"""
predict_time, for_fixed, data = self.make_data(predict_time)
time = _np.max(for_fixed)
self._fast_pred_base.points = data
end_time = _np.datetime64(end_time)
time_end = time + (end_time - predict_time) / self.time_unit
def kernel(pts):
return self._fast_pred_base.range_predict(time, time_end, pts, time_samples=time_samples)
return predictors.KernelRiskPredictor(kernel,
xoffset=self._grid.xoffset, yoffset=self._grid.yoffset,
cell_width=self._grid.xsize, cell_height=self._grid.ysize,
samples=space_samples)
def predict(self, predict_time, end_time, time_samples=5, space_samples=-5):
"""Make a prediction at this time.
:param predict_time: Limit to data before this time, and
use this as the predict time.
        :param end_time: Approximately integrate over this time range.
:param time_samples: The number of samples to use in approximating the
integral over time.
        :param space_samples: The number of samples to use in the Monte Carlo
          integration over space.
:return: A grid prediction, masked if possible with the grid, and
normalised.
"""
cts_predictor = self.continuous_predict(predict_time, end_time,
time_samples, space_samples)
return self._to_grid_pred(cts_predictor)
def _to_grid_pred(self, cts_predictor):
grid_pred = predictors.GridPredictionArray.from_continuous_prediction_grid(
cts_predictor, self._grid)
try:
grid_pred.mask_with(self._grid)
        except Exception:
pass
return grid_pred.renormalise()
|
{"hexsha": "175360094e78bd956c69a2fa63284255e863fa6b", "size": 29946, "ext": "py", "lang": "Python", "max_stars_repo_path": "open_cp/sepp_base.py", "max_stars_repo_name": "sumau/PredictCode", "max_stars_repo_head_hexsha": "e2a2d5a8fa5d83f011c33e18d4ce6ac7e1429aa8", "max_stars_repo_licenses": ["Artistic-2.0"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2017-04-19T09:17:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T08:53:28.000Z", "max_issues_repo_path": "open_cp/sepp_base.py", "max_issues_repo_name": "sumau/PredictCode", "max_issues_repo_head_hexsha": "e2a2d5a8fa5d83f011c33e18d4ce6ac7e1429aa8", "max_issues_repo_licenses": ["Artistic-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2017-06-11T17:46:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-07T10:49:10.000Z", "max_forks_repo_path": "open_cp/sepp_base.py", "max_forks_repo_name": "sumau/PredictCode", "max_forks_repo_head_hexsha": "e2a2d5a8fa5d83f011c33e18d4ce6ac7e1429aa8", "max_forks_repo_licenses": ["Artistic-2.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2017-07-19T18:29:37.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-12T22:06:45.000Z", "avg_line_length": 37.6206030151, "max_line_length": 109, "alphanum_fraction": 0.6218860616, "include": true, "reason": "import numpy", "num_tokens": 6877}
|
import torch
import numpy
import matplotlib.pyplot as plt
from torch import nn
from einops.layers.torch import Rearrange
from einops import rearrange, repeat
class PatchEmbedding(nn.Module):
"""Creates the patches for the model input"""
def __init__(
self,
dim,
channels=3,
dropout_rate=0.1,
image_height=256,
image_width=256,
image_depth=256,
patch_height=16,
patch_width=16,
patch_depth=None,
):
"""
:patch_height: Height of patches, default is 16.
:patch_width: Width of patches, default is 16.
:patch_depth: Depth of patches, default is undefined.
"""
        super().__init__()
if not patch_depth:
patch_dim = channels * patch_height * patch_width
self._num_patches = (image_height // patch_height) * (
image_width // patch_width
)
self._rearr = Rearrange(
"b c (h p1) (w p2) -> b (h w) (p1 p2 c)",
p1=patch_height,
p2=patch_width,
)
self._lin = nn.Linear(patch_dim, dim)
self._pos = nn.Parameter(torch.randn(1, self._num_patches + 1, dim))
self._token = nn.Parameter(torch.randn(1, 1, dim))
else:
patch_dim = channels * patch_height * patch_width * patch_depth
self._num_patches = (
(image_height // patch_height)
* (image_width // patch_width)
* (image_depth // patch_depth)
)
self._rearr = Rearrange(
"b c (h p1) (w p2) (z p3) -> b (h w z) (p1 p2 p3 c)",
p1=patch_height,
p2=patch_width,
p3=patch_depth,
)
self._lin = nn.Linear(patch_dim, dim)
self._pos = nn.Parameter(torch.randn(1, self._num_patches + 1, dim))
self._token = nn.Parameter(torch.randn(1, 1, dim))
self._dropout = nn.Dropout(dropout_rate)
def forward(self, x):
x = self._rearr(x)
x = self._lin(x)
b, _, _ = x.shape
tokens = repeat(self._token, "() n d -> b n d", b=b)
x = torch.cat((tokens, x), dim=1)
x += self._pos[:, : (self._num_patches + 1)]
x = self._dropout(x)
return x
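# A minimal usage sketch (batch size and embedding dimension are
# illustrative assumptions):
#
#   emb = PatchEmbedding(dim=128)
#   x = torch.randn(2, 3, 256, 256)   # (batch, channels, height, width)
#   out = emb(x)                      # (2, 257, 128) with the default patches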
|
{"hexsha": "ffc31773a9c45770b8da5ab40cb702eca3683574", "size": 2362, "ext": "py", "lang": "Python", "max_stars_repo_path": "transformers/patch_emb.py", "max_stars_repo_name": "mhun1/transformers", "max_stars_repo_head_hexsha": "06376fabd7ccc15bf7fa5a4a4a142961a8c41e79", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "transformers/patch_emb.py", "max_issues_repo_name": "mhun1/transformers", "max_issues_repo_head_hexsha": "06376fabd7ccc15bf7fa5a4a4a142961a8c41e79", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "transformers/patch_emb.py", "max_forks_repo_name": "mhun1/transformers", "max_forks_repo_head_hexsha": "06376fabd7ccc15bf7fa5a4a4a142961a8c41e79", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6753246753, "max_line_length": 80, "alphanum_fraction": 0.533022862, "include": true, "reason": "import numpy", "num_tokens": 591}
|
import numpy as np
def randomcolor():
    '''Return a list of 20 random RGB colors, each channel scaled to [0, 1).
    '''
colors = []
for i in range(20):
colors.append(list((np.random.randint(0, 255, 3) / 255)))
return colors
|
{"hexsha": "ff7fcbb0bfdbde3d1f79a429a9fe598acfe02aeb", "size": 206, "ext": "py", "lang": "Python", "max_stars_repo_path": "astetik/style/random_colors.py", "max_stars_repo_name": "meirm/astetik", "max_stars_repo_head_hexsha": "ea05ce57a0bf1e8bd7ef18c4d5ca8d7ad3fb4be7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-05-14T07:26:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-21T12:05:20.000Z", "max_issues_repo_path": "astetik/style/random_colors.py", "max_issues_repo_name": "meirm/astetik", "max_issues_repo_head_hexsha": "ea05ce57a0bf1e8bd7ef18c4d5ca8d7ad3fb4be7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2018-04-04T20:45:59.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-07T12:54:37.000Z", "max_forks_repo_path": "astetik/style/random_colors.py", "max_forks_repo_name": "meirm/astetik", "max_forks_repo_head_hexsha": "ea05ce57a0bf1e8bd7ef18c4d5ca8d7ad3fb4be7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-02-09T14:35:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-19T13:47:23.000Z", "avg_line_length": 13.7333333333, "max_line_length": 65, "alphanum_fraction": 0.572815534, "include": true, "reason": "import numpy", "num_tokens": 56}
|
% book : Signals and Systems Laboratory with MATLAB
% authors : Alex Palamides & Anastasia Veloni
% Fast Fourier Transform of the sequence x=[1 2 3],n=0,1,2
x=[1 2 3];
Xk1=fft(x)
Xk2=dft(x)
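% Expected output for x=[1 2 3] (dft is assumed to be the book's companion
% implementation of the discrete Fourier transform; both calls should agree):
% Xk = [6.0000, -1.5000 + 0.8660i, -1.5000 - 0.8660i]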
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/28762-signals-and-systems-laboratory-with-matlab-m-files/M-FILES/7/c79a.m"}
|
using GeometryBasics
import GeometryBasics
import GeometryBasics as gb
using Test
x = Rect(Vec(0.0, 0.0), Vec(1.0, 1.0))
y = Rect(Vec(0.5, 0.7), Vec(1.0, 1.0))
#z = GeometryBasics.intersect(x,y)
# """
# intersect(h1::Rect, h2::Rect)
# Perform a intersection between two Rects.
# """
# function intersect(h1::Rect{N}, h2::Rect{N}) where {N}
# m = max.(minimum(h1), minimum(h2))
# mm = min.(maximum(h1), maximum(h2))
# return Rect{N}(m, mm - m)
# end
# https://github.com/JuliaGeometry/GeometryBasics.jl/blob/798ddaa812cb11a8b28cc9d75f6df12e07c72fdc/src/primitives/rectangles.jl#L337-L341
z = gb.intersect(x,y)
x = Rect(Vec(0.0, 0.0), Vec(1.0, 1.0))
y = Rect(Vec(2.5, 3.7), Vec(1.0, 1.0))
z = gb.intersect(x,y)
@show z
rect = Vector{Rect2D{Float64}}()
push!(rect, z)
rect
function learn_Rect()
r1 = Rect(Vec(0.0, 0.0), Vec(1.0, 1.0))
r2 = Rect(Vec(0.5, 0.7), Vec(1.0, 1.0))
r3 = Rect2D{Float64}(0,0,1,1)
@test r1 == r3
@test 1 == area(r1)
@test 1 == area(r2)
r4 = Rect2D{Float64}(0,0,2,3)
@test 2*3 == area(r4)
r5 = Rect2D{Float64}(0.2, 0.4, 1, 1)
r6 = GeometryBasics.intersect(r1,r5)
@test (1-0.2)*(1-0.4) == area(r6)
@test [0.2,0.4] == r6.origin
@test [0.8,0.6] == r6.widths
#@show r6
## No intersection of rectangles
# Only 2 possible relative positions
# Lower-left and upper-right
# Upper-left and lower-right
    # If any of the widths is non-positive then the rectangles do not intersect
# Test intersection with
#
# any(ir.widths .<= 0)
## Lower-left and upper-right
# Points are touching
r = Rect2D{Float64}(1, 1, 1, 1)
ir = GeometryBasics.intersect(r1,r)
@test [1,1] == ir.origin
@test [0,0] == ir.widths
@test any(ir.widths .<= 0)
# x >, y <
r = Rect2D{Float64}(1.001, 0.5, 1, 1)
ir = GeometryBasics.intersect(r1,r)
@test any(ir.widths .<= 0)
# x >, y >
r = Rect2D{Float64}(1.001, 1.001, 1, 1)
ir = GeometryBasics.intersect(r1,r)
@test any(ir.widths .<= 0)
# x <, y >
r = Rect2D{Float64}(0.5, 1.001, 1, 1)
ir = GeometryBasics.intersect(r1,r)
@test any(ir.widths .<= 0)
## Upper-left and lower-right
# x >, y <
r = Rect2D{Float64}(1.001, -0.5, 1, 1)
ir = GeometryBasics.intersect(r1,r)
@test any(ir.widths .<= 0)
# x >, y >
r = Rect2D{Float64}(1.001, -1.001, 1, 1)
ir = GeometryBasics.intersect(r1,r)
@test any(ir.widths .<= 0)
# x <, y >
r = Rect2D{Float64}(0.5, -1.001, 1, 1)
ir = GeometryBasics.intersect(r1,r)
@test any(ir.widths .<= 0)
end
@testset "Base" begin
learn_Rect()
end
nothing
|
{"hexsha": "1c77acf13eac7d628bac362e7b7baef79656de03", "size": 2646, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "learn/learn_GeometryBasics.jl", "max_stars_repo_name": "ykyang/org.allnix.julia", "max_stars_repo_head_hexsha": "58933a5848dec81c53d591b4163e9a70df62ddd8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "learn/learn_GeometryBasics.jl", "max_issues_repo_name": "ykyang/org.allnix.julia", "max_issues_repo_head_hexsha": "58933a5848dec81c53d591b4163e9a70df62ddd8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "learn/learn_GeometryBasics.jl", "max_forks_repo_name": "ykyang/org.allnix.julia", "max_forks_repo_head_hexsha": "58933a5848dec81c53d591b4163e9a70df62ddd8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7289719626, "max_line_length": 137, "alphanum_fraction": 0.582010582, "num_tokens": 1068}
|
import sys
import signal
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import time
from datetime import timedelta
import pygame
import pygame.camera
import pygame.surfarray
from PIL import Image
import numpy as np
image_path = 'image.jpg'
# getting image from camera
pygame.camera.init()
# pygame.camera.list_camera() #Camera detected or not
cam = pygame.camera.Camera("/dev/video0",(640,480))
cam.start()
def main():
while True:
try:
print ('Saving image from camera...')
start_time_camera = time.time()
img = cam.get_image()
pygame.image.save(img, image_path)
            end_time_camera = time.time()
time_dif_camera = end_time_camera - start_time_camera
# Print the time-usage.
os.system('cls' if os.name == 'nt' else 'clear')
print ('###### time usage camera ######')
print(str(timedelta(seconds=int(round(time_dif_camera)))))
        except (KeyboardInterrupt, SystemExit, RuntimeError, SystemError):
            cam.stop()
            break
def exit_gracefully(signum, frame):
# stuff to run when process is closed
cam.stop()
if __name__ == '__main__':
signal.signal(signal.SIGTERM, exit_gracefully)
main()
|
{"hexsha": "f1d255079fcba00519d3a1afd498677dd13635de", "size": 1173, "ext": "py", "lang": "Python", "max_stars_repo_path": "Classifier/Old/Camera.py", "max_stars_repo_name": "mugroma3/ClassiPi", "max_stars_repo_head_hexsha": "8a70a5f5691d28c57f084b370f4688b70e8f406a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-01-24T16:44:47.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-26T23:28:05.000Z", "max_issues_repo_path": "Classifier/Old/Camera.py", "max_issues_repo_name": "mugroma3/ClassiPi", "max_issues_repo_head_hexsha": "8a70a5f5691d28c57f084b370f4688b70e8f406a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Classifier/Old/Camera.py", "max_forks_repo_name": "mugroma3/ClassiPi", "max_forks_repo_head_hexsha": "8a70a5f5691d28c57f084b370f4688b70e8f406a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.9387755102, "max_line_length": 70, "alphanum_fraction": 0.6930946292, "include": true, "reason": "import numpy", "num_tokens": 278}
|
[STATEMENT]
lemma fold_sum_aux:
assumes "\<forall>u \<in> set (a # xs). \<forall>v \<in> set (a # xs). f v + W u v \<ge> f u"
shows "sum_list (map f (a # xs @ [a])) \<le> sum_list (map f (a # xs @ [a])) + weight (a # xs @ [a])"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sum_list (map f (a # xs @ [a])) \<le> sum_list (map f (a # xs @ [a])) + local.weight (a # xs @ [a])
[PROOF STEP]
using fold_sum_aux'[of a xs a f] assms
[PROOF STATE]
proof (prove)
using this:
\<forall>u\<in>set (a # xs). \<forall>v\<in>set (xs @ [a]). f u \<le> f v + W u v \<Longrightarrow> sum_list (map f (a # xs)) \<le> sum_list (map f (xs @ [a])) + local.weight (a # xs @ [a])
\<forall>u\<in>set (a # xs). \<forall>v\<in>set (a # xs). f u \<le> f v + W u v
goal (1 subgoal):
1. sum_list (map f (a # xs @ [a])) \<le> sum_list (map f (a # xs @ [a])) + local.weight (a # xs @ [a])
[PROOF STEP]
by auto (metis (no_types, opaque_lifting) add.assoc add.commute add_left_mono)
|
{"llama_tokens": 422, "file": "Monad_Memo_DP_example_Bellman_Ford", "length": 2}
|
function v = subsref( x, S )
try
v = subsref( x.value_, S );
catch
cvx_throw( 'Invalid tuple reference: %s%s', inputname(1), cvx_subs2str( S ) );
end
% Copyright 2005-2014 CVX Research, Inc.
% See the file LICENSE.txt for full copyright information.
% The command 'cvx_where' will show where this file is located.
|
{"author": "yu-jiang", "repo": "radpbook", "sha": "88b9fa7d0a541099cdd1ac29383c89e087d1d895", "save_path": "github-repos/MATLAB/yu-jiang-radpbook", "path": "github-repos/MATLAB/yu-jiang-radpbook/radpbook-88b9fa7d0a541099cdd1ac29383c89e087d1d895/tools/cvx-w64/cvx/lib/@cvxtuple/subsref.m"}
|
import unittest
import numpy as np
import json
import os, shutil, tempfile
from model import Model
class ModelTests(unittest.TestCase):
def setUp(self):
self.model = Model()
self.test_model_dir = tempfile.mkdtemp()
self.mock_train_set = (np.zeros((1, 10000)), np.zeros((1,)))
self.mock_dev_set = self.mock_train_set
self.mock_test_set = self.mock_train_set
self.mock_input = np.random.randint(2, size=10000).reshape(1, 10000)
def tearDown(self):
shutil.rmtree(self.test_model_dir)
def test_build(self):
"""
Test architecture of model
"""
built_model = self.model.build()
j_model = json.loads(built_model.to_json())
# number of layers
self.assertEqual(4, len(built_model.layers), "incorrect number of layer in model")
# input
self.assertEqual("InputLayer", j_model["config"]["layers"][0]["class_name"],
"input isn't specified as input layer")
self.assertEqual([None, 10000], j_model["config"]["layers"][0]["config"]["batch_input_shape"],
"shape of input incorrect")
self.assertEqual("float32", j_model["config"]["layers"][0]["config"]["dtype"],
"type of input incorrect")
# layer types
self.assertEqual("Dense", j_model["config"]["layers"][1]["class_name"],
"1st layer isn't specified as dense layer")
self.assertEqual("Dense", j_model["config"]["layers"][2]["class_name"],
"2nd layer isn't specified as dense layer")
self.assertEqual("Dense", j_model["config"]["layers"][3]["class_name"],
"3rd layer isn't specified as dense layer")
# number of units
self.assertEqual(16, j_model["config"]["layers"][1]["config"]["units"], "false number of units in 1st layer")
self.assertEqual(16, j_model["config"]["layers"][2]["config"]["units"], "false number of units in 2nd layer")
self.assertEqual(1, j_model["config"]["layers"][3]["config"]["units"], "false number of units in 3rd layer")
# activation
        self.assertEqual("relu", j_model["config"]["layers"][1]["config"]["activation"],
                         "false activation in 1st layer")
self.assertEqual("relu", j_model["config"]["layers"][2]["config"]["activation"],
"false activation in 2nd layer")
self.assertEqual("sigmoid", j_model["config"]["layers"][3]["config"]["activation"],
"false activation in 3rd layer")
def test_train(self):
"""
Test training step
"""
self.model.build()
trained_model = self.model.train(self.mock_train_set, self.mock_dev_set,
epochs=1, batch_size=512)
self.assertIsNotNone(trained_model[1], "no model trained")
self.assertIsNotNone(trained_model[0], "history dict doesn't exist")
def test_evaluate(self):
"""
Test evaluation
"""
self.model.build()
self.model.train(self.mock_train_set, self.mock_dev_set, 1, 512, save_model=False)
loss, acc = self.model.evaluate(self.mock_test_set)
# Loss
self.assertIsNotNone(loss, "loss not computed")
        self.assertGreaterEqual(loss, 0., "loss is negative")
# Accuracy
self.assertIsNotNone(acc, "accuracy not computed")
        self.assertGreaterEqual(acc, 0., "accuracy is negative")
self.assertLessEqual(acc, 1., "accuracy is greater than 1")
def test_save(self):
"""
Test model saving
"""
model = self.model.build()
self.model.save(model, self.test_model_dir)
files = os.listdir(self.test_model_dir)
if files:
test_ext = os.path.splitext(files[0])[1]
# Saves a model
self.assertTrue(files, "no model saved")
self.assertEqual(".h5", test_ext, "model not saved as '.h5'")
def test_load(self):
"""
Test model loading
"""
model = self.model.build()
loaded_model = None
self.model.save(model, self.test_model_dir)
files = os.listdir(self.test_model_dir)
if files:
model_name = files[0] # hardcoded, take first model from model_dir
loaded_model = self.model.load(self.test_model_dir, model_name)
# loaded model exists
self.assertTrue(loaded_model, "no model loaded")
def test_predict(self):
"""
Test prediction
"""
self.model.build()
self.model.train(self.mock_train_set, self.mock_dev_set, 1, 512, save_model=False)
test_output = self.model.predict(self.mock_input)
# existence of prediction
self.assertIsNotNone(test_output, "no prediction generated, without given model name")
# boundaries of prediction
        self.assertGreaterEqual(test_output, 0., "prediction out of boundaries (is negative)")
self.assertLessEqual(test_output, 1., "prediction out of boundaries (is greater than 1)")
# Tests with specific model name as input
files = os.listdir(self.test_model_dir)
if files:
model_name = files[0] # hardcoded, take first model from model_dir
test_output_from_model = self.model.predict(self.mock_input, model_name)
#existence
self.assertIsNotNone(test_output_from_model, "no prediction generated, with given model name")
# boundaries of prediction
            self.assertGreaterEqual(test_output_from_model, 0.,
                                    "prediction from loaded model out of boundaries (is negative)")
self.assertLessEqual(test_output_from_model, 1.,
"prediction from loaded model out of boundaries (is greater than 1)")
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "2b7d53cbda8dab4d35589786aadde63ffc0030e4", "size": 5997, "ext": "py", "lang": "Python", "max_stars_repo_path": "IMDB_binary_classification/test/test_model.py", "max_stars_repo_name": "donK23/Thoughtful_DL", "max_stars_repo_head_hexsha": "8cd1b3b0ad35281b2e02a7e8581a606ae659284d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-21T07:41:10.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-21T07:41:10.000Z", "max_issues_repo_path": "IMDB_binary_classification/test/test_model.py", "max_issues_repo_name": "donK23/Thoughtful_DL", "max_issues_repo_head_hexsha": "8cd1b3b0ad35281b2e02a7e8581a606ae659284d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "IMDB_binary_classification/test/test_model.py", "max_forks_repo_name": "donK23/Thoughtful_DL", "max_forks_repo_head_hexsha": "8cd1b3b0ad35281b2e02a7e8581a606ae659284d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9556962025, "max_line_length": 117, "alphanum_fraction": 0.6048024012, "include": true, "reason": "import numpy", "num_tokens": 1272}
|
import os.path
import logging
import numpy as np
from typing import Union
import PyMieSim
from PyMieSim.Tools.LPModes import GetFarFieldLP
from PyMieSim.Tools.Mesh import FibonacciMesh
from PyMieSim.Tools.BaseClasses import BaseDetector, MeshProperty
from PyMieSim.Tools.ErrorMsg import *
from PyMieSim.Tools.utils import NA2Angle, LoadLPMode, IO
from PyMieSim.bin.LMTScatterer import BindedPhotodiode, BindedLPMode
class Photodiode(BaseDetector, MeshProperty):
"""
.. note::
        Detector type class representing a photodiode; light coupling is
        thus independent of the phase of the incident field.
Parameters
----------
NA : :class:`float`
Numerical aperture of imaging system.
Sampling : :class:`int`
Number of sampling points for the mode (inside NA).
GammaOffset : :class:`float`
Angle offset of detector in the direction perpendicular to polarization.
PhiOffset : :class:`float`
Angle offset of detector in the direction parallel to polarization.
Filter : :class:`float`
Angle of polarization filter in front of detector. Default is "None"
CouplingMode : :class:`str`
        Method for computing mode coupling. Either Point or Mean.
"""
def __init__(self,
NA : Union[int, float],
Sampling : int = 400,
CouplingMode : str = 'Point',
GammaOffset : Union[int, float] = 0,
PhiOffset : Union[int, float] = 0,
Filter : Union[int, float, bool] = None):
self.CouplingMode = CouplingMode
self._Filter = Filter
self._PhiOffset = PhiOffset
self._GammaOffset = GammaOffset
self._NA = NA
self._Sampling = Sampling
self.ScalarField = np.ones(Sampling)
self.GetBinding()
def GetBinding(self):
self.Mesh = FibonacciMesh(MaxAngle = NA2Angle(self._NA).Radian,
Sampling = self._Sampling,
PhiOffset = self._PhiOffset,
GammaOffset = self._GammaOffset)
self.Bind = BindedPhotodiode(NA = self._NA,
Phi = np.deg2rad(self._PhiOffset),
Gamma = np.deg2rad(self._GammaOffset),
Filter = np.deg2rad(self._Filter) if self._Filter else float(4242),
Sampling = self._Sampling)
def GetScalarField(self, Sampling, Structured=False):
if Structured:
return np.ones([Sampling, Sampling])
else:
return np.ones(Sampling)
def __str__(self):
return self.Name
def __repr__(self):
return IO( f"""
Photodiode detector
Coupling Mode: Intensity
Numerical aperture: {self.NA:.4f}
Sampling: {self.Mesh.Sampling}
Gamma offset: {self.Mesh.GammaOffset}
Phi offset: {self.Mesh.PhiOffset}
""" )
class IntegratingSphere(Photodiode):
"""
.. note::
        Detector type class representing an integrating sphere; light coupling
        is thus independent of the phase of the incident field.
Parameters
----------
    Sampling : :class:`int`
        Number of sampling points for the mode (the detector spans the full
        sphere, NA = 2, so there are no NA or angular-offset parameters).
Filter : :class:`float`
Angle of polarization filter in front of detector. Default is "None"
CouplingMode : :class:`str`
        Method for computing mode coupling. Either Point or Mean.
"""
def __init__(self,
Sampling : int = 400,
CouplingMode : str = 'Point',
Filter : Union[int, float, bool] = None):
self.CouplingMode = CouplingMode
self._Filter = Filter
self._PhiOffset = 0
self._GammaOffset = 0
self._NA = 2.0
self._Sampling = Sampling
self.ScalarField = np.ones(Sampling)
self.GetBinding()
def GetBinding(self):
self.Mesh = FibonacciMesh(MaxAngle = NA2Angle(self._NA).Radian,
Sampling = self._Sampling,
PhiOffset = self._PhiOffset,
GammaOffset = self._GammaOffset)
self.Bind = BindedPhotodiode(Sampling = self._Sampling,
NA = self._NA,
Phi = np.deg2rad(self._PhiOffset),
Gamma = np.deg2rad(self._GammaOffset),
Filter = np.deg2rad(self._Filter) if self._Filter else float(4242)
)
def GetScalarField(self, Sampling, Structured=False):
if Structured:
return np.ones([Sampling, Sampling])
else:
return np.ones(Sampling)
def __repr__(self):
return IO( f"""
Integrating sphere
Coupling Mode: Intensity
Sampling: {self.Mesh.Sampling}
""" )
def __str__(self):
return self.Name
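# Hedged usage sketches (the parameter values below are illustrative
# assumptions, not defaults mandated by the library):
#
#   detector = Photodiode(NA=0.2, Sampling=400, GammaOffset=0, PhiOffset=0)
#   detector.Mesh.Sampling        # -> 400 sampling points inside the NA cone
#   sphere = IntegratingSphere()  # full-sphere collection, NA fixed at 2.0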
class LPmode(BaseDetector, MeshProperty):
"""
.. note::
        Detector type class representing a fiber LP mode; light coupling is
        thus dependent on the phase of the incident field.
Parameters
----------
Mode : :class:`tuple`
LP mode index l, m.
NA : :class:`float`
Numerical aperture of imaging system.
Sampling : :class:`int`
Number of sampling points for the mode (inside NA).
    Rotation : :class:`float`
        Rotation of the mode field (currently accepted but unused here).
GammaOffset : :class:`float`
Angle offset of detector in the direction perpendicular to polarization.
PhiOffset : :class:`float`
Angle offset of detector in the direction parallel to polarization.
Filter : :class:`float`
Angle of polarization filter in front of detector. Default is "None"
CouplingMode : :class:`str`
        Method for computing mode coupling. Either [Point or Mean].
"""
def __init__(self,
Mode : Union[tuple, list],
NA : float,
Rotation : Union[int, float] = 0,
Sampling : int = 401,
GammaOffset : Union[int, float] = 0,
PhiOffset : Union[int, float] = 0,
Filter : Union[int, float, bool] = None,
CouplingMode : str = 'Point'):
assert CouplingMode in ['Point','Mean'], Error_MeanCentered
if NA > 1 or NA < 0: logging.warning(warning_NAHigh)
self.ModeNumber = Mode
self.CouplingMode = CouplingMode
self._Filter = Filter
self._PhiOffset = PhiOffset
self._GammaOffset = GammaOffset
self._NA = NA
self._Sampling = Sampling
self.GetBinding()
def GetBinding(self):
self.Mesh = FibonacciMesh(MaxAngle = NA2Angle(self._NA).Radian,
Sampling = self._Sampling,
PhiOffset = self._PhiOffset,
GammaOffset = self._GammaOffset)
self.ScalarField = GetFarFieldLP(Mode = self.ModeNumber,
MaxAngle = NA2Angle(self._NA).Radian,
Sampling = self._Sampling)
self.Bind = BindedLPMode(ScalarField = self.ScalarField,
Sampling = self._Sampling,
NA = self._NA,
Phi = np.deg2rad(self._PhiOffset),
Gamma = np.deg2rad(self._GammaOffset),
Filter = np.deg2rad(self._Filter) if self._Filter else float(4242)
)
def GetScalarField(self, Sampling, Structured=False):
return GetFarFieldLP(Mode = self.ModeNumber,
MaxAngle = NA2Angle(self._NA).Radian,
Sampling = Sampling,
Structured = Structured)
def __str__(self):
return self.Name
def __repr__(self):
return IO( f"""
LP mode detector
Coupling Mode: Amplitude
LP Mode: {self.ModeNumber}
Numerical aperture: {self.NA:.4f}
Sampling: {self.Mesh.Sampling}
Gamma offset: {self.Mesh.GammaOffset}
Phi offset: {self.Mesh.PhiOffset}
""" )
# -
|
{"hexsha": "a360e1d0d36bdf05eaa24772bedd158592130805", "size": 9241, "ext": "py", "lang": "Python", "max_stars_repo_path": "PyMieSim/Detector.py", "max_stars_repo_name": "MartinPdS/PyMieSim", "max_stars_repo_head_hexsha": "2560c7f4009df5d05bcb0ce8e929aa7baa7be8de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-02-11T17:53:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-11T20:20:01.000Z", "max_issues_repo_path": "PyMieSim/Detector.py", "max_issues_repo_name": "MartinPdS/PyMieSim", "max_issues_repo_head_hexsha": "2560c7f4009df5d05bcb0ce8e929aa7baa7be8de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-05-12T04:33:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-14T05:25:19.000Z", "max_forks_repo_path": "PyMieSim/Detector.py", "max_forks_repo_name": "MartinPdS/PyMieSim", "max_forks_repo_head_hexsha": "2560c7f4009df5d05bcb0ce8e929aa7baa7be8de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-05-10T19:46:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-01T19:52:14.000Z", "avg_line_length": 35.0037878788, "max_line_length": 106, "alphanum_fraction": 0.5321934856, "include": true, "reason": "import numpy", "num_tokens": 1956}
|
import common
import config
import Matlab
import math
import sys
import numpy as np
import cv2
CAUSAL_DO_NOT_SMOOTH = True
def rand_index(max_index, a_len):
    # Draw a_len distinct random indices from 1..max_index (a translation of
    # the Matlab randIndex() helper used by ransac_line() below).
    if a_len > max_index:
        return np.array([])
    index = np.zeros(a_len, dtype=int)
    available = list(range(1, max_index + 1))
    """
    From Matlab help:
        r = rand(n) returns an n-by-n matrix
        containing pseudorandom values drawn from the standard uniform
        distribution on the open interval (0,1).
        r = rand(m,n) or r = rand([m,n]) returns an m-by-n matrix.
    """
    rs = np.ceil(np.random.rand(a_len) *
                 np.arange(max_index, max_index - a_len, -1)).astype(int)
    for p in range(1, a_len + 1):
        while rs[p - 1] == 0:
            rs[p - 1] = int(np.ceil(np.random.rand() * (max_index - p + 1)))
        # Matlab indices are 1-based; shift by one when indexing `available`.
        index[p - 1] = available[rs[p - 1] - 1]
        del available[rs[p - 1] - 1]
    return index
# TODO: Finish this method or delete it.
"""
# It seems ransac_line is NEVER called, since nop == 0 in causal()
def ransac_line(pts, iterNum, thDist, thInlrRatio):
# RANSAC Use RANdom SAmple Consensus to fit a line
# RESCOEF = RANSAC(PTS,ITERNUM,THDIST,THINLRRATIO) PTS is 2*n matrix including
# n points, ITERNUM is the number of iteration, THDIST is the inlier
# distance threshold and ROUND(THINLRRATIO*SIZE(PTS,2)) is the inlier number threshold. The final
# fitted line is y = alpha*x+beta.
# Yan Ke @ THUEE, xjed09@gmail.com
#
# modified by georgios evangelidis
# TODO: not finished implementing it since it's not used
assert False
sampleNum = 2
ptNum = pts.shape[1]
thInlr = np.round(thInlrRatio * ptNum)
inlrNum = np.zeros((1, iterNum))
theta1 = np.zeros((1,iterNum))
rho1 = np.zeros((1, iterNum))
for p in range(1, iterNum + 1):
#% 1. fit using 2 random points
sampleIdx = rand_index(ptNum, sampleNum)
#ptSample = pts(:,sampleIdx)
ptSample = pts[:, sampleIdx - 1]
#d = ptSample(:,2)-ptSample(:,1)
    d = ptSample[:, 1] - ptSample[:, 0]
#d=d/norm(d) #% direction vector of the line
    d = d / np.linalg.norm(d) #% direction vector of the line
#% 2. count the inliers, if more than thInlr, refit else iterate
#n = [-d(2),d(1)] #% unit normal vector of the line
n = np.c_[-d[1], d[0]] #% unit normal vector of the line
#dist1 = n*(pts-repmat(ptSample(:,1),1,ptNum))
dist1 = n * (pts - repmat(ptSample[:, 0], 1, ptNum)) # TODO: check more
inlier1 = find(abs(dist1) < thDist)
#inlrNum(p) = length(inlier1)
inlrNum[p - 1] = len(inlier1)
#if length(inlier1) < thInlr, continue end
if len(inlier1) < thInlr:
continue
#ev = princomp(pts(:,inlier1)')
ev = princomp(pts[:, inlier1].T)
#d1 = ev(:,1)
d1 = ev[:, 0]
#theta1(p) = -atan2(d1(2),d1(1)) #% save the coefs
theta1[p - 1] = - math.atan2(d1[1], d1[0]) #% save the coefs
#rho1(p) = [-d1(2),d1(1)]*mean(pts(:,inlier1),2)
        rho1[p - 1] = np.dot(np.array([-d1[1], d1[0]]), pts[:, inlier1].mean(1))
#% 3. choose the coef with the most inliers
#[~,idx] = max(inlrNum)
    idx = np.argmax(inlrNum)
    theta = theta1[idx]
    rho = rho1[idx]
    alpha = -math.sin(theta) / math.cos(theta)
    beta = rho / math.cos(theta)
return alpha, beta
"""
'''
About costs:
- Evangelidis' causal() uses a
mean over Vote space and weighted sum w.r.t. the scales
- my causal() uses a simple summation
-
- Evangelidis' dp3() starts from
sum after scale of Vote space
but also uses in the update phase of the memoization table the
weights over the different scales.
'''
def compute_cost(crossref, v, file_name="crossref.txt"):
# v[r][q] = votes of ref frame r for query frame q
print("compute_cost(): v.shape = %s" % str(v.shape))
print("compute_cost(): crossref.shape = %s" % str(crossref.shape))
# TODO: print also a synchronization error (look at TPAMI 2013 Evangelidis)
num_back = 0
total_step = 0
penalty_cost = 0
my_min = crossref[0][1]
my_max = crossref[0][1]
for i in range(1, crossref.shape[0]):
if my_min > crossref[i][1]:
my_min = crossref[i][1]
if my_max < crossref[i][1]:
my_max = crossref[i][1]
total_step += abs(crossref[i][1] - crossref[i - 1][1])
# TODO: check also if we stay too long in the same ref frame and
# penalize if more than 10-20 same value in a row
penalty_cost += abs(crossref[i][1] - crossref[i - 1][1])
if crossref[i][1] < crossref[i - 1][1]:
num_back += 1
abs_avg_step = total_step / (crossref.shape[0] - 1)
avg_step = (crossref[crossref.shape[0] - 1][1] - crossref[0][1]) / (
crossref.shape[0] - 1)
cost = 0.0
my_text2 = "compute_cost(): crossref and v =\n"
for q in range(crossref.shape[0]):
assert crossref[q][0] == q
try:
cost += v[crossref[q][1]][q]
my_text2 += "[%d %d] %.7f " % \
(q, crossref[q][1] + config.initFrame[1],
v[crossref[q][1]][q])
for r in range(int(crossref[q][1]) - 5, int(crossref[q][1]) + 5):
if r < 0:
continue
if r >= v.shape[0]:
break
my_text2 += "%.7f " % v[r, q]
except:
common.DebugPrintErrorTrace()
"""
We print the first to nth order statistics - e.g., the first 5 biggest
vote values.
I got inspired from
https://stackoverflow.com/questions/6910641/how-to-get-indices-of-n-maximum-values-in-a-numpy-array
(see also
https://stackoverflow.com/questions/10337533/a-fast-way-to-find-the-largest-n-elements-in-an-numpy-array)
"""
my_arr = v[:, q].copy()
my_arr_indices = my_arr.argsort()[-5:][::-1]
my_text2 += " max ind = %s" % str(my_arr_indices + config.initFrame[1])
my_text2 += " max vals = %s" % str(my_arr[my_arr_indices])
my_text2 += "\n"
my_text2 += "\n\ncost computed is %.7f\n" % cost
my_text2 += "penalty is %.7f\n" % penalty_cost
my_text2 += "reference frames are in the interval [%d, %d]\n" % \
(my_min + config.initFrame[1], my_max + config.initFrame[1])
my_text2 += "absolute avg step computed is %.7f\n" % abs_avg_step
my_text2 += " avg step computed is %.7f\n" % avg_step
my_text2 += "Number of times going back (num_back) is %d" % num_back
# TODO: print also a synchronization error (look at TPAMI 2013 Evangelidis)
f_output = open(file_name, "wt")
f_output.write(my_text2)
f_output.close()
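# A minimal sketch (comment only, not executed) of how compute_cost() is
# meant to be called; the shapes below are illustrative assumptions:
#
#   v = np.random.rand(num_frames_r, num_frames_q)       # votes: ref x query
#   crossref = np.array([[q, best_ref_for_q] for q in range(num_frames_q)])
#   compute_cost(crossref, v, "crossref_example.txt")
#
# The reported cost is the sum of v[crossref[q][1]][q] over all query frames
# q, i.e. the total votes collected along the chosen reference-frame path.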
def causal_alex(v_space, num_frames_q, num_frames_r):
nos = v_space.shape[2]
# We transform nan in 0 in v_space
for i in range(1, nos + 1):
v_temp = v_space[:, :, i - 1]
v_temp[np.isnan(v_temp)] = 0
v_space[:, :, i - 1] = v_temp
# TODO: we should use a weighted sum w.r.t. the scales,
# just like in causal()
v = v_space.sum(2)
crossref = np.zeros((num_frames_q, 2))
for iFor in range(num_frames_q):
crossref[iFor, 0] = iFor
b = v[:, iFor].argmax()
crossref[iFor, 1] = b
print("causal_alex(): crossref = %s" % str(crossref))
compute_cost(crossref, v, "crossref_causal_Alex.txt")
print("causal_alex(): END")
# TODO: write crossref_causal_Alex.txt
return crossref
def causal(v_space, H, num_frames_q, num_frames_r, bov_flag, crop_flag,
const_type,
nop=0):
# causal() is the local/greedy optimization solution
# nargin>7 means that we do the local smoothing with RANSAC (see the paper)
print("causal(): At entrance \n"
" num_frames_q=%s, num_frames_r=%s, bov_flag=%d, crop_flag=%d, "
"const_type=%d, nop=%d" %
(num_frames_q, num_frames_r, bov_flag, crop_flag,
const_type, nop))
# Normally bov_flag=0, crop_flag=0, const_type=1,nop=0
if common.MY_DEBUG_STDOUT:
print("causal(): v_space.shape = %s" % str(v_space.shape))
print("causal(): H.shape = %s" % str(H.shape))
for i in range(v_space.shape[2]):
common.DebugPrint("causal(): v_space[:, :, %d] = \n%s" % (
i, str(v_space[:, :, i])))
for i in range(H.shape[2]):
common.DebugPrint(
"causal(): H[:, :, %d] = \n%s" % (i, str(H[:, :, i])))
# 3D matrix
nos = v_space.shape[2]
    # same as in the multi_scale_harris function
sigma_0 = 1.2
n = range(0, nos)
# NOT GOOD in Python: sigma_d = math.sqrt(1.8)**n * sigma_0
sq18 = np.ones((1, nos)) * math.sqrt(1.8)
sigma_d = sq18 ** n * sigma_0
w = sigma_d[0]
w = w / w.sum()
# Alex: normally NOT executed
if bov_flag == 1:
w = w[:, ::-1] # We flip on the vertical (left becomes right)
print("causal(): w = %s" % str(w))
# Alex: normally NOT executed
# TODO: Remove assert False or delete code block.
"""
if (const_type == 1) and (bov_flag == 1):
assert False # Normally this code does NOT get executed
vv = np.zeros((v_space.shape[0], v_space.shape[1]))
for j in range(1, nos + 1):
vv = vv + v_space[:, :, j - 1]
X, Y = sort(vv, "descend")
# enable top-N list
N = 300
for s in range(1, nos + 1):
for i in range(1, v_space.shape[1] + 1):
y = Y[:, i - 1]
votes = v_space[:, i - 1, s - 1]
h = H[:, i - 1, s - 1]
votes[y[N:]] = 0
h[y[N:]] = 0
v_space[:, i - 1, s - 1] = votes
H[:, i - 1, s - 1] = h
"""
vv = None
crossref = np.zeros((num_frames_q, 2))
# We transform nan in 0 in v_space
"""
We substitute i - 1 with i, since array indexing starts from 1 in Matlab
and 0 in Python.
"""
for i in range(nos):
v_temp = v_space[:, :, i]
v_temp[np.isnan(v_temp)] = 0
v_space[:, :, i] = v_temp
# Alex: I personally find this idea of using filter2() VERY BAD -
# the results in v_space should already be VERY good
if not CAUSAL_DO_NOT_SMOOTH:
# this filtering of votes favors smoother results
b = Matlab.hamming(11)
"""
We substitute i - 1 with i, since array indexing starts from 1 in Matlab
and 0 in Python.
"""
for i in range(nos):
"""
From the Matlab help:
Y = filter2(h,X) filters
the data in X with the two-dimensional FIR filter
in the matrix h. It computes the result, Y,
using two-dimensional correlation, and returns the central part of
the correlation that is the same size as X.
"""
v_space[:, :, i] = Matlab.filter2(b, v_space[:, :, i])
# TODO: do the optimization Evangelidis says here
"""
From Matlab help:
M = mean(A,dim) returns
the mean values for elements along the dimension of A specified
by scalar dim. For matrices, mean(A,2) is
a column vector containing the mean value of each row.
"""
# this might help more instead of starting from zero votes
v = v_space.mean(2)
"""
We substitute i - 1 with i, since array indexing starts from 1 in Matlab
and 0 in Python.
"""
for i in range(nos):
if crop_flag == 0:
if (const_type == 1) and (bov_flag == 1):
# TODO: think well *
v += w[i] * H[:, :, i] + v_space[:, :, i]
else:
# Alex: we are normally in this case, since crop_flag == 0,
# const_type == 1, bov_flag == 0
# TODO: think well * Exception "ValueError: operands could
# not be broadcast together with shapes (5) (23,8)"
v += w[i] * v_space[:, :, i]
else:
v += w[i] * v_space[:, :, i] + H[:, :, i] # TODO: think well *
if common.MY_DEBUG_STDOUT:
common.DebugPrint("causal(): v.shape = %s" % str(v.shape))
common.DebugPrint("causal(): v (the matrix used to choose the"
"max-voting reference frame) = %s" % str(v))
"""
We substitute iFor -1 with iFor since arrays start with 0 in Python,
not with 1 like in Matlab
"""
for iFor in range(num_frames_q):
crossref[iFor, 0] = iFor # TODO - think well
b = v[:, iFor].argmax()
a = v[:, iFor][b]
crossref[iFor, 1] = b # TODO - think well
# We normally do NOT execute the following code, since nop == 0
# TODO: This code is broken, both mod and ransac_line methods either don't
# exist or are not finished.
"""
if nop != 0:
xx = crossref[:, 0].T
yy = crossref[:, 1].T
yy_new = yy
if mod(nop, 2) == 0:
quit()
# miso is used only as array index and in
# range expressions --> it can be an integer
miso = (nop - 1) / 2
if nop < 7:
pass
for iFor in range(miso + 1, len(xx) - miso + 1):
xx = xx[iFor - miso - 1 : iFor + miso]
yy = yy[iFor - miso - 1 : iFor + miso]
if nop < 9:
# iter_num is used only in range expressions and
# array dimensions --> it can be an integer.
iter_num = nop * (nop - 1) / 2
else:
iter_num = 10
th_dist = 2
th_inlr_ratio = 0.6
# From Matlab help:
# RANSAC Use RANdom SAmple Consensus to fit a line
# RESCOEF = RANSAC(PTS,ITERNUM,THDIST,THINLRRATIO) PTS is 2*n
# matrix including n points, ITERNUM is the number of iteration,
# THDIST is the inlier distance threshold and
# ROUND(THINLRRATIO*SIZE(PTS,2)) is the inlier number threshold.
# The final fitted line is y = alpha*x+beta.
# Yan Ke @ THUEE, xjed09@gmail.com
alpha, beta = ransac_line( np.r_[xx, yy], iter_num,
th_dist, th_inlr_ratio)
if alpha != 0:
yy_new = alpha * xx + beta # TODO: think well *
yy_new[iFor - 1] = yy_new[miso+1 - 1]
crossref[:, 2] = yy_new
"""
crossref = crossref.astype(int)
print("causal(): crossref = %s" % str(crossref))
compute_cost(crossref, v, "crossref_causal.txt")
return crossref
def dp3(vspace, num_frames_r, num_frames_q, bov_flag):
    # TODO: There are two vars in this fn, one 'D' and the other 'd'. We should
# find a different name for one of them.
# Dynamic programming for a maximum-vote path in vote-space
# 2010, Georgios Evangelidis <georgios.evangelidis@iais.fraunhofer.de>
print("Entered dp3(): Running dynamic programming...")
if common.MY_DEBUG_STDOUT:
common.DebugPrint("dp3(): v_space = %s" % str(vspace))
crossref = np.zeros((num_frames_q, 2))
sigma_0 = 1.2
r, c, d = vspace.shape
n = np.array(range(1, d + 1))
sigma_i = math.sqrt(1.8)**n * sigma_0
w = sigma_i
w = w / float(w.sum())
if bov_flag == 1:
w = w[:, ::-1]
# Initialization
D = np.zeros((r + 1, c + 1))
D[0, :] = np.nan
D[:, 0] = np.nan
D[0, 0] = 0
"""
We substitute i - 1 with i since arrays start with 0 in Python,
not with 1 like in Matlab.
"""
for i in range(d):
v_temp = vspace[:, :, i]
v_temp[np.isnan(v_temp)] = 0 # TODO: check OK
vspace[:, :, i] = v_temp
vv = np.zeros((r, c))
"""
We substitute j - 1 with j since arrays start with 0 in Python,
not with 1 like in Matlab.
"""
for j in range(d):
vv = vv + vspace[:, :, j]
D[1:, 1:] = vv
new_dp3_alex = False
if new_dp3_alex:
# Alex: added cost
cost = np.zeros((r + 1, c + 1))
tback = np.zeros((r + 1, c + 1))
if common.MY_DEBUG_STDOUT:
common.DebugPrint("dp3(): printing locally optimum solutions:")
# Alex: trying out to find a better solution than dp3() !!!!TODO : more
# This solution is basically the one returned by causal() IF we do NOT
# apply Matlab.filter2() on v_space
for j in range(1, c + 1):
max_col = 0.0
max_pos = -1
for i in range(1, r + 1):
assert D[i, j] >= 0.0
if max_col < D[i, j]:
max_col = D[i, j]
# So for query frame j we have a candidate matching
# ref frame i
max_pos = i
common.DebugPrint("dp3(): for query frame %d - "
"candidate frame %d" % (j - 1, max_pos))
common.DebugPrint("dp3(): for query frame %d we found matching "
"ref frame %d" % (j - 1, max_pos))
# TODO: make i =0.., j=0.. and substitute i-1 with i, i-2 with i-1, etc
for i in range(1, r + 1):
for j in range(1, c + 1):
if (i > 1) and (j > 1):
dd1 = w[0] * vspace[i - 2, max(0, j - 3), 0]
dd2 = w[0] * vspace[max(0, i - 3), j - 2, 0]
dd3 = w[0] * vspace[i - 2, j - 2, 0]
dd4 = w[0] * vspace[i - 2, j - 1, 0]
dd5 = w[0] * vspace[i - 1, j - 2, 0]
if d > 1:
for sc in range(2, d + 1):
dd1 = max(dd1, w[sc - 1] *
vspace[i - 2, max(0, j - 3), sc - 1])
dd2 = max(dd2, w[sc - 1] * \
vspace[max(0, i - 3), j - 2, sc - 1])
dd3 = max(dd3, w[sc - 1] * vspace[i - 2, j - 2, sc - 1])
dd4 = max(dd4, w[sc - 1] * vspace[i - 2, j - 1, sc - 1])
dd5 = max(dd5, w[sc - 1] * vspace[i - 1, j - 2, sc - 1])
D[i - 1, j - 2] += dd1
D[i - 2, j - 1] += dd2
D[i - 1, j - 1] += dd3
D[i - 1, j] += dd4
D[i, j - 1] += dd5
dmax, tb = Matlab.max(np.array([
D[i - 1, j - 1] + 1.0 / math.sqrt(2.0),
D[i - 2, j - 1] + 1.0 / math.sqrt(5.0),
D[i - 1, j - 2] + 1.0 / math.sqrt(5.0),
D[i - 1, j] + 1,
D[i, j - 1] + 1]))
else:
dmax, tb = Matlab.max(
np.array([D[i - 1, j - 1], D[i - 1, j], D[i, j - 1]]))
if common.MY_DEBUG_STDOUT:
common.DebugPrint("dp3(): dmax = %s" % str(dmax))
common.DebugPrint("dp3(): tb = %s" % str(tb))
if new_dp3_alex:
cost[i, j] = 0 # TODO: think more
else:
D[i, j] += dmax # TODO: for me it's weird he adds dmax here...
tback[i, j] = tb
if common.MY_DEBUG_STDOUT:
common.DebugPrint("dp3(): D.shape = %s" % str(D.shape))
common.DebugPrint("dp3(): D = %s" % str(D))
common.DebugPrint("dp3(): tback.shape = %s" % str(tback.shape))
common.DebugPrint("dp3(): tback = %s" % str(tback))
# Traceback
i = r + 1
j = c + 1
y = i - 1
x = j - 1
while (i > 2) and (j > 2):
tb = tback[i - 1, j - 1] + 1 # In Matlab, max returns indices from 1..
if tb == 1:
i -= 1
j -= 1
elif tb == 2:
i -= 2
j -= 1
elif tb == 3:
i -= 1
j -= 2
elif tb == 4:
i -= 1
j = j
elif tb == 5:
j -= 1
i = i
else:
assert False
y = np.hstack([i - 1, y])
x = np.hstack([j - 1, x])
if common.MY_DEBUG_STDOUT:
common.DebugPrint("dp3(): before D.shape = %s" % str(D.shape))
common.DebugPrint("dp3(): before D = %s" % str(D))
# Strip off the edges of the D matrix before returning
D = D[1: (r + 1), 1: (c + 1)]
if common.MY_DEBUG_STDOUT:
common.DebugPrint("dp3(): D.shape = %s" % str(D.shape))
common.DebugPrint("dp3(): D = %s" % str(D))
rd_start = 1
if common.MY_DEBUG_STDOUT:
common.DebugPrint("dp3(): v_space.shape = %s" % str(vspace.shape))
# TODO: understand well what is x,y and why computes p
for i in range(0, vspace.shape[1]):
crossref[i, 0] = i # TODO: think if OK
p = np.nonzero(x == i)
p = p[0]
if common.MY_DEBUG_STDOUT:
common.DebugPrint("dp3(): x.shape = %s" % str(x.shape))
common.DebugPrint("dp3(): x = %s" % str(x))
common.DebugPrint("dp3(): y.shape = %s" % str(y.shape))
common.DebugPrint("dp3(): y = %s" % str(y))
common.DebugPrint("dp3(): i = %s" % str(i))
common.DebugPrint("dp3(): p = %s" % str(p))
if p.size == 0:
# Alex: Vali Codreanu said to change from temp=0 to temp=3
temp = 0
if common.MY_DEBUG_STDOUT:
common.DebugPrint("dp3(): temp = %s" % str(temp))
crossref[i, 1] = 0 + rd_start - 1
else:
temp = y[p]
if common.MY_DEBUG_STDOUT:
common.DebugPrint("dp3(): temp = %s" % str(temp))
if temp.size == 1:
# If temp has only 1 element:
crossref[i, 1] = temp + rd_start - 1
else:
crossref[i, 1] = temp[-1] + rd_start - 1
common.DebugPrint("dp3(): crossref = %s" % str(crossref))
compute_cost(crossref, vv, "crossref_dp3.txt")
return y, x, D, tback, crossref
def dp_alex(v_space, num_frames_r, num_frames_q, bov_flag, prev_ref=5,
next_ref=0):
"""
v_space is a matrix with shape (num_frames_r, num_frames_q).
See multiscale_quad_retrieval.py for definition:
Votes_space = np.zeros( (len(RD), len(QD)) )
"""
t1 = float(cv2.getTickCount())
common.DebugPrint("Entered dp_alex(): Running dynamic programming...")
r, c, d = v_space.shape
# We substitute all NaN's of v_space
for i in range(d):
v_temp = v_space[:, :, i]
v_temp[np.isnan(v_temp)] = 0
v_space[:, :, i] = v_temp
vv = np.zeros((r, c))
for j in range(d):
vv += v_space[:, :, j]
# Checking that vv has positive elements
assert np.nonzero(vv < 0.0)[0].size == 0
print_matrices = True
if common.MY_DEBUG_STDOUT and print_matrices:
print("dp_alex(): r = %d, c = %d" % (r, c))
print("dp_alex(): vv = \n%s" % str(vv))
sys.stdout.flush()
D = np.zeros((r, c))
tback = np.zeros((r, c))
for ref in range(r):
D[ref, 0] = vv[ref, 0]
tback[ref, 0] = -1
for qry in range(1, c):
for ref in range(r):
# We enumerate a few reference frames to find the one with
# highest votes
lb = ref - prev_ref
ub = ref + next_ref
if lb < 0:
lb = 0
if lb >= r:
lb = r - 1
if ub < 0:
ub = 1
if ub >= r:
ub = r - 1
max_pos = lb
for i in range(lb + 1, ub + 1):
"""
We use <= --> we break ties by going forward in the
reference video (incrementing the reference frame for
the next query frame).
"""
if D[max_pos, qry - 1] <= D[i, qry - 1]:
max_pos = i
# max_pos is the maximum vote reference frame for query frame qry
D[ref, qry] += D[max_pos, qry - 1] + vv[ref, qry]
tback[ref, qry] = max_pos
if common.MY_DEBUG_STDOUT and print_matrices:
print("D = \n%s" % str(D))
print("tback = \n%s" % str(tback))
crossref = np.zeros((num_frames_q, 2))
# Find max-cost path (the critical path) for the last query frame:
max_pos = 0
for ref in range(1, r):
"""
We use <= --> we break ties by going forward in the
reference video (incrementing the reference frame for
the next query frame) - debatable if this is a good idea!!!!TODO.
"""
if D[max_pos, c - 1] <= D[ref, c - 1]:
max_pos = ref
print("max_pos = %d" % max_pos)
print("dp_alex(): cost critical path = %s" % str(D[max_pos, c - 1]))
pos_ref = max_pos
    for qry in range(c - 1, -1, -1):
crossref[qry][0] = qry
crossref[qry][1] = pos_ref
common.DebugPrint("qry=%d, pos_ref=%d" % (qry, pos_ref))
        pos_ref = int(tback[pos_ref, qry])  # tback holds floats; cast before indexing
# time took
common.DebugPrint("dp_alex(): crossref = %s" % str(crossref))
compute_cost(crossref, vv, "crossref_dp_Alex.txt")
# TODO: assert cost computed is = D[max_pos,...]
t2 = float(cv2.getTickCount())
my_time = (t2 - t1) / cv2.getTickFrequency()
print("dp_alex() took %.6f [sec]" % my_time)
y = None
x = None
return y, x, D, tback, crossref
dp3Orig = dp3
dp3 = dp_alex
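# A minimal sketch (comment only, not executed) of the dynamic program above,
# assuming a tiny vote space with r=3 reference frames, c=2 query frames and
# a single scale:
#
#   v_space = np.zeros((3, 2, 1))
#   v_space[:, :, 0] = np.array([[0.1, 0.0],
#                                [0.9, 0.2],
#                                [0.0, 0.8]])
#   y, x, D, tback, crossref = dp_alex(v_space, 3, 2, bov_flag=0)
#
# D[ref, qry] accumulates the best total vote reaching (ref, qry) from any
# reference frame in [ref - prev_ref, ref + next_ref] at query qry - 1, and
# tback records that argmax so the max-cost path can be traced back from the
# last query frame.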
# TODO: Move tests into separate files.
import unittest
class TestSuite(unittest.TestCase):
def testCausal(self):
# This is a test case from the videos from Evangelidis, with
# frameStep = 200
vspace = np.zeros( (12, 4, 5) )
vspace[:, :, 0] = np.array([ [0, 0, 0, 0],
[0, 0, 1.1646, 0],
[0, 1.1646, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
# The rest of v_space is completely 0 :)
H = np.ones((12, 4, 5))
num_frames_q = 4
num_frames_r = 12
bov_flag = 0
crop_flag = 0
const_type = 1
res = causal(vspace, H, num_frames_q, num_frames_r, bov_flag,
crop_flag, const_type)
print("testCausal(): res from causal() = %s" % str(res))
resGood = np.array([[ 0, 2],
[ 1, 2],
[ 2, 1],
[ 3, 1]])
aZero = res - resGood
self.assertTrue((aZero == 0).all())
res = dp3(vspace, num_frames_r, num_frames_q, bov_flag)
# TODO: test result of dp3()
if __name__ == '__main__':
# See http://docs.scipy.org/doc/numpy/reference/generated/numpy.set_printoptions.html
np.set_printoptions(threshold=1000000, linewidth=5000)
unittest.main()
|
{"hexsha": "fe9a013d363f4587ca50dea18218bc0eb9e2033d", "size": 27454, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/multiscale_synchro_decision.py", "max_stars_repo_name": "Spectavi/video-diff", "max_stars_repo_head_hexsha": "4ad28aea48877937f6b5b25f374f9c14eaf79212", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-18T21:24:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-18T21:24:42.000Z", "max_issues_repo_path": "Python/multiscale_synchro_decision.py", "max_issues_repo_name": "Spectavi/video-diff", "max_issues_repo_head_hexsha": "4ad28aea48877937f6b5b25f374f9c14eaf79212", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/multiscale_synchro_decision.py", "max_forks_repo_name": "Spectavi/video-diff", "max_forks_repo_head_hexsha": "4ad28aea48877937f6b5b25f374f9c14eaf79212", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8790419162, "max_line_length": 115, "alphanum_fraction": 0.5035331828, "include": true, "reason": "import numpy", "num_tokens": 8181}
|
# -*- coding: utf-8 -*-
"""
Merge two X/y sets. Takes care of reading two feature/label files, merging
them, shuffling them, and writing them to a new output directory.
Of course, I could also generalize this to two lists of Xs and ys, but that
might encourage even worse practices.
One big approximation in all of this is that all data fits into memory at
once. But it should not be hard to wrap it in Dask if this is not the case.
"""
import os
import pickle
import click
import numpy as np
from sklearn.utils import shuffle
from oximachine_featurizer.utils import read_pickle
RANDOM_SEED = 1234
class Merger:
"""Class to merge two featrue sets"""
def __init__( # pylint:disable=too-many-arguments
self,
features0,
features1,
labels0,
labels1,
names0,
names1,
outdir_features,
outdir_labels,
outdir_names,
):
self.features0 = features0
self.features1 = features1
# make sure that they have the same number of features
assert self.features0.shape[1] == self.features1.shape[1]
self.labels0 = labels0
self.labels1 = labels1
self.names0 = names0
self.names1 = names1
# make sure labels have the same number of columns (one) and the same length as the corresponding features
assert len(self.features0) == len(self.labels0)
assert len(self.features1) == len(self.labels1)
assert len(self.labels0) == len(self.names0)
        assert len(self.labels1) == len(self.names1)
# set the outdir
self.outdir_features = outdir_features
self.outdir_labels = outdir_labels
self.outdir_names = outdir_names
@staticmethod
def stack_arrays(features0, features1, labels0, labels1, names0, names1):
"""Perform the actual merging"""
X = np.vstack([features0, features1]) # pylint:disable=invalid-name
y = np.array(list(labels0) + list(labels1)) # pylint:disable=invalid-name
names = names0 + names1
return X, y, names
@classmethod
def from_files( # pylint:disable=too-many-arguments
cls,
features0path,
features1path,
labels0path,
labels1path,
names0path,
names1path,
outdir_features,
outdir_labels,
outdir_names,
):
"""Construct class from filepaths"""
features0 = np.load(features0path)
features1 = np.load(features1path)
labels0 = np.load(labels0path)
labels1 = np.load(labels1path)
names0 = read_pickle(names0path)
names1 = read_pickle(names1path)
return cls(
features0,
features1,
labels0,
labels1,
names0,
names1,
outdir_features,
outdir_labels,
outdir_names,
)
@staticmethod
def output( # pylint:disable=invalid-name
X,
y,
names,
outdir_features,
outdir_labels,
outdir_names,
):
"""Write the new training set files for the merged training set"""
features, labels, names = shuffle(X, y, names, random_state=RANDOM_SEED)
np.save(os.path.join(outdir_features, "features"), features)
np.save(os.path.join(outdir_labels, "labels"), labels)
with open(os.path.join(outdir_names, "names.pkl"), "wb") as picklefile:
pickle.dump(names, picklefile)
def merge(self):
"""Stack arrays and shuffle"""
X, y, names = Merger.stack_arrays( # pylint:disable=invalid-name
self.features0,
self.features1,
self.labels0,
self.labels1,
self.names0,
self.names1,
)
# Now shuffle and output
Merger.output(
X, y, names, self.outdir_features, self.outdir_labels, self.outdir_names
)
@click.command("cli")
@click.argument("features0path")
@click.argument("features1path")
@click.argument("labels0path")
@click.argument("labels1path")
@click.argument("names0path")
@click.argument("names1path")
@click.argument("outdir_features")
@click.argument("outdir_labels")
@click.argument("outdir_names")
def run_merging( # pylint:disable=too-many-arguments
features0path,
features1path,
labels0path,
labels1path,
names0path,
names1path,
outdir_features,
outdir_labels,
outdir_names,
):
"""CLI"""
merger = Merger.from_files(
features0path,
features1path,
labels0path,
labels1path,
names0path,
names1path,
outdir_features,
outdir_labels,
outdir_names,
)
merger.merge()
if __name__ == "__main__":
run_merging() # pylint:disable=no-value-for-parameter
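# Example invocation (paths are hypothetical):
#
#   python merge_two_x_y.py features0.npy features1.npy labels0.npy \
#       labels1.npy names0.pkl names1.pkl out/features out/labels out/names
#
# The merged arrays are shuffled with the fixed RANDOM_SEED above, so
# repeated runs produce the same ordering.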
|
{"hexsha": "e3f5095371789eab5e0dc65df332b76733095db0", "size": 4840, "ext": "py", "lang": "Python", "max_stars_repo_path": "run/merge_two_x_y.py", "max_stars_repo_name": "kjappelbaum/mof_oxidation_states", "max_stars_repo_head_hexsha": "1bbfe9d84802b2248a23ac3d3ee999ed649fe816", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-01-17T18:22:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-06T18:33:28.000Z", "max_issues_repo_path": "run/merge_two_x_y.py", "max_issues_repo_name": "davidtangGT/oximachine_featurizer", "max_issues_repo_head_hexsha": "a3fac6b19f56fb45be89718143d54f932d1e1611", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2020-09-25T05:02:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-30T16:30:33.000Z", "max_forks_repo_path": "run/merge_two_x_y.py", "max_forks_repo_name": "davidtangGT/oximachine_featurizer", "max_forks_repo_head_hexsha": "a3fac6b19f56fb45be89718143d54f932d1e1611", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-12-02T12:08:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-18T02:07:39.000Z", "avg_line_length": 27.6571428571, "max_line_length": 114, "alphanum_fraction": 0.6247933884, "include": true, "reason": "import numpy", "num_tokens": 1129}
|
import torch
import numpy as np
import argparse
import mmcv
from mmcv.runner import load_checkpoint, init_dist, get_dist_info, build_optimizer, set_random_seed
from mmcv import Config, DictAction
from mmaction.models import build_recognizer
from dataloader.dataloaderTCN import DataloaderTCN
from scipy.io import savemat
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
def parse_args():
parser = argparse.ArgumentParser(description="Arguments for Feature extraction")
parser.add_argument("--model_checkpoint", type=str, help="Location to model (.pth) file")
parser.add_argument("--model_config", type=str, help="Location to config file")
parser.add_argument("--batch_size", type=int)
parser.add_argument("--num_workers", type=int)
parser.add_argument("--videos_per_gpu", type=int)
parser.add_argument("--annotation_file", type=str)
parser.add_argument("--data_prefix", type=str)
parser.add_argument("--transcriptions_dir", type=str)
parser.add_argument("--features_output_dir", type=str)
parser.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
args = parser.parse_args()
return args
def prepare_model(cfg, args, device=torch.device('cuda:0')):
model = build_recognizer(cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
load_checkpoint(model, args.model_checkpoint, map_location=device, strict=False)
model.to(device)
model.cfg = cfg
model.eval()
return model.cuda()
def prepare_dataloader(args):
dataloader = DataloaderTCN(args.batch_size,
args.num_workers,
args.videos_per_gpu,
False,
args.annotation_file,
args.data_prefix,
args.seed)
return dataloader.get_loader()
def get_ground_truth(args):
converter = {0:0, 1:1, 2:2, 3:3, 4:4, 5:5, 7:6, 8:7, 9:8, 10:9}
video_transcriptions_file = args.annotation_file.split("/")[-1]
transcriptions_dir = args.transcriptions_dir
transcriptions_file_path = transcriptions_dir + "/" + video_transcriptions_file
f = open(transcriptions_file_path)
all_lines = f.readlines()
gesture_segments = [[[int(line.split()[0]), int(line.split()[1])], int(line.split()[2][1:])-1] for line in all_lines]
first_frame = gesture_segments[0][0][0]
last_frame = gesture_segments[-1][0][1]
remaining_frames = 32 - (last_frame - first_frame) % 32
last_frame += remaining_frames
gesture_segments[-1][0][1] += remaining_frames
final_label = []
for frame in range(first_frame, last_frame, 2):
for segments in gesture_segments:
gesture_label = segments[-1]
gesture_start = segments[0][0]
gesture_end = segments[0][1]
            if gesture_start <= frame <= gesture_end:
final_label.append(converter[gesture_label])
return final_label
def save_to_mat(S, Y, args):
features = {"S": S, "Y": Y}
video_transcriptions_file = args.annotation_file.split("/")[-1]
mat_file_path = args.features_output_dir + "/" + video_transcriptions_file.split(".")[0] + ".mat"
savemat(mat_file_path, features, oned_as='row')
def main():
args = parse_args()
cfg = Config.fromfile(args.model_config)
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
cfg.seed = args.seed
model = prepare_model(cfg, args)
dataloader = prepare_dataloader(args)
modelBackbone = model.backbone
final_gt = get_ground_truth(args)
final_output = None
for (batchIdx, batch) in enumerate(dataloader):
frames = batch["imgs"].cuda()
frames = frames.reshape((-1,) + frames.shape[2:])
with torch.no_grad():
output = modelBackbone(frames)
if final_output is None:
final_output = output
else:
final_output = torch.cat([final_output, output], dim=0)
    final_output = final_output.permute(0, 2, 1, 3, 4)  # chunks x feature_len x T x H x W => chunks x T x feature_len x H x W
avgPool = torch.nn.AdaptiveAvgPool2d(1)
final_output = avgPool(final_output) # T* feature_len * H * W
final_output = final_output.reshape(final_output.shape[0]*final_output.shape[1], -1)
S = final_output.detach().cpu().numpy()
Y = np.array(final_gt)
save_to_mat(S, Y, args)
if __name__ == '__main__':
main()
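# Example invocation (paths and values are hypothetical):
#
#   python feature_extractor.py --model_checkpoint swin.pth \
#       --model_config config.py --batch_size 1 --num_workers 2 \
#       --videos_per_gpu 1 --annotation_file annotations/video1.txt \
#       --data_prefix data/frames --transcriptions_dir transcriptions \
#       --features_output_dir features
#
# One .mat file per video is written, containing the backbone features S and
# the per-frame gesture labels Y.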
|
{"hexsha": "90077805a2b88a470c07bb5128961a69520a0db3", "size": 4963, "ext": "py", "lang": "Python", "max_stars_repo_path": "features_extraction/feature_extractor.py", "max_stars_repo_name": "vinayakShenoy/Video-Swin-Transformer", "max_stars_repo_head_hexsha": "bd90abce394eca0db90d80c334c8b05aba2233b2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "features_extraction/feature_extractor.py", "max_issues_repo_name": "vinayakShenoy/Video-Swin-Transformer", "max_issues_repo_head_hexsha": "bd90abce394eca0db90d80c334c8b05aba2233b2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "features_extraction/feature_extractor.py", "max_forks_repo_name": "vinayakShenoy/Video-Swin-Transformer", "max_forks_repo_head_hexsha": "bd90abce394eca0db90d80c334c8b05aba2233b2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7734375, "max_line_length": 121, "alphanum_fraction": 0.6473906911, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1171}
|
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE DeriveFunctor #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
import qualified Data.Map as Map
import Data.Map(Map)
import qualified Data.Set as Set
import Control.Arrow(first, second, (&&&))
import Data.List
import Data.Maybe
import Debug.Trace
import Data.Ord
import qualified Text.Printf as Printf
import qualified Data.Array as Array
import Data.Array(Array, (!), range, Ix)
import qualified Data.Matrix
import qualified System.Random
import Control.Monad
import Control.Monad.Identity
import System.IO(stdout, hSetBuffering, BufferMode(..))
import Control.Parallel.Strategies
import GHC.Generics
import qualified Data.Eigen.Matrix
import Foreign.C.Types
import qualified Numeric.LinearAlgebra.HMatrix
import qualified Numeric.LinearAlgebra.Data
import Control.Arrow
import GHC.Stack
import Matrix
data RawMaterial =
BuriedIronRaw
| BuriedCopperRaw
| BuriedStoneRaw
-- | PetroleumGasRaw
-- | LightOilRaw
-- | HeavyOilRaw
| CrudeOilRaw
| BuriedCoalRaw
| PollutionRaw
deriving (Eq, Ord, Enum, Bounded, Show, Ix, Generic)
instance NFData RawMaterial
type RawMaterialPressure = Map Product Rat
data Product =
IronPlate
| CopperPlate
| SteelPlate
| IronOre
| CopperOre
| CopperCable
| GearWheel
| Plastic
| Pipe
| ElectronicCircuit
| AdvancedCircuit
| EngineUnit
| ProcessingUnit
| ElectricMiningDrill
| PiercingRoundMagazine
| FirearmRoundMagazine
| Grenade
| GunTurret
| SciencePack1
| SciencePack2
| SciencePack3
| SciencePackMilitary
| SciencePackProduction
| SciencePackHighTech
| SpeedModule
| SpeedModule2
| SpeedModule3
| ProductivityModule
| ProductivityModule2
| ProductivityModule3
| EfficiencyModule
| EfficiencyModule2
| EfficiencyModule3
| Sulfur
| SulfuricAcid
| CoalLiquefaction
| ResearchCoalLiquefaction
| ResearchLaserTurretDamage5
| ResearchRocketSilo
| ResearchNuclearPower
| ResearchEndgame
| Inserter
| TransportBelt
| AssemblingMachine1
| AssemblingMachine2
| AssemblingMachine3
| SteelFurnace
| ElectricFurnace
| ElectricEngineUnit
| Lubricant
| StoneBrick
| Stone
| LaserTurret
| Battery
| ChemicalPlant
| OilRefinery
| LabBuilding
| BoilerBuilding
| SteamEngineBuilding
| SolarFacilityBuilding
| NuclearFacilityBuilding
| Roboport
| Substation
| Accumulator
| SolarPanel
| StoneFurnace
| SteamTurbine
| NuclearReactor
| HeatExchanger
| HeatPipe
| Concrete
| Beacon
| RocketPart
| RocketSiloBuilding
| ControlModule
| LightweightStructure
| RocketFuel
| SciencePackSpace
| Satellite
| Radar
| ElectricalEnergy -- in J
| ChemicalEnergy -- in J
| SolidFuel
| Steam
| Coal
| BuriedIron
| BuriedCopper
| PetroleumGas
| LightOil
| HeavyOil
| CrudeOil
| BuriedCoal
| BuriedStone
| Pollution
deriving (Eq, Ord, Show, Enum, Bounded, Ix, Generic)
instance NFData Product where
newtype Time = Time { unTime :: Rat } deriving (Eq, Ord, Show, NFData)
instance Linear Time where
zero = Time 0
add (Time x) (Time y) = Time (add x y)
minus (Time x) = Time (minus x)
instance VectorSpace Time where
type Scalar Time = Rat
scale x (Time t) = Time (x * t)
data VenueKind =
AssemblyVenueKind
| SmelterVenueKind
| ChemicalVenueKind
| MinerVenueKind
| LabVenueKind
| BoilerVenueKind
| SteamEngineVenueKind
| GreenPowerVenueKind
| RefineryVenueKind
| RocketSiloVenueKind
| NoVenueVenueKind
deriving (Show, Ord, Eq)
venuesByKind :: VenueKind -> [Venue]
venuesByKind AssemblyVenueKind = [Assembly2, Assembly3]
venuesByKind SmelterVenueKind = [SmelterBurner, SmelterElectric]
venuesByKind ChemicalVenueKind = [Chemical]
venuesByKind MinerVenueKind = [Miner]
venuesByKind LabVenueKind = [Lab]
venuesByKind BoilerVenueKind = [Boiler]
venuesByKind SteamEngineVenueKind = [SteamEngine]
venuesByKind GreenPowerVenueKind = [SolarFacility, NuclearFacility]
venuesByKind RefineryVenueKind = [Refinery]
venuesByKind NoVenueVenueKind = [NoVenue]
venuesByKind RocketSiloVenueKind = [RocketSilo]
data Venue =
Assembly2
| Assembly3
| SmelterBurner
| SmelterElectric
| Chemical
| Miner
| Lab
| Boiler
| SteamEngine
| SolarFacility
| NuclearFacility
| Refinery
| RocketSilo
| NoVenue
deriving (Show, Eq, Ord, Generic)
data ModuleConfig = ModuleConfig
{
configSpeedBonus :: Rat,
configProductivityBonus :: Rat,
configEnergyBonus :: Rat,
configPollutionBonus :: Rat
} deriving (Eq, Ord, Show, Generic)
instance Linear ModuleConfig where
zero = mempty
add = mappend
minus = scale (-1)
instance VectorSpace ModuleConfig where
type Scalar ModuleConfig = Rat
scale x (ModuleConfig a b c d) =
ModuleConfig
(x * a) (x * b) (x * c) (x * d)
-- venue, modules and modules in beacons
type PreConfig = (Venue, [Product], Maybe Product)
data Config = Config
{
configVenue :: Venue,
configModules :: ModuleConfig,
configModuleMaterials :: Map Product Rat,
configConstantExtraPower :: Rat
} deriving Generic
instance NFData ModuleConfig
instance NFData Config
speedMultiplier c = (1 + configSpeedBonus x) where x = configModules c
productivityMultiplier c = 1 + configProductivityBonus x where x = configModules c
energyMultiplier c = max 0.2 (1 + configEnergyBonus x) where x = configModules c
pollutionMultiplier c = (1 + configPollutionBonus x) where x = configModules c
instance Monoid ModuleConfig where
mempty = ModuleConfig 0 0 0 0
a `mappend` b =
ModuleConfig
{
configSpeedBonus = (configSpeedBonus a + configSpeedBonus b),
configProductivityBonus = (configProductivityBonus a + configProductivityBonus b),
configEnergyBonus = (configEnergyBonus a + configEnergyBonus b),
configPollutionBonus = (configPollutionBonus a + configPollutionBonus b)
}
data Usability =
Unusable | Usable
moduleToConfig SpeedModule = ModuleConfig 0.2 0 0.5 0
moduleToConfig SpeedModule2 = ModuleConfig 0.3 0 0.6 0
moduleToConfig SpeedModule3 = ModuleConfig 0.5 0 0.7 0
moduleToConfig EfficiencyModule = ModuleConfig 0 0 (negate 0.3) 0
moduleToConfig EfficiencyModule2 = ModuleConfig 0 0 (negate 0.4) 0
moduleToConfig EfficiencyModule3 = ModuleConfig 0 0 (negate 0.5) 0
moduleToConfig ProductivityModule = ModuleConfig (negate 0.15) 0.04 0.4 0
moduleToConfig ProductivityModule2 = ModuleConfig (negate 0.15) 0.06 0.6 0
moduleToConfig ProductivityModule3 = ModuleConfig (negate 0.15) 0.10 0.8 0
moduleToConfig p = error $ "not a module: " ++ show p
allModules :: Usability -> [[[Product]]]
allModules usability =
[[ ([]) ]] ++
(map (\ms -> map (\m -> [m]) ms) $
(map return
[ SpeedModule
, EfficiencyModule
-- , EfficiencyModule2
-- , EfficiencyModule3
, SpeedModule2
, SpeedModule3
]
++ [case usability of
Unusable ->
[ ProductivityModule
, ProductivityModule2
, ProductivityModule3
]
Usable -> []]))
choose' :: Int -> [[a]] -> [[a]]
choose' k l
| k < 0 = []
| k == 0 = [[]]
| otherwise = case l of
[] -> []
(xc : xs) -> choose' k xs ++ (do
i <- [1..k]
(x :: a) <- xc
rest <- choose' (k - i) xs
return $ replicate i x ++ rest)
choose k l
| k < 0 = []
| k == 0 = [[]]
| otherwise = case l of
[] -> []
(x : xs) -> map (x:) (choose (k-1) l) ++ choose k xs
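-- Worked examples (easily checked by expanding the recursion): `choose`
-- enumerates size-k multisets, i.e. combinations with repetition, so
--   choose 2 [1,2,3] == [[1,1],[1,2],[1,3],[2,2],[2,3],[3,3]]
-- `choose' k` does the same over groups of alternatives: each group either
-- contributes nothing or some number of copies of a single element chosen
-- from it, with k items picked in total.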
moduleSlots :: Venue -> Int
moduleSlots venue = case venue of
Assembly2 -> 2
Assembly3 -> 4
SmelterElectric -> 2
SmelterBurner -> 0
Chemical -> 3
Miner -> 3
Lab -> 2
Boiler -> 0
SteamEngine -> 0
SolarFacility -> 0
NuclearFacility -> 0
Refinery -> 3
NoVenue -> 0
RocketSilo -> 4
initial_module_config gc Miner = mempty { configProductivityBonus = gc_miner_productivity_bonus gc }
initial_module_config _ _ = mempty
-- we have [beaconizationFactor] factories per beacon
-- and [beaconizationFactor] beacons per factory
beaconizationFactor :: RecipeName -> Integer
beaconizationFactor (ProductRecipe ElectronicCircuit) = 2
beaconizationFactor (ProductRecipe CopperCable) = 7
beaconizationFactor (ProductRecipe SciencePackProduction) = 1
beaconizationFactor (ProductRecipe SciencePackMilitary) = 2
beaconizationFactor (ProductRecipe IronOre) = 6
beaconizationFactor _ = 4
linksPerBeacon :: RecipeName -> Integer
linksPerBeacon = beaconizationFactor
linksPerFactory :: RecipeName -> Integer
linksPerFactory = beaconizationFactor
beaconsPerFactory x = fromInteger (linksPerFactory x) / fromInteger (linksPerBeacon x)
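-- Worked example, following the definitions above: for ElectronicCircuit,
-- beaconizationFactor = 2, so beaconsPerFactory = 2 / 2 = 1 (and indeed 1
-- for every recipe, since linksPerFactory and linksPerBeacon coincide).
-- mkConfig below then charges each beaconized factory one beacon plus two
-- beacon modules in materials, and 480 kW of constant extra power.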
mkConfig gc recipe (venue, modules, beacon) =
Config
venue
(mconcat $
(initial_module_config gc venue)
: map moduleToConfig modules
++ (case beacon of
Just m ->
[ scale (fromIntegral (linksPerFactory recipe)) (moduleToConfig m) ]
Nothing ->
[]
)
)
(Map.fromListWith add
(map (\x -> (x, 1)) modules ++ (
case beacon of
Nothing -> []
Just m ->
let ratio = beaconsPerFactory recipe in
[(m, 2 * ratio), (Beacon, 1 * ratio)]
)
))
(case beacon of
Nothing -> 0
Just _ -> 480e3 * (beaconsPerFactory recipe)
)
-- in Watt
data Power =
ElectricalPower { unPower :: Rat }
| ChemicalPower Rat
baseBoilerPower = 3.6e6
basePower :: Venue -> Power
basePower Assembly2 = ElectricalPower 150e3
basePower Assembly3 = ElectricalPower 210e3
basePower Miner = ElectricalPower 90e3
basePower SmelterElectric = ElectricalPower 180e3
basePower SmelterBurner = ChemicalPower 180e3
basePower Chemical = ElectricalPower 210e3
basePower Lab = ElectricalPower 60e3
basePower Boiler = ChemicalPower baseBoilerPower
basePower SteamEngine = ElectricalPower 0
basePower SolarFacility = ElectricalPower 0
basePower NuclearFacility = ElectricalPower 0
basePower Refinery = ElectricalPower 420e3
basePower NoVenue = ElectricalPower 0
basePower RocketSilo = ElectricalPower 4e6
basePollution Assembly2 = 2.4
basePollution Assembly3 = 1.8
basePollution Miner = 9
basePollution SmelterElectric = 0.9
basePollution SmelterBurner = 3.6
basePollution Chemical = 1.8
basePollution Lab = 0
basePollution Boiler = 27.6923
basePollution Refinery = 3.6
basePollution SolarFacility = 0
basePollution NuclearFacility = 0
basePollution SteamEngine = 0
basePollution NoVenue = 0
basePollution RocketSilo = 0
baseSpeed _ SolarFacility = (42e3 * 176)
baseSpeed _ NuclearFacility = 160e6
baseSpeed _ Assembly2 = 0.75
baseSpeed _ Assembly3 = 1.25
baseSpeed _ Miner = 1 -- factored in into the recipe
baseSpeed _ SmelterElectric = 2
baseSpeed _ SmelterBurner = 2
baseSpeed _ Chemical = 1.25
baseSpeed config Lab = gc_lab_speed_multiplier config
baseSpeed _ Boiler = 1 -- this is factored into the recipe
baseSpeed _ SteamEngine = 1 -- this is factored into the recipe
baseSpeed _ Refinery = 1
baseSpeed _ NoVenue = 1 -- this is meaningless
baseSpeed _ RocketSilo = 1
scaleTime s (Time t) = Time (s * t)
coalToEnergy coal = coal * 8e6 / 2
data Recipe = Recipe
{
recipeName :: RecipeName,
recipeProducts :: [(Product, Rat)],
recipeMaterials :: [(Product, Rat)],
recipeVenueKind :: VenueKind,
recipeTime :: Time
} deriving (Eq, Ord, Show)
data RecipeName =
ProductRecipe Product
| LiquefactionRecipe
| AdvancedOilProcessing
| BoilerRecipe
| UseAsFuelRecipe Product
deriving (Eq, Ord, Generic)
instance Show RecipeName where
show (ProductRecipe product) = show product
show (LiquefactionRecipe) = "Liquefaction"
show (AdvancedOilProcessing) = "AdvancedOilProcessing"
show BoilerRecipe = "BoilerRecipe"
show (UseAsFuelRecipe product) = "UseAsFuel" ++ show product
energy_per_steam = 30000
data ResearchCost = ResearchCost {
researchCostTime :: Time, -- time to process 1 stack of bottles
researchCostBottles :: [Product],
researchCostAmount :: Rat
}
marathon_research_cost_adjustment = 4.0
lab_speed_researches :: [(ResearchCost, Rat)]
lab_speed_researches =
let r = SciencePack1 in
let g = SciencePack2 in
let b = SciencePack3 in
let p = SciencePackProduction in
let y = SciencePackHighTech in
let bot l amount = ResearchCost (Time 30) l (amount * marathon_research_cost_adjustment) in
(bot [r, g] 100, 0.20)
: (bot [r, g] 200, 0.30)
: (bot [r, g, b] 250, 0.40)
: (bot [r, g, b] 500, 0.50)
: (bot [r, g, b, p] 500, 0.50)
: (bot [r, g, b, p, y] 500, 0.60)
: error "further lab researches unknown"
mining_productivity_researches :: [(ResearchCost, Rat)]
mining_productivity_researches =
let r = SciencePack1 in
let g = SciencePack2 in
let b = SciencePack3 in
let p = SciencePackProduction in
let y = SciencePackHighTech in
zipWith
(\colors amounts -> (ResearchCost (Time 60) colors (amounts * 4), 0.02))
(replicate 3 [r, g] ++ replicate 4 [r, g, b] ++ replicate 4 [r, g, b, p] ++ replicate 4 [r, g, b, p, y])
(map fromInteger [100, 200..]) ++ error "further mining productivity unknown"
computeResearchCapital :: Int -> [(ResearchCost, Rat)] -> Map Product Rat
computeResearchCapital n =
mconcat' . map (\r -> scale (researchCostAmount r) (Map.fromListWith (+) $ map (\p -> (p, 1)) (researchCostBottles r))) . map fst . take n
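-- Worked example: with marathon_research_cost_adjustment = 4,
--   computeResearchCapital 2 lab_speed_researches
-- covers the first two lab researches (100 and 200 bottles, each scaled by
-- 4), i.e. a total of 1200 SciencePack1 and 1200 SciencePack2.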
data SmeltingMode =
BurnerSmelting
| ElectricSmelting
data Liquefaction_mode =
Liquefaction_disabled
| Liquefaction_to_gas
| Liquefaction_to_burn
deriving (Show, Generic)
class Enumerate a where
allOfThem :: [a]
instance Enumerate Liquefaction_mode where
allOfThem =
[ Liquefaction_disabled
, Liquefaction_to_gas
, Liquefaction_to_burn
]
-- the details of GameConfig sufficient to determine what recipes are available
data GameConfigQualitative = GameConfigQualitative {
qgc_liquefaction :: Liquefaction_mode
}
to_qualitative (GameConfig { gc_liquefaction = liquefaction }) = GameConfigQualitative { qgc_liquefaction = liquefaction }
data GameConfig = GameConfig {
gc_lab_researches_done :: Int,
gc_mining_researches_done :: Int,
gc_liquefaction :: Liquefaction_mode,
gc_recipe_configs :: RecipeName -> PreConfig
}
deriving Generic
data Change =
ProductChange RecipeName PreConfig
| Other String String
deriving Generic
instance NFData Change
instance NFData GameConfig
instance NFData Liquefaction_mode
instance NFData RecipeName
gc_alternatives :: GameConfig -> [(Change, GameConfig)]
gc_alternatives gc =
[(Other "Liquefaction Mode" (show mode), gc { gc_liquefaction = mode }) | mode <- allOfThem] ++
[(Other "Lab speed" "+1", gc { gc_lab_researches_done = gc_lab_researches_done gc + 1 }) ] ++
[(Other "Mining productivity" "+1", gc { gc_mining_researches_done = gc_mining_researches_done gc + 1 }) ] ++
[(ProductChange recipeName config, gc { gc_recipe_configs = f' })
| (recipe, _) <- recipes
, recipeName <- [ recipeName recipe ]
, venue <- venuesByKind (recipeVenueKind recipe)
, let availableModules = allModules (usability recipeName)
, modules <- choose' (moduleSlots venue) availableModules
, modules <- [ concat modules ]
, beacon <- if length modules == moduleSlots venue && moduleSlots venue > 0 then [ Nothing, Just SpeedModule2, Just SpeedModule3 ] else [ Nothing ]
, let config = (venue, modules, beacon)
, let f' = (let f = gc_recipe_configs gc in (\r -> if r == recipeName then config else f r))
]
gc_lab_speed_multiplier :: GameConfig -> Rat
gc_lab_speed_multiplier t = 1 +
(mconcat' $ map snd $ take (gc_lab_researches_done t) lab_speed_researches)
gc_miner_productivity_bonus :: GameConfig -> Rat
gc_miner_productivity_bonus t =
(mconcat' $ map snd $ take (gc_mining_researches_done t) mining_productivity_researches)
gc_configs :: GameConfig -> (Recipe -> Config)
gc_configs gc =
\recipe ->
let pre = gc_recipe_configs gc (recipeName recipe) in
mkConfig gc (recipeName recipe) pre
-- recipes and whether or not they should be enabled
recipes :: [(Recipe, (GameConfigQualitative -> Bool))]
recipes =
let assembly = AssemblyVenueKind in
let smelter = SmelterVenueKind in
let
miner what ingredients speed =
[ (Recipe (ProductRecipe what) [(what, 1)] ingredients MinerVenueKind (Time $ recip speed), const True) ]
in
concat [
r GearWheel 1 [(IronPlate, 4)] assembly (Time 0.5),
r IronPlate 1 [(IronOre, 1)] smelter (Time 3.5),
r CopperPlate 1 [(CopperOre, 1)] smelter (Time 3.5),
r SteelPlate 1 [(IronPlate, 10)] smelter (Time 35),
miner IronOre [(BuriedIron, 1)] 0.525,
miner CopperOre [(BuriedCopper, 1)] 0.525,
miner Coal [(BuriedCoal, 1)] 0.525,
    r Beacon 1 [(AdvancedCircuit, 20), (CopperCable, 10), (ElectronicCircuit, 20), (SteelPlate, 10)] assembly (Time 15),
    r Plastic 2 [(PetroleumGas, 20), (Coal, 1)] ChemicalVenueKind (Time 1),
r ElectronicCircuit 1 [(CopperCable, 10), (IronPlate, 2)] assembly (Time 0.5),
r AdvancedCircuit 1 [(Plastic, 4), (CopperCable, 8), (ElectronicCircuit, 2)] assembly (Time 6),
r CopperCable 2 [(CopperPlate, 1)] assembly (Time 0.5),
r Pipe 1 [(IronPlate, 2)] assembly (Time 0.5),
r EngineUnit 1 [(GearWheel, 1), (Pipe, 2), (SteelPlate, 1)] assembly (Time 10),
r ElectricMiningDrill 1 [(GearWheel, 10), (IronPlate, 20), (ElectronicCircuit, 5)] assembly (Time 2),
r SciencePack3 1 [(AdvancedCircuit, 1), (ElectricMiningDrill, 1), (EngineUnit, 1)] assembly (Time 12),
r SciencePack1 1 [(CopperPlate, 1), (GearWheel, 1)] assembly (Time 5),
r SciencePack2 1 [(Inserter, 1), (TransportBelt, 1)] assembly (Time 6),
r SciencePackProduction 2 [(ElectricEngineUnit, 1), (ElectricFurnace, 1)] assembly (Time 14),
r PiercingRoundMagazine 1 [(FirearmRoundMagazine, 1), (SteelPlate, 1), (CopperPlate, 5)] assembly (Time 3),
r FirearmRoundMagazine 1 [(IronPlate, 4)] assembly (Time 1),
r Grenade 1 [(IronPlate, 5), (Coal, 10)] assembly (Time 8),
    r GunTurret 1 [(GearWheel, 10), (CopperPlate, 10), (IronPlate, 20)] assembly (Time 8),
r SciencePackMilitary 2 [(PiercingRoundMagazine, 1), (Grenade, 1), (GunTurret, 1)] assembly (Time 10),
r SpeedModule 1 [(AdvancedCircuit, 5), (ElectronicCircuit, 5)] assembly (Time 15),
r EfficiencyModule 1 [(AdvancedCircuit, 5), (ElectronicCircuit, 5)] assembly (Time 15),
r ProductivityModule 1 [(AdvancedCircuit, 5), (ElectronicCircuit, 5)] assembly (Time 15),
r EfficiencyModule2 1 [(AdvancedCircuit, 5), (EfficiencyModule, 4), (ProcessingUnit, 5)] assembly (Time 30),
r SpeedModule2 1 [(AdvancedCircuit, 5), (SpeedModule, 4), (ProcessingUnit, 5)] assembly (Time 30),
r ProductivityModule2 1 [(AdvancedCircuit, 5), (ProductivityModule, 4), (ProcessingUnit, 5)] assembly (Time 30),
r ProductivityModule3 1 [(AdvancedCircuit, 5), (ProductivityModule2, 5), (ProcessingUnit, 5)] assembly (Time 60),
r SpeedModule3 1 [(AdvancedCircuit, 5), (SpeedModule2, 5), (ProcessingUnit, 5)] assembly (Time 60),
r EfficiencyModule3 1 [(AdvancedCircuit, 5), (EfficiencyModule2, 5), (ProcessingUnit, 5)] assembly (Time 60),
r ProcessingUnit 1 [(AdvancedCircuit, 2), (ElectronicCircuit, 20), (SulfuricAcid, 10)] assembly (Time 10),
r SulfuricAcid 50 [(IronPlate, 1), (Sulfur, 5)] ChemicalVenueKind (Time 1),
r Sulfur 2 [(PetroleumGas, 30)] ChemicalVenueKind (Time 1),
r ResearchCoalLiquefaction (1/800) [(SciencePack1, 1), (SciencePack2, 1), (SciencePack3, 1), (SciencePackProduction, 1)] LabVenueKind (Time 30),
r ResearchNuclearPower (1/4000) [(SciencePack1, 1), (SciencePack2, 1), (SciencePack3, 1)] LabVenueKind (Time 30),
r ResearchLaserTurretDamage5 (1/800)
[(SciencePack1, 1), (SciencePack2, 1), (SciencePack3, 1), (SciencePackProduction, 1), (SciencePackHighTech, 1)] LabVenueKind (Time 60),
r ResearchRocketSilo (1/4000)
[ (SciencePack1, 1)
, (SciencePack2, 1)
, (SciencePack3, 1)
, (SciencePackProduction, 1)
, (SciencePackHighTech, 1)
, (SciencePackMilitary, 1)] LabVenueKind (Time 60),
r ResearchEndgame (1/10000)
[ (SciencePack1, 1)
, (SciencePack2, 1)
, (SciencePack3, 1)
, (SciencePackProduction, 1)
, (SciencePackHighTech, 1)
, (SciencePackMilitary, 1)
, (SciencePackSpace, 1)
] LabVenueKind (Time 60),
r Inserter 1 [(ElectronicCircuit, 1), (IronPlate, 1), (GearWheel, 1)] assembly (Time 0.5),
r TransportBelt 2 [(GearWheel, 1), (IronPlate, 1)] assembly (Time 0.5),
r AssemblingMachine1 1 [(GearWheel, 5), (IronPlate, 9), (ElectronicCircuit, 3)] assembly (Time 0.5),
r AssemblingMachine2 1 [(AssemblingMachine1, 1), (ElectronicCircuit, 5), (GearWheel, 10), (IronPlate, 20)] assembly (Time 0.5),
r AssemblingMachine3 1 [(AssemblingMachine2, 2), (SpeedModule, 4)] assembly (Time 0.5),
r SteelFurnace 1 [(SteelPlate, 6), (StoneBrick, 10)] assembly (Time 3),
r ElectricFurnace 1 [(AdvancedCircuit, 5), (SteelPlate, 10), (StoneBrick, 10)] assembly (Time 5),
r ElectricEngineUnit 1 [(ElectronicCircuit, 2), (EngineUnit, 1), (Lubricant, 15)] assembly (Time 10),
r StoneBrick 1 [(Stone, 2)] smelter (Time 3.5),
miner Stone [(BuriedStone, 1)] 0.65,
r Lubricant 10 [(HeavyOil, 10)] ChemicalVenueKind (Time 1),
r LaserTurret 1 [(Battery, 12), (ElectronicCircuit, 20), (SteelPlate, 20)] assembly (Time 20),
    r Battery 1 [(CopperPlate, 1), (IronPlate, 1), (SulfuricAcid, 40)] ChemicalVenueKind (Time 5),
r SciencePackHighTech 2 [(Battery, 1), (CopperCable, 30), (ProcessingUnit, 3), (SpeedModule, 1)] assembly (Time 14),
r ChemicalPlant 1 [(ElectronicCircuit, 5), (GearWheel, 5), (Pipe, 5), (SteelPlate, 5)] assembly (Time 5),
r OilRefinery 1 [(ElectronicCircuit, 10), (GearWheel, 10), (Pipe, 10), (SteelPlate, 15), (StoneBrick, 10)] assembly (Time 8),
r LabBuilding 1 [(ElectronicCircuit, 10), (GearWheel, 10), (TransportBelt, 4)] assembly (Time 2),
r BoilerBuilding 1 [(Pipe, 4), (StoneFurnace, 1)] assembly (Time 0.5),
r StoneFurnace 1 [(Stone, 5)] assembly (Time 0.5),
r SteamEngineBuilding 1 [(GearWheel, 10), (IronPlate, 50), (Pipe, 5)] assembly (Time 0.5),
[ (Recipe BoilerRecipe [(Steam, (baseBoilerPower * 0.5) / energy_per_steam)] [] BoilerVenueKind (Time 1), const True) ],
-- r ElectricalEnergy 1 [(Steam, 1/energy_per_steam)] SteamEngineVenueKind (Time (1/900e3)),
r ElectricalEnergy 1 [] GreenPowerVenueKind (Time 1),
r SolarFacilityBuilding 1 [(SolarPanel, 176), (Accumulator, 166), (Substation, 10), (Roboport, 1)] NoVenueVenueKind (Time 1),
r Roboport 1 [(AdvancedCircuit, 45), (GearWheel, 45), (SteelPlate, 45)] assembly (Time 5),
r Substation 1 [(AdvancedCircuit, 5), (CopperPlate, 5), (SteelPlate, 10)] assembly (Time 0.5),
r Accumulator 1 [(Battery, 5), (IronPlate, 2)] assembly (Time 10),
r SolarPanel 1 [(CopperPlate, 5), (ElectronicCircuit, 15), (SteelPlate, 5)] assembly (Time 10),
r PetroleumGas 2 [(LightOil, 3)] ChemicalVenueKind (Time 5),
r SolidFuel 1 [(LightOil, 10)] ChemicalVenueKind (Time 3),
r LightOil 3 [(HeavyOil, 4)] ChemicalVenueKind (Time 5),
[ (Recipe AdvancedOilProcessing [(HeavyOil, 10), (LightOil, 45), (PetroleumGas, 55)] [(CrudeOil, 100)] RefineryVenueKind (Time 5), const True) ],
r Concrete 10 [(StoneBrick, 5), (IronOre, 1)] assembly (Time 10),
r NuclearFacilityBuilding 1 [(NuclearReactor, 1), (HeatExchanger, 16), (SteamTurbine, 25), (HeatPipe, 60)] NoVenueVenueKind (Time 1),
r SteamTurbine 1 [(GearWheel, 50), (CopperPlate, 50), (Pipe, 20)] assembly (Time 3),
r NuclearReactor 1 [(Concrete, 500), (SteelPlate, 500), (AdvancedCircuit, 500), (CopperPlate, 500)] assembly (Time 8),
r HeatExchanger 1 [(SteelPlate, 10), (CopperPlate, 100), (Pipe, 10)] assembly (Time 3),
r HeatPipe 1 [(SteelPlate, 10), (CopperPlate, 20)] assembly (Time 1),
r RocketSiloBuilding 1 [(Concrete, 1000), (ElectricEngineUnit, 200), (Pipe, 100), (ProcessingUnit, 200), (SteelPlate, 1000)] assembly (Time 30),
r RocketPart 1 [(ControlModule, 10), (LightweightStructure, 10), (RocketFuel, 10)] RocketSiloVenueKind (Time 3),
r ControlModule 1 [(ProcessingUnit, 1), (SpeedModule, 1)] assembly (Time 30),
r LightweightStructure 1 [(CopperPlate, 10), (Plastic, 10), (SteelPlate, 10)] assembly (Time 30),
r RocketFuel 1 [(SolidFuel, 10)] ChemicalVenueKind (Time 30),
r SciencePackSpace 1000 [(RocketPart, 100), (Satellite, 1)] NoVenueVenueKind (Time 1),
r Satellite 1 [(Accumulator, 100), (LightweightStructure, 100), (ProcessingUnit, 100), (Radar, 5), (RocketFuel, 50), (SolarPanel, 100)] assembly (Time 5),
r Radar 1 [(ElectronicCircuit, 5), (GearWheel, 5), (IronPlate, 10)] assembly (Time 0.5),
(
let
burn_coal =
Recipe (UseAsFuelRecipe Coal) [(ChemicalEnergy, 8e6)] [(Coal, 1)] NoVenueVenueKind (Time 1) -- time is meaningless here
liquefaction =
Recipe LiquefactionRecipe [(HeavyOil, 35), (LightOil, 15), (PetroleumGas, 20)] [(Coal, 10), (HeavyOil, 25), (Steam, 50)] RefineryVenueKind (Time 5)
burn_bricks =
Recipe (UseAsFuelRecipe SolidFuel) [(Steam, (25e6 * 0.5) / energy_per_steam)] [(SolidFuel, 1)] BoilerVenueKind (Time 1) -- incorrect time, but nothing cares
in
[ (burn_coal, (\gc -> case qgc_liquefaction gc of { Liquefaction_to_burn -> False; _ -> True }))
, (burn_bricks, (\gc -> case qgc_liquefaction gc of { Liquefaction_to_burn -> True; _ -> False }))
, (liquefaction, (\gc -> case qgc_liquefaction gc of { Liquefaction_disabled -> False; _ -> True }))
])
] where
r product quantity ingredients venues time = [ (Recipe (ProductRecipe product) [(product, quantity)] ingredients venues time, const True) ]
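-- Illustrative note (added commentary, not in the original source): 'r' turns a
-- product description into a singleton, always-enabled recipe list; e.g.
--   r GearWheel 2 [(IronPlate, 4)] assembly (Time 0.5)
-- expands to
--   [ (Recipe (ProductRecipe GearWheel) [(GearWheel, 2)] [(IronPlate, 4)] assembly (Time 0.5), const True) ]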
mconcat' x = foldr add zero x
functionToMatrix :: (Num v, Ix' b, Ix' a) => (a -> [(b, v)]) -> Matrix a b v
functionToMatrix f =
Matrix (Array.array fullRange [ ((a, b), maybe 0 id (Map.lookup b bs)) | a <- range fullRange, let bs = Map.fromListWith (+) (f a), b <- range fullRange])
recipesByName = Map.fromListWith (error "multiple recipes with the same name") (map (\(recipe, _enabled) -> (recipeName recipe, recipe)) (recipes))
enabledRecipesByName gc =
Map.fromListWith (error "multiple recipes with the same name") (concatMap (\(recipe, enabled) -> if not (enabled gc) then [] else [(recipeName recipe, recipe)]) (recipes))
recipesToMatrix :: GameConfig -> Map RecipeName (Map Product Rat)
recipesToMatrix gc =
fmap (\(Recipe recipeName production consumption _venueKind (Time baseTime)) ->
(
let preConfig = gc_recipe_configs gc recipeName in
let config = mkConfig gc recipeName preConfig in
let venue = configVenue config in
let
time = (baseTime / (speedMultiplier config * baseSpeed gc venue))
extra_energy = time * configConstantExtraPower config
energy_and_pollution =
let multiplier = time * energyMultiplier config in
let pollution = basePollution venue in
case basePower venue of
ElectricalPower basePower ->
[(ElectricalEnergy, (basePower * multiplier) + extra_energy), (Pollution, pollution * multiplier)]
ChemicalPower basePower ->
[(ChemicalEnergy, basePower * multiplier + extra_energy), (Pollution, pollution * multiplier)]
in
Map.fromListWith add (fmap (second negate) (consumption ++ energy_and_pollution) ++ map (second ((* productivityMultiplier config))) production)
)) (enabledRecipesByName (to_qualitative gc))
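-- Added commentary on the sign convention above: consumption, energy and
-- pollution enter negated, while production is scaled by the productivity
-- multiplier, so each column reads as net product change per execution; e.g. a
-- recipe consuming 2 IronPlate and producing 1 GearWheel at productivity 1.2
-- would contribute entries (IronPlate, -2) and (GearWheel, 1.2), plus its
-- energy and pollution terms.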
matrix_add (Matrix x) (Matrix y) = Matrix (Array.array fullRange [(i, x ! i + y ! i) | i <- range fullRange])
matrix_negate (Matrix x) = Matrix (Array.array fullRange [(i, - x ! i) | i <- range fullRange])
matrix_subtract a b = matrix_add a (matrix_negate b)
matrix_identity :: (Ix' a, Num v) => Matrix a a v
matrix_identity = Matrix (Array.array fullRange [((a,b), if a == b then 1 else 0) | (a,b) <- range fullRange])
to_data_matrix :: forall a v . (Ix' a) => Matrix a a v -> Data.Matrix.Matrix v
to_data_matrix (Matrix m) =
Data.Matrix.matrix
(Array.rangeSize (fullRange :: (a, a)))
(Array.rangeSize (fullRange :: (a, a)))
(\(i, j) ->
m ! (toEnum (i - 1), toEnum (j - 1)))
of_data_matrix :: forall a v . Ix' a => Data.Matrix.Matrix v -> Matrix a a v
of_data_matrix m =
let (all_as :: [a]) = range fullRange in
Matrix (Array.array fullRange
[ ((i, j), v)
| (i, r) <- zip all_as (Data.Matrix.toLists m)
, (j, v) <- zip all_as r])
matrix_inverse_precise :: (Ix' a, Num v, Fractional v, Eq v) => Matrix a a v -> Matrix a a v
matrix_inverse_precise x = trace "inverting" $ of_data_matrix . (\(Right res) -> res) . Data.Matrix.inverse . to_data_matrix $ x
to_hmatrix :: forall a . (Ix' a) => Matrix a a Rat -> Numeric.LinearAlgebra.HMatrix.Matrix Double
to_hmatrix (Matrix m) =
Numeric.LinearAlgebra.Data.fromLists
$ [
[ fromRational . toRational $ (m ! (i, j))
| j <- range fullRange
]
| i <- range fullRange
]
of_hmatrix :: forall a . Ix' a => Numeric.LinearAlgebra.HMatrix.Matrix Double -> Matrix a a Rat
of_hmatrix m =
let (all_as :: [a]) = range fullRange in
Matrix (Array.array fullRange
[ ((i, j), fromRational . toRational $ v)
| (i, r) <- zip all_as (Numeric.LinearAlgebra.Data.toLists m)
, (j, v) <- zip all_as r])
matrix_inverse_hmatrix :: (Ix' a) => Matrix a a Rat -> Matrix a a Rat
matrix_inverse_hmatrix x = of_hmatrix . Numeric.LinearAlgebra.HMatrix.inv . to_hmatrix $ x
to_eigen_matrix :: forall a . (Ix' a) => Matrix a a Rat -> Data.Eigen.Matrix.Matrix Double CDouble
to_eigen_matrix (Matrix m) =
Data.Eigen.Matrix.fromList
[
[ fromRational . toRational $ (m ! (toEnum (i - 1), toEnum (j - 1)))
| j <- range fullRange
]
| i <- range fullRange
]
of_eigen_matrix :: forall a . Ix' a => Data.Eigen.Matrix.Matrix Double CDouble -> Matrix a a Rat
of_eigen_matrix m =
let (all_as :: [a]) = range fullRange in
Matrix (Array.array fullRange
[ ((i, j), fromRational . toRational $ v)
| (i, r) <- zip all_as (Data.Eigen.Matrix.toList m)
, (j, v) <- zip all_as r])
matrix_inverse_eigen :: (Ix' a) => Matrix a a Rat -> Matrix a a Rat
matrix_inverse_eigen x = of_eigen_matrix . Data.Eigen.Matrix.inverse . to_eigen_matrix $ x
matrix_inverse_verified inverse x =
let y = inverse x in
if (matrix_mult x y == Matrix (f_array (\(x, y) -> if x == y then 1 else 0))) then y else error $ "matrix inverse broken: " ++ "\n" ++ show x ++"\n" ++ show y
-- x = m * x + x_0
-- solve_equation :: (Ix' a, Num v, Fractional v, Eq v) => Matrix a a v -> Vector a v -> Vector a v
-- solve_equation a x0 = matrix_inverse (matrix_subtract a matrix_identity) `matrix_mult` matrix_negate x0
sparse_transpose :: (Ord a, Ord b) => Map a (Map b v) -> Map b (Map a v)
sparse_transpose m =
fmap
Map.fromList
$ Map.fromListWith (++) (concatMap (\(a, m') -> map (\(b, v) -> (b, [(a, v)])) (Map.toList m')) (Map.toList m))
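-- Illustrative example (added): sparse_transpose (Map.fromList [("r1", Map.fromList [("p", 2)])])
-- yields Map.fromList [("p", Map.fromList [("r1", 2)])], i.e. rows keyed by 'a' become rows keyed by 'b'.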
matrix_to_sparse :: (Ix' a, Ix' b, Ord b, Linear v, Eq v) => Matrix a b v -> [(a, (Map b v))]
matrix_to_sparse (Matrix m) =
[ (a, Map.fromList
[
(b, x)
| b <- range fullRange
, let x = m ! (a,b)
, x /= zero
])
| a <- range fullRange
]
type RawProduct = Product
solvedRecipes
:: GameConfig
-> Map Product (Map RawProduct Rat, Map RecipeName Rat)
solvedRecipes gc =
find_kernel_with_trace (/) (*) (recipesToMatrix gc)
currentSolvedRecipes = solvedRecipes current_game_config
currentRecipeMatrix = recipesToMatrix current_game_config
vector_lookup :: Ix' a => Vector a v -> a -> v
vector_lookup (Matrix x) a = x ! (a, ())
compute'_new :: GameConfig -> Product -> RawMaterialPressure
compute'_new gc =
let recipes = fmap fst $ solvedRecipes gc in
\product -> case Map.lookup product recipes of
Nothing -> Map.empty
Just m -> m
compute_recipe gc =
let compute = compute'_new gc in
\recipe ->
mconcat'
[ scale quantity (compute product)
| (product, quantity) <- recipeProducts recipe
]
compute' gc = compute'_new gc
data SparseMatrix a b v = SparseMatrix [(a, Map b v)]
data MatrixError =
Not_enough_equations
| Contradicting_equations
deriving Show
computeTotalCost :: Product -> RawMaterialPressure
computeTotalCost = compute' current_game_config
usability' Beacon = Usable
usability' GearWheel = Unusable
usability' IronPlate = Unusable
usability' IronOre = Unusable
usability' CopperOre = Unusable
usability' CopperCable = Unusable
usability' ElectronicCircuit = Unusable
usability' AdvancedCircuit = Unusable
usability' Plastic = Unusable
usability' Sulfur = Unusable
usability' SulfuricAcid = Unusable
usability' EngineUnit = Unusable
usability' CopperPlate = Unusable
usability' SteelPlate = Unusable
usability' ElectricMiningDrill = Usable
usability' Pipe = Usable
usability' Inserter = Usable
usability' ElectricFurnace = Usable
usability' SciencePack1 = Unusable
usability' SciencePack2 = Unusable
usability' SciencePack3 = Unusable
usability' AssemblingMachine1 = Usable
usability' AssemblingMachine2 = Usable
usability' AssemblingMachine3 = Usable
usability' TransportBelt = Usable
usability' StoneBrick = Unusable
usability' Lubricant = Unusable
usability' SciencePackProduction = Unusable
usability' SciencePackHighTech = Unusable
usability' CoalLiquefaction = Unusable
usability' ElectricEngineUnit = Unusable
usability' Stone = Unusable
usability' Coal = Unusable
usability' ProcessingUnit = Unusable
usability' SpeedModule = Usable
usability' SpeedModule2 = Usable
usability' SpeedModule3 = Usable
usability' EfficiencyModule = Usable
usability' EfficiencyModule2 = Usable
usability' EfficiencyModule3 = Usable
usability' ProductivityModule = Usable
usability' ProductivityModule2 = Usable
usability' ProductivityModule3 = Usable
usability' SolidFuel = Unusable
usability' ElectricalEnergy = Usable
usability' PetroleumGas = Unusable
usability' ResearchCoalLiquefaction = Unusable
usability' ResearchEndgame = Unusable
usability' ResearchRocketSilo = Unusable
usability' ResearchNuclearPower = Unusable
usability' ResearchLaserTurretDamage5 = Unusable
usability' LightOil = Unusable
usability' HeavyOil = Unusable
usability' CrudeOil = Unusable
usability' Steam = Unusable
usability' LaserTurret = Usable
usability' Battery = Unusable
usability' PiercingRoundMagazine = Usable
usability' FirearmRoundMagazine = Usable
usability' Grenade = Usable
usability' GunTurret = Usable
usability' SciencePackMilitary = Unusable
usability' SteelFurnace = Usable
usability' StoneFurnace = Usable
usability' LabBuilding = Usable
usability' SteamEngineBuilding = Usable
usability' BoilerBuilding = Usable
usability' OilRefinery = Usable
usability' ChemicalPlant = Usable
usability' SolarFacilityBuilding = Usable
usability' SolarPanel = Usable
usability' Accumulator = Usable
usability' Substation = Usable
usability' Roboport = Usable
usability' SteamTurbine = Usable
usability' NuclearReactor = Usable
usability' HeatExchanger = Usable
usability' HeatPipe = Usable
usability' Concrete = Usable
usability' RocketPart = Unusable
usability' RocketSiloBuilding = Unusable
usability' ControlModule = Unusable
usability' LightweightStructure = Unusable
usability' RocketFuel = Unusable
usability' SciencePackSpace = Unusable
usability' NuclearFacilityBuilding = Usable
usability' Satellite = Usable
usability' Radar = Usable
usability' ChemicalEnergy = Unusable
usability' BuriedIron = Unusable
usability' BuriedCopper = Unusable
usability' BuriedCoal = Unusable
usability' BuriedStone = Unusable
usability' Pollution = Unusable
usability recipeName =
case recipeName of
ProductRecipe product -> usability' product
LiquefactionRecipe -> Unusable
AdvancedOilProcessing -> Unusable
UseAsFuelRecipe _product -> Unusable
BoilerRecipe -> Usable
evaluateTotalCost :: RawMaterialPressure -> Rat
evaluateTotalCost f = sum [ (estimate k * v) | (k, v) <- Map.toList f, v /= zero] where
-- estimate LightOil = 0.1
estimate CrudeOil = 0.1
estimate HeavyOil = 0.1
estimate BuriedCoal = 1
estimate BuriedIron = 1.5
estimate BuriedCopper = 1
estimate BuriedStone = 0.3
estimate Pollution = 0.001
estimate product = error $ "don't know how much this is worth: " ++ show product
-- estimate PetroleumGas = 0.1
subtract' a b = add a (minus b)
instance NFData Venue
data Assessment = Assessment {
totalRawMaterials :: RawMaterialPressure,
totalCapitalSeconds :: RawMaterialPressure,
totalCapital :: RawMaterialPressure
} deriving (Generic, Show)
instance NFData Assessment
instance Linear Assessment where
zero = Assessment {
totalRawMaterials = zero,
totalCapitalSeconds = zero,
totalCapital = zero
}
a `add` b =
Assessment
{
totalRawMaterials = add (totalRawMaterials a) (totalRawMaterials b),
totalCapitalSeconds = add (totalCapitalSeconds a) (totalCapitalSeconds b),
totalCapital = add (totalCapital a) (totalCapital b)
}
minus a =
Assessment
{
totalRawMaterials = minus (totalRawMaterials a),
totalCapitalSeconds = minus (totalCapitalSeconds a),
totalCapital = minus (totalCapital a)
}
instance VectorSpace Assessment where
type Scalar Assessment = Rat
scale x a =
Assessment
{
totalRawMaterials = scale x (totalRawMaterials a),
totalCapitalSeconds = scale x (totalCapitalSeconds a),
totalCapital = scale x (totalCapital a)
}
capitalUsePerExecution :: GameConfig -> Recipe -> RawMaterialPressure
capitalUsePerExecution gc recipe =
let config = gc_configs gc recipe in
let Time time = recipeTime recipe in
let execution_time = time / (speedMultiplier config * baseSpeed gc (configVenue config)) in
let facility_cost = capitalCost config in
scale execution_time facility_cost
type CapitalUse = RawMaterialPressure
computeTotalCost_multi :: Map Product Rat -> RawMaterialPressure
computeTotalCost_multi = mconcat' . map (\(product, amount) -> scale amount (computeTotalCost product)) . Map.toList
assess_gc' :: GameConfig -> Map Product Rat -> (Map RecipeName CapitalUse, Assessment)
assess_gc' gc =
let solved = solvedRecipes gc in
let rbn = enabledRecipesByName (to_qualitative gc) in
\demand -> runIdentity $ do
(rawMaterials, executions) <- return $ dot_product_with scale demand solved
capitalSeconds <- return $ Map.mapWithKey (\recipeName executions ->
let Just recipe = Map.lookup recipeName rbn in
scale executions (capitalUsePerExecution gc recipe)
) executions
labResearchCapital <- return $ computeResearchCapital (gc_lab_researches_done gc) lab_speed_researches
miningResearchCapital <- return $ computeResearchCapital (gc_mining_researches_done gc) mining_productivity_researches
return $ (capitalSeconds, Assessment
{
totalRawMaterials = rawMaterials,
totalCapitalSeconds = mconcat' (Map.elems capitalSeconds),
totalCapital = computeTotalCost_multi (mconcat' [labResearchCapital, miningResearchCapital])
})
assess_gc configs =
let a = assess_gc' configs in
\demand -> snd (a demand)
newtype Rat = Rat Double deriving (Eq, Ord, Generic, NFData, Linear, Num, Fractional, Real)
instance VectorSpace Rat where
type Scalar Rat = Rat
scale (Rat x) (Rat y) = Rat (x * y)
dot_product_with :: (Ord k, Linear a, Linear b, Linear c) => (a -> b -> c) -> Map k a -> Map k b -> c
dot_product_with f m1 m2 = foldr add zero $ Map.elems $
Map.mergeWithKey
(\_k a b -> Just (f a b))
(\_ -> Map.empty)
(\_ -> Map.empty)
m1
m2
instance Show Rat where
show (Rat x) = Printf.printf "%.4f" (fromRational $ toRational x :: Double)
showModule SpeedModule = "s1"
showModule SpeedModule2 = "s2"
showModule SpeedModule3 = "s3"
showModule EfficiencyModule = "e1"
showModule EfficiencyModule2 = "e2"
showModule EfficiencyModule3 = "e3"
showModule ProductivityModule = "p1"
showModule ProductivityModule2 = "p2"
showModule ProductivityModule3 = "p3"
showModule x = error $ "not a module: " ++ show x
divv a b = if b == 0 then 1e10 else a / b
venueBuilding :: Venue -> [Product]
venueBuilding venue = case venue of
Assembly2 -> [AssemblingMachine2]
Assembly3 -> [AssemblingMachine3]
Miner -> [ElectricMiningDrill]
SmelterElectric -> [ElectricFurnace]
SmelterBurner -> [SteelFurnace]
Chemical -> [ChemicalPlant]
Lab -> [LabBuilding]
Boiler -> [BoilerBuilding]
SteamEngine -> [SteamEngineBuilding]
SolarFacility -> [SolarFacilityBuilding]
NuclearFacility -> [NuclearFacilityBuilding]
NoVenue -> []
Refinery -> [OilRefinery]
RocketSilo -> [RocketSiloBuilding]
capitalCost :: Config -> RawMaterialPressure
capitalCost config =
computeTotalCost_multi (
configModuleMaterials config
`add`
Map.fromListWith (+) (map (\p -> (p, 1)) (venueBuilding (configVenue config))))
partition_market :: (a -> (Rat, Rat)) -> [a] -> ([a], [a], [a], [a], [a])
partition_market evaluate l
=
( p (\(gain, cost) -> gain >= 0 && cost <= 0) (\(_gain, cost) -> cost) -- free capital
, p (\(gain, cost) -> gain >= 0 && cost <= 0) (\(gain, _cost) -> (-gain)) -- free resources
, p (\(gain, cost) -> gain >= 0 && cost <= 0) (\(gain, cost) -> (gain * cost)) -- free both
, p (\(gain, cost) -> gain > 0 && cost >= 0) (\(gain, cost) -> cost / gain) -- buy
, p (\(gain, cost) -> gain < 0 && cost < 0) (\(gain, cost) -> gain / cost) -- sell
) where
p predicate order = sortBy (comparing (order . evaluate)) $ filter (predicate . evaluate) l
allRecipeNames qgc = [recipeName recipe | (recipe, check) <- recipes, check qgc]
venueKind_of_venue venue = case venue of
Assembly2 -> AssemblyVenueKind
Assembly3 -> AssemblyVenueKind
SmelterBurner -> SmelterVenueKind
SmelterElectric -> SmelterVenueKind
Chemical -> ChemicalVenueKind
Miner -> MinerVenueKind
Lab -> LabVenueKind
Boiler -> BoilerVenueKind
SteamEngine -> SteamEngineVenueKind
Refinery -> RefineryVenueKind
NoVenue -> NoVenueVenueKind
SolarFacility -> GreenPowerVenueKind
NuclearFacility -> GreenPowerVenueKind
RocketSilo -> RocketSiloVenueKind
isVenueDefault venue =
venue == currentDefaultVenue (venueKind_of_venue venue)
possibleSavings :: RawMaterialPressure -> GameConfig -> [(Change, GameConfig, Assessment)]
possibleSavings demand gc =
let assessment_base = assess_gc gc demand in
let
assess_diff gc' =
let assessment_tip = assess_gc gc' demand in
add assessment_tip (minus assessment_base)
in
(`using` parListChunk 10 rdeepseq) $ map (\(change, config) -> (change, config, assess_diff config)) (gc_alternatives gc)
showChange :: Change -> (String, String)
showChange (ProductChange product (venue, modules, beacon)) =
let
showVenue SmelterElectric = "+"
showVenue SmelterBurner = "-"
showVenue Assembly2 = "-"
showVenue Assembly3 = "+"
showVenue venue | isVenueDefault venue = "" | otherwise = "??"
showBeacon Nothing = ""
showBeacon (Just b) = "!" ++ showModule b
in
(show product, showVenue venue ++ (concatMap showModule modules) ++ showBeacon beacon)
showChange (Other a b) = (a, b)
data TableEntry = TableEntry {
te_name :: (String, String),
te_efficiency :: Time,
te_saving :: RawMaterialPressure,
te_cost :: RawMaterialPressure,
te_saving_and_cost :: (Rat, Rat)
}
evaluate (Time totalTime)
name (Assessment {
totalRawMaterials,
totalCapitalSeconds,
totalCapital}) =
let te_saving = minus totalRawMaterials in
let te_cost = scale (recip totalTime) totalCapitalSeconds `add` totalCapital in
let efficiency = Time (totalTime * (evaluateTotalCost te_cost / evaluateTotalCost te_saving)) in
let
te_saving_and_cost =
(evaluateTotalCost te_saving, evaluateTotalCost te_cost + installationCost )
in
(TableEntry { te_name = name, te_efficiency = efficiency, te_saving, te_cost, te_saving_and_cost })
possibleSavings' demand time =
[ evaluate time (showChange change) assessment
| (change, _gc, assessment) <- possibleSavings demand current_game_config
]
installationCost = 40000
desiredMaterials =
[ (ResearchEndgame, 1)
, (PiercingRoundMagazine, 20000)
, (ProductivityModule3, 200)
]
lookup0 m k = case Map.lookup k m of
Nothing -> zero
Just x -> x
currentRecipeVenue recipe =
let config = (gc_configs current_game_config recipe) in
configVenue config
currentEffectiveExecutionTime recipeName =
let recipe = (enabledRecipesByName (to_qualitative current_game_config) !!! recipeName) in
unTime (recipeTime recipe)
/ (speedMultiplier (gc_configs current_game_config recipe) * baseSpeed current_game_config (currentRecipeVenue recipe))
showHours (Time t) =
show (t / 3600) ++ "h"
format_material_pressure = show . evaluateTotalCost
rCols =
[ ("Efficiency", (showHours . te_efficiency))
, ("Name", (fst . te_name))
, ("Mod", (snd . te_name))
, ("Gain", (format_material_pressure . te_saving))
, ("Cost", (format_material_pressure . te_cost))
]
pad n l = replicate (n - length l) ' ' ++ l
printTable :: [[String]] -> [String]
printTable =
map concat . transpose . map (\col -> let maxl = maximum (map length col) in map (pad (maxl + 1)) col) . transpose
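-- Example (added for illustration): printTable [["a","bb"],["ccc","d"]]
-- right-pads every column to its widest entry plus one space, giving
-- ["   a bb", " ccc  d"].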
printTableG :: [a] -> [(String, (a -> String))] -> IO ()
printTableG l cols =
let title = map fst cols in
let showA row = map (($row) . snd) cols in
mapM_ putStrLn $ printTable (title : map showA l)
printRs l = printTableG l rCols
interestingProducts = []
show_percent n =
Printf.printf "%.2f%%" (fromRational (toRational n) * (100 :: Double))
printDetailedCost cost =
let overall = evaluateTotalCost cost in
let
r =
map (\(k, v) ->
let here = evaluateTotalCost (Map.singleton k v) in
[show k, show v, show here, show_percent $ here / overall])
(Map.toList cost) ++ [["Total", "_", show overall, "100.00%"]]
in
mapM_ putStrLn $ printTable r
print_config_details totalTime demand gc =
let solved = solvedRecipes gc in
let matrix = recipesToMatrix gc in
let
(total_cost_per_second, executions_per_second) =
foldr add zero (
map
(\(product, amount) ->
scale (recip $ unTime totalTime) (scale amount $ solved !!! product)
) desiredMaterials)
in
let
negative_executions_per_second =
filter
((<0) . snd)
(Map.toList executions_per_second)
in
let
effective_execution_time recipeName =
let recipe = (enabledRecipesByName (to_qualitative gc) !!! recipeName) in
unTime (recipeTime recipe)
/ (speedMultiplier (gc_configs gc recipe) * baseSpeed gc (configVenue (gc_configs gc recipe)))
in
do
mapM_ print negative_executions_per_second
print "total factories:"
let factories k = flip fmap (Map.lookup k executions_per_second) (* effective_execution_time k)
let (capital_use_per_recipe, Assessment _raw total_capital_use total_capital_oneoff) = assess_gc' gc demand
let scale_capital_use = scale (recip $ unTime totalTime)
printTableG (sortBy (comparing factories) (allRecipeNames currentQGC)) $
[ ("Name", show)
, ("Factories", maybe "<none>" show . factories)
, ("Price",
\k ->
case k of
ProductRecipe product ->
show $ evaluateTotalCost $ computeTotalCost product
_ -> "<complex>")
, ("Capital", (\k -> show $ evaluateTotalCost $ scale_capital_use (lookup0 capital_use_per_recipe k)))
] ++ flip map interestingProducts (\product -> (show product, (\k -> show $ lookup0 (matrix !!! k) product * (lookup0 executions_per_second k))))
putStrLn "Total cost:"
printDetailedCost (scale (unTime totalTime) total_cost_per_second)
putStrLn "Total production capital:"
print (evaluateTotalCost $ scale_capital_use total_capital_use)
putStrLn "Total research capital:"
print (evaluateTotalCost $ total_capital_oneoff)
report =
let totalTime = Time (5 * 3600) in
let
demand =
Map.fromList desiredMaterials
in
let savings = possibleSavings' demand totalTime in
do
let (free_capital, free_resources, free_both, buys, sells) = partition_market te_saving_and_cost savings
print_config_details totalTime demand current_game_config
putStrLn $ "Number of alternative configurations considered:" ++ show (length (gc_alternatives current_game_config))
putStrLn "Free capital:"
printRs (take 10 free_capital)
putStrLn "Free resources:"
printRs (take 10 free_resources)
putStrLn "Free both:"
printRs (take 10 free_both)
putStrLn "Buys:"
printRs (take 20 buys)
putStrLn "Sells:"
printRs (take 20 sells)
matrix_of_lists lists =
Matrix (Array.array fullRange
[ ((i, j), toRational v)
| (i, r) <- zip (range fullRange) lists
, (j, v) <- zip (range fullRange) r])
_identity_matrix :: (Ix' a) => Matrix a a Rat
_identity_matrix = Matrix (f_array (\(a,b) -> if a == b then 1 else 0))
currentDefaultVenue :: VenueKind -> Venue
currentDefaultVenue AssemblyVenueKind = Assembly2
currentDefaultVenue SmelterVenueKind = SmelterElectric
currentDefaultVenue GreenPowerVenueKind = NuclearFacility
currentDefaultVenue venueKind = case venuesByKind venueKind of
[ venue ] -> venue
_ -> error "ambiguous venue"
trivial recipe =
(currentDefaultVenue (recipeVenueKind recipe), [], Nothing)
(!!!) :: (Ord k, HasCallStack) => Map k v -> k -> v
(!!!) m x = m Map.! x
parseSpec :: String -> (Maybe Char, [Product], Maybe Product)
parseSpec ('+' : rest) = (\(m,b) -> (Just '+', m, b)) (parseSpec' rest)
parseSpec ('-' : rest) = (\(m,b) -> (Just '-', m, b)) (parseSpec' rest)
parseSpec rest = (\(m,b) -> (Nothing, m, b)) (parseSpec' rest)
parseSpec' ('!' : rest) = ([], parseBeaconSpec rest)
parseSpec' (c1 : c2 : rest) = (parseModule c1 c2 : ms, b) where
(ms, b) = parseSpec' rest
parseSpec' [] = ([], Nothing)
parseSpec' s = error $ "syntax error when parsing modules configuration " ++ show s
parseBeaconSpec [c1, c2] = Just (parseModule c1 c2)
parseBeaconSpec [] = Nothing
parseBeaconSpec s = error $ "syntax error when parsing beacon configuration " ++ show s
parseModule 'p' '3' = ProductivityModule3
parseModule 'p' '2' = ProductivityModule2
parseModule 'p' '1' = ProductivityModule
parseModule 's' '3' = SpeedModule3
parseModule 's' '2' = SpeedModule2
parseModule 's' '1' = SpeedModule
parseModule 'e' '3' = EfficiencyModule3
parseModule 'e' '2' = EfficiencyModule2
parseModule 'e' '1' = EfficiencyModule
parseModule c1 c2 = error $ "unrecognized module: " ++ [c1, c2]
venueByChar :: VenueKind -> Maybe Char -> Venue
venueByChar SmelterVenueKind (Just '+') = SmelterElectric
venueByChar SmelterVenueKind (Just '-') = SmelterBurner
venueByChar SmelterVenueKind Nothing = error "ambiguous smelter"
venueByChar AssemblyVenueKind (Just '-') = Assembly2
venueByChar AssemblyVenueKind (Just '+') = Assembly3
venueByChar AssemblyVenueKind Nothing = error "ambiguous assembly"
venueByChar GreenPowerVenueKind (Just '-') = SolarFacility
venueByChar GreenPowerVenueKind (Just '+') = NuclearFacility
venueByChar GreenPowerVenueKind Nothing = error "ambiguous green power"
venueByChar kind Nothing = currentDefaultVenue kind
venueByChar kind (Just c) = error $ "venueByChar weird invocation" ++ show (kind, c)
currentSpecs =
let
products =
init [
--(ProcessingUnit,"+s2p3p3p3"),
--(GearWheel,"+p2p2p2s1"),
--(Plastic,"p2p2e1"),
(SulfuricAcid,"p2p2p2"),
--(AdvancedCircuit,"+e1e1p1p1"),
--(ResearchNuclearPower,"p2p2"),
--(ResearchRocketSilo,"p2p2"),
--(EngineUnit,"-e1e1"),
(SciencePack1,"-e1e1"),
(SciencePack2,"-e1e1"),
(LightOil,"e1e1e1"),
--(CopperCable,"+e1e1e1p1"),
--(PetroleumGas,"e1e1e1"),
(IronOre,"e1e1e1"),
(CopperOre,"e1e1e1"),
(Coal,"e1e1e1"),
(PiercingRoundMagazine,"-e1e1"),
--(SciencePackHighTech,"+p3p3p3p3"),
--(ElectronicCircuit,"+p3p3p3s2"),
--(SciencePack3,"+p3p3p3s2")
(ProcessingUnit,"+p3p3p3p3!s3"),
(SciencePack3,"+p3p3p3p3!s3"),
(SciencePackHighTech,"+p3p3p3p3!s3"),
--(CopperCable,"+p2p2p2p2!s2"),
(ElectronicCircuit,"+p3p3p3p3!s2"),
(ResearchEndgame,"p3p3!s2"),
(GearWheel,"+p3p3p3p3!s3"),
(AdvancedCircuit,"+p3p3p3p3!s3"),
(SciencePackMilitary,"+p3p3p3p3!s2"),
(Plastic,"p3p3p3!s3"),
(CopperPlate,"+p2p2!s2"),
(IronPlate,"+p2p2!s2"),
(SteelPlate,"+p2p2!s2"),
(CopperCable, "+p3p3p3p3!s3"),
(PetroleumGas, "p2p2p2!s2"),
(RocketPart, "p3p3p3p3"),
(ControlModule, "+p3p3p3p3!s3"),
(RocketFuel, "p3p3p3p3!s3"),
(LightweightStructure, "+p3p3p3p3!s3"),
-- make sure:
(SciencePackProduction,"+p3p3p3p3!s2"),
(EngineUnit,"+p3p3p3p3!s2"),
undefined]
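-- (Added note: 'init' drops the final 'undefined', which is therefore never
-- evaluated; the trick lets every entry above end with a trailing comma.)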
in
map (\(p,s) -> (ProductRecipe p, s)) products ++ [
(AdvancedOilProcessing, "p3p3p3!s3")
]
currentModules :: RecipeName -> PreConfig
currentModules = runIdentity $ do
specs <- return $ Map.fromListWith (error "spec for the same product given twice") (map (second parseSpec) currentSpecs)
let byName = Map.fromList (map (\(r, _) -> (recipeName r, r)) recipes)
return $ \recipeName ->
let recipe = byName !!! recipeName in
case Map.lookup recipeName specs of
Nothing ->
trivial recipe
Just (venue_char, modules, beacon) ->
let venue = venueByChar (recipeVenueKind recipe) venue_char in
(venue, modules, beacon)
current_game_config :: GameConfig
current_game_config =
GameConfig {
gc_lab_researches_done = 4,
gc_mining_researches_done = 6,
gc_liquefaction = Liquefaction_disabled,
gc_recipe_configs = currentModules
}
currentQGC = to_qualitative current_game_config
--main = print $ computeTotalCost SciencePack3
main = report
--main =
-- print $ solvedRecipes currentConfig !!! SulfuricAcid
-- main = mapM_ print $ possibleSavings'''
{-
main = do
flip mapM_ (range fullRange :: [Product]) $ \product -> do
let configs =
(\p -> if p == GearWheel
then Config {configSpeedBonus = 0, configProductivityBonus = 0, configEnergyBonus = 0}
else currentConfig p)
let x = compute' configs GearWheel
let y = compute' configs GearWheel
print $ x
print $ y
print $ x == y
-}
|
{"hexsha": "9fc969954a0d74d2e86e8f63efa63eea13d0515d", "size": 54507, "ext": "hs", "lang": "Haskell", "max_stars_repo_path": "factorio-module-selector.hs", "max_stars_repo_name": "Rotsor/factorio-module-selector", "max_stars_repo_head_hexsha": "22a595ba36bbf9ca337bba0dfbe3c387308f43b3", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "factorio-module-selector.hs", "max_issues_repo_name": "Rotsor/factorio-module-selector", "max_issues_repo_head_hexsha": "22a595ba36bbf9ca337bba0dfbe3c387308f43b3", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "factorio-module-selector.hs", "max_forks_repo_name": "Rotsor/factorio-module-selector", "max_forks_repo_head_hexsha": "22a595ba36bbf9ca337bba0dfbe3c387308f43b3", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0973509934, "max_line_length": 173, "alphanum_fraction": 0.6971765094, "num_tokens": 16140}
|
\documentclass[a4paper,10pt]{book}
\usepackage{textcomp}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage[top=0.5in, bottom=0.5in, left=0.5in, right=0.5in]{geometry}
\usepackage{setspace}
\newtheorem{theorem}{Theorem}[chapter]
\newtheorem{lemma}[theorem]{Lemma}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{xca}[theorem]{Exercise}
\newtheorem{con}[theorem]{}
\theoremstyle{remark}
\newtheorem{remark}[theorem]{Remark}
\numberwithin{section}{chapter}
\numberwithin{equation}{chapter}
\begin{document}
\chapter{Metric Graphs}
\section{Graphs: Combinatorical Structure}
\begin{definition}
A graph \emph{$\Gamma$} consists of
\begin{itemize}
\item A pair of sets $(V,\ E)$ (called the `Vertex' and `Edge' sets respectively)
\item An involution $\: \phi \colon E \rightarrow E$
satisfying $\phi (e) = \bar{e} $ and $\phi\circ\phi = \mathbf{1}_E$ (identity function)
\item Functions $i,\ \tau: E \rightarrow V$ such that $i(\bar{e}) = \tau (e) $ and $\tau(\bar{e}) = i(e)$
\end{itemize}
\end{definition}
The following example illustrates the definition.
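\begin{example}
(A minimal example, supplied here for concreteness.) Let $V = \{v_1, v_2\}$ and $E = \{e, \bar{e}\}$ with $\phi(e) = \bar{e}$, $i(e) = v_1$ and $\tau(e) = v_2$ (so $i(\bar{e}) = v_2$ and $\tau(\bar{e}) = v_1$). This is the graph consisting of two vertices joined by a single edge.
\end{example}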
\section{A Graph as a Space}
The following construction associates to a graph $\Gamma$, a space $| \Gamma |$
\vspace{0.1in} \\ Let $ \xi = E \times [0,1] = \{ (e,t) : e \in E,\ t \in [0,1]\} $.\\
Let $ \widetilde{\Gamma} = \xi \cup V $ \\
Let `\texttildelow' be the equivalence relation on $ \widetilde{\Gamma} $ generated by:
\begin{itemize}
\item If $ (e,t) \in \xi $, then $ (e,t) \sim (\bar{e},1-t) $
\item $ \forall \ e \in E $, $ i(e) \sim (e,0) $ and $ \tau (e) \sim (e,1) $
\end{itemize}
$ |\Gamma| $ is the quotient space $ \widetilde{\Gamma} / \sim $
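A standard example (added for illustration): take $V = \{v\}$ and $E = \{e, \bar{e}\}$ with $i(e) = \tau(e) = v$. The relation identifies $(e,t)$ with $(\bar{e},1-t)$, merging the two copies of $[0,1]$ into one, and glues both endpoints to $v$; hence $|\Gamma|$ is homeomorphic to a circle.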
\section{ Metric structure on a Graph}
\subsection{Distance on the Vertex set V}
\begin{definition}
Two vertices $ v_{1}$ and $v_{2} $ are said to be \emph{adjacent} if there exists $ e \in E $ such that $ i(e) = v_1 $ and $ \tau (e) = v_2 $.
\end{definition}
Let $ \mathcal{D} = \{\, d : V \times V \rightarrow \mathbf{R} \ : \ d \text{ is a metric; } d(v_1, v_2) \leq 1 \text{ if } v_1, v_2 \text{ are adjacent} \,\} $
\begin{lemma}
$ \cal{D} $ is not empty
\end{lemma}
\begin{definition}
The \emph{Graph Metric} on V is defined as
\begin{equation}
d_{max}(v_1,v_2)= \sup_{d \in \cal{D} }d(v_1,v_2) \nonumber
\end{equation}
\end{definition}
\begin{lemma}
$ d_{max} $ is a metric.
\proof
\begin{enumerate}
\item \[\forall \ x \in V,\ d_{max}(x, x)= \sup_{d \in \cal{D}}d(x,x) = \sup\{ 0 \} = 0 \]
\item \[\forall \ x,\ y \in V,\ d_{max}(x, y)= \sup_{d \in \cal{D} }d(x,y)= \sup_{d \in \cal{D} }d(y,x) =d _{max}(y, x) \]
\item \[ d_{max}(x, z)=\sup_{d \in \mathcal{D}}d(x,z) \leq \sup_{d \in \mathcal{D}}\left(d(x,y)+d(y,z)\right) \leq d_{max}(x,y)+d_{max}(y,z) \quad \text{(each } d \in \mathcal{D} \text{ is a metric)} \]
\end{enumerate}
\end{lemma}
\begin{definition}
An \emph{edge path} is an ordered list (it may be null, in which case the path is a single vertex) of edges, denoted by $ \eta=(e_1,e_2,\dots,e_n) $, such that, $ \forall i,\ 1\leq i < n,\ \tau(e_i)=i(e_{i+1})$. $\eta$ is called ``an edge path from $i(e_1)$ to $\tau(e_n)$''.
\end{definition}
\begin{definition}
An edge path is a \emph{loop} if $i(e_1) = \tau(e_n)$
\end{definition}
\begin{definition}
An edge path is said to be \emph{reduced} if $\forall\ i,\ 1\leq i<n,\ e_i \neq \bar{e}_{i+1}$
\end{definition}
\begin{definition}
A graph $\Gamma$ is \emph{connected} if $\forall\ x,y \in V,\ \exists$ an edge path from x to y.
\end{definition}
\begin{definition}
A connected graph is called a \emph{tree} if it has no reduced edge loops.
\end{definition}
\begin{theorem}
Let $\Gamma$ be a connected graph. Then for x, y $\in V$,
\begin{equation}
d_{max}(x,y)=\min\{n\geq 0 :\ \exists\ \text{an edge path from x to y of cardinality }n\}
\end{equation}
\proof
Let $d_{\Gamma}(x,y)=\min\{n\geq 0 :\ \exists\ \text{an edge path from x to y of cardinality }n\}$.\\
We shall show:
\begin{enumerate}
\item $d_\Gamma$ is a metric
\item $d_\Gamma (i(e),\tau (e))\leq 1$
\item $d_{max}(x,y) \leq d_\Gamma(x,y)$
\end{enumerate}
Now, the first and second points establish that $d_\Gamma \in \mathcal{D} $. Then, $d_{max}$ being the supremum value will imply $d_{max}(x,y) \geq d_\Gamma (x,y)$. Together with 3, this will give the desired result.
\begin{lemma}
$d_\Gamma$ is a metric
\proof
\begin{enumerate}
\item \[d_\Gamma(x,x) = 0\] because the null edge path at the single vertex `x' connects x to itself, so the minimum is attained at cardinality 0.
\item \[d_\Gamma(x,y) = d_\Gamma(y,x)\] Now, in an (undirected) graph, $\eta$ is a valid edge path iff $\bar{\eta}$ is also a valid edge path.
\begin{remark}
\[ \eta = (e_1,e_2,\dots,e_n) \Rightarrow \bar{\eta}=(\bar{e}_n,\bar{e}_{n-1},\dots,\bar{e}_1)\]
If $\eta$ is an edge path from x to y, $\bar{\eta}$ is an edge path from y to x.
\end{remark}
Hence, the involution of the minimal edge path connecting x to y will connect y to x. This will also be the minimal edge path connecting y to x, since otherwise the involution of the shorter path (from y to x) would connect x to y and be shorter than the original path, which leads to a contradiction.
\item If \[
\eta_1=(e_1,\dots,e_n) \text{ is the minimal path connecting x to y}\] and\[
\eta_2=(e_{n+1},\dots,e_m) \text{ is the minimal path connecting y to z}
\]
\begin{remark}
Edges in $\eta_1$ and $\eta_2$ need not be distinct.
\end{remark}
Then,\[
\eta_1 \circ \eta_2 = (e_1,e_2,\dots,e_n,e_{n+1},\dots,e_m) \text{ is an edge path connecting x to z}
\]
Hence the minimal edge path connecting x to z will have to be, by definition, shorter than or the same length as this path. Which means
\[
d_{\Gamma}(x,z)=|\eta_{min}|\leq |\eta_1 \circ \eta_2| = m = d_{\Gamma}(x,y)+d_{\Gamma}(y,z)
\]
\end{enumerate}
This proves that $d_{\Gamma}$ is a metric.
\end{lemma}
\begin{lemma}
$d_\Gamma (i(e),\tau (e))\leq 1$
\proof Since $(e)$ is an edge path connecting $i(e)$ and $\tau(e)$, the minimal edge path will have cardinality less than or equal to this. That is,
\[
d_{\Gamma}(i(e),\tau(e))\leq |(e)| = 1
\]
\end{lemma}
\begin{lemma}
$d_{max}(x,y) \leq d_\Gamma(x,y)$
\proof Let $ d_\Gamma(x,y) = n$.
This means that there exists an edge path $\eta = (e_1,\dots,e_n) $ connecting x to y. Now,
\[
d_{max}(x,y) \leq d_{max}(i(e_1),\tau(e_1)) + \dots + d_{max}(i(e_n),\tau(e_n)) \ \ \ \ \ \ \ \ \ \ \text{[$d_{max}$ is a metric]}
\]
\[
\leq 1 + \dots +1=n \hspace{2in} [d_{max} \in \mathcal{D}]
\]
\end{lemma}
\end{theorem}
\end{document}
|
{"hexsha": "1290f6d412b54adb53d47f0a6cf771c6cda9bf0e", "size": 6954, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/Metric_Graphs.tex", "max_stars_repo_name": "siddhartha-gadgil/MetricGeometryCourse", "max_stars_repo_head_hexsha": "92ec7727f358107a8ad61a7229bc94e2aa9bbafc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2016-12-28T05:53:38.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-20T05:56:59.000Z", "max_issues_repo_path": "src/Metric_Graphs.tex", "max_issues_repo_name": "siddhartha-gadgil/MetricGeometryCourse", "max_issues_repo_head_hexsha": "92ec7727f358107a8ad61a7229bc94e2aa9bbafc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Metric_Graphs.tex", "max_forks_repo_name": "siddhartha-gadgil/MetricGeometryCourse", "max_forks_repo_head_hexsha": "92ec7727f358107a8ad61a7229bc94e2aa9bbafc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1925465839, "max_line_length": 301, "alphanum_fraction": 0.5956284153, "num_tokens": 2473}
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def test_plotting():
station = "test_station"
dates = [2019, 2020, 2021, 2022]
levels = [12, 18, 45, 90]
plt.plot(dates, levels)
plt.xlabel('date')
plt.ylabel('water level (m)')
plt.xticks(rotation=45)
plt.title(station)
assert plt.plot is not None
|
{"hexsha": "e189bcc454e2fc56b5132714fa2cf651d3a9cee9", "size": 377, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_plot.py", "max_stars_repo_name": "reib2/Lab-3-Flood-Warning", "max_stars_repo_head_hexsha": "9f86b4b8a7fa9508ddaa0e9754d64ff6c4e38f66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test_plot.py", "max_issues_repo_name": "reib2/Lab-3-Flood-Warning", "max_issues_repo_head_hexsha": "9f86b4b8a7fa9508ddaa0e9754d64ff6c4e38f66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_plot.py", "max_forks_repo_name": "reib2/Lab-3-Flood-Warning", "max_forks_repo_head_hexsha": "9f86b4b8a7fa9508ddaa0e9754d64ff6c4e38f66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-01T23:24:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-01T23:24:15.000Z", "avg_line_length": 18.85, "max_line_length": 36, "alphanum_fraction": 0.6445623342, "include": true, "reason": "import numpy", "num_tokens": 107}
|
"""
Crossmap class
"""
from contextlib import suppress
from yaml import dump
from logging import info, warning, error
from os import mkdir, remove
from os.path import exists
from shutil import rmtree
from scipy.sparse import vstack
from .settings import CrossmapSettings
from .dbmongo import CrossmapMongoDB as CrossmapDB
from .indexer import CrossmapIndexer
from .diffuser import CrossmapDiffuser
from .vectors import csr_residual
from .vectors import vec_decomposition as vec_decomp
from .csr import FastCsrMatrix, dimcollapse_csr
from .tools import open_file, yaml_document, time
def _search_result(ids, distances, name):
"""structure an object describing a nearest-neighbor search
:param ids: list of string identifiers
:param distances: list of numeric values
:param name: query name included in the output dictionary
:return: dictionary, contain lists with ids and distances,
ordered by distance from smallest to largerst
"""
return dict(query=name, targets=ids, distances=distances)
def _decomposition_result(ids, weights, name):
"""structure an object describing a greedy-nearest-decomposition"""
return dict(query=name, targets=ids, coefficients=weights)
def _ranked_decomposition(coefficients, ids):
"""order paired values+strings, removing zeros
:param coefficients: csr_matrix, one-column vertical vector
:param ids: list of string identifiers
:return: a list of coefficients, a list of matched identifiers.
Output is ranked in decreasing order; zero-valued items are removed
"""
temp = [_ for _ in coefficients[:, 0]]
coefficients, ids = zip(*sorted(zip(temp, ids), reverse=True))
zeros = [i for i, v in enumerate(coefficients) if v == 0]
if len(zeros) == 0:
return coefficients, ids
ids = [_ for i, _ in enumerate(ids) if i not in zeros]
coefficients = [_ for i, _ in enumerate(coefficients) if i not in zeros]
return coefficients, ids
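# Illustrative example (added commentary): given a column of coefficients
# [[1.0], [0.0], [2.5]] and ids ["a", "b", "c"], the function sorts to
# (2.5, "c"), (1.0, "a"), (0.0, "b"), drops the zero entry, and returns
# ([2.5, 1.0], ["c", "a"]).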
def remove_db_and_files(settings):
"""clean up database and data files (leaves settings object)"""
if type(settings) is str:
settings = CrossmapSettings(settings)
db = CrossmapDB(settings)
info("Removing database: " + db.db_name)
db.remove()
info("Removing directory: " + settings.prefix)
if exists(settings.prefix):
with suppress(OSError):
rmtree(settings.prefix)
class Crossmap:
def __init__(self, settings):
"""a standard crossmap object.
:param settings: a CrossmapSettings object, or a path to a
configuration file
"""
if type(settings) is str:
settings = CrossmapSettings(settings)
self.settings = settings
self.indexer = None
self.db = None
if not settings.valid:
return
if not exists(settings.prefix):
mkdir(settings.prefix)
self.db = CrossmapDB(settings)
self.indexer = CrossmapIndexer(settings, db=self.db)
self.db = self.indexer.db
self.encoder = self.indexer.encoder
self.diffuser = None
# determine a default dataset for querying
self.default_label = settings.data.default
if self.default_label is None:
if len(self.db.datasets):
self.default_label = list(self.db.datasets.keys())[0]
else:
raise Exception("could not determine default dataset")
@property
def valid(self):
"""get a boolean stating whether settings are valid"""
if self.indexer is None:
return False
if not self.settings.valid:
return False
return self.indexer.valid
def build(self):
"""create indexes and auxiliary objects"""
if not self.settings.valid:
return
self.indexer.build()
self.db.index("data")
self.diffuser = CrossmapDiffuser(self.settings, db=self.db)
self.diffuser.build()
self.db.index("counts")
info("done")
def load(self):
"""load indexes from prepared files"""
self.indexer.load()
self.diffuser = CrossmapDiffuser(self.settings, self.db)
def remove(self, dataset):
"""remove a dataset, or entire instance"""
self.db.remove_dataset(dataset)
dataset_files = [self.settings.yaml_file(dataset),
self.settings.index_file(dataset),
self.settings.index_dat_file(dataset)]
for f in dataset_files:
if exists(f):
remove(f)
# reload the instance (updates indexer, diffuser)
self.load()
def add(self, dataset, doc, id, metadata=None, rebuild=True):
"""add a new item into the db
:param dataset: string, name of dataset to append
:param doc: dict with string data
:param id: string, identifier for new item
:param metadata: dict, a free element of additional information
:param rebuild: logical, set True to rebuild nearest-neighbor indexing
:return: an integer index assigned to the newly added item
"""
if dataset in self.settings.data.collections:
raise Exception("cannot add to file-based datasets")
label_status = self.db.validate_dataset_label(dataset)
if label_status < 0:
raise Exception("invalid dataset label: " + str(dataset))
if label_status:
info("Registering dataset: " + str(dataset))
self.db.register_dataset(dataset)
# update the db structures (indexing and diffusion)
idx = self.indexer.update(dataset, doc, id, rebuild=rebuild)
self.diffuser.update(dataset, [idx])
# record the item in a disk file
# (preserve existing metadata fields, perhaps add new fields)
if "metadata" not in doc or type(doc["metadata"]) is not dict:
doc["metadata"] = dict()
if metadata is not None:
for k, v in metadata.items():
doc["metadata"][k] = v
if "timestamp" not in doc["metadata"]:
doc["metadata"]["timestamp"] = time()
with open(self.settings.yaml_file(dataset), "at") as f:
f.write(dump({id: doc}))
return idx
def add_file(self, dataset, filepath):
"""transfer items from a data file into a new dataset in the db
:param dataset: string, name of the dataset to create or extend
:param filepath: string, path to a file with yaml documents
:return: list with the added ids
"""
result = []
with open_file(filepath, "rt") as f:
for id, doc in yaml_document(f):
result.append(self.add(dataset, doc, id, rebuild=False))
info("Added "+str(len(result)) + " entries")
self.indexer.rebuild_index(dataset)
return result
def _prep_vector(self, doc, diffusion=None):
"""prepare text document into sparse vectors
:param doc: dictionary with component data, data_pos, etc.
:param diffusion: dictionary with diffusion strengths
:return: two csr vectors, a raw encoding and a diffused encoding
"""
v = self.encoder.document(doc)
if diffusion is None:
return v, v
return v, self.diffuser.diffuse(v, diffusion)
def diffuse(self, doc, diffusion=None, query_name="", **kwargs):
"""provide an explanation for diffusion of a document into features
:param doc: dict-like object with "data", "data_pos" and "data_neg"
:param diffusion: dict, map assigning diffusion weights
:param query_name: character, a name for the document
:param kwargs: other keyword arguments, ignored
(This is included for consistency with search() and decompose())
:return: a dictionary containing an id, and feature weights
"""
inv_feature_map = self.encoder.inv_feature_map
raw, diffused = self._prep_vector(doc, diffusion)
data = []
for i, d in zip(diffused.indices, diffused.data):
data.append([abs(d), round(d, 6), inv_feature_map[i]])
data = sorted(data, reverse=True)
data = [{"feature": v[2], "value": v[1]} for i, v in enumerate(data)]
return dict(query=query_name, features=data)
def search(self, doc, dataset, n=3, diffusion=None, query_name="query",
**kwargs):
"""identify targets that are close to the input query
:param doc: dict-like object with "data", "data_pos" and "data_neg"
:param dataset: string, identifier for dataset to look for targets
:param n: integer, number of target to report
:param diffusion: dict, map assigning diffusion weights
:param query_name: character, a name for the document
:param kwargs: other keyword arguments, ignored
(This is included for consistency with diffuse() and decompose())
:return: a dictionary containing an id, and lists to target ids and
distances
"""
raw, diffused = self._prep_vector(doc, diffusion)
if len(raw.data) == 0:
return _search_result([], [], query_name)
suggest = self.indexer.suggest
targets, distances = suggest(diffused, dataset, n)
return _search_result(targets, distances, query_name)
def decompose(self, doc, dataset, n=3, diffusion=None,
factors=None,
query_name="query"):
"""decompose of a query document in terms of targets
:param doc: dict-like object with "data", "data_pos" and "data_neg"
:param dataset: string, identifier for dataset
:param n: integer, number of target to report
:param diffusion: dict, strength of diffusion on primary data
:param factors: list with item ids that must be included in the
decomposition
:param query_name: character, a name for the document
:return: dictionary containing an id, and list with target ids and
decomposition coefficients
"""
# shortcuts
suggest = self.indexer.suggest
get_data = self.indexer.db.get_data
# representation of the query document, raw and diffused
q_raw, q = self._prep_vector(doc, diffusion)
q_indexes = set(q_raw.indices)
q_dense = q.toarray()
# loop for greedy decomposition
factors = factors if factors is not None else []
num_factors = len(factors)
ids, coefficients, components = [], [], []
q_residual = q
while len(components) < n and len(q_residual.data) > 0:
# use a suggested factors, or find a new factor via search
if len(components) < num_factors:
target = [factors[len(components)]]
else:
target, _ = suggest(q_residual, dataset, 1)
# residual mapped back onto an existing hit? quit early
if len(target) == 0 or target[0] in ids:
break
target_data = get_data(dataset, ids=target)
ids.append(target[0])
target_vec = target_data[0]["data"]
components.append(dimcollapse_csr(target_vec, q_indexes))
basis = vstack(components)
q_modeled = q_dense
if set(basis.indices) != q_indexes:
q_modeled = dimcollapse_csr(q, set(basis.indices)).toarray()
coefficients = vec_decomp(q_modeled, basis.toarray())
if coefficients[-1] == 0:
break
weights = FastCsrMatrix(coefficients)
q_residual = csr_residual(q, basis, weights)
# order the coefficients (decreasing size, most important first)
if len(coefficients) > 0:
# re-do decomposition using the entire q vector
coefficients = vec_decomp(q_dense, basis.toarray())
first_id = ids[0]
coefficients, ids = _ranked_decomposition(coefficients, ids)
if len(ids) == 0:
coefficients, ids = [0.0], [first_id]
return _decomposition_result(ids, coefficients, query_name)
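# A hedged usage sketch (the file name and dataset label below are
# illustrative, not taken from the original project):
#   cm = Crossmap("config.yaml"); cm.load()
#   cm.decompose({"data": "some query text"}, dataset="targets", n=2)
# returns a dict of the form
#   {"query": "query", "targets": [...], "coefficients": [...]}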
def search_file(self, filepath, dataset, n, diffusion=None,
**kwargs):
"""find nearest targets for all documents in a file
:param filepath: string, path to a file with documents
:param dataset: string, identifier for target dataset
:param n: integer, number of target to report for each input
:param diffusion: dict, map with diffusion strengths
:param kwargs: other keyword arguments, ignored
(This is included for consistency with decompose_file())
:return: list with dicts, each as output by search()
"""
return _action_file(self.search, filepath, dataset=dataset,
n=n, diffusion=diffusion)
def decompose_file(self, filepath, dataset, n=3, diffusion=None,
factors=None):
"""perform decomposition for documents defined in a file
:param filepath: string, path to a file with documents
:param dataset: string, identifier for target dataset
:param n: integer, number of target to report for each input
:param diffusion: dict, map with diffusion strengths
:param factors: list with item ids that must be included in the
decomposition
:return: list with dicts, each as output by decompose()
"""
return _action_file(self.decompose, filepath, dataset=dataset,
n=n, diffusion=diffusion, factors=factors)
def validate_dataset_label(crossmap, label=None, log=True):
"""check for a valid dataset label
:param crossmap: object of class Crossmap
:param label: string, a dataset identifier, set None to get default
:param log: boolean, toggle log messages
:return: a validated dataset string, a default label if None is specified,
or None if specified label is invalid
"""
if label is None:
label = str(crossmap.default_label)
if log:
warning("using default dataset: " + label)
if label not in crossmap.db.datasets:
if log:
error("dataset label is not valid: " + label)
label = None
return label
def _action_file(action, filepath, **kw):
"""applies an action function to contents of a file
:param action: function
:param filepath: string, path to a file with yaml documents
:param kw: keyword arguments, all passed on to action
:return: list with result of action function on the documents in the file
"""
result = []
if filepath is None:
return result
with open_file(filepath, "rt") as f:
for id, doc in yaml_document(f):
if type(doc) is not dict:
error("invalid document type: "+str(id))
break
result.append(action(doc, **kw, query_name=id))
return result
|
{"hexsha": "fc056ccf3b2eb196b50cedd77ca90723e19f0910", "size": 14916, "ext": "py", "lang": "Python", "max_stars_repo_path": "crossmap/crossmap.py", "max_stars_repo_name": "tkonopka/crossmap", "max_stars_repo_head_hexsha": "237e4319a77281490c4e037918977230fea43d7e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-12T11:40:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-12T11:40:10.000Z", "max_issues_repo_path": "crossmap/crossmap.py", "max_issues_repo_name": "tkonopka/crossmap", "max_issues_repo_head_hexsha": "237e4319a77281490c4e037918977230fea43d7e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "crossmap/crossmap.py", "max_forks_repo_name": "tkonopka/crossmap", "max_forks_repo_head_hexsha": "237e4319a77281490c4e037918977230fea43d7e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3444730077, "max_line_length": 78, "alphanum_fraction": 0.631536605, "include": true, "reason": "from scipy", "num_tokens": 3213}
|
from utils import gif
from skimage import io, color
import os
import cv2
import nrrd
import numpy as np
import argparse
from parse_config import ConfigParser
import torch
from dataloader.preprocessor import BUSIDataProcessor
from model import models
from utils import prepare_device
def get_prediction(model, device, path_to_image=None):
if path_to_image:
#image = cv2.imread(path_to_image, cv2.IMREAD_GRAYSCALE)
image = io.imread(path_to_image)
dataset = BUSIDataProcessor(imgs_dir=None, masks_dir=None)
processed = dataset.preprocess(image, resize_img=True, expand_channel=False, adjust_label=False, normalize=True)
img = torch.from_numpy(np.expand_dims(processed, axis=0))
with torch.no_grad():
model.eval()
data = img.to(device, dtype=torch.float)
mask = model(data)
mask_thresh = mask > 0.5
image_pred = (mask_thresh.cpu().numpy() * 255)
image_pred = image_pred.astype(np.uint8)
return image_pred[0, 0]
def segment_3d_input(model, device, path_to_image):
if path_to_image:
# Read from directory
image_list = os.listdir(path_to_image)
segmented_volume = []
for image in image_list:
img_path = os.path.join(path_to_image, image)
img = io.imread(img_path)
mask = get_prediction(model, device, img_path)
segmented_img = apply_mask_to_image(img, mask)
segmented_volume.append(segmented_img)
return segmented_volume
else:
print("Image Path Required!")
def apply_mask_to_image(img, mask):
"""
Overlay the contours of the mask on the input image.
"""
img_size = img.shape[0]
mask = cv2.resize(mask, dsize=(img_size, img_size))
# Find contour of the mask
imgray = mask
ret,thresh = cv2.threshold(imgray, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Draw contours on image
segmented_img = cv2.drawContours(img, contours, -1, (0,255,0), 3)
return segmented_img
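# Example usage (illustrative; assumes img is an RGB frame and mask a uint8
# binary mask): overlay = apply_mask_to_image(img.copy(), mask) draws the
# detected contours in green on a copy of the frame, since cv2.drawContours
# modifies its input in place.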
def saveImgWithSegmentations(segmented_volume, volume_name, model_name, save_dir):
"""
Save the 3d ultrasound with segmentations as nrrd and gif (for better visualization)
"""
# Convert from RGB to grayscale
gray3d = []
for i in range(len(segmented_volume)):
gray3d.append(color.rgb2gray(segmented_volume[i]))
gray3d = np.asarray(gray3d)
gray3d = np.transpose(gray3d, (2, 1, 0))
filename = volume_name + '_' + model_name
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
# Save as nrrd
#nrrd.write(filename=save_dir+filename+'.nrrd', data=gray3d)
#print('Successfully save results as nrrd.')
# Save as gif (fps=5 is recommended)
img_seq = gray3d.transpose((2, 1, 0)) * 255.0
img_seq = img_seq.astype(np.uint8)
if gif(save_dir+filename, img_seq, fps=5):
print('Successfully save results as gif.')
if __name__ == "__main__":
# Load checkpoints for inference
path_to_checkpoint = '/content/drive/MyDrive/exp_results/models/ResUNet/0422_001740/checkpoint-epoch100.pth'
checkpoint = torch.load(path_to_checkpoint)
# Select test images
path_to_image = '/content/drive/MyDrive/data/sample_test_volumes/'
volume_list = sorted(os.listdir(path_to_image))
# Initialize model
args = argparse.ArgumentParser(description='Inference configuration')
args.add_argument('--config', type=str, default='options/default.json',
help='config path to correct model architecture')
args.add_argument('--device', type=str, default=None)
args.add_argument('--resume', type=str, default=None)
config = ConfigParser.from_args(args)
# build model architecture, load checkpoints
model = config.init_obj('arch', models)
model.load_state_dict(checkpoint['state_dict'])
model_name = config['name']
# prepare for GPU environment
device, device_ids = prepare_device(config['n_gpu'])
model = model.to(device)
if len(device_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=device_ids)
for volume_name in volume_list:
# Inference on test images
prediction_3d = segment_3d_input(model, device, path_to_image+volume_name)
# Save results as (nrrd &) gif
saveImgWithSegmentations(prediction_3d,
volume_name,
model_name,
save_dir='/content/drive/MyDrive/exp_results/test/')
|
{"hexsha": "28d67566830aaf3710399d9449298f16cfe5e506", "size": 4662, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "tqxli/breast_ultrasound_lesion_segmentation_PyTorch", "max_stars_repo_head_hexsha": "d378a624a12b6156364f4f72c3fc60cc0c47f6f0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-05-02T15:32:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T01:39:42.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "tqxli/breast_ultrasound_lesion_segmentation_PyTorch", "max_issues_repo_head_hexsha": "d378a624a12b6156364f4f72c3fc60cc0c47f6f0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-12-28T02:57:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-24T14:23:55.000Z", "max_forks_repo_path": "test.py", "max_forks_repo_name": "tqxli/breast_ultrasound_lesion_segmentation_PyTorch", "max_forks_repo_head_hexsha": "d378a624a12b6156364f4f72c3fc60cc0c47f6f0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-07-17T03:50:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T12:48:30.000Z", "avg_line_length": 35.5877862595, "max_line_length": 120, "alphanum_fraction": 0.6643071643, "include": true, "reason": "import numpy", "num_tokens": 1105}
|
#ifndef BOOST_DETAIL_ATOMIC_GCC_PPC_HPP
#define BOOST_DETAIL_ATOMIC_GCC_PPC_HPP
// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/atomic/detail/base.hpp>
/*
Refer to: Motorola: "Programming Environments Manual for 32-Bit
Implementations of the PowerPC Architecture", Appendix E:
"Synchronization Programming Examples" for an explanation of what is
going on here (can be found on the web at various places by the
name "MPCFPE32B.pdf", Google is your friend...)
Most of the atomic operations map to instructions in a relatively
straightforward fashion, but "load"s may at first glance appear
a bit strange as they map to:
lwz %rX, addr
cmpw %rX, %rX
bne- 1f
1:
That is, the CPU is forced to perform a branch that "formally" depends
on the value retrieved from memory. This scheme has an overhead of
about 1-2 clock cycles per load, but it allows "acquire" to be mapped
uniformly to the "isync" instruction instead of "sync" for all types
of atomic operations. Since "isync" has a cost of about 15 clock
cycles, while "sync" has a cost of about 50 clock cycles, the small
penalty to atomic loads more than compensates for this.
Byte- and halfword-sized atomic values are realized by encoding the
value to be represented into a word, performing sign/zero extension
as appropriate. This means that after add/sub operations the value
needs fixing up to accurately preserve the wrap-around semantic of
the smaller type. (Nothing special needs to be done for the bit-wise
and the "exchange type" operators as the compiler already sees to
it that values carried in registers are extended appropriately and
everything falls into place naturally).
The register constraint "b" instructs gcc to use any register
except r0; this is sometimes required because the encoding for
r0 is used to signify "constant zero" in a number of instructions,
making r0 unusable in this place. For simplicity this constraint
is used everywhere since I am too lazy to look this up on a
per-instruction basis, and ppc has enough registers for this not
to pose a problem.
*/
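/* Illustration (not part of the original header): for a signed byte, the
fetch_add loop below realizes the fix-up described above roughly as
1: lwarx %r9, <addr> load the containing word and take a reservation
add %r9, %r9, %r4 add the operand
extsb %r9, %r9 sign-extend the low byte to preserve wrap-around
stwcx. %r9, <addr> store conditionally; retry from 1: on failure
*/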
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
#define BOOST_ATOMIC_INT_LOCK_FREE 2
#define BOOST_ATOMIC_LONG_LOCK_FREE 2
#define BOOST_ATOMIC_ADDRESS_LOCK_FREE 2
#if defined(__powerpc64__)
#define BOOST_ATOMIC_LLONG_LOCK_FREE 2
#else
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
#endif
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
/* We would like to move the slow path of a failed compare_exchange
(the one that clears the "success" bit) out-of-line. gcc can in
principle do that using ".subsection"/".previous", but Apple's
binutils seemingly does not understand that. Therefore we wrap
the "clear" of the flag in a macro and leave it
in-line for Apple.
*/
#if !defined(__APPLE__)
#define BOOST_ATOMIC_ASM_SLOWPATH_CLEAR \
"1:\n" \
".subsection 2\n" \
"2: addi %1,0,0\n" \
"b 1b\n" \
".previous\n" \
#else
#define BOOST_ATOMIC_ASM_SLOWPATH_CLEAR \
"b 1f\n" \
"2: addi %1,0,0\n" \
"1:\n" \
#endif
namespace boost {
namespace detail {
namespace atomic {
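// Fence emitted before an atomic operation: release/acq_rel use lwsync on
// ppc64 (falling through to a full sync on 32-bit), seq_cst always uses sync.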
static inline void
ppc_fence_before(memory_order order)
{
switch(order) {
case memory_order_release:
case memory_order_acq_rel:
#if defined(__powerpc64__)
__asm__ __volatile__ ("lwsync" ::: "memory");
break;
#endif
case memory_order_seq_cst:
__asm__ __volatile__ ("sync" ::: "memory");
default:;
}
}
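// Fence emitted after an atomic load: isync for acquire and stronger orders,
// followed by the compiler-only barrier that also covers consume.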
static inline void
ppc_fence_after(memory_order order)
{
switch(order) {
case memory_order_acquire:
case memory_order_acq_rel:
case memory_order_seq_cst:
__asm__ __volatile__ ("isync");
case memory_order_consume:
__asm__ __volatile__ ("" ::: "memory");
default:;
}
}
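// Fence emitted after a plain store: only seq_cst requires a trailing sync.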
static inline void
ppc_fence_after_store(memory_order order)
{
switch(order) {
case memory_order_seq_cst:
__asm__ __volatile__ ("sync");
default:;
}
}
/* integral types */
template<typename T>
class base_atomic<T, int, 1, true> {
typedef base_atomic this_type;
typedef T value_type;
typedef int32_t storage_type;
typedef T difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
ppc_fence_before(order);
__asm__ (
"stw %1, %0\n"
: "+m"(v_)
: "r" (v)
);
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v;
__asm__ __volatile__ (
"lwz %0, %1\n"
"cmpw %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=&r" (v)
: "m" (v_)
);
ppc_fence_after(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y1\n"
"stwcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (v)
: "cr0"
);
ppc_fence_after(order);
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"0: lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"add %1,%0,%3\n"
"extsb %1, %1\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"sub %1,%0,%3\n"
"extsb %1, %1\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"and %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"or %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"xor %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T>
class base_atomic<T, int, 1, false> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
typedef T difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
ppc_fence_before(order);
__asm__ (
"stw %1, %0\n"
: "+m"(v_)
: "r" (v)
);
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v;
__asm__ __volatile__ (
"lwz %0, %1\n"
"cmpw %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=&r" (v)
: "m" (v_)
);
ppc_fence_after(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y1\n"
"stwcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (v)
: "cr0"
);
ppc_fence_after(order);
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"0: lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"add %1,%0,%3\n"
"rlwinm %1, %1, 0, 0xff\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"sub %1,%0,%3\n"
"rlwinm %1, %1, 0, 0xff\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"and %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"or %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"xor %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T>
class base_atomic<T, int, 2, true> {
typedef base_atomic this_type;
typedef T value_type;
typedef int32_t storage_type;
typedef T difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
ppc_fence_before(order);
__asm__ (
"stw %1, %0\n"
: "+m"(v_)
: "r" (v)
);
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v;
__asm__ __volatile__ (
"lwz %0, %1\n"
"cmpw %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=&r" (v)
: "m" (v_)
);
ppc_fence_after(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y1\n"
"stwcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (v)
: "cr0"
);
ppc_fence_after(order);
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"0: lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"add %1,%0,%3\n"
"extsh %1, %1\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"sub %1,%0,%3\n"
"extsh %1, %1\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"and %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"or %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"xor %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T>
class base_atomic<T, int, 2, false> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
typedef T difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
ppc_fence_before(order);
__asm__ (
"stw %1, %0\n"
: "+m"(v_)
: "r" (v)
);
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v;
__asm__ __volatile__ (
"lwz %0, %1\n"
"cmpw %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=&r" (v)
: "m" (v_)
);
ppc_fence_after(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y1\n"
"stwcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (v)
: "cr0"
);
ppc_fence_after(order);
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"0: lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"add %1,%0,%3\n"
"rlwinm %1, %1, 0, 0xffff\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"sub %1,%0,%3\n"
"rlwinm %1, %1, 0, 0xffff\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"and %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"or %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"xor %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, int, 4, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
ppc_fence_before(order);
const_cast<volatile value_type &>(v_) = v;
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
__asm__ __volatile__ (
"cmpw %0, %0\n"
"bne- 1f\n"
"1:\n"
: "+b"(v)
:
: "cr0"
);
ppc_fence_after(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y1\n"
"stwcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (v)
: "cr0"
);
ppc_fence_after(order);
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"0: lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"add %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"sub %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"and %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"or %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"xor %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
#if defined(__powerpc64__)
template<typename T, bool Sign>
class base_atomic<T, int, 8, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef T difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
ppc_fence_before(order);
const_cast<volatile value_type &>(v_) = v;
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v = const_cast<const volatile value_type &>(v_);
__asm__ __volatile__ (
"cmpd %0, %0\n"
"bne- 1f\n"
"1:\n"
: "+b"(v)
:
: "cr0"
);
ppc_fence_after(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original;
ppc_fence_before(order);
__asm__ (
"1:\n"
"ldarx %0,%y1\n"
"stdcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (v)
: "cr0"
);
ppc_fence_after(order);
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"ldarx %0,%y2\n"
"cmpd %0, %3\n"
"bne- 2f\n"
"stdcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"0: ldarx %0,%y2\n"
"cmpd %0, %3\n"
"bne- 2f\n"
"stdcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
value_type
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"ldarx %0,%y2\n"
"add %1,%0,%3\n"
"stdcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"ldarx %0,%y2\n"
"sub %1,%0,%3\n"
"stdcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"ldarx %0,%y2\n"
"and %1,%0,%3\n"
"stdcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"ldarx %0,%y2\n"
"or %1,%0,%3\n"
"stdcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"ldarx %0,%y2\n"
"xor %1,%0,%3\n"
"stdcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
#endif
/* pointer types */
#if !defined(__powerpc64__)
template<bool Sign>
class base_atomic<void *, void *, 4, Sign> {
typedef base_atomic this_type;
typedef void * value_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
ppc_fence_before(order);
__asm__ (
"stw %1, %0\n"
: "+m" (v_)
: "r" (v)
);
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v;
__asm__ (
"lwz %0, %1\n"
"cmpw %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=r"(v)
: "m"(v_)
: "cr0"
);
ppc_fence_after(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y1\n"
"stwcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (v)
: "cr0"
);
ppc_fence_after(order);
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"0: lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
template<typename T, bool Sign>
class base_atomic<T *, void *, 4, Sign> {
typedef base_atomic this_type;
typedef T * value_type;
typedef ptrdiff_t difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
ppc_fence_before(order);
__asm__ (
"stw %1, %0\n"
: "+m" (v_)
: "r" (v)
);
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v;
__asm__ (
"lwz %0, %1\n"
"cmpw %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=r"(v)
: "m"(v_)
: "cr0"
);
ppc_fence_after(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y1\n"
"stwcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (v)
: "cr0"
);
ppc_fence_after(order);
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"0: lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
value_type
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
v = v * sizeof(*v_);
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"add %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
v = v * sizeof(*v_);
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y2\n"
"sub %1,%0,%3\n"
"stwcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
#else
template<bool Sign>
class base_atomic<void *, void *, 8, Sign> {
typedef base_atomic this_type;
typedef void * value_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
ppc_fence_before(order);
__asm__ (
"std %1, %0\n"
: "+m" (v_)
: "r" (v)
);
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v;
__asm__ (
"ld %0, %1\n"
"cmpd %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=r"(v)
: "m"(v_)
: "cr0"
);
ppc_fence_after(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original;
ppc_fence_before(order);
__asm__ (
"1:\n"
"ldarx %0,%y1\n"
"stdcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (v)
: "cr0"
);
ppc_fence_after(order);
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"ldarx %0,%y2\n"
"cmpd %0, %3\n"
"bne- 2f\n"
"stdcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"0: ldarx %0,%y2\n"
"cmpd %0, %3\n"
"bne- 2f\n"
"stdcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
template<typename T, bool Sign>
class base_atomic<T *, void *, 8, Sign> {
typedef base_atomic this_type;
typedef T * value_type;
typedef ptrdiff_t difference_type;
public:
explicit base_atomic(value_type v) : v_(v) {}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
ppc_fence_before(order);
__asm__ (
"std %1, %0\n"
: "+m" (v_)
: "r" (v)
);
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
value_type v;
__asm__ (
"ld %0, %1\n"
"cmpd %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=r"(v)
: "m"(v_)
: "cr0"
);
ppc_fence_after(order);
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
value_type original;
ppc_fence_before(order);
__asm__ (
"1:\n"
"ldarx %0,%y1\n"
"stdcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (v)
: "cr0"
);
ppc_fence_after(order);
return original;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"ldarx %0,%y2\n"
"cmpd %0, %3\n"
"bne- 2f\n"
"stdcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
int success;
ppc_fence_before(success_order);
__asm__(
"0: ldarx %0,%y2\n"
"cmpd %0, %3\n"
"bne- 2f\n"
"stdcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected), "=&b" (success), "+Z"(v_)
: "b" (expected), "b" (desired)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
return success;
}
value_type
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
v = v * sizeof(*v_);
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"ldarx %0,%y2\n"
"add %1,%0,%3\n"
"stdcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
value_type
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
{
v = v * sizeof(*v_);
value_type original, tmp;
ppc_fence_before(order);
__asm__ (
"1:\n"
"ldarx %0,%y2\n"
"sub %1,%0,%3\n"
"stdcx. %1,%y2\n"
"bne- 1b\n"
: "=&b" (original), "=&b" (tmp), "+Z"(v_)
: "b" (v)
: "cc");
ppc_fence_after(order);
return original;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
value_type v_;
};
#endif
/* generic */
template<typename T, bool Sign>
class base_atomic<T, void, 1, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
public:
explicit base_atomic(value_type v) : v_(0)
{
memcpy(&v_, &v, sizeof(value_type));
}
base_atomic(void) : v_(0) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp = 0;
memcpy(&tmp, &v, sizeof(value_type));
ppc_fence_before(order);
__asm__ (
"stw %1, %0\n"
: "+m" (v_)
: "r" (tmp)
);
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type tmp;
__asm__ __volatile__ (
"lwz %0, %1\n"
"cmpw %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=r"(tmp)
: "m"(v_)
: "cr0"
);
ppc_fence_after(order);
value_type v;
memcpy(&v, &tmp, sizeof(value_type));
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp = 0, original;
memcpy(&tmp, &v, sizeof(value_type));
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y1\n"
"stwcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (tmp)
: "cr0"
);
ppc_fence_after(order);
memcpy(&v, &original, sizeof(value_type));
return v;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
int success;
ppc_fence_before(success_order);
__asm__(
"lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected_s), "=&b" (success), "+Z"(v_)
: "b" (expected_s), "b" (desired_s)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
int success;
ppc_fence_before(success_order);
__asm__(
"0: lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected_s), "=&b" (success), "+Z"(v_)
: "b" (expected_s), "b" (desired_s)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, void, 2, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
public:
explicit base_atomic(value_type v) : v_(0)
{
memcpy(&v_, &v, sizeof(value_type));
}
base_atomic(void) : v_(0) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp = 0;
memcpy(&tmp, &v, sizeof(value_type));
ppc_fence_before(order);
__asm__ (
"stw %1, %0\n"
: "+m" (v_)
: "r" (tmp)
);
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type tmp;
__asm__ __volatile__ (
"lwz %0, %1\n"
"cmpw %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=r"(tmp)
: "m"(v_)
: "cr0"
);
ppc_fence_after(order);
value_type v;
memcpy(&v, &tmp, sizeof(value_type));
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp = 0, original;
memcpy(&tmp, &v, sizeof(value_type));
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y1\n"
"stwcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (tmp)
: "cr0"
);
ppc_fence_after(order);
memcpy(&v, &original, sizeof(value_type));
return v;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
int success;
ppc_fence_before(success_order);
__asm__(
"lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected_s), "=&b" (success), "+Z"(v_)
: "b" (expected_s), "b" (desired_s)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
int success;
ppc_fence_before(success_order);
__asm__(
"0: lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected_s), "=&b" (success), "+Z"(v_)
: "b" (expected_s), "b" (desired_s)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
template<typename T, bool Sign>
class base_atomic<T, void, 4, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint32_t storage_type;
public:
explicit base_atomic(value_type v) : v_(0)
{
memcpy(&v_, &v, sizeof(value_type));
}
base_atomic(void) : v_(0) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp = 0;
memcpy(&tmp, &v, sizeof(value_type));
ppc_fence_before(order);
__asm__ (
"stw %1, %0\n"
: "+m" (v_)
: "r" (tmp)
);
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type tmp;
__asm__ __volatile__ (
"lwz %0, %1\n"
"cmpw %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=r"(tmp)
: "m"(v_)
: "cr0"
);
ppc_fence_after(order);
value_type v;
memcpy(&v, &tmp, sizeof(value_type));
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp = 0, original;
memcpy(&tmp, &v, sizeof(value_type));
ppc_fence_before(order);
__asm__ (
"1:\n"
"lwarx %0,%y1\n"
"stwcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (tmp)
: "cr0"
);
ppc_fence_after(order);
memcpy(&v, &original, sizeof(value_type));
return v;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
int success;
ppc_fence_before(success_order);
__asm__(
"lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected_s), "=&b" (success), "+Z"(v_)
: "b" (expected_s), "b" (desired_s)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s = 0, desired_s = 0;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
int success;
ppc_fence_before(success_order);
__asm__(
"0: lwarx %0,%y2\n"
"cmpw %0, %3\n"
"bne- 2f\n"
"stwcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected_s), "=&b" (success), "+Z"(v_)
: "b" (expected_s), "b" (desired_s)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
#if defined(__powerpc64__)
template<typename T, bool Sign>
class base_atomic<T, void, 8, Sign> {
typedef base_atomic this_type;
typedef T value_type;
typedef uint64_t storage_type;
public:
explicit base_atomic(value_type v)
{
memcpy(&v_, &v, sizeof(value_type));
}
base_atomic(void) {}
void
store(value_type v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp;
memcpy(&tmp, &v, sizeof(value_type));
ppc_fence_before(order);
__asm__ (
"std %1, %0\n"
: "+m" (v_)
: "r" (tmp)
);
ppc_fence_after_store(order);
}
value_type
load(memory_order order = memory_order_seq_cst) const volatile
{
storage_type tmp;
__asm__ __volatile__ (
"ld %0, %1\n"
"cmpd %0, %0\n"
"bne- 1f\n"
"1:\n"
: "=r"(tmp)
: "m"(v_)
: "cr0"
);
ppc_fence_after(order);
value_type v;
memcpy(&v, &tmp, sizeof(value_type));
return v;
}
value_type
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
{
storage_type tmp = 0, original;
memcpy(&tmp, &v, sizeof(value_type));
ppc_fence_before(order);
__asm__ (
"1:\n"
"ldarx %0,%y1\n"
"stdcx. %2,%y1\n"
"bne- 1b\n"
: "=&b" (original), "+Z"(v_)
: "b" (tmp)
: "cr0"
);
ppc_fence_after(order);
memcpy(&v, &original, sizeof(value_type));
return v;
}
bool
compare_exchange_weak(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s, desired_s;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
int success;
ppc_fence_before(success_order);
__asm__(
"ldarx %0,%y2\n"
"cmpd %0, %3\n"
"bne- 2f\n"
"stdcx. %4,%y2\n"
"bne- 2f\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected_s), "=&b" (success), "+Z"(v_)
: "b" (expected_s), "b" (desired_s)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
return success;
}
bool
compare_exchange_strong(
value_type & expected,
value_type desired,
memory_order success_order,
memory_order failure_order) volatile
{
storage_type expected_s, desired_s;
memcpy(&expected_s, &expected, sizeof(value_type));
memcpy(&desired_s, &desired, sizeof(value_type));
int success;
ppc_fence_before(success_order);
__asm__(
"0: ldarx %0,%y2\n"
"cmpd %0, %3\n"
"bne- 2f\n"
"stdcx. %4,%y2\n"
"bne- 0b\n"
"addi %1,0,1\n"
"1:"
BOOST_ATOMIC_ASM_SLOWPATH_CLEAR
: "=&b" (expected_s), "=&b" (success), "+Z"(v_)
: "b" (expected_s), "b" (desired_s)
: "cr0"
);
if (success)
ppc_fence_after(success_order);
else
ppc_fence_after(failure_order);
memcpy(&expected, &expected_s, sizeof(value_type));
return success;
}
bool
is_lock_free(void) const volatile
{
return true;
}
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
base_atomic(const base_atomic &) /* = delete */ ;
void operator=(const base_atomic &) /* = delete */ ;
storage_type v_;
};
#endif
}
}
#define BOOST_ATOMIC_THREAD_FENCE 2
inline void
atomic_thread_fence(memory_order order)
{
switch(order) {
case memory_order_acquire:
__asm__ __volatile__ ("isync" ::: "memory");
break;
case memory_order_release:
#if defined(__powerpc64__)
__asm__ __volatile__ ("lwsync" ::: "memory");
break;
#endif
case memory_order_acq_rel:
case memory_order_seq_cst:
__asm__ __volatile__ ("sync" ::: "memory");
default:;
}
}
#define BOOST_ATOMIC_SIGNAL_FENCE 2
inline void
atomic_signal_fence(memory_order order)
{
switch(order) {
case memory_order_acquire:
case memory_order_release:
case memory_order_acq_rel:
case memory_order_seq_cst:
__asm__ __volatile__ ("" ::: "memory");
break;
default:;
}
}
}
#endif
|
{"hexsha": "cb1dcb9fff380af6aaccae249d23c1bc4d1340a8", "size": 54715, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "Dependencies/Theron/Include/External/boost/atomic/detail/gcc-ppc.hpp", "max_stars_repo_name": "sosan/NoahGameFrame", "max_stars_repo_head_hexsha": "38c54014c5c4620b784b2c1d2cab256f42bae186", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 30.0, "max_stars_repo_stars_event_min_datetime": "2015-04-09T21:35:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-01T01:21:32.000Z", "max_issues_repo_path": "Dependencies/Theron/Include/External/boost/atomic/detail/gcc-ppc.hpp", "max_issues_repo_name": "sosan/NoahGameFrame", "max_issues_repo_head_hexsha": "38c54014c5c4620b784b2c1d2cab256f42bae186", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2017-04-12T17:14:19.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-24T21:59:38.000Z", "max_forks_repo_path": "Dependencies/Theron/Include/External/boost/atomic/detail/gcc-ppc.hpp", "max_forks_repo_name": "sosan/NoahGameFrame", "max_forks_repo_head_hexsha": "38c54014c5c4620b784b2c1d2cab256f42bae186", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 15.0, "max_forks_repo_forks_event_min_datetime": "2015-02-06T21:33:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-12T06:07:32.000Z", "avg_line_length": 20.2948813056, "max_line_length": 81, "alphanum_fraction": 0.6358037101, "num_tokens": 19768}
|
import base64
import hashlib
import json
import numpy as np
import hypney
export, __all__ = hypney.exporter()
@export
def hashablize(obj):
"""Convert a container hierarchy into one that can be hashed.
See http://stackoverflow.com/questions/985294
"""
try:
hash(obj)
except TypeError:
if isinstance(obj, dict):
return tuple((k, hashablize(v)) for (k, v) in sorted(obj.items()))
elif isinstance(obj, np.ndarray):
return tuple(obj.tolist())
elif hasattr(obj, "__iter__"):
return tuple(hashablize(o) for o in obj)
else:
raise TypeError("Can't hashablize object of type %r" % type(obj))
else:
return obj
@export
class NumpyJSONEncoder(json.JSONEncoder):
"""Special json encoder for numpy types
Edited from mpld3: mpld3/_display.py
"""
def default(self, obj):
try:
iterable = iter(obj)
except TypeError:
pass
else:
return [self.default(item) for item in iterable]
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
@export
def deterministic_hash(thing, length=10):
"""Return a base32 lowercase string of length determined from hashing
the container hierarchy thing
"""
hashable = hashablize(thing)
jsonned = json.dumps(hashable, cls=NumpyJSONEncoder)
digest = hashlib.sha1(jsonned.encode("ascii")).digest()
return base64.b32encode(digest)[:length].decode("ascii").lower()
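# A minimal usage sketch (hypothetical values, not part of the original module):
# deterministic_hash({"mu": 1.0, "bins": np.arange(3)}) returns a 10-character
# lowercase base32 string that is identical across runs, because dicts are
# sorted and numpy arrays are converted to tuples before JSON-encoding.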
|
{"hexsha": "3ca336a62f5cea5cf412f189daae83ca36a6102c", "size": 1715, "ext": "py", "lang": "Python", "max_stars_repo_path": "hypney/utils/hashing.py", "max_stars_repo_name": "JelleAalbers/hypney", "max_stars_repo_head_hexsha": "3e38e21743fc9babe0ed47af299d08242a9b6d32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hypney/utils/hashing.py", "max_issues_repo_name": "JelleAalbers/hypney", "max_issues_repo_head_hexsha": "3e38e21743fc9babe0ed47af299d08242a9b6d32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hypney/utils/hashing.py", "max_forks_repo_name": "JelleAalbers/hypney", "max_forks_repo_head_hexsha": "3e38e21743fc9babe0ed47af299d08242a9b6d32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2222222222, "max_line_length": 78, "alphanum_fraction": 0.6297376093, "include": true, "reason": "import numpy", "num_tokens": 385}
|
# =============================================================================
# @author: Shuo Zhou, The University of Sheffield
# =============================================================================
import numpy as np
from scipy.linalg import eig
from numpy.linalg import multi_dot
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.validation import check_is_fitted
from sklearn.preprocessing import LabelBinarizer
from ..utils import base_init
class MIDA(BaseEstimator, TransformerMixin):
def __init__(self, n_components, penalty=None, kernel='linear', lambda_=1.0,
mu=1.0, eta=1.0, aug=True, **kwargs):
"""Maximum independence domain adaptation
Parameters
----------
n_components : int
number of components to keep after the MIDA projection (n_components <= n_features)
kernel : str
'rbf' | 'linear' | 'poly' (default is 'linear')
penalty : str
None | 'l2' (default is None)
lambda_ : float
regularisation parameter (used if penalty == 'l2')
mu : float
total captured variance parameter
eta : float
label dependence parameter
References
----------
Yan, K., Kou, L. and Zhang, D., 2018. Learning domain-invariant subspace
using domain features and independence maximization. IEEE transactions on
cybernetics, 48(1), pp.288-299.
"""
self.n_components = n_components
self.kernel = kernel
self.lambda_ = lambda_
self.penalty = penalty
self.mu = mu
self.eta = eta
self.aug = aug
self._lb = LabelBinarizer(pos_label=1, neg_label=0)
self.kwargs = kwargs
def fit(self, X, y=None, co_variates=None):
"""
Parameters
----------
X : array-like
Input data, shape (n_samples, n_features)
y : array-like
Labels, shape (n_labeled_samples,)
co_variates : array-like
Domain co-variates, shape (n_samples, n_co-variates)
Note
----
Unsupervised MIDA is performed if y is not given.
Semi-supervised MIDA is performed if y is given.
"""
        if self.aug and isinstance(co_variates, np.ndarray):
X = np.concatenate((X, co_variates), axis=1)
ker_x, unit_mat, ctr_mat, n = base_init(X, kernel=self.kernel, **self.kwargs)
        if isinstance(co_variates, np.ndarray):
ker_c = np.dot(co_variates, co_variates.T)
else:
ker_c = np.zeros((n, n))
if y is not None:
y_mat = self._lb.fit_transform(y)
ker_y = np.dot(y_mat, y_mat.T)
obj = multi_dot([ker_x, ctr_mat, ker_c, ctr_mat, ker_x.T])
st = multi_dot([ker_x, ctr_mat, (self.mu * unit_mat
+ self.eta * ker_y),
ctr_mat, ker_x.T])
# obj = np.trace(np.dot(K,L))
else:
obj = multi_dot([ker_x, ctr_mat, ker_c, ctr_mat, ker_x.T])
st = multi_dot([ker_x, ctr_mat, ker_x.T])
if self.penalty == 'l2':
obj -= self.lambda_ * unit_mat
eig_values, eig_vectors = eig(obj, st)
idx_sorted = eig_values.argsort()
self.U = eig_vectors[:, idx_sorted]
        self.U = np.asarray(self.U, dtype=float)  # np.float was removed in NumPy 1.24
# self.components_ = np.dot(X.T, U)
# self.components_ = self.components_.T
self.X = X
return self
def transform(self, X, co_variates=None):
"""
Parameters
----------
X : array-like,
shape (n_samples, n_features)
co_variates : array-like,
Domain co-variates, shape (n_samples, n_co-variates)
Returns
-------
array-like
transformed data
"""
check_is_fitted(self, 'X')
        if self.aug and isinstance(co_variates, np.ndarray):
X = np.concatenate((X, co_variates), axis=1)
ker_x = pairwise_kernels(X, self.X, metric=self.kernel,
filter_params=True, **self.kwargs)
return np.dot(ker_x, self.U[:, :self.n_components])
def fit_transform(self, X, y=None, co_variates=None):
"""
Parameters
----------
X : array-like,
shape (n_samples, n_features)
y : array-like
shape (n_samples,)
co_variates : array-like
shape (n_samples, n_co-variates)
Returns
-------
array-like
            transformed data
"""
self.fit(X, y, co_variates)
return self.transform(X, co_variates)
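# Usage sketch (illustrative; the one-hot domain indicator D is an assumption,
# any numeric domain covariate works):
#   import numpy as np
#   Xs = np.random.randn(50, 5)           # source-domain samples
#   Xt = np.random.randn(40, 5) + 1.0     # shifted target-domain samples
#   X = np.vstack([Xs, Xt])
#   D = np.vstack([np.tile([1, 0], (50, 1)), np.tile([0, 1], (40, 1))])
#   Z = MIDA(n_components=2).fit_transform(X, co_variates=D)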
|
{"hexsha": "cc0c8df8eb59e503f03d5525dfdb03144a32e031", "size": 4753, "ext": "py", "lang": "Python", "max_stars_repo_path": "pydale/transformer/_mida.py", "max_stars_repo_name": "sz144/TPy", "max_stars_repo_head_hexsha": "689e38bdc2549015bc45cfacfe42e20a51c76e5a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-08-20T13:38:13.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-31T08:57:12.000Z", "max_issues_repo_path": "pydale/transformer/_mida.py", "max_issues_repo_name": "sz144/pydale", "max_issues_repo_head_hexsha": "689e38bdc2549015bc45cfacfe42e20a51c76e5a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pydale/transformer/_mida.py", "max_forks_repo_name": "sz144/pydale", "max_forks_repo_head_hexsha": "689e38bdc2549015bc45cfacfe42e20a51c76e5a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-09-28T08:24:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T08:29:46.000Z", "avg_line_length": 34.4420289855, "max_line_length": 85, "alphanum_fraction": 0.5388175889, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 1093}
|
abstract type Adjoint_rep_Gaugefields_4D{NC,NumofBasis} <: Adjoint_rep_Gaugefields{NC,4,NumofBasis}
end
include("./Adjoint_rep_gaugefields_4D_wing.jl")
function Base.size(U::Adjoint_rep_Gaugefields_4D{NC,NumofBasis}) where {NC,NumofBasis}
return NumofBasis,NumofBasis,U.NX,U.NY,U.NZ,U.NT
end
|
{"hexsha": "8c4bcd724b896641d20bb8b85060fa96ff098d95", "size": 300, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/4D/Adjoint_rep_gaugefields_4D.jl", "max_stars_repo_name": "akio-tomiya/Gaugefields.jl", "max_stars_repo_head_hexsha": "dd2180dfe54eba7826ddd45a13ab2f5a007857d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-24T14:21:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T14:21:45.000Z", "max_issues_repo_path": "src/4D/Adjoint_rep_gaugefields_4D.jl", "max_issues_repo_name": "akio-tomiya/Gaugefields.jl", "max_issues_repo_head_hexsha": "dd2180dfe54eba7826ddd45a13ab2f5a007857d1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2022-01-18T01:51:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T01:14:03.000Z", "max_forks_repo_path": "src/4D/Adjoint_rep_gaugefields_4D.jl", "max_forks_repo_name": "akio-tomiya/Gaugefields.jl", "max_forks_repo_head_hexsha": "dd2180dfe54eba7826ddd45a13ab2f5a007857d1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3333333333, "max_line_length": 101, "alphanum_fraction": 0.8066666667, "num_tokens": 112}
|
[STATEMENT]
lemma dim_gen_eigenspace: assumes "jordan_nf A n_as"
shows "dim_gen_eigenspace A ev k
= (\<Sum> n \<leftarrow> map fst [(n, e)\<leftarrow>n_as . e = ev]. min k n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. local.dim_gen_eigenspace A ev k = sum_list (map (min k) (map fst (filter (\<lambda>(n, e). e = ev) n_as)))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. local.dim_gen_eigenspace A ev k = sum_list (map (min k) (map fst (filter (\<lambda>(n, e). e = ev) n_as)))
[PROOF STEP]
from assms[unfolded jordan_nf_def]
[PROOF STATE]
proof (chain)
picking this:
0 \<notin> fst ` set n_as \<and> similar_mat A (jordan_matrix n_as)
[PROOF STEP]
have sim: "similar_mat A (jordan_matrix n_as)"
[PROOF STATE]
proof (prove)
using this:
0 \<notin> fst ` set n_as \<and> similar_mat A (jordan_matrix n_as)
goal (1 subgoal):
1. similar_mat A (jordan_matrix n_as)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
similar_mat A (jordan_matrix n_as)
goal (1 subgoal):
1. local.dim_gen_eigenspace A ev k = sum_list (map (min k) (map fst (filter (\<lambda>(n, e). e = ev) n_as)))
[PROOF STEP]
from dim_gen_eigenspace_jordan_matrix[of n_as, folded dim_gen_eigenspace_similar[OF this]]
[PROOF STATE]
proof (chain)
picking this:
local.dim_gen_eigenspace A ?ev ?k = sum_list (map (min ?k) (map fst (filter (\<lambda>(n, e). e = ?ev) n_as)))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
local.dim_gen_eigenspace A ?ev ?k = sum_list (map (min ?k) (map fst (filter (\<lambda>(n, e). e = ?ev) n_as)))
goal (1 subgoal):
1. local.dim_gen_eigenspace A ev k = sum_list (map (min k) (map fst (filter (\<lambda>(n, e). e = ev) n_as)))
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
local.dim_gen_eigenspace A ev k = sum_list (map (min k) (map fst (filter (\<lambda>(n, e). e = ev) n_as)))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 828, "file": "Jordan_Normal_Form_Jordan_Normal_Form_Uniqueness", "length": 8}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 26 11:18:02 2022
This script reads daily temperature values from the NOAA GHCN-Daily archive
and calculates the heatwave magnitude index (HWMI). Data for the FMI
Sodankylä station is read from the FMI database.
@author: rantanem
"""
import pandas as pd
import numpy as np
from fmi_routines import update_station_data
import ghcn_routines as ghcn
from scipy.ndimage import binary_dilation  # scipy.ndimage.morphology is deprecated
from scipy import ndimage
# take only summer months
def is_jja(month):
return (month >= 6) & (month <= 8)
# list of stations and their names
list_of_stations = ghcn.ghcn_stations()
# read station location coordinates from GHCN server
station_locs = ghcn.read_station_locations()
# years and dates for which the HWM is calculated
years = np.arange(1960,2022)
dates = pd.date_range(str(years[0])+'-01-01', str(years[-1])+'-12-31')
# allocate empty dataframes
df_daily_data = pd.DataFrame(index=dates, columns=list_of_stations)
df_hwmi = pd.DataFrame(index=years, columns=list_of_stations)
df_tmax = pd.DataFrame(index=years, columns=list_of_stations)
# get the data; loop over the stations
for i, station in enumerate(list_of_stations):
print(list_of_stations[station])
# Finnish data is read from FMI
if station[:2]=='FI':
dataset = update_station_data(station='sodankyla')
cond = np.isin(dataset.index.year, years)
f = dataset['Maximum temperature'][cond]
# for other stations, read TX from GHCN-Daily
else:
f = ghcn.get_ghcn_daily_var('TMAX', station, years)
# allocate data to the dataframe
df_daily_data[station] = f.reindex(dates)
# print the number of missing days
print('Number of missing values:',np.sum(f.reindex(dates).isna().values),'\n')
# Width of the threshold selection window (days)
struct = np.ones(31)
df_p90 = pd.DataFrame(index=np.unique(df_daily_data.index.dayofyear), columns=list_of_stations)
df_25_75 = pd.DataFrame(index=[25, 75], columns=list_of_stations)
# climatology years for the threshold
years_clim = np.arange(1981, 2011)
# calculate the threshold for heat wave magnitude index
# (the 90th percentile of daily maximum temperature)
for i, station in enumerate(list_of_stations):
station_data_all_years = df_daily_data[station]
# select only the 1981-2010 years
cond = np.isin(station_data_all_years.index.year, years_clim)
station_data = station_data_all_years[cond]
doy_values = np.unique(station_data.index.dayofyear)
# Loop over each day of year
for day in doy_values:
dayofyear = station_data.index.dayofyear == day
selection = binary_dilation(dayofyear, structure=struct)
temp = station_data[selection]
        df_p90.loc[day, station] = np.nanpercentile(temp, 90)
# calculate the 25th and 75th percentiles of annual maxima
for i, station in enumerate(list_of_stations):
station_data = df_daily_data[station]
years_clim = np.arange(1981, 2011)
cond = np.isin(station_data.index.year, years_clim)
station_data = station_data[cond]
maxvalues = station_data.groupby(station_data.index.year).max()
p75_max = maxvalues.quantile(0.75)
p25_max = maxvalues.quantile(0.25)
    df_25_75.loc[25, station] = p25_max
    df_25_75.loc[75, station] = p75_max
# generate the structure to label each heatwave event
struct = np.ones(shape=(3,))
# calculate the heat wave magnitude index
for i, station in enumerate(list_of_stations):
station_data = df_daily_data[station]
heatwave_threshold = df_p90[station]
#loop through the years
for y in years:
        cond = station_data.index.year == y  # select every day of year y
temp = station_data[cond]
N = np.sum(temp[str(y)+'-06-01':str(y)+'-08-31'].isna())
newcoords = pd.to_datetime(y * 1000 + df_p90.index, format='%Y%j')
heatwave_threshold.index = newcoords
# identify heatwave days
heatwaves = temp > heatwave_threshold[temp.index]
# label each heatwave event
labels, nb = ndimage.label(heatwaves, structure=struct)
# calculate the length of each heatwave
heatwave_lengths = np.array(ndimage.sum(heatwaves, labels, np.arange(labels.max()+1)))
        # mask out heatwaves shorter than three days
mask = heatwave_lengths > 2
heatwave_events = mask[labels.ravel()].reshape(labels.shape)
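        # (the label array doubles as an index into `mask`, so a day is True only
        # if it belongs to an event lasting at least three days)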
# select only JJA period
        # keep heatwave-day temperatures (NaN elsewhere); the NaNs later reset the running sum
        heatwave_temps = temp.where(heatwave_events)[is_jja(temp.where(heatwave_events).index.month)]
# calculate the heatwave magnitude (based on Dobricic et al. 2020)
Md = (heatwave_temps - df_25_75[station][25]) / (df_25_75[station][75] - df_25_75[station][25])
# calculate sum of the daily magnitudes of Md from the consecutive days composing a heat wave
# negative Md indices are considered zero
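        # The lines below implement a "resetting" cumulative sum: nancumsum runs
        # over the whole year, and subtracting the running maximum of the cumsum
        # sampled at NaN (non-heatwave) positions restarts the sum after every
        # gap, so np.max picks out the largest single-event magnitude sum.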
heatwavevalues = (Md.where((Md>0)|(Md.isnull()), 0)).values
cums = np.nancumsum(heatwavevalues, axis=0)
weights_by_duration_array = cums - np.maximum.accumulate(cums * (np.isnan(heatwavevalues)), axis=0)
hwi = np.max(weights_by_duration_array)
        if N == 0:
            df_hwmi.loc[y, station] = hwi
            df_tmax.loc[y, station] = temp.max()
# save the HWM values
df_hwmi.to_csv('/Users/rantanem/Documents/python/resiclim-climateatlas/validation/data/stations_daily_hwm.csv',
index_label='Year', na_rep='NaN')
df_tmax.to_csv('/Users/rantanem/Documents/python/resiclim-climateatlas/validation/data/stations_daily_tmax.csv',
index_label='Year', na_rep='NaN')
|
{"hexsha": "bbf797a58ea6203b6f0ed0d96655e83a8ef918b8", "size": 5858, "ext": "py", "lang": "Python", "max_stars_repo_path": "validation/ghcn-daily_hwm.py", "max_stars_repo_name": "fmidev/resiclim-climateatlas", "max_stars_repo_head_hexsha": "b0c4c0ba6e3d189524cc89904636129733916f69", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "validation/ghcn-daily_hwm.py", "max_issues_repo_name": "fmidev/resiclim-climateatlas", "max_issues_repo_head_hexsha": "b0c4c0ba6e3d189524cc89904636129733916f69", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "validation/ghcn-daily_hwm.py", "max_forks_repo_name": "fmidev/resiclim-climateatlas", "max_forks_repo_head_hexsha": "b0c4c0ba6e3d189524cc89904636129733916f69", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6627218935, "max_line_length": 112, "alphanum_fraction": 0.6903379993, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1531}
|
# Advent of Code 2018, Day 20
# (c) blu3r4y
from copy import copy
import networkx as nx
from parglare import Parser, Grammar
from parglare.actions import pass_single, pass_inner
GRAMMAR = r"""
root: '^' expression '$';
expression: element+;
element: direction | branch;
branch: '(' option+ ')';
option: expression | '|' expression?;
terminals
direction: /[NSEW]/;
"""
def part1(sequence):
graph = build_graph(sequence)
# longest distance to any other node, starting at (0, 0)
return nx.eccentricity(graph, (0, 0))
def part2(sequence):
graph = build_graph(sequence)
# all the shortest path lengths, starting at (0, 0)
shortest_paths = nx.single_source_shortest_path_length(graph, (0, 0))
    # number of paths of length >= 1000
return len([v for k, v in shortest_paths.items() if v >= 1000])
def parse(regex):
actions = {
"root": pass_inner,
"branch": pass_inner,
"option": [pass_single, lambda _, nodes: nodes[1] or -1]
}
parser = Parser(Grammar.from_string(GRAMMAR), actions=actions)
sequence = parser.parse(regex)
return sequence
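# For intuition (illustrative): parse("^N(E|)S$") yields a nested list along
# the lines of ['N', ['E', -1], 'S'], where -1 marks an empty, skippable branch
# option; the exact nesting depends on parglare's action flattening.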
def build_graph(sequence):
graph = nx.Graph()
def _move(directions, starts):
# keep all the end points generated so far
ends = starts
for di in directions:
# is branching necessary?
if isinstance(di, list):
next_ends = []
for option in di:
if option != -1:
# move along each branch option and store the new end points
next_ends.extend(_move(option, copy(ends)))
if -1 in di:
# this branch can be skipped, so we just append the newly found end points
ends.extend(next_ends)
else:
# only keep the new end points otherwise
ends = next_ends
else:
# move every endpoint towards the desired direction
# and create an edge within the graph
for i in range(len(ends)):
pre = ends[i]
nxt = offset(pre, di)
graph.add_edge(pre, nxt)
ends[i] = nxt
# return all the unique end points
return list(set(ends))
# build the graph by moving within the grid, starting at (0, 0)
_move(sequence, [(0, 0)])
return graph
def offset(node, direction):
(x, y) = node
if direction == 'N':
return x, y + 1
elif direction == 'S':
return x, y - 1
elif direction == 'E':
return x + 1, y
elif direction == 'W':
return x - 1, y
if __name__ == "__main__":
print(part1(parse("^WNE$")))
print(part1(parse("^ENWWW(NEEE|SSE(EE|N))$")))
print(part1(parse("^ENNWSWW(NEWS|)SSSEEN(WNSE|)EE(SWEN|)NNN$")))
print(part1(parse("^ESSWWN(E|NNENN(EESS(WNSE|)SSS|WWWSSSSE(SW|NNNE)))$")))
print(part1(parse("^WSSEESWWWNW(S|NENNEEEENN(ESSSSW(NWSW|SSEN)|WSWWN(E|WWS(E|SS))))$")))
print(part1(parse(open(r"../assets/day20.txt").readlines()[0])))
print(part2(parse(open(r"../assets/day20.txt").readlines()[0])))
|
{"hexsha": "3bda85115bdd1a86db858a4feb8b4683840d2d5f", "size": 3218, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/day20.py", "max_stars_repo_name": "blu3r4y/AdventOfCode2018", "max_stars_repo_head_hexsha": "5ef6ee251f9184e0f66657d0eb8b5b129a6f93e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-01-02T22:57:13.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-07T23:13:25.000Z", "max_issues_repo_path": "src/day20.py", "max_issues_repo_name": "blu3r4y/AdventOfCode2018", "max_issues_repo_head_hexsha": "5ef6ee251f9184e0f66657d0eb8b5b129a6f93e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/day20.py", "max_forks_repo_name": "blu3r4y/AdventOfCode2018", "max_forks_repo_head_hexsha": "5ef6ee251f9184e0f66657d0eb8b5b129a6f93e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-06T12:38:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-06T12:38:26.000Z", "avg_line_length": 27.0420168067, "max_line_length": 94, "alphanum_fraction": 0.5755127408, "include": true, "reason": "import networkx", "num_tokens": 827}
|
# -*- coding: utf-8 -*-
from datetime import datetime
import numpy as np
from pandas._libs.tslibs import ccalendar
def test_get_day_of_year():
assert ccalendar.get_day_of_year(2001, 3, 1) == 60
assert ccalendar.get_day_of_year(2004, 3, 1) == 61
assert ccalendar.get_day_of_year(1907, 12, 31) == 365
assert ccalendar.get_day_of_year(2004, 12, 31) == 366
dt = datetime.fromordinal(1 + np.random.randint(365 * 4000))
result = ccalendar.get_day_of_year(dt.year, dt.month, dt.day)
expected = (dt - dt.replace(month=1, day=1)).days + 1
assert result == expected
|
{"hexsha": "b5d562a7b5a9c2bd5d55801dce8223341a6c2d7e", "size": 593, "ext": "py", "lang": "Python", "max_stars_repo_path": "pandas/tests/tslibs/test_ccalendar.py", "max_stars_repo_name": "vimalromeo/pandas", "max_stars_repo_head_hexsha": "7c14e4f14aff216be558bf5d4d2d00b4838c2360", "max_stars_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"], "max_stars_count": 69, "max_stars_repo_stars_event_min_datetime": "2020-03-31T06:40:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T11:48:18.000Z", "max_issues_repo_path": "venv/lib/python3.7/site-packages/pandas/tests/tslibs/test_ccalendar.py", "max_issues_repo_name": "John1001Song/Big-Data-Robo-Adviser", "max_issues_repo_head_hexsha": "9444dce96954c546333d5aecc92a06c3bfd19aa5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-12-04T23:44:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T08:31:40.000Z", "max_forks_repo_path": "venv/lib/python3.7/site-packages/pandas/tests/tslibs/test_ccalendar.py", "max_forks_repo_name": "John1001Song/Big-Data-Robo-Adviser", "max_forks_repo_head_hexsha": "9444dce96954c546333d5aecc92a06c3bfd19aa5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2020-04-15T15:24:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-26T04:05:02.000Z", "avg_line_length": 31.2105263158, "max_line_length": 65, "alphanum_fraction": 0.6981450253, "include": true, "reason": "import numpy", "num_tokens": 187}
|
using ConcreteStructs
using DynamicIterators
using DynamicIterators: dub
using Base.Iterators: SizeUnknown, IsInfinite
import DynamicIterators: dyniterate, evolve
using Tricks: static_hasmethod  # used below; presumably also imported at package level
export Chain
@concrete terse struct Chain{K,M} <: AbstractMeasure
κ::K
μ::M
end
function basemeasure(mc::Chain)
Chain(basemeasure ∘ mc.κ, basemeasure(mc.μ))
end
Base.IteratorEltype(mc::Chain) = Base.HasEltype()
Base.eltype(::Type{C}) where {K,M,C<:Chain{K,M}} = eltype(M)
function logdensity(mc::Chain, x)
μ = mc.μ
ℓ = 0.0
for xj in x
ℓ += logdensity(μ, xj)
μ = mc.κ(xj)
end
return ℓ
end
DynamicIterators.evolve(mc::Chain, μ) = μ ⋅ mc.κ
DynamicIterators.evolve(mc::Chain) = mc.μ
dyniterate(E::Chain, value) = dub(evolve(E, value))
dyniterate(E::Chain, ::Nothing) = dub(evolve(E))
Base.iterate(E::Chain) = dyniterate(E, nothing)
Base.iterate(E::Chain, value) = dyniterate(E, value)
function DynamicIterators.dyniterate(r::Chain, (u,rng)::Sample)
μ = r.κ(u)
u = rand(rng, μ)
return u, Sample(u, rng)
end
Base.IteratorSize(::Chain) = IsInfinite()
Base.IteratorSize(::Type{Chain}) = IsInfinite()
@concrete terse struct Realized{R,S,T} <: DynamicIterators.DynamicIterator
rng::ResettableRNG{R,S}
iter::T
end
Base.IteratorEltype(mc::Realized) = Base.HasEltype()
function Base.eltype(::Type{Rz}) where {R,S,T,Rz <: Realized{R,S,T}}
eltype(T)
end
Base.length(r::Realized) = length(r.iter)
Base.size(r::Realized) = size(r.iter)
Base.IteratorSize(::Type{Rz}) where {R,S,T, Rz <: Realized{R,S,T}} = Base.IteratorSize(T)
Base.IteratorSize(r::Rz) where {R,S,T, Rz <: Realized{R,S,T}} = Base.IteratorSize(r.iter)
function Base.iterate(rv::Realized{R,S,T}) where {R,S,T}
if static_hasmethod(evolve, Tuple{T})
dyniterate(rv, nothing)
else
!isnothing(rv.rng.seed) && reset!(rv.rng)
μ,s = iterate(rv.iter)
x = rand(rv.rng, μ)
x,s
end
end
function Base.iterate(rv::Realized{R,S,T}, s) where {R,S,T}
if static_hasmethod(evolve, Tuple{T})
dyniterate(rv, s)
else
μs = iterate(rv.iter, s)
isnothing(μs) && return nothing
(μ,s) = μs
x = rand(rv.rng, μ)
return x,s
end
end
function dyniterate(rv::Realized, ::Nothing)
!isnothing(rv.rng.seed) && reset!(rv.rng)
μ = evolve(rv.iter)
x = rand(rv.rng, μ)
x, Sample(x, rv.rng)
end
function dyniterate(rv::Realized, u::Sample)
dyniterate(rv.iter, u)
end
function Base.rand(rng::AbstractRNG, T::Type, chain::Chain)
seed = rand(rng, UInt)
r = ResettableRNG(rng, seed)
return Realized(r, chain)
end
###############################################################################
# DynamicFor
# A `DynamicFor` is produced when `For` is called on a `DynamicIterator`.
@concrete terse struct DynamicFor{T,K,S} <: AbstractMeasure
κ ::K
sampler :: S
end
function DynamicFor(κ::K,sampler::S) where {K,S}
T = typeof(κ(first(sampler)))
DynamicFor{T,K,S}(κ,sampler)
end
Base.eltype(::Type{D}) where {T,D<:DynamicFor{T}} = eltype(T)
Base.IteratorEltype(d::DynamicFor) = Base.HasEltype()
Base.IteratorSize(d::DynamicFor) = Base.IteratorSize(d.sampler)
function Base.iterate(d::DynamicFor)
(x,s) = iterate(d.sampler)
(d.κ(x), s)
end
function Base.iterate(d::DynamicFor, s)
(x,s) = iterate(d.sampler, s)
(d.κ(x), s)
end
Base.length(d::DynamicFor) = length(d.sampler)
For(f, r::Realized) = DynamicFor(f,r)
function Base.rand(rng::AbstractRNG, dfor::DynamicFor)
    # mirror the Chain method above: wrap the rng in a ResettableRNG, then realize
    seed = rand(rng, UInt)
    r = ResettableRNG(copy(rng), seed)
    return Realized(r, dfor)
end
function dyniterate(df::DynamicFor, st, args...)
    # DynamicFor stores its iterator in the `sampler` field
    (val, state) = dyniterate(df.sampler, st, args...)
    return (df.κ(val), state)
end
For(f, it::DynamicIterator) = DynamicFor(f, it)
For(f, it::DynamicFor) = DynamicFor(f, it)
function dyniterate(fr::DynamicFor, state)
    ϕ = dyniterate(fr.sampler, state)
    ϕ === nothing && return nothing
    u, state = ϕ
    fr.κ(u), state
end
function Base.collect(r::Realized)
    next = iterate(r)
    isnothing(next) && return []
    (x, s) = next
    a = similar(r.iter, typeof(x))
    i = 1
    while !isnothing(next)
        (x, s) = next
        @inbounds a[i] = x
        i += 1
        next = iterate(r, s)
    end
    return a
end
function testvalue(mc::Chain)
μ = mc.μ
κ = mc.κ
rand(Chain(Dirac ∘ testvalue ∘ κ, (Dirac ∘ testvalue)(μ)))
end
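# Usage sketch (illustrative; assumes a `Normal` measure is in scope):
#   mc = Chain(x -> Normal(x, 1.0), Normal(0.0, 1.0))   # Gaussian random walk
#   r = rand(Random.GLOBAL_RNG, Float64, mc)            # lazily realized path
#   collect(Iterators.take(r, 5))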
|
{"hexsha": "aac0739750d4e7b712cdce2352b47f3ca3eb524e", "size": 4433, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/combinators/chain.jl", "max_stars_repo_name": "jw3126/MeasureTheory.jl", "max_stars_repo_head_hexsha": "419d2f2fc3cb27c9b1d969d2e05022f3a4a01f66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/combinators/chain.jl", "max_issues_repo_name": "jw3126/MeasureTheory.jl", "max_issues_repo_head_hexsha": "419d2f2fc3cb27c9b1d969d2e05022f3a4a01f66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/combinators/chain.jl", "max_forks_repo_name": "jw3126/MeasureTheory.jl", "max_forks_repo_head_hexsha": "419d2f2fc3cb27c9b1d969d2e05022f3a4a01f66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.579787234, "max_line_length": 89, "alphanum_fraction": 0.6273404015, "num_tokens": 1381}
|
import cv2
from asteroid import Asteroid
import random
import numpy as np
from spaceship import Spaceship
import config
import utils
from gym import Env
class Simulation(Env):
action_space = [0, 119, 97, 115, 100, 32]
def __init__(self):
self.reset()
def reset(self):
self.ship = Spaceship(scale=.3, speed=0.06, rotationspeed=10)
self.asteroids = []
self.bullets = []
self.frame = np.zeros((config.SCREEN_HEIGHT, config.SCREEN_WIDTH, 3), dtype="uint8")
self.last_astroid = utils.get_now()
self.now = utils.get_now()
self.score = 0
self.stars = []
        self.add_stars()
        return self.frame  # gym-style reset() returns the initial observation
def add_stars(self):
for i in range(100):
x = random.randint(1, config.SCREEN_WIDTH - 2)
y = random.randint(1, config.SCREEN_HEIGHT - 2)
brightness = random.randint(120, 255)
self.stars.append((x, y, brightness))
def draw_stars(self):
for x, y, brightness in self.stars:
            # draw a 3-pixel cross around the star centre (slice ends are exclusive)
            self.frame[y, x - 1:x + 2] = int(brightness * 0.5)
            self.frame[y - 1:y + 2, x] = int(brightness * 0.5)
self.frame[y, x] = brightness
def run(self):
assert config.DRAW
while True:
key = cv2.waitKey(1)
if key == 27:
return self.score
r = self.step(key)
if r[2]:
print("Your score is ", self.score)
cv2.destroyAllWindows()
return r
def step(self, action):
reward = 0.0
done = False
if action == 32:
self.bullets += self.ship.shoot()
if config.DRAW:
self.frame.fill(0)
self.draw_stars()
self.ship.tick(action, self.frame)
        # iterate over shallow copies: asteroids and bullets are removed mid-loop
        for asteroid in list(self.asteroids):
            self.frame = asteroid.tick(self.frame)
            if self.ship.check_crash(asteroid):
                done = True
            for bullet in list(self.bullets):
                if bullet.check_crash(asteroid):
                    self.score += 1
                    reward += 1.
                    self.bullets.remove(bullet)
                    self.asteroids.remove(asteroid)
                    new_asteroids = asteroid.split()
                    self.asteroids += new_asteroids
                    break  # this asteroid no longer exists; stop testing bullets against it
        for bullet in list(self.bullets):
            self.frame = bullet.tick(self.frame)
            if bullet.remove():
                self.bullets.remove(bullet)
if config.RENDER:
cv2.imshow("space", self.frame)
cv2.waitKey(1)
if utils.it_is_time(self.last_astroid, config.asteroid_interval):
ast = Asteroid(random.random() * 0.3 + 0.1, random.random() * config.SCREEN_WIDTH,
random.random() * config.SCREEN_HEIGHT,
(1 - random.random()) * 1.6,
(1 - random.random()) * 1.6)
self.asteroids.append(ast)
self.last_astroid = utils.get_now()
self.now = utils.tick_time(self.now)
return self.frame, reward, done, {}
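# Minimal driving loop (illustrative; key codes follow Simulation.action_space,
# e.g. 119 == ord('w')):
#   sim = Simulation()
#   frame, reward, done, info = sim.step(119)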
|
{"hexsha": "f323f83010c6b09b0c2e44b9b442ab19cfb4c781", "size": 3085, "ext": "py", "lang": "Python", "max_stars_repo_path": "simulation.py", "max_stars_repo_name": "Elbrasch/AstroidsReinforcedLearning", "max_stars_repo_head_hexsha": "400d3a51d9ebc5ba48a7fba34c05c783aaff66ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-01-20T11:05:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-28T11:39:28.000Z", "max_issues_repo_path": "simulation.py", "max_issues_repo_name": "Elbrasch/AstroidsReinforcedLearning", "max_issues_repo_head_hexsha": "400d3a51d9ebc5ba48a7fba34c05c783aaff66ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simulation.py", "max_forks_repo_name": "Elbrasch/AstroidsReinforcedLearning", "max_forks_repo_head_hexsha": "400d3a51d9ebc5ba48a7fba34c05c783aaff66ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9010989011, "max_line_length": 94, "alphanum_fraction": 0.5335494327, "include": true, "reason": "import numpy", "num_tokens": 729}
|
import numpy as np
from ..util.backend_functions import backend as bd
from .diffractive_element import DOE
class RectangularSlit(DOE):
def __init__(self, width, height, x0 = 0, y0 = 0):
"""
        Creates a rectangular slit of the given width and height, centered at the point (x0, y0).
"""
global bd
from ..util.backend_functions import backend as bd
self.x0 = x0
self.y0 = y0
self.width = width
self.height = height
def get_transmittance(self, xx, yy, λ):
t = bd.where((((xx >= (self.x0 - self.width / 2)) & (xx < (self.x0 + self.width / 2)))
& ((yy >= (self.y0 - self.height / 2)) & (yy < (self.y0 + self.height / 2)))),
bd.ones_like(xx), bd.zeros_like(xx))
return t
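# Usage sketch (illustrative; evaluates the transmittance mask directly with
# the numpy backend, outside the full simulation pipeline):
#   import numpy as np
#   slit = RectangularSlit(width=1e-3, height=2e-3)
#   xx, yy = np.meshgrid(np.linspace(-2e-3, 2e-3, 512), np.linspace(-2e-3, 2e-3, 512))
#   t = slit.get_transmittance(xx, yy, 632.8e-9)   # 1.0 inside the aperture, 0.0 outside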
|
{"hexsha": "ea4332171fab8e8bfd86e966092486217e810a3d", "size": 806, "ext": "py", "lang": "Python", "max_stars_repo_path": "diffractsim/diffractive_elements/rectangular_slit.py", "max_stars_repo_name": "rafael-fuente/diffractsim", "max_stars_repo_head_hexsha": "7287635d2bfa76f8b1eb24c6208796f761dd6144", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2022-01-01T01:16:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T00:42:52.000Z", "max_issues_repo_path": "diffractsim/diffractive_elements/rectangular_slit.py", "max_issues_repo_name": "rafael-fuente/diffractsim", "max_issues_repo_head_hexsha": "7287635d2bfa76f8b1eb24c6208796f761dd6144", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-01-02T17:33:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-03T17:51:39.000Z", "max_forks_repo_path": "diffractsim/diffractive_elements/rectangular_slit.py", "max_forks_repo_name": "rafael-fuente/diffractsim", "max_forks_repo_head_hexsha": "7287635d2bfa76f8b1eb24c6208796f761dd6144", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2022-02-07T22:44:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T12:34:54.000Z", "avg_line_length": 28.7857142857, "max_line_length": 102, "alphanum_fraction": 0.5558312655, "include": true, "reason": "import numpy", "num_tokens": 219}
|
#include <iostream>
#include "gtest/gtest.h"
#include <Eigen/Core>
#include "mean_curvature_solver.h"
#include "uniform_lb_operator.h"
#include "cotangent_lb_operator.h"
class MeanCurvatureSolverTest : public ::testing::Test {
protected:
MeanCurvatureSolverTest() : mUnifromMCS(mcurv::uniformLBOperatorStrategy),
mCotangentMCS(mcurv::cotangentLBOperatorStrategy),
mBunnyPath("./res/bunny.off") {}
Eigen::MatrixXd mSolution;
const std::string mBunnyPath;
mcurv::MeanCurvatureSolver mUnifromMCS;
mcurv::MeanCurvatureSolver mCotangentMCS;
};
TEST_F(MeanCurvatureSolverTest, WrongPathForExecute) {
ASSERT_THROW(mUnifromMCS.Execute(mSolution, "wrong_path"), std::runtime_error);
ASSERT_THROW(mCotangentMCS.Execute(mSolution, "wrong_path"), std::runtime_error);
}
TEST_F(MeanCurvatureSolverTest, ExecUniform) {
mUnifromMCS.Execute(mSolution, mBunnyPath);
EXPECT_NEAR(mSolution(0,0), 0.0065769, 0.001);
EXPECT_NEAR(mSolution(1,0), -0.00329001, 0.001);
EXPECT_NEAR(mSolution(2,0), -0.00750467, 0.001);
}
TEST_F(MeanCurvatureSolverTest, ExecCotangent) {
mCotangentMCS.Execute(mSolution, mBunnyPath);
EXPECT_NEAR(mSolution(0,0), 66.2739, 0.001);
EXPECT_NEAR(mSolution(1,0), -555.572, 0.001);
EXPECT_NEAR(mSolution(2,0), -76.9057, 0.001);
}
|
{"hexsha": "f2bb1a16f9f2c3b52f07ba58aa8a511e9870093c", "size": 1360, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "testing/unit_tests/test_mean_curvature_solver.cpp", "max_stars_repo_name": "dybiszb/MeanCurvatureLibrary", "max_stars_repo_head_hexsha": "b168911ef6bf08b283e7a225cc006b850fe26400", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "testing/unit_tests/test_mean_curvature_solver.cpp", "max_issues_repo_name": "dybiszb/MeanCurvatureLibrary", "max_issues_repo_head_hexsha": "b168911ef6bf08b283e7a225cc006b850fe26400", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2018-11-14T23:14:58.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-14T23:14:58.000Z", "max_forks_repo_path": "testing/unit_tests/test_mean_curvature_solver.cpp", "max_forks_repo_name": "dybiszb/MeanCurvatureLibrary", "max_forks_repo_head_hexsha": "b168911ef6bf08b283e7a225cc006b850fe26400", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8717948718, "max_line_length": 85, "alphanum_fraction": 0.7161764706, "num_tokens": 407}
|
# encoding: UTF-8
# Standard library imports first
import sys
import os
from datetime import datetime, timedelta, date
from time import sleep
import copy
import logging
# Third-party modules
import talib as ta
import math
import numpy
import requests
import execjs
import pykalman
# vn.py trader core modules
from vnpy.trader.vtConstant import EMPTY_STRING, EMPTY_INT, DIRECTION_LONG, DIRECTION_SHORT, OFFSET_OPEN,OFFSET_CLOSE,OFFSET_CLOSETODAY,OFFSET_CLOSEYESTERDAY, STATUS_CANCELLED
# Then the CTA modules
from vnpy.trader.app.ctaStrategy.ctaTemplate import *
from vnpy.trader.app.ctaStrategy.ctaBase import *
from vnpy.trader.app.ctaStrategy.ctaPolicy import *
from vnpy.trader.app.ctaStrategy.ctaLineBar import *
from vnpy.trader.app.ctaStrategy.ctaPosition import *
from vnpy.trader.app.ctaStrategy.ctaGridTrade import *
cta_engine_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
############################################################################
class Strategy30(CtaTemplate):
    """Cointegration arbitrage + grid-trading executor for non-standard contracts.
    Accepts Grid input from the external UI only.
    Example configuration:
    {
      "name": "S28 rebar calendar-spread arbitrage",
      "className": "Strategy28",
      "vtSymbol": "rb1705;rb1710",
      "symbol": "rb1705;rb1710",
      "shortSymbol":"RB",
      "Leg1Symbol":"rb1705",
      "Leg2Symbol":"rb1710",
      "baseUpLine":240,
      "baseMidLine":0,
      "baseDnLine":-240,
      "minDiff":1,
      "inputSS":1,
      "height":5,
      "win":10,
      "maxPos":4,
      "maxLots":4,
      "deadLine":"2017-4-20",
      "mode":"tick"
    }
    """
    className = 'Strategy30'
    author = u'李来佳'
    # strategy parameters set from the external UI
    inputSS = 1  # order-size parameter SS, range 1~100, step 1, default 1
    minDiff = 1  # minimum price increment of the instrument
    maxPos = 10  # maximum number of grid positions
#----------------------------------------------------------------------
    def __init__(self, ctaEngine, setting=None):
        """Constructor"""
        super(Strategy30, self).__init__(ctaEngine, setting)
        self.paramList.append('inputSS')
        self.paramList.append('Leg1Symbol')  # near-month contract
        self.paramList.append('Leg2Symbol')  # far-month contract
        self.paramList.append('minDiff')
        self.paramList.append('maxPos')
        self.paramList.append('maxLots')
        self.paramList.append('height')
        self.paramList.append('win')
        self.paramList.append('baseUpLine')
        self.paramList.append('baseMidLine')  # reference mid line
        self.paramList.append('baseDnLine')
        #self.varList.remove('pos')
        self.varList.append('gridpos')
        self.varList.append('entrust')
        self.varList.append('upGrids')
        self.varList.append('dnGrids')
        self.varList.append('tradingOpen')
        self.cancelSeconds = 2               # seconds before an unfilled order is cancelled
        self.curDateTime = None              # current tick time
        self.curTick = None                  # latest tick
        self.Leg1Symbol = EMPTY_STRING
        self.Leg2Symbol = EMPTY_STRING
        self.lastLeg1Tick = None
        self.lastLeg2Tick = None
        self.firstTrade = True               # first trade of the trading day
        # trading window
        self.tradeWindow = False
        # market-open window
        self.openWindow = False
        # close-out window before market close
        self.closeWindow = False
        # position state
        self.position = CtaPosition(self)    # 0 = flat, 1 = long, -1 = short
        self.position.maxPos = self.maxPos
        self.gridpos = 0
        self.lastTradedTime = datetime.now() # time of the last trade
        self.tradingOpen = True              # opening new positions allowed
        self.recheckPositions = True
        self.forceClose = EMPTY_STRING       # forced close-out date (parameter, string)
        self.forceCloseDate = None           # forced close-out date (date type)
        self.forceTradingClose = False       # forced close-out flag
        # whether strategy initialisation has completed
        self.inited = False
        self.backtesting = False
        # start date of the historical data loaded at initialisation (can be set externally)
        self.startDate = None
        self.policy = CtaPolicy()            # post-fill execution policy
        self.recheckPositions = True         # resubmit close orders; activated at 14:59 each trading day and re-run at the start of the new trading day (21:00)
        self.volumeList = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        self.height = 4
        self.win = 4
        self.upGrids = EMPTY_STRING          # display string for the short (upper) grids
        self.dnGrids = EMPTY_STRING          # display string for the long (lower) grids
        self.sigle_exist = False             # whether to flatten everything in one go
        #
        self.baseUpLine = EMPTY_INT          # starting line for the short grids
        self.baseMidLine = EMPTY_INT         # reference mid line separating long/short
        self.baseDnLine = EMPTY_INT          # starting line for the long grids
        self.upRate = 1                      # spacing-expansion ratio for the short grids
        self.dnRate = 1                      # spacing-expansion ratio for the long grids
        self.rebuildUpGrid = False           # grid-rebuild flag
        self.rebuildDnGrid = False           # grid-rebuild flag
        self.rebuildGrid = False             # per-minute grid-rebuild trigger
        self.maxLots = 10                    # maximum number of grids
        self.logMsg = EMPTY_STRING           # scratch variable for log output
        if setting:
            # update parameters from the configuration file
            self.setParam(setting)
            self.onInit()
#----------------------------------------------------------------------
    def onInit(self, force = False):
        """Initialisation
        """
        if force:
            self.writeCtaLog(u'strategy forced re-initialisation')
            self.inited = False
            self.trading = False  # controls whether trading is enabled
        else:
            self.writeCtaLog(u'strategy initialisation')
            if self.inited:
                self.writeCtaLog(u'already initialised, skipping')
                return
        # initialise position-related data
        self.position.pos = EMPTY_INT
        self.pos = self.position.pos
        self.gridpos = self.position.pos
        self.position.maxPos = self.maxPos
        # initialise the grids
        self.gridHeight = self.height * self.minDiff  # grid spacing = ticks * tick size
        self.gridWin = self.win * self.minDiff        # take-profit distance = ticks * tick size
        if self.baseUpLine == EMPTY_INT:
            self.writeCtaLog(u'baseUpLine not set at init, defaulting to 9999 * minDiff')
            self.baseUpLine = 9999 * self.minDiff     # starting line for the short grids
        if self.baseDnLine == EMPTY_INT:
            self.writeCtaLog(u'baseDnLine not set, defaulting to -9999 * minDiff')
            self.baseDnLine = -9999 * self.minDiff    # starting line for the long grids
        self.upLine = self.baseUpLine                 # upper rail of the short grids
        self.dnLine = self.baseDnLine                 # lower rail of the long grids
        # create the grid-trading engine
        self.gt = CtaGridTrade(strategy=self, maxlots=self.maxLots, height=self.gridHeight, win=self.gridWin,
                               vol=self.inputSS, minDiff=self.minDiff)
        # update the per-grid volume policy
        if self.volumeList:
            self.gt.volumeList = self.volumeList
        else:
            self.gt.volumeList = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        # initialise the grid trader (or restore it from the local record file)
        self.__initGrids()
        # update the init and trading flags
        self.inited = True
        self.trading = True  # controls whether trading is enabled
        self.recheckPositions = True
        self.putEvent()
        self.writeCtaLog(u'strategy initialisation completed')
    def onStart(self):
        """Start the strategy (must be implemented by the user)"""
        self.writeCtaLog(u'started')
        self.trading = True
    #----------------------------------------------------------------------
    def onStop(self):
        """Stop the strategy (must be implemented by the user)"""
        self.uncompletedOrders.clear()
        self.recheckPositions = True
        self.position.clear()
        self.gridpos = self.position.pos
        self.entrust = 0
        self.writeCtaLog(u'saving the lower (long) grids')
        self.gt.save(direction=DIRECTION_LONG)
        self.writeCtaLog(u'saving the upper (short) grids')
        self.gt.save(direction=DIRECTION_SHORT)
        self.trading = False
        self.writeCtaLog(u'stopped')
        self.putEvent()
#----------------------------------------------------------------------
    def onTrade(self, trade):
        """Trade update"""
        self.writeCtaLog(u'{0}, OnTrade(), current position: {1}'.format(self.curDateTime, self.position.pos))
    #----------------------------------------------------------------------
    def onOrder(self, order):
        """Order update"""
        self.writeCtaLog(u'OnOrder() order update, orderID:{0},{1},totalVol:{2},tradedVol:{3},offset:{4},price:{5},direction:{6},status:{7}'
                         .format(order.orderID, order.vtSymbol, order.totalVolume, order.tradedVolume,
                                 order.offset, order.price, order.direction, order.status))
        orderkey = order.gatewayName + u'.' + order.orderID
        if orderkey in self.uncompletedOrders:
            if order.totalVolume == order.tradedVolume:
                # open/close order fully filled
                self.__onOrderAllTraded(order)
            elif order.tradedVolume > 0 and not order.totalVolume == order.tradedVolume:
                # order partially filled
                self.__onOrderPartTraded(order)
            elif order.offset == OFFSET_OPEN and order.status == STATUS_CANCELLED:
                # opening order was cancelled
                pass
            else:
                self.writeCtaLog(u'OnOrder() order returned, total:{0}, traded:{1}'
                                 .format(order.totalVolume, order.tradedVolume))
        self.__updateGridsDisplay()
        self.pos = self.position.pos
        self.gridpos = self.position.pos
        self.writeCtaLog(u'OnOrder() self.gridpos={0}'.format(self.gridpos))
        self.putEvent()
    def __onOrderAllTraded(self, order):
        """All-filled event for an order"""
        self.writeCtaLog(u'onOrderAllTraded(), {0}, order fully filled'.format(order.orderTime))
        orderkey = order.gatewayName + u'.' + order.orderID
        # covering a short position completed (cover)
        if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_LONG and order.offset != OFFSET_OPEN:
            # find the grid matching this orderID
            grid = self.uncompletedOrders[orderkey]['Grid']
            if grid is not None:
                orders = grid.orderRef.split(';')
                if len(orders) >= 2 and orderkey in orders:
                    self.writeCtaLog(u'updating grid: {1} covered at closePrice={0}'.format(grid.closePrice, order.vtSymbol))
                    orders.remove(orderkey)
                    grid.orderRef = orders[0]
                elif len(orders) == 1 and orderkey in orders:
                    self.writeCtaLog(u'updating grid: {1} covered at closePrice={0}'.format(grid.closePrice, order.vtSymbol))
                    grid.orderRef = EMPTY_STRING
                    grid.orderStatus = False
                    grid.openStatus = False
                    grid.closeStatus = False
                    grid.tradedVolume = EMPTY_INT
                    grid.openDatetime = EMPTY_STRING
                    # update the position
                    direction = grid.direction
                    if direction == DIRECTION_LONG:
                        self.writeCtaLog(u'updating position: long-spread grid closed {0} lots of longs'.format(grid.volume))
                        self.position.closePos(DIRECTION_SHORT, vol=grid.volume)
                        self.writeCtaLog(u'removing grid {0},{1}'.format(grid.direction, grid.openPrice))
                        self.gt.dnGrids.remove(grid)
                    else:
                        self.writeCtaLog(u'updating position: short-spread grid closed {0} lots of shorts'.format(grid.volume))
                        self.position.closePos(DIRECTION_LONG, vol=grid.volume)
                        self.writeCtaLog(u'removing grid {0},{1}'.format(grid.direction, grid.openPrice))
                        self.gt.upGrids.remove(grid)
                    self.entrust = 0
                    self.gridpos = self.position.pos
                    self.gt.save(direction=direction)
                else:
                    self.writeCtaLog(u'error: in the grid for orderRef {0}, Ref field is {1}'.format(orderkey, grid.orderRef))
            else:
                self.writeCtaLog(u'error: no grid found for orderRef {0}'.format(orderkey))
        # closing a long position completed (sell)
        if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_SHORT and order.offset != OFFSET_OPEN:
            # find the grid matching this orderID
            grid = self.uncompletedOrders[orderkey]['Grid']
            if grid is not None:
                orders = grid.orderRef.split(';')
                if len(orders) >= 2 and orderkey in orders:
                    self.writeCtaLog(u'updating grid: {1} sold at closePrice={0}'.format(grid.closePrice, order.vtSymbol))
                    orders.remove(orderkey)
                    grid.orderRef = orders[0]
                elif len(orders) == 1 and orderkey in orders:
                    self.writeCtaLog(u'updating grid: {1} sold at closePrice={0}'.format(grid.closePrice, order.vtSymbol))
                    grid.orderRef = EMPTY_STRING
                    grid.orderStatus = False
                    grid.openStatus = False
                    grid.closeStatus = False
                    grid.tradedVolume = EMPTY_INT
                    grid.openDatetime = EMPTY_STRING
                    # update the position
                    direction = grid.direction
                    if direction == DIRECTION_LONG:  # the grid opened long
                        self.writeCtaLog(u'updating position: long-spread grid closed {0} lots of longs'.format(grid.volume))
                        self.position.closePos(DIRECTION_SHORT, vol=grid.volume)
                        self.writeCtaLog(u'removing grid {0},{1}'.format(grid.direction, grid.openPrice))
                        self.gt.dnGrids.remove(grid)
                    else:  # the grid opened short
                        self.writeCtaLog(u'updating position: short-spread grid closed {0} lots of shorts'.format(grid.volume))
                        self.position.closePos(DIRECTION_LONG, vol=grid.volume)
                        self.writeCtaLog(u'removing grid {0},{1}'.format(grid.direction, grid.openPrice))
                        self.gt.upGrids.remove(grid)
                    self.gridpos = self.position.pos
                    self.entrust = 0
                    self.gt.save(direction=direction)
                else:
                    self.writeCtaLog(u'error: in the grid for orderRef {0}, Ref field is {1}'.format(orderkey, grid.orderRef))
            else:
                self.writeCtaLog(u'error: no grid found for orderRef {0}'.format(orderkey))
        # opening a long position completed
        if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_LONG and order.offset == OFFSET_OPEN:
            self.writeCtaLog(u'{0} long open completed'.format(order.vtSymbol))
            # find the grid matching this orderID
            grid = self.uncompletedOrders[orderkey]['Grid']
            if grid is not None:
                orders = grid.orderRef.split(';')
                if len(orders) >= 2 and orderkey in orders:
                    self.writeCtaLog(u'updating grid: Grid.OpenPrice={0}, {1} opened long at {2}'.format(grid.openPrice, order.vtSymbol, order.price))
                    orders.remove(orderkey)
                    grid.orderRef = orders[0]
                elif len(orders) == 1 and orderkey in orders:
                    self.writeCtaLog(u'updating grid: Grid.OpenPrice={0}, {1} opened long at {2}'.format(grid.openPrice, order.vtSymbol, order.price))
                    grid.orderRef = EMPTY_STRING
                    grid.openStatus = True
                    grid.orderStatus = False
                    grid.openDatetime = self.curDateTime
                    # update the position
                    self.writeCtaLog(u'updating position: grid opened {0}, {1} lots'.format(grid.direction, grid.volume))
                    self.position.openPos(grid.direction, vol=grid.volume, price=grid.openPrice)
                    self.pos = self.position.pos
                    self.gridpos = self.position.pos
                    self.entrust = 0
                else:
                    self.writeCtaLog(u'error: in the grid for orderRef {0}, Ref field is {1}'.format(orderkey, grid.orderRef))
                direction = grid.direction
                self.gt.save(direction=direction)
            else:
                self.writeCtaLog(u'error: no grid found for orderRef {0}'.format(orderkey))
        # opening a short position completed
        if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_SHORT and order.offset == OFFSET_OPEN:
            self.writeCtaLog(u'{0} short open completed'.format(order.vtSymbol))
            # find the grid matching this orderID
            grid = self.uncompletedOrders[orderkey]['Grid']
            if grid is not None:
                orders = grid.orderRef.split(';')
                if len(orders) >= 2 and orderkey in orders:
                    self.writeCtaLog(u'updating grid: Grid.OpenPrice={0}, {1} opened short at {2}'.format(grid.openPrice, order.vtSymbol, order.price))
                    orders.remove(orderkey)
                    grid.orderRef = orders[0]
                elif len(orders) == 1 and orderkey in orders:
                    self.writeCtaLog(u'updating grid: Grid.OpenPrice={0}, {1} opened short at {2}'.format(grid.openPrice, order.vtSymbol, order.price))
                    grid.orderRef = EMPTY_STRING
                    grid.orderStatus = False
                    grid.openStatus = True
                    grid.openDatetime = self.curDateTime
                    # update the position
                    self.writeCtaLog(u'updating position: grid opened {0}, {1} lots'.format(grid.direction, grid.volume))
                    self.position.openPos(grid.direction, vol=grid.volume, price=grid.openPrice)
                    self.pos = self.position.pos
                    self.gridpos = self.position.pos
                    self.entrust = 0
                else:
                    self.writeCtaLog(u'error: in the grid for orderRef {0}, Ref field is {1}'.format(orderkey, grid.orderRef))
                direction = grid.direction
                self.gt.save(direction=direction)
            else:
                self.writeCtaLog(u'error: no grid found for orderRef {0}'.format(orderkey))
        try:
            del self.uncompletedOrders[orderkey]
        except Exception as ex:
            self.writeCtaLog(u'onOrder: {0} not found in uncompletedOrders'.format(orderkey))
        self.__updateGridsDisplay()
    def __onOrderPartTraded(self, order):
        """Partial fill of an order"""
        self.writeCtaLog(u'onOrderPartTraded(), {0}, order partially filled'.format(order.orderTime))
        orderkey = order.gatewayName + u'.' + order.orderID
        if orderkey in self.uncompletedOrders:
            self.uncompletedOrders[orderkey]['TradedVolume'] = order.tradedVolume
        else:
            self.writeCtaLog(u'error: order not found: {0}'.format(orderkey))
        self.__updateGridsDisplay()
    def __onOpenOrderCanceled(self, order):
        """Opening order cancelled"""
        self.writeCtaLog(u'__onOpenOrderCanceled(), {0}, opening order cancelled'.format(order.orderTime))
        orderkey = order.gatewayName + u'.' + order.orderID
        if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_LONG:
            # update the grid trader
            updateGrid = self.gt.getGrid(direction=DIRECTION_LONG, openPrice=order.price,
                                         orderRef=order.orderID, t=u'OpenPrice')
            if updateGrid is not None:
                self.writeCtaLog(u'marking grid [{0}] as long-open cancelled'.format(updateGrid.openPrice))
                updateGrid.openStatus = False
                updateGrid.openDatetime = EMPTY_STRING
            else:
                self.writeCtaLog(u'error: grid [{0}] not found'.format(order.price))
            self.gt.save(direction=DIRECTION_LONG)
        if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_SHORT:
            # update the grid trader
            updateGrid = self.gt.getGrid(direction=DIRECTION_SHORT, openPrice=order.price,
                                         orderRef=order.orderID, t=u'OpenPrice')
            if updateGrid is not None:
                self.writeCtaLog(u'marking grid [{0}] as short-open cancelled'.format(updateGrid.openPrice))
                updateGrid.openStatus = False
                updateGrid.openDatetime = EMPTY_STRING
            else:
                self.writeCtaLog(u'error: grid [{0}] not found'.format(order.price))
            self.gt.save(direction=DIRECTION_SHORT)
        self.__updateGridsDisplay()
# ----------------------------------------------------------------------
    def onStopOrder(self, orderRef):
        """Stop-order update"""
        self.writeCtaLog(u'{0}, stop order triggered, orderRef: {1}'.format(self.curDateTime, orderRef))
        pass
# ----------------------------------------------------------------------
    def __combineTick(self, tick):
        """Combine the two legs into a synthetic spread tick"""
        combinable = False
        if tick.vtSymbol == self.Leg1Symbol:
            # leg1 contract
            self.lastLeg1Tick = tick
            if self.lastLeg2Tick is not None:
                if self.lastLeg1Tick.datetime == self.lastLeg2Tick.datetime:
                    combinable = True
        elif tick.vtSymbol == self.Leg2Symbol:
            # leg2 contract
            self.lastLeg2Tick = tick
            if self.lastLeg1Tick is not None:
                if self.lastLeg2Tick.datetime == self.lastLeg1Tick.datetime:
                    combinable = True
        # cannot combine yet
        if not combinable:
            return None
        spread_tick = CtaTickData()
        spread_tick.vtSymbol = self.vtSymbol
        spread_tick.symbol = self.symbol
        spread_tick.datetime = tick.datetime
        spread_tick.date = tick.date
        spread_tick.time = tick.time
        # the cases below mean a single leg is at its price limit; do not build a spread tick
        if (self.lastLeg1Tick.askPrice1 == float('1.79769E308') or self.lastLeg1Tick.askPrice1 == 0 or self.lastLeg1Tick.bidPrice1 == self.lastLeg1Tick.upperLimit) and self.lastLeg1Tick.askVolume1 == 0:
            self.writeCtaLog(u'leg1: {0} limit-up at {1}, not building spread tick'.format(self.lastLeg1Tick.vtSymbol, self.lastLeg1Tick.bidPrice1))
            return None
        if (self.lastLeg1Tick.bidPrice1 == float('1.79769E308') or self.lastLeg1Tick.bidPrice1 == 0 or self.lastLeg1Tick.askPrice1 == self.lastLeg1Tick.lowerLimit) and self.lastLeg1Tick.bidVolume1 == 0:
            self.writeCtaLog(u'leg1: {0} limit-down at {1}, not building spread tick'.format(self.lastLeg1Tick.vtSymbol, self.lastLeg1Tick.askPrice1))
            return None
        if (self.lastLeg2Tick.askPrice1 == float('1.79769E308') or self.lastLeg2Tick.askPrice1 == 0 or self.lastLeg2Tick.bidPrice1 == self.lastLeg2Tick.upperLimit) and self.lastLeg2Tick.askVolume1 == 0:
            self.writeCtaLog(u'leg2: {0} limit-up at {1}, not building spread tick'.format(self.lastLeg2Tick.vtSymbol, self.lastLeg2Tick.bidPrice1))
            return None
        if (self.lastLeg2Tick.bidPrice1 == float('1.79769E308') or self.lastLeg2Tick.bidPrice1 == 0 or self.lastLeg2Tick.askPrice1 == self.lastLeg2Tick.lowerLimit) and self.lastLeg2Tick.bidVolume1 == 0:
            self.writeCtaLog(u'leg2: {0} limit-down at {1}, not building spread tick'.format(self.lastLeg2Tick.vtSymbol, self.lastLeg2Tick.askPrice1))
            return None
        # spread ask = leg1.askPrice1 - leg2.bidPrice1; volume is the smaller of the two
        spread_tick.askPrice1 = self.lastLeg1Tick.askPrice1 - self.lastLeg2Tick.bidPrice1
        spread_tick.askVolume1 = min(self.lastLeg1Tick.askVolume1, self.lastLeg2Tick.bidVolume1)
        # spread bid = leg1.bidPrice1 - leg2.askPrice1; volume is the smaller of the two
        spread_tick.bidPrice1 = self.lastLeg1Tick.bidPrice1 - self.lastLeg2Tick.askPrice1
        spread_tick.bidVolume1 = min(self.lastLeg1Tick.bidVolume1, self.lastLeg2Tick.askVolume1)
        return spread_tick
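    # Worked example (illustrative numbers): with leg1 quoted 3500/3501 and leg2
    # quoted 3400/3401, the synthetic spread is bid 3500-3401=99 and
    # ask 3501-3400=101, so the spread book is always at least as wide as the
    # two leg books combined.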
    def __checkLiquidity(self):
        """Check for missing liquidity"""
        if self.lastLeg1Tick.bidPrice1 <= self.lastLeg1Tick.lastPrice <= self.lastLeg1Tick.askPrice1 \
                and self.lastLeg2Tick.bidPrice1 <= self.lastLeg2Tick.lastPrice <= self.lastLeg2Tick.askPrice1:
            return True
        self.writeCtaLog(u'liquidity gap: last price outside the bid1/ask1 range')
        return False
# ----------------------------------------------------------------------
    def __arbShort(self, grid):
        """Short the spread (reverse arbitrage, open short) on the non-standard arbitrage contract"""
        self.writeCtaLog(u'short-spread (open short) order, price={0}, volume={1}'.format(grid.openPrice, grid.volume))
        if not self.trading:
            self.writeCtaLog(u'strategy stopped, not opening')
            return None
        bidPrice = self.lastLeg1Tick.bidPrice1 - self.lastLeg2Tick.askPrice1
        if self.lastLeg1Tick.bidPrice1 >= self.lastLeg1Tick.lastPrice:
            if self.lastLeg1Tick.bidVolume1 < 3:
                shortPrice = self.lastLeg1Tick.lastPrice - 2 * self.minDiff
            elif self.lastLeg1Tick.bidVolume1 < 10:
                shortPrice = self.lastLeg1Tick.lastPrice - self.minDiff
            else:
                shortPrice = self.lastLeg1Tick.lastPrice
        else:
            if self.lastLeg1Tick.bidVolume1 < 10 or self.lastLeg1Tick.bidVolume1 <= grid.volume:
                shortPrice = self.lastLeg1Tick.bidPrice1 - self.minDiff
            else:
                shortPrice = self.lastLeg1Tick.bidPrice1
        if self.lastLeg2Tick.askPrice1 <= self.lastLeg2Tick.lastPrice:
            if self.lastLeg2Tick.askVolume1 < 3:
                buyPrice = self.lastLeg2Tick.lastPrice + 2 * self.minDiff
            elif self.lastLeg2Tick.askVolume1 < 10:
                buyPrice = self.lastLeg2Tick.lastPrice + self.minDiff
            else:
                buyPrice = self.lastLeg2Tick.lastPrice
        else:
            if self.lastLeg2Tick.askVolume1 < 10 or self.lastLeg2Tick.bidVolume1 <= grid.volume:
                buyPrice = self.lastLeg2Tick.askPrice1 + self.minDiff
            else:
                buyPrice = self.lastLeg2Tick.askPrice1
        if bidPrice < grid.openPrice:
            self.writeCtaLog(u'actual spread {0} does not satisfy {1}'.format(bidPrice, grid.openPrice))
            return None
        if (shortPrice - buyPrice + self.minDiff) < grid.openPrice:
            self.writeCtaLog(u'order spread {0} does not satisfy {1}'.format(shortPrice - buyPrice + self.minDiff, grid.openPrice))
            return None
        # open short on leg1
        orderID = self.ctaEngine.sendOrder(self.Leg1Symbol, CTAORDER_SHORT, shortPrice, grid.volume, self)
        if orderID is None or len(orderID) == 0:
            self.writeCtaLog(u'error: failed to open short on Leg1')
            return None
        orders = orderID
        self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg1Symbol, 'DIRECTION': DIRECTION_SHORT,
                                           'OFFSET': OFFSET_OPEN, 'Volume': grid.volume,
                                           'Price': shortPrice, 'TradedVolume': EMPTY_INT,
                                           'OrderTime': self.curDateTime,
                                           'Grid': grid}
        # open long on leg2
        orderID = self.ctaEngine.sendOrder(self.Leg2Symbol, CTAORDER_BUY, buyPrice, grid.volume, self)
        if orderID is None or len(orderID) == 0:
            self.writeCtaLog(u'error: failed to open long on Leg2')
            # should the earlier Leg1 open be unwound here? (handled later in cancelorder)
            return None
        orders = orders + ';' + orderID
        self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg2Symbol, 'DIRECTION': DIRECTION_LONG,
                                           'OFFSET': OFFSET_OPEN, 'Volume': grid.volume,
                                           'Price': buyPrice, 'TradedVolume': EMPTY_INT,
                                           'OrderTime': self.curDateTime,
                                           'Grid': grid}
        grid.orderStatus = True
        grid.orderDatetime = self.curDateTime
        self.entrust = -1
        self.writeCtaLog(u'arb short Orders:{0}'.format(orders))
        return orders
# ----------------------------------------------------------------------
    def __arbBuy(self, grid):
        """Go long the spread (open long) on the non-standard arbitrage contract"""
        self.writeCtaLog(u'long-spread (open long) order, price={0}, volume={1}'.format(grid.openPrice, grid.volume))
        if not self.trading:
            self.writeCtaLog(u'strategy stopped, not opening')
            return None
        askPrice = self.lastLeg1Tick.askPrice1 - self.lastLeg2Tick.bidPrice1
        if self.lastLeg1Tick.askPrice1 <= self.lastLeg1Tick.lastPrice:
            if self.lastLeg1Tick.askVolume1 < 3:
                buyPrice = self.lastLeg1Tick.lastPrice + 2 * self.minDiff
            elif self.lastLeg1Tick.askVolume1 < 10:
                buyPrice = self.lastLeg1Tick.lastPrice + self.minDiff
            else:
                buyPrice = self.lastLeg1Tick.lastPrice
        else:
            if self.lastLeg1Tick.askVolume1 < 10 or self.lastLeg1Tick.bidVolume1 <= grid.volume:
                buyPrice = self.lastLeg1Tick.askPrice1 + self.minDiff
            else:
                buyPrice = self.lastLeg1Tick.askPrice1
        if self.lastLeg2Tick.bidPrice1 >= self.lastLeg2Tick.lastPrice:
            if self.lastLeg2Tick.bidVolume1 < 3:
                shortPrice = self.lastLeg2Tick.lastPrice - 2 * self.minDiff
            elif self.lastLeg2Tick.bidVolume1 < 10:
                shortPrice = self.lastLeg2Tick.lastPrice - self.minDiff
            else:
                shortPrice = self.lastLeg2Tick.lastPrice
        else:
            if self.lastLeg2Tick.bidVolume1 < 10 or self.lastLeg2Tick.bidVolume1 <= grid.volume:
                shortPrice = self.lastLeg2Tick.bidPrice1 - self.minDiff
            else:
                shortPrice = self.lastLeg2Tick.bidPrice1
        if askPrice > grid.openPrice:
            self.writeCtaLog(u'actual spread {0} does not satisfy {1}'.format(askPrice, grid.openPrice))
            return None
        if (buyPrice - shortPrice - self.minDiff) > grid.openPrice:
            self.writeCtaLog(u'quoted spread {0} does not satisfy {1}'.format((buyPrice - shortPrice - self.minDiff), grid.openPrice))
            return None
        # open long on leg1
        orderID = self.ctaEngine.sendOrder(self.Leg1Symbol, CTAORDER_BUY, buyPrice, grid.volume, self)
        if orderID is None or len(orderID) == 0:
            self.writeCtaLog(u'error: failed to open long on Leg1')
            return None
        orders = orderID
        self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg1Symbol, 'DIRECTION': DIRECTION_LONG,
                                           'OFFSET': OFFSET_OPEN, 'Volume': grid.volume,
                                           'Price': buyPrice, 'TradedVolume': EMPTY_INT,
                                           'OrderTime': self.curDateTime,
                                           'Grid': grid}
        # open short on leg2
        orderID = self.ctaEngine.sendOrder(self.Leg2Symbol, CTAORDER_SHORT, shortPrice, grid.volume, self)
        if (orderID is None) or len(orderID) == 0:
            self.writeCtaLog(u'error: failed to open short on Leg2')
            # should the earlier Leg1 open be unwound here? (handled later in cancelorder)
            return None
        orders = orders + ';' + orderID
        self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg2Symbol, 'DIRECTION': DIRECTION_SHORT,
                                           'OFFSET': OFFSET_OPEN, 'Volume': grid.volume,
                                           'Price': shortPrice, 'TradedVolume': EMPTY_INT,
                                           'OrderTime': self.curDateTime,
                                           'Grid': grid}
        grid.orderStatus = True
        self.entrust = 1
        grid.orderDatetime = self.curDateTime
        self.writeCtaLog(u'arb Buy Orders:{0}'.format(orders))
        return orders
# ----------------------------------------------------------------------
def __arbSell(self, grid, force = False):
"""非标准合约的套利平正套(平多)指令"""
self.writeCtaLog(u'套利价差正套(平多)单,price={0},volume={1}'.format(grid.closePrice, grid.volume))
if not self.trading:
self.writeCtaLog(u'停止状态,不开仓')
return None
bidPrice = self.lastLeg1Tick.bidPrice1 - self.lastLeg2Tick.askPrice1
if self.lastLeg1Tick.bidPrice1 >= self.lastLeg1Tick.lastPrice:
if self.lastLeg1Tick.bidVolume1 < 3:
sellPrice = self.lastLeg1Tick.lastPrice - 2*self.minDiff
elif self.lastLeg1Tick.bidVolume1 < 10:
sellPrice = self.lastLeg1Tick.lastPrice - self.minDiff
else:
sellPrice = self.lastLeg1Tick.lastPrice
else:
if self.lastLeg1Tick.bidVolume1 < 10 or self.lastLeg1Tick.bidVolume1 <= grid.volume:
sellPrice = self.lastLeg1Tick.bidPrice1 - self.minDiff
else:
sellPrice = self.lastLeg1Tick.bidPrice1
if self.lastLeg2Tick.askPrice1 <= self.lastLeg2Tick.lastPrice:
if self.lastLeg2Tick.askVolume1 < 3:
coverPrice = self.lastLeg2Tick.lastPrice + 2*self.minDiff
elif self.lastLeg2Tick.askVolume1 < 10:
coverPrice = self.lastLeg2Tick.lastPrice + self.minDiff
else:
coverPrice = self.lastLeg2Tick.lastPrice
else:
if self.lastLeg2Tick.askVolume1 < 10 or self.lastLeg2Tick.bidVolume1 <= grid.volume:
coverPrice = self.lastLeg2Tick.askPrice1 + self.minDiff
else:
coverPrice = self.lastLeg2Tick.askPrice1
if bidPrice < grid.closePrice and not force:
self.writeCtaLog(u'Actual spread {0} does not satisfy: {1}'.format(bidPrice, grid.closePrice))
return None
#if sellPrice - coverPrice < grid.closePrice and not force:
#    self.writeCtaLog(u'Counter-price spread {0} does not satisfy: {1}'.format(bidPrice, grid.closePrice))
#    return None
if force:
sellPrice -= self.minDiff
coverPrice += self.minDiff
leg1Pos = self.ctaEngine.posBufferDict.get(self.Leg1Symbol, None)
leg2Pos = self.ctaEngine.posBufferDict.get(self.Leg2Symbol, None)
if leg1Pos is None:
self.writeCtaLog(u'No position data found for Leg1: {0}'.format(self.Leg1Symbol))
return None
if leg2Pos is None:
self.writeCtaLog(u'No position data found for Leg2: {0}'.format(self.Leg2Symbol))
return None
if leg1Pos.longPosition < grid.volume:
self.writeCtaLog(u'{0} position {1} is less than required {2}'.format(self.Leg1Symbol, leg1Pos.longPosition, grid.volume))
return None
if leg2Pos.shortPosition < grid.volume:
self.writeCtaLog(u'{0} position {1} is less than required {2}'.format(self.Leg2Symbol, leg2Pos.shortPosition, grid.volume))
return None
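# Chinese futures exchanges (notably SHFE) distinguish closing yesterday's position
# from closing today's, so the close volume below is split into volYd/volToday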
# ------------------ Close long on leg1 ---------------------------------
# Case: a single lot, or backtesting
if grid.volume == 1 or self.backtesting:
orderID = self.ctaEngine.sendOrder(self.Leg1Symbol, CTAORDER_SELL, sellPrice, grid.volume, self)
orders = orderID
if orderID is None:
self.writeCtaLog(u'Error: failed to close long on Leg1: {0}'.format(self.Leg1Symbol))
return None
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg1Symbol, 'DIRECTION': DIRECTION_SHORT,
'OFFSET': OFFSET_CLOSE, 'Volume': grid.volume, 'TradedVolume': EMPTY_INT,
'Price': sellPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
else:
# Yesterday position exists and is smaller than the close volume
if leg1Pos.longYd > EMPTY_INT and leg1Pos.longYd < grid.volume:
volYd = leg1Pos.longYd  # close all of yesterday's position
volToday = grid.volume - volYd  # close the remainder from today's position
self.writeCtaLog(u'{0} yesterday: {1}/today: {2}; closing yesterday: {3} lots and today: {4} lots'
.format(self.Leg1Symbol, leg1Pos.longYd, leg1Pos.longToday, volYd, volToday))
# Close yesterday's position first (on the first call the ctaEngine also prefers yesterday)
orderID = self.ctaEngine.sendOrder(self.Leg1Symbol, CTAORDER_SELL, sellPrice,
volYd, self)
if orderID is None:
self.writeCtaLog(u'Error: failed to close long on Leg1: {0} (yesterday: {1} lots)'.format(self.Leg1Symbol, volYd))
return None
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg1Symbol, 'DIRECTION': DIRECTION_SHORT,
'OFFSET': OFFSET_CLOSE, 'Volume': volYd, 'TradedVolume': EMPTY_INT,
'Price': sellPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
# Reset cached yesterday volume to 0 so rapid consecutive closes do not oversubscribe it
leg1Pos.longYd = 0
leg1Pos.longPosition -= volYd
orders = orderID
# Close today's position
orderID = self.ctaEngine.sendOrder(self.Leg1Symbol, CTAORDER_SELL, sellPrice, volToday, self)
if orderID is None:
self.writeCtaLog(u'Error: failed to close long on Leg1: {0} (today: {1} lots)'.format(self.Leg1Symbol, volToday))
return None
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg1Symbol, 'DIRECTION': DIRECTION_SHORT,
'OFFSET': OFFSET_CLOSE, 'Volume': volToday, 'TradedVolume': EMPTY_INT,
'Price': sellPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
orders = orders + ';' + orderID
# Remaining cases: yesterday >= close volume, or no yesterday position and today >= close volume; left to the ctaEngine
else:
orderID = self.ctaEngine.sendOrder(self.Leg1Symbol, CTAORDER_SELL, sellPrice, grid.volume, self)
orders = orderID
if orderID is None:
self.writeCtaLog(u'Error: failed to close long on Leg1: {0}, {1} lots'.format(self.Leg1Symbol, grid.volume))
return None
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg1Symbol, 'DIRECTION': DIRECTION_SHORT,
'OFFSET': OFFSET_CLOSE, 'Volume': grid.volume, 'TradedVolume': EMPTY_INT,
'Price': sellPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
# -------------- Close short on leg2 -----------------------
# Case: a single lot, or backtesting
if grid.volume == 1 or self.backtesting:
orderID = self.ctaEngine.sendOrder(self.Leg2Symbol, CTAORDER_COVER, coverPrice, grid.volume, self)
if orderID is None:
self.writeCtaLog(u'Error: failed to close short on Leg2: {0}'.format(self.Leg2Symbol))
# Should the earlier Leg1 close be unwound here? (Handled later in the cancel-order logic)
return None
orders = orders + ';' + orderID
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg2Symbol, 'DIRECTION': DIRECTION_LONG,
'OFFSET': OFFSET_CLOSE, 'Volume': grid.volume, 'TradedVolume': EMPTY_INT,
'Price': coverPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
else:
# Yesterday position exists and is smaller than the close volume
if leg2Pos.shortYd > EMPTY_INT and leg2Pos.shortYd < grid.volume:
volYd = leg2Pos.shortYd  # close all of yesterday's position
volToday = grid.volume - volYd  # close the remainder from today's position
self.writeCtaLog(u'{0} current yesterday: {1}/today: {2}; closing yesterday: {3} and today: {4}'
.format(self.Leg2Symbol, leg2Pos.shortYd, leg2Pos.shortToday, volYd, volToday))
# Close yesterday's position
orderID = self.ctaEngine.sendOrder(self.Leg2Symbol, CTAORDER_COVER, coverPrice, volYd, self)
if orderID is None:
self.writeCtaLog(u'Error: failed to close short on Leg2: {0} (yesterday: {1} lots)'.format(self.Leg2Symbol, volYd))
return None
orders = orders + ';' + orderID
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg2Symbol, 'DIRECTION': DIRECTION_LONG,
'OFFSET': OFFSET_CLOSE, 'Volume': volYd, 'TradedVolume': EMPTY_INT,
'Price': coverPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
# Reset cached yesterday volume to 0 so rapid consecutive closes do not oversubscribe it
leg2Pos.shortYd = 0
leg2Pos.shortPosition -= volYd
# Close today's position
orderID = self.ctaEngine.sendOrder(self.Leg2Symbol, CTAORDER_COVER, coverPrice, volToday, self)
if orderID is None:
self.writeCtaLog(u'Error: failed to close short on Leg2: {0} (today: {1})'.format(self.Leg2Symbol, volToday))
return None
orders = orders + ';' + orderID
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg2Symbol, 'DIRECTION': DIRECTION_LONG,
'OFFSET': OFFSET_CLOSE, 'Volume': volToday, 'TradedVolume': EMPTY_INT,
'Price': coverPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
else:
# Other cases
orderID = self.ctaEngine.sendOrder(self.Leg2Symbol, CTAORDER_COVER, coverPrice, grid.volume, self)
if orderID is None:
self.writeCtaLog(u'Error: failed to close short on Leg2: {0} ({1} lots)'.format(self.Leg2Symbol, grid.volume))
# Should the earlier Leg1 close be unwound here? (Handled later in the cancel-order logic)
return None
orders = orders + ';' + orderID
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg2Symbol, 'DIRECTION': DIRECTION_LONG,
'OFFSET': OFFSET_CLOSE, 'Volume': grid.volume,
'Price': coverPrice, 'TradedVolume': EMPTY_INT,
'OrderTime': self.curDateTime,
'Grid': grid}
self.entrust = -1
grid.orderDatetime = self.curDateTime
self.writeCtaLog(u'arb Sell Orders:{0}'.format(orders))
return orders
# ----------------------------------------------------------------------
def __arbCover(self, grid, force = False):
"""非标准合约的套利平反套(平空)指令"""
self.writeCtaLog(u'套利价差平反套(平多)单,price={0},volume={1}'.format(grid.closePrice, grid.volume))
if not self.trading:
self.writeCtaLog(u'停止状态,不开仓')
return None
askPrice = self.lastLeg1Tick.askPrice1 - self.lastLeg2Tick.bidPrice1
if self.lastLeg1Tick.askPrice1 <= self.lastLeg1Tick.lastPrice:
if self.lastLeg1Tick.askVolume1 < 3:
coverPrice = self.lastLeg1Tick.lastPrice+2*self.minDiff
elif self.lastLeg1Tick.askVolume1 < 10:
coverPrice = self.lastLeg1Tick.lastPrice + self.minDiff
else:
coverPrice = self.lastLeg1Tick.lastPrice
else:
if self.lastLeg1Tick.askVolume1 < 10 or self.lastLeg1Tick.askVolume1 <= grid.volume:  # second check was bidVolume1, presumably a typo: a cover buys at the ask
coverPrice = self.lastLeg1Tick.askPrice1 + self.minDiff
else:
coverPrice = self.lastLeg1Tick.askPrice1
if self.lastLeg2Tick.bidPrice1 >= self.lastLeg2Tick.lastPrice:
if self.lastLeg2Tick.bidVolume1 < 3:
sellPrice = self.lastLeg2Tick.lastPrice - 2*self.minDiff
elif self.lastLeg2Tick.bidVolume1 < 10:
sellPrice = self.lastLeg2Tick.lastPrice - self.minDiff
else:
sellPrice = self.lastLeg2Tick.lastPrice
else:
if self.lastLeg2Tick.bidVolume1 < 10 or self.lastLeg2Tick.bidVolume1 <= grid.volume:
sellPrice = self.lastLeg2Tick.bidPrice1 - self.minDiff
else:
sellPrice = self.lastLeg2Tick.bidPrice1
if askPrice > grid.closePrice and not force:
self.writeCtaLog(u'Actual spread {0} does not satisfy: {1}'.format(askPrice, grid.closePrice))
return None
#if (coverPrice - sellPrice) > grid.closePrice and not force:
#    self.writeCtaLog(u'Counter-price spread {0} does not satisfy: {1}'.format((coverPrice - sellPrice), grid.closePrice))
#    return None
if force:
coverPrice += self.minDiff
sellPrice -= self.minDiff
leg1Pos = self.ctaEngine.posBufferDict.get(self.Leg1Symbol, None)
leg2Pos = self.ctaEngine.posBufferDict.get(self.Leg2Symbol, None)
if leg1Pos is None:
self.writeCtaLog(u'No position data found for Leg1: {0}'.format(self.Leg1Symbol))
return None
if leg2Pos is None:
self.writeCtaLog(u'No position data found for Leg2: {0}'.format(self.Leg2Symbol))
return None
if leg1Pos.shortPosition < grid.volume:
self.writeCtaLog(u'{0} position {1} is less than required {2}'.format(self.Leg1Symbol, leg1Pos.shortPosition, grid.volume))
return None
if leg2Pos.longPosition < grid.volume:
self.writeCtaLog(u'{0} position {1} is less than required {2}'.format(self.Leg2Symbol, leg2Pos.longPosition, grid.volume))
return None
# Close short on leg1
# Case: a single lot, or backtesting
if grid.volume == 1 or self.backtesting:
orderID = self.ctaEngine.sendOrder(self.Leg1Symbol, CTAORDER_COVER, coverPrice, grid.volume, self)
if orderID is None:
self.writeCtaLog(u'Failed to close short on Leg1: {0} ({1} lots)'.format(self.Leg1Symbol, grid.volume))
return None
orders = orderID
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg1Symbol, 'DIRECTION': DIRECTION_LONG,
'OFFSET': OFFSET_CLOSE, 'Volume': grid.volume, 'TradedVolume': EMPTY_INT,
'Price': coverPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
else:
# Yesterday position exists and is smaller than the close volume
if leg1Pos.shortYd > EMPTY_INT and leg1Pos.shortYd < grid.volume:
volYd = leg1Pos.shortYd  # close all of yesterday's position
volToday = grid.volume - volYd  # close the remainder from today's position
self.writeCtaLog(u'{0} closing yesterday: {1} and today: {2}'.format(self.Leg1Symbol, volYd, volToday))
# Close yesterday's position first
orderID = self.ctaEngine.sendOrder(self.Leg1Symbol, CTAORDER_COVER, coverPrice, volYd, self)
if orderID is None:
self.writeCtaLog(u'Failed to close short on Leg1: {0} (yesterday: {1} lots)'.format(self.Leg1Symbol, volYd))
return None
orders = orderID
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg1Symbol, 'DIRECTION': DIRECTION_LONG,
'OFFSET': OFFSET_CLOSE, 'Volume': volYd, 'TradedVolume': EMPTY_INT,
'Price': coverPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
# Reset cached yesterday volume to 0 so rapid consecutive closes do not oversubscribe it
leg1Pos.shortYd = 0
# Close today's position
orderID = self.ctaEngine.sendOrder(self.Leg1Symbol, CTAORDER_COVER, coverPrice, volToday, self)
if orderID is None:
self.writeCtaLog(u'Failed to close short on Leg1: {0} (today: {1} lots)'.format(self.Leg1Symbol, volToday))
# Should the earlier orders be unwound here? (Handled later in the cancel-order logic)
return None
orders = orders + ';' + orderID
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg1Symbol, 'DIRECTION': DIRECTION_LONG,
'OFFSET': OFFSET_CLOSE, 'Volume': volToday, 'TradedVolume': EMPTY_INT,
'Price': coverPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
else:
# Other cases (all-yesterday or all-today positions)
orderID = self.ctaEngine.sendOrder(self.Leg1Symbol, CTAORDER_COVER, coverPrice, grid.volume, self)
if orderID is None:
self.writeCtaLog(u'Failed to close short on Leg1: {0} ({1} lots)'.format(self.Leg1Symbol, grid.volume))
# Should the earlier orders be unwound here? (Handled later in the cancel-order logic)
return None
orders = orderID
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg1Symbol, 'DIRECTION': DIRECTION_LONG,
'OFFSET': OFFSET_CLOSE, 'Volume': grid.volume, 'TradedVolume': EMPTY_INT,
'Price': coverPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
# Close long on leg2
# Case: a single lot, or backtesting
if grid.volume == 1 or self.backtesting:
orderID = self.ctaEngine.sendOrder(self.Leg2Symbol, CTAORDER_SELL, sellPrice, grid.volume, self)
orders = orders + ';' + orderID
if orderID is None:
self.writeCtaLog(u'Failed to close long on Leg2: {0} ({1} lots)'.format(self.Leg2Symbol, grid.volume))
return None
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg2Symbol, 'DIRECTION': DIRECTION_SHORT,
'OFFSET': OFFSET_CLOSE, 'Volume': grid.volume, 'TradedVolume': EMPTY_INT,
'Price': sellPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
else:
# Yesterday position exists and is smaller than the close volume
if leg2Pos.longYd > EMPTY_INT and leg2Pos.longYd < grid.volume:
volYd = leg2Pos.longYd
volToday = grid.volume - volYd
self.writeCtaLog(u'{0} closing today: {1} and yesterday: {2}'.format(self.Leg2Symbol, volToday, volYd))
# Close yesterday's position first (on the first call the ctaEngine also prefers yesterday)
orderID = self.ctaEngine.sendOrder(self.Leg2Symbol, CTAORDER_SELL, sellPrice, volYd, self)
if orderID is None:
self.writeCtaLog(u'Failed to close long on Leg2: {0} (yesterday: {1} lots)'.format(self.Leg2Symbol, volYd))
return None
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg2Symbol, 'DIRECTION': DIRECTION_SHORT,
'OFFSET': OFFSET_CLOSE, 'Volume': volYd, 'TradedVolume': EMPTY_INT,
'Price': sellPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
# Reset cached yesterday volume to 0 so rapid consecutive closes do not oversubscribe it
leg2Pos.longYd = 0
orders = orders + ';' + orderID
# Close today's position
orderID = self.ctaEngine.sendOrder(self.Leg2Symbol, CTAORDER_SELL, sellPrice, volToday, self)
if orderID is None:
self.writeCtaLog(u'Failed to close long on Leg2: {0} (today)'.format(self.Leg2Symbol))
return None
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg2Symbol, 'DIRECTION': DIRECTION_SHORT,
'OFFSET': OFFSET_CLOSE, 'Volume': volToday, 'TradedVolume': EMPTY_INT,
'Price': sellPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
orders = orders + ';' + orderID
# Remaining cases: yesterday >= close volume, or no yesterday position and today >= close volume; left to the ctaEngine
else:
orderID = self.ctaEngine.sendOrder(self.Leg2Symbol, CTAORDER_SELL, sellPrice, grid.volume, self)
orders = orders + ';' + orderID
if orderID is None:
self.writeCtaLog(u'Failed to close long on Leg2: {0} ({1} lots)'.format(self.Leg2Symbol, grid.volume))
return None
self.uncompletedOrders[orderID] = {'SYMBOL': self.Leg2Symbol, 'DIRECTION': DIRECTION_SHORT,
'OFFSET': OFFSET_CLOSE, 'Volume': grid.volume, 'TradedVolume': EMPTY_INT,
'Price': sellPrice,
'OrderTime': self.curDateTime,
'Grid': grid}
self.entrust = 1
grid.orderDatetime = self.curDateTime
self.writeCtaLog(u'arb Cover Orders:{0}'.format(orders))
return orders
# ----------------------------------------------------------------------
def onTick(self, tick):
"""行情更新
:type tick: object
"""
# 更新策略执行的时间(用于回测时记录发生的时间)
self.curDateTime = tick.datetime
spread_tick = None
# 合并tick
if tick.vtSymbol != self.vtSymbol:
spread_tick = self.__combineTick(tick)
if spread_tick is None :
return
# Correct lastPrice: above the midline (0) take the min of bid/ask, below it take the max
if spread_tick.bidPrice1 > self.baseMidLine and spread_tick.askPrice1 > self.baseMidLine:
spread_tick.lastPrice = min(spread_tick.bidPrice1, spread_tick.askPrice1)
elif spread_tick.bidPrice1 < self.baseMidLine and spread_tick.askPrice1 < self.baseMidLine:
spread_tick.lastPrice = max(spread_tick.bidPrice1, spread_tick.askPrice1)
self.curTick = spread_tick
if not self.backtesting:
dt = datetime.now()
if (dt.hour >= 3 and dt.hour < 8) or (dt.hour >= 16 and dt.hour < 20):
return
if (spread_tick.datetime.hour >= 3 and spread_tick.datetime.hour <= 8) or (spread_tick.datetime.hour >= 16 and spread_tick.datetime.hour <= 20):
self.writeCtaLog(u'Market closed / call auction period; data not processed')
self.__initGrids()
return
if not self.recheckPositions and self.closeWindow:
self.writeCtaLog(u'Re-arming resubmission of close orders')
self.recheckPositions = True
# Compute the trading and closing time windows
self.__timeWindow(spread_tick)
# Trading logic
# First check whether we are live or still in the data warm-up stage
if not self.inited :
return
# Initialize the grid trader (or restore it from the local state file)
self.__initGrids()
if self.tradeWindow and not self.closeWindow and self.recheckPositions:
self.writeCtaLog(u'Trading window: recomputing positions')
# Recompute positions
self.__recheckPositions()
if not self.tradeWindow and self.closeWindow and not self.recheckPositions:
self.writeCtaLog(u'Closing window: resetting the position-recheck flag')
self.recheckPositions = True
if self.inited:
self.gt.save(direction=DIRECTION_LONG)
self.gt.save(direction=DIRECTION_SHORT)
# Run the cancel-order logic
self.__cancelLogic(self.curDateTime)
if self.curDateTime.second == 0:
self.__updateGridsDisplay()
if self.entrust == 0:
# Fetch pending short-open grids whose price the bid now satisfies
pendingGrids = self.gt.getGrids(direction=DIRECTION_SHORT,
end=spread_tick.bidPrice1)
if len(pendingGrids) > 1:
self.writeCtaLog(u'Multiple pending grids; picking only the highest-priced one')
sortedGrids = sorted(pendingGrids, key=lambda g: g.openPrice)
pendingGrids = sortedGrids[-1:]
# Submit the pending orders
for x in pendingGrids[:]:
if self.position.avaliablePos2Add() < 1:
msg = u'Short position limit reached; not opening more shorts'
if msg != self.logMsg:
self.logMsg = msg
self.writeCtaLog(msg)
continue
if not (spread_tick.bidPrice1 >= x.openPrice):
msg = u'spread_tick.bidPrice1:{0} < grid:{1}; not opening short'.format(spread_tick.bidPrice1, x.openPrice)
if msg != self.logMsg:
self.logMsg = msg
self.writeCtaLog(msg)
continue
if not self.__checkAccountLimit():
msg = u'Capital usage exceeds the limit; not opening'
if msg != self.logMsg:
self.logMsg = msg
self.writeCtaLog(msg)
continue
# Send the arbitrage open order
ref = self.__arbShort(x)
if ref is not None and len(ref) > 0:
self.writeCtaLog(u'Short-open order ref {0}'.format(ref))
self.gt.updateOrderRef(direction=DIRECTION_SHORT, openPrice=x.openPrice, orderRef=ref)
else:
self.writeCtaLog(u'Short-open order failed: {0}, v:{1}'.format(x.openPrice, x.volume))
# Fetch pending long-open grids whose price the ask now satisfies
pendingGrids = self.gt.getGrids(direction=DIRECTION_LONG,
end=spread_tick.askPrice1)
if len(pendingGrids) > 1:
self.writeCtaLog(u'Multiple pending grids; picking only the lowest-priced one')
sortedGrids = sorted(pendingGrids, key=lambda g: g.openPrice)
pendingGrids = sortedGrids[0:1]
# Submit the pending orders one by one
for x in pendingGrids:
if self.position.avaliablePos2Add() < 1:
msg = u'Long position limit reached; not opening more longs'
if msg != self.logMsg:
self.logMsg = msg
self.writeCtaLog(msg)
continue
if not spread_tick.askPrice1 <= x.openPrice:
msg = u'spread_tick.askPrice1:{0} > grid:{1}; not opening long'.format(spread_tick.askPrice1, x.openPrice)
if msg != self.logMsg:
self.logMsg = msg
self.writeCtaLog(msg)
continue
if not self.__checkAccountLimit():
msg = u'Capital usage exceeds the limit; not opening'
if msg != self.logMsg:
self.logMsg = msg
self.writeCtaLog(msg)
continue
ref = self.__arbBuy(x)
if ref is not None and len(ref) > 0:
self.writeCtaLog(u'Long-open order ref {0}'.format(ref))
self.gt.updateOrderRef(direction=DIRECTION_LONG, openPrice=x.openPrice, orderRef=ref)
else:
self.writeCtaLog(u'Long-open order failed: {0}, v:{1}'.format(x.openPrice, x.volume))
# Holding long-spread positions: check close conditions
if self.position.longPos > 0 and self.entrust == 0 and self.tradeWindow:
# Fetch opened, unclosed, unordered grids; check whether any can be closed
pendingGrids = self.gt.getGrids(direction=DIRECTION_LONG, opened=True, closed=False, ordered=False,
begin=999999, end=-999999)
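# (begin/end here span the whole price axis, so every open grid is returned)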
for x in pendingGrids:
if self.sigle_exist and len(pendingGrids) > 1:
continue
if x.closePrice < spread_tick.bidPrice1:
ref = self.__arbSell(x)
if ref is not None:
self.writeCtaLog(u'Close-long-spread order ref {0}'.format(ref))
self.gt.updateOrderRef(direction=DIRECTION_LONG, openPrice=x.openPrice, orderRef=ref)
else:
self.writeCtaLog(u'Close-long-spread order failed: {0}, v:{1}'.format(x.closePrice, x.volume))
# Holding short-spread positions: check close conditions
if self.position.shortPos < 0 and self.entrust == 0 and self.tradeWindow:
# Fetch opened, unclosed, unordered grids
pendingGrids = self.gt.getGrids(direction=DIRECTION_SHORT, opened=True, closed=False, ordered=False,
begin=-999999, end=999999)
for x in pendingGrids:
if self.sigle_exist and len(pendingGrids) > 1:
continue
if x.closePrice > spread_tick.askPrice1:
ref = self.__arbCover(x)
if ref is not None:
self.writeCtaLog(u'Close-short-spread order ref {0}'.format(ref))
self.gt.updateOrderRef(direction=DIRECTION_SHORT, openPrice=x.openPrice, orderRef=ref)
else:
self.writeCtaLog(u'Close-short-spread order failed: {0}, v:{1}'.format(x.closePrice, x.volume))
# ----------------------------------------------------------------------
def onBar(self, bar):
"""分钟K线数据更新
bar,k周期数据
"""
pass
# ----------------------------------------------------------------------
def __initGrids(self):
if len(self.gt.upGrids) <= 0 or len(self.gt.dnGrids) <= 0:
self.writeCtaLog(u'__initGrids(): initializing the grid trader')
self.gt.initGrid(upline=self.baseUpLine, dnline=self.baseDnLine)
self.writeCtaLog(u'__initGrids(): grid initialization complete')
self.recheckPositions = True
def __updateGridsDisplay(self):
"""更新网格显示信息"""
self.upGrids = self.gt.toStr(direction=DIRECTION_SHORT)
self.writeCtaLog(self.upGrids)
self.dnGrids = self.gt.toStr(direction=DIRECTION_LONG)
self.writeCtaLog(self.dnGrids)
def __closeAllGrids(self, direction, closePrice):
"""对所有的网格强制平仓"""
if direction == DIRECTION_SHORT:
# 扫描上网格
for x in self.gt.upGrids[:]:
# 已发送订单,已开仓,未平仓
if not x.openStatus or x.closeStatus:
continue
if x.orderStatus and x.orderRef != EMPTY_STRING and x.orderDatetime is not None:
orders = x.orderRef.split(';')
if len(orders) == 1:
self.writeCtaLog(u'{0}只有单腿委托{1}'.format(x.openPrice, orders[0]))
continue
# 当前分钟内,不再委托强平
if x.orderDatetime.minute == self.curDateTime.minute:
continue
self.writeCtaLog(u'取消平仓单:[ref={0},closeprice={1}]'.format(x.orderRef, x.closePrice))
for order in orders:
self.writeCtaLog(u'撤单:{0}'.format(order))
self.cancelOrder(order)
sleep(0.3)
oldPrice = x.closePrice
x.closePrice = closePrice
ref = self.__arbCover(x, force=True)
if ref:
x.orderRef = ref
x.orderStatus = True
x.orderDatetime = self.curDateTime
self.writeCtaLog(u'Force-submitted cover order [closeprice={0}, volume={1}]'
.format(x.closePrice, x.volume))
else:
self.writeCtaLog(u'Failed to submit the close order')
x.closePrice = oldPrice
if direction == DIRECTION_LONG:
# Scan the lower grids
for x in self.gt.dnGrids[:]:
if not x.openStatus or x.closeStatus:
self.writeCtaLog(u'Grid [open={0}, close={1}] does not satisfy the state check'.format(x.openPrice, x.closePrice))
continue
if x.orderStatus and x.orderRef != EMPTY_STRING and x.orderDatetime is not None:
orders = x.orderRef.split(';')
if len(orders) == 1:
self.writeCtaLog(u'{0} has only a single-leg order {1}'.format(x.openPrice, orders[0]))
continue
if x.orderDatetime.minute == self.curDateTime.minute:
continue
self.writeCtaLog(u'Cancelling close-long order: [ref={0}, closeprice={1}]'.format(x.orderRef, x.closePrice))
for order in orders:
self.writeCtaLog(u'Cancelling: {0}'.format(order))
self.cancelOrder(order)
sleep(0.3)
oldPrice = x.closePrice
x.closePrice = closePrice
# Force the close
ref = self.__arbSell(x, force=True)
if ref:
x.orderRef = ref
x.orderStatus = True
x.orderDatetime = self.curDateTime
self.writeCtaLog(
u'Force-submitted close-long order [closeprice={0}, volume={1}]'.format(x.closePrice, x.volume))
else:
self.writeCtaLog(u'Failed to submit the close order')
x.closePrice = oldPrice
# ----------------------------------------------------------------------
def __timeWindow(self, tick):
"""交易与平仓窗口"""
# 交易窗口 避开早盘和夜盘的前5分钟,防止隔夜跳空。
self.closeWindow = False
self.tradeWindow = False
self.openWindow = False
# 初始化当日的首次交易
#if (tick.datetime.hour == 9 or tick.datetime.hour == 21) and tick.datetime.minute == 0 and tick.datetime.second ==0:
# self.firstTrade = True
# 开市期,波动较大,用于判断止损止盈,或开仓
if (tick.datetime.hour == 9 or tick.datetime.hour == 21) and tick.datetime.minute < 10:
self.openWindow = True
# Day session
if tick.datetime.hour == 9 and ((tick.datetime.minute >= 0 and self.shortSymbol not in MARKET_ZJ) or tick.datetime.minute >= 15):
self.tradeWindow = True
return
if tick.datetime.hour == 10:
if (tick.datetime.minute <= 15 or tick.datetime.minute >= 30) or self.shortSymbol in MARKET_ZJ:
self.tradeWindow = True
return
if tick.datetime.hour == 11 and tick.datetime.minute <= 30:
self.tradeWindow = True
return
# CFFEX opens at 13:00; DCE, CZCE and SHFE open at 13:30
if tick.datetime.hour == 13 and tick.datetime.minute >= 00:
self.tradeWindow = True
return
# DCE, CZCE and SHFE close at 15:00
if tick.datetime.hour == 14:
if tick.datetime.minute < 59 or self.shortSymbol in MARKET_ZJ:
self.tradeWindow = True
return
if tick.datetime.minute == 59:  # day-session close
self.closeWindow = True
return
# CFFEX closes at 15:15
if tick.datetime.hour == 15 and self.shortSymbol in MARKET_ZJ:
if tick.datetime.minute < 14:
self.tradeWindow = True
return
if tick.datetime.minute >= 14:  # day-session close
self.closeWindow = True
return
# Night session
if tick.datetime.hour == 21 and tick.datetime.minute >= 0:
self.tradeWindow = True
return
# SHFE precious metals: night session until 02:30 next day
if self.shortSymbol in NIGHT_MARKET_SQ1:
if tick.datetime.hour == 22 or tick.datetime.hour == 23 or tick.datetime.hour == 0 or tick.datetime.hour == 1:
self.tradeWindow = True
return
if tick.datetime.hour == 2:
if tick.datetime.minute < 29:  # up to the last minute before the close
self.tradeWindow = True
return
if tick.datetime.minute == 29:  # night-session close
self.closeWindow = True
return
return
# SHFE non-ferrous metals, ferrous metals and bitumen: night session until 01:00 next day
if self.shortSymbol in NIGHT_MARKET_SQ2:
if tick.datetime.hour == 22 or tick.datetime.hour == 23:
self.tradeWindow = True
return
if tick.datetime.hour == 0:
if tick.datetime.minute < 59:  # up to the last minute before the close
self.tradeWindow = True
return
if tick.datetime.minute == 59:  # night-session close
self.closeWindow = True
return
return
# SHFE natural rubber: night session until 23:00
if self.shortSymbol in NIGHT_MARKET_SQ3:
if tick.datetime.hour == 22:
if tick.datetime.minute < 59:  # up to the last minute before the close
self.tradeWindow = True
return
if tick.datetime.minute == 59:  # night-session close
self.closeWindow = True
return
# CZCE and DCE: night session until 23:30
if self.shortSymbol in NIGHT_MARKET_ZZ or self.shortSymbol in NIGHT_MARKET_DL:
if tick.datetime.hour == 22:
self.tradeWindow = True
return
if tick.datetime.hour == 23:
if tick.datetime.minute < 29:  # up to the last minute before the close
self.tradeWindow = True
return
if tick.datetime.minute == 29 and tick.datetime.second > 30:  # night-session close
self.closeWindow = True
return
return
def __checkAccountLimit(self):
"""主动检查是否超过总体资金占用比例"""
c, a, p, pl = self.ctaEngine.getAccountInfo()
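# getAccountInfo() presumably returns (capital, available, percentUsed, percentLimit)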
if p > pl:
return False
return True
def __recheckPositions(self):
"""重新计算持仓"""
self.writeCtaLog(u'扫描网格,重新计算持仓')
# 重置position
self.position.clear()
checks = EMPTY_INT
# Scan the upper grids
for x in self.gt.upGrids[:]:
# Order sent, opened, not yet closed
if x.openStatus and not x.closeStatus:
closePrice = min(x.closePrice, self.curTick.lastPrice)
x.orderRef = EMPTY_STRING
# Unclosed volume = grid volume minus traded volume
# Update the position
self.position.openPos(direction=DIRECTION_SHORT, vol=x.volume - x.tradedVolume,
price=x.openPrice)
checks = checks + 1
self.writeCtaLog(u'Added short {0}, V:{1}'.format(x.openPrice, x.volume - x.tradedVolume))
elif x.orderStatus and not x.openStatus:
self.writeCtaLog(u'Resetting the open order on grid [{0}]'.format(x.openPrice))
x.orderStatus = False
x.orderRef = EMPTY_STRING
if checks == EMPTY_INT:
self.writeCtaLog(u'No short positions in the upper grids')
checks = EMPTY_INT
# Scan the lower grids
for x in self.gt.dnGrids[:]:
# Order sent, opened, not yet closed
if x.openStatus and not x.closeStatus:
closePrice = max(x.closePrice, self.curTick.lastPrice)
x.orderRef = EMPTY_STRING
# Unclosed volume = grid volume minus traded volume
# Update the position
self.position.openPos(direction=DIRECTION_LONG, vol=x.volume - x.tradedVolume,
price=x.openPrice)
checks = checks + 1
self.writeCtaLog(u'Added long {0}, V:{1}'.format(x.openPrice, x.volume - x.tradedVolume))
elif x.orderStatus and not x.openStatus:
self.writeCtaLog(u'Resetting the open order on grid [{0}]'.format(x.openPrice))
x.orderStatus = False
x.orderRef = EMPTY_STRING
if checks == EMPTY_INT:
self.writeCtaLog(u'No long positions in the lower grids')
self.gridpos = self.position.pos
# Mark the recheck as done
self.recheckPositions = False
def __cancelLogic(self, dt, force=False):
"撤单逻辑"""
if len(self.uncompletedOrders) < 1:
return
canceled_keys = []
#if ((dt - self.lastOrderTime).seconds > self.cancelSeconds / i ) \
# or force: # 超过设置的时间还未成交
"""
{'SYMBOL': self.Leg1Symbol, 'DIRECTION': DIRECTION_SHORT,
'OFFSET': OFFSET_OPEN, 'Volume': grid.volume,
'Price': shortPrice, 'TradedVolume':0 ,
'OrderTime': self.curDateTime,
'Grid': grid}
"""
order_keys = self.uncompletedOrders.keys()
for order_key in order_keys:
if order_key not in self.uncompletedOrders:
self.writeCtaLog(u'{0} is not among the uncompleted orders.'.format(order_key))
continue
order = self.uncompletedOrders[order_key]
order_time = order['OrderTime']
order_symbol = copy.copy(order['SYMBOL'])
order_volume = order['Volume'] - order['TradedVolume']
order_price = order['Price']
if (dt - order_time).seconds > self.cancelSeconds:
self.writeCtaLog(u'{0} unfilled for {1} seconds; cancelling order: {2}'.format(order_symbol, (dt - order_time).seconds, order))
# Find the owning grid and check whether both of its leg orders are unfilled
grid = order['Grid']
orders_in_grid = grid.orderRef.split(';')
if len(orders_in_grid) > 1:
self.writeCtaLog(u'{0}=>{1} both grid legs timed out unfilled; cancelling both'.format(grid.openPrice, grid.closePrice))
for order_in_grid in orders_in_grid:
# Cancel each order separately
self.cancelOrder(str(order_in_grid))
self.writeCtaLog(u'Deleting orderID: {0}'.format(order_in_grid))
try:
del self.uncompletedOrders[order_in_grid]
except Exception as ex:
self.writeCtaLog(u'{0} not found in uncompletedOrders'.format(order_in_grid))
grid.orderStatus = False
grid.orderRef = EMPTY_STRING
grid.orderDatetime = None
self.entrust = 0
continue
# Cancel this order
self.cancelOrder(str(order_key))
# A cancelled close order must be chased with a new close
if order['OFFSET'] == OFFSET_CLOSE:
# It is a close-long order
if order['DIRECTION'] == DIRECTION_SHORT:
if order_symbol == self.Leg1Symbol:
sellPrice = min(self.lastLeg1Tick.bidPrice1, self.lastLeg1Tick.lastPrice) - self.minDiff
else:
sellPrice = min(self.lastLeg2Tick.bidPrice1, self.lastLeg2Tick.lastPrice) - self.minDiff
orderID = self.ctaEngine.sendOrder(order_symbol, CTAORDER_SELL, sellPrice, order_volume, self)
if orderID is None:
self.writeCtaLog(u'Failed to resubmit {0} {1}-lot close-long order at {2}'.format(order_symbol, order_volume, sellPrice))
continue
# Add to the deletion list
canceled_keys.append(order_key)
# Update the grid's order reference
grid = order['Grid']
grid.orderRef = grid.orderRef.replace(order_key, orderID)
# Re-add the close-long order
self.uncompletedOrders[orderID] = {'SYMBOL': order_symbol, 'DIRECTION': DIRECTION_SHORT,
'OFFSET': OFFSET_CLOSE, 'Volume': order_volume,'TradedVolume': EMPTY_INT,
'Price': sellPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
# It is a close-short order
else:
# Take the counter price
if order_symbol == self.Leg1Symbol:
coverPrice = max(self.lastLeg1Tick.askPrice1, self.lastLeg1Tick.lastPrice) + self.minDiff
else:
coverPrice = max(self.lastLeg2Tick.askPrice1, self.lastLeg2Tick.lastPrice) + self.minDiff
orderID = self.ctaEngine.sendOrder(order_symbol, CTAORDER_COVER, coverPrice, order_volume, self)
if orderID is None:
self.writeCtaLog(u'Failed to resubmit {0} {1}-lot close-short order at {2}'.format(order_symbol, order_volume, coverPrice))
continue
# Add to the deletion list
canceled_keys.append(order_key)
# Update the grid's order reference
grid = order['Grid']
grid.orderRef = grid.orderRef.replace(order_key, orderID)
# Re-add the close-short order
self.uncompletedOrders[orderID] = {'SYMBOL': order_symbol, 'DIRECTION': DIRECTION_LONG,
'OFFSET': OFFSET_CLOSE, 'Volume': order_volume,'TradedVolume': EMPTY_INT,
'Price': coverPrice, 'OrderTime': self.curDateTime,
'Grid': grid}
# A cancelled open order must be chased with a new open
else:
# It is an open-short order
if order['DIRECTION'] == DIRECTION_SHORT:
if order_symbol == self.Leg1Symbol:
shortPrice = min(self.lastLeg1Tick.bidPrice1, self.lastLeg1Tick.lastPrice) - self.minDiff
else:
shortPrice = min(self.lastLeg2Tick.bidPrice1, self.lastLeg2Tick.lastPrice) - self.minDiff
# Send the order
orderID = self.ctaEngine.sendOrder(order_symbol, CTAORDER_SHORT, shortPrice, order_volume, self)
if orderID is None or len(orderID) == 0:
self.writeCtaLog(u'Failed to resubmit {0} {1}-lot open-short order at {2}'.format(order_symbol, order_volume, shortPrice))
continue
# Add to the deletion list
canceled_keys.append(order_key)
# Update the grid's order reference
grid = order['Grid']
grid.orderRef = grid.orderRef.replace(order_key, orderID)
if shortPrice < order_price:
# Adjust the take-profit level accordingly
if grid.direction == DIRECTION_SHORT:
grid.closePrice -= (order_price-shortPrice)
else:
grid.closePrice += (order_price-shortPrice)
# Re-add the open-short order
self.uncompletedOrders[orderID] = {'SYMBOL': order_symbol, 'DIRECTION': DIRECTION_SHORT,
'OFFSET': OFFSET_OPEN, 'Volume': order_volume,
'Price': shortPrice, 'TradedVolume': EMPTY_INT,
'OrderTime': self.curDateTime,
'Grid': grid}
# It is an open-long order
else:
if order_symbol == self.Leg1Symbol:
buyPrice = max(self.lastLeg1Tick.askPrice1, self.lastLeg1Tick.lastPrice) + self.minDiff
else:
buyPrice = max(self.lastLeg2Tick.askPrice1, self.lastLeg2Tick.lastPrice) + self.minDiff
# Send the order
orderID = self.ctaEngine.sendOrder(order_symbol, CTAORDER_BUY, buyPrice, order_volume, self)
if orderID is None or len(orderID) == 0:
self.writeCtaLog(u'Failed to resubmit {0} {1}-lot open-long order at {2}'.format(order_symbol, order_volume, buyPrice))
continue
# Add to the deletion list
canceled_keys.append(order_key)
# Update the grid's order reference
grid = order['Grid']
grid.orderRef = grid.orderRef.replace(order_key, orderID)
if buyPrice > order_price:
# Adjust the take-profit level accordingly
if grid.direction == DIRECTION_SHORT:
grid.closePrice -= (buyPrice - order_price)
else:
grid.closePrice += (buyPrice - order_price)
# Re-add the open-long order
self.uncompletedOrders[orderID] = {'SYMBOL': order_symbol, 'DIRECTION': DIRECTION_LONG,
'OFFSET': OFFSET_OPEN, 'Volume': order_volume,
'Price': buyPrice, 'TradedVolume': EMPTY_INT,
'OrderTime': self.curDateTime,
'Grid': grid}
# Remove the cancelled orders
for key in canceled_keys:
if key in self.uncompletedOrders:
self.writeCtaLog(u'Deleting orderID: {0}'.format(key))
del self.uncompletedOrders[key]
# ----------------------------------------------------------------------
def saveData(self, id):
"""保存过程数据"""
# 保存K线
if not self.backtesting:
return
|
{"hexsha": "9644ba0e85872fd05c394e69bc66c662263ff8fe", "size": 80304, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/Strategy/strategy30_NonStdArbitrageExecutor.py", "max_stars_repo_name": "frikyalong/vnpy", "max_stars_repo_head_hexsha": "d8ea554e34ff285c97cc2ddb4e881a1f0a6f02d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-05T07:34:36.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-05T07:34:36.000Z", "max_issues_repo_path": "examples/Strategy/strategy30_NonStdArbitrageExecutor.py", "max_issues_repo_name": "frikyalong/vnpy", "max_issues_repo_head_hexsha": "d8ea554e34ff285c97cc2ddb4e881a1f0a6f02d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/Strategy/strategy30_NonStdArbitrageExecutor.py", "max_forks_repo_name": "frikyalong/vnpy", "max_forks_repo_head_hexsha": "d8ea554e34ff285c97cc2ddb4e881a1f0a6f02d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.0722891566, "max_line_length": 203, "alphanum_fraction": 0.5129632397, "include": true, "reason": "import numpy", "num_tokens": 20433}
|
struct MultiDiscreteNonParametricSampler{T <: Real, S <: AbstractVector{T}, A <: AliasTable} <: Sampleable{Multivariate,Discrete}
support::Vector{S}
probabilities::S
aliastable::A
function MultiDiscreteNonParametricSampler{T,S}(support::Vector{S}, probs::AbstractVector{<:Real}) where {T <: Real, S<:AbstractVector{T}}
isempty(support) && error("Empty support given.")
aliastable = AliasTable(probs)
new{T,S,typeof(aliastable)}(support, convert(S, probs), aliastable)
end
end
function MultiDiscreteNonParametricSampler(support::Vector{S}, probs::AbstractVector{<:Real}) where {T <: Real, S <: AbstractVector{T}}
return MultiDiscreteNonParametricSampler{T,S}(support, probs)
end
function Base.length(s::MultiDiscreteNonParametricSampler)
return length(s.support[1])
end
Base.eltype(::Type{<:MultiDiscreteNonParametricSampler}) = Float64
function Distributions._rand!(rng::AbstractRNG, s::MultiDiscreteNonParametricSampler, x::AbstractVector)
@inbounds x .= s.support[rand(rng, s.aliastable)]
end
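# Minimal usage sketch with hypothetical values (assumes Random is loaded alongside
# this module's dependencies):
#   support = [[1.0, 2.0], [3.0, 4.0]]
#   probs = [0.3, 0.7]
#   s = MultiDiscreteNonParametricSampler(support, probs)
#   rand(Random.default_rng(), s)  # draws one of the two support vectors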
"""
SMPSSampler
Sampler object for SMPS scenarios. Obtained by reading from a model defined in SMPS format.
See also: [`SMPSScenario`](@ref)
"""
struct SMPSSampler{T <: AbstractFloat, M <: AbstractMatrix} <: AbstractSampler{SMPSScenario{T,M}}
template::LPData{T,M}
technology::UnitRange{Int}
recourse::UnitRange{Int}
random_variables::Dict{RowCol, Sampleable}
random_vectors::Dict{Vector{RowCol}, Sampleable}
inclusions::Dict{RowCol, Symbol}
end
function finite_support(sampler::SMPSSampler{T}) where T <: AbstractFloat
return all(ran_var -> ran_var isa DiscreteNonParametric, values(sampler.random_variables)) &&
all(ran_vec -> ran_vec isa MultiDiscreteNonParametricSampler, values(sampler.random_vectors))
end
function support_size(sampler)
ran_var_sizes = map(values(sampler.random_variables)) do ran_var
return Float64(length(ran_var.support))
end
ran_vec_sizes = map(values(sampler.random_vectors)) do ran_vec
return Float64(length(ran_vec))
end
return reduce(*, vcat(ran_var_sizes, ran_vec_sizes))
end
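# For example, two independent discrete random variables with 3 and 4 support points
# plus one block with 5 atoms give support_size == 3.0 * 4.0 * 5.0 == 60.0.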
function SMPSSampler(sto::RawStoch{N}, stage::SMPSStage{T}) where {N, T <: AbstractFloat}
2 <= stage.id <= N + 1 || error("$(stage.id) not in range 2 to $(N + 1).")
random_variables = Dict{RowCol, Sampleable}()
random_vectors = Dict{Vector{RowCol}, Sampleable}()
inclusions = Dict{RowCol, Symbol}()
for ran_var in sto.random_variables[stage.id - 1]
inclusions[ran_var.rowcol] = ran_var.inclusion
if ran_var isa IndepDiscrete
random_variables[ran_var.rowcol] =
DiscreteNonParametric(ran_var.support, ran_var.probabilities)
elseif ran_var isa IndepDistribution
if ran_var.distribution == UNIFORM
random_variables[ran_var.rowcol] =
Uniform(first(ran_var.parameters), second(ran_var.parameters))
elseif ran_var.distribution == NORMAL
random_variables[ran_var.rowcol] =
Normal(first(ran_var.parameters), second(ran_var.parameters))
elseif ran_var.distribution == GAMMA
random_variables[ran_var.rowcol] =
Gamma(first(ran_var.parameters), second(ran_var.parameters))
elseif ran_var.distribution == BETA
random_variables[ran_var.rowcol] =
Beta(first(ran_var.parameters), second(ran_var.parameters))
elseif ran_var.distribution == LOGNORM
random_variables[ran_var.rowcol] =
LogNormal(first(ran_var.parameters), second(ran_var.parameters))
end
end
end
for ran_vec in sto.random_vectors[stage.id - 1]
if ran_vec isa BlockDiscrete
isempty(ran_vec.support[1]) && error("Block has empty support.")
rowcols = Vector{RowCol}()
support = Vector{Vector{T}}()
push!(support, Vector{T}())
for (rowcol, val) in ran_vec.support[1]
push!(rowcols, rowcol)
push!(support[end], val)
inclusions[rowcol] = ran_vec.inclusion
end
for remaining in ran_vec.support[2:end]
push!(support, Vector{T}())
for (rowcol, val) in ran_vec.support[1]
if haskey(remaining, rowcol)
push!(support[end], remaining[rowcol])
else
push!(support[end], val)
end
end
end
random_vectors[rowcols] = MultiDiscreteNonParametricSampler(support, ran_vec.probabilities)
end
end
return SMPSSampler(stage.uncertain,
stage.technology,
stage.recourse,
random_variables,
random_vectors,
inclusions)
end
function (sampler::SMPSSampler{T})() where T <: AbstractFloat
# Collect samples
samples = Dict{RowCol,T}()
for (rowcol, ran_var) in sampler.random_variables
samples[rowcol] = rand(ran_var)
end
for (rowcols, ran_vec) in sampler.random_vectors
block_sample = rand(ran_vec)
for (idx, rowcol) in enumerate(rowcols)
samples[rowcol] = block_sample[idx]
end
end
return create_scenario(sampler, samples)
end
function full_support(sampler::SMPSSampler{T}) where T <: AbstractFloat
# Sanity check
finite_support(sampler) || error("Sampler does not have finite support, cannot return full support.")
# Warn if support is large
nscenarios = support_size(sampler)
if nscenarios > 1e5
@warn "Generating full support of $nscenarios scenarios."
end
# Collect all realizations
all_realizations = Vector{Vector{Tuple{T,Dict{RowCol,T}}}}()
# Collect the full support of every random variable
for (rowcol, ran_var) in sampler.random_variables
push!(all_realizations, Vector{Tuple{T,Dict{RowCol,T}}}())
for (π,x) in zip(ran_var.p, ran_var.support)
push!(all_realizations[end], (π, Dict(rowcol => x)))
end
end
# Collect the full support of every random vector
for (rowcols, ran_vec) in sampler.random_vectors
push!(all_realizations, Vector{Tuple{T,Dict{RowCol,T}}}())
for (π,vec) in zip(ran_vec.probabilities, ran_vec.support)
d = Dict{RowCol,T}()
for (rowcol,x) in zip(rowcols,vec)
d[rowcol] = x
end
push!(all_realizations[end], (π, d))
end
end
# Generate scenario for every realization combination
return mapreduce(vcat, Base.Iterators.product(all_realizations...)) do realization
π = 1.0
samples = Dict{RowCol,T}()
for (ρ, outcome) in realization
samples = merge(samples, outcome)
π *= ρ
end
return create_scenario(sampler, samples; π = π)
end
end
function create_scenario(sampler::SMPSSampler{T}, samples::Dict{RowCol,T}; π::AbstractFloat = 1.0) where T <: AbstractFloat
# Prepare scenario data
Δq = copy(sampler.template.c₁)
A = copy(sampler.template.A)
Δd₁ = copy(sampler.template.d₁)
ΔC = copy(sampler.template.C)
Δd₂ = copy(sampler.template.d₂)
Δh = copy(sampler.template.b)
# Fill scenario data
for (rowcol, ξ) in samples
(row, col) = rowcol
(i,j,type) = sampler.template.indexmap[rowcol]
if type == OBJ
if sampler.inclusions[rowcol] == MULTIPLY
Δq[j] += abs(Δq[j]) * ξ
else
Δq[j] += ξ
end
elseif type == EQ
if col == RHS
if sampler.inclusions[rowcol] == MULTIPLY
Δh[i] += abs(Δh[i]) * ξ
else
Δh[i] += ξ
end
else
if sampler.inclusions[rowcol] == MULTIPLY
A[i,j] += abs(A[i,j]) * ξ
else
A[i,j] += ξ
end
end
elseif type == LEQ
if col == RHS
if sampler.inclusions[rowcol] == MULTIPLY
Δd₂[i] += abs(Δd₂[i]) * ξ
else
Δd₂[i] += ξ
end
else
if sampler.inclusions[rowcol] == MULTIPLY
ΔC[i,j] += abs(ΔC[i,j]) * ξ
else
ΔC[i,j] += ξ
end
end
elseif type == GEQ
if col == RHS
if sampler.inclusions[rowcol] == MULTIPLY
Δd₁[i] += abs(Δd₁[i]) * ξ
else
Δd₁[i] += ξ
end
else
if sampler.inclusions[rowcol] == MULTIPLY
ΔC[i,j] += abs(ΔC[i,j]) * ξ
else
ΔC[i,j] += ξ
end
end
elseif type == RANGE
if sampler.inclusions[rowcol] == MULTIPLY
Δd₁[i] += abs(Δd₁[i]) * ξ
Δd₂[i] += abs(Δd₂[i]) * ξ
else
Δd₁[i] += ξ
Δd₂[i] += ξ
end
end
end
ΔT = A[:,sampler.technology]
ΔW = A[:,sampler.recourse]
return SMPSScenario(Probability(π), Δq, ΔT, ΔW, Δh, ΔC, Δd₁, Δd₂)
end
|
{"hexsha": "a95c4cd8c6f11f8ffbad2dda5058d68cae555fbb", "size": 9449, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/io/smps/sampler.jl", "max_stars_repo_name": "rtwalker/StochasticPrograms.jl", "max_stars_repo_head_hexsha": "2e59f0ad0504515855bbef411c67653b5723b3a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/io/smps/sampler.jl", "max_issues_repo_name": "rtwalker/StochasticPrograms.jl", "max_issues_repo_head_hexsha": "2e59f0ad0504515855bbef411c67653b5723b3a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/io/smps/sampler.jl", "max_forks_repo_name": "rtwalker/StochasticPrograms.jl", "max_forks_repo_head_hexsha": "2e59f0ad0504515855bbef411c67653b5723b3a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4105691057, "max_line_length": 142, "alphanum_fraction": 0.5819663456, "num_tokens": 2363}
|
\documentclass[main.tex]{subfiles}
\begin{document}
\subsection{GW interferometry in the TT gauge}
\marginpar{Friday\\ 2020-5-1, \\ compiled \\ \today}
% We were discussing the basics of GW interferometry.
The TT gauge is a coordinate system in which the mirrors are free-falling (in the \(xy\) plane at least, and for the frequencies we are considering; all the delicate considerations we must make in order to distinguish the GW from the background still hold).
So, the mirrors are still; however the light propagating through spacetime is affected by the fact that the spacetime is ``stretched'': for our photons the metric reads
%
\begin{align}
\dd{s^2} = - c^2 \dd{t^2} + \qty(1 + h_{+} (t)) \dd{x}^2 + \qty(1 - h_{+}(t)) \dd{y}^2 + \dd{z}^2 = 0
\,,
\end{align}
%
so in the \(x\) arm, depending on the direction of propagation we will have:
%
\begin{align}
\dd{x} = \sqrt{ \frac{c^2 \dd{t^2}}{1 + h_{+}(t)}}
\approx \pm c \dd{t} \qty(1 - \frac{1}{2} h_{+}(t))
\,.
\end{align}
We can integrate this along the photon's path to recover the effective travel length: for the first leg of the journey we get
%
\begin{align}
L_{x} = \int_{0}^{L_x} \dd{x} = \int_{t_0 }^{t_1 } c \dd{t} = c (t_1 - t_0 )
- \frac{c}{2} \int_{t_0 }^{t_1 } \dd{t} h_{+}(t)
\,,
\end{align}
%
and for the second leg:
%
\begin{align}
- L_x = \int_{L_x}^{0} \dd{x} = - \int_{t_1 }^{t_2 } c \dd{t} \qty(1 - \frac{h_{+}(t)}{2}) = - c (t_2 - t_1 ) + \frac{c}{2} \int_{t_1 }^{t_2 } \dd{t} h_{+}(t)
\,,
\end{align}
%
where \(t_{0, 1, 2}\) are the times at which the photon leaves the beamsplitter, bounces off the mirror, and returns to the beamsplitter.
So the time difference is given by
%
\begin{align}
t_2 - t_0 = \frac{2L_x}{c} + \frac{1}{2} \int_{t_0 }^{t_2} \dd{t} h_{+}(t)
\,,
\end{align}
%
where we can consider \(t_2 \approx t_0 + 2 L_x /c = \tau\) in the integration bound, since the correction would be second order.
Let us then compute the integral:
%
\begin{align}
\frac{1}{2} \int_{t_0 }^{\tau } h_+ (t) \dd{t} = \frac{1}{2} \int_{t_0 }^{\tau } \dd{t} h_0 \cos(\omega_{GW}t) =
\frac{h_0 }{2 \omega_{GW}} \eval{\sin(\omega_{GW} t)}_{t=t_0 }^{t = \tau }
\,.
\end{align}
We can simplify the expression making use of the trigonometric relation
%
\begin{align}
\sin(\alpha + 2 \beta ) - \sin(\alpha ) = 2 \sin(\beta ) \cos(\alpha + \beta )
\,,
\end{align}
%
where our \(\alpha = \omega_{GW} t_0 \), and \(\beta = \omega_{GW} L_x / c\).
This yields:
%
\begin{align}
t_2 - t_0 = \frac{2L_x}{c} + \frac{L_x}{c} h_{+} \qty(t_0 + \frac{L_x}{c}) \frac{\sin(\frac{\omega_{gw} L_x}{c})}{\frac{\omega_{gw}L_x}{c}}
\,,
\end{align}
%
while for the \(y\) arm the sign of the corrective term is inverted --- the travel time diminishes.
Note that in this last expression we are computing \(h_+\) at the time \(t_0 + L_x / c\).
We have divided and multiplied by \(L_x / c\) in order to recover a sinc function, \(\operatorname{sinc} x = \sin x / x\).
The argument of this function is the ratio of the length of our arm to the wavelength of the GW.
If \(L_x / c \ll T_{GW} = 2 \pi / \omega_{GW}\), the perturbation is essentially ``frozen'' during the light's travel; in the opposite limit during the travel time of the light the perturbation oscillates back and forth, cancelling out most of the effect.
We have a maximum of the effect, then, when the GW perturbation is effectively static during the time in which we are detecting: this suggests we should build \emph{short} GW interferometers.
Keep in mind, though, that the effect size also scales with \(L_x\): a very small detector wouldn't work.
% If the length, on the other hand, is very large the perturbation cancels out during the flight of the photon.
We must trade off between these two.
If we fix the frequency, making the detector longer and longer does not help: further full oscillations of the path length will not have any effect.
The light will arrive at the beamsplitter at a time \(t = t_2 \), from which we can (at least approximately) recover \(t_0 \approx t - 2L /c \).
Similarly, we can calculate the starting times of the two beams by inverting the relation and plugging in what we have just found:
%
\begin{align}
t_0^{x} &= t - \frac{2L_x}{c} - \frac{L_x}{c} h_{+} \qty(t - \frac{L_x}{c}) \operatorname{sinc} \qty( \frac{\omega_{GW} L_x}{c}) \\
t_0^{y} &= t - \frac{2L_y}{c} + \frac{L_y}{c} h_{+} \qty(t - \frac{L_y}{c}) \operatorname{sinc} \qty( \frac{\omega_{GW} L_y}{c})
\,.
\end{align}
If the light arrives at a certain time \(t\) to the beamsplitter from both arms, then we can compute the phase difference of the beams by starting from \(t_0^{x} - t_0^{y}\): \(\Delta \phi = \omega_{l} (t_0^{x} - t_0^{y})\).
The interesting thing is the phase difference: if in both arms the light reaches the detector at \(t_2 \) we have
%
\begin{subequations}
\begin{align}
\Delta \phi
&= \omega_{l} \qty(t_0^{x }- t_0^{y}) \\
&= \underbrace{\omega_{l} 2 \frac{L_x -L_y}{c}}_{\Delta \phi_0 } + \underbrace{\omega_{l} \frac{2L}{c} \operatorname{sinc} \qty(\frac{\omega_{gw} L}{c} ) h_0 \cos(\omega_{gw} t + \alpha )}_{\Delta \phi_{GW}}
\,,
\end{align}
\end{subequations}
%
where we substituted \(h_{+} (t - L / c)\) with its explicit expression, with the constant phase \(\alpha = - \omega_{GW} L / c\).
The term \(\Delta \phi_0 \) is controlled by the experimenter, while the term \(\Delta \phi_{GW}\) is due to the GW.
We know that the output intensity will look like:
%
\begin{align}
I _{\text{out}} = E_0^2 \sin^2 (\Delta \phi_0 + \Delta \phi_{GW})
\,.
\end{align}
There will be two main contributions to \(\Delta \phi_0 \):
\begin{enumerate}
\item a microscopic term we can vary to change the working point of the interferometer, meaning the output with no GW;
\item a macroscopic term called the \textbf{Schnupp asymmetry} which allows the \emph{sideband frequency} \(\omega_{l} + \omega_{sb}\) to leak.
\end{enumerate}
We want to maximize \(\Delta \phi_{GW}\), so we have a tradeoff: we want to stay before the first zero of the sinc, but we also want to have a relatively large detector, since there is an \(L\) multiplying everything. Specifically, we have
%
\begin{align}
\Delta \phi_{GW} = \omega_{l} \frac{2L}{c} \operatorname{sinc} \qty(\frac{\omega_{GW}L}{c}) h_0 \cos(\omega_{GW} t + \alpha )
\propto L \operatorname{sinc} \qty( \frac{2 \pi L}{\lambda_{GW}})
\propto \sin(\frac{2 \pi L}{\lambda_{GW}})
\,,
\end{align}
%
which reaches a maximum when the argument of the sine is \(\pi /2\): so, we have
%
\begin{align}
\frac{\pi}{2} = \frac{2 \pi L}{\lambda_{GW}} \implies L = \frac{\lambda_{GW}}{4} \approx \SI{750}{km} \qty(\frac{\SI{100}{Hz}}{f_{GW}})
\,.
\end{align}
So, our optimal length is of the order of a quarter of the wavelength.
This is the same as saying that the one-way travel time \(L/c\) is a quarter of the GW period, so the round trip samples half a period without a sign flip.
% This would mean, for a frequency of \SI{100}{Hz}, that we would need a detector of around \SI{750}{km}.
If we do the computation accounting for the oscillation of the laser light, we get that the field out of the BS is
%
\begin{align}
E &= \frac{E_0}{2} \exp(i \omega_{l} t + i \Delta \phi_0 + i \Delta \phi_{GW}) \approx \frac{E_0}{2} \exp(i \omega_{l} t + i \Delta \phi_0)
\qty(1 + i\Delta \phi_{GW}) \\
&= \frac{E_0}{2} e^{-i \omega_{l} (t - 2L / c)}
\qty(1 + i \omega_{l} \frac{L}{c} \operatorname{sinc} \qty(\frac{\omega_{GW} L}{c}) \frac{e^{i \omega_{GW} t + i \alpha } + e^{-i \omega_{GW} t - i \alpha }}{2})
\\
&= \frac{E_0}{2} e^{-i \gamma } \qty(e^{-i \omega_{l} t}
+ \beta e^{-i \alpha } e^{-i (\omega_{l} - \omega_{gw})} + \beta e^{i \alpha } e^{-i (\omega_{l} + \omega_{gw}) t})
\,,
\end{align}
%
with a suitable definition of \(\gamma \) and \(\beta \) --- it does not really matter, the important thing is that we now have components of the oscillation at \(\omega_{l} \pm \omega_{GW}\).
% We can interpret this in different ways, depending on whether we want to model it as a modulation of the amplitude (adding the sidebands' sinusoids in phase with the main one) or as a modulation in phase (adding the sidebands' sinusoids in opposition of phase to the main one).
% This is the same as a single field which is modulated in amplitude.
We can interpret the addition of two sidebands which are in phase with the signal as an \emph{amplitude modulation}:
%
\begin{align}
\cos(\omega_{c}t)
+ A_{sb} \cos((\omega_{c} + \omega_{sb}) t )
+ A_{sb} \cos((\omega_{c} - \omega_{sb}) t )
= (1 + A_m \cos(\omega_{m} t)) \cos(\omega_{c} t )
\,,
\end{align}
%
while if the sidebands are out of phase with the signal we can interpret them as a phase modulation:
%
\begin{align}
\cos(\omega_{c}t)
+ A_{sb} \sin((\omega_{c} + \omega_{sb}) t )
+ A_{sb} \sin((\omega_{c} - \omega_{sb}) t )
= \cos(\omega_{c} t + A_m \cos(\omega_{m} t))
\,.
\end{align}
These two scenarios are identical (amplitude modulation and phase modulation) if we look at the amplitude of the Fourier transform, but with different phases.
The phase modulation scenario is approximate, since there is a slight amplitude modulation (which however is second order).
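To see where the approximation enters, expand the phase-modulated signal to first order in \(A_m\) (a sketch, keeping only linear terms):
%
\begin{align}
\cos(\omega_{c} t + A_m \cos(\omega_{m} t))
\approx \cos(\omega_{c} t) - A_m \cos(\omega_{m} t) \sin(\omega_{c} t)
= \cos(\omega_{c} t) - \frac{A_m}{2} \qty[\sin((\omega_{c} + \omega_{m})t) + \sin((\omega_{c} - \omega_{m})t)]
\,,
\end{align}
%
which reproduces the sideband form above with \(A_{sb} = - A_m / 2\); the neglected quadratic terms are the residual amplitude modulation.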
\subsection{Lasers and cavities}
We have seen that the optimal length of our detector is of the order of several hundreds of kilometers: this is an issue! In this section we will see how to ``fold'' our interferometer so that we can reach this sensitivity.
\subsubsection{Dielectric mirrors}
An ideal mirror would be a sharp interface between two media, where the incoming electric field \(E _{\text{in}}\) is split into \(r E _{\text{in}} = E_r\) and \(t E _{\text{in}} = E_{t}\). Due to energy conservation, these must satisfy \(r^2+t^2 = 1\).
For a lossless mirror, the transmission coefficients \(t\) are symmetric under the swap of the two materials, while \(r\) changes sign if we go from the denser to the less dense material or vice versa.
The best mirrors in the world are built for GW detectors: these are dielectric mirrors.
The idea is to stack dielectric interfaces on top of each other by alternating layers of high and low index of refraction, so that each layer has an optical depth of \(\lambda /4\) at the desired wavelength \(\lambda \).
So, going through two of them changes the phase by \(\pi \) (half of a wavelength has gone by). Also, if we go through two of them then we pass \emph{one} low-to-high index of refraction transition, yielding a phase difference of \(\pi \). So, the global phase is \(2 \pi \equiv 0\).
So, all the reflected light keeps going back as the interference is constructive.
This only holds as long as the light is of the correct frequency, and its angle of incidence is a certain one.
% so that the wave coming back has constructive interference while the wave being transmitted interferes destructively with the next layer.
\subsubsection{Cavities}
A \textbf{cavity} is an arrangement of mirrors such that we have a closed path for light.
Mirrors are symmetric: if we can couple light into the cavity through them, the same fraction leaks out.
In the round-trip the beam can lose some energy to the environment: this is described as a ``round-trip loss''.
We consider a horizontal cavity with two mirrors: we will have an incident field \(E _{\text{in}}\), a circulating field \(E_{c}\) and a transmitted field \(E_{t}\).
The ``in'' mirror is labelled 1, the ``out'' mirror is labelled 2. Both of these have reflection and transmission coefficients \(r_{1, 2}\) and \(t_{1, 2}\) respectively.
In general the reflection and transmission coefficients will be complex since the laser can acquire a phase while passing through the mirror, however this can be disregarded: it amounts to a global phase, which can be discarded by moving the mirrors until the desired working point is reached.
The circulating field can be obtained by adding the transmitted component of the incoming field, and the circulating field itself which has been reflected by both mirrors:
%
\begin{align}
E_{c} = t_1 E _{\text{in}} + r_1 r_2 E_{c} e^{-ik 2L}
\,,
\end{align}
%
and we can simplify this to
%
\begin{align}
E_{c} = E _{\text{in}} \frac{t_1 }{1 - r_1 r_2 e^{-ik2L}}
\,.
\end{align}
Although it might seem that this calculation is simplistic, neglecting the field which has done more than one round-trip, the result is in fact the same as the more complete calculation.
Here \(k\) is the wavevector of the electric field.
This expression gives us the \emph{round-trip gain}: the term in the denominator can become lower than 1.
The reflected field, which is in the same place as the incoming field but moving in the opposite direction, is given by
%
\begin{subequations}
\begin{align}
E_{r} &= -r_1 E _{\text{in}} + r_2 t_1 E_{c} e^{-ik2L} \\
&= E _{\text{in}} \qty(- r_1 + \frac{r_2 t_1^2 e^{-ik2L}}{1 - r_1 r_2 e^{-ik2L}}) \\
&= -E _{\text{in}} \frac{r_1 - r_2 e^{-ik 2L}}{1 - r_1 r_2 e^{-ik2L}}
\,.
\end{align}
\end{subequations}
On the other hand, the transmitted field going out of mirror 2 is
\begin{align}
E_{t} = t_2 E_c = E _{\text{in}} \frac{t_1 t_2 }{1 - r_1 r_2 e^{-ik2L}}
\,.
\end{align}
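As a sanity check, on resonance (\(e^{-ik2L} = 1\)) a lossless, symmetric cavity with \(r_1 = r_2 = r\), \(t_1 = t_2 = t\) and \(r^2 + t^2 = 1\) transmits everything: \(E_t = E _{\text{in}} \, t^2 / (1 - r^2) = E _{\text{in}}\).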
The circulating intensity can be calculated by the square modulus of the circulating field:
%
\begin{align}
I_{c} = E^2 _{\text{in}} \abs{\frac{t_1 }{1 - r_1 r_2 e^{-ik2L}}}^2
\,,
\end{align}
%
which we want to be large, so we want to minimize the denominator: this means we should tune the length so that the exponential is equal to 1, i.e. \(2kL = 2 \pi n\), which implies \(L = n \pi / k\) for some integer \(n\).
If we perform the optimal choice, we will find
%
\begin{align}
I_c = E^2 _{\text{in}} \qty(\frac{t_1}{1 - r_1 r_2 })^2
\,.
\end{align}
We can plot the intensity as \(k\) varies (which means we are varying the wavelength of the laser): as long as \(r_1 r_2 \) is close to 1, we see distinct peaks, whose spacing is fixed even as we vary \(r_1 r_2 \), since it only depends on the length of the cavity.
This distance, which is equal to \(c / 2L\), is called the \textbf{free spectral range}.
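For instance, for the \(L = \SI{3}{km}\) cavity used in the example below, the free spectral range is
%
\begin{align}
\frac{c}{2L} = \frac{\SI{3e8}{m/s}}{2 \times \SI{3e3}{m}} = \SI{50}{kHz}
\,.
\end{align}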
As we increase the reflectivity (which is measured by \(r_1 r_2 \)) the peaks get narrower and higher.
% The distance in frequency between the peaks, \(c / 2L\), is called the \emph{free spectral range}.
The \emph{finesse} is defined as the free spectral range divided by the FWHM of the peaks, and it can be shown that it can be expressed as
%
\begin{align}
\mathcal{F} = \frac{c / 2L}{\text{FWHM}} = \frac{\pi \sqrt{r_1 r_2 }}{1 - r_1 r_2 }
\,.
\end{align}
Actually, we take the last expression to be the definition of the finesse; it is only approximately the ratio of FSR and FWHM.
Using this, we can estimate the storage time of a photon inside the cavity:
%
\begin{align}
\tau_{s} \approx \frac{L \mathcal{F}}{c \pi }
\,.
\end{align}
For example, if \(r_1^2= \num{.99}\), \(r_2 = 1\) and \(L = \SI{3}{km}\) then \(\mathcal{F} \approx 625\) and \(\tau_{s} \approx \SI{2}{ms}\).
\end{document}
|
{"hexsha": "901023c7206d1500ecb99580fd900d49e9695078", "size": 14867, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ap_second_semester/gravitational_physics/may01.tex", "max_stars_repo_name": "jacopok/notes", "max_stars_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-10-10T13:10:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T14:52:50.000Z", "max_issues_repo_path": "ap_second_semester/gravitational_physics/may01.tex", "max_issues_repo_name": "jacopok/notes", "max_issues_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ap_second_semester/gravitational_physics/may01.tex", "max_forks_repo_name": "jacopok/notes", "max_forks_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-10-03T16:20:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-06T16:11:07.000Z", "avg_line_length": 49.5566666667, "max_line_length": 294, "alphanum_fraction": 0.6852761149, "num_tokens": 4782}
|
!------------------------------------------------------------------------------------------
! File: HMC_Module_Phys_ET.f90
! Author: Fabio Delogu
!
! Created on April 2, 2014, 5:19 PM
!------------------------------------------------------------------------------------------
!------------------------------------------------------------------------------------------
! Module Header
module HMC_Module_Phys_ET
!------------------------------------------------------------------------------------------
! External module(s) for all subroutine in this module
use HMC_Module_Vars_Loader, only: oHMC_Vars
use HMC_Module_Tools_Debug
! Implicit none for all subroutines in this module
implicit none
!------------------------------------------------------------------------------------------
contains
!------------------------------------------------------------------------------------------
! Subroutine to calculate evapotranspiration
subroutine HMC_Phys_ET_Cpl(iID, iRows, iCols, iTime, iDaySteps, sTime, iNLake, iNDam)
!------------------------------------------------------------------------------------------
! Variable(s) declaration
integer(kind = 4) :: iID
integer(kind = 4) :: iRows, iCols
integer(kind = 4) :: iNLake, iNDam
integer(kind = 4) :: iTime, iDaySteps, iStep
integer(kind = 4) :: iI, iJ, iL, iD
real(kind = 4), dimension (iRows, iCols) :: a2dVarET, a2dVarVTot, a2dVarETPot
real(kind = 4), dimension (iRows, iCols) :: a2dVarVRet, a2dVarVTotWP
real(kind = 4) :: dVarET, dVarAE, dVarETLake, dVarETTot, dVarETPot
integer(kind = 4), dimension (iRows, iCols) :: a2iVarMask, a2iVarChoice
real(kind = 4), dimension (iRows, iCols) :: a2dVarDEM, a2dVarAreaCell, a2dVarCtWP
real(kind = 4), dimension (iRows, iCols) :: a2dVarAE, a2dVarAEres
integer(kind = 4), dimension (iNDam, 2) :: a2iVarXYDam
real(kind = 4), dimension (iNDam) :: a1dVarCodeDam
real(kind = 4), dimension (iNDam) :: a1dVarVDam
integer(kind = 4), dimension (iNLake, 2) :: a2iVarXYLake
real(kind = 4), dimension (iNLake) :: a1dVarCodeLake
real(kind = 4), dimension (iNLake) :: a1dVarVLake
character(len = 19) :: sTime
character(len = 10) :: sVarAE, sVarET, sVarETTot, sVarETPot
character(len = 10), parameter :: sFMTVarET = "(F7.4)"
!------------------------------------------------------------------------------------------
!------------------------------------------------------------------------------------------
! Initialization variable(s)
a2iVarMask = 0; a2iVarChoice = 0; a2dVarDEM = 0.0; a2dVarAreaCell = 0.0; a2dVarCtWP = 0.0;
a2dVarAE = 0.0; a2dVarET = 0.0; a2dVarETPot = 0.0;
a2dVarVRet = 0.0; a2dVarVTot = 0.0; a2dVarVTotWP = 0.0;
dVarAE = 0.0; dVarET = 0.0; dVarETLake = 0.0; a2dVarAEres = 0.0;
dVarETTot = 0.0; dVarETPot = 0.0;
a2iVarXYDam = 0; a1dVarCodeDam = 0.0; a1dVarVDam = 0.0;
a2iVarXYLake = 0; a1dVarCodeLake = 0.0; a1dVarVLake = 0.0;
!------------------------------------------------------------------------------------------
!------------------------------------------------------------------------------------------
! Data static definition
a2dVarDEM = oHMC_Vars(iID)%a2dDem
a2iVarMask = oHMC_Vars(iID)%a2iMask
a2iVarChoice = oHMC_Vars(iID)%a2iChoice
a2dVarAreaCell = oHMC_Vars(iID)%a2dAreaCell
a2dVarCtWP = oHMC_Vars(iID)%a2dCtWP
! Lake(s) and dam(s) variable(s)
a2iVarXYDam = oHMC_Vars(iID)%a2iXYDam
a1dVarCodeDam = oHMC_Vars(iID)%a1dCodeDam
a1dVarVDam = oHMC_Vars(iID)%a1dVDam
a2iVarXYLake = oHMC_Vars(iID)%a2iXYLake
a1dVarCodeLake = oHMC_Vars(iID)%a1dCodeLake
a1dVarVLake = oHMC_Vars(iID)%a1dVLake
! Extracting dynamic variable(s)
a2dVarVRet = oHMC_Vars(iID)%a2dVRet
! Extracting dynamic state variable(s)
a2dVarVTot = oHMC_Vars(iID)%a2dVTot ! Total soil volume
a2dVarET = oHMC_Vars(iID)%a2dET ! Evapotranspiration ==> from LST phys or forcing dataset
a2dVarETPot = oHMC_Vars(iID)%a2dETPot ! Potential Evapotranspiration ==> from LST phys or forcing dataset
! Compute soil water content at wilting point - minimum threshold for evapotranspiration
a2dVarVTotWP = oHMC_Vars(iID)%a2dVTotWP
! Extracting checking variable(s)
dVarETTot = oHMC_Vars(iID)%dVarETTot
! Info start
call mprintf(.true., iINFO_Verbose, ' Phys :: Evapotranspiration ... ' )
!------------------------------------------------------------------------------------------
!------------------------------------------------------------------------------------------
! Debug
if (iDEBUG.gt.0) then
call mprintf(.true., iINFO_Extra, ' ========= EVAPOTRANSPIRATION START =========== ')
call mprintf(.true., iINFO_Extra, checkvar(a2dVarET, a2iVarMask, 'ET START ') )
call mprintf(.true., iINFO_Extra, checkvar(a2dVarAE, a2iVarMask, 'AE START ') )
call mprintf(.true., iINFO_Extra, checkvar(a2dVarETPot, a2iVarMask, 'ET.POT START ') )
call mprintf(.true., iINFO_Extra, checkvar(a2dVarVTot, a2iVarMask, 'VTOT START ') )
call mprintf(.true., iINFO_Extra, checkvar(a2dVarVRet, a2iVarMask, 'VRET START ') )
call mprintf(.true., iINFO_Extra, '')
endif
!------------------------------------------------------------------------------------------
!------------------------------------------------------------------------------------------
! Calculating control variable(s)
dVarAE = sum(a2dVarET, mask=a2dVarDem.gt.0.0)/max(1,count(a2dVarDem.gt.0.0))
dVarET = sum(a2dVarET, mask=a2dVarDem.gt.0.0)/max(1,count(a2dVarDem.gt.0.0))
dVarETPot = sum(a2dVarETPot, mask=a2dVarDem.gt.0.0)/max(1,count(a2dVarDem.gt.0.0))
dVarETTot = dVarETTot + dVarAE
! ET information time step
write(sVarAE, sFMTVarET) dVarAE
write(sVarETPot, sFMTVarET) dVarETPot
write(sVarET, sFMTVarET) dVarET
write(sVarETTot, sFMTVarET) dVarETTot
call mprintf(.true., iINFO_Basic, ' Phys :: EVT START :: AvgValue :: '// &
' AEvt: '//sVarAE//' [mm] '// &
' PEvt: '//sVarETPot//' [mm] '// &
' Evt: '//sVarET//' [mm] '// &
' Evt Tot: '//sVarETTot//' [mm]')
!------------------------------------------------------------------------------------------
!------------------------------------------------------------------------------------------
! Lake and dam ET updating
! Dam updating
if (iNDam .gt. 0) then
! Cycle on dam(s)
do iD = 1, iNDam
iI = 0; iJ = 0;
iI = a2iVarXYDam(iD, 2)
iJ = a2iVarXYDam(iD, 1)
! Compute dam volume
if (a1dVarCodeDam(iD) .gt. 0) then
! Distributed dam-lake
dVarETLake = 0.0
dVarETLake = sum(sum( a2dVarETPot, DIM = 1, mask=a2iVarChoice .eq. a1dVarCodeDam(iD)))
! Update dam volume
a1dVarVDam(iD) = a1dVarVDam(iD) - dVarETLake/1000*a2dVarAreaCell(iI, iJ) !in m^3
endif
if (a1dVarVDam(iD) .lt. 0.0) a1dVarVDam(iD) = 0.0
dVarETLake = 0.0
enddo
endif
! Lake updating
if (iNLake .gt. 0 ) then
! Cycle on lake(s)
do iL = 1, iNLake
iI = 0; iJ = 0;
iI = a2iVarXYLake(iL,2)
iJ = a2iVarXYLake(iL,1)
! Compute lake volume
if (a1dVarCodeLake(iL) .gt. 0) then
! Distributed lake
dVarETLake = 0.0
dVarETLake = sum(sum( a2dVarETPot, DIM = 1, mask=a2iVarChoice .eq. a1dVarCodeLake(iL)))
! Update lake volume
a1dVarVLake(iL) = a1dVarVLake(iL) - dVarETLake/1000*a2dVarAreaCell(iI, iJ) !in m^3
endif
if (a1dVarVLake(iL) .lt. 0.0) a1dVarVLake(iL) = 0.0
dVarETLake = 0.0
enddo
endif
!------------------------------------------------------------------------------------------
!------------------------------------------------------------------------------------------
! Passing evapotranspiration (ET) to Actual evapotranspiration (AE)
where (a2dVarDEM.gt.0.0)
a2dVarAE = a2dVarET
endwhere
!------------------------------------------------------------------------------------------
!------------------------------------------------------------------------------------------
! Calculating retention volume
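! (added annotation) Three branches below: (1) the retention storage alone
! satisfies the potential ET demand; (2) it satisfies the demand only partially,
! and the residual is drawn from the total soil volume down to the wilting
! point; (3) no retention storage is available, so ET is drawn directly from
! the soil volume down to the wilting point. Lake cells (a2iVarChoice > 1)
! are excluded, having been handled above.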
where( (a2dVarVRet.gt.0.0) .and. (a2dVarVRet.ge.a2dVarETPot) .and. &
(a2dVarDEM.gt.0.0) .and. (a2iVarChoice.le.1) )
a2dVarVRet = a2dVarVRet - a2dVarETPot
a2dVarAE = a2dVarETPot
elsewhere( (a2dVarVRet.gt.0.0) .and. (a2dVarVRet.lt.a2dVarETPot) .and. &
(a2dVarDEM.gt.0.0) .and. (a2iVarChoice.le.1) )
! Compute residual evapotranspiration demand
a2dVarAEres = a2dVarETPot - a2dVarVRet
where( a2dVarAEres .lt. a2dVarAE)
a2dVarAE = a2dVarAEres
endwhere
where( a2dVarAE .lt. 0.0)
a2dVarAE = 0.0
endwhere
where( (a2dVarVTot - a2dVarVTotWP) .gt. a2dVarAE )
a2dVarVTot = a2dVarVTot - a2dVarAE
a2dVarAE = a2dVarVRet + a2dVarAE
elsewhere ( (a2dVarVTot - a2dVarVTotWP) .le. a2dVarAE .and. (a2dVarVTot - a2dVarVTotWP) .gt. 0.0 )
a2dVarAE = a2dVarVRet + (a2dVarVTot - a2dVarVTotWP)
a2dVarVTot = a2dVarVTotWP
elsewhere ( (a2dVarVTot-a2dVarVTotWP) .le. 0.0 ) ! to account also for VTot<Vwp situations
a2dVarAE = 0.0
endwhere
a2dVarVRet = 0.0
elsewhere (a2dVarDEM.gt.0.0 .and. a2iVarChoice.le.1) ! Retention == 0.0 not on lakes
where( a2dVarAE .lt. 0.0)
a2dVarAE = 0.0
endwhere
where ((a2dVarVTot - a2dVarVTotWP).gt.a2dVarAE)
! remove ET from the soil volume only when it is not raining, i.e. when the retention storage is zero;
! when it rains, evaporation from the soil is negligible
a2dVarVTot = a2dVarVTot - a2dVarAE
elsewhere ((a2dVarVTot-a2dVarVTotWP).le.a2dVarAE .and. (a2dVarVTot-a2dVarVTotWP) .gt. 0.0 )
a2dVarAE = a2dVarVTot - a2dVarVTotWP
a2dVarVTot = a2dVarVTotWP
elsewhere ( (a2dVarVTot-a2dVarVTotWP) .le. 0.0 ) ! to account also for VTot<Vwp situations
a2dVarAE = 0.0
endwhere
endwhere
!------------------------------------------------------------------------------------------
!------------------------------------------------------------------------------------------
! Calculating control variable(s)
dVarAE = sum(a2dVarAE, mask=a2dVarDem.gt.0.0)/max(1,count(a2dVarDem.gt.0.0))
dVarET = sum(a2dVarET, mask=a2dVarDem.gt.0.0)/max(1,count(a2dVarDem.gt.0.0))
dVarETPot = sum(a2dVarETPot, mask=a2dVarDem.gt.0.0)/max(1,count(a2dVarDem.gt.0.0))
dVarETTot = dVarETTot + dVarAE
! ET information time step
write(sVarAE, sFMTVarET) dVarAE
write(sVarETPot, sFMTVarET) dVarETPot
write(sVarET, sFMTVarET) dVarET
write(sVarETTot, sFMTVarET) dVarETTot
call mprintf(.true., iINFO_Basic, ' Phys :: EVT END :: AvgValue :: '// &
' AEvt: '//sVarAE//' [mm] '// &
' PEvt: '//sVarETPot//' [mm] '// &
' Evt: '//sVarET//' [mm] '// &
' Evt Tot: '//sVarETTot//' [mm]')
!------------------------------------------------------------------------------------------
!------------------------------------------------------------------------------------------
! Debug
if (iDEBUG.gt.0) then
call mprintf(.true., iINFO_Extra, '')
call mprintf(.true., iINFO_Extra, checkvar(a2dVarET, a2iVarMask, 'ET END ') )
call mprintf(.true., iINFO_Extra, checkvar(a2dVarAE, a2iVarMask, 'AE END ') )
call mprintf(.true., iINFO_Extra, checkvar(a2dVarETPot, a2iVarMask, 'ET.POT END ') )
call mprintf(.true., iINFO_Extra, checkvar(a2dVarVTot, a2iVarMask, 'VTOT END ') )
call mprintf(.true., iINFO_Extra, checkvar(a2dVarVRet, a2iVarMask, 'VRET END ') )
call mprintf(.true., iINFO_Extra, ' ========= EVAPOTRANSPIRATION END =========== ')
endif
!------------------------------------------------------------------------------------------
!------------------------------------------------------------------------------------------
! Copy instantaneous AE & AEpot to 3D var
! AE - 3D
if (all(oHMC_Vars(iID)%a3dAE.lt.0.0))then
oHMC_Vars(iID)%a3dAE(:,:,int(iDaySteps)) = a2dVarAE
else
! Shifting the daily stack back by one step
do iStep=2, int(iDaySteps)
oHMC_Vars(iID)%a3dAE(:,:,int(iStep-1)) = oHMC_Vars(iID)%a3dAE(:,:,int(iStep))
enddo
! Updating with new field
where(oHMC_Vars(iID)%a2dDEM.gt.0.0)
oHMC_Vars(iID)%a3dAE(:,:,int(iDaySteps)) = a2dVarAE
elsewhere
oHMC_Vars(iID)%a3dAE(:,:,int(iDaySteps)) = -9999.0
endwhere
endif
! AEpot - 3D
if (all(oHMC_Vars(iID)%a3dAEpot.lt.0.0))then
oHMC_Vars(iID)%a3dAEpot(:,:,int(iDaySteps)) = a2dVarETPot
else
! Shifting the daily stack back by one step
do iStep=2, int(iDaySteps)
oHMC_Vars(iID)%a3dAEpot(:,:,int(iStep-1)) = oHMC_Vars(iID)%a3dAEpot(:,:,int(iStep))
enddo
! Updating with new field
where(oHMC_Vars(iID)%a2dDEM.gt.0.0)
oHMC_Vars(iID)%a3dAEpot(:,:,int(iDaySteps)) = a2dVarETPot
elsewhere
oHMC_Vars(iID)%a3dAEpot(:,:,int(iDaySteps)) = -9999.0
endwhere
endif
! Passing variable(s) to global declaration
oHMC_Vars(iID)%a2dAE = a2dVarAE
oHMC_Vars(iID)%a2dAEPot = a2dVarETPot
oHMC_Vars(iID)%a2dVRet = a2dVarVRet
oHMC_Vars(iID)%a1dVLake = a1dVarVLake
oHMC_Vars(iID)%a1dVDam = a1dVarVDam
! Updating state variable(s): ET and Total Volume
oHMC_Vars(iID)%a2dET = 0.0 ! Re-initializing ET
oHMC_Vars(iID)%a2dETPot = 0.0 ! Re-initializing ET pot
oHMC_Vars(iID)%a2dVTot = a2dVarVTot ! Updating total volume
oHMC_Vars(iID)%dVarETTot = dVarETTot ! Check mean cumulated ET
! Info end
call mprintf(.true., iINFO_Verbose, ' Phys :: Evapotranspiration ... OK ' )
!------------------------------------------------------------------------------------------
end subroutine HMC_Phys_ET_Cpl
!------------------------------------------------------------------------------------------
end module HMC_Module_Phys_ET
!------------------------------------------------------------------------------------------
|
{"hexsha": "4ade231bf03c7d52e4abbf3dc3bec4ff0435edab", "size": 16666, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "HMC_Module_Phys_ET.f90", "max_stars_repo_name": "c-hydro/hmc-dev", "max_stars_repo_head_hexsha": "49577101335633e543ecef35f5dbf1fd48792667", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "HMC_Module_Phys_ET.f90", "max_issues_repo_name": "c-hydro/hmc-dev", "max_issues_repo_head_hexsha": "49577101335633e543ecef35f5dbf1fd48792667", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-04-08T08:12:55.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-01T20:38:38.000Z", "max_forks_repo_path": "HMC_Module_Phys_ET.f90", "max_forks_repo_name": "c-hydro/hmc-dev", "max_forks_repo_head_hexsha": "49577101335633e543ecef35f5dbf1fd48792667", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-20T16:40:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-20T16:40:49.000Z", "avg_line_length": 48.8739002933, "max_line_length": 118, "alphanum_fraction": 0.4359174367, "num_tokens": 4710}
|
import cv2
import numpy as np
# TODO: please add gray image support
def save_grad_img(grads, out_path):
"""
Save gradients as img
:param grads: gradients obtained from visualization model
:param out_path: the path to save gradients image
:return:
"""
grads = grads - grads.min()
grads /= max(grads.max(), 1e-12)  # guard against an all-zero gradient map
grads = np.transpose(grads, [1, 2, 0])
# the two channel reversals in the original cancelled out, so they are merged away
grads = np.uint8(grads * 255)
grads = np.squeeze(grads)
cv2.imwrite(out_path, grads)
def save_cam_img(img, grads, out_path):
"""
save the activation map on the original img
:param img: original image with three chanels (RGB) in range(0,1)
:param grads: grads w.r.t input image
:param out_path: the path to save the image
:return:
"""
grads = np.transpose(grads, [1, 2, 0])
grads = grads[:, :, ::-1]
heat_map = cv2.applyColorMap(np.uint8(grads * 255), cv2.COLORMAP_JET)
heat_map = heat_map / 255
cam = heat_map + img
cam = cam / np.max(cam)
cv2.imwrite(out_path, np.uint8(255 * cam))
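# --------------------------------------------------------------------------
# Minimal usage sketch (hedged: the arrays below are synthetic stand-ins for
# real model gradients/activations; file names are placeholders)
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_grads = rng.standard_normal((3, 64, 64)).astype(np.float32)  # (C, H, W) gradients
    save_grad_img(demo_grads, "demo_grad.png")
    demo_cam = rng.random((1, 64, 64)).astype(np.float32)  # single-channel activation map in [0, 1]
    demo_img = rng.random((64, 64, 3))                     # RGB image in [0, 1]
    save_cam_img(demo_img, demo_cam, "demo_cam.png")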
|
{"hexsha": "b2ad6ed36dfcb95f3fe9db5de60acfe665009e03", "size": 1075, "ext": "py", "lang": "Python", "max_stars_repo_path": "vis/utils.py", "max_stars_repo_name": "sunalbert/lucid.pytorch", "max_stars_repo_head_hexsha": "1bcc87a41c99bef1d64d37116c8a2440d11b0def", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-06-29T11:48:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-09T12:33:27.000Z", "max_issues_repo_path": "vis/utils.py", "max_issues_repo_name": "ykwon0407/pytorch-vis", "max_issues_repo_head_hexsha": "1bcc87a41c99bef1d64d37116c8a2440d11b0def", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vis/utils.py", "max_forks_repo_name": "ykwon0407/pytorch-vis", "max_forks_repo_head_hexsha": "1bcc87a41c99bef1d64d37116c8a2440d11b0def", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-17T14:46:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-17T14:46:41.000Z", "avg_line_length": 26.2195121951, "max_line_length": 73, "alphanum_fraction": 0.6279069767, "include": true, "reason": "import numpy", "num_tokens": 314}
|
! RUN: %python %S/test_errors.py %s %flang_fc1
! Test that the interface of specific intrinsics passed as dummy arguments
! are correctly validated against actual arguments explicit interface.
intrinsic :: abs, dabs
interface
subroutine foo(f)
interface
function f(x)
real :: f
real, intent(in) :: x
end function
end interface
end subroutine
subroutine foo2(f)
interface
function f(x)
double precision :: f
double precision, intent(in) :: x
end function
end interface
end subroutine
end interface
! OK
call foo(abs)
! OK
call foo2(dabs)
!ERROR: Actual procedure argument has interface incompatible with dummy argument 'f='
call foo(dabs)
!ERROR: Actual procedure argument has interface incompatible with dummy argument 'f='
call foo2(abs)
end
|
{"hexsha": "91ce2bfccc7f60143f7bb0442575be37af5a3d38", "size": 884, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "flang/test/Semantics/call20.f90", "max_stars_repo_name": "mkinsner/llvm", "max_stars_repo_head_hexsha": "589d48844edb12cd357b3024248b93d64b6760bf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2338, "max_stars_repo_stars_event_min_datetime": "2018-06-19T17:34:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T11:00:37.000Z", "max_issues_repo_path": "flang/test/Semantics/call20.f90", "max_issues_repo_name": "mkinsner/llvm", "max_issues_repo_head_hexsha": "589d48844edb12cd357b3024248b93d64b6760bf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3740, "max_issues_repo_issues_event_min_datetime": "2019-01-23T15:36:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:01:13.000Z", "max_forks_repo_path": "flang/test/Semantics/call20.f90", "max_forks_repo_name": "mkinsner/llvm", "max_forks_repo_head_hexsha": "589d48844edb12cd357b3024248b93d64b6760bf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 500, "max_forks_repo_forks_event_min_datetime": "2019-01-23T07:49:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T02:59:37.000Z", "avg_line_length": 22.6666666667, "max_line_length": 87, "alphanum_fraction": 0.6561085973, "num_tokens": 206}
|
# Copyright 2020 KCL-BMEIS - King's College London
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import time
import numpy as np
from exetera.processing import numpy_buffer
class Dataset:
"""
field_descriptors: a dictionary of field names to field descriptors that describe how the field
should be transformed when loading
keys: a list of field names that represent the fields you wish to load and in what order they
should be put. Leaving this blank loads all of the keys in csv column order
"""
def __init__(self, source, field_descriptors=None, keys=None, filter_fn=None,
show_progress_every=False, start_from=None, stop_after=None, early_filter=None,
verbose=True):
def print_if_verbose(*args):
if verbose:
print(*args)
self.names_ = list()
self.fields_ = list()
self.index_ = None
self.index_ = None
csvf = csv.DictReader(source, delimiter=',', quotechar='"')
available_keys = csvf.fieldnames
if not keys:
fields_to_use = available_keys
index_map = [i for i in range(len(fields_to_use))]
else:
fields_to_use = keys
index_map = [available_keys.index(k) for k in keys]
early_key_index = None
if early_filter is not None:
if early_filter[0] not in available_keys:
raise ValueError(
f"'early_filter': tuple element zero must be a key that is in the dataset")
early_key_index = available_keys.index(early_filter[0])
tstart = time.time()
transforms_by_index = list()
new_fields = list()
# build a full list of transforms by index whether they are being filtered by 'keys' or not
for i_n, n in enumerate(available_keys):
if field_descriptors and n in field_descriptors and\
field_descriptors[n].strings_to_values and\
field_descriptors[n].out_of_range_label is None:
# transforms by csv field index
transforms_by_index.append(field_descriptors[n])
else:
transforms_by_index.append(None)
# build a new list of collections for every field that is to be loaded
for i_n in index_map:
if transforms_by_index[i_n] is not None:
to_datatype = transforms_by_index[i_n].to_datatype
if to_datatype == str:
new_fields.append(list())
else:
new_fields.append(numpy_buffer.NumpyBuffer2(dtype=to_datatype))
else:
new_fields.append(list())
# read the csv rows into the fields
csvf = csv.reader(source, delimiter=',', quotechar='"')
ecsvf = iter(csvf)
filtered_count = 0
for i_r, row in enumerate(ecsvf):
if show_progress_every:
if i_r % show_progress_every == 0:
if filtered_count == i_r:
print_if_verbose(i_r)
else:
print_if_verbose(f"{i_r} ({filtered_count})")
if start_from is not None and i_r < start_from:
del row
continue
# TODO: decide whether True means filter or not filter consistently
if early_filter is not None:
if not early_filter[1](row[early_key_index]):
continue
# TODO: decide whether True means filter or not filter consistently
if not filter_fn or filter_fn(i_r):
# for i_f, f in enumerate(fields):
for i_df, i_f in enumerate(index_map):
f = row[i_f]
t = transforms_by_index[i_f]
try:
new_fields[i_df].append(f if not t else t.strings_to_values[f])
except Exception as e:
msg = "{}: key error for value {} (permitted values are {}"
print_if_verbose(msg.format(fields_to_use[i_f], f, t.strings_to_values))
del row
filtered_count += 1
if stop_after and i_r >= stop_after:
break
if show_progress_every:
print_if_verbose(f"{i_r} ({filtered_count})")
# assign the built sequences to fields_
for i_f, f in enumerate(new_fields):
if isinstance(f, list):
self.fields_.append(f)
else:
self.fields_.append(f.finalise())
self.index_ = np.asarray([i for i in range(len(self.fields_[0]))], dtype=np.uint32)
self.names_ = fields_to_use
print_if_verbose('loading took', time.time() - tstart, "seconds")
# if i > 0 and i % lines_per_dot == 0:
# if i % (lines_per_dot * newline_at) == 0:
# print(f'. {i}')
# else:
# print('.', end='')
# if i % (lines_per_dot * newline_at) != 0:
# print(f' {i}')
def sort(self, keys):
#map names to indices
if isinstance(keys, str):
def single_index_sort(index):
field = self.fields_[index]
def inner_(r):
return field[r]
return inner_
self.index_ = sorted(self.index_,
key=single_index_sort(self.field_to_index(keys)))
else:
kindices = [self.field_to_index(k) for k in keys]
def index_sort(indices):
def inner_(r):
t = tuple(self.fields_[i][r] for i in indices)
return t
return inner_
self.index_ = sorted(self.index_, key=index_sort(kindices))
for i_f in range(len(self.fields_)):
unsorted_field = self.fields_[i_f]
self.fields_[i_f] = Dataset._apply_permutation(self.index_, unsorted_field)
del unsorted_field
@staticmethod
def _apply_permutation(permutation, field):
# n = len(permutation)
# for i in range(0, n):
# print(i)
# pi = permutation[i]
# while pi < i:
# pi = permutation[pi]
# fields[i], fields[pi] = fields[pi], fields[i]
# return fields
if isinstance(field, list):
sorted_field = [None] * len(field)
for ip, p in enumerate(permutation):
sorted_field[ip] = field[p]
else:
sorted_field = np.empty_like(field)
for ip, p in enumerate(permutation):
sorted_field[ip] = field[p]
return sorted_field
def field_by_name(self, field_name):
return self.fields_[self.field_to_index(field_name)]
def field_to_index(self, field_name):
return self.names_.index(field_name)
def value(self, row_index, field_index):
return self.fields_[field_index][row_index]
def value_from_fieldname(self, index, field_name):
return self.fields_[self.field_to_index(field_name)][index]
def row_count(self):
return len(self.index_)
def show(self):
for ir, r in enumerate(self.names_):
print(f'{ir}-{r}')
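# --------------------------------------------------------------------------
# Minimal usage sketch (hedged: a tiny in-memory CSV stands in for a real file;
# without field descriptors all values are kept as strings)
if __name__ == "__main__":
    import io
    source = io.StringIO("id,age\n1,30\n2,25\n3,40\n")
    ds = Dataset(source, verbose=False)
    ds.sort('age')
    print(ds.names_)                # -> ['id', 'age']
    print(ds.field_by_name('age'))  # -> ['25', '30', '40'] (lexicographic string sort)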
|
{"hexsha": "c23836cb9a481b39032f9d0d71bba5495db10f66", "size": 7901, "ext": "py", "lang": "Python", "max_stars_repo_path": "exetera/core/csvdataset.py", "max_stars_repo_name": "deng113jie/ExeTera", "max_stars_repo_head_hexsha": "613532a419b93a9838bf5ae5594fc7bb9738cd03", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2021-03-01T16:57:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-01T10:49:19.000Z", "max_issues_repo_path": "exetera/core/csvdataset.py", "max_issues_repo_name": "deng113jie/ExeTera", "max_issues_repo_head_hexsha": "613532a419b93a9838bf5ae5594fc7bb9738cd03", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 208, "max_issues_repo_issues_event_min_datetime": "2021-02-16T13:47:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T11:27:03.000Z", "max_forks_repo_path": "exetera/core/csvdataset.py", "max_forks_repo_name": "deng113jie/ExeTera", "max_forks_repo_head_hexsha": "613532a419b93a9838bf5ae5594fc7bb9738cd03", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-03-08T08:50:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-03T09:26:43.000Z", "avg_line_length": 38.1690821256, "max_line_length": 103, "alphanum_fraction": 0.5725857486, "include": true, "reason": "import numpy", "num_tokens": 1666}
|
include("utils.jl")
@testset "Stiefel" begin
@testset "Real" begin
M = Stiefel(3,2)
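# (annotation) Stiefel(n, p) is the manifold of orthonormal p-frames in R^n;
# its real dimension is n*p - p*(p+1)/2 = 3*2 - 3 = 3, which is checked below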
@testset "Basics" begin
x = [1.0 0.0; 0.0 1.0; 0.0 0.0]
@test representation_size(M) == (3,2)
@test manifold_dimension(M) == 3
@test_throws DomainError is_manifold_point(M, [1., 0., 0., 0.],true)
@test_throws DomainError is_manifold_point(M, 1im*[1.0 0.0; 0.0 1.0; 0.0 0.0],true)
@test !is_tangent_vector(M, x, [0., 0., 1., 0.])
@test_throws DomainError is_tangent_vector(M, x, 1 * im * zero_tangent_vector(M,x), true)
end
types = [
Matrix{Float64},
MMatrix{3, 2, Float64},
Matrix{Float32},
]
@testset "Type $T" for T in types
x = [1.0 0.0; 0.0 1.0; 0.0 0.0]
y = exp(M,x, [0.0 0.0; 0.0 0.0; 1.0 1.0])
z = exp(M,x,[ 0.0 0.0; 0.0 0.0; -1.0 1.0])
pts = convert.(T, [x,y,z])
v = inverse_retract(M,x,y,PolarInverseRetraction())
@test !is_manifold_point(M,2*x)
@test_throws DomainError !is_manifold_point(M,2*x,true)
@test !is_tangent_vector(M,2*x,v)
@test_throws DomainError !is_tangent_vector(M,2*x,v,true)
@test !is_tangent_vector(M,x,y)
@test_throws DomainError is_tangent_vector(M,x,y,true)
test_manifold(
M,
pts,
test_exp_log = false,
default_inverse_retraction_method = PolarInverseRetraction(),
test_injectivity_radius = false,
test_is_tangent = true,
test_project_tangent = true,
test_vector_transport = false,
test_forward_diff = false,
test_reverse_diff = false,
projection_atol_multiplier = 15.0,
retraction_atol_multiplier = 10.0,
is_tangent_atol_multiplier = 4*10.0^2,
retraction_methods = [PolarRetraction(), QRRetraction()],
inverse_retraction_methods = [PolarInverseRetraction(), QRInverseRetraction()]
)
@testset "inner/norm" begin
v1 = inverse_retract(M, pts[1], pts[2], PolarInverseRetraction())
v2 = inverse_retract(M, pts[1], pts[3], PolarInverseRetraction())
@test real(inner(M, pts[1], v1, v2)) ≈ real(inner(M, pts[1], v2, v1))
@test imag(inner(M, pts[1], v1, v2)) ≈ -imag(inner(M, pts[1], v2, v1))
@test imag(inner(M, pts[1], v1, v1)) ≈ 0
@test norm(M, pts[1], v1) isa Real
@test norm(M, pts[1], v1) ≈ sqrt(inner(M, pts[1], v1, v1))
end
end
end
@testset "Complex" begin
M = Stiefel(3,2,ℂ)
@testset "Basics" begin
@test representation_size(M) == (3,2)
@test manifold_dimension(M) == 8
@test !is_manifold_point(M, [1., 0., 0., 0.])
@test !is_tangent_vector(M, [1.0 0.0; 0.0 1.0; 0.0 0.0], [0., 0., 1., 0.])
x = [1.0 0.0; 0.0 1.0; 0.0 0.0]
@test_throws DomainError is_manifold_point(M, [:a :b; :c :d; :e :f],true)
@test_throws DomainError is_tangent_vector(M, x, [:a :b; :c :d; :e :f], true)
end
types = [
Matrix{ComplexF64},
]
@testset "Type $T" for T in types
x = [0.5+0.5im 0.5+0.5im; 0.5+0.5im -0.5-0.5im; 0.0 0.0]
y = exp(M, x, [0.0 0.0; 0.0 0.0; 1.0 1.0])
z = exp(M, x, [0.0 0.0; 0.0 0.0; -1.0 1.0])
pts = convert.(T, [x,y,z])
v = inverse_retract(M,x,y,PolarInverseRetraction())
@test !is_manifold_point(M,2*x)
@test_throws DomainError !is_manifold_point(M,2*x,true)
@test !is_tangent_vector(M,2*x,v)
@test_throws DomainError !is_tangent_vector(M,2*x,v,true)
@test !is_tangent_vector(M,x,y)
@test_throws DomainError is_tangent_vector(M,x,y,true)
test_manifold(
M,
pts,
test_exp_log = false,
default_inverse_retraction_method = PolarInverseRetraction(),
test_injectivity_radius = false,
test_is_tangent = true,
test_project_tangent = true,
test_vector_transport = false,
test_forward_diff = false,
test_reverse_diff = false,
projection_atol_multiplier = 15.0,
retraction_atol_multiplier = 10.0,
is_tangent_atol_multiplier = 4*10.0^2,
retraction_methods = [PolarRetraction(), QRRetraction()],
inverse_retraction_methods = [PolarInverseRetraction(), QRInverseRetraction()]
)
@testset "inner/norm" begin
v1 = inverse_retract(M, pts[1], pts[2], PolarInverseRetraction())
v2 = inverse_retract(M, pts[1], pts[3], PolarInverseRetraction())
@test real(inner(M, pts[1], v1, v2)) ≈ real(inner(M, pts[1], v2, v1))
@test imag(inner(M, pts[1], v1, v2)) ≈ -imag(inner(M, pts[1], v2, v1))
@test imag(inner(M, pts[1], v1, v1)) ≈ 0
@test norm(M, pts[1], v1) isa Real
@test norm(M, pts[1], v1) ≈ sqrt(inner(M, pts[1], v1, v1))
end
end
end
@testset "Quaternion" begin
M = Stiefel(3,2,ℍ)
@testset "Basics" begin
@test representation_size(M) == (3,2)
@test manifold_dimension(M) == 18
end
end
end
|
{"hexsha": "cc7c34bafda2a1416236d22dbaa606157f137e2c", "size": 5655, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/stiefel.jl", "max_stars_repo_name": "manuelweisser/Manifolds.jl", "max_stars_repo_head_hexsha": "07f889a290ece01569c6c53bb0c96a5608923a0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/stiefel.jl", "max_issues_repo_name": "manuelweisser/Manifolds.jl", "max_issues_repo_head_hexsha": "07f889a290ece01569c6c53bb0c96a5608923a0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/stiefel.jl", "max_forks_repo_name": "manuelweisser/Manifolds.jl", "max_forks_repo_head_hexsha": "07f889a290ece01569c6c53bb0c96a5608923a0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-09T10:46:39.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-09T10:46:39.000Z", "avg_line_length": 43.1679389313, "max_line_length": 101, "alphanum_fraction": 0.5177718833, "num_tokens": 1746}
|
import nltk
nltk.download('stopwords')
import re
import numpy as np
import pandas as pd
from pprint import pprint
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# spacy for lemmatization
import spacy
# Plotting tools
import pyLDAvis
#import pyLDAvis.gensim # don't skip this
import pyLDAvis.gensim_models
import matplotlib.pyplot as plt
# %matplotlib inline  (IPython magic: only valid inside a notebook, so commented out for plain-Python runs)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# NLTK Stop words
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use'])
# # Import Dataset
# df = pd.read_json('https://raw.githubusercontent.com/selva86/datasets/master/newsgroups.json')
# print(df.target_names.unique())
# df.head()
# # Convert to list
# data = df.content.values.tolist()
dummydata = open("dummydata.txt", "r")
# print(dummydata.read())
# data = df.reviews.values.tolist()
data = dummydata.read().split("delimiter")
# Remove Emails
data = [re.sub(r'\S*@\S*\s?', '', sent) for sent in data]
# Remove new line characters
data = [re.sub(r'\s+', ' ', sent) for sent in data]
# Remove distracting single quotes
data = [re.sub(r"\'", "", sent) for sent in data]
pprint(data[:1])
def sent_to_words(sentences):
for sentence in sentences:
yield(gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations
data_words = list(sent_to_words(data))
print(data_words[:1])
# Build the bigram and trigram models
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.
trigram = gensim.models.Phrases(bigram[data_words], threshold=100)
# Faster way to get a sentence clubbed as a trigram/bigram
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
# See trigram example
print(trigram_mod[bigram_mod[data_words[0]]])
# Define functions for stopwords, bigrams, trigrams and lemmatization
def remove_stopwords(texts):
return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
def make_bigrams(texts):
return [bigram_mod[doc] for doc in texts]
def make_trigrams(texts):
return [trigram_mod[bigram_mod[doc]] for doc in texts]
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
# Remove Stop Words
data_words_nostops = remove_stopwords(data_words)
# Form Bigrams
data_words_bigrams = make_bigrams(data_words_nostops)
# Initialize spacy 'en' model, keeping only tagger component (for efficiency)
# python3 -m spacy download en
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
# Do lemmatization keeping only noun, adj, vb, adv
data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
print(data_lemmatized[:1])
# Create Dictionary
id2word = corpora.Dictionary(data_lemmatized)
# Create Corpus
texts = data_lemmatized
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# View
print(corpus[:1])
[[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]]
# Build LDA model
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=id2word,
num_topics=20,
random_state=100,
update_every=1,
chunksize=100,
passes=10,
alpha='auto',
per_word_topics=True)
# Print the Keyword in the 10 topics
pprint(lda_model.print_topics())
doc_lda = lda_model[corpus]
# # Compute Perplexity
# print('\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.
# Compute Coherence Score
coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
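# --------------------------------------------------------------------------
# Visualize the trained model with pyLDAvis (imported above but unused so far).
# Sketch only: the output file name is a placeholder; in a notebook you could
# instead call pyLDAvis.enable_notebook() and display 'vis' inline.
vis = pyLDAvis.gensim_models.prepare(lda_model, corpus, id2word)
pyLDAvis.save_html(vis, 'lda_visualization.html')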
|
{"hexsha": "003fa0288a0586e999b0d3c7cd5701cc9d0af8d2", "size": 4687, "ext": "py", "lang": "Python", "max_stars_repo_path": "daily_gratitude/daily_grat/topicmodeling.py", "max_stars_repo_name": "abigail432/daily_gratitude", "max_stars_repo_head_hexsha": "4427b4f7d5c6b7349b718a17aeac62b603f80d66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "daily_gratitude/daily_grat/topicmodeling.py", "max_issues_repo_name": "abigail432/daily_gratitude", "max_issues_repo_head_hexsha": "4427b4f7d5c6b7349b718a17aeac62b603f80d66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "daily_gratitude/daily_grat/topicmodeling.py", "max_forks_repo_name": "abigail432/daily_gratitude", "max_forks_repo_head_hexsha": "4427b4f7d5c6b7349b718a17aeac62b603f80d66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4563758389, "max_line_length": 116, "alphanum_fraction": 0.6974610625, "include": true, "reason": "import numpy", "num_tokens": 1185}
|
# Modified from the SHAPE-MaP pipeline to call mutations
# in miRNA cutting assay data. Functions were kepts and
# main loop was wrapped in an if __name__ == "__main__"
# statement
#
# Part of the SHAPE-MaP data analysis pipeline (ShapeMapper).
# Counts up mutations from sequencing data.
# Copyright Steven Busan 2014
#---------------------------------------------------------------------------------------------------
# GPL statement:
#
# This file is part of Shapemapper.
#
# ShapeMapper is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShapeMapper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShapeMapper. If not, see <http://www.gnu.org/licenses/>.
#
#----------------------------------------------------------------------------------------------------
import sys, os, traceback, copy, re, time
#import conf
import numpy
import matplotlib.pyplot as plot
# def safeDivide(num,den):
# # convert input numbers to floats and divide
# # If denominator is 0, output -999 to indicate error.
# result = -999.0
# try:
# result = float(num)/float(den)
# except ZeroDivisionError:
# pass
# return result
#
# def parseFasta(fastaFile):
# # given an open fasta file, return a dict of sequence names
# # and associated sequence strings
# seq = {}
# lines = fastaFile.readlines()
# currentSeqName = ""
# for line in lines:
# if line[0] == ">":
# # sequence name found
# currentSeqName = line.strip()[1:]
# seq[currentSeqName] = ""
# else:
# # remove whitespace and append to current sequence
# seq[currentSeqName] += "".join(line.split())
# return seq
#
# #
def parseCigarString(cigarString, read, quals, refSeq, startIndex):
# define regular expression patterns for parsing CIGAR string
item = re.compile(r"[0-9]+[M|I|D|S]")
number = re.compile(r"([0-9]+)")
splitCigarString = item.findall(cigarString.strip())
cigarList = [number.split(x)[1:] for x in splitCigarString]
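# (annotation) CIGAR operations handled below: M = alignment match or mismatch
# (resolved against the reference), I = insertion into the read, D = deletion
# from the read, S = soft-clipped/padded bases at either end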
#print "CIGAR: %s\nsplit CIGAR: %s"%(cigarString,str(cigarList))
readCopy = read
alignedQuals = {}
lastQual= "#"
refIndex = startIndex # begin at leftmost matching position
events = {} # keys will be nuc indices with respect to reference sequence
#inserts = {}
for i in range(len(cigarList)):
region = cigarList[i]
regionLength = int(region[0])
regionType = region[1]
# alignment match (could be match or mismatch, use raw read to resolve)
if regionType == "M":
matchRegion = read[:regionLength]
qualsRegion = quals[:regionLength]
read = read[regionLength:]
quals = quals[regionLength:]
for regionIndex in range(len(matchRegion)):
nuc = matchRegion[regionIndex]
qual = qualsRegion[regionIndex]
lastQual = qual
alignedQuals[refIndex] = qual
# catch insertion at end of reference sequence
if refIndex > len(refSeq) -1:
continue
elif nuc != refSeq[refIndex]:
# nuc mismatch found
#print "Mismatch at ref index %i"%refIndex
events[refIndex] = nuc
else:
events[refIndex] = "|"
refIndex += 1
# insertion
elif regionType == "I":
insertedSeq = read[:regionLength]
#inserts[refIndex] = insertedSeq
read = read[regionLength:]
quals = quals[regionLength:]
# ignore for now (insertions are usually unhelpful)
# deletion
elif regionType == "D":
#print "Deletion %i nucs long at ref index %i"%(regionLength, refIndex)
for deletionIndex in range(regionLength):
events[refIndex] = "-"
alignedQuals[refIndex] = lastQual # don't have a phred score for a deletion (no basecall associated with position), so just use last nearby score
refIndex += 1
# padding
elif regionType == "S":
read = read[regionLength:]
quals = quals[regionLength:] # missing from v12b and all pipeline versions before - results in misaligned phred scores and errors combining read pairs
if i == len(cigarList)-1: # rightmost end of read
for offsetIndex in range(regionLength):
padIndex = refIndex+offsetIndex
if padIndex >= 0 and padIndex < len(refSeq):
#if True:
events[padIndex] = "s"
alignedQuals[padIndex] = "#"
elif i == 0: # leftmost end of read
for offsetIndex in range(regionLength+1):
padIndex = refIndex-offsetIndex
if padIndex > 0 and padIndex < len(refSeq):
#if True:
events[padIndex] = "s"
alignedQuals[padIndex] = "#"
sortedKeys = sorted(events.keys())
printEvents = ""
printQuals = ""
evalName = ""
lowQualFlag = False
mutCount = 0
for i in range(min(sortedKeys),max(sortedKeys)+1):
try:
printEvents += events[i]
printQuals += alignedQuals[i]
if events[i] != "|":
mutCount += 1
qscore = ord(alignedQuals[i]) -33
if qscore < 20:
lowQualFlag = True
evalName += "{0}{1}".format(i, events[i])
except KeyError:
printEvents += " "
printQuals += " "
if mutCount == 0:
evalName = None
#print printEvents
#print printQuals
#print evalName, lowQualFlag
return printEvents, printQuals, evalName, mutCount, lowQualFlag
if __name__ == '__main__':
cigar = "4M1D5M"
read = "AGCAAAGGC"
refSeq = "AGTAGTAGGC"
quals = "GGGGGGGGGG"
start = 0
events, alignedQuals, evalName, mutCount, lowQualFlag = parseCigarString(cigar, read, quals, refSeq, start)
print(events)         # "|" = match, letter = mismatched base, "-" = deletion
print(alignedQuals)   # phred characters aligned to the reference
print(evalName, mutCount, lowQualFlag)
|
{"hexsha": "44b68aa9119fabfd3064d5d8ea4a1e0196daba95", "size": 6550, "ext": "py", "lang": "Python", "max_stars_repo_path": "variomics_pipeline/countMutations.py", "max_stars_repo_name": "grice/DroSeq", "max_stars_repo_head_hexsha": "4fd30f301420927eaae0c9a64cdd67155ff94321", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "variomics_pipeline/countMutations.py", "max_issues_repo_name": "grice/DroSeq", "max_issues_repo_head_hexsha": "4fd30f301420927eaae0c9a64cdd67155ff94321", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "variomics_pipeline/countMutations.py", "max_forks_repo_name": "grice/DroSeq", "max_forks_repo_head_hexsha": "4fd30f301420927eaae0c9a64cdd67155ff94321", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7573964497, "max_line_length": 162, "alphanum_fraction": 0.5680916031, "include": true, "reason": "import numpy", "num_tokens": 1541}
|
import rpy2.robjects as ro
import rpy2.robjects.conversion as conversion
import rpy2.rinterface as rinterface
from rpy2.rinterface import SexpVector, INTSXP
import numpy
from rpy2.robjects.vectors import DataFrame, Vector, ListVector
original_conversion = conversion.py2ri
# The possible kind codes are listed at
# http://numpy.scipy.org/array_interface.shtml
_kinds = {
# "t" -> not really supported by numpy
"b": rinterface.LGLSXP,
"i": rinterface.INTSXP,
# "u" -> special-cased below
"f": rinterface.REALSXP,
"c": rinterface.CPLXSXP,
# "O" -> special-cased below
"S": rinterface.STRSXP,
"U": rinterface.STRSXP,
# "V" -> special-cased below
}
def numpy2ri(o):
""" Augmented conversion function, converting numpy arrays into
rpy2.rinterface-level R structures. """
if isinstance(o, numpy.ndarray):
if not o.dtype.isnative:
raise(ValueError("Cannot pass numpy arrays with non-native byte orders at the moment."))
# Most types map onto R arrays:
if o.dtype.kind in _kinds:
# "F" means "use column-major order"
vec = SexpVector(o.ravel("F"), _kinds[o.dtype.kind])
dim = SexpVector(o.shape, INTSXP)
res = ro.r.array(vec, dim=dim)
# R does not support unsigned types:
elif o.dtype.kind == "u":
raise(ValueError("Cannot convert numpy array of unsigned values -- R does not have unsigned integers."))
# Array-of-PyObject is treated like a Python list:
elif o.dtype.kind == "O":
res = conversion.py2ri(list(o))
# Record arrays map onto R data frames:
elif o.dtype.kind == "V":
if o.dtype.names is None:
raise(ValueError("Nothing can be done for this numpy array type %s at the moment." % (o.dtype,)))
df_args = []
for field_name in o.dtype.names:
df_args.append((field_name,
conversion.py2ri(o[field_name])))
res = ro.baseenv["data.frame"].rcall(tuple(df_args), ro.globalenv)
# It should be impossible to get here:
else:
raise(ValueError("Unknown numpy array type."))
else:
res = ro.default_py2ri(o)
return res
def ri2numpy(o):
if isinstance(o, ListVector):
res = numpy.rec.fromarrays(o, names=tuple(o.names))
elif isinstance(o, Vector) and (type(o) != Vector):
res = numpy.asarray(o)
else:
res = ro.default_ri2py(o)
return res
def activate():
conversion.py2ri = numpy2ri
conversion.ri2numpy = ri2numpy
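# --------------------------------------------------------------------------
# Minimal usage sketch (assumptions: rpy2 can locate a working R installation;
# this targets the legacy conversion API used above, not modern rpy2)
if __name__ == "__main__":
    activate()
    arr = numpy.arange(6, dtype=float).reshape(2, 3)
    r_sum = ro.r['sum'](arr)    # the numpy array is converted to R on the way in
    print(ri2numpy(r_sum))      # and the R result is converted back to numpy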
|
{"hexsha": "0efe11597cf83dface429190c1d553f98539cf1c", "size": 2630, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/MethylSig/rpy2/rpy/robjects/numpy2ri.py", "max_stars_repo_name": "psnehal/MethylSig", "max_stars_repo_head_hexsha": "5efad71e71ff2515feff2e49579c856ef9a1bbd8", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/MethylSig/rpy2/rpy/robjects/numpy2ri.py", "max_issues_repo_name": "psnehal/MethylSig", "max_issues_repo_head_hexsha": "5efad71e71ff2515feff2e49579c856ef9a1bbd8", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/MethylSig/rpy2/rpy/robjects/numpy2ri.py", "max_forks_repo_name": "psnehal/MethylSig", "max_forks_repo_head_hexsha": "5efad71e71ff2515feff2e49579c856ef9a1bbd8", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.875, "max_line_length": 116, "alphanum_fraction": 0.6205323194, "include": true, "reason": "import numpy", "num_tokens": 662}
|
import os
import struct
import moderngl.experimental as mgl
import numpy as np
from objloader import Obj
from PIL import Image
from pyrr import Matrix44
from example_window import Example, run_example
def local(*path):
return os.path.join(os.path.dirname(__file__), '..', *path)
class MugExample(Example):
def __init__(self):
self.ctx = mgl.create_context()
self.canvas_prog = self.ctx.program(
vertex_shader='''
#version 330
in vec2 in_vert;
out vec2 v_vert;
void main() {
gl_Position = vec4(in_vert * 2.0 - 1.0, 0.0, 1.0);
v_vert = in_vert;
}
''',
fragment_shader='''
#version 330
uniform sampler2D Texture;
in vec2 v_vert;
out vec4 f_color;
void main() {
f_color = texture(Texture, v_vert);
}
''',
)
self.prog = self.ctx.program(
vertex_shader='''
#version 330
uniform mat4 Mvp;
in vec3 in_vert;
in vec3 in_norm;
in vec2 in_text;
out vec3 v_vert;
out vec3 v_norm;
out vec2 v_text;
void main() {
gl_Position = Mvp * vec4(in_vert, 1.0);
v_vert = in_vert;
v_norm = in_norm;
v_text = in_text;
}
''',
fragment_shader='''
#version 330
uniform vec3 Light;
uniform sampler2D Texture;
in vec3 v_vert;
in vec3 v_norm;
in vec2 v_text;
out vec4 f_color;
void main() {
float lum = clamp(dot(normalize(Light - v_vert), normalize(v_norm)), 0.0, 1.0) * 0.8 + 0.2;
vec3 base = vec3(0.5, 0.5, 0.5) * lum;
vec3 spec = vec3(1.0, 1.0, 1.0) * pow(lum, 5.7);
vec4 tex = texture(Texture, v_text);
f_color = vec4(base * 0.1 + tex.rgb * lum + spec, tex.a);
}
''',
)
self.canvas_vbo = self.ctx.buffer(np.array([0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0], dtype='f4').tobytes())
self.canvas_vao = self.ctx.simple_vertex_array(self.canvas_prog, self.canvas_vbo, 'in_vert')
bg_img = Image.open(local('data', 'mug-background.jpg')).transpose(Image.FLIP_TOP_BOTTOM).convert('RGB')
self.bg_texture = self.ctx.texture(bg_img.size, 3, bg_img.tobytes())
self.bg_sampler = self.ctx.sampler(self.bg_texture)
sticker_img = Image.open(local('data', 'mug-pymet-logo.png')).transpose(Image.FLIP_TOP_BOTTOM).convert('RGBA')
self.sticker_texture = self.ctx.texture(sticker_img.size, 4, sticker_img.tobytes())
# self.sticker_texture.build_mipmaps(0, 2)
self.sticker_sampler = self.ctx.sampler(self.sticker_texture)
self.mug_texture = self.ctx.texture((1, 1), 3)
self.mug_texture.write(struct.pack('3B', 10, 10, 10))
self.mug_sampler = self.ctx.sampler(self.mug_texture)
obj = Obj.open(local('data', 'mug.obj'))
self.mug_vbo = self.ctx.buffer(obj.pack('vx vy vz nx ny nz tx ty'))
self.mug_vao = self.ctx.simple_vertex_array(self.prog, self.mug_vbo, 'in_vert', 'in_norm', 'in_text')
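        # (annotation) The sticker below is built as a cylindrical triangle strip
        # wrapped around the mug: two vertices (bottom/top edge) per angular
        # segment between the 'left' and 'right' angles, sharing the mug shader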
segs = 32
radius = 29.94
bottom = 6.601
top = 57.856
left = -163.12 * np.pi / 180.0
right = 11.25 * np.pi / 180.0
lin = np.linspace(left, right, segs)
sticker_vertices = np.array([
np.repeat(np.cos(lin) * radius, 2),
np.repeat(np.sin(lin) * radius, 2),
np.tile([bottom, top], segs),
np.repeat(np.cos(lin), 2),
np.repeat(np.sin(lin), 2),
np.tile([0.0, 0.0], segs),
np.repeat(np.linspace(0.0, 1.0, segs), 2),
np.tile([0.0, 1.0], segs),
])
self.sticker_vbo = self.ctx.buffer(sticker_vertices.T.astype('f4').tobytes())
self.sticker_vao = self.ctx.simple_vertex_array(self.prog, self.sticker_vbo, 'in_vert', 'in_norm', 'in_text')
def render(self):
self.ctx.screen.viewport = self.wnd.viewport
self.ctx.clear(1.0, 1.0, 1.0)
self.bg_sampler.use()
with self.ctx.scope(mgl.BLEND):
self.canvas_vao.render(mgl.TRIANGLE_STRIP)
with self.ctx.scope(mgl.DEPTH_TEST):
proj = Matrix44.perspective_projection(30.0, self.wnd.ratio, 1.0, 1000.0)
lookat = Matrix44.look_at(
(46.748, -280.619, 154.391),
(-23.844, 2.698, 44.493),
(0.0, 0.0, 1.0),
)
self.prog['Mvp'] = (proj * lookat).astype('f4').tobytes()
self.prog['Light'] = (-143.438, -159.072, 213.268)
self.mug_sampler.use()
self.mug_vao.render()
with self.ctx.scope(mgl.DEPTH_TEST | mgl.BLEND):
self.sticker_sampler.use()
self.sticker_vao.render(mgl.TRIANGLE_STRIP)
run_example(MugExample)
|
{"hexsha": "aa0e42dd0faae6698555ddadcc670d7bb0313359", "size": 5284, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/experimental/mug_mockup.py", "max_stars_repo_name": "dougbrion/ModernGL", "max_stars_repo_head_hexsha": "6de8938ccd0042c1389a32b697af5f9c9d279e41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/experimental/mug_mockup.py", "max_issues_repo_name": "dougbrion/ModernGL", "max_issues_repo_head_hexsha": "6de8938ccd0042c1389a32b697af5f9c9d279e41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/experimental/mug_mockup.py", "max_forks_repo_name": "dougbrion/ModernGL", "max_forks_repo_head_hexsha": "6de8938ccd0042c1389a32b697af5f9c9d279e41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4430379747, "max_line_length": 118, "alphanum_fraction": 0.5221423164, "include": true, "reason": "import numpy", "num_tokens": 1414}
|
using ModelingToolkit, DifferentialEquations, Plots
@variables t
D = Differential(t)
function dynamics(; name, τ=0.01, a_max=300.0, V₀=300.0, x_T=5E3, y_T=0.0)
states = @variables x(t) y(t) γ(t) a(t) a_cmd(t) r(t) σ(t) λ(t) V(t)
ps = @parameters τ = τ a_max = a_max x_T = x_T y_T = y_T
eqs = [
D(x) ~ V * cos(γ)
D(y) ~ V * sin(γ)
D(γ) ~ a / V
D(a) ~ (min(max(a_cmd, -a_max), a_max) - a) / τ
r ~ sqrt((x_T - x)^2 + (y_T - y)^2)
λ ~ atan(y_T - y, x_T - x)
σ ~ γ - λ
V ~ V₀
]
return ODESystem(eqs, t, states, ps; name)
end
function BPNG(; name, σ_lim=deg2rad(60), m=10.0, δ=0.01, k=4.0, n=1, p=0.8, N=3.0, r₀=5E3, γ_f_d=-deg2rad(50.0))
states = @variables r(t) γ(t) σ(t) λ(t) λ̇(t) V(t) e_γ_f(t) K_r(t) K_η(t) a_cmd(t)
ps = @parameters σ_lim = σ_lim m = m δ = δ k = k n = n p = p N = N r₀ = r₀ γ_f_d = γ_f_d
eqs = [
λ̇ ~ -V * sin(σ) / r
e_γ_f ~ (N * λ - γ) / (N - 1.0) - γ_f_d
K_r ~ (N - 1.0 + δ) * (1.0 + r / r₀)^n
K_η ~ 1.0 + k * (1.0 - abs(sin(σ) / sin(σ_lim))^m)
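# (annotation) a_cmd below is a biased PNG law: N V λ̇ is classic proportional
# navigation, while the second term biases the trajectory so that the final
# flight-path angle approaches γ_f_d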
a_cmd ~ N * V * λ̇ + V^2 * cos(σ) * K_r * K_η / r * abs(e_γ_f)^p * sign(e_γ_f)
]
return ODESystem(eqs, t, states, ps; name)
end
function RPG(; name, σ_lim=deg2rad(60), m=2.0, n=3.0, γ_f_d=-deg2rad(50.0))
states = @variables r(t) γ(t) σ(t) λ(t) λ′(t) λ″(t) V(t) a_cmd(t) c_m(t) c_n(t)
ps = @parameters σ_lim = σ_lim m = m n = n γ_f_d = γ_f_d
eqs = [
c_m ~ (-n * (λ - γ_f_d) + tan(σ)) / (m - n) / r^m
c_n ~ (m * (λ - γ_f_d) - tan(σ)) / (m - n) / r^n
# λ′ ~ tan(σ)/r
λ′ ~ n * c_n * r^(n - 1) + m * c_m * r^(m - 1)
λ″ ~ n * (n - 1) * c_n * r^(n - 2) + m * (m - 1) * c_m * r^(m - 2)
a_cmd ~ -V^2 * cos(σ) * ((r * λ″ + tan(σ) / r) / (sec(σ))^2 + λ′)
]
return ODESystem(eqs, t, states, ps; name)
end
indexof(sym, syms) = findfirst(isequal(sym), syms)
function condition_stop(u, t, integrator)
u_sys = states(sys_simplified)
p_sys = parameters(sys_simplified)
x = u[indexof(model.x, u_sys)]
y = u[indexof(model.y, u_sys)]
γ = u[indexof(model.γ, u_sys)]
x_T = integrator.p[indexof(model.x_T, p_sys)]
y_T = integrator.p[indexof(model.y_T, p_sys)]
σ_lim = integrator.p[indexof(command.σ_lim, p_sys)]
r = sqrt((x_T - x)^2 + (y_T - y)^2)
λ = atan(y_T - y, x_T - x)
σ = γ - λ
return (r < 0.5) || (σ > σ_lim) #|| (r < 10 && ṙ >= 0)
end
affect!(integrator) = terminate!(integrator)
cb_stop = DiscreteCallback(condition_stop, affect!)
# @time sol = solve(prob, Tsit5(), callback = cb_stop)
##
case = 6 # [manual input required]
γ_f_d_deg = -170.0
if case <= 3
if case == 1
n_list = 1
p_list = [0.6, 0.8, 1.0]
elseif case == 2
n_list = [0, 1, 2]
p_list = 0.9
elseif case == 3
n_list = 0
p_list = 0.6
end
@named command = BPNG()
pv_list = [[command.n => n, command.p => p, command.γ_f_d => deg2rad(γ_f_d_deg)] for n in n_list for p in p_list]
else
if case == 4
n_list = [1.5, 2.0, 3.0]
m_list = 1.0
elseif case == 5
n_list = 3.0
m_list = [1.0, 1.5, 2.0]
elseif case == 6
n_list = 1.5
m_list = 1.0
end
@named command = RPG()
pv_list = [[command.n => n, command.m => m, command.γ_f_d => deg2rad(γ_f_d_deg)] for n in n_list for m in m_list]
end
@named model = dynamics()
eqs_c = [
model.V ~ command.V
model.r ~ command.r
model.γ ~ command.γ
model.σ ~ command.σ
model.λ ~ command.λ
model.a_cmd ~ command.a_cmd
]
sys = compose(ODESystem(eqs_c, name=:sys), model, command)
sys_simplified = structural_simplify(sys)
IC = [model.x => 0.0
model.y => 0.0
model.γ => deg2rad(55)
model.a => 0.0]
t_span = (0.0, 30.0)
# pv = [command.n => 1
# command.p => 1.0]
# prob = ODEProblem(sys_simplified, IC, t_span, pv; jac = true)
fs_val = 14
lw_val = 2.5
ls_list = [:solid :dash :dot :dashdot]
f_xy = plot()
f_r = plot()
f_σ = plot()
f_γ = plot()
f_e_γ_f = plot()
f_a = plot()
f_a_cmd = plot()
for pv in pv_list
prob = ODEProblem(sys_simplified, IC, t_span, pv; jac=true)
@time sol = solve(prob, Tsit5(), callback=cb_stop, reltol=1e-8, abstol=1e-8)
if case <= 3
lgnd = "\$n = $(pv[1][2])\$, \$p = $(pv[2][2])\$"
plot!(f_e_γ_f, sol, vars=rad2deg(command.e_γ_f), label=lgnd, xlabel="\$t\$ [s]", ylabel="\$e_{\\gamma_{f}}\$ [deg]", linewidth=lw_val, linestyle=ls_list[findfirst(isequal(pv), pv_list)], legendfontsize=fs_val, guidefontsize=fs_val, xtickfontsize=fs_val, ytickfontsize=fs_val)
else
lgnd = "\$n = $(pv[1][2])\$, \$m = $(pv[2][2])\$"
plot!(f_e_γ_f, sol, vars=rad2deg(model.γ)-γ_f_d_deg, label=lgnd, xlabel="\$t\$ [s]", ylabel="\$\\gamma - \\gamma_{f}\$ [deg]", linewidth=lw_val, linestyle=ls_list[findfirst(isequal(pv), pv_list)], legendfontsize=fs_val, guidefontsize=fs_val, xtickfontsize=fs_val, ytickfontsize=fs_val)
end
plot!(f_xy, sol, vars=(model.x, model.y), aspect_ratio=:equal, label=lgnd, xlabel="\$x\$ [m]", ylabel="\$y\$ [m]", linewidth=lw_val, linestyle=ls_list[findfirst(isequal(pv), pv_list)], legendfontsize=fs_val, guidefontsize=fs_val, xtickfontsize=fs_val, ytickfontsize=fs_val)
plot!(f_r, sol, vars=model.r, label=lgnd, xlabel="\$t\$ [s]", ylabel="\$r\$ [m]", linewidth=lw_val, linestyle=ls_list[findfirst(isequal(pv), pv_list)], legendfontsize=fs_val, guidefontsize=fs_val, xtickfontsize=fs_val, ytickfontsize=fs_val)
plot!(f_σ, sol, vars=rad2deg(model.σ), label=lgnd, xlabel="\$t\$ [s]", ylabel="\$\\sigma\$ [deg]", linewidth=lw_val, linestyle=ls_list[findfirst(isequal(pv), pv_list)], legendfontsize=fs_val, guidefontsize=fs_val, xtickfontsize=fs_val, ytickfontsize=fs_val, ylims=(0, 60))
plot!(f_γ, sol, vars=rad2deg(model.γ), label=lgnd, xlabel="\$t\$ [s]", ylabel="\$\\gamma\$ [deg]", linewidth=lw_val, linestyle=ls_list[findfirst(isequal(pv), pv_list)], legendfontsize=fs_val, guidefontsize=fs_val, xtickfontsize=fs_val, ytickfontsize=fs_val)
plot!(f_a, sol, vars=model.a, label=lgnd, xlabel="\$t\$ [s]", ylabel="\$a\$ [m/s\$^{2}\$]", linewidth=lw_val, linestyle=ls_list[findfirst(isequal(pv), pv_list)], legendfontsize=fs_val, guidefontsize=fs_val, xtickfontsize=fs_val, ytickfontsize=fs_val)
plot!(f_a_cmd, sol, vars=model.a_cmd, label=lgnd, xlabel="\$t\$ [s]", ylabel="\$a_{cmd}\$ [m/s\$^{2}\$]", linewidth=lw_val, linestyle=ls_list[findfirst(isequal(pv), pv_list)], legendfontsize=fs_val, guidefontsize=fs_val, xtickfontsize=fs_val, ytickfontsize=fs_val)
println("\n sum(abs(a)) = $(sum(abs.(sol[model.a])))\n")
end
plot!(f_σ, [0.0, 20.0], [60, 60], linecolor=:red, linestyle=:dashdot, label="\$\\sigma_{\\lim}\$")
fig_dir = "Simulation/Julia_BPNG/Figures"
mkpath(fig_dir)
savefig(f_xy, joinpath(fig_dir, "Fig_xy_$(case).pdf"))
savefig(f_r, joinpath(fig_dir, "Fig_r_$(case).pdf"))
savefig(f_σ, joinpath(fig_dir, "Fig_sigma_$(case).pdf"))
savefig(f_γ, joinpath(fig_dir, "Fig_gamma_$(case).pdf"))
savefig(f_e_γ_f, joinpath(fig_dir, "Fig_e_gamma_f_$(case).pdf"))
savefig(f_a, joinpath(fig_dir, "Fig_a_$(case).pdf"))
savefig(f_a_cmd, joinpath(fig_dir, "Fig_a_cmd_$(case).pdf"))
display(f_xy)
display(f_r)
display(f_σ)
display(f_γ)
display(f_e_γ_f)
display(f_a)
display(f_a_cmd)
|
{"hexsha": "c8e56eddf14a680f74cf600d6fba67db5f1a28a0", "size": 7327, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "main/LCG_e_0/main_mtk_LCG_e_0.jl", "max_stars_repo_name": "nhcho91/FlightGNC.jl", "max_stars_repo_head_hexsha": "d6a44a434c7ad124bc7271c070d86d64e639582a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-09-24T12:18:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-14T12:08:10.000Z", "max_issues_repo_path": "main/LCG_e_0/main_mtk_LCG_e_0.jl", "max_issues_repo_name": "nhcho91/FlightGNC.jl", "max_issues_repo_head_hexsha": "d6a44a434c7ad124bc7271c070d86d64e639582a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-09-25T14:47:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-28T08:32:26.000Z", "max_forks_repo_path": "main/LCG_e_0/main_mtk_LCG_e_0.jl", "max_forks_repo_name": "nhcho91/FlightGNC.jl", "max_forks_repo_head_hexsha": "d6a44a434c7ad124bc7271c070d86d64e639582a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-09-25T15:19:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-28T07:55:07.000Z", "avg_line_length": 37.0050505051, "max_line_length": 293, "alphanum_fraction": 0.5986078886, "num_tokens": 2804}
|
[STATEMENT]
lemma sender_ip_valid:
"paodv i \<TTurnstile>\<^sub>A onll \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V (\<lambda>((\<xi>, _), a, _). anycast (\<lambda>m. not_Pkt m \<longrightarrow> msg_sender m = i) a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. paodv i \<TTurnstile>\<^sub>A onll \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V (\<lambda>((\<xi>, uu_), a, uu_). anycast (\<lambda>m. not_Pkt m \<longrightarrow> msg_sender m = i) a)
[PROOF STEP]
by (rule step_invariant_weaken_with_invariantE [OF ip_constant sender_ip_valid'])
(auto dest!: onlD onllD)
|
{"llama_tokens": 233, "file": "AODV_variants_d_fwdrreqs_D_Seq_Invariants", "length": 1}
|
/**
@file
Copyright John Reid 2007, 2013
*/
#include "bio-pch.h"
#include "bio/defs.h"
#include "bio/matrix_test_data.h"
#include "bio/fragment_test_data.h"
#include "bio/site_test_data.h"
#include "bio/biobase_db.h"
#include "bio/biobase_data_traits.h"
#include "bio/biobase_tf.h"
#include "bio/biobase_binding_model.h"
#include "bio/model_2_factor.h"
#include "bio/background_models.h"
#include "bio/biobase_filter.h"
#include <boost/iterator/transform_iterator.hpp>
#include <boost/assign/list_of.hpp>
#include <numeric>
#include <iostream>
BIO_NS_START
Test::~Test() { }
SiteTestData::SiteTestData(
const SiteTestCase & test_case)
: BiFaTestData( test_case.centre_sequence )
, site( BiobaseDb::singleton().get_entry< SITE_DATA >( test_case.site ) )
{
std::copy(
test_case.sequences.begin(),
test_case.sequences.end(),
std::back_inserter( input.conserved_sequences ) );
}
SiteTestData::~SiteTestData() { }
Test::ptr_t
SiteTestData::get_test_for_algorithm( BiFaAlgorithm::ptr_t algorithm )
{
return
Test::ptr_t(
new BiFaFactorTest(
*this,
site->factor_links,
algorithm
)
)
;
}
/** Generate some test data from a matrix by the following: Take a random sequence of the given length
and replace a section of it with one of the sequences used to generate the matrix. */
MatrixTestData::MatrixTestData(
const Matrix * matrix,
size_t seq_length)
: matrix(matrix)
{
//make sure we have a binding site to choose
if (0 == matrix->align_descs.size())
{
throw std::logic_error( "No binding site to choose from" );
}
//generate a random sequence
input.centre_sequence.reserve( seq_length );
gen_sequence_from_random_species_dna_hmm( input.centre_sequence, seq_length );
bool found_binding_site = false;
for( AlignDescList::const_iterator ad = matrix->align_descs.begin();
matrix->align_descs.end() != ad;
++ad )
{
if( seq_length < ad->get()->sequence.size() //make sure the site is not longer than the sequence
||
ad->get()->sequence.size() != (size_t) ad->get()->length
||
matrix->get_size() != ad->get()->sequence.size()
||
0 == ad->get()->sequence.size()
||
ad->get()->sequence.end()
!=
std::find_if(
ad->get()->sequence.begin(),
ad->get()->sequence.end(),
! boost::lambda::bind< bool >( is_known_nucleotide(), boost::lambda::_1 ) ) )
{
//not suitable
continue;
}
else
{
found_binding_site = true;
binding_site = *ad;
break;
}
}
if( ! found_binding_site )
{
throw std::logic_error( "Could not find suitable binding site" );
}
//choose a position in the sequence to put it
unsigned binding_site_idx = get_uniform_index(seq_length - binding_site->sequence.size());
//replace the substring with the site
input.centre_sequence.replace(
binding_site_idx,
binding_site->sequence.size(),
binding_site->sequence);
}
MatrixTestData::~MatrixTestData() { }
BiFaTestData::BiFaTestData(
const BiFaInput & input )
: input( input )
{
}
BiFaTestData::~BiFaTestData() { }
const BiFaInput &
BiFaTestData::get_input() const
{
return input;
}
const BiFaOutput &
BiFaTestData::get_output_for( BiFaAlgorithm::ptr_t algorithm )
{
output_map_t::iterator o = output.find( algorithm );
if( output.end() == o )
{
o = output.insert( output_map_t::value_type( algorithm, ( *algorithm )( input ) ) ).first;
}
return *( o->second );
}
const TF::hit_set_t &
BiFaTestData::get_factors_for( BiFaAlgorithm::ptr_t algorithm )
{
factor_map_t::iterator f = factor_hits.find( algorithm );
if( factor_hits.end() == f )
{
f = factor_hits.insert( factor_map_t::value_type( algorithm, TF::hit_set_ptr_t( new TF::hit_set_t ) ) ).first;
model_hits_2_factor_hits( get_output_for( algorithm ).hits, *( f->second ) );
}
return *( f->second );
}
/** Generate some test data from a fragment: the fragment's own sequence is used
as the centre sequence and the test looks for the factors linked to the fragment. */
FragmentTestData::FragmentTestData(
const Fragment * fragment)
: BiFaTestData( fragment->sequence )
, fragment( fragment )
{
}
FragmentTestData::~FragmentTestData() { }
MatrixTest::MatrixTest(
MatrixTestData & data,
BiFaAlgorithm::ptr_t algorithm )
: data( data )
, algorithm( algorithm )
{
}
MatrixTest::~MatrixTest() { }
Test::ptr_t
MatrixTestData::get_test_for_algorithm( BiFaAlgorithm::ptr_t algorithm )
{
return
Test::ptr_t(
new MatrixTest(
*this,
algorithm
)
)
;
}
BinaryTestResults
MatrixTest::get_results( double threshold )
{
//all of the models the algorithm applies
BindingModel::set_t universe;
algorithm->fill_model_universe( universe );
//those that the test is looking for
const BindingModel::set_t true_positives = boost::assign::list_of
( algorithm->get_model_for( boost::any( data.matrix->get_link() ) ) )
;
return
get_results_for(
data.get_output_for( algorithm ).hits,
threshold,
universe,
true_positives );
}
BinaryTestResults
get_results_for(
const BindingModel::hit_set_t & hits,
double threshold,
const BindingModel::set_t & universe,
const BindingModel::set_t & true_positives)
{
//check each and add to result
BinaryTestResults result;
for( BindingModel::set_t::const_iterator m = universe.begin();
universe.end() != m;
++m )
{
const double p_binding = get_p_binding( hits, *m );
const bool in_test_output = p_binding > threshold;
const bool should_be_in_test_output = true_positives.end() != true_positives.find( *m );
//update true positives, etc...
result( in_test_output, should_be_in_test_output );
}
return result;
}
Test::ptr_t
FragmentTestData::get_test_for_algorithm( BiFaAlgorithm::ptr_t algorithm )
{
return
Test::ptr_t(
new BiFaFactorTest(
*this,
fragment->factor_links,
algorithm
)
)
;
}
BiFaFactorTest::BiFaFactorTest(
BiFaTestData & data,
const FactorLinkList & factors,
BiFaAlgorithm::ptr_t algorithm )
: data( data )
, factors( factors )
, algorithm( algorithm )
{
}
BiFaFactorTest::~BiFaFactorTest() { }
BinaryTestResults
BiFaFactorTest::get_results( double threshold )
{
//all of the models the algorithm applies
BindingModel::set_t universe;
algorithm->fill_model_universe( universe );
//those that the test is looking for
BindingModel::set_t true_positives;
{
typedef std::set< TableLink > table_link_set;
table_link_set matrix_links;
for( FactorLinkList::const_iterator f = factors.begin();
factors.end() != f;
++f )
{
const Factor * factor = BiobaseDb::singleton().get_entry< FACTOR_DATA >( f->get()->link );
std::copy( factor->matrices.begin(), factor->matrices.end(), std::inserter( matrix_links, matrix_links.begin() ) );
}
for( table_link_set::const_iterator l = matrix_links.begin();
matrix_links.end() != l;
++l )
{
true_positives.insert( algorithm->get_model_for( boost::any( *l ) ) );
}
}
return
get_results_for(
data.get_output_for( algorithm ).hits,
threshold,
universe,
true_positives );
}
void create_test_data_from_matrices( BiFaTestData::vec_t & test_data, size_t seq_length )
{
const matrix_filter_it matrices_begin = get_matrices_begin();
const matrix_filter_it matrices_end = get_matrices_end();
for (matrix_filter_it i = matrices_begin;
matrices_end != i;
++i)
{
try
{
test_data.push_back( BiFaTestData::ptr_t( new MatrixTestData( i->second.get(), seq_length) ) );
}
catch ( const std::exception & )
{
//cerr << "Error: " << i->second->get_name() << ": " << ex << endl;
}
}
}
void create_test_data_from_fragments( BiFaTestData::vec_t & test_data )
{
for( Fragment::map_t::const_iterator i = BiobaseDb::singleton().get_fragments().begin();
BiobaseDb::singleton().get_fragments().end() != i;
++i)
{
test_data.push_back( BiFaTestData::ptr_t( new FragmentTestData( i->second.get() ) ) );
}
}
void
create_test_data_from_sites(
BiFaTestData::vec_t & test_data )
{
for( SiteTestCases::const_iterator s = SiteTestCases::singleton().begin();
SiteTestCases::singleton().end() != s;
++s )
{
test_data.push_back(
BiFaTestData::ptr_t(
new SiteTestData(
**s
)
)
)
;
}
}
struct threshold_calculator
{
double threshold;
double max_difference;
threshold_calculator( )
: threshold( 0.0 )
, max_difference( 0.0 )
{
}
void operator()(
double this_threshold,
double this_value,
double next_threshold,
double next_value)
{
double difference = fabs( next_value - this_value );
if( difference > max_difference )
{
max_difference = difference;
threshold = ( this_threshold + next_threshold ) / 2.0;
}
}
};
ROCPoint
get_roc_point(
Test::vec_t & test_data,
double threshold )
{
//get the roc point for this threshold
BinaryTestResults results =
std::accumulate(
boost::make_transform_iterator(
test_data.begin(),
boost::bind(
&Test::get_results,
_1,
threshold
)
),
boost::make_transform_iterator(
test_data.end(),
boost::bind(
&Test::get_results,
_1,
threshold
)
),
BinaryTestResults()
);
return results.get_roc_point();
}
BiFaTestData2Test::BiFaTestData2Test( BiFaAlgorithm::ptr_t algorithm )
: algorithm( algorithm )
{
}
Test::ptr_t
BiFaTestData2Test::operator()( BiFaTestData::ptr_t data ) const
{
return data->get_test_for_algorithm( algorithm );
}
void
run_tests(
Test::vec_t & test_data,
test_result_map_t & result_map,
double min_threshold,
double max_threshold,
unsigned num_thresholds )
{
result_map.clear();
result_map.insert( test_result_map_t::value_type( min_threshold, get_roc_point( test_data, min_threshold ) ) );
result_map.insert( test_result_map_t::value_type( max_threshold, get_roc_point( test_data, max_threshold ) ) );
bool using_spec = false;
while( result_map.size() < num_thresholds )
{
//calculate new threshold
threshold_calculator calc;
for( test_result_map_t::const_iterator r1 = result_map.begin();
result_map.end() != r1;
++r1 )
{
test_result_map_t::const_iterator r2 = boost::next( r1 );
if( result_map.end() == r2 )
{
break;
}
calc(
r1->first,
using_spec
? r1->second.specificity
: r1->second.sensitivity,
r2->first,
using_spec
? r2->second.specificity
: r2->second.sensitivity );
}
const double threshold = calc.threshold;
using_spec = ! using_spec;
ROCPoint roc_point = get_roc_point( test_data, threshold );
result_map.insert( test_result_map_t::value_type( threshold, roc_point ) );
}
}
std::ostream &
operator<<( std::ostream & os, const test_result_map_t::value_type & value )
{
return
os
<< value.first
<< "," << value.second.sensitivity
<< "," << value.second.specificity
;
}
std::ostream &
operator<<( std::ostream & os, const test_result_map_t & result_map )
{
os << "Threshold,Sensitivity,Specificity\n";
std::copy(
result_map.begin(),
result_map.end(),
std::ostream_iterator< test_result_map_t::value_type >( os, "\n" ) );
return os;
}
BIO_NS_END
|
{"hexsha": "ca166150c96b96f68a674b267de83323bd7f36f0", "size": 11048, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "C++/src/bio/lib/test_data.cpp", "max_stars_repo_name": "JohnReid/biopsy", "max_stars_repo_head_hexsha": "1eeb714ba5b53f2ecf776d865d32e2078cbc0338", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "C++/src/bio/lib/test_data.cpp", "max_issues_repo_name": "JohnReid/biopsy", "max_issues_repo_head_hexsha": "1eeb714ba5b53f2ecf776d865d32e2078cbc0338", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "C++/src/bio/lib/test_data.cpp", "max_forks_repo_name": "JohnReid/biopsy", "max_forks_repo_head_hexsha": "1eeb714ba5b53f2ecf776d865d32e2078cbc0338", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.0839694656, "max_line_length": 118, "alphanum_fraction": 0.6965966691, "num_tokens": 3004}
|
[STATEMENT]
lemma securessel2:
assumes "ssel (STMap TAddr (STValue (TUInt 256))) (STR ''balance'') [SENDER] ep env cd st = Normal ((loc, type), st')"
and "fmlookup (storage st) (STR ''Victim'') = Some s"
and "ReadL\<^sub>i\<^sub>n\<^sub>t (accessBalance (accounts st) (STR ''Victim'')) - SUMM s \<ge> bal \<and> POS s"
obtains s' where
"loc = sender env + (STR ''.'' + STR ''balance'')"
and "type = STValue (TUInt 256)"
and "fmlookup (storage st') (STR ''Victim'') = Some s'"
and "ReadL\<^sub>i\<^sub>n\<^sub>t (accessBalance (accounts st') (STR ''Victim'')) - SUMM s' \<ge> bal \<and> POS s'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>s'. \<lbrakk>loc = sender env + (STR ''.'' + STR ''balance''); type = STValue (TUInt 256); fmlookup (storage st') STR ''Victim'' = Some s'; bal \<le> \<lceil>accessBalance (accounts st') STR ''Victim''\<rceil> - SUMM s' \<and> (\<forall>ad x. fmlookup s' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<And>s'. \<lbrakk>loc = sender env + (STR ''.'' + STR ''balance''); type = STValue (TUInt 256); fmlookup (storage st') STR ''Victim'' = Some s'; bal \<le> \<lceil>accessBalance (accounts st') STR ''Victim''\<rceil> - SUMM s' \<and> (\<forall>ad x. fmlookup s' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
from assms(1)
[PROOF STATE]
proof (chain)
picking this:
local.ssel (STMap TAddr (STValue (TUInt 256))) STR ''balance'' [SENDER] ep env cd st = Normal ((loc, type), st')
[PROOF STEP]
obtain v t st'' st''' x
where *: "expr SENDER ep env cd st = Normal ((KValue v, t), st'')"
and **: "ssel (STValue (TUInt 256)) (hash (STR ''balance'') v) [] ep env cd st'' = Normal (x,st''')"
and "st' = st'''"
[PROOF STATE]
proof (prove)
using this:
local.ssel (STMap TAddr (STValue (TUInt 256))) STR ''balance'' [SENDER] ep env cd st = Normal ((loc, type), st')
goal (1 subgoal):
1. (\<And>v t st'' x st'''. \<lbrakk>local.expr SENDER ep env cd st = Normal ((KValue v, t), st''); local.ssel (STValue (TUInt 256)) (hash STR ''balance'' v) [] ep env cd st'' = Normal (x, st'''); st' = st'''\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto split:if_split_asm)
[PROOF STATE]
proof (state)
this:
local.expr SENDER ep env cd st = Normal ((KValue v, t), st'')
local.ssel (STValue (TUInt 256)) (hash STR ''balance'' v) [] ep env cd st'' = Normal (x, st''')
st' = st'''
goal (1 subgoal):
1. (\<And>s'. \<lbrakk>loc = sender env + (STR ''.'' + STR ''balance''); type = STValue (TUInt 256); fmlookup (storage st') STR ''Victim'' = Some s'; bal \<le> \<lceil>accessBalance (accounts st') STR ''Victim''\<rceil> - SUMM s' \<and> (\<forall>ad x. fmlookup s' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
local.expr SENDER ep env cd st = Normal ((KValue v, t), st'')
local.ssel (STValue (TUInt 256)) (hash STR ''balance'' v) [] ep env cd st'' = Normal (x, st''')
st' = st'''
goal (1 subgoal):
1. (\<And>s'. \<lbrakk>loc = sender env + (STR ''.'' + STR ''balance''); type = STValue (TUInt 256); fmlookup (storage st') STR ''Victim'' = Some s'; bal \<le> \<lceil>accessBalance (accounts st') STR ''Victim''\<rceil> - SUMM s' \<and> (\<forall>ad x. fmlookup s' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
obtain s'' where "v =sender env"
and "t = Value TAddr"
and ***:"fmlookup (storage st'') (STR ''Victim'') = Some s''"
and ****: "ReadL\<^sub>i\<^sub>n\<^sub>t (accessBalance (accounts st'') (STR ''Victim'')) - SUMM s'' \<ge> bal \<and> POS s''"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>s''. \<lbrakk>v = sender env; t = Value TAddr; fmlookup (storage st'') STR ''Victim'' = Some s''; bal \<le> \<lceil>accessBalance (accounts st'') STR ''Victim''\<rceil> - SUMM s'' \<and> (\<forall>ad x. fmlookup s'' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using securesender[OF * assms(2,3)]
[PROOF STATE]
proof (prove)
using this:
(\<And>s'. \<lbrakk>v = sender env; t = Value TAddr; fmlookup (storage st'') STR ''Victim'' = Some s'; bal \<le> \<lceil>accessBalance (accounts st'') STR ''Victim''\<rceil> - SUMM s' \<and> (\<forall>ad x. fmlookup s' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>s''. \<lbrakk>v = sender env; t = Value TAddr; fmlookup (storage st'') STR ''Victim'' = Some s''; bal \<le> \<lceil>accessBalance (accounts st'') STR ''Victim''\<rceil> - SUMM s'' \<and> (\<forall>ad x. fmlookup s'' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
v = sender env
t = Value TAddr
fmlookup (storage st'') STR ''Victim'' = Some s''
bal \<le> \<lceil>accessBalance (accounts st'') STR ''Victim''\<rceil> - SUMM s'' \<and> (\<forall>ad x. fmlookup s'' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)
goal (1 subgoal):
1. (\<And>s'. \<lbrakk>loc = sender env + (STR ''.'' + STR ''balance''); type = STValue (TUInt 256); fmlookup (storage st') STR ''Victim'' = Some s'; bal \<le> \<lceil>accessBalance (accounts st') STR ''Victim''\<rceil> - SUMM s' \<and> (\<forall>ad x. fmlookup s' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
v = sender env
t = Value TAddr
fmlookup (storage st'') STR ''Victim'' = Some s''
bal \<le> \<lceil>accessBalance (accounts st'') STR ''Victim''\<rceil> - SUMM s'' \<and> (\<forall>ad x. fmlookup s'' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)
goal (1 subgoal):
1. (\<And>s'. \<lbrakk>loc = sender env + (STR ''.'' + STR ''balance''); type = STValue (TUInt 256); fmlookup (storage st') STR ''Victim'' = Some s'; bal \<le> \<lceil>accessBalance (accounts st') STR ''Victim''\<rceil> - SUMM s' \<and> (\<forall>ad x. fmlookup s' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
obtain s''' where "x = (hash (STR ''balance'') v, STValue (TUInt 256))"
and "fmlookup (storage st''') (STR ''Victim'') = Some s'''"
and "ReadL\<^sub>i\<^sub>n\<^sub>t (accessBalance (accounts st''') (STR ''Victim'')) - SUMM s''' \<ge> bal \<and> POS s'''"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>s'''. \<lbrakk>x = (hash STR ''balance'' v, STValue (TUInt 256)); fmlookup (storage st''') STR ''Victim'' = Some s'''; bal \<le> \<lceil>accessBalance (accounts st''') STR ''Victim''\<rceil> - SUMM s''' \<and> (\<forall>ad x. fmlookup s''' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using securessel[OF ** *** ****]
[PROOF STATE]
proof (prove)
using this:
(\<And>s'. \<lbrakk>x = (hash STR ''balance'' v, STValue (TUInt 256)); fmlookup (storage st''') STR ''Victim'' = Some s'; bal \<le> \<lceil>accessBalance (accounts st''') STR ''Victim''\<rceil> - SUMM s' \<and> (\<forall>ad x. fmlookup s' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>s'''. \<lbrakk>x = (hash STR ''balance'' v, STValue (TUInt 256)); fmlookup (storage st''') STR ''Victim'' = Some s'''; bal \<le> \<lceil>accessBalance (accounts st''') STR ''Victim''\<rceil> - SUMM s''' \<and> (\<forall>ad x. fmlookup s''' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x = (hash STR ''balance'' v, STValue (TUInt 256))
fmlookup (storage st''') STR ''Victim'' = Some s'''
bal \<le> \<lceil>accessBalance (accounts st''') STR ''Victim''\<rceil> - SUMM s''' \<and> (\<forall>ad x. fmlookup s''' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)
goal (1 subgoal):
1. (\<And>s'. \<lbrakk>loc = sender env + (STR ''.'' + STR ''balance''); type = STValue (TUInt 256); fmlookup (storage st') STR ''Victim'' = Some s'; bal \<le> \<lceil>accessBalance (accounts st') STR ''Victim''\<rceil> - SUMM s' \<and> (\<forall>ad x. fmlookup s' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
local.expr SENDER ep env cd st = Normal ((KValue v, t), st'')
local.ssel (STValue (TUInt 256)) (hash STR ''balance'' v) [] ep env cd st'' = Normal (x, st''')
st' = st'''
v = sender env
t = Value TAddr
fmlookup (storage st'') STR ''Victim'' = Some s''
bal \<le> \<lceil>accessBalance (accounts st'') STR ''Victim''\<rceil> - SUMM s'' \<and> (\<forall>ad x. fmlookup s'' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)
x = (hash STR ''balance'' v, STValue (TUInt 256))
fmlookup (storage st''') STR ''Victim'' = Some s'''
bal \<le> \<lceil>accessBalance (accounts st''') STR ''Victim''\<rceil> - SUMM s''' \<and> (\<forall>ad x. fmlookup s''' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
local.expr SENDER ep env cd st = Normal ((KValue v, t), st'')
local.ssel (STValue (TUInt 256)) (hash STR ''balance'' v) [] ep env cd st'' = Normal (x, st''')
st' = st'''
v = sender env
t = Value TAddr
fmlookup (storage st'') STR ''Victim'' = Some s''
bal \<le> \<lceil>accessBalance (accounts st'') STR ''Victim''\<rceil> - SUMM s'' \<and> (\<forall>ad x. fmlookup s'' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)
x = (hash STR ''balance'' v, STValue (TUInt 256))
fmlookup (storage st''') STR ''Victim'' = Some s'''
bal \<le> \<lceil>accessBalance (accounts st''') STR ''Victim''\<rceil> - SUMM s''' \<and> (\<forall>ad x. fmlookup s''' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)
goal (1 subgoal):
1. thesis
[PROOF STEP]
using assms(1) that
[PROOF STATE]
proof (prove)
using this:
local.expr SENDER ep env cd st = Normal ((KValue v, t), st'')
local.ssel (STValue (TUInt 256)) (hash STR ''balance'' v) [] ep env cd st'' = Normal (x, st''')
st' = st'''
v = sender env
t = Value TAddr
fmlookup (storage st'') STR ''Victim'' = Some s''
bal \<le> \<lceil>accessBalance (accounts st'') STR ''Victim''\<rceil> - SUMM s'' \<and> (\<forall>ad x. fmlookup s'' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)
x = (hash STR ''balance'' v, STValue (TUInt 256))
fmlookup (storage st''') STR ''Victim'' = Some s'''
bal \<le> \<lceil>accessBalance (accounts st''') STR ''Victim''\<rceil> - SUMM s''' \<and> (\<forall>ad x. fmlookup s''' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)
local.ssel (STMap TAddr (STValue (TUInt 256))) STR ''balance'' [SENDER] ep env cd st = Normal ((loc, type), st')
\<lbrakk>loc = sender env + (STR ''.'' + STR ''balance''); type = STValue (TUInt 256); fmlookup (storage st') STR ''Victim'' = Some ?s'; bal \<le> \<lceil>accessBalance (accounts st') STR ''Victim''\<rceil> - SUMM ?s' \<and> (\<forall>ad x. fmlookup ?s' (ad + (STR ''.'' + STR ''balance'')) = Some x \<longrightarrow> 0 \<le> \<lceil>x\<rceil>)\<rbrakk> \<Longrightarrow> thesis
goal (1 subgoal):
1. thesis
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
thesis
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4944, "file": "Solidity_Reentrancy", "length": 17}
|
C %W% %G%
C****************************************************************
C
C File: p_gtbase.f
C Purpose: IPF shell program to process /OLD_BASE commands
C
C Author: Walt Powell Date: 20 February 1992
C Modified: 20 February 1992
C Called by:
C
C****************************************************************
C
integer function p_gtbase (in_buffer, out_buffer)
character in_buffer*(*)
character out_buffer*(*)
include 'ipfinc/parametr.inc'
include 'ipfinc/alpha.inc'
include 'ipfinc/blank.inc'
include 'ipfinc/lfiles.inc'
include 'ipfinc/filnam.inc'
include 'ipfinc/jobctl.inc'
include 'ipfinc/dtaiop.inc'
include 'ipfinc/errorsw.inc'
include 'ipfinc/pfstates.inc'
include 'ipfinc/prt.inc'
include 'ipfinc/errmsg.inc'
include 'ipfinc/errorx.inc'
include 'ipfinc/bus.inc'
include 'ipfinc/cbus.inc'
include 'ipfinc/area.inc'
include 'ipfinc/arcntl.inc'
include 'ipfinc/sortuvov.inc'
include 'ipfinc/zonehash.inc'
common /bldtbl/ bldtbl, fltstr
logical bldtbl
logical baseok
character null * 1, linefeed * 1, stext * 50
integer o2, apdoutbuf, fltstr, kmpzone, bldzone
external kmpzone, swapzone
max_buf = len( out_buffer ) - len ( stext ) - 10
if (max_buf .lt. 0) max_buf = len( out_buffer )
null = char(0)
linefeed = char(10)
p_gtbase = 0 ! default return SUCCESS state
numerr = 0 ! reinitialize error count
out_buffer(1:1) = null
call baseinit
call ctlpow
call loadarcv
buf = inrcd
crun1(3) = ' '
call gtbase (obasnm, crun1(3), baseok)
call prtime('GTBASE')
if (.not.baseok) then
jobreq(1) = 'quit'
p_gtbase = 1
ostates = 0
else
ostates = 0
if ( lskp .eq. 1 .or. lskp .eq. 2 ) then
ostates = 5
else if ( lskp .eq. 3 ) then
ostates = 7
endif
C
C Store Q_NET in CAPCOR(2,*)
C
do nb = 1, ntot
kt = inp2opt(nb)
capcor(2,kt) = qnetu(kt)
enddo
if (ntotc .eq. 0) then
c
c*** Build incomplete ACZNAM array from bus data and sort it
c
do nb = 1, MAXCZN
nextptr_z(nb) = 0
enddo
do nb = 1, HASHSIZE_Z
htable_z(nb) = 0
enddo
nztot = 0
do nb = 1, ntot
izone = bldzone(zone(nb), jarzn(nb))
enddo
if (nztot .gt. 0) then
call qiksrt (1, nztot, kmpzone, swapzone)
endif
else
do i = 1, ntotc
k1 = karea(1,i)
pgen = busdta(8,k1)
ncb = kbsdta(15,k1)
do while (ncb .gt. 0)
pgen = pgen + bctbl(6,ncb)
ncb = bctbl_nxt(ncb)
enddo
area(7,i) = dble(pgen)
enddo
endif
bldtbl = .false.
c
c*** Try to catch any problems with "old" oldbase files (PF60xx)
c
c if ( lskp .eq. 2 ) then
c ostates = 2
c stext = '/solution' // linefeed // '>BASE_SOLUTION' //
c & null
c call cpyinbfil( stext, inp )
c i = isolton ()
c endif
c
endif
c
c************************************************************************
c*** info for debugging
c
c if (p_gtbase .eq. 0) then
c last = lastch (obasnm)
c write (errbuf(1), 336) obasnm(1:last)
c 336 format (' OLD_BASE file ', a, ' opened.')
c call prterx ('I', 1)
c else
c last = lastch (obasnm)
c write (errbuf(1), 338) obasnm(1:last)
c 338 format (' OLD_BASE file ', a, ' could not be opened.')
c call prterx ('I', 1)
c endif
c************************************************************************
c
c Append error messages to buffer
c
j = 1
length = 1
o2 = index (out_buffer,null)
do while (j .le. numerr .and. length .gt. 0)
length = apdoutbuf(o2, errm(j), out_buffer(o2:))
o2 = o2 + length
j = j + 1
enddo
c
c Append summary
c
if ( o2 .gt. max_buf ) then
o2 = max_buf
do while ( o2 .gt. 1 .and.
& out_buffer(o2:o2) .ne. linefeed )
o2 = o2 - 1
enddo
out_buffer(o2:o2) = null
endif
write (stext, 340) 'p_gtbase.f', p_gtbase, ostates
340 format ('/', a, ' return status: ', i2, ' IPF state: ', i2)
length = apdoutbuf(o2, stext, out_buffer(o2:))
o2 = o2 + length
c
c Reset error flag
c
call setercnt (0, ' ')
return
end
|
{"hexsha": "26e5ed62d5405cb3cbd6947e36faa1d3f20d12d0", "size": 5211, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ipf/p_gtbase.f", "max_stars_repo_name": "mbheinen/bpa-ipf-tsp", "max_stars_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-04-02T15:34:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T08:57:45.000Z", "max_issues_repo_path": "ipf/p_gtbase.f", "max_issues_repo_name": "cuihantao/bpa-ipf-tsp", "max_issues_repo_head_hexsha": "cb2d0917ae42eff571017e9162f550f87900b83f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-02-08T14:21:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-13T01:27:56.000Z", "max_forks_repo_path": "ipf/p_gtbase.f", "max_forks_repo_name": "mbheinen/bpa-ipf-tsp", "max_forks_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-02-03T04:26:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T15:04:31.000Z", "avg_line_length": 30.4736842105, "max_line_length": 78, "alphanum_fraction": 0.4478986759, "num_tokens": 1507}
|
import numpy as np
from utils import load_data, load_polblogs_data
dataset = "polblogs"
if dataset == "polblogs":
tmp_adj, features, labels, idx_train, idx_test = load_polblogs_data()
print (sum(sum(tmp_adj)))
print (tmp_adj.shape)
else:
_, features, labels, idx_train, idx_val, idx_test, tmp_adj = load_data(dataset)
for i in range(10):
perturb = np.array([float(line.rstrip('\n')) for line in open("/home/xiao/Documents/pygcn/pygcn/vision4_result/{1}_xi4_epoch100/perturbation_{1}_{0}.txt".format(i, dataset))])
idx = list(np.where(perturb>0.5))
print (labels[idx])
|
{"hexsha": "46a6e4f0e5cbbd725fcf3671ea23c0d2a63f25b1", "size": 598, "ext": "py", "lang": "Python", "max_stars_repo_path": "GUA/utils_polblogs.py", "max_stars_repo_name": "Anou9531/GUA", "max_stars_repo_head_hexsha": "354acceb69656e76fb4ee296c66ae42c18cd939f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2020-02-16T02:31:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-23T12:53:48.000Z", "max_issues_repo_path": "GUA/utils_polblogs.py", "max_issues_repo_name": "Anou9531/GUA", "max_issues_repo_head_hexsha": "354acceb69656e76fb4ee296c66ae42c18cd939f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-15T04:56:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T02:41:32.000Z", "max_forks_repo_path": "GUA/utils_polblogs.py", "max_forks_repo_name": "Anou9531/GUA", "max_forks_repo_head_hexsha": "354acceb69656e76fb4ee296c66ae42c18cd939f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-05-14T06:48:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-14T04:32:47.000Z", "avg_line_length": 39.8666666667, "max_line_length": 179, "alphanum_fraction": 0.7140468227, "include": true, "reason": "import numpy", "num_tokens": 170}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function
from astropy.tests.helper import pytest, remote_data
from .utils import turn_off_internet
# this contains imports plugins that configure py.test for astropy tests.
# by importing them here in conftest.py they are discoverable by py.test
# no matter how it is invoked within the source tree.
from astropy.tests.pytest_plugins import *
# pytest magic:
# http://pytest.org/latest/plugins.html#_pytest.hookspec.pytest_configure
# use pytest.set_trace() to interactively inspect config's features
def pytest_configure(config):
if config.getoption('remote_data'):
pass
else:
turn_off_internet(verbose=config.option.verbose)
try:
from astropy.tests.pytest_plugins import pytest_configure
pytest_configure(config)
except ImportError:
# assume astropy v<0.3
pass
# Add astropy to test header information and remove unused packages.
# Pytest header customisation was introduced in astropy 1.0.
try:
PYTEST_HEADER_MODULES['astropy'] = 'astropy'
del PYTEST_HEADER_MODULES['h5py']
del PYTEST_HEADER_MODULES['Scipy']
del PYTEST_HEADER_MODULES['Matplotlib']
except NameError:
pass
|
{"hexsha": "9bca6d3f69ffece7acf42a68a4ce9bf4d880a7c0", "size": 1261, "ext": "py", "lang": "Python", "max_stars_repo_path": "astroquery/conftest.py", "max_stars_repo_name": "AlexaVillaume/astroquery", "max_stars_repo_head_hexsha": "85402770b9c0d4b98ce9ac451f41a6be1f838076", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-20T00:07:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-20T00:07:01.000Z", "max_issues_repo_path": "astroquery/conftest.py", "max_issues_repo_name": "AlexaVillaume/astroquery", "max_issues_repo_head_hexsha": "85402770b9c0d4b98ce9ac451f41a6be1f838076", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "astroquery/conftest.py", "max_forks_repo_name": "AlexaVillaume/astroquery", "max_forks_repo_head_hexsha": "85402770b9c0d4b98ce9ac451f41a6be1f838076", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-20T00:07:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-20T00:07:05.000Z", "avg_line_length": 30.756097561, "max_line_length": 73, "alphanum_fraction": 0.7557494052, "include": true, "reason": "from astropy", "num_tokens": 285}
|
#!/bin/py
#
# inexact Newton-conjugate gradient method
#
# solve:
# min f(x) = 1/2 x.t (I + mu * A) x + sigma/4 (x.t A x)^2
#
import numpy as np
def mycg(A, b, maxiter, tol, x):
    """Conjugate Gradient Method.

    MYCG(A, b, maxiter, tol, x0) solves the system of linear equations
    A*x = b for x. The N-by-N coefficient matrix A must be symmetric
    positive definite and the right-hand side vector b must have length N.
    """
    A = np.asarray(A)
    r = np.dot(A, x) - b              # initial residual
    d = -r                            # initial search direction
    rsold = np.dot(r, r)
    for i in range(maxiter):
        Ad = np.dot(A, d)
        alpha = rsold / np.dot(d, Ad)
        x = x + alpha * d             # update the iterate
        r = r + alpha * Ad            # update the residual
        rsnew = np.dot(r, r)
        if np.sqrt(rsnew) < tol:      # converged
            break
        d = -r + (rsnew / rsold) * d  # new conjugate search direction
        rsold = rsnew
    return x
# parameters
sigma = 1.0
mu = 10.0
#A = np.matrix('5 1 0 0.5; 1 4 0.5 0; 0 0.5 3 0; 0.5 0 0 2')
A = np.matrix('4 -1 1; -1 4 -2; 1 -2 4')
b = np.array([12, -1, 5])
x = np.transpose(np.array([1, -1, 2]))
#x = np.array([np.cos(70),np.sin(70),np.cos(70),np.sin(70)])
# identity matrix
I = np.identity(len(A))
if __name__ == '__main__':
    """Main function"""
    print(A)
    print(I)
    print(b)
    print(mycg(A, b, 1000, .0001, x))
#
# nick
# 2/25/14
#
|
{"hexsha": "126e954cfba8ddb8cdc96958d593304c618f7d4a", "size": 1347, "ext": "py", "lang": "Python", "max_stars_repo_path": "inv_prob/ps2/in-cg.py", "max_stars_repo_name": "nicholasmalaya/paleologos", "max_stars_repo_head_hexsha": "11959056caa80d3c910759b714a0f8e42f986f0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-04T17:49:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-04T17:49:42.000Z", "max_issues_repo_path": "inv_prob/ps2/in-cg.py", "max_issues_repo_name": "nicholasmalaya/paleologos", "max_issues_repo_head_hexsha": "11959056caa80d3c910759b714a0f8e42f986f0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inv_prob/ps2/in-cg.py", "max_forks_repo_name": "nicholasmalaya/paleologos", "max_forks_repo_head_hexsha": "11959056caa80d3c910759b714a0f8e42f986f0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-01-04T16:08:18.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-16T19:34:24.000Z", "avg_line_length": 22.0819672131, "max_line_length": 81, "alphanum_fraction": 0.5397178916, "include": true, "reason": "import numpy", "num_tokens": 479}
|
% To be compiled with pdf LaTeX
% This file is to be included into master file via \input command
% Note that there is no \begin{document} \end{document} brackets!
\newpage
\section{Control}
\label{sec:control}
%\subsection{Nomenclature}
%\mbox{}
%$\bm{s}$ - vector of wavefront sensor measurements concatenated both for x- and
%y-slopes and from different sensors, [pixels or radians of slope].
%\\
%$\phi(\bm{x})$ - continuous wavefront phase distribution in exit pupil, [rad].
%\\
%$\{ \phi(\bm{x})_{i} \}_{i=1}^{\infty}$ - set of basis functions for the exit
%pupil phase expansion, [rad].
%\\
%$\bm{x}_{Sc}$ - vector of discretized phase values in the telescope exit pupil
%for the light coming from a science object, [rad].
%\\
%$\hat{\bm{x}}_{0}$ - estimate of vector of discretized phase values in the
%telescope exit pupil
%for the light coming from a science object, [rad].
%\\
%$\bm{x}$ - vector of discretized phase values in the telescope exit pupil for
%the light from a guide star to wavefront sensor, concatenated for all sensors,
%[rad].
%\\
%$\bm{n}_{s}$ - vector of sensor readout noise concatenated from all sensors,
%[pixels or radians of slope].
%\\
%$\{ f(\bm{x})_{i} \}_{i=1}^{\#actuators}$ - set of continuous phase influence
%functions of a deformable mirror, [rad].
%$\bm{c}$ - vector of control commands concatenated from all correctors (DMs,
%tip-tilt mirrors, moving stages), [control units, e.g. V].
%\\
%$\hat{\bm{c}}$ - vector of control commands estimated from a control algorithm.
%\\
%$\bm{n}_{a}$ - vector of actuator noise concatenated from all correctors,
%[control units].
%\\
%$\mathcal{E}$ - estimation (control) matrix.
%\\
%$\mathcal{G}$ - wavefront-to-WFS interaction matrix, [rad/slope].
%\\
%$\delta \mathcal{G}$ - error in the wavefront-to-WFS interaction matrix,
%[rad/slope].
%\\
%$\mathcal{D}$ - command-to-WFS interaction (``poke'') matrix, [control
%units/slope].
%\\
%$\delta \mathcal{D}$ - error in the command-to-WFS interaction matrix,
%[rad/slope].
%\\
%$\mathcal{F}$ - command-to-phase interaction (``influence function'') matrix,
%[control units/rad].
%\\
%$\delta \mathcal{F}$ - error in command-to-phase interaction (``influence
%function'') matrix, [control units/rad].
%\\
%$\langle \bm{x} \bm{x}^{T} \rangle$ - auto-covariance matrix of the guide star
%phase in exit pupil, [rad$^{2}$].
%\\
%$\langle \bm{x}_{Sc} \bm{x}^{T} \rangle$ - cross-covariance matrix of the
%science object and guide star phase in exit pupil, [rad$^{2}$].
%\\
%$\langle \bm{n}_{s} \bm{n}_{s}^{T} \rangle$ - auto-covariance matrix of the
%sensor readout noise, [slope$^{2}$].
%\\
%$\langle \bm{n}_{a} \bm{n}_{a}^{T} \rangle$ - auto-covariance matrix of the
%actuator noise, [control units$^{2}$].
%\\
\subsection{Linear system model and discretization}
\label{subsec:system-models}
In our treatment of AO system modeling we closely follow the ``natural
modeling'' approach described in Refs.
\cite{WibergMaxGavel1,WibergMaxGavel2}. To begin with, we consider the simplest
case of a single-conjugate AO system, which is modeled through the following
fundamental inputs.
\begin{enumerate}
\item A light source to be imaged with an AO system (the \emph{target})
creates a continuous phase distribution $\phi_{0}(\bm{x})$ in the
telescope entrance pupil,
which is the accumulated phase distortion along the
path from the source to the telescope, including the atmospheric
turbulence distortion. It is assumed that the
autocorrelation function $\langle \phi_{0}(\bm{x}_{1}) \phi_{0}(\bm{x}_{2})
\rangle_{\phi}$ is known.
\item A set $\{ f_{i}(\bm{x}) \}_{i=1}^{\#ACT}$ ($\#ACT$ is the number of DM
actuators) of the DM actuator influence
functions projected as phase correction in the telescope entrance pupil. We
will write these functions in vector form as $\bm{f}(\bm{x})$. Note that, like
$\phi(\bm{x})$, these functions depend on the light source. The DM
correction is assumed to be a linear combination of the influence functions:
\begin{equation} \label{eq:dm-phase-correction}
\phi_{DM}(\bm{x}) = \sum_{i=1}^{\#ACT} c_{i} f_{i}(\bm{x})
\end{equation}
or
$$
\phi_{DM}(\bm{x}) = \bm{f}^{T}(\bm{x}) \bm{c},
$$
where $\bm{c}$ is the vector of DM correction commands.
\item A wavefront sensor (WFS) accepts light from a \emph{reference} source
that does not in general coincide with the target. The WFS is modeled as a
linear mapping of the reference wavefront $\phi(\bm{x})$ in the entrance pupil
to a set of sensor measurements:
\begin{equation} \label{eq:wfs-measurement-operator}
\bm{s} = \mathcal{M} [ \phi(\bm{x}) ],
\end{equation}
where $\mathcal{M}$ is a linear \emph{measurement operator}.
\index{measurement operator} The \emph{measurement equation}
\index{measurement equation} describing the full linear sensor model is
\begin{equation} \label{eq:measurement-equation}
\bm{s} = \mathcal{M} [ \phi(\bm{x}) + \delta \phi(\bm{x}) ] + \bm{n},
\end{equation}
where $\bm{n}$ is random sensor readout noise with known autocorrelation
matrix $\langle \bm{n} \bm{n}^{T} \rangle_{n}$,
$\delta \phi(\bm{x})$ is an additive aberration due to propagation from
telescope entrance pupil to the exit pupil conjugate to the WFS location. We
will assume for the moment that this aberration can be perfectly calibrated
out, so $\delta \phi (\bm{x}) = 0$.
\end{enumerate}
This small set of parameters is enough to fully describe a linear model of an
AO system.
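As a concrete illustration, the discretized model is easy to assemble
numerically. The following sketch (hypothetical \texttt{numpy} code; the
measurement operator, the influence functions, and the noise level are random
toy stand-ins for calibrated data) builds the measurement equation
(\ref{eq:measurement-equation}) with $\delta \phi(\bm{x}) = 0$ and the DM
correction of Eq. (\ref{eq:dm-phase-correction}) on a coarse pupil grid:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n_phi, n_act, n_meas = 64, 12, 32  # pupil grid points, actuators, WFS slopes

M = rng.standard_normal((n_meas, n_phi))  # toy measurement operator
F = rng.standard_normal((n_phi, n_act))   # columns = influence functions f_i
sigma_n = 0.05                            # WFS readout noise rms

phi = rng.standard_normal(n_phi)          # a realization of the wavefront
s = M @ phi + sigma_n * rng.standard_normal(n_meas)  # s = M[phi] + n

c = rng.standard_normal(n_act)            # some DM command vector
phi_dm = F @ c                            # DM correction f^T(x) c
\end{verbatim}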
\subsection{Minimum Mean Square Error AO control}
\label{subsec:MMSE-control}
The goal of the \emph{Minimum Mean Square Error} (MMSE) \index{Minimum Mean
Square Error} AO control is to find a command vector $\hat{\bm{c}}$ such that
the DM correction minimizes the target wavefront mean square phase error
(\emph{quadratic cost}) \index{quadratic cost} in the
telescope entrance pupil
\begin{equation} \label{eq:mmse-cost}
\langle J \rangle_{\phi,n} =
\langle ||\phi_{0}-\phi_{DM}||^{2} \rangle_{\phi,n},
\end{equation}
where Hilbert space norm
\begin{equation} \label{eq:Hilbert-norm}
|| a(\bm{x}) ||^{2} = [a(\bm{x}),a(\bm{x})] =
\frac{1}{|A|} \int_{A} ds \, a^{2}(\bm{x}),
\end{equation}
is derived from the Hilbert space metric
\begin{equation} \label{eq:Hilbert-metric}
[a(\bm{x}),b(\bm{x})] = \frac{1}{|A|} \int_{A} ds \, a(\bm{x}) b(\bm{x}),
\end{equation}
$A$ is the telescope entrance pupil domain (the \emph{aperture}),
\index{aperture} $|A|$ is the aperture area, and $\langle \rangle_{\phi,n}$
denotes averaging over the joint statistics of the input turbulent wavefront and
the sensor noise. We can consider two cases of the quadratic cost
minimization: 1) \emph{DM fitting} \index{DM fitting} and 2) \emph{phase
estimation}. \index{phase estimation}
\subsubsection{DM fitting}
The DM fitting problem statement is:
given target wavefront phase $\phi_{0}(\bm{x})$ at the entrance pupil find the
DM command vector $\hat{\bm{c}}$ such that the deterministic wavefront error
is minimized:
\begin{equation} \label{eq:DM-fit-minimization}
\hat{\bm{c}} = \arg \min_{\forall \bm{c}}
||\phi_{0} - \bm{f}^{T} \bm{c}||^{2}.
\end{equation}
It is known from the theory of Hilbert spaces that the above equation is
equivalent to
\begin{equation} \label{eq:deterministic-orthogonality-principle}
[\phi_{0} - \bm{f}^{T} \hat{\bm{c}}, \bm{f}] = 0,
\end{equation}
which is a form of the \emph{orthogonality principle} stating that
\begin{flushleft}
\texttt{the optimal fitting error is orthogonal to the subspace spanned by
the influence functions.}
\end{flushleft}
Solving Eq. (\ref{eq:DM-fit-minimization}) or the equivalent Eq.
(\ref{eq:deterministic-orthogonality-principle}) yields for the optimal
control command
\begin{equation} \label{eq:fitting-commands}
\hat{\bm{c}} = [\bm{f},\bm{f}^{T}]^{\dagger} [\bm{f},\phi_{0}],
\end{equation}
where $[\bm{f},\bm{f}^{T}]$ is called the \emph{Gramm matrix} of the function set
$\bm{f}(\bm{x})$ and $^{\dagger}$ stands for the pseudo-inverse. The Gramm matrix is
square and is invertible if the influence functions $\bm{f}(\bm{x})$ are
linearly independent. Since linear independence is not guaranteed for real
DM influence functions, a filtered pseudo-inverse is used. Note that with a
pseudo-inverse the orthogonality principle does not hold exactly. This,
however, is easily fixed if we redefine the influence functions as, e.g., a
subset of orthogonal singular modes of the Gramm matrix with sufficiently
large singular values.
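In discretized form the fitting step is a one-line computation. The sketch
below (hypothetical \texttt{numpy} code, assuming the metric
(\ref{eq:Hilbert-metric}) reduces to the Euclidean inner product on the grid)
computes the command of Eq. (\ref{eq:fitting-commands}) and checks the
orthogonality principle (\ref{eq:deterministic-orthogonality-principle}):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
F = rng.standard_normal((64, 12))   # columns = discretized influence functions
phi = rng.standard_normal(64)       # discretized target phase phi_0

gram = F.T @ F                              # Gramm matrix [f, f^T]
c_hat = np.linalg.pinv(gram) @ (F.T @ phi)  # optimal fitting command
phi_ctrl = F @ c_hat                        # controllable part of phi_0
phi_unctrl = phi - phi_ctrl                 # uncontrollable part

# fitting error is orthogonal to the span of the influence functions
assert np.allclose(F.T @ phi_unctrl, 0.0, atol=1e-8)
\end{verbatim}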
The optimal \emph{fitting error} is \index{fitting error}
\begin{equation} \label{eq:fitting-error}
J_{c} = [\phi_{0} - \bm{f}^{T} \hat{\bm{c}},\phi_{0} -
\bm{f}^{T} \hat{\bm{c}}]
\end{equation}
$$
= [\phi_{0} - \bm{f}^{T} \hat{\bm{c}},\phi_{0}]
$$
$$
= [\phi_{0},\phi_{0}] -
[\bm{f}^{T},\phi_{0}] [\bm{f},\bm{f}^{T}]^{\dagger} [\bm{f},\phi_{0}],
$$
where we used Eqs. (\ref{eq:deterministic-orthogonality-principle}) and
(\ref{eq:fitting-commands}). The orthogonality principle states that the phase
can be presented as a sum of two mutually orthogonal \emph{controllable}
$\hat{\phi}_{0}$ and
\emph{uncontrollable} $\check{\phi}_{0}$ parts \index{controllable part}
\index{uncontrollable part}
\begin{equation} \label{eq:controllable-uncontrollable}
\phi (\bm{x}) = \hat{\phi}_{0} (\bm{x}) + \check{\phi}_{0} (\bm{x}),
\end{equation}
where
\begin{equation} \label{eq:controllable-projection}
\hat{\phi}_{0} = \bm{f}^{T} [\bm{f},\bm{f}^{T}]^{\dagger} [\bm{f},\phi_{0}] =
\mathcal{F} \phi_{0},
\end{equation}
\begin{equation} \label{eq:uncontrollable-projection}
\check{\phi}_{0} = \phi_{0} - \mathcal{F} (\phi_{0}) =
(\mathcal{I - F})\phi_{0}
\end{equation}
and $\mathcal{F},\mathcal{I - F}$ are orthogonal projection operators on,
respectively, \emph{controllable} and \emph{uncontrollable subspaces} of the
influence function set $\bm{f}$. \index{controllable subspace}
\index{uncontrollable subspace} Note that the dimension of the controllable
subspace is finite, so either $\hat{\bm{c}}$ or $\bm{\phi}_{0} =
[\bm{f},\phi_{0}]$
can serve as the natural discrete representation of the controllable part of the
wavefront phase, the only part of interest in the AO control.
\subsubsection{Phase estimation}
The phase estimation problem statement is: given sensor measurements $\bm{s}$
find an estimate $\tilde{\phi}_{0}(\bm{x})$ of the target source phase in the
entrance pupil such
that the mean square error is minimized over the measurement statistics, in
our case, the joint turbulence and sensor noise statistics:
\begin{equation} \label{eq:estimation-minimization}
\tilde{\phi}_{0} =
\arg \min_{\forall \mathcal{E}}
\langle
||\phi_{0} - \mathcal{E} \bm{s}||^{2}
\rangle_{\phi,n},
\end{equation}
where $\mathcal{E}$ is the linear estimator operator.
Analogously to the deterministic orthogonality principle, the \emph{orthogonality
principle of statistical estimation} \index{orthogonality principle} states that
\begin{flushleft}
\texttt{optimal estimator error is statistically orthogonal to the
measurements,}
\end{flushleft}
i.e., for our case
\begin{equation} \label{eq:statistical-orthogonality-principle}
\langle ( \mathcal{E}\bm{s} - \phi_{0} ) \bm{s}^{T} \rangle_{\phi,n} = 0.
\end{equation}
Solving Eq. (\ref{eq:estimation-minimization}) or its equivalent
(\ref{eq:statistical-orthogonality-principle}) yields
\begin{equation} \label{eq:phase-estimate}
\tilde{\phi}_{0} = \langle \phi_{0} \bm{s}^{T} \rangle_{\phi,n}
\langle \bm{s} \bm{s}^{T} \rangle_{\phi,n}^{-1} \bm{s}.
\end{equation}
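For a Gaussian toy model the covariances entering Eq. (\ref{eq:phase-estimate})
are available in closed form: with $\bm{s} = \mathcal{M}\phi_{0} + \bm{n}$
(target and reference assumed to coincide),
$\langle \phi_{0} \bm{s}^{T} \rangle = C_{\phi} \mathcal{M}^{T}$ and
$\langle \bm{s} \bm{s}^{T} \rangle = \mathcal{M} C_{\phi} \mathcal{M}^{T} +
\sigma_{n}^{2} I$. A hypothetical \texttt{numpy} sketch of the resulting MMSE
estimator:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
n_phi, n_meas = 64, 32
M = rng.standard_normal((n_meas, n_phi))  # toy measurement operator
C_phi = np.eye(n_phi)                     # toy phase covariance
sigma_n = 0.05                            # WFS noise rms

C_phis = C_phi @ M.T                                  # <phi_0 s^T>
C_ss = M @ C_phi @ M.T + sigma_n**2 * np.eye(n_meas)  # <s s^T>
E = C_phis @ np.linalg.inv(C_ss)          # MMSE estimator matrix

phi = rng.standard_normal(n_phi)
s = M @ phi + sigma_n * rng.standard_normal(n_meas)
phi_tilde = E @ s                         # estimate of the observable part
\end{verbatim}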
The optimal estimation error is
\begin{equation} \label{eq:estimation-error}
\langle J_{e} \rangle_{\phi,n} =
\langle
[\phi_{0} - \tilde{\phi}_{0},\phi_{0} - \tilde{\phi}_{0}]
\rangle_{\phi,n}
\end{equation}
$$
= \langle [\phi_{0} - \tilde{\phi}_{0},\phi_{0}] \rangle_{\phi,n}
$$
$$
= \langle [\phi_{0},\phi_{0}] \rangle_{\phi,n} -
\langle \phi_{0} \bm{s}^{T} \rangle_{\phi,n}
\langle \bm{ss}^{T} \rangle_{\phi,n}^{-1}
\langle \bm{s} \phi_{0} \rangle_{\phi,n},
$$
where Eqs. (\ref{eq:statistical-orthogonality-principle}),
(\ref{eq:phase-estimate}) were used.
Comparing Eq. (\ref{eq:phase-estimate}) with Eq.
(\ref{eq:controllable-projection}) reveals that
the statistical estimation and fitting problems have essentially the same
structure. Indeed, Eq. (\ref{eq:phase-estimate}) coincides with Eq.
(\ref{eq:controllable-projection}) for the controllable part of the wavefront
after substitutions
$$
\langle \bm{s} \phi_{0}(\bm{x}) \rangle_{\phi,n} \rightarrow
\bm{f}(\bm{x}), \,\,
\texttt{(estimation influence functions)},
$$
$$
\langle \bm{s} \bm{s}^{T} \rangle_{\phi,n} \rightarrow [\bm{f},\bm{f}^{T}],
\,\, \texttt{(estimation Gramm matrix)},
$$
$$
\bm{s} \rightarrow [\bm{f},\phi_{0}], \,\,
\texttt{(projection on measurements)},
$$
$$
\tilde{\phi}_{0} \rightarrow \hat{\phi}_{0}, \,\,
\texttt{(observable part of wavefront)}.
$$
Thus, there exists another, ``observable-unobservable'', orthogonal
decomposition of the input wavefront (see Ref. \cite{WibergMaxGavel2} for the
proof):
\begin{equation} \label{eq:observable-unobservable}
\phi_{0} (\bm{x}) = \tilde{\phi}_{0} (\bm{x}) + \bar{\phi}_{0} (\bm{x}),
\end{equation}
where
\begin{equation} \label{eq:observable-projection}
\tilde{\phi}_{0} = \langle \bm{s}^{T} \phi_{0} \rangle_{\phi,n}
\langle \bm{s} \bm{s}^{T} \rangle_{\phi,n}^{-1}
\mathcal{M} \phi = \mathcal{O} \phi,
\end{equation}
\begin{equation} \label{eq:unobservable-projection}
\bar{\phi}_{0} = \phi_{0} - \mathcal{O} \phi.
\end{equation}
Again, the observable part of the wavefront, the only part of interest for AO
wavefront sensing, is finite-dimensional, and either $\bm{s}$ or
$\bm{w} = \langle \bm{s} \bm{s}^{T} \rangle_{\phi,n}^{-1} \bm{s}$ can be
naturally used as discrete representations of $\tilde{\phi}_{0}$.
\subsubsection{Joint estimation and fitting, separation principle}
Now consider the problem of joint estimation and fitting, namely, given
measurement $\bm{s}$ and a set of influence functions $\bm{f}(\bm{x})$ find the
control commands $\hat{\bm{c}}$ such that
\begin{equation} \label{eq:joint-minimization}
\hat{\bm{c}} = \mathcal{C} \bm{s} =
\arg \min_{\forall \mathcal{C}}
\langle
|| \phi_{0} - \bm{f}^{T} \mathcal{C} \bm{s} ||^{2}
\rangle_{\phi,n},
\end{equation}
where $\mathcal{C}$ is the estimator matrix creating linear mapping from the
set of sensor measurements to the set of DM commands. Expanding the norm in Eq.
(\ref{eq:joint-minimization}) one gets
\begin{equation} \label{eq:cost-expansion}
|| \phi_{0} - \hat{\phi} ||^{2} =
|| (\phi_{0} - \tilde{\phi}_{0}) + (\tilde{\phi}_{0} - \hat{\phi}) ||^{2}
\end{equation}
$$
= || \bar{\phi}_{0} ||^{2} +
2 [\bar{\phi}_{0},
\tilde{\phi}_{0} - \bm{f}^{T} \mathcal{C} \bm{s} ] +
|| \tilde{\phi}_{0} - \bm{f}^{T} \mathcal{C} \bm{s} ||^{2},
$$
$$
\hat{\phi} = \bm{f}^{T} \mathcal{C} \bm{s}.
$$
$
\langle
[\bar{\phi}_{0}, \tilde{\phi}_{0} - \bm{f}^{T} \mathcal{C} \bm{s} ]
\rangle_{\phi,n} = 0
$
for an optimal phase estimate $\tilde{\phi}_{0}$ because of the orthogonality
principle (\ref{eq:statistical-orthogonality-principle}). Thus
\begin{equation} \label{eq:separated-cost}
\langle J \rangle_{\phi,n} =
\langle || \phi - \hat{\phi} ||^{2} \rangle_{\phi,n}
\end{equation}
$$
= \langle || \bar{\phi}_{0} ||^{2} \rangle_{\phi,n} +
\langle
|| \tilde{\phi}_{0} - \bm{f}^{T} \mathcal{C} \bm{s} ||^{2}
\rangle_{\phi,n},
$$
which is known as the \emph{separation principle of the quadratic control}.
\index{separation principle}
Eq. (\ref{eq:separated-cost}) shows that the overall error can be minimized
in two independent steps:
\begin{enumerate}
\item Find the observable part of the target phase $\tilde{\phi}_{0}$ from
Eq. (\ref{eq:phase-estimate}).
\item For a given measurement $\bm{s}$ the estimate $\tilde{\phi}_{0}$ is a
fixed (non-stochastic) quantity, so the $\langle
\rangle_{\phi,n}$ brackets can be dropped for the second term, reducing its
minimization to deterministic fitting of the actuator influence
functions to the phase estimate according to Eq. (\ref{eq:fitting-commands}).
\end{enumerate}
Following this path, i.e. substituting Eq. (\ref{eq:phase-estimate}) into Eq.
(\ref{eq:fitting-commands}), we get for the optimal reconstructor matrix
\begin{equation} \label{eq:optimal-reconstructor}
\hat{\mathcal{C}} = [\bm{f},\bm{f}^{T}]^{\dagger}
[\bm{f}, \langle \phi_{0} \bm{s}^{T} \rangle_{\phi,n} ]
\langle \bm{s} \bm{s}^{T} \rangle_{\phi,n}^{-1}.
\end{equation}
The error for this reconstructor is
\begin{equation} \label{eq:reconstruction-error}
\langle \hat{J} \rangle_{\phi,n} =
\langle \bar{\phi}_{0} \rangle_{\phi,n} +
\langle \check{\tilde{\phi}}_{0} \rangle_{\phi,n},
\end{equation}
where the first term is the phase estimation error given by Eq.
(\ref{eq:estimation-error}), second term is the fitting error of the
observable phase to the influence functions and is given by Eq.
(\ref{eq:fitting-error}) after substituting $\tilde{\phi}_{0}$ instead of
$\phi_{0}$.
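In the toy discretization the separation principle is visible directly in the
code: the optimal reconstructor of Eq. (\ref{eq:optimal-reconstructor}) is the
deterministic fitting matrix applied to the MMSE phase estimator (again a
hypothetical \texttt{numpy} sketch with random stand-in matrices):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)
n_phi, n_act, n_meas = 64, 12, 32
F = rng.standard_normal((n_phi, n_act))   # influence functions
M = rng.standard_normal((n_meas, n_phi))  # measurement operator
C_phi = np.eye(n_phi)                     # phase covariance
sigma_n = 0.05

C_ss = M @ C_phi @ M.T + sigma_n**2 * np.eye(n_meas)
E = (C_phi @ M.T) @ np.linalg.inv(C_ss)   # step 1: MMSE phase estimator
fit = np.linalg.pinv(F.T @ F) @ F.T       # step 2: deterministic DM fit
C_opt = fit @ E                           # optimal reconstructor C-hat

s = M @ rng.standard_normal(n_phi) + sigma_n * rng.standard_normal(n_meas)
c_hat = C_opt @ s                         # optimal DM commands
\end{verbatim}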
Another important result derived in Ref. \cite{WibergMaxGavel2} concerns
the estimate of the reconstruction error due to a non-optimal reconstruction
matrix:
\begin{equation} \label{eq:non-optimal-reconstruction-error}
\langle J \rangle_{\phi,n} - \langle \hat{J} \rangle_{\phi,n} =
\texttt{Tr} \left\{
[\bm{f},\bm{f}^{T}]
( \mathcal{C} - \hat{\mathcal{C}} )
\langle \bm{s} \bm{s}^{T} \rangle_{\phi,n}
( \mathcal{C} - \hat{\mathcal{C}} )^{T} \right\},
\end{equation}
where $\mathcal{C}$ is an arbitrary control matrix that is not computed
according to the prescription (\ref{eq:optimal-reconstructor}) and
$\langle J \rangle_{\phi,n}$ is the corresponding reconstruction error.
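Eq. (\ref{eq:non-optimal-reconstruction-error}) provides a cheap way to assess
the penalty of an approximate control matrix. A quick numerical check
(hypothetical \texttt{numpy} sketch; the excess error vanishes for
$\mathcal{C} = \hat{\mathcal{C}}$ and is non-negative otherwise, since both
$[\bm{f},\bm{f}^{T}]$ and $\langle \bm{s}\bm{s}^{T} \rangle$ are positive
semi-definite):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(3)
F = rng.standard_normal((64, 12))         # influence functions
M = rng.standard_normal((32, 64))         # measurement operator
C_ss = M @ M.T + 0.05**2 * np.eye(32)     # <s s^T> for C_phi = I
gram = F.T @ F                            # [f, f^T]
C_opt = np.linalg.pinv(gram) @ F.T @ M.T @ np.linalg.inv(C_ss)

def excess_error(C):
    """Tr{ [f,f^T] (C - C_opt) <s s^T> (C - C_opt)^T }."""
    D = C - C_opt
    return np.trace(gram @ D @ C_ss @ D.T)

print(excess_error(C_opt))               # 0 for the optimal reconstructor
print(excess_error(np.round(C_opt, 2)))  # > 0 for a quantized approximation
\end{verbatim}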
\subsubsection{Estimator for projected wavefront}
A modification of the phase estimation algorithm is needed when it is
necessary to estimate the part of the input phase extracted
from $\phi_{0}(\bm{x})$ by a projection operation
\begin{equation} \label{eq:projection}
\phi_{p}(\bm{x}) = \mathcal{P} ( \phi_{0} (\bm{x}) ),
\end{equation}
where $\mathcal{P}$ is a linear \emph{projection operator}. \index{projection
operator} Examples of such an operator are the controllable/uncontrollable
projectors $\mathcal{F}$ and $\mathcal{(I-F)}$
discussed above, and the high-pass and low-pass spatial filters that
are an indispensable part of the GMT LTAO control strategy to be discussed
later in this document.
The optimal minimum least squares estimator for
$\mathcal{P} (\phi_{0})$ wavefront
instead of $\phi_{0}$ is derived from the minimization problem
\begin{equation} \label{eq:projected-estimation}
\mathcal{E}_{p} = \arg \min_{\forall \mathcal{E}}
\langle |\mathcal{P} (\phi_{0}) -
\mathcal{E} \bm{s}|^{2} \rangle
\end{equation}
or from the orthogonality principle
\begin{equation} \label{eq:projected-estimator-orthogonality-principle}
\langle (\mathcal{P} (\phi_{0}) -
\mathcal{E}_{p} \bm{s}) \bm{s}^{T} \rangle = 0,
\end{equation}
which, due to linearity of $\mathcal{P}$, trivially yields
\begin{equation} \label{eq:projected-estimator}
\mathcal{E}_{p} = \mathcal{P(E)},
\end{equation}
where $\mathcal{E}$ is given by Eq. (\ref{eq:phase-estimate}). Interestingly,
if Eq. (\ref{eq:projected-estimator}) is used to find an optimal estimate
of the controllable part $\hat{\phi}_{0}$ of the input phase, Eq.
(\ref{eq:optimal-reconstructor}) for the optimal joint reconstructor is
recovered.
\subsubsection{Information deficiency in the WFS model. Aliasing error.}
\mbox{}
Practical MMSE controllers prove to be sub-optimal due to
information deficiency in the underlying system models. The MMSE approach
is model-based, i.e. it relies on an internal mathematical representation of
the real system. It is the ultimate goal of the system modeling for the
MMSE-based AO control to build the internal model only on physically measurable
(calibratable) data. There are, however, multiple causes for the necessary
data to be unmeasurable. A fundamental model deficiency is that only a
finite-dimensional approximation of the sensor measurement operator
$\mathcal{M}$ is possible based on the measurable data because this operator
maps the infinite-dimensional space $\mathbb{H}$ of the wavefront phases
$\phi(\bm{x})$ onto the finite-dimensional space $\mathbb{S}$ of the sensor
readouts. Assume there are orthogonal bases in both $\mathbb{H}$ and
$\mathbb{S}$. Since the dimensionality of the $\mathbb{H}$-basis is higher
than that of $\mathbb{S}$, there is no way to find such a pair of bases that
each $\mathbb{S}$ basis function maps onto exactly one $\mathbb{H}$ basis
function, i.e. generally all components of $\phi(\bm{x})$, low and high order,
act on (``alias with'') each WFS measurement $s_{i}$. The aliasing is not a
problem as long as the $\mathcal{M}$ operator is known exactly because in this
case the $\langle \phi_{0} \bm{s}^{T} \rangle$ and $\langle \bm{s} \bm{s}^{T}
\rangle$ catch the correct ``aliasing pattern''. One can, however, practically
measure only $\mathcal{M}(\mathcal{P}\phi)$, where $\mathcal{P}$ is the
projector on a finite-dimensional subspace of $\mathbb{H}$. The resulting
finite-dimensional measurement operator $\mathcal{MP}$, if substituted into
Eq. (\ref{eq:measurement-equation}) and then into Eq.
(\ref{eq:optimal-reconstructor}), will result in an incorrect aliasing
pattern prediction and thus in a suboptimal reconstructor. The corresponding
additional
\emph{aliasing error} \index{aliasing error} can be estimated through
Eq. (\ref{eq:non-optimal-reconstruction-error}) once an exact, or at least a
more elaborate, model for the true measurement operator $\mathcal{M}$ is known.
A natural choice for the finite dimensional measurement operator approximation
$\mathcal{MP}$ is when $\mathcal{P} = \mathcal{F}$, the projection on the
controllable subspace (see Eq. (\ref{eq:controllable-projection})). In this
case $\mathcal{MP} = \mathcal{M}\bm{f}^{T} = \mathcal{D}$, where $\mathcal{D}$
is the DM \emph{poke matrix} \index{poke matrix} that is measurable by
recording the sensor readouts due to the action of the DM actuators
controlled (``poked'') one at a time. Since,
generally, the controllable part of the wavefront reliably represents low
spatial frequency content of the wavefront aberrations, it is wise to apply a
low-pass spatial filter to the reference wavefront $\phi$ falling on the
sensor in order to reduce the discrepancy between the true action
$\mathcal{M} (\mathcal{P}_{LP} \phi)$ of the low-pass filtered wavefront and
its prediction $\mathcal{MP} (\mathcal{P}_{LP} \phi)$ and thus the aliasing
error \cite{Poyneer1}. Note that the optimal reconstructor equation
(\ref{eq:optimal-reconstructor}) has to be modified by replacing $\bm{s} =
\mathcal{M} \phi$ with $\bm{s}_{LP} = \mathcal{M} ( \mathcal{P}_{LP} \phi )$,
which, in general, will increase the optimal reconstruction error. This
increase is, however, expected to be compensated by the much more
significant decrease of the aliasing error contribution.
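As an aside, the poke matrix $\mathcal{D} = \mathcal{M} \bm{f}^{T}$ is
directly calibratable; a schematic push--pull calibration procedure, with a
hypothetical \texttt{wfs\_readout} function standing in for the real sensor,
could look like:
\begin{verbatim}
import numpy as np

def calibrate_poke_matrix(wfs_readout, n_act, amp=1.0):
    # Record the WFS response to each actuator poked one at a time;
    # push-pull differencing removes static aberrations.
    cols = []
    for i in range(n_act):
        c = np.zeros(n_act)
        c[i] = amp
        cols.append((wfs_readout(c) - wfs_readout(-c)) / (2.0 * amp))
    return np.column_stack(cols)  # D, one column per actuator
\end{verbatim}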
\subsubsection{Dynamic and closed-loop control}
The MMSE controller described above is an oversimplified version of a real AO
control algorithm based on two fundamental simplifying assumptions:
\begin{itemize}
\item \emph{Open-loop operation}: \index{open-loop operation} it is assumed
that the sensor measures the input signal in the exit pupil directly,
without any correction elements in the optical path in front of the sensor.
A real AO system rarely works in the open-loop regime because of the small
dynamic range of the existing WFSs. The more practical \emph{closed-loop} operation
assumes that all the correction elements (DMs, tip/tilt mirrors, etc.) are
located in front of sensors and the latter measure the difference between
the input signal (turbulent wavefront) and its correction by DMs. In this
case the input to the WFS is not
$\phi(\bm{x})$ but $\delta \phi(\bm{x})$ and the input to
controller is not $\bm{s}$ but $\delta \bm{s}$, the \emph{error signal}.
\index{error signal}
\item \emph{Non-dynamic} operation: \index{non-dynamic operation} it is
assumed that signals propagate through the control system instantaneously
and without temporal shape distortions. In reality, dynamic effects exist in
the system. The two most important are: 1) signal delays due to
data transfers, CCD exposure/readout time and controller computation time,
2) signal distortions due to finite temporal bandwidth of the correction
mechanism actuators. The dynamic effects increase the residual error and may
also lead to system instability in closed-loop regime. To introduce dynamic
effects one has to consider all quantities to be time-dependent by adding
the sub-index $n$, $n=1,...,\infty$, for discrete time.
\end{itemize}
\begin{figure}[htp]
\begin{center}
\begin{tabular}{c}
\includegraphics[width = 0.9\textwidth]{Forward.png} \\
(a) \\
\includegraphics[width = 0.9\textwidth]{Back.png} \\
(b) \\
\end{tabular}
\end{center}
\caption{Open-loop (a) and closed-loop (b) AO controller block diagrams.}
\label{fig:forward-back}
\end{figure}
Simplified signal block diagrams for an AO system in open-loop and closed-loop
configurations are shown on Fig. \ref{fig:forward-back}.
The system dynamics are modeled by adding: 1) a
$k$-step signal delay element with $z$-domain transfer function $z^{-k}$ to
account for delays in sensor and controller; 2) a filter with $z$-domain
transfer function (matrix) $d(z)$ to account for the DM dynamic
effects. It is possible to derive a relationship between the open-loop (or
\emph{feedforward})
reconstructor $\mathcal{C}$ and the closed-loop (or \emph{feedback}) one
$\mathcal{C}_{FB}$ by noticing that, to deliver the same output signal, one
must have
\begin{equation} \label{eq:open-to-close}
\mathcal{C} \bm{s} = \mathcal{C}_{FB} \delta \bm{s}.
\end{equation}
From the diagrams:
\begin{equation} \label{eq:sopen-to-sclose}
\delta \bm{s}(z) = \bm{s}(z) - z^{-k} d(z) \mathcal{M} ( \bm{f}^{T}
\mathcal{C}_{FB} \delta \bm{s}(z) )
\end{equation}
$$
= \bm{s}(z) - z^{-k} d(z) \mathcal{DC}_{FB} \delta \bm{s}(z),
$$
where $\mathcal{D} = \mathcal{M} (\bm{f}^{T})$ is the \emph{poke matrix}
\index{poke matrix}
relating the action of each DM influence function to the WFS measurements.
Substitution of Eq. (\ref{eq:sopen-to-sclose}) into Eq.
(\ref{eq:open-to-close}) yields
\begin{equation} \label{eq:fw-to-fb}
\mathcal{C}_{FB} = \mathcal{C}
( \mathcal{I} - z^{-k} d(z) \mathcal{DC} )^{-1}.
\end{equation}
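For example, Eq. (\ref{eq:fw-to-fb}) can be evaluated on the unit circle
$z = e^{j \omega T}$ to compare the feedforward and feedback frequency
responses; the sketch below assumes a scalar DM dynamics filter $d(z)$ and a
two-frame delay, both illustrative:
\begin{verbatim}
import numpy as np

def feedback_reconstructor(C, D, d_of_z, z, k=2):
    # C_FB(z) = C (I - z^{-k} d(z) D C)^{-1}, Eq. (fw-to-fb).
    I = np.eye(D.shape[0])  # identity in measurement space
    return C @ np.linalg.inv(I - z**(-k) * d_of_z(z) * (D @ C))
\end{verbatim}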
The transfer functions from the input wavefront phase $\phi(n)$ to the AO system
residual phase error $\delta \phi(n)$ (\emph{error rejection transfer
function}) \index{error rejection transfer function} for the feedforward and
feedback controllers shown on Fig. \ref{fig:forward-back} are:
\begin{equation} \label{eq:forward-transfer}
(\phi \rightarrow \delta \phi)(z) =
\mathcal{I} -
z^{-k} d(z) \mathcal{D} \mathcal{C};
\end{equation}
\begin{equation} \label{eq:feedback-transfer}
(\phi \rightarrow \delta \phi)_{FB}(z) =
( \mathcal{I}+z^{-k} d(z) \mathcal{D} \mathcal{C}_{FB} )^{-1}.
\end{equation}
The open-loop reconstructor $\mathcal{C}$ can also
be used directly in the \emph{pseudo open-loop} \index{pseudo
open-loop} (POL) setting of the closed-loop control when the open-loop
measurement is approximately restored
through an internal model of the DM. In the case of a linear internal model,
the approximate (pseudo) open-loop WFS measurement $\hat{\bm{s}}$ is
\begin{equation} \label{eq:wfs-restoration}
\hat{\bm{s}} = \delta \bm{s} + \mathcal{D} \bm{c}.
\end{equation}
Block diagram for a dynamic MMSE controller working in the POL regime is
shown on Fig. \ref{fig:POL}. The integrator/corrector filter $g(z)$ is used to
produce the absolute DM
commands from differential ones in a way ensuring system stability and dynamic
error minimization.
\begin{figure}[htp]
\begin{center}
\includegraphics[width = 0.8\textwidth]{POL.png}
\end{center}
\caption{Pseudo Open-Loop MMSE controller block diagram.}
\label{fig:POL}
\end{figure}
The set of dynamic equations for the POL controller is:
\begin{align}
\texttt{(pseudo open-loop measurement)} \nonumber \\
\bm{s}(n) = \delta \bm{s}(n-k) + \hat{\bm{s}}(n); \label{eq:POL-meas} \\
\texttt{(pseudo open-loop command)} \nonumber \\
\bm{c}(n) = \mathcal{C} \bm{s}(n); \label{eq:POL-est} \\
\texttt{(command increment)} \nonumber \\
\delta \bm{c}(n) = \bm{c}(n) - \hat{\bm{c}}(n); \\
\texttt{(integrator/corrector state space equations, see Appendix
\ref{app:DF})} \nonumber \\
\bm{x}^{g}(n+1) = \mathcal{A}^{g} \bm{x}^{g}(n) +
\mathcal{B}^{g} \delta \bm{c}(n), \\
\hat{\bm{c}}(n) = \mathcal{C}^{g} \bm{x}^{g}(n) +
\mathcal{D}^{g} \delta \bm{c}(n); \\
\texttt{(pseudo open-loop measurement estimate)} \nonumber \\
\hat{\bm{s}}(n) = \mathcal{D} \hat{\bm{c}}(n).
\end{align}
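A non-dynamic reading of these equations, with the integrator/corrector
state-space filter replaced by a plain leaky integrator for brevity (gains
and names are illustrative, not the GMT baseline), gives the following
per-frame update:
\begin{verbatim}
import numpy as np

class POLControllerSketch:
    def __init__(self, C, D, gain=0.5, leak=0.99):
        self.C, self.D = C, D
        self.gain, self.leak = gain, leak
        self.c_hat = np.zeros(C.shape[0])  # absolute DM command

    def step(self, delta_s):
        s_hat = self.D @ self.c_hat    # pseudo open-loop measurement estimate
        s_pol = delta_s + s_hat        # restored open-loop measurement
        c_pol = self.C @ s_pol         # pseudo open-loop command
        delta_c = c_pol - self.c_hat   # command increment
        self.c_hat = self.leak * self.c_hat + self.gain * delta_c
        return self.c_hat
\end{verbatim}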
Two important transfer functions (matrices) can be derived from the Fig.
\ref{fig:POL} diagram: 1) the transfer from WFS output $\delta \bm{s}(n)$ to
controller output $\hat{\bm{c}}(n)$ (\emph{controller transfer function}):
\index{controller transfer function}
\begin{equation} \label{eq:WFS-to-Control}
(\delta \bm{s} \rightarrow \hat{\bm{c}})_{POL}(z) =
z^{-2} g(z) \left[ \mathcal{I} +
g(z) (\mathcal{I}-\mathcal{RD}) \right]^{-1} \mathcal{C},
\end{equation}
and 2) the error rejection transfer function:
\begin{equation} \label{eq:phase-to-error}
(\phi \rightarrow \delta \phi)_{POL}(z) =
\left[ \mathcal{I} + d(z) \bm{f}^{T}
(\delta \bm{s} \rightarrow \hat{\bm{c}})_{POL}(z) \mathcal{D} \right]^{-1}.
\end{equation}
Although formally equivalent, the feedforward, feedback and POL controllers
have different stability and error propagation properties.
\subsection{Tomographic MMSE reconstructor}
\label{subsec:MMSE-tomo}
\mbox{}
A generalization of the single-conjugate AO control is the \emph{star-oriented
tomography}. \index{star-oriented tomography} The AO control problem is
restated as:
\begin{itemize}
\item Given is a set of thin phase screens (PS)
$\{ \phi^{t}_{PS}(\bm{x}) \}_{t=1}^{\#PS}$
located at a number of altitudes above the telescope and representing the
atmospheric
turbulence on the path from the light sources to the telescope aperture.
\item Likewise, given is a set of phase screens $\{ \phi^{m}_{DM}(\bm{x})
\}_{m=1}^{\#DM}$ representing the corrections by several DMs that are
conjugated to a number of altitudes above the telescope. Each DM phase
screen is a linear combination of the DM influence functions:
\begin{equation} \label{eq:mcao-dm-inf}
\phi^{m}_{DM}(\bm{x}) = \bm{f}_{m} (\bm{x}) \bm{c}_{m},
\end{equation}
where $\bm{f}_{m} (\bm{x})$ are the $m^{th}$ DM influence function set,
$\bm{c}_{m}$ is the $m^{th}$ DM control command vector.
\item There are $\#TAR$ scientific targets to be imaged. Associated with
each target and each PS or DM is a set
$( \{ \mathcal{T}^{PS}_{lt} \}_{l=1,t=1}^{\#TAR,\#PS},
\{ \mathcal{T}^{DM}_{lm} \}_{l=1,m=1}^{\#TAR,\#DM} )$
of the \emph{propagation operators}
\index{propagation operator} that map PS or DM phase distributions to the
wavefront phase distribution in the telescope entrance pupil. Under the
assumption that these operators are linear, which is justified for weak
turbulence, the phase from the $l^{th}$ light source in the entrance pupil due to
all PS phase distortions and all DM phase corrections is
\begin{equation} \label{eq:mcao-pupil-phase}
\delta \phi^{l}_{0}(\bm{x}) =
\sum_{t=1}^{\#PS}
\mathcal{T}^{PS}_{lt}(\phi_{PS}^{t}(\bm{x})) -
\sum_{m=1}^{\#DM}
\mathcal{T}^{DM}_{lm}(\phi_{DM}^{m}(\bm{x})),
\,\, l = 1,...,\#TAR.
\end{equation}
\item Likewise, there are $\#REF$ reference sources feeding light to
wavefront sensors conjugated to the telescope entrance pupil. Associated
with the reference sources as well as the PSs and DMs are the propagation
operators
$( \{ \mathcal{R}^{PS}_{st} \}_{s=1,t=1}^{\#REF,\#PS},
\{ \mathcal{R}^{DM}_{sm} \}_{s=1,m=1}^{\#REF,\#DM} )$
mapping PS or DM phase distributions to the
wavefront phase distribution in the sensor conjugate exit pupils. The
closed-loop phase distribution on an $s^{th}$ sensor due to all PS phase
distortions and all DM phase corrections is
\begin{equation} \label{eq:mcao-sensor-phase}
\delta \phi^{s}(\bm{x}) =
\sum_{t=1}^{\#PS}
\mathcal{R}^{PS}_{st}(\phi_{PS}^{t}(\bm{x})) -
\sum_{m=1}^{\#DM}
\mathcal{R}^{DM}_{sm}(\phi_{DM}^{m}(\bm{x})),
\,\, s = 1,...,\#REF.
\end{equation}
Correspondingly, the $s^{th}$ sensor (closed-loop) readouts are
\begin{equation} \label{eq:mcao-sensor-read}
\delta \bm{s}_{s} = \mathcal{M}_{s} (\delta \phi^{s}(\bm{x})),
\end{equation}
where $\mathcal{M}_{s}$ is the measurement operator associated with the $s^{th}$
sensor.
\item The goal of the tomographic AO control is: given a set of sensor
measurements $\bm{s} = \{ \bm{s}_{s} \}_{s=1}^{\#REF}$ find the commands
on all the DMs that minimize
\begin{equation} \label{eq:mcao-cost}
\langle J \rangle_{\phi,n} =
\sum_{l=1}^{\#TAR}
w_{l} \langle || \delta \phi_{0}^{l} ||^{2} \,|\, \bm{s} \rangle_{\phi,n},
\,\, \sum_{l=1}^{\#TAR} w_{l} = 1,
\end{equation}
where $\bm{w} = \{ w_{l} \}_{l=1}^{\#TAR}$ is the set of \emph{target
direction relative
weights} \index{target direction weights} and the conditional expectation
(estimator) is taken over the joint
statistics of the turbulence layers and the sensor noise.
\end{itemize}
For compactness of notation we introduce the following concatenations:
\begin{itemize}
\item Propagation operator matrices
\begin{equation} \label{eq:ps-tar-matrix}
\mathcal{T}^{PS} = \{ \mathcal{T}^{PS}_{lt} \}_{l=1,t=1}^{\#TAR,\#PS};
\end{equation}
\begin{equation} \label{eq:dm-tar-matrix}
\mathcal{T}^{DM} = \{ \mathcal{T}^{DM}_{lm} \}_{l=1,m=1}^{\#TAR,\#DM};
\end{equation}
\begin{equation} \label{eq:ps-ref-matrix}
\mathcal{R}^{PS} = \{ \mathcal{R}^{PS}_{st} \}_{s=1,t=1}^{\#REF,\#PS};
\end{equation}
\begin{equation} \label{eq:dm-ref-matrix}
\mathcal{R}^{DM} = \{ \mathcal{R}^{DM}_{sm} \}_{s=1,m=1}^{\#REF,\#DM}.
\end{equation}
\item Phase vectors
\begin{equation} \label{eq:ps-vector}
\bm{\phi}_{PS} (\bm{x}) = \{ \phi_{PS}^{t} (\bm{x}) \}_{t=1}^{\#PS};
\end{equation}
\begin{equation} \label{eq:dm-vector}
\bm{\phi}_{DM} (\bm{x}) = \{ \phi_{DM}^{m} (\bm{x}) \}_{m=1}^{\#DM};
\end{equation}
\begin{equation} \label{eq:ps-dm-tar-vector}
\delta \bm{\phi}_{0} (\bm{x}) =
\{ \delta \phi_{0}^{l} (\bm{x}) \}_{l=1}^{\#TAR};
\end{equation}
\begin{equation} \label{eq:ps-dm-ref-vector}
\delta \bm{\phi} (\bm{x}) =
\{ \delta \phi^{s} (\bm{x}) \}_{s=1}^{\#REF}.
\end{equation}
\item Sensor measurement vector
\begin{equation} \label{eq:mcao-meas}
\delta \bm{s} =
\{ \mathcal{M}_{s} (\delta \phi^{s}(\bm{x})) \}_{s=1}^{\#REF}.
\end{equation}
\item DM influence matrix and control command vector
\begin{equation} \label{eq:mcao-inf}
\mathcal{F} (\bm{x}) = \texttt{diag} \{ \bm{f}_{m} (\bm{x}) \}_{m=1}^{\#DM};
\end{equation}
\begin{equation} \label{eq:mcao-command}
\bm{c} = \{ \bm{c}_{m} \}_{m=1}^{\#DM}.
\end{equation}
\item Target direction weighting matrix
\begin{equation} \label{eq:direction-weight}
\mathcal{W} = \texttt{diag} \{ w_{l} \}_{l=1}^{\#TAR},
\end{equation}
and the \emph{weighted norm} \index{weighted norm}
\begin{equation} \label{eq:weighted-norm}
|| \bm{\phi} ||^{2}_{\mathcal{W}} =
\sum_{i} w_{i} || \bm{\phi}_{i} ||^{2} =
[ \bm{\phi}^{T}, \mathcal{W} \bm{\phi} ].
\end{equation}
\end{itemize}
In this notation the minimization problem for the cost function given in Eq.
(\ref{eq:mcao-cost}) can be written as
\begin{equation} \label{eq:mcao-minimization}
\hat{\bm{c}} = \arg \min_{\forall \bm{c}}
\langle
||
\mathcal{T}^{PS} (\bm{\phi}_{PS}) -
\mathcal{T}^{DM} (\mathcal{F}^{T}) \bm{c}
||^{2}_{\mathcal{W}} \,|\, \bm{s}
\rangle_{\phi,n},
\end{equation}
which is a tomographic analog of Eq. (\ref{eq:joint-minimization}).
The tomographic analog to the deterministic DM fitting problem is then stated
as: find the control commands $\hat{\bm{c}}$ for all DMs such that
\begin{equation} \label{eq:tomo-dm-fitting}
\hat{\bm{c}} = \arg \min_{\forall \bm{c}}
||
\mathcal{T}^{PS} (\bm{\phi}_{PS}) -
\mathcal{T}^{DM} (\mathcal{F}^{T}) \bm{c}
||^{2}_{\mathcal{W}}.
\end{equation}
To derive a solution to this problem we first consider the matrix analog of the
orthogonality principle, Eq. (\ref{eq:deterministic-orthogonality-principle}).
Replace each influence function $f_{i}(\bm{x})$ with e.g. a finite vector of
its values on the entrance pupil point grid:
$$ f_{i}(\bm{x}) \rightarrow \bm{f}_{i} = \{ f_{i}(\bm{x}_{j}) \}_{j=1}^{\#PT},
\,\, i = 1,...,\#ACT. $$
Then $ \bm{f}^{T}(\bm{x}) \bm{c} \rightarrow \mathcal{F}^{T} \bm{c} $, where
$\mathcal{F}^{T} = [\bm{f}_{1} \,...\, \bm{f}_{\#ACT}]$ and the matrix form
of Eq. (\ref{eq:deterministic-orthogonality-principle}) reads as
\begin{equation} \label{eq:matrix-orthogonality-principle}
\mathcal{F} (\bm{x}_{0} - \mathcal{F}^{T} \bm{c}) = 0,
\,\, \bm{x}_{0} = \{ \phi_{0}(\bm{x}_{j}) \}_{j=1}^{\#PT}.
\end{equation}
Applying this to Eq. (\ref{eq:tomo-dm-fitting}) one derives by analogy
\begin{equation} \label{eq:tomo-orthogonality}
[ ( \mathcal{T}^{DM} (\mathcal{F}^{T}) )^{T},
\mathcal{T}^{PS} (\bm{\phi}_{PS}) -
\mathcal{T}^{DM} (\mathcal{F}^{T}) \bm{c} ]_{\mathcal{W}} = 0
\end{equation}
and
\begin{equation} \label{eq:tomo-dm-fit}
\hat{\bm{c}} =
[ (\mathcal{T}^{DM} (\mathcal{F}^{T}))^{T},
\mathcal{T}^{DM} (\mathcal{F}^{T}) ]^{-1}_{\mathcal{W}}
[ (\mathcal{T}^{DM} (\mathcal{F}^{T}))^{T},
\mathcal{T}^{PS} (\bm{\phi}_{PS}) ]_{\mathcal{W}},
\end{equation}
where the \emph{weighted Hilbert metric} is defined as
\index{weighted Hilbert metric}
\begin{equation} \label{eq:weighted-Hilbert-metric}
[\bm{a}(\bm{x}),\bm{b}(\bm{x})]_{\mathcal{W}} =
[ \bm{a}^{T}(\bm{x}), \mathcal{W} \bm{b}(\bm{x}) ],
\end{equation}
and $[*,*]$ is the usual Hilbert space metric. Correspondingly, the controllable
part of the phase in the entrance pupil for all the target directions is
\begin{equation} \label{eq:tomo-controllable}
\hat{\bm{\phi}}_{0} = \mathcal{T}^{DM} (\mathcal{F}^{T}) \hat{\bm{c}}.
\end{equation}
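In discrete form, with $\mathcal{A} = \mathcal{T}^{DM} (\mathcal{F}^{T})$
sampled as a matrix and the target-direction phases stacked into a single
vector, Eq. (\ref{eq:tomo-dm-fit}) reduces to an ordinary weighted
least-squares problem; a sketch under these assumptions:
\begin{verbatim}
import numpy as np

def weighted_dm_fit(A, W, phi):
    # c_hat = (A^T W A)^{-1} A^T W phi, the discrete Eq. (tomo-dm-fit).
    # A   : (#TAR * #PT, #ACT) stacked DM-to-pupil influence propagation
    # W   : diagonal matrix of target-direction weights
    # phi : stacked turbulent phases in the target directions
    AtW = A.T @ W
    return np.linalg.solve(AtW @ A, AtW @ phi)
\end{verbatim}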
The tomographic phase estimation problem is stated as: find the estimate of
the phase $\tilde{\bm{\phi}}_{0}$ in the entrance pupil for all the target
directions such that
\begin{equation} \label{eq:tomo-phase-estimation}
\tilde{\bm{\phi}}_{0} = \arg \min_{\forall \mathcal{E}}
\langle
||
\mathcal{T}^{PS} (\bm{\phi}_{PS}) -\mathcal{E} \bm{s}
||^{2}_{\mathcal{W}}
\rangle_{\phi,n}
\end{equation}
with the solution written by analogy with Eq. (\ref{eq:phase-estimate})
\begin{equation} \label{eq:tomo-phase-estimate}
\tilde{\bm{\phi}}_{0} =
\langle
\mathcal{T}^{PS} (\bm{\phi}_{PS}) \bm{s}^{T}
\rangle_{\phi,n}
\langle
\bm{s} \bm{s}^{T}
\rangle_{\phi,n}^{-1} \bm{s}.
\end{equation}
Finally, according to the separation principle, the tomographic
reconstructor generates the estimate $\hat{\tilde{\bm{\phi}}}_{0}$ optimal in
the sense of Eq. (\ref{eq:mcao-minimization}) by substitution of Eq.
(\ref{eq:tomo-phase-estimate}) into Eq. (\ref{eq:tomo-dm-fit}) and then into
Eq. (\ref{eq:tomo-controllable}). This completes the description of a general
tomographic star-oriented AO system.
The GMT LTAO system can be described as a group of inter-connected tomographic
AO subsystems. The detailed mapping of the AO control theory developed in the
previous sections onto the GMT LTAO is given in the next section.
\subsection{GMT LTAO sub-systems}
\label{subsec:ltao-sub-systems}
\mbox{}
According to the \emph{split control concept} \index{split control concept}
the entire GMT LTAO system can be viewed as a set of weakly interacting
control sub-systems (feedback loops, see Fig. 4-35 in Ref.
\cite{GMT-LTAO-CODR}):
\begin{enumerate}
\item The \emph{ASM high order LGS-based} (ASM HO LGS) \index{ASM high order
LGS-based control loop}
control loop intended to reject high spatial order atmospheric/telescope
aberrations and using the LGS return for WFS measurements. This channel has
the following features:
\begin{itemize}
\item This channel works in closed-loop behind ASM.
\item Target is the wavefront $\phi_{Sc}$ from a scientific object.
\item Reference is the wavefront $\phi_{LGS}$ from the 6 LGSs.
\item The control commands generated in this channel are sent to the ASM.
\item The wavefront sensors for this channel are the 6 LGS WFSs.
\item The sampling rate is the one of the LGS WFSs.
\end{itemize}
\item The \emph{ASM low order NGS-based} (ASM LO NGS) or ``truth'' control
loop is used to provide low order ASM correction, which is impossible to
determine from the LGSs. Another possible use of the LO NGS WFS is to sense
the primary segment differential pistons. \index{ASM low order NGS-based
control loop} This channel has the following features:
\begin{itemize}
\item This channel works in closed-loop behind ASM and OI DM.
\item Target is the wavefront $\phi_{Sc}$ from a scientific object.
\item Reference is the wavefront $\phi_{NGS}$ from one NGS.
\item The control commands generated by this channel are sent to ASM.
\item The wavefront sensor for this channel is the LO NGS WFS (``truth
sensor'').
\item The sampling rate is the one of the LO NGS WFS.
\end{itemize}
\item The \emph{ASM tip/tilt NGS-based} (ASM TT NGS) \index{ASM tip/tilt
NGS-based control loop} control loop
intended to sense and correct the wavefront tip/tilt in the scientific object
direction using a natural guide star. This channel has
the following features:
\begin{itemize}
\item This channel works in closed-loop behind ASM and OI DM.
\item Target is the wavefront $\phi_{Sc}$ from a scientific object.
\item Reference is the wavefront $\phi_{NGS}$ from one NGS.
\item The control commands generated in this channel are sent to the ASM.
\item The wavefront sensor for this channel is a quad-cell tip/tilt NGS
(TT NGS) WFS.
\item The sampling rate is the one of the TT WFS.
\end{itemize}
\item The \emph{OI DM high order LGS-based} (OI DM HO LGS)
\index{OI DM high order LGS-based control loop} control loop is for
correcting the NGS wavefront by the OI DM in order to improve performance of
the NGS TT channel. This channel has the following features:
\begin{itemize}
\item This channel works in closed-loop behind ASM.
\item Target is the wavefront $\phi_{NGS}$ from a NGS.
\item Reference is the wavefront $\phi_{LGS}$ from 6 LGSs.
\item The control commands generated in this channel are sent to OI DM.
\item The wavefront sensors for this channel are the 6 LGS WFSs.
\item The control algorithm is closed-loop with respect to the ASM but
open-loop with respect to OI DM.
\item The sampling rate is the one of the LGS WFSs.
\end{itemize}
\item The \emph{OI DM low order NGS-based} (OI DM LO NGS) control loop is
to provide the low order OI DM correction, which is not possible to
determine from the LGSs. \index{OI DM low order NGS-based control loop}
This channel has the following features:
\begin{itemize}
\item This channel works in closed-loop behind ASM and OI DM.
\item Target is the wavefront $\phi_{NGS}$ from a NGS.
\item Reference is the wavefront $\phi_{NGS}$ from the same NGS.
\item The control commands generated by this channel are sent to OI DM.
\item The wavefront sensor for this channel is the LO NGS WFS.
\item The sampling rate is the one of the LO NGS WFS.
\end{itemize}
\end{enumerate}
One should also mention some auxiliary control channels used for, or together
with, LTAO. They are not based on the standard AO control scheme and use other
control approaches.
\begin{enumerate}
\item \emph{LGS focus stabilization subsystem} \index{LGS focus stabilization
subsystem} is used for adjusting the LGS WFS
module focal plane to follow slow LGS position changes in the sky. This
channel has the following features:
\begin{itemize}
\item The feedback signal is the global focus mode extracted from the 6
LGS WFS measurements.
\item The control is applied to a servo actuator moving the whole LGS WFS
module to adjust the focus. The controller needs to be optimized for the
(actuator + LGS module) dynamics.
\end{itemize}
\item \emph{LGS pupil de-rotation subsystem} \index{LGS pupil de-rotation
subsystem} is used for compensation of the exit
pupil rotation on the LGS module induced by changes in the telescope pointing.
This channel has the following features:
\begin{itemize}
\item The feedback signal for this channel is the correlated part of the
LGS WFS spot motions.
\item The control is applied to a servo actuator rotating the whole LGS
WFS module to adjust to the pupil rotation.
\end{itemize}
\item \emph{OI WFS acquisition/image stabilization subsystem} \index{OI WFS
acquisition/image stabilization subsystem} is used to find the NGS
in the telescope field-of-view, to point the light from the NGS to the
OI WFS detectors, and to stabilize it. This channel has the following features:
\begin{itemize}
\item An acquisition camera is used to provide a signal to the controller.
\item The control is applied to a two degree-of-freedom actuator attached
to the acquisition flat mirror located inside OI WFS module.
\item The acquisition camera and mirror work in closed-loop regime behind
ASM and OI DM.
\end{itemize}
\item \emph{GMT phasing subsystem} \index{GMT phasing subsystem} is an
\emph{active optics} system that keeps the telescope aligned, phased and
shape-stabilized.
\end{enumerate}
The auxiliary control sub-systems do not directly participate in the AO
correction but the errors in these systems become the additional error inputs
for the main AO controllers, thus indirectly degrading the AO system
performance.
%!!!!!!
%Let the low order wavefront part to be removed from the wavefront $\bm{x}_{Sc}$
%by the $\mathcal{P}_{HO}$-matrix or extracted from $\bm{x}_{Sc}$ by the
%$\mathcal{P}_{LO}$-matrix can be presented as a linear combination
%$$
%\bm{x}_{LO} = \sum_{i=1}^{L} w_{i} \bm{q}_{i}
%$$
%of the mutually orthonormal modes $\{ \bm{q}_{i} \}_{i=1}^{L}$, then the
%projection matrices are:
%\begin{eqnarray} \label{eq:l0-ho-projectors}
%\mathcal{P}_{LO} &=& \mathcal{QQ}^{T}, \\
%\mathcal{P}_{HO} &=& \mathcal{I} - \mathcal{QQ}^{T}, \\
%\mathcal{Q} &=& [\bm{q}_{1} \, ... \, \bm{q}_{L}], \,\,
%\mathcal{Q}^{T} \mathcal{Q} = \mathcal{I}.
%\end{eqnarray}
%In case $\{ \bm{q}_{i} \}_{i=1}^{L}$ are not orthonormal but linearly
%independent, they can be easily orthonormalized by means of the
%QR-decomposition.
%!!!!!! \\
\subsection{GMT LTAO models}
\label{subsec:LTAO-models}
\mbox{}
This section describes the elements of the GMT LTAO system mathematical
description: signals, commands, matrices, operators, and transfer functions.
\subsubsection{LTAO system element models}
\label{subsubsec:ltao-elements}
The GMT LTAO system is a version of the star-oriented tomographic controller with
several channels as described in the previous section. The main physical parts
of the system that need to be modeled together with their interaction are:
\begin{enumerate}
\item Six (6) Laser Launch Telescopes (LLT) generating six off-axis,
finite-altitude \texttt{Na}
Laser Guide Stars (LGS) in a regular pattern in the sky. The LGS geometry is
described in detail in Sec. \ref{sec:lgs}. Table \ref{tab:lgs-parameters}
gives approximate (subject to change in the course of development)
parameters of the GMT LGS reference light sources.
\begin{table}[htp]
\begin{center}
\begin{tabular}{c|cccccc}
\hline
\hline
& 1 & 2 & 3 & 4 & 5 & 6 \\
\hline
\hline
$x_{l}^{t}$, m &&&&&& \\
$y_{l}^{t}$, m &&&&&& \\
$z_{l}^{t}$, m &&&&&& \\
\hline
$\alpha_{l}^{t}$, rad &&&&&& \\
$\beta_{l}^{t}$, rad &&&&&& \\
\hline
$h_{\texttt{Na}}$, m & \multicolumn{6}{|c}{$90 \times10^{3}$} \\
\hline
\hline
\end{tabular}
\end{center}
\caption{GMT LGS parameters: $(x,y,z)_{l}^{t}$ -- $l^{th}$ LLT location
coordinates with respect to GMT entrance pupil; $(\alpha,\beta)_{l}^{t}$ --
$l^{th}$ LLT direction (first and second Euler angles)
with respect to GMT
optical axis; $h_{\texttt{Na}}$ -- mean altitude of the \texttt{Na} layer.
See Sec. \ref{sec:lgs} for details about the notation.}
\label{tab:lgs-parameters}
\end{table}
\item One (1) infinite-altitude Natural Guide Star (NGS) for tip/tilt and
possibly other low order aberration sensing. It can be located anywhere
within the 2$^{\prime\prime}$ telescope field of view. Note that the NGS is a
reference source for the LGS channel and also both target and reference
source for the On-Instrument WFS channel.
\item One (1) infinite-altitude Science Target (ST) light source to be
imaged through the LTAO
system. Normally it is assumed to be on the GMT optical axis but we reserve
a possibility for it to be at $(\alpha,\beta)^{t}_{ST} \neq 0$ off-axis
direction (first and second Euler angles) with respect to the optical axis.
\item Atmospheric turbulence propagation path. It is parameterized by the
$C_{n}^{2}$ profile and wind profile, split into a number of discrete
turbulence phase screens (PS) located at altitudes
$\{ h_{s} \}_{s=1}^{\#PS}$, with the covariance functions
$\{ \langle \phi^{PS}_{s}(\bm{x}_{1}) \phi^{PS}_{s}(\bm{x}_{2}) \rangle \}_{s=1}^{\#PS}$,
where $\bm{\phi}^{PS} (\bm{x}) = \{ \phi^{PS}_{s}(\bm{x}) \}_{s=1}^{\#PS}$
are the instantaneous phase distributions on the
screens (see details in Section \ref{sec:atmosphere}).
Propagation from the target and reference light sources through the
atmosphere is described through the propagation operator matrices:
\begin{itemize}
\item $\mathcal{T}_{ST}^{PS}$ -- propagates light from the scientific
target through turbulence phase screen phase distributions
$\bm{\phi}^{PS}(\bm{x})$ to the entrance pupil.
\item $\mathcal{R}_{LGS}^{PS}$ -- propagates light from the 6 LGSs
through turbulence phase screen phase distributions
$\bm{\phi}^{PS}(\bm{x})$ to the entrance pupil.
\item $\mathcal{R}_{NGS}^{PS}$ -- propagates light from the NGS
through turbulence phase screen phase distributions
$\bm{\phi}^{PS}(\bm{x})$ to the entrance pupil.
\end{itemize}
\item GMT entrance pupil. It is parameterized through:
\begin{itemize}
\item Pupil location $(x,y,z)^{g}_{t}$ and orientation
$(\alpha,\beta)_{t}^{g}$ coordinates with respect to the
laboratory coordinate system (see Sec. \ref{sec:lgs}).
\item Pupil shape (see Ref. \cite{ConanGMTmath}).
\end{itemize}
\item GMT deformable mirrors:
\begin{itemize}
\item Adaptive Secondary Mirror (ASM). It is parameterized by the set of
influence functions $\bm{f}_{ASM}$, the DM phase screen
$\phi_{ASM}(\bm{x})$ conjugated to altitude $h_{ASM} = TBD$, and the
ASM transfer matrix
$\mathcal{H}_{ASM}(z)$ responsible for the mirror dynamics.
\item Analogously, the On-Instrument Deformable Mirror (OI DM) is
parameterized through $\bm{f}_{DM}$,
$\phi_{DM}(\bm{x})$, $h_{DM} = TBD$, $\mathcal{H}_{DM}(z)$.
\end{itemize}
Light propagation from the target and reference sources through ASM and
DM phase screens to the entrance pupil is described by propagation operator
matrices:
\begin{itemize}
\item $\mathcal{T}_{ST}^{DM}$ -- propagates light from the scientific
target through OI DM to the entrance pupil.
\item $\mathcal{T}_{ST}^{ASM}$ -- propagates light from the scientific
target through ASM to the entrance pupil.
\item $\mathcal{R}_{LGS}^{ASM}$ -- propagates light from the 6 LGSs
through ASM to the entrance pupil.
\item $\mathcal{R}_{NGS}^{DM}$ -- propagates light from the NGS
through OI DM to the entrance pupil.
\item $\mathcal{R}_{NGS}^{ASM}$ -- propagates light from the NGS
through ASM to the entrance pupil.
\end{itemize}
More details on the GMT deformable mirrors can be found in Sec. \ref{sec:dm}.
\item There are three (3) wavefront sensors:
\begin{itemize}
\item LGS HO WFS consisting of 6 sensor blocks. It is described by poke
matrix $\mathcal{D}_{LGS}^{ASM}$ for interaction between LGS HO WFS
and ASM, sensor noise covariance matrix
$\langle \bm{n}_{LGS}^{} \bm{n}_{LGS}^{T} \rangle$, anti-aliasing filter
operator $\mathcal{P}_{LGS}$ for action of the LGS HO WFS field
stops on the input wavefront phase.
\item NGS TT WFS described by the poke matrix $\mathcal{D}_{TT}^{ASM}$ for
interaction between the NGS TT WFS
and ASM, poke matrix $\mathcal{D}_{TT}^{DM}$ for interaction between NGS
TT WFS and the OI DM, sensor noise covariance matrix
$\langle \bm{n}_{TT}^{} \bm{n}_{TT}^{T} \rangle$, anti-aliasing filter
operator $\mathcal{P}_{TT}$ describing action of the NGS TT WFS field
stop on the input wavefront phase.
\item NGS LO WFS described by the poke matrix $\mathcal{D}_{LO}^{ASM}$ for
interaction between the NGS LO WFS
and ASM, poke matrix $\mathcal{D}_{LO}^{DM}$ for interaction between NGS
LO WFS and the OI DM, sensor noise covariance matrix
$\langle \bm{n}_{LO}^{} \bm{n}_{LO}^{T} \rangle$, anti-aliasing filter
operator $\mathcal{P}_{LO}$ describing action of the NGS LO WFS field
stop on the input wavefront phase.
\end{itemize}
More details on the GMT wavefront sensors can be found in
Sec. \ref{sec:sh-wfs}.
\item The following signals circulate in the system:
\begin{itemize}
\item $\bm{\phi}_{PS}^{}(\bm{x})$ -- instantaneous phase distortions on the
atmospheric turbulence phase screens.
\item $\bm{\phi}_{ASM}^{}(\bm{x})$ -- instantaneous phase corrections on the
ASM.
\item $\bm{\phi}_{DM}^{}(\bm{x})$ -- instantaneous phase corrections on the
OI DM.
\item $\delta \bm{\phi}^{HO}_{ST} (\bm{x}) = \mathcal{T}^{PS}_{ST}
(\bm{\phi}_{PS}^{}) - \mathcal{T}^{ASM}_{ST} (\bm{\phi}_{ASM}^{})$
-- instantaneous phase distribution in the entrance pupil from
atmospheric turbulence and ASM corrections propagated from the Science
Target. This is the scientific target wavefront seen in the LGS HO WFS
channel and, up to
non-common path aberrations, in the scientific instrument channel.
\item $\delta \bm{\phi}^{OI}_{ST} (\bm{x}) = \mathcal{T}^{PS}_{ST}
(\bm{\phi}_{PS}^{}) - \mathcal{T}^{ASM}_{ST} (\bm{\phi}_{ASM}^{}) -
\mathcal{T}^{DM}_{ST} (\bm{\phi}_{DM}^{})$
-- instantaneous phase distribution in the entrance pupil from
atmospheric turbulence and both ASM and OI DM corrections propagated from
the Science Target. This is the scientific target wavefront seen in the OI
WFS channel.
\item $\delta \bm{\phi}_{LGS}^{HO} (\bm{x}) = \mathcal{R}^{PS}_{LGS}
(\bm{\phi}_{PS}^{}) - \mathcal{R}^{ASM}_{LGS} (\bm{\phi}_{ASM}^{})$
-- instantaneous phase distribution in the entrance pupil from both
atmospheric turbulence and ASM corrections propagated from the 6 LGSs. This
is the reference source wavefront(s) seen in the LGS HO WFS channel.
\item $\delta \bm{\phi}^{HO}_{NGS} (\bm{x}) = \mathcal{R}^{PS}_{NGS}
(\bm{\phi}_{PS}^{}) - \mathcal{R}^{ASM}_{NGS} (\bm{\phi}_{ASM}^{})$
-- instantaneous phase distribution in the entrance pupil from
atmospheric turbulence and ASM corrections propagated from the NGS. This is
the NGS wavefront seen in the LGS HO WFS
channel and used as a target for the OI DM control algorithm.
\item $\delta \bm{\phi}_{NGS}^{OI} (\bm{x}) = \mathcal{R}^{PS}_{NGS}
(\bm{\phi}_{PS}^{}) - \mathcal{R}^{ASM}_{NGS} (\bm{\phi}_{ASM}^{}) -
\mathcal{R}^{DM}_{NGS} (\bm{\phi}_{DM}^{})$
-- instantaneous phase distribution in the entrance pupil from
atmospheric turbulence and both ASM and DM corrections propagated from the
NGS. This is the reference source wavefront seen in the OI WFS channel.
\item $\delta \bm{s}_{LGS}^{} = \mathcal{M}^{LGS} \mathcal{P}_{LGS}
(\delta \bm{\phi}_{LGS}^{HO}) + \bm{n}_{LGS}^{}$
-- instantaneous LGS HO WFS readout, where $\mathcal{M}^{LGS}$ is the exact
(infinite-dimensional) LGS HO WFS measurement operator, $\bm{n}_{LGS}^{}$ is
the instantaneous LGS HO WFS noise readout.
\item $\delta \bm{s}_{TT}^{} = \mathcal{M}^{TT} \mathcal{P}_{TT}
(\delta \bm{\phi}_{NGS}^{OI}) + \bm{n}_{TT}^{}$
-- instantaneous NGS TT WFS readout, where $\mathcal{M}^{TT}$ is the exact
(infinite-dimensional) NGS TT WFS measurement operator, $\bm{n}_{TT}^{}$ is
the instantaneous NGS TT WFS noise readout.
\item $\delta \bm{s}_{LO}^{} = \mathcal{M}^{LO} \mathcal{P}_{LO}
(\delta \bm{\phi}_{NGS}^{OI}) + \bm{n}_{LO}^{}$
-- instantaneous NGS LO WFS readout, where $\mathcal{M}^{LO}$ is the exact
(infinite-dimensional) NGS LO WFS measurement operator, $\bm{n}_{LO}^{}$ is
the instantaneous NGS LO WFS noise readout.
\item $\bm{c}_{ASM}^{} = \bm{c}^{TT}_{ASM} + \bm{c}^{LO}_{ASM} +
\bm{c}^{HO}_{ASM}$ -- instantaneous ASM control command vector consisting
of tip/tilt (TT), low order (LO) and high order (HO) corrections.
\item $\bm{c}_{DM}^{} = \bm{c}^{LO}_{DM} + \bm{c}^{HO}_{DM}$ --
instantaneous OI DM
control command vector consisting of low and high order corrections.
\item $\bm{c}_{LGS}^{}$ -- instantaneous LGS control command vector
consisting of LGS WFS platform focus and rotation commands and the 6 LLT
tip/tilt corrections.
\end{itemize}
\item The following projection operators are used for channel splitting:
\begin{itemize}
\item $\mathcal{P}_{ST}^{HO}$ -- spatial high-pass projection operator
acting on the $\delta \bm{\phi}_{ST}^{HO}$ wavefront.
\item $\mathcal{P}_{ST}^{LO}$ -- spatial low-pass projection operator
acting on the $\delta \bm{\phi}_{ST}^{HO}$ wavefront.
\item $\mathcal{P}_{NGS}^{HO}$ -- spatial high-pass projection operator
acting on the $\delta \bm{\phi}_{NGS}^{HO}$ wavefront.
\item $\mathcal{P}_{NGS}^{LO}$ -- spatial low-pass projection operator
acting on the $\delta \bm{\phi}_{NGS}^{OI}$ wavefront.
\item $\mathcal{P}_{NGS}^{TT}$ -- spatial low-pass projection operator
extracting tip/tilt from the $\delta \bm{\phi}_{NGS}^{OI}$ wavefront.
\end{itemize}
\end{enumerate}
The vectors, matrices and operators listed above are the
building blocks for the LTAO system control algorithms and simulations. Below
we elaborate on the internal structure of these objects.
\subsubsection{Phase screens and propagation operators}
\label{subsubsec:phase-and-propagation}
As described in Sec. \ref{subsec:MMSE-tomo}, the phase
distribution vector $\bm{\phi}_{PS}(\bm{x})$ is a concatenation of the
turbulence distributions $\phi^{PS}_{t}(\bm{x})$ on the individual phase
screens, where $\bm{x}$ are the $(x,y)^{t}$-coordinates in the $t$-system
(see Sec. \ref{sec:lgs}), assuming, as we will throughout this document,
that the phase screens are perpendicular to the telescope optical axis
regardless of the pointing. The phase distribution vector
$\bm{\phi} (\bm{x})$ in the entrance pupil, where $\bm{x}$ are the
pupil $(x,y)^{t}$-coordinates, is a concatenation of phase distributions
corresponding to propagation from the several target or reference sources.
Because of the assumed linearity of the propagation operators, the propagation
operator matrices $\mathcal{T}$ and $\mathcal{R}$ are
block $\#SRC\times\#PS$ matrices, where $\#SRC$ is the number of sources and
$\#PS$ is the number of phase screens. Each block $ij$ of a propagation matrix
is a scalar propagation operator mapping the phase distribution on the $j^{th}$
phase screen to the entrance pupil as propagated from the $i^{th}$ source. Thus, the
operators involved in the GMT LTAO model are
\begin{itemize}
\item $\mathcal{T}_{ST}^{PS}$ is a $1 \times \#PS$-matrix;
\item $\mathcal{R}_{LGS}^{PS}$ is a $6 \times \#PS$-matrix;
\item $\mathcal{R}_{NGS}^{PS}$ is a $1 \times \#PS$-matrix;
\item $\mathcal{T}_{ST}^{ASM}$ and $\mathcal{T}_{ST}^{DM}$ are $1 \times
1$-matrices and, to simplify the equation for $\delta \bm{\phi}_{ST}^{OI}$, these
two matrices can be concatenated into one $1 \times 2$-matrix
$\mathcal{T}_{ST}^{OI}$;
\item $\mathcal{R}_{NGS}^{ASM}$ and $\mathcal{R}_{NGS}^{DM}$ are $1 \times
1$-matrices and, to simplify the equation for $\delta \bm{\phi}_{NGS}^{OI}$, these
two matrices can be concatenated into one $1 \times 2$-matrix
$\mathcal{R}_{NGS}^{OI}$.
\end{itemize}
The $\mathcal{T}$ and $\mathcal{R}$ propagation operators have especially
simple form in case of the geometrical optics propagation (see Section
\ref{sec:lgs}):
\begin{equation} \label{eq:shift-propagator}
\mathcal{H}(\bm{\phi}(\bm{x})) = \bm{\phi}(a\bm{x}+\bm{b}),
\end{equation}
where $\mathcal{H}$ stands either for $\mathcal{T}$ or $\mathcal{R}$ and
$(a,\bm{b})$ are computed from Eq. (\ref{eq:pupil-to-layer}) for a finite
altitude point source (LGS) or Eq. (\ref{eq:pupil-to-layer-inf}) for an
infinite altitude point source (ST or NGS).
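In this geometrical-optics form a propagation operator amounts to re-sampling
the layer phase at scaled and shifted pupil coordinates; a minimal sketch
using bilinear interpolation (grid and coordinate names are placeholders):
\begin{verbatim}
import numpy as np
from scipy.interpolate import RegularGridInterpolator

def propagate_layer(phase, layer_y, layer_x, pupil_xy, a, b):
    # phi_pupil(x) = phi_layer(a*x + b), Eq. (shift-propagator).
    interp = RegularGridInterpolator((layer_y, layer_x), phase,
                                     bounds_error=False, fill_value=0.0)
    pts = a * pupil_xy + np.asarray(b)  # scaled/shifted (x, y) points
    return interp(pts[:, ::-1])         # interpolator expects (y, x)
\end{verbatim}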
\subsubsection{Projection operators}
\label{subsubsec:projection-operators}
For the ideal channel split the following relations hold:
\begin{equation} \label{eq:HO-split}
\mathcal{I} = \mathcal{P}_{ST}^{HO} + \mathcal{P}_{ST}^{LO},
\end{equation}
\begin{equation} \label{eq:HO-LO-TT-split}
\mathcal{I} = \mathcal{P}_{NGS}^{HO} + \mathcal{P}_{NGS}^{LO} +
\mathcal{P}_{NGS}^{TT}.
\end{equation}
Thus, only three projection operators out of five need to be computed. Since
all the splitting projection operators act on the wavefront phase from a single
light source (ST or NGS), the $\mathcal{P}$ operators are $1 \times 1$-matrices,
i.e. just scalar operators.
Let an
Let an
orthonormal basis $\bm{q}(\bm{x}) = \{ q_{i}(\bm{x}) \}_{i=1}^{\infty} $ be
defined on the entrance pupil domain $A$ with the basis functions
$q_{i}(\bm{x})$.
Then the orthogonal projector onto a subset $ \{ q_{i}(\bm{x}) \}_{i \in S} $ is
\begin{equation} \label{eq:orthogonal-projector-def}
\mathcal{P}^{S} ( \phi(\bm{x}) ) = \bm{q}^{T}_{S} [\bm{q}_{S}^{},\phi],
\end{equation}
where $\bm{q}_{S}^{} (\bm{x}) = \{ q_{i} (\bm{x}) \}_{i \in S}$, $[*,*]$ is the
Hilbert space metric defined in Eq. (\ref{eq:Hilbert-metric}). If the basis
functions are sorted in ascending order with respect to the abundance of high
order details, then the $\mathcal{P}^{TT}$-operators are made of
$\{ q_{1},q_{2} \}$ (and, hopefully, $q_{1}(\bm{x})$ and $q_{2}(\bm{x})$ are
indeed very similar to tip and tilt, which is not guaranteed for a
segmented aperture), the $\mathcal{P}^{LO}$-operators are made of
$\{ q_{3}, ..., q_{LO}^{} \}$, etc.
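In matrix form, with the first basis functions sampled on the pupil grid as
the columns of an orthonormal matrix $\mathcal{Q}$, the splitting projectors
of Eqs. (\ref{eq:HO-split}) and (\ref{eq:HO-LO-TT-split}) can be built as
follows (the mode counts are illustrative only):
\begin{verbatim}
import numpy as np

def channel_projectors(Q, n_tt=2, n_lo=20):
    # Q: (#PT, n_modes) orthonormal modal basis, Q.T @ Q = I.
    Q_tt, Q_lo = Q[:, :n_tt], Q[:, n_tt:n_lo]
    P_tt = Q_tt @ Q_tt.T                       # tip/tilt projector
    P_lo = Q_lo @ Q_lo.T                       # low-order projector
    P_ho = np.eye(Q.shape[0]) - P_tt - P_lo    # high-pass remainder
    return P_tt, P_lo, P_ho
\end{verbatim}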
%In the following sections details of the subsystem control algorithms are
%discussed. We concentrate there on the non-dynamic algorithm versions to
%highlight the basic operation flow and the data structures involved. The
%algorithm modifications to account for the dynamic effects are postponed to
%the later sections. Note, however, that the information about the non-dynamic
%algorithms is higly reusable in the dynamic versions.
\subsection{ASM HO LGS controller algorithm}
\subsection{ASM LO NGS controller algorithm}
\subsection{ASM TT NGS controller algorithm}
\subsection{OI DM HO LGS controller algorithm}
\subsection{OI DM LO NGS controller algorithm}
\subsection{GMT LTAO system fusion}
\label{subsec:ltao-system-fusion}
TBD
\subsection{GMT LTAO system dynamic analysis}
\label{subsec:ltao-dynamics}
TBD
\subsection{GMT LTAO system error and robustness analysis}
\label{subsec:ltao-errors}
TBD
|
{"hexsha": "389c71b8030994fd30db9d06e6838e9265d38942", "size": 64660, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "_docGMT/Control.tex", "max_stars_repo_name": "cmcorreia/oomao", "max_stars_repo_head_hexsha": "59787859283e89cdb8c2ee88388198f283be9abb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-10-01T18:30:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T01:19:40.000Z", "max_issues_repo_path": "_docGMT/Control.tex", "max_issues_repo_name": "cmcorreia/oomao", "max_issues_repo_head_hexsha": "59787859283e89cdb8c2ee88388198f283be9abb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-30T17:22:35.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-30T17:22:35.000Z", "max_forks_repo_path": "_docGMT/Control.tex", "max_forks_repo_name": "cmcorreia/oomao", "max_forks_repo_head_hexsha": "59787859283e89cdb8c2ee88388198f283be9abb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.2187276626, "max_line_length": 81, "alphanum_fraction": 0.6798020414, "num_tokens": 20351}
|
#
# Copyright (c) 2019-2021 James Thorne.
#
# This file is part of factual error correction.
# See https://jamesthorne.co.uk for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import time
import warnings
import numpy as np
import torch
from collections import defaultdict
from torch.utils.data import DataLoader
from transformers import get_linear_schedule_with_warmup
from typing import List, Tuple, Dict
from pathlib import Path
from error_correction.modelling.base_transformer import BaseTransformer
from error_correction.modelling.dataset.error_correction_dataset import (
ErrorCorrectionSeq2SeqDataset,
)
from error_correction.modelling.lightning_base import add_generic_args
from error_correction.modelling.reader.mask_based_correction_reader import (
MaskBasedCorrectionReader,
)
from error_correction.modelling.reader.supervised_correction_reader import (
SupervisedCorrectionReader,
)
from error_correction.modelling.utils import (
is_truthy,
SARI_KEYS,
use_task_specific_params,
pickle_save,
freeze_params,
lmap,
flatten_list,
save_json,
calculate_sari,
post_clean,
assert_all_frozen,
)
class ErrorCorrectionModule(BaseTransformer):
mode = "error-correction"
val_metric = SARI_KEYS[2]
loss_names = ["loss"]
metric_names = SARI_KEYS
def __init__(self, hparams, **kwargs):
super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
use_task_specific_params(self.model, "error-correction")
self.metrics_save_path = Path(self.output_dir) / (
"metrics.json"
if not self.hparams.do_predict
else "metrics_test_{}.json".format(os.path.basename(self.hparams.test_file))
)
self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
pickle_save(self.hparams, self.hparams_save_path)
self.step_count = 0
self.metrics = defaultdict(list)
self.dataset_kwargs: dict = dict(
max_source_length=self.hparams.max_source_length,
mutation_source=is_truthy(self.hparams.mutation_source),
mutation_target=is_truthy(self.hparams.mutation_target),
)
n_observations_per_split = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
self.n_obs = {
k: v if v >= 0 else None for k, v in n_observations_per_split.items()
}
self.target_lens = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
self.data_paths = {
"train": self.hparams.train_file,
"val": self.hparams.val_file,
"test": self.hparams.test_file,
}
assert (
self.target_lens["train"] <= self.target_lens["val"]
), f"target_lens: {self.target_lens}"
assert (
self.target_lens["train"] <= self.target_lens["test"]
), f"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
self.freeze_embeds()
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder())
assert_all_frozen(self.model.get_encoder())
self.num_workers = hparams.num_workers
self.decoder_start_token_id = None
self.dataset_class = ErrorCorrectionSeq2SeqDataset
self.wiki_reader = self.get_reader(self.hparams.reader, self.hparams.do_predict)
def get_reader(self, name, test):
labels = set()
if self.hparams.labels == "all":
labels.add("SUPPORTS")
labels.add("REFUTES")
else:
labels.add(self.hparams.labels.upper())
print("XXX Reader labels {}".format(self.hparams.labels))
print("XXX labels", labels)
if name == "supervised":
return SupervisedCorrectionReader(labels, test)
elif name == "mask":
return MaskBasedCorrectionReader(labels, test)
else:
raise RuntimeError(f"Unknown reader {name}")
def freeze_embeds(self):
"""Freeze token embeddings and positional embeddings for bart, just token embeddings for t5."""
try:
freeze_params(self.model.model.shared)
for d in [self.model.model.encoder, self.model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
except AttributeError:
freeze_params(self.model.shared)
for d in [self.model.encoder, self.model.decoder]:
freeze_params(d.embed_tokens)
def forward(self, input_ids, **kwargs):
return self.model(input_ids, **kwargs)
def ids_to_clean_text(self, generated_ids: List[int]):
gen_text = self.tokenizer.batch_decode(
generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
return lmap(str.strip, gen_text)
def _step(self, batch: dict) -> Tuple:
pad_token_id = self.tokenizer.pad_token_id
src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
tgt_ids = batch["decoder_input_ids"]
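        # Teacher forcing: shift the gold target ids right to build the decoder inputs.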
decoder_input_ids = self.model._shift_right(tgt_ids)
outputs = self(
src_ids,
attention_mask=src_mask,
decoder_input_ids=decoder_input_ids,
use_cache=False,
)
lm_logits = outputs[0]
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)
assert lm_logits.shape[-1] == self.model.config.vocab_size
loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
return (loss,)
def training_step(self, batch, batch_idx) -> Dict:
loss_tensors = self._step(batch)
logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
return {"loss": loss_tensors[0], "log": logs}
def validation_step(self, batch, batch_idx) -> Dict:
return self._generative_step(batch)
def validation_epoch_end(self, outputs, prefix="val") -> Dict:
self.step_count += 1
losses = {
k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names
}
loss = losses["loss"]
rouges = {
k: np.array(flatten_list([x[k] for x in outputs])).mean().item()
for k in self.metric_names + ["gen_time", "summ_len"]
}
rouge_tensor: torch.FloatTensor = torch.tensor(rouges[self.val_metric]).type_as(
loss
)
rouges.update({k: v.item() for k, v in losses.items()})
losses.update(rouges)
metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
metrics["step_count"] = self.step_count
self.save_metrics(metrics, prefix) # writes to self.metrics_save_path
preds = flatten_list([x["preds"] for x in outputs])
targets = flatten_list([x["target"] for x in outputs])
metadata = flatten_list([x["metadata"] for x in outputs])
self.save_predictions(preds, targets, metadata, prefix)
return {
"log": metrics,
"preds": preds,
f"{prefix}_loss": loss,
f"{prefix}_{self.val_metric}": rouge_tensor,
}
def save_metrics(self, latest_metrics, type_path) -> None:
self.metrics[type_path].append(latest_metrics)
save_json(self.metrics, self.metrics_save_path)
def save_predictions(self, predictions, actual, metadata, type_path) -> None:
with open(
Path(self.output_dir)
/ (
"predictions_set_{}_epoch_{}_steps_{}_.jsonl".format(
type_path, self.trainer.current_epoch, self.step_count
)
if not self.hparams.do_predict
else "final_predictions_set_{}_file_{}".format(
type_path, os.path.basename(self.hparams.test_file)
)
),
"w+",
) as f:
for p, a, m in zip(predictions, actual, metadata):
f.write(
json.dumps({"prediction": p, "actual": a, "metadata": m}) + "\n"
)
def _generative_step(self, batch: dict) -> dict:
pad_token_id = self.tokenizer.pad_token_id
(
source_ids,
source_mask,
y,
original_ids,
) = ErrorCorrectionSeq2SeqDataset.trim_seq2seq_batch(batch, pad_token_id)
t0 = time.time()
generated_ids = self.model.generate(
input_ids=source_ids,
attention_mask=source_mask,
use_cache=True,
decoder_start_token_id=self.decoder_start_token_id,
max_length=self.hparams.val_max_target_length,
)
gen_time = (time.time() - t0) / source_ids.shape[0]
preds = self.ids_to_clean_text(generated_ids)
target = self.ids_to_clean_text(y)
original = self.ids_to_clean_text(original_ids)
loss_tensors = self._step(batch)
base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
metrics: Dict = self.calc_generative_metrics(original, preds, target)
summ_len = lmap(len, generated_ids)
base_metrics.update(
gen_time=[gen_time],
summ_len=summ_len,
preds=preds,
target=target,
metadata=batch["metadata"],
**metrics,
)
return base_metrics
def calc_generative_metrics(self, originals, preds, target) -> Dict:
# return calculate_rouge([p.replace(": ","") for p in preds], [p.replace("correction: ","") for p in target])
return calculate_sari(
originals, [post_clean(p) for p in preds], [post_clean(p) for p in target]
)
def test_step(self, batch, batch_idx):
return self._generative_step(batch)
def test_epoch_end(self, outputs):
return self.validation_epoch_end(outputs, prefix="test")
def get_dataset(self, type_path) -> ErrorCorrectionSeq2SeqDataset:
n_obs = self.n_obs[type_path]
max_target_length = self.target_lens[type_path]
instance_generator = self.wiki_reader.read(self.data_paths[type_path])
dataset = self.dataset_class(
self.tokenizer,
instance_generator=instance_generator,
n_obs=n_obs,
max_target_length=max_target_length,
**self.dataset_kwargs,
)
return dataset
def get_dataloader(
self, type_path: str, batch_size: int, shuffle: bool = False
) -> DataLoader:
dataset = self.get_dataset(type_path)
sampler = None
if self.hparams.sortish_sampler and type_path == "train":
assert self.hparams.gpus <= 1 # TODO: assert earlier
sampler = dataset.make_sortish_sampler(batch_size)
shuffle = False
dataloader = DataLoader(
dataset,
batch_size=batch_size,
collate_fn=dataset.collate_fn,
shuffle=shuffle,
num_workers=self.num_workers,
sampler=sampler,
)
return dataloader
def train_dataloader(self) -> DataLoader:
dataloader = self.get_dataloader(
"train", batch_size=self.hparams.train_batch_size, shuffle=True
)
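        # Estimate the total number of optimizer steps (batches per device,
        # divided by gradient accumulation, times epochs) to size the LR schedule.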
t_total = (
(
len(dataloader.dataset)
// (self.hparams.train_batch_size * max(1, self.hparams.gpus))
)
// self.hparams.accumulate_grad_batches
* float(self.hparams.max_epochs)
)
scheduler = get_linear_schedule_with_warmup(
self.opt,
num_warmup_steps=self.hparams.warmup_steps,
num_training_steps=t_total,
)
        if max(scheduler.get_last_lr()) <= 0:  # warn only when no learning rate is positive
warnings.warn("All learning rates are 0")
self.lr_scheduler = scheduler
return dataloader
def val_dataloader(self) -> DataLoader:
return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)
def test_dataloader(self) -> DataLoader:
return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
@staticmethod
def add_model_specific_args(parser, root_dir):
BaseTransformer.add_model_specific_args(parser, root_dir)
add_generic_args(parser, root_dir)
parser.add_argument("--reader", default="wiki", type=str)
parser.add_argument("--train_file", required=True, type=str)
parser.add_argument("--val_file", required=True, type=str)
parser.add_argument("--test_file", required=False, type=str)
parser.add_argument(
"--max_source_length",
default=512,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_target_length",
default=256,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--val_max_target_length",
default=256, # these defaults are optimized for CNNDM. For xsum, see README.md.
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--test_max_target_length",
default=256,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--freeze_encoder", action="store_true")
parser.add_argument("--freeze_embeds", action="store_true")
parser.add_argument("--sortish_sampler", action="store_true", default=False)
parser.add_argument(
"--logger_name",
type=str,
choices=["default", "wandb", "wandb_shared"],
default="default",
)
parser.add_argument(
"--n_train",
type=int,
default=-1,
required=False,
help="# examples. -1 means use all.",
)
parser.add_argument(
"--n_val",
type=int,
default=-1,
required=False,
help="# examples. -1 means use all.",
)
parser.add_argument(
"--n_test",
type=int,
default=-1,
required=False,
help="# examples. -1 means use all.",
)
parser.add_argument("--mutation_source", required=True)
parser.add_argument("--mutation_target", required=True)
parser.add_argument(
"--labels", type=str, choices=["supports", "refutes", "all"], required=True
)
return parser
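
# A minimal usage sketch (not part of the original file): building the combined
# CLI parser through the staticmethod above and printing its help. The class
# name ErrorCorrectionModule is an assumption here; substitute the class this
# module actually defines.
if __name__ == "__main__":
    import argparse

    sketch_parser = ErrorCorrectionModule.add_model_specific_args(
        argparse.ArgumentParser(), root_dir="."
    )
    sketch_parser.print_help()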
|
{"hexsha": "8cc1addaafbae8e66a9e0d5be99f1c045f40ba04", "size": 15912, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/error_correction/modelling/error_correction_module.py", "max_stars_repo_name": "gruentee/acl2021-factual-error-correction", "max_stars_repo_head_hexsha": "b500f589cc3e73ffa6958c7dab8c07f2535a448f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-06-12T14:24:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-15T02:03:44.000Z", "max_issues_repo_path": "src/error_correction/modelling/error_correction_module.py", "max_issues_repo_name": "gruentee/acl2021-factual-error-correction", "max_issues_repo_head_hexsha": "b500f589cc3e73ffa6958c7dab8c07f2535a448f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-06-14T14:52:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-04T13:18:03.000Z", "max_forks_repo_path": "src/error_correction/modelling/error_correction_module.py", "max_forks_repo_name": "gruentee/acl2021-factual-error-correction", "max_forks_repo_head_hexsha": "b500f589cc3e73ffa6958c7dab8c07f2535a448f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-07-10T13:40:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T08:50:05.000Z", "avg_line_length": 36.5793103448, "max_line_length": 117, "alphanum_fraction": 0.6215434892, "include": true, "reason": "import numpy", "num_tokens": 3375}
|
from __future__ import print_function
from time import time, ctime
from urllib2 import urlopen
import os.path
import json
import numpy as np
from backtest.data import DataSource
from backtest.util import parse_date, parse_time
def create_argument_parser(parser):
default_end = int(time())
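    # 15552000 seconds = 180 days, so the default window covers six months.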
default_start = default_end - 15552000
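    # argparse runs %-formatting over help strings, so literal '%' characters
    # are doubled here to survive as '%Y-%m-%d' in the rendered help text.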
date_format = '%%Y-%%m-%%d | %%Y-%%m-%%d %%H:%%M | timestamp'
parser.add_argument(
'-b', '--begin', metavar='DATETIME',
help='start time ({0}) (default: {1})'.format(
date_format, ctime(default_start)
),
type=parse_date, default=default_start
)
parser.add_argument(
'-e', '--end', metavar='DATETIME',
help='end time ({0}) (default: {1})'.format(
date_format, ctime(default_end)
),
type=parse_date, default=default_end
)
parser.add_argument(
'-i', '--interval', metavar='TIME',
help='interval (<value><s | m | h | d>) (default: 4h)',
type=parse_time, default=14400
)
parser.add_argument(
'-p', '--pair',
help='currency pair (default: %(default)s)',
default='btc_usd'
)
parser.add_argument(
'-s', '--source',
help='data source (default: %(default)s)',
choices=['poloniex'], default='poloniex'
)
parser.add_argument(
'-o', '--output', metavar='PATH',
help='output file/directory ' \
'(default: <source>_<pair>_<start>_<end>_<interval>.npz)',
default=None
)
return parser
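
# Example of the interval grammar, inferred from the help text above (the
# exact behaviour of backtest.util.parse_time is an assumption here):
# parse_time('4h') should yield 14400 seconds, matching the default.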
def main(args):
args.pair = args.pair.lower()
if args.output is None or os.path.isdir(args.output):
        fname = '{0}_{1}_{2}_{3}_{4}.npz'.format(args.source, args.pair,
args.begin, args.end,
args.interval)
if args.output is None:
args.output = fname
else:
args.output = os.path.join(args.output, fname)
if args.source == 'poloniex':
url = 'https://poloniex.com/public?command=returnChartData' \
              '&currencyPair={0}&start={1}&end={2}&period={3}' \
.format(args.pair.upper(), args.begin,
args.end, args.interval)
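        # Poloniex quotes this market as USDT_BTC; now that the URL has been
        # built from the upper-cased pair, rename it so the saved .npz array
        # keeps the btc_usd key.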
if args.pair == 'usdt_btc':
args.pair = 'btc_usd'
print('GET', url)
data = json.load(urlopen(url))
if 'error' in data:
print('Error:', data['error'])
exit(1)
if not isinstance(data, list):
print('Error: invalid data:', data)
exit(1)
if not data:
print('Error: no data')
exit(1)
start = data[0]['date']
print('start ', ctime(start))
print('end ', ctime(data[-1]['date']))
print('length ', len(data))
info = np.array([start, args.interval], dtype=int)
dataset = np.empty(
(len(data), len(DataSource.CANDLE_VALUES)),
dtype=float
)
save = {
'info': info,
args.pair: dataset
}
for i, candle in enumerate(data):
for attr in DataSource.CANDLE_VALUES:
dataset[i, DataSource.CANDLE[attr]] = candle[attr]
print('save ', args.output)
np.savez_compressed(args.output, **save)
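
# Hedged usage sketch (not part of the original module): a plausible CLI entry
# point wiring create_argument_parser() and main() together.
if __name__ == '__main__':
    import argparse
    cli_parser = create_argument_parser(argparse.ArgumentParser())
    main(cli_parser.parse_args())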
|
{"hexsha": "b1836247fb2be37e6bebc2e9a4e0a1e80eeb6028", "size": 3389, "ext": "py", "lang": "Python", "max_stars_repo_path": "backtest/data/cli/get.py", "max_stars_repo_name": "dead-beef/backtest", "max_stars_repo_head_hexsha": "052c558aeeffbae7c2fde0a13bcecec3ca4d6bd0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "backtest/data/cli/get.py", "max_issues_repo_name": "dead-beef/backtest", "max_issues_repo_head_hexsha": "052c558aeeffbae7c2fde0a13bcecec3ca4d6bd0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "backtest/data/cli/get.py", "max_forks_repo_name": "dead-beef/backtest", "max_forks_repo_head_hexsha": "052c558aeeffbae7c2fde0a13bcecec3ca4d6bd0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4695652174, "max_line_length": 80, "alphanum_fraction": 0.5340808498, "include": true, "reason": "import numpy", "num_tokens": 804}
|