text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
-- Bundles reflexivity, symmetry and transitivity for a binary relation
-- _≋_ into a single Equivalence record, discoverable by instance search.
module Oscar.Class.Equivalence where

open import Oscar.Class.Reflexivity
open import Oscar.Class.Symmetry
open import Oscar.Class.Transitivity
open import Oscar.Function
open import Oscar.Level

-- An equivalence relation packages the three component classes as
-- instance fields so they participate in instance resolution.
record Equivalence {a} {A : Set a} {ℓ} (_≋_ : A → A → Set ℓ) : Set (a ⊔ ℓ) where
  field
    ⦃ ′reflexivity ⦄ : Reflexivity _≋_
    ⦃ ′symmetry ⦄ : Symmetry _≋_
    ⦃ ′transitivity ⦄ : Transitivity _≋_

-- Re-export the record's interface; the primed fields are hidden because
-- they exist only to feed instance search, not for direct use.
open Equivalence ⦃ … ⦄ public hiding (′reflexivity; ′symmetry; ′transitivity)

instance

  -- Any relation with Reflexivity, Symmetry and Transitivity instances in
  -- scope automatically forms an Equivalence (each field filled by `it`).
  Equivalence⋆ : ∀
    {a} {A : Set a} {ℓ} {_≋_ : A → A → Set ℓ}
    ⦃ _ : Reflexivity _≋_ ⦄
    ⦃ _ : Symmetry _≋_ ⦄
    ⦃ _ : Transitivity _≋_ ⦄
    → Equivalence _≋_
  Equivalence.′reflexivity Equivalence⋆ = it
  Equivalence.′symmetry Equivalence⋆ = it
  Equivalence.′transitivity Equivalence⋆ = it
|
{"hexsha": "1076cdbc1ee33d0a77e7aabb5c9721d2df620273", "size": 791, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "archive/agda-2/Oscar/Class/Equivalence.agda", "max_stars_repo_name": "m0davis/oscar", "max_stars_repo_head_hexsha": "52e1cdbdee54d9a8eaee04ee518a0d7f61d25afb", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "archive/agda-2/Oscar/Class/Equivalence.agda", "max_issues_repo_name": "m0davis/oscar", "max_issues_repo_head_hexsha": "52e1cdbdee54d9a8eaee04ee518a0d7f61d25afb", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-04-29T00:35:04.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-11T23:33:04.000Z", "max_forks_repo_path": "archive/agda-2/Oscar/Class/Equivalence.agda", "max_forks_repo_name": "m0davis/oscar", "max_forks_repo_head_hexsha": "52e1cdbdee54d9a8eaee04ee518a0d7f61d25afb", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.275862069, "max_line_length": 80, "alphanum_fraction": 0.6687737042, "num_tokens": 308}
|
[STATEMENT]
lemma has_next_filter_generator:
"list.has_next (filter_generator g) s \<longleftrightarrow>
list.has_next g s \<and> (let (x, s') = list.next g s in if P x then True else list.has_next (filter_generator g) s')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. list.has_next (local.filter_generator g) s = (list.has_next g s \<and> (let (x, s') = list.next g s in if P x then True else list.has_next (local.filter_generator g) s'))
[PROOF STEP]
apply(transfer)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>g s. terminates g \<Longrightarrow> fst (local.filter_has_next g, local.filter_next g) s = (fst g s \<and> (let (x, s') = snd g s in if P x then True else fst (local.filter_has_next g, local.filter_next g) s'))
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>g s. terminates g \<Longrightarrow> local.filter_has_next g s = (fst g s \<and> (case snd g s of (x, s') \<Rightarrow> \<not> P x \<longrightarrow> local.filter_has_next g s'))
[PROOF STEP]
apply(subst filter_has_next.simps)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>g s. terminates g \<Longrightarrow> ((\<exists>sa. s = sa \<and> fst g sa \<and> P (fst (snd g sa))) \<or> (\<exists>sa. s = sa \<and> fst g sa \<and> \<not> P (fst (snd g sa)) \<and> local.filter_has_next g (snd (snd g sa)))) = (fst g s \<and> (case snd g s of (x, s') \<Rightarrow> \<not> P x \<longrightarrow> local.filter_has_next g s'))
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 608, "file": "Containers_List_Fusion", "length": 5}
|
[STATEMENT]
lemma Interleaves_suffix_snd [rule_format]:
"\<forall>n < length ws. \<not> P (ws ! n) (drop (Suc n) ws) \<Longrightarrow>
xs \<cong> {ys, zs, \<lambda>v vs. P v (vs @ ws)} = xs @ ws \<cong> {ys, zs @ ws, P}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>n<length ws. \<not> P (ws ! n) (drop (Suc n) ws) \<Longrightarrow> xs \<cong> {ys, zs, \<lambda>v vs. P v (vs @ ws)} = xs @ ws \<cong> {ys, zs @ ws, P}
[PROOF STEP]
by (subst (1 2) Interleaves_swap, rule Interleaves_suffix_fst, simp)
|
{"llama_tokens": 223, "file": "List_Interleaving_ListInterleaving", "length": 1}
|
#
# Solution to Project Euler problem 150
# Copyright (c) Project Nayuki. All rights reserved.
#
# https://www.nayuki.io/page/project-euler-solutions
# https://github.com/nayuki/Project-Euler-solutions
#
def compute():
    """Build the 1000-row pseudo-random triangle and return the answer as a string.

    Prefers the vectorized NumPy implementation, transparently falling back
    to the pure-Python one when NumPy is unavailable.
    """
    NUM_ROWS = 1000
    gen = lcg_random()
    # Row i of the triangle holds i+1 pseudo-random values.
    triangle = [[next(gen) for _ in range(row + 1)] for row in range(NUM_ROWS)]
    try:
        answer = compute_numpy(triangle)
    except ImportError:
        answer = compute_plain(triangle)
    return str(answer)
def compute_plain(triangle):
    """Pure-Python solution: minimum sub-triangle sum over all apexes.

    Returns the smallest sum of any sub-triangle (result is <= 0, since the
    empty sub-triangle contributes 0).
    """
    # prefix[r][c] = sum of the first c elements of triangle[r].
    prefix = []
    for row in triangle:
        acc = [0]
        total = 0
        for value in row:
            total += value
            acc.append(total)
        prefix.append(acc)
    n = len(triangle)
    best = 0
    for apex_row in range(n):
        for apex_col in range(len(triangle[apex_row])):
            running = 0
            # Grow the sub-triangle one bottom row at a time, tracking the
            # cumulative sum; every intermediate value is a candidate.
            for bottom in range(apex_row, n):
                width = bottom - apex_row + 1
                running += prefix[bottom][apex_col + width] - prefix[bottom][apex_col]
                if running < best:
                    best = running
    return best
def compute_numpy(triangle):
    """NumPy-vectorized solution: minimum sub-triangle sum over all apexes.

    Raises ImportError when NumPy is not installed (caller falls back to the
    plain implementation).
    """
    import numpy
    n = len(triangle)
    # prefix[r, c] = sum of the first c elements of triangle[r];
    # padded to n+2 columns so row slicing below never goes out of range.
    prefix = numpy.zeros([n, n + 2], dtype=numpy.int64)
    for r, row in enumerate(triangle):
        prefix[r, : r + 2] = numpy.cumsum([0] + row, dtype=numpy.int64)
    best = 0
    for i in range(n):
        for j in range(len(triangle[i])):
            # Vectorize over all possible bottom rows for apex (i, j).
            rows = numpy.arange(i, n, dtype=numpy.uint32)
            slice_sums = prefix[rows, rows - i + 1 + j] - prefix[rows, j]
            best = min(numpy.min(numpy.cumsum(slice_sums, dtype=numpy.int64)), best)
    return best
def lcg_random():
    """Infinite generator of deterministic pseudo-random ints in [-2^19, 2^19).

    Implements the linear congruential generator specified by the problem:
    state' = (615949 * state + 797807) mod 2^20, yielding state' - 2^19.
    """
    MASK = (1 << 20) - 1
    OFFSET = 1 << 19
    x = 0
    while True:
        x = (615949 * x + 797807) & MASK
        yield x - OFFSET
# Entry point: print the answer for Project Euler problem 150.
if __name__ == "__main__":
    print(compute())
|
{"hexsha": "6c3c9f73db33f7b20adea26ac16c99962094a07f", "size": 1886, "ext": "py", "lang": "Python", "max_stars_repo_path": "solutions/p150.py", "max_stars_repo_name": "xianlinfeng/project_euler_python3", "max_stars_repo_head_hexsha": "77eca44eb2b1d13bc70d6dc0258b737449d43a23", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "solutions/p150.py", "max_issues_repo_name": "xianlinfeng/project_euler_python3", "max_issues_repo_head_hexsha": "77eca44eb2b1d13bc70d6dc0258b737449d43a23", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "solutions/p150.py", "max_forks_repo_name": "xianlinfeng/project_euler_python3", "max_forks_repo_head_hexsha": "77eca44eb2b1d13bc70d6dc0258b737449d43a23", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1944444444, "max_line_length": 69, "alphanum_fraction": 0.6739130435, "include": true, "reason": "import numpy", "num_tokens": 567}
|
import warnings
warnings.filterwarnings('ignore')
import os
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
os.environ['MUJOCO_GL'] = 'egl'
import torch
import numpy as np
import gym
gym.logger.set_level(40)
import time
import random
from pathlib import Path
from cfg import parse_cfg
from env import make_env
from algorithm.tdmpc import TDMPC
from algorithm.helper import Episode, ReplayBuffer
import logger
torch.backends.cudnn.benchmark = True
__CONFIG__, __LOGS__ = 'cfgs', 'logs'
def set_seed(seed):
    """Seed every RNG used in training (python, numpy, torch CPU and CUDA)."""
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def evaluate(env, agent, num_episodes, step, env_step, video):
    """Run `num_episodes` evaluation rollouts and return the mean episode reward.

    When `video` is given, only the first episode is recorded; the video is
    saved under the current environment step.
    """
    rewards = []
    for episode_idx in range(num_episodes):
        obs = env.reset()
        done = False
        total_reward = 0
        t = 0
        if video:
            video.init(env, enabled=(episode_idx == 0))
        while not done:
            # eval_mode disables exploration; t0 marks the episode start.
            action = agent.plan(obs, eval_mode=True, step=step, t0=(t == 0))
            obs, reward, done, _ = env.step(action.cpu().numpy())
            total_reward += reward
            if video:
                video.record(env)
            t += 1
        rewards.append(total_reward)
        if video:
            video.save(env_step)
    return np.nanmean(rewards)
def train(cfg):
    """Training script for TD-MPC. Requires a CUDA-enabled device.

    Alternates between collecting one full episode with the current policy
    and a burst of model/policy updates, logging train metrics each episode
    and eval metrics every cfg.eval_freq environment steps.
    """
    assert torch.cuda.is_available()
    set_seed(cfg.seed)
    # Logs are grouped by task / modality / experiment name / seed.
    work_dir = Path().cwd() / __LOGS__ / cfg.task / cfg.modality / cfg.exp_name / str(cfg.seed)
    env, agent, buffer = make_env(cfg), TDMPC(cfg), ReplayBuffer(cfg)

    # Run training
    L = logger.Logger(work_dir, cfg)
    episode_idx, start_time = 0, time.time()
    # `step` counts policy steps and advances one episode length per iteration.
    for step in range(0, cfg.train_steps+cfg.episode_length, cfg.episode_length):

        # Collect trajectory
        obs = env.reset()
        episode = Episode(cfg, obs)
        while not episode.done:
            action = agent.plan(obs, step=step, t0=episode.first)
            obs, reward, done, _ = env.step(action.cpu().numpy())
            episode += (obs, action, reward, done)
        assert len(episode) == cfg.episode_length
        buffer += episode

        # Update model: no updates during the seeding phase; one burst of
        # `seed_steps` updates when it ends, then `episode_length` updates
        # per collected episode.
        train_metrics = {}
        if step >= cfg.seed_steps:
            num_updates = cfg.seed_steps if step == cfg.seed_steps else cfg.episode_length
            for i in range(num_updates):
                train_metrics.update(agent.update(buffer, step+i))

        # Log training episode
        episode_idx += 1
        # Environment steps account for action repeat.
        env_step = int(step*cfg.action_repeat)
        common_metrics = {
            'episode': episode_idx,
            'step': step,
            'env_step': env_step,
            'total_time': time.time() - start_time,
            'episode_reward': episode.cumulative_reward}
        train_metrics.update(common_metrics)
        L.log(train_metrics, category='train')

        # Evaluate agent periodically
        if env_step % cfg.eval_freq == 0:
            common_metrics['episode_reward'] = evaluate(env, agent, cfg.eval_episodes, step, env_step, L.video)
            L.log(common_metrics, category='eval')

    L.finish(agent)
    print('Training completed successfully')
# Entry point: load the experiment config from ./cfgs and launch training.
if __name__ == '__main__':
    train(parse_cfg(Path().cwd() / __CONFIG__))
|
{"hexsha": "4fda5cf7b86fdb27e932d323ffc635c0ea3ef1dd", "size": 2911, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/train.py", "max_stars_repo_name": "nicklashansen/tdmpc", "max_stars_repo_head_hexsha": "96cb7036ecf06f75d5ffd64a0454bbab7d0d3e17", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 60, "max_stars_repo_stars_event_min_datetime": "2022-03-10T02:56:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T03:27:13.000Z", "max_issues_repo_path": "src/train.py", "max_issues_repo_name": "nicklashansen/tdmpc", "max_issues_repo_head_hexsha": "96cb7036ecf06f75d5ffd64a0454bbab7d0d3e17", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-14T13:13:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-17T16:42:03.000Z", "max_forks_repo_path": "src/train.py", "max_forks_repo_name": "nicklashansen/tdmpc", "max_forks_repo_head_hexsha": "96cb7036ecf06f75d5ffd64a0454bbab7d0d3e17", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2022-03-10T02:57:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T18:38:47.000Z", "avg_line_length": 29.7040816327, "max_line_length": 102, "alphanum_fraction": 0.7220886293, "include": true, "reason": "import numpy", "num_tokens": 780}
|
import os
import re
import json
from collections import Counter
from confluent_kafka import Consumer, TopicPartition
import logging
import numpy as np
import pandas as pd
import matplotlib.cm as cm
import mpld3
import matplotlib.pyplot as plt
from matplotlib import rcParams
# ---------------------------------------------------------------------------
# Configuration: every setting comes from an environment variable with a
# development-friendly default.
# ---------------------------------------------------------------------------

# Kafka bootstrap server ("host:port").
kafkaBroker = os.getenv('KAFKA_BROKER')
if kafkaBroker is None:
    kafkaBroker = "localhost:9092"

# Directory served by the web frontend; generated HTML/PNG files land here.
mountPath = os.getenv('MOUNT_PATH')
if mountPath is None:
    mountPath = "/www"

# CDC topic carrying WooCommerce order-item rows.
kafkaTopic = os.getenv('KAFKA_TOPIC')
if kafkaTopic is None:
    kafkaTopic = 'wordpress_db.wordpress_db.wp_woocommerce_order_items'

# Version label shown on the generated pages/graphs.
versionString = os.getenv('VERSION_STRING')
if versionString is None:
    versionString = "Version 0"

# Bar color for the graphs (matplotlib color spec).
color = os.getenv('COLOR')
if color is None:
    color = "b"

# Any non-empty DEBUG value (even "0") enables debug logging.
debug = os.getenv('DEBUG')
if debug is None:
    debug = False
else:
    debug = True

group = os.getenv('GROUP')
if group is None:
    group = "groupid1"

# Running tally: product name -> number of units ordered.
productDict = {}

# NOTE(review): 'enable.auto.commit' is passed as the string 'False' —
# confirm confluent-kafka parses this as intended.
kConsumer = Consumer({
    'bootstrap.servers': kafkaBroker,
    'enable.auto.commit': 'False',
    'group.id': group,
    'auto.offset.reset': 'earliest'
})
kConsumer.subscribe([kafkaTopic])
def readMsg():
    """Poll the Kafka topic once (5 s timeout) and return the decoded JSON payload.

    Returns None on poll timeout or on a consumer error.
    """
    logging.debug("readMsg from kafkaTopic: %s", kafkaTopic)
    message = kConsumer.poll(5.0)
    if message is None:
        logging.debug('Received message: None')
        return None
    if message.error():
        logging.warning("Consumer error: {}".format(message.error()))
        return None
    raw = message.value()
    logging.debug('Received message: {}'.format(raw.decode('utf-8')))
    payload = json.loads(raw)
    logging.debug("got msg from kafkaTopic: %s", payload)
    return payload
def processOrder(msgJson):
    """Extract the product name from a CDC order-item event.

    Returns the item name for 'line_item' rows; None for anything else
    (including a None message).
    """
    if msgJson is None:
        return None
    record = msgJson['payload']['after']
    logging.info("got order payload from kafkaTopic: %s", record)
    if record['order_item_type'] != 'line_item':
        return None
    logging.info("received order for item: %s", record['order_item_name'])
    return record['order_item_name']
def createHtml():
    """Write the static landing page to <mountPath>/index.html.

    The page polls the recommendation PNG every 10 s, appending a
    cache-busting timestamp query parameter.
    """
    htmlStr = f"""<html>
<head>
<script>
function updateImage() {{
document.getElementById("img").src = "/recommendation-service/recommendation-service.png?ts=" + encodeURIComponent(new Date().toString());
setTimeout(updateImage, 10000);
}};
</script>
<style>
.center {{
display: block;
margin-left: auto;
margin-right: auto;
}}
* {{
font-family: Arial, Helvetica, sans-serif
}}
</style>
</head>
<body onload="updateImage();">
<h1><center>{versionString}</center></h1>
<h2><center>Recommendation based on most ordered product</center></h2>
<img id="img" src="/recommendation-service/recommendation-service.png" class="center"/>
</body>
</html>
"""
    # Fix: the original opened the file without ever closing it, leaking the
    # handle and relying on interpreter exit to flush. Use a context manager.
    with open(mountPath + '/index.html', 'w') as f:
        f.write(htmlStr)
def createGraph():
    """Render the top-5 product bar chart to <mountPath>/recommendation-service.png."""
    most_common = dict(Counter(productDict).most_common(5))
    # Fix: this function is called once per consumed message (see main());
    # without clearing, bars from every previous call accumulate on the same
    # implicit pyplot figure.
    plt.clf()
    plt.barh(list(most_common.keys()), most_common.values(), color=color)
    plt.yticks(rotation=20)
    plt.tight_layout()
    plt.tick_params(top='off', bottom='off', left='off', right='off', labelleft='off', labelbottom='on')
    plt.savefig(mountPath + '/recommendation-service.png')
def createGraph2():
    """Render the top-5 product chart as interactive HTML to <mountPath>/graph.html."""
    rcParams['font.family'] = 'sans-serif'
    most_common = dict(Counter(productDict).most_common(5))
    fig, ax = plt.subplots()
    y_pos = np.arange(len(most_common))
    ax.barh(y_pos, most_common.values(), align='center', color=color)
    ax.set_yticks(y_pos)
    ax.set_yticklabels(list(most_common.keys()))
    ax.set_xlabel('Number of units sold')
    ax.set_title('Recommendation Service ' + versionString)
    plt.tight_layout()
    mpld3.save_html(fig, mountPath + '/graph.html')
    # Fix: this runs once per consumed message; figures were created but
    # never closed, leaking memory over the service's lifetime.
    plt.close(fig)
def main():
    """Consume order events forever, tallying products and regenerating graphs.

    Blocks in an infinite poll loop; the Kafka consumer is closed on any
    exit path (including KeyboardInterrupt) via the finally block.
    """
    global productDict
    logging.info("Kafka Broker: %s", kafkaBroker)
    logging.info("Kafka Topic: %s", kafkaTopic)
    logging.info("Mount Path: %s", mountPath)
    createHtml()
    try:
        while True:
            # Read from kafkaTopic
            msg = readMsg()
            if msg is None:
                continue
            order = processOrder(msg)
            if order is None:
                continue
            # Tally the ordered product, then refresh both graph outputs.
            if order in productDict:
                productDict[order] += 1
            else:
                productDict[order] = 1
            logging.info("product Dict: %s", productDict)
            createGraph2()
            createGraph()
    finally:
        logging.debug("Closing consumer")
        kConsumer.close()
# Entry point: configure logging verbosity from the DEBUG env var, then run.
if __name__ == "__main__":
    if debug:
        logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG)
    else:
        logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.INFO)
    main()
|
{"hexsha": "a97019530393707c172b02004518a2d3aa3268fb", "size": 4422, "ext": "py", "lang": "Python", "max_stars_repo_path": "projects/woodkraft/Part2/recommendation-service/recommendation-service.py", "max_stars_repo_name": "panlm/karbon-platform-services", "max_stars_repo_head_hexsha": "80a0cffb81bcfaedb9546e806e0f461b0c7ac1fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-09-15T07:52:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-29T15:27:22.000Z", "max_issues_repo_path": "projects/woodkraft/Part2/recommendation-service/recommendation-service.py", "max_issues_repo_name": "panlm/karbon-platform-services", "max_issues_repo_head_hexsha": "80a0cffb81bcfaedb9546e806e0f461b0c7ac1fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-11-13T19:04:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T11:16:24.000Z", "max_forks_repo_path": "projects/woodkraft/Part2/recommendation-service/recommendation-service.py", "max_forks_repo_name": "panlm/karbon-platform-services", "max_forks_repo_head_hexsha": "80a0cffb81bcfaedb9546e806e0f461b0c7ac1fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2018-11-25T14:44:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-17T11:41:29.000Z", "avg_line_length": 24.5666666667, "max_line_length": 140, "alphanum_fraction": 0.6922207146, "include": true, "reason": "import numpy", "num_tokens": 1133}
|
# Copyright (c) 2011-2014 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
"""Functions for plotting Partitions."""
from __future__ import absolute_import
from __future__ import division
import logging
import numpy as np
import networkx as nx
from . import polytope as pc
# inline imports:
#
# import matplotlib as mpl
logger = logging.getLogger(__name__)
def plot_partition(
        ppp, trans=None, ppp2trans=None, only_adjacent=False,
        ax=None, plot_numbers=True, color_seed=None):
    """Plot partition with arrows from digraph.

    For filtering edges based on label use L{plot_ts_on_partition}.

    See Also
    ========
    L{abstract.prop2partition.PropPreservingPartition},
    L{plot_trajectory}

    @type ppp: L{PropPreservingPartition}

    @param trans: Transition matrix. If used,
        then transitions in C{ppp} are shown with arrows.
        Otherwise C{ppp.adj} is plotted.
        To show C{ppp.adj}, pass: trans = True

    @param plot_numbers: If True,
        then annotate each Region center with its number.

    @param ax: axes where to plot

    @param color_seed: seed for reproducible random coloring

    @param ppp2trans: order mapping ppp indices to trans states
    @type ppp2trans: list of trans states
    """
    import matplotlib as mpl
    # A graph must be converted to an adjacency matrix first.
    if isinstance(trans, nx.MultiDiGraph):
        # Fix: the original also tested `trans is not None` here, which is
        # implied by the isinstance check above.
        if ppp2trans is None:
            msg = (
                'trans is a networkx MultiDiGraph, '
                'so ppp2trans required to define state order,\n'
                'used when converting the graph to '
                'an adjacency matrix.')
            raise Exception(msg)
        trans = nx.to_numpy_matrix(trans, nodelist=ppp2trans)
        trans = np.array(trans)
    l, u = ppp.domain.bounding_box
    arr_size = (u[0, 0] - l[0, 0]) / 50.0
    ax = pc._newax(ax)
    # no trans given: use partition's own adjacency
    if trans is True and ppp.adj is not None:
        ax.set_title('Adjacency from Partition')
        trans = ppp.adj
    elif trans is None:
        trans = 'none'  # sentinel: skip arrow plotting below
    else:
        ax.set_title('Adjacency from given Transitions')
    ax.set_xlim(l[0, 0], u[0, 0])
    ax.set_ylim(l[1, 0], u[1, 0])
    # repeatable coloring ?
    if color_seed is not None:
        prng = np.random.RandomState(color_seed)
    else:
        prng = np.random.RandomState()
    # plot polytope patches
    for i, reg in enumerate(ppp.regions):
        # select random color,
        # same color for all polytopes in each region
        col = prng.rand(3)
        reg.plot(color=col, ax=ax)
        if plot_numbers:
            reg.text(str(i), ax, color='red')
    # Fix: the original used `trans is 'none'` — identity comparison with a
    # string literal relies on CPython interning and emits a SyntaxWarning
    # on modern Python. The isinstance guard keeps the equality test safe
    # when `trans` is a numpy array (elementwise == would be ambiguous).
    if isinstance(trans, str) and trans == 'none':
        mpl.pyplot.show()
        return ax
    # plot transition arrows between patches
    rows, cols = np.nonzero(trans)
    for i, j in zip(rows, cols):
        # mask non-adjacent cell transitions ?
        if only_adjacent and ppp.adj[i, j] == 0:
            continue
        plot_transition_arrow(
            ppp.regions[i], ppp.regions[j], ax, arr_size)
    mpl.pyplot.show()
    return ax
def plot_transition_arrow(polyreg0, polyreg1, ax, arr_size=None):
    """Plot arrow starting from polyreg0 and ending at polyreg1.

    @type polyreg0: L{Polytope} or L{Region}
    @type polyreg1: L{Polytope} or L{Region}
    @param ax: axes where to plot

    @return: arrow object, or None when the two centers coincide
    """
    from matplotlib import patches
    # Anchor each arrow endpoint at the Chebyshev center of its region
    # (the radius returned by cheby_ball is not needed here).
    _, center0 = pc.cheby_ball(polyreg0)
    _, center1 = pc.cheby_ball(polyreg1)
    # Degenerate arrow (coincident centers): draw nothing.
    if np.sum(np.abs(center1 - center0)) < 1e-7:
        return None
    if arr_size is None:
        lower, upper = polyreg1.bounding_box
        arr_size = (upper[0, 0] - lower[0, 0]) / 25.0
    # TODO: 3d
    dx = center1[0] - center0[0]
    dy = center1[1] - center0[1]
    arrow = patches.Arrow(
        float(center0[0]), float(center0[1]), float(dx), float(dy),
        width=arr_size, color='black')
    ax.add_patch(arrow)
    return arrow
|
{"hexsha": "3657a30ddfa7cd210fedd1680cfd17c837b5bd43", "size": 5570, "ext": "py", "lang": "Python", "max_stars_repo_path": "polytope/plot.py", "max_stars_repo_name": "samuelkolb/polytope", "max_stars_repo_head_hexsha": "4b8844f565b6cfb71803d336eb2fb3b1702ad800", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "polytope/plot.py", "max_issues_repo_name": "samuelkolb/polytope", "max_issues_repo_head_hexsha": "4b8844f565b6cfb71803d336eb2fb3b1702ad800", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "polytope/plot.py", "max_forks_repo_name": "samuelkolb/polytope", "max_forks_repo_head_hexsha": "4b8844f565b6cfb71803d336eb2fb3b1702ad800", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1547619048, "max_line_length": 73, "alphanum_fraction": 0.6658886894, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1432}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/21 14:23
# @Author : ganliang
# @File : npmatlib.py
# @Desc : 矩阵
# Demo of numpy.matlib matrix constructors.
# Fix: `np.int` was removed in NumPy 1.24 (deprecated since 1.20); the
# builtin `int` is the documented replacement for dtype arguments.
import numpy as np
import numpy.matlib as ml

# empty: uninitialized matrices; contents are arbitrary memory.
# 'F' = column-major (Fortran) order, 'C' = row-major order.
print("empty")
print(ml.empty((3, 3), dtype=int, order='F'))
print(ml.empty((3, 3), dtype=int, order='C'))

print("\nzeros")
print(ml.zeros((3, 3), dtype=int, order='C'))

print("\nones")
print(ml.ones((3, 3), dtype=int, order='C'))

print("\neye")
print(ml.eye(3, dtype=int, order='C'))

print("\nidentity")
print(ml.identity(3, dtype=int))

print("\nrand")
print(ml.rand(2, 3))

# matrix: wrap an ndarray as np.matrix (note that np.matrix itself is
# discouraged in favor of plain 2-D ndarrays).
print("\nmatrix")
a = np.arange(12).reshape(3, 4)
mr = ml.matrix(a)
print(a)
print(mr)
print(type(a))
print(type(mr))
|
{"hexsha": "31a1cdf7fae5be7c5a1bd6d3dafd6d56e8dec91a", "size": 712, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/numpy/npmatlib.py", "max_stars_repo_name": "mumupy/pythonlearn", "max_stars_repo_head_hexsha": "5be03d156f11af2467a6052a476de4b706f7d53a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/numpy/npmatlib.py", "max_issues_repo_name": "mumupy/pythonlearn", "max_issues_repo_head_hexsha": "5be03d156f11af2467a6052a476de4b706f7d53a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/numpy/npmatlib.py", "max_forks_repo_name": "mumupy/pythonlearn", "max_forks_repo_head_hexsha": "5be03d156f11af2467a6052a476de4b706f7d53a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.7777777778, "max_line_length": 48, "alphanum_fraction": 0.6123595506, "include": true, "reason": "import numpy", "num_tokens": 247}
|
{-# OPTIONS --safe --warning=error --without-K #-}

open import LogicalFormulae
open import Orders.Total.Definition
open import Orders.Partial.Definition
open import Setoids.Setoids
open import Setoids.Orders.Partial.Definition
open import Setoids.Orders.Total.Definition
open import Functions.Definition
open import Sets.EquivalenceRelations
open import Agda.Primitive using (Level; lzero; lsuc; _⊔_)

-- Lemmas about min/max for a setoid total order T (over partial order P on
-- setoid S). Every proof case-splits on `totality`, which yields three
-- cases (the two strict orderings, or the equivalence case, where the
-- `reflexive` clauses below apply).
module Setoids.Orders.Total.Lemmas {a b : _} {A : Set a} {S : Setoid {a} {b} A} {c : _} {_<_ : A → A → Set c} {P : SetoidPartialOrder S _<_} (T : SetoidTotalOrder P) where

open SetoidTotalOrder T
open SetoidPartialOrder P
open Setoid S
open Equivalence eq

-- a below both b and c implies a below max b c.
maxInequalitiesR : {a b c : A} → (a < b) → (a < c) → (a < max b c)
maxInequalitiesR {a} {b} {c} a<b a<c with totality b c
... | inl (inl x) = a<c
... | inl (inr x) = a<b
... | inr x = a<c

-- a below both b and c implies a below min b c.
minInequalitiesR : {a b c : A} → (a < b) → (a < c) → (a < min b c)
minInequalitiesR {a} {b} {c} a<b a<c with totality b c
... | inl (inl x) = a<b
... | inl (inr x) = a<c
... | inr x = a<b

-- a and b both below c implies max a b below c.
-- NOTE(review): the argument names are misleading here — the binder `a<b`
-- actually holds the proof of a < c and `a<c` the proof of b < c.
maxInequalitiesL : {a b c : A} → (a < c) → (b < c) → (max a b < c)
maxInequalitiesL {a} {b} {c} a<b a<c with totality a b
... | inl (inl x) = a<c
... | inl (inr x) = a<b
... | inr x = a<c

-- a and b both below c implies min a b below c.
-- NOTE(review): same misleading binder names as maxInequalitiesL.
minInequalitiesL : {a b c : A} → (a < c) → (b < c) → (min a b < c)
minInequalitiesL {a} {b} {c} a<b a<c with totality a b
... | inl (inl x) = a<b
... | inl (inr x) = a<c
... | inr x = a<b

-- min is a (non-strict) lower bound of its first argument.
minLessL : (a b : A) → min a b <= a
minLessL a b with totality a b
... | inl (inl x) = inr reflexive
... | inl (inr x) = inl x
... | inr x = inr reflexive

-- min is a (non-strict) lower bound of its second argument.
minLessR : (a b : A) → min a b <= b
minLessR a b with totality a b
... | inl (inl x) = inl x
... | inl (inr x) = inr reflexive
... | inr x = inr x

-- max is a (non-strict) upper bound of its first argument.
maxGreaterL : (a b : A) → a <= max a b
maxGreaterL a b with totality a b
... | inl (inl x) = inl x
... | inl (inr x) = inr reflexive
... | inr x = inr x

-- max is a (non-strict) upper bound of its second argument.
maxGreaterR : (a b : A) → b <= max a b
maxGreaterR a b with totality a b
... | inl (inl x) = inr reflexive
... | inl (inr x) = inl x
... | inr x = inr reflexive
|
{"hexsha": "3290a14407d76955c104ffdbf3b244331194f72b", "size": 2037, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Setoids/Orders/Total/Lemmas.agda", "max_stars_repo_name": "Smaug123/agdaproofs", "max_stars_repo_head_hexsha": "0f4230011039092f58f673abcad8fb0652e6b562", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-08-08T12:44:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T06:04:15.000Z", "max_issues_repo_path": "Setoids/Orders/Total/Lemmas.agda", "max_issues_repo_name": "Smaug123/agdaproofs", "max_issues_repo_head_hexsha": "0f4230011039092f58f673abcad8fb0652e6b562", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-01-06T21:11:59.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-11T11:03:39.000Z", "max_forks_repo_path": "Setoids/Orders/Total/Lemmas.agda", "max_forks_repo_name": "Smaug123/agdaproofs", "max_forks_repo_head_hexsha": "0f4230011039092f58f673abcad8fb0652e6b562", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-29T13:23:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T13:23:07.000Z", "avg_line_length": 29.9558823529, "max_line_length": 171, "alphanum_fraction": 0.5949926362, "num_tokens": 827}
|
[STATEMENT]
lemma eventually_weak_subseq:
fixes u::"nat \<Rightarrow> nat"
assumes "(\<lambda>n. real(u n)) \<longlonglongrightarrow> \<infinity>" "eventually P sequentially"
shows "eventually (\<lambda>n. P (u n)) sequentially"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>\<^sub>F n in sequentially. P (u n)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<forall>\<^sub>F n in sequentially. P (u n)
[PROOF STEP]
obtain N where *: "\<forall>n\<ge>N. P n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>N. \<forall>n\<ge>N. P n \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms(2)
[PROOF STATE]
proof (prove)
using this:
eventually P sequentially
goal (1 subgoal):
1. (\<And>N. \<forall>n\<ge>N. P n \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding eventually_sequentially
[PROOF STATE]
proof (prove)
using this:
\<exists>N. \<forall>n\<ge>N. P n
goal (1 subgoal):
1. (\<And>N. \<forall>n\<ge>N. P n \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>n\<ge>N. P n
goal (1 subgoal):
1. \<forall>\<^sub>F n in sequentially. P (u n)
[PROOF STEP]
obtain M where "\<forall>m\<ge>M. ereal(u m) \<ge> N"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>M. \<forall>m\<ge>M. ereal (real N) \<le> ereal (real (u m)) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms(1)
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ereal (real (u x))) \<longlonglongrightarrow> \<infinity>
goal (1 subgoal):
1. (\<And>M. \<forall>m\<ge>M. ereal (real N) \<le> ereal (real (u m)) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (meson Lim_PInfty)
[PROOF STATE]
proof (state)
this:
\<forall>m\<ge>M. ereal (real N) \<le> ereal (real (u m))
goal (1 subgoal):
1. \<forall>\<^sub>F n in sequentially. P (u n)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<forall>m\<ge>M. ereal (real N) \<le> ereal (real (u m))
[PROOF STEP]
have "\<And>m. m \<ge> M \<Longrightarrow> u m \<ge> N"
[PROOF STATE]
proof (prove)
using this:
\<forall>m\<ge>M. ereal (real N) \<le> ereal (real (u m))
goal (1 subgoal):
1. \<And>m. M \<le> m \<Longrightarrow> N \<le> u m
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
M \<le> ?m \<Longrightarrow> N \<le> u ?m
goal (1 subgoal):
1. \<forall>\<^sub>F n in sequentially. P (u n)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
M \<le> ?m \<Longrightarrow> N \<le> u ?m
[PROOF STEP]
have "\<And>m. m \<ge> M \<Longrightarrow> P(u m)"
[PROOF STATE]
proof (prove)
using this:
M \<le> ?m \<Longrightarrow> N \<le> u ?m
goal (1 subgoal):
1. \<And>m. M \<le> m \<Longrightarrow> P (u m)
[PROOF STEP]
using \<open>\<forall>n\<ge>N. P n\<close>
[PROOF STATE]
proof (prove)
using this:
M \<le> ?m \<Longrightarrow> N \<le> u ?m
\<forall>n\<ge>N. P n
goal (1 subgoal):
1. \<And>m. M \<le> m \<Longrightarrow> P (u m)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
M \<le> ?m \<Longrightarrow> P (u ?m)
goal (1 subgoal):
1. \<forall>\<^sub>F n in sequentially. P (u n)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
M \<le> ?m \<Longrightarrow> P (u ?m)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
M \<le> ?m \<Longrightarrow> P (u ?m)
goal (1 subgoal):
1. \<forall>\<^sub>F n in sequentially. P (u n)
[PROOF STEP]
unfolding eventually_sequentially
[PROOF STATE]
proof (prove)
using this:
M \<le> ?m \<Longrightarrow> P (u ?m)
goal (1 subgoal):
1. \<exists>N. \<forall>n\<ge>N. P (u n)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F n in sequentially. P (u n)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1573, "file": "Ergodic_Theory_SG_Library_Complement", "length": 20}
|
""" This module contains functions to support our AE analyses.
"""
__author__ = "mjp,ef"
__date__ = "dec, 2017"
import math
import os
import pdb
import random
import unittest

import numpy as np
from numpy.linalg import norm
from scipy.stats import ortho_group
import pandas as pd

from gaas import gaas
#-------------------------------------------------------------------------------
# Generic helper/utility functions
#-------------------------------------------------------------------------------
def splitpath(full_path):
    """
    Splits a path into all of its components (vs. just head/tail).

    e.g. splitpath('foo/bar/baz.txt') -> ['foo', 'bar', 'baz.txt']

    Fix: the original looped `while len(head) > 0`, which never terminates
    for absolute paths because os.path.split('/') == ('/', '') leaves the
    head unchanged forever.  We now stop as soon as os.path.split makes no
    further progress, and include the root component when one is present.
    """
    pieces = []
    remainder = full_path
    while True:
        head, tail = os.path.split(remainder)
        if tail:
            pieces.append(tail)
        if head == remainder:
            # No further progress: reached '' (relative path) or the root.
            if head:
                pieces.append(head)
            break
        remainder = head
    return pieces[::-1]
def makedirs_if_needed(dirname):
    """Create `dirname` (including parents) if it does not already exist.

    Fix: the original check-then-create pattern could raise if another
    process created the directory between the exists() check and
    makedirs(); exist_ok=True makes the operation race-free and idempotent.
    """
    os.makedirs(dirname, exist_ok=True)
def finite_mean(v):
    """Mean of the finite entries of v, or NaN when no entry is finite."""
    finite_mask = np.isfinite(v)
    if finite_mask.any():
        return np.mean(v[finite_mask])
    return np.nan
def finite_min(v):
    """Minimum of the finite entries of v, or NaN when no entry is finite."""
    finite_mask = np.isfinite(v)
    if finite_mask.any():
        return np.min(v[finite_mask])
    return np.nan
#-------------------------------------------------------------------------------
# Functions for dealing with Tensorflow and/or network models
#-------------------------------------------------------------------------------
def to_one_hot(y, n_classes=None):
"""
y : One of:
* a numpy array of class labels (non-one-hot, obv.)
* a numpy matrix of predictions that should be made one-hot
n_classes : the total # of classes (only needed if y is a vector)
"""
if np.isscalar(y) or y.size == 1:
# case where y is scalar
# note that, even in this case, we return a 2d matrix
out = np.zeros((1,n_classes), dtype=np.float32)
out[0,y] = 1
elif y.ndim == 1:
# Case where y is a vector
out = np.zeros((y.size, n_classes), dtype=np.float32)
out[np.arange(y.size),y] = 1
else:
# Case where y is a matrix
out = np.zeros(y.shape, dtype=np.float32)
for idx,vals in enumerate(y):
out[idx,np.argmax(vals)] = 1
return out
def smoothed_one_hot(y):
    """Label-smoothed copy of a one-hot vector y (single example).

    The true class receives 0.9 and the remaining 0.1 of probability mass is
    spread uniformly over the other classes, so the result still sums to one.
    (This may not be the "standard" smoothing formula, but it preserves the
    normalization property of y.)
    """
    n_classes = y.size
    true_mass = 0.9
    other_mass = (1. - true_mass) / (n_classes - 1)
    smoothed = other_mass * np.ones(y.shape)
    smoothed[np.argmax(y)] = true_mass
    return smoothed
def get_info(sess, model, x, y=None):
    """Query the CNN for basic information about a single example x.

    When a one-hot label y is supplied, returns (logits, loss, loss-gradient)
    for x; without a label only the logits can be computed and returned.
    The example is placed in slot 0 of a zero-padded batch.
    """
    batch = np.zeros(model.batch_shape)
    batch[0, ...] = x

    if y is None:
        # Without a class label we can only run the forward pass.
        logits = sess.run(model.logits, feed_dict={model.x_tf: batch})
        return logits[0, ...]

    assert (y.size == model.num_classes)  # y should be one-hot
    labels = np.zeros((model.batch_shape[0], model.num_classes))
    labels[0, ...] = y
    logits, loss, grad = sess.run([model.logits, model.loss, model.loss_x],
                                  feed_dict={model.x_tf: batch, model.y_tf: labels})
    return logits[0, ...], loss[0], grad[0]
def run_in_batches(sess, x_tf, y_tf, output_tf, x_in, y_in, batch_size):
    """
    Runs data through a CNN one batch at a time; gathers all results
    together into a single tensor.  This assumes the output of each
    batch is tensor-like.

      sess       : the tensorflow session to use
      x_tf       : placeholder for input x
      y_tf       : placeholder for input y
      output_tf  : placeholder for CNN output
      x_in       : data set to process (numpy tensor)
      y_in       : associated labels (numpy, one-hot encoding)
      batch_size : minibatch size (scalar)

    Note: the final batch is handled by sliding its start backwards so a
    full batch is always fed; the overlapping rows are then sliced off the
    output before concatenation (assumes x_in has >= batch_size rows —
    TODO confirm callers guarantee this).
    """
    n_examples = x_in.shape[0]  # total num. of objects to feed

    # determine how many mini-batches are required
    nb_batches = int(math.ceil(float(n_examples) / batch_size))
    assert nb_batches * batch_size >= n_examples

    out = []
    with sess.as_default():
        for start in np.arange(0, n_examples, batch_size):
            # the min() stuff here is to handle the last batch, which may be
            # partial: start_actual backs up so that [start_actual:end] is
            # always a full batch_size slice.
            end = min(n_examples, start + batch_size)
            start_actual = min(start, n_examples - batch_size)
            feed_dict = {x_tf : x_in[start_actual:end], y_tf : y_in[start_actual:end]}
            output_i = sess.run(output_tf, feed_dict=feed_dict)
            # the slice drops the rows re-fed from the previous batch when
            # start_actual < start (i.e. only on the final, partial batch).
            skip = start - start_actual
            output_i = output_i[skip:]
            out.append(output_i)

    out = np.concatenate(out, axis=0)
    assert(out.shape[0] == n_examples)
    return out
#-------------------------------------------------------------------------------
# Functions for sampling
#-------------------------------------------------------------------------------
class RandomDirections:
    """Samples random direction vectors of a fixed shape."""

    def __init__(self, shape):
        self._shape = tuple(shape)  # shape of a single direction vector
        self.ortho_group = None     # orthogonal group O(N); created lazily

    def gaussian_direction(self, n_samps=1):
        """Draw n_samps i.i.d. standard-normal direction vectors.

        Returns shape self._shape when n_samps == 1, otherwise
        (n_samps,) + self._shape.
        """
        if n_samps == 1:
            shape_out = self._shape
        else:
            shape_out = (n_samps,) + self._shape
        return np.random.randn(*shape_out)

    def haar_direction(self, n_samps):
        """Draw up to n_samps distinct rows of a Haar-random orthogonal matrix.

        Note: only prod(self._shape) orthogonal directions exist, so fewer
        than n_samps rows are returned when n_samps exceeds that dimension.
        """
        # creating this group is computationally expensive (for large
        # dimensions) so we defer creating it until we are sure we need it.
        if self.ortho_group is None:
            self.ortho_group = ortho_group.rvs(dim=np.prod(self._shape))
        if n_samps == 1:
            row = np.random.choice(self.ortho_group.shape[0], 1)
            # Fix: np.reshape takes the new shape positionally ("newshape");
            # passing it as the keyword `shape=` raises TypeError on
            # NumPy < 2.1.
            return np.reshape(self.ortho_group[row, :], self._shape)
        else:
            m = self.ortho_group.shape[0]
            rows = np.random.choice(m, min(m, n_samps), replace=False)
            out = self.ortho_group[rows, :]
            # Fix: reshape to the number of rows actually drawn (-1); the
            # original reshaped to n_samps rows, which fails whenever
            # n_samps > m because only min(m, n_samps) rows were selected.
            return np.reshape(out, (-1,) + self._shape)
#-------------------------------------------------------------------------------
# Functions related to analysis of AE
#-------------------------------------------------------------------------------
def distance_to_decision_boundary(sess, model, x, y, direction, d_max, tol=1e-3):
    """ Computes (approximately) the distance one needs to move along
        some direction in order for the CNN to change its decision.

          x         : a single example/image with shape (rows x cols x channels)
          y         : class label associated with x, in one-hot encoding.
                      We only require one-hot so that we don't need a separate
                      parameter indicating the number of classes.
          direction : the search direction; same shape as x
          d_max     : the maximum distance to move along direction (scalar)
          tol       : the maximum size of the interval around the change
                      NOTE: this should be related to d_max (e.g. should be
                      an order or two smaller)

        Returns (a, b, y_new, loss_new): an interval [a,b] bracketing the
        label change, the new predicted label, and the loss at x + b*direction.
        When no change occurs within d_max, returns (d_max, inf, nan, nan).
    """
    assert(not np.isscalar(y))
    # NOTE(review): argmax over axis=1 implies y carries an explicit leading
    # batch axis (shape (1, n_classes)) — confirm against callers.
    y_scalar = np.argmax(y,axis=1)

    # normalize search direction
    direction = direction / norm(direction.ravel(),2)

    # the whole batch is used to probe n points of the interval at once
    n = model.batch_shape[0]
    if n < 3:
        raise RuntimeError('sorry, I assume a non-trivial batch size')

    x_batch = np.zeros(model.batch_shape, dtype=np.float32)

    # [a, b] is the interval currently bracketing the decision change;
    # shrink it until it is narrower than tol.
    a = 0
    b = d_max

    while (b-a) > tol:
        # search over interval [a,b] for changes in label
        epsilon_vals = np.linspace(a, b, n)
        for ii in range(n):
            x_batch[ii,...] = x + epsilon_vals[ii] * direction
        preds = sess.run(model.logits, feed_dict={model.x_tf : x_batch})
        y_hat = np.argmax(preds, axis=1)

        if np.all(y_hat == y_scalar):
            a,b = d_max, np.Inf # label never changed in given interval
            break

        # index of the first probe point where the prediction differs
        first_change = np.min(np.where(y_hat != y_scalar)[0])

        # OLD REFINE CODE - potential numerical issues!  See new code below...
        ##assert(first_change > 0)
        ## refine interval
        ##a = epsilon_vals[first_change-1]
        ##b = epsilon_vals[first_change]

        # if everything were deterministic and well-conditioned, first_change would
        # always be greater than 0.  However, in the event that it is not, we
        # make some concession and try again.
        if first_change > 0:
            # the expected/typical case: tighten the bracket around the change
            a = epsilon_vals[first_change-1]
            b = epsilon_vals[first_change]
        else:
            # unexpected case: the label differed already at the left endpoint
            print('[dtdb]: WARNING: first_change occurred at index 0!!')
            # NOTE(review): min(0, a - 2*tol) can only move `a` to <= 0;
            # presumably intended to back off the left endpoint — confirm.
            a = min(0, a - 2*tol) # a hack
            b = b

    # loop terminated; either we found interval or failed
    if np.isfinite(b):
        # if a point was found, provide some additional info
        y_new = y_hat[first_change]
        pred, loss, _ = get_info(sess, model, x + b*direction, to_one_hot(y_new, y.size))
        return a, b, y_new, loss
    else:
        # no point was found in the interval
        return a, b, np.nan, np.nan
def loss_function_stats(sess, model, x0, y0, d_max,
                        n_samp_d=100, k_vals=[2,5,10], verbose=True, dir_sampler=None):
    """ Computes various statistics related to the loss function in the viscinity of
        a single example (x0,y0).

          y0 : one-hot encoding of class label y

        Probes the decision boundary distance along the gradient, the
        negative gradient, random Gaussian directions, and GAAS directions,
        and collects one row of statistics per probed direction.

        NOTE(review): k_vals is a mutable default argument; it is only read
        here, but reassigning it in a caller-visible way would be unsafe.

        Returns: a Pandas data frame
    """
    assert(not np.isscalar(y0))

    # create a simple data structure to hold results
    # changed to a list of dictionaries of relevant pieces
    class Direction_Stats:
        def __init__(self):
            # one dict per probed direction; see append() for the schema
            self.data = []

        def __str__(self):
            # human-readable summary of boundary distances per direction type
            df = self.as_dataframe()
            s = ""
            if len(df) <= 0:
                return s
            for dname in ['gradient', 'neg-gradient']:
                tmp = df.loc[df['direction_type'] == dname]
                assert(len(tmp)==1)
                if np.all(np.isfinite(tmp['boundary_distance'])):
                    s += '  label first changes (%d->%d) along "%s" direction at distance %0.3f\n' % (tmp['y'], tmp['y_hat'], dname, tmp['boundary_distance'])
            for dname in ['gaussian', 'gaas']:
                tmp = df.loc[df['direction_type'] == dname]
                s += '  min dist label change along "%s" direction: %0.3f\n' % (dname, finite_min(tmp['boundary_distance']))
                s += '  expected label change along "%s" direction: %0.3f\n' % (dname, finite_mean(tmp['boundary_distance']))
            return s

        def as_dataframe(self):
            return pd.DataFrame(self.data)

        def append(self, direction_type, y, boundary_distance, y_hat, delta_loss, **kargs):
            assert(np.isscalar(y))
            assert(np.isscalar(y_hat))
            # TODO: could check if boundary_distance is finite to avoid adding Inf to
            #       the table.  However, this is not necessarily an issue.
            entry = {'direction_type' : direction_type, 'y' : y, 'y_hat' : y_hat, 'boundary_distance' : boundary_distance, 'delta_loss' : delta_loss}
            entry.update(kargs)
            self.data.append(entry)

    stats = Direction_Stats()

    if dir_sampler is None:
        dir_sampler = RandomDirections(x0.shape)

    #------------------------------
    # get some basic info about x
    #------------------------------
    pred0, loss0, grad0 = get_info(sess, model, x0, y0)
    assert(np.argmax(pred0) == np.argmax(y0))

    #------------------------------
    # distance in gradient direction
    #------------------------------
    a,b,y_new,loss_new = distance_to_decision_boundary(sess, model, x0, y0, grad0, d_max)
    stats.append('gradient', np.argmax(y0), (a+b)/2., y_new, loss_new-loss0)

    #------------------------------
    # distance in -gradient direction
    #------------------------------
    a,b,y_new,loss_new = distance_to_decision_boundary(sess, model, x0, y0, -grad0, d_max)
    stats.append('neg-gradient', np.argmax(y0), (a+b)/2., y_new, loss_new-loss0)

    #------------------------------
    # distance in random Gaussian directions
    #------------------------------
    for gv in dir_sampler.gaussian_direction(n_samp_d):
        a, b, y_new, loss_new = distance_to_decision_boundary(sess, model, x0, y0, gv, d_max)
        stats.append('gaussian', np.argmax(y0), (a+b)/2., y_new, loss_new-loss0)

    #------------------------------
    # distance in random orthogonal directions
    #------------------------------
    #for idx, ov in enumerate(dir_sampler.haar_direction(n_samp_d)):
    #  a, b, y_new, loss_new = distance_to_decision_boundary(sess, model, x0, y0, ov, d_max)
    #  stats.append('ortho_group', np.argmax(y0), (a+b)/2., y_new, loss_new-loss0, direction_id=idx)

    #------------------------------
    # distance in gaas directions
    # Note: instead of picking k=n_samp_d we could use some smaller k and draw convex samples from that...
    #------------------------------
    for k_idx, k in enumerate(k_vals):
        # Determine the k directions that define the "subspace"
        Q = gaas(grad0, k)

        # calculate approx. distance to decision boundary for each
        # GAAS 'basis' vector
        for did, col in enumerate(Q.T):
            a, b, y_new, loss_new = distance_to_decision_boundary(sess, model, x0, y0, col.reshape(x0.shape), d_max)
            stats.append('gaas', np.argmax(y0), (a+b)/2., y_new, loss_new-loss0, direction_id=did, k=k)

        # Here we test sampling from the GAAS "subspace" by taking a convex
        # combination of the q_i \in Q
        for jj in range(min(n_samp_d, k)):
            coeff = np.random.uniform(size=k) # coeff : a positive convex combo of q_i
            coeff = coeff / np.sum(coeff)
            q_dir = np.dot(Q,coeff) # take linear combo
            q_dir = q_dir / norm(q_dir,2) # back to unit norm
            q_dir = np.reshape(q_dir, x0.shape)
            a,b,y_new,loss_new = distance_to_decision_boundary(sess, model, x0, y0, q_dir, d_max)
            stats.append('gaas_convex_combo', np.argmax(y0), (a+b)/2., y_new, loss_new-loss0, k=k)

    if verbose:
        print("%s" % str(stats))

    df = stats.as_dataframe()
    # record the gradient magnitude alongside every row
    df['ell2_grad'] = norm(grad0.ravel(), 2)
    return df
#-------------------------------------------------------------------------------
class Tests(unittest.TestCase):
    """Unit tests for the sampling and encoding helpers in this module."""

    def test_gaussian_vector(self):
        # we anticipate that vectors generated randomly in this fashion
        # should be nearly orthogonal with high probability.
        #
        # TODO: look up any analytic result describing the rate
        #       and use this to set tol and dim...
        tol = 1e-1
        n_trials = 100
        result = np.zeros((n_trials,))
        rd = RandomDirections((10000,))
        for ii in range(n_trials):
            a = rd.gaussian_direction();  a = a / norm(a.ravel(),2)
            b = rd.gaussian_direction();  b = b / norm(b.ravel(),2)
            result[ii] = np.abs(np.dot(a,b))
        # require most (70%) pairs to have near-zero inner product
        self.assertTrue(np.sum(result < tol) > (.7*n_trials))

    def test_haar_vector(self):
        # these directions should be orthogonal (rows of an orthogonal matrix)
        n_samps = 100
        rd = RandomDirections((100,))
        samps = rd.haar_direction(n_samps)
        ip = np.dot(samps, samps.T)
        # the Gram matrix of orthonormal rows is the identity
        self.assertTrue(norm(ip - np.eye(n_samps,n_samps), 'fro') < 1e-8)

    def test_to_one_hot(self):
        # test scalar form
        y = 3
        y_oh = to_one_hot(y, 10)
        self.assertTrue(y_oh[0,3] == 1)
        self.assertTrue(np.sum(y_oh) == 1)

        # test vector form
        y = np.array([0,1,2,3,4,5])
        y_oh = to_one_hot(y, 6)
        self.assertTrue(np.all(y_oh == np.eye(6,6)))

        # test matrix form: adding a constant shifts scores but not the
        # per-row argmax, so the encoding must be unchanged
        self.assertTrue(np.all(y_oh == to_one_hot(y_oh + 10)))
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "fd897d02dd5472f5db47b1bc3e312388e4240edd", "size": 15478, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ae_utils.py", "max_stars_repo_name": "mjpekala/ae-stability-study", "max_stars_repo_head_hexsha": "53023fca1e2d01192617393b3dc4e1d4d29c348e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ae_utils.py", "max_issues_repo_name": "mjpekala/ae-stability-study", "max_issues_repo_head_hexsha": "53023fca1e2d01192617393b3dc4e1d4d29c348e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ae_utils.py", "max_forks_repo_name": "mjpekala/ae-stability-study", "max_forks_repo_head_hexsha": "53023fca1e2d01192617393b3dc4e1d4d29c348e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5021645022, "max_line_length": 148, "alphanum_fraction": 0.600982039, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 3943}
|
import numpy as np
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def _make_funcs(p, v):
def m(i):
return lambda t: p[i] + t * v[i]
return m(0), m(1), m(2)
def make_graph(file_path, x, y):
    """Plot y versus x with a thick first line and save the figure to file_path."""
    plotted_lines = plt.plot(x, y)
    plt.setp(plotted_lines[0], linewidth=4)
    plt.savefig(file_path)
class Test:
    def __init__(self, data):
        """
        data is a flat list of 12 floats laid out as:
          [0:3]   drone A initial position (x, y, z)
          [3:6]   drone A velocity (x, y, z)
          [6:9]   drone B initial position (x, y, z)
          [9:12]  drone B velocity (x, y, z)
        """
        values = [float(d) for d in data[:12]]
        self.p1, self.v1 = values[0:3], values[3:6]
        self.p2, self.v2 = values[6:9], values[9:12]
        self.x1, self.y1, self.z1 = _make_funcs(self.p1, self.v1)
        self.x2, self.y2, self.z2 = _make_funcs(self.p2, self.v2)

    def dist(self, t):
        """
        Finds the distance from drone A to drone B at time t.
        """
        deltas = (self.x1(t) - self.x2(t),
                  self.y1(t) - self.y2(t),
                  self.z1(t) - self.z2(t))
        return math.sqrt(sum(d * d for d in deltas))

    def find_tcpa(self):
        """
        Finds the time t where the distance from drone A to drone B is
        smallest (time of closest point of approach).
        """
        dp = np.array(self.p2) - np.array(self.p1)
        dv = np.array(self.v2) - np.array(self.v1)
        return -np.dot(dv, dp) / np.dot(dv, dv)

    def find_domain(self):
        """Sample times from 0 through twice the closest-approach time."""
        tcpa = self.find_tcpa()
        step = 0.02
        return np.arange(0, 2 * tcpa + step, step)

    def find_graph_data(self):
        """Return (times, distances) arrays for plotting."""
        times = self.find_domain()
        distances = np.vectorize(self.dist)(times)
        return times, distances

    def make_test(self):
        times, distances = self.find_graph_data()
        make_graph("test1", times, distances)
|
{"hexsha": "d8bbe3924a8c06d8f956d04f2b36d7df3184198d", "size": 2178, "ext": "py", "lang": "Python", "max_stars_repo_path": "Test.py", "max_stars_repo_name": "murphym18/plot-tcpa", "max_stars_repo_head_hexsha": "9da41ffe69a64512fbb0d1f5f77942c98964c239", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Test.py", "max_issues_repo_name": "murphym18/plot-tcpa", "max_issues_repo_head_hexsha": "9da41ffe69a64512fbb0d1f5f77942c98964c239", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Test.py", "max_forks_repo_name": "murphym18/plot-tcpa", "max_forks_repo_head_hexsha": "9da41ffe69a64512fbb0d1f5f77942c98964c239", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5696202532, "max_line_length": 93, "alphanum_fraction": 0.556932966, "include": true, "reason": "import numpy", "num_tokens": 629}
|
import Base: exp, expm1, log, log10, log1p, sqrt, cbrt, exponent,
significand, sin, sinpi, cos, cospi, tan, sec, cot, csc,
sinh, cosh, tanh, coth, sech, csch,
asin, acos, atan, acot, asec, acsc,
asinh, acosh, atanh, acoth, asech, acsch, sinc, cosc,
+, -, %, &, *
import Base: broadcast
import Base: Broadcast
import Base.Broadcast: Broadcasted, BroadcastStyle, combine_eltypes
"""
This is a way of suggesting that stage should call
stage_operand with the operation and other arguments
"""
struct PromotePartition{T,N} <: ArrayOp{T,N}
data::AbstractArray{T,N}
end
size(p::PromotePartition) = size(domain(p.data))
# Lazy array operation that records a `Broadcasted` expression tree.
struct BCast{B, T, Nd} <: ArrayOp{T, Nd}
    bcasted::B
end

# The element type and dimensionality are recovered from the broadcast's
# function and arguments via `combine_eltypes` / `axes`.
BCast(b::Broadcasted) = BCast{typeof(b), combine_eltypes(b.f, b.args), length(axes(b))}(b)

size(x::BCast) = map(length, axes(x.bcasted))
# Stage every operand; all are already ArrayOps.
function stage_operands(ctx, ::BCast, xs::ArrayOp...)
    map(x->cached_stage(ctx, x), xs)
end

# Stage x first, then distribute the promoted array y to match x's domain.
function stage_operands(ctx, ::BCast, x::ArrayOp, y::PromotePartition)
    stg_x = cached_stage(ctx, x)
    y1 = Distribute(domain(stg_x), y.data)
    stg_x, cached_stage(ctx, y1)
end

# Mirror image: stage y first and distribute the promoted array x to match.
function stage_operands(ctx, ::BCast, x::PromotePartition, y::ArrayOp)
    stg_y = cached_stage(ctx, y)
    x1 = Distribute(domain(stg_y), x.data)
    cached_stage(ctx, x1), stg_y
end
# Broadcast style marking expressions that involve a Dagger ArrayOp;
# it wins against any other style so the whole expression is staged lazily.
struct DaggerBroadcastStyle <: BroadcastStyle end

BroadcastStyle(::Type{<:ArrayOp}) = DaggerBroadcastStyle()
BroadcastStyle(::DaggerBroadcastStyle, ::BroadcastStyle) = DaggerBroadcastStyle()
BroadcastStyle(::BroadcastStyle, ::DaggerBroadcastStyle) = DaggerBroadcastStyle()

# Materializing a Dagger-styled broadcast just wraps it as a lazy BCast node.
function Base.copy(b::Broadcast.Broadcasted{<:DaggerBroadcastStyle})
    BCast(b)
end
# Stage a lazy broadcast: partition every argument consistently, then emit
# one broadcast thunk per chunk.
function stage(ctx, node::BCast)
    bc = Broadcast.flatten(node.bcasted)
    args = bc.args
    args1 = map(args) do x
        x isa ArrayOp ? cached_stage(ctx, x) : x
    end
    ds = map(x->x isa DArray ? domainchunks(x) : nothing, args1)
    sz = size(node)
    dss = filter(x->x !== nothing, collect(ds))
    # Per-dimension cumulative chunk lengths, taken from the first staged
    # argument that has chunking information for that dimension.
    cumlengths = ntuple(ndims(node)) do i
        idx = findfirst(d -> i <= length(d.cumlength), dss)
        # Fix: the original `if` discarded its value and unconditionally fell
        # through to `dss[idx].cumlength[i]`, which errors when idx === nothing.
        if idx === nothing
            [sz[i]] # just one slice
        else
            dss[idx].cumlength[i]
        end
    end
    args2 = map(args1) do arg
        if arg isa AbstractArray
            # Plain arrays are split to match the chosen chunking; size-1
            # dimensions stay whole so they broadcast across chunks.
            s = size(arg)
            splits = map(enumerate(s)) do dim
                i, n = dim
                if n == 1
                    return [1]
                else
                    cumlengths[i]
                end
            end |> Tuple
            dmn = DomainBlocks(ntuple(_->1, length(s)), splits)
            cached_stage(ctx, Distribute(dmn, arg)).chunks
        else
            arg
        end
    end
    blcks = DomainBlocks(map(_->1, size(node)), cumlengths)
    # One delayed broadcast per aligned tuple of chunks.
    thunks = broadcast(delayed((args...)->broadcast(bc.f, args...); ),
                       args2...)
    DArray(eltype(node), domain(node), blcks, thunks)
end
export mappart, mapchunk

# Lazy operation that applies `f` to corresponding chunks of its inputs.
struct MapChunk{F, Ni, T, Nd} <: ArrayOp{T, Nd}
    f::F
    input::NTuple{Ni, ArrayOp{T,Nd}}
end

# Public constructor: build a MapChunk over any number of array operations.
mapchunk(f, xs::ArrayOp...) = MapChunk(f, xs)
Base.@deprecate mappart(args...) mapchunk(args...)

# Stage all inputs, then emit one thunk of `f` per aligned chunk tuple.
# The result reuses the first input's domain and chunking.
function stage(ctx, node::MapChunk)
    inputs = map(x->cached_stage(ctx, x), node.input)
    thunks = map(map(chunks, inputs)...) do ps...
        Thunk(node.f, ps...)
    end

    DArray(Any, domain(inputs[1]), domainchunks(inputs[1]), thunks)
end
|
{"hexsha": "1ad813c58f9591b4a2dc5e8f94ec6308c024ff5a", "size": 3428, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/array/operators.jl", "max_stars_repo_name": "abx78/Dagger.jl", "max_stars_repo_head_hexsha": "309b9d94be63f55107c77c6da89977f5774a35f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/array/operators.jl", "max_issues_repo_name": "abx78/Dagger.jl", "max_issues_repo_head_hexsha": "309b9d94be63f55107c77c6da89977f5774a35f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/array/operators.jl", "max_forks_repo_name": "abx78/Dagger.jl", "max_forks_repo_head_hexsha": "309b9d94be63f55107c77c6da89977f5774a35f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8086956522, "max_line_length": 90, "alphanum_fraction": 0.6163943991, "num_tokens": 983}
|
"""Defines a Keras model and input function for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.applications.inception_v3 import preprocess_input, decode_predictions
from io import BytesIO
import numpy as np
import pandas as pd
import requests
# Pre-trained InceptionV3 classifier; see https://keras.io/applications/#inceptionv3
model = InceptionV3(weights='imagenet')

# Download the transfer-learning dataset (features + labels) into memory.
url = 'https://github.com/hayatoy/deep-learning-datasets/releases/download/v0.1/tl_opera_capitol.npz'
response = requests.get(url)
dataset = np.load(BytesIO(response.content))
X_dataset = dataset['features']
y_dataset = dataset['labels']

from keras.utils import np_utils
from sklearn.model_selection import train_test_split

# Scale inputs the way InceptionV3 expects and one-hot encode the labels.
X_dataset = preprocess_input(X_dataset)
y_dataset = np_utils.to_categorical(y_dataset)

# Hold out 20% of examples for evaluation (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    X_dataset, y_dataset, test_size=0.2, random_state=42)
|
{"hexsha": "ea94af9c602286ce37f6d91359ba15aa701cb326", "size": 1036, "ext": "py", "lang": "Python", "max_stars_repo_path": "GoogleCloudMLEngine/trainer/model.py", "max_stars_repo_name": "jrhender/full-page-screen-capture-chrome-extension", "max_stars_repo_head_hexsha": "aaee882cbd6982cf3e171ed5ec696b8adb3e71a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "GoogleCloudMLEngine/trainer/model.py", "max_issues_repo_name": "jrhender/full-page-screen-capture-chrome-extension", "max_issues_repo_head_hexsha": "aaee882cbd6982cf3e171ed5ec696b8adb3e71a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GoogleCloudMLEngine/trainer/model.py", "max_forks_repo_name": "jrhender/full-page-screen-capture-chrome-extension", "max_forks_repo_head_hexsha": "aaee882cbd6982cf3e171ed5ec696b8adb3e71a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5333333333, "max_line_length": 101, "alphanum_fraction": 0.8185328185, "include": true, "reason": "import numpy", "num_tokens": 244}
|
import colorsys
import numpy as np
from PIL import Image
# See https://stackoverflow.com/questions/7274221/changing-image-hue-with-python-pil
def _shift_hue(arr, hout):
r, g, b, a = np.rollaxis(arr, axis=-1)
h, s, v = np.vectorize(colorsys.rgb_to_hsv)(r, g, b)
h = hout
r, g, b = np.vectorize(colorsys.hsv_to_rgb)(h, s, v)
arr = np.dstack((r, g, b, a))
return arr
def colorize(image, hue):
    """Recolour ``image`` so every pixel takes the given ``hue``.

    Args:
        image: PIL image.
        hue: number between 0 and 360 (degrees).

    Returns:
        A new RGBA PIL image with the hue replaced.
    """
    rgba = image.convert('RGBA')
    pixels = np.array(np.asarray(rgba).astype('float'))
    # _shift_hue expects the hue as a fraction of a full turn.
    shifted = _shift_hue(pixels, hue / 360.)
    return Image.fromarray(shifted.astype('uint8'), 'RGBA')
|
{"hexsha": "5221e35e4de6e2e8959964200d2a01a771a5deec", "size": 787, "ext": "py", "lang": "Python", "max_stars_repo_path": "taulubot/filter.py", "max_stars_repo_name": "fyysikkokilta/fyysikkospeksi-taulubot", "max_stars_repo_head_hexsha": "0b33789d970dd7e238270dbb4191f996b3e27078", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "taulubot/filter.py", "max_issues_repo_name": "fyysikkokilta/fyysikkospeksi-taulubot", "max_issues_repo_head_hexsha": "0b33789d970dd7e238270dbb4191f996b3e27078", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-08-01T14:45:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-17T16:41:28.000Z", "max_forks_repo_path": "taulubot/filter.py", "max_forks_repo_name": "fyysikkokilta/fyysikkospeksi-taulubot", "max_forks_repo_head_hexsha": "0b33789d970dd7e238270dbb4191f996b3e27078", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-05T13:46:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-05T13:46:01.000Z", "avg_line_length": 23.1470588235, "max_line_length": 84, "alphanum_fraction": 0.6226175349, "include": true, "reason": "import numpy", "num_tokens": 239}
|
Add LoadPath ".".
Load BenB.
Load BenB2.
(* ====================================================================== *)
(*
Praktika super TL artifact
======
Authors:
Jozef Coldenhoff s1017656
Charlotte Frenzen s4739760
Nils Golembiewski s1019649
Noah van der Vleuten s1018323
*)
(* ====================================================================== *)
(*
[
This file has to be a valid script, meaning that it
can be executed by Coq.
Therefore, explanations in natural language have to be between
comment markers.
In this project template, text within square brackets (within
comment markers) is intended to clarify what needs to be
written where.
In the final version, we expect that all these blocks have been
replaced by (your) proper content.
]
*)
(*
Abstract:
=========
[
Explain whether you managed to prove the correctness theorem.
And how did that go: did you have to change a lot compared to
the original model as it was before you started with the proof,
or could you use your formalization without many modifications?
]
*)
(*
Focus:
Modeling Goal:
==============
[Verification model]
Fragment of reality:
====================
[
Our model will focus on the user of the camera, the camera itself,
and the visual representation of the world within the scope of the lens
and the film inside of the camera.
]
Perspective:
============
[
Save selected visual state of world within the scope of the lens on film.
]
*)
(*
Abstractions or simplifications:
================================
[
Depending on the chosen focus, you may simplify certain aspects of
your artifact.
If you are modeling some kind of home automation system, it is not
unreasonable to assume that the net power is constant, although this
is not exactly the case in reality. However, if you are modeling an
artifact that protects against high peaks of power, these fluctuations
should be part of the model.
Write down explicitly which assumptions you have made to simplify
the artifact.
]
We have chosen not to model the viewfinder of the camera.
We did this because modelling it would add unnecessary complexity without contributing to the correctness property we aim to prove.
*)
(* ====================================================================== *)
(* Domain model *)
(* Domains (including their meaning) *)
(* Domain declarations. T is the time domain (real-valued minutes);
SS, AS and FS are abstract sets of shutter-speed, aperture and focus
settings. Each predicate is followed by its meaning and, where it is
observable, by how it would be measured. These declarations are
referenced verbatim by the correctness proof below. *)
Definition T := R
.
(* Time as minutes in real numbers. *)
Variable SS : Set.
(* Set of all shutter speed settings. *)
Variable AS : Set.
(* All aperture settings. *)
Variable FS : Set.
(* All focus settings. *)
(* Constants (including their meaning) *)
(* Functions (including their meaning) *)
Variable ShutterTime : SS -> T.
(* Gives the associated amount of time of the shutter setting. *)
(* Predicates (including their meaning and measurements) *)
Variable hasPowerAt (* t *) : T -> Prop.
(* Components have power at time t. *)
(* Check if the internal spring in the camera is loaded. *)
(* NOTE(review): the measurement refers to the internal spring, so
"power" presumably means mechanical energy from the wound spring
rather than electrical power -- confirm with the domain description. *)
Variable focusSelectorTurnedToAt (* fs t *) : FS -> T -> Prop.
(* The focus selector was turned to setting fs at time t. *)
(* Ask the user of the camera if he/she turned the focus selector. *)
Variable apertureSelectorTurnedToAt (* as t *) : AS -> T -> Prop.
(* The aperture selector was turned to setting as at time t. *)
(* Ask the user of the camera if he/she turned the aperture selector. *)
Variable shutterSelectorTurnedToAt (* ss t *) : SS -> T -> Prop.
(* The shutter speed selector was turned to setting ss at time t. *)
(* Ask the user of the camera if he/she turned the shutter speed selector. *)
Variable leverPulledAt (* t *) : T -> Prop.
(* The lever was pulled at time t. *)
(* Ask the user if and when the lever was pulled. *)
Variable visualWorldStateInScopeAt (* t *) : T -> Prop .
(* There is light entering the lens at time t. *)
(* Look through the viewfinder. *)
Variable shutterButtonPressedAt (* t *) : T -> Prop.
(* The shutter button was pressed at time t. *)
(* Ask the user of the camera if and when the shutter button was pressed. *)
Variable filmRollLoadedAt (* t *) : T -> Prop.
(* At time t the camera contains a roll of film. *)
(* Look if there is a roll of film in the camera. *)
Variable shutterSettingAt (* s t *) : SS -> T -> Prop.
(* Shutter setting s is present at time t. *)
(* Check the shutter selector wheel at time t. *)
Variable apertureSettingAt (* s t *) : AS -> T -> Prop.
(* Aperture setting s is present at time t. *)
(* Check the aperture selector ring at time t. *)
Variable focusSettingAt (* s t *) : FS -> T -> Prop.
(* Focus setting s is present at time t. *)
(* Check the focus selector ring at time t. *)
Variable apertureContractedAt (* t *) : T -> Prop.
(* The aperture is contracted at time t. *)
(* Look at the aperture at time t. *)
Variable shutterOpenAt (* t *) : T -> Prop.
(* The shutter is open at time t. *)
(* Look whether the shutter is open at time t. *)
Variable mirrorUpAt (* t *) : T -> Prop.
(* The mirror is up at time t. *)
(* Look if the viewfinder is dark. *)
Variable focussedLensLightAt (* t *) : T -> Prop.
(* Focussed lens light is available at time t. *)
(* Look through the viewfinder to see if the picture taken is in focus. *)
Variable selectedVisualWorldStateSavedOnFilm (* fs as1 ss t *) : FS -> AS -> SS -> T -> Prop.
(* The selected picture negative was saved on film at time t. *)
(* Develop the film and see if it is the picture that was meant to be taken. *)
(* ====================================================================== *)
(* Auxiliary predicates (including their meaning) *)
(*
[
At this place within this template you may define as many
auxiliary predicates as you want, but do not forget to include
their meaning.
]
*)
(* ====================================================================== *)
(* Components *)
Definition Lever :=
(* If the lever is pulled at time t, then for any delay d such that the
shutter button is pressed at t+d, d is positive, and the button was not
pressed anywhere in [t, t+d) (i.e. t+d is the FIRST press after t), the
camera has power throughout the closed interval [t, t+d]. *)
forall t : T,
leverPulledAt t
->
(
forall d : T,
shutterButtonPressedAt (t+d)
/\
d > 0
/\
(
(* No earlier press: the button stays unpressed on [t, t+d). *)
forall d2 : T,
d2 in [t, t+d)
->
~shutterButtonPressedAt d2
)
->
(
(* Power holds at every instant of [t, t+d]. *)
forall i : T,
i in [t, t+d]
->
hasPowerAt i
)
)
.
(*
Meaning Lever:
If at any point in time the lever is pulled, then,
the first time the shutter button is pressed after that point,
the components have power from the time the lever is pulled
until the time the shutter button is pressed.
*)
Definition ShutterSpeedSelectorWheel :=
(* Once the wheel is turned to ss at time t, the shutter setting stays
ss at every later time t+d (d >= 0) as long as the wheel has not been
turned to any OTHER setting anywhere in [t, t+d]. *)
forall (t : T) (ss : SS),
shutterSelectorTurnedToAt ss t
->
(
forall d: T,
d >= 0
(* possibly replace by t2 where t2 = t+d *)
/\
(
(* Guard: no different setting ss2 was selected during [t, t+d]. *)
forall ss2 : SS,
ss <> ss2
->
(
forall x: T,
x in [t, t+d]
->
~shutterSelectorTurnedToAt ss2 x
)
)
->
shutterSettingAt ss (t+d)
)
.
(*
Meaning ShutterSpeedSelectorWheel:
If at any point in time the shutter time selector is turned to a specific (first) setting,
then, if there is any point in the future at which the shutter time selector is
turned to another setting, the shutter setting is set to the first setting until that point
at which the shutter time selector is turned to the other setting. And, if there is no
point in the future at which the shutter time selector is turned to another setting,
the setting will always be the first setting in the future.
*)
Definition ApertureSelectorRing :=
(* Once the ring is turned to as1 at time t, the aperture setting stays
as1 at every later time t+d (d >= 0) as long as the ring has not been
turned to any OTHER setting anywhere in [t, t+d]. Mirrors the shape of
ShutterSpeedSelectorWheel. *)
forall (t : T) (as1 : AS),
apertureSelectorTurnedToAt as1 t
->
(
forall d: T,
d >= 0
/\
(
(* Guard: no different setting as2 was selected during [t, t+d]. *)
forall as2 : AS,
as1 <> as2
->
(
forall x: T,
x in [t, t+d]
->
~apertureSelectorTurnedToAt as2 x
)
)
->
apertureSettingAt as1 (t+d)
)
.
(*
Meaning ApertureSelectorRing:
If at any point in time the aperture selector is turned to a specific (first) setting,
then, if there is any point in the future at which the aperture selector is
turned to another setting, the aperture setting is set to the first setting until that point
at which the aperture selector is turned to the other setting. And, if there is no
point in the future at which the aperture selector is turned to another setting,
the setting will always be the first setting in the future.
*)
Definition FocusSelectorRing :=
(* Once the ring is turned to fs at time t, the focus setting stays fs
at every later time t+d (d >= 0) as long as the ring has not been
turned to any OTHER setting anywhere in [t, t+d]. Mirrors the shape of
ShutterSpeedSelectorWheel. *)
forall (t : T) (fs : FS),
focusSelectorTurnedToAt fs t
->
(
forall d: T,
d >= 0
/\
(
(* Guard: no different setting fs2 was selected during [t, t+d]. *)
forall fs2 : FS,
fs <> fs2
->
(
forall x: T,
x in [t, t+d]
->
~focusSelectorTurnedToAt fs2 x
)
)
->
focusSettingAt fs (t+d)
)
.
(*
Meaning FocusSelectorRing:
If at any point in time the focus selector is turned to a specific (first) setting,
then, if there is any point in the future at which the focus selector is
turned to another setting, the focus setting is set to the first setting until that point
at which the focus selector is turned to the other setting. And, if there is no
point in the future at which the focus selector is turned to another setting,
the setting will always be the first setting in the future.
*)
Definition Lens:=
(* If at time t some focus setting fs is in effect and the visual world
state is in scope of the lens, the lens produces focussed light at t. *)
forall (t : T) (fs : FS),
focusSettingAt fs t
/\
visualWorldStateInScopeAt t
->
focussedLensLightAt t
.
(*
Meaning Lens:
If at any point in time the focus setting is set to a specific setting,
and the visual state of the world is in scope at that time,
then there is focussed lens light at that time.
*)
Definition Shutter :=
(* If at time t the shutter setting is ss, the camera has power and the
shutter button is pressed, then the shutter is open at every instant of
the closed interval [t, t + ShutterTime ss]. *)
forall (t : T) (ss : SS),
shutterSettingAt ss t
/\
hasPowerAt t
/\
shutterButtonPressedAt t
->
(
forall d :T,
d in [t, t + ShutterTime ss]
->
shutterOpenAt d
)
.
(*
Meaning Shutter:
If at any point in time, the shutter setting is a specific setting, and the components have power
and the shutter button is pressed, then the shutter is open from that point time, until that
point in time plus the interval specified by the shutter setting.
*)
Definition Aperture:=
(* If at time t the shutter setting is ss, the camera has power, the
shutter button is pressed and the aperture setting is as1, then the
aperture is contracted at every instant of [t, t + ShutterTime ss]. *)
forall (t : T) (ss : SS) (as1 : AS),
shutterSettingAt ss t
/\
hasPowerAt t
/\
shutterButtonPressedAt t
/\
apertureSettingAt as1 t
->
(
forall d :T,
d in [t, t + ShutterTime ss]
->
apertureContractedAt d
)
.
(*
Meaning Aperture:
If at any point in time, the shutter setting is a specific setting, the components have power,
the shutter button is pressed and the aperture setting is a specific setting,
then the aperture is contracted from that point time, until that
point in time plus the interval specified by the shutter setting.
*)
Definition Mirror :=
(* If at time t the shutter setting is ss, the camera has power and the
shutter button is pressed, then the mirror is up at every instant of
the closed interval [t, t + ShutterTime ss]. *)
forall (t : T) (ss : SS),
shutterSettingAt ss t
/\
hasPowerAt t
/\
shutterButtonPressedAt t
->
(
forall d :T,
d in [t, t + ShutterTime ss]
->
mirrorUpAt d
)
.
(*
Meaning Mirror:
If at any point in time, the shutter setting is a specific setting, the components have power,
the shutter button is pressed,
then the mirror is up from that point time, until that
point in time plus the interval specified by the shutter setting.
*)
Definition Film :=
(* If at time t a film roll is loaded and the shutter, focus and
aperture settings ss/fs/as1 are in effect, and throughout the half-open
exposure interval [t, t + ShutterTime ss) the shutter is open, the
mirror is up, the aperture is contracted and the lens light is
focussed, then the selected picture is saved on film at the end of the
exposure, t + ShutterTime ss. *)
forall (t : T) (ss : SS) (as1 : AS) (fs : FS),
filmRollLoadedAt t
/\
shutterSettingAt ss t
/\
focusSettingAt fs t
/\
apertureSettingAt as1 t
/\
(
(* Exposure conditions hold for the whole exposure interval. *)
forall i: T,
i in [t, t + (ShutterTime ss))
->
shutterOpenAt i
/\
mirrorUpAt i
/\
apertureContractedAt i
/\
focussedLensLightAt i
)
->
selectedVisualWorldStateSavedOnFilm fs as1 ss (t + (ShutterTime ss))
.
(*
Meaning Film:
If at some point in time a roll of film is loaded, the shutter, focus and
aperture settings are each set to a specific setting, and throughout the
interval from that point until the end of the shutter time the shutter is
open, the mirror is up, the aperture is contracted and there is focussed
lens light, then the selected visual world state is saved on film (with
those focus, aperture and shutter settings) at the point in time plus the
shutter time.
*)
(*
[
For each component you have to specify the following information:
OUTSIDE comment markers:
- The 'Definition' to be read by Coq, in a readable layout that
matches the mathematical structure of the formula.
WITHIN comment markers:
- The specification of the component in natural language. Obviously,
this specification should be consistent with the formula used
by Coq.
- If appropriate, a short explanation in natural language about
the choices that have been made.
]
*)
(* ====================================================================== *)
(* Specification of the overall system *)
Definition Camera :=
(* Overall requirement. For increasing times t1 < t2 < t3 and chosen
settings ss/as1/fs, the antecedent collects the user's actions and the
environment conditions; the consequent states that the picture is on
film at t3 + ShutterTime ss. *)
forall (t1 : T) (t2 : T) (t3 : T) (ss : SS) (as1 : AS) (fs : FS),
t2 > t1
/\
t3 > t2
/\
filmRollLoadedAt t3
/\
ShutterTime ss > 0
/\
(* No selector is ever turned to a DIFFERENT setting anywhere in the
whole scenario [t1, t3 + ShutterTime ss]. *)
(
forall t : T,
t in [t1, t3 + ShutterTime ss]
->
(
(
forall as2 : AS,
as1 <> as2
->
~apertureSelectorTurnedToAt as2 t
)
/\
(
forall ss2 : SS,
ss <> ss2
->
~shutterSelectorTurnedToAt ss2 t
)
/\
(
forall fs2 : FS,
fs <> fs2
->
~focusSelectorTurnedToAt fs2 t
)
)
)
/\
(* The lever is pulled at some moment y1 in [t1, t2). *)
(
exists y1 : T,
y1 in [t1, t2 )
/\
leverPulledAt y1
)
/\
(* The shutter button is not pressed before t3. *)
(
forall t : T,
t in [t1, t3)
->
~shutterButtonPressedAt t
)
/\
(* All three selectors are turned to the chosen settings at some
moment y2 in [t2, t3). *)
(
exists y2 : T,
y2 in [t2, t3 )
/\
focusSelectorTurnedToAt fs y2
/\
apertureSelectorTurnedToAt as1 y2
/\
shutterSelectorTurnedToAt ss y2
)
/\
(* The scene stays in view of the lens during the exposure. *)
(
forall y3: T,
y3 in [t3 , t3 + (ShutterTime ss))
->
visualWorldStateInScopeAt y3
)
/\
(* The shutter button is pressed exactly at t3. *)
shutterButtonPressedAt t3
->
selectedVisualWorldStateSavedOnFilm fs as1 ss (t3 + (ShutterTime ss))
.
(*
Meaning Camera:
For any three increasing points in time and any shutter, aperture and focus setting:
if a roll of film is loaded at the third point, the lever is pulled somewhere
between the first and second points, the aperture, shutter and focus selectors
are turned to the chosen settings somewhere between the second and third points
and none of them is ever turned to a different setting during the whole scenario,
the shutter button is pressed at the third point and not earlier, and the scene
stays in view of the lens during the exposure,
then the photo is saved on film at the third point in time plus the exposure
time associated with the chosen shutter speed setting.
*)
(*
[
Here you have to specify:
OUTSIDE comment markers:
- The 'Definition' to be read by Coq, in a readable layout that
matches the mathematical structure of the formula.
WITHIN comment markers:
- The specification of the overall system in natural language.
Obviously, this specification should be consistent with the
formula used by Coq.
- If appropriate, a short explanation in natural language about
the choices that have been made.
]
*)
(* ====================================================================== *)
(* Extras *)
(*
[
It is very likely that you do not need any extras!
However, if it turns out during your proof that you have to prove
several times (almost) the same, then you may define a 'Lemma' at
this place, followed by its proof. And in the proof of the correctness
theorem, you may apply this lemma several times.
Note that it is always allowed to add lemmas to this script!
Sometimes it happens that Coq has troubles with 'trivial' properties
of numbers, that cannot be solve easily using 'lin_solve'.
In such situations, you may contact your supervisor and discuss
whether this may be solved by adding an 'Axiom', which can also be
applied later on within the proof of the correctness theorem.
]
*)
(* Correctness theorem *)
(* ====================================================================== *)
(*
[
Write down your correctness theorem in the usual notation:
Theorem CorTheorem:
Component1 /\ Component2 /\ ... /\ ComponentN -> SpecOfTheOverallSystem.
Note that as long as you don't know what natural deduction is
and you cannot start with the proof yet, you should keep this
theorem within comment markers, otherwise you will get a red cross
for stating a theorem without a proof.
For the final version you obviously have to remove these comment
markers and provide a real proof!
Note that even if your proof is correct, you won't be able to
get a green check mark, but only an orange flag, for technical
reasons. But that is no problem.
]
*)
Theorem CorTheorem:
Lever
/\
Lens
/\
Mirror
/\
Aperture
/\
Shutter
/\
Film
/\
ShutterSpeedSelectorWheel
/\
ApertureSelectorRing
/\
FocusSelectorRing
->
Camera
.
Proof.
unfold Lever.
unfold Lens.
unfold Mirror.
unfold Aperture.
unfold Shutter.
unfold Film.
unfold ShutterSpeedSelectorWheel.
unfold ApertureSelectorRing.
unfold FocusSelectorRing.
unfold Camera.
imp_i a1.
destruct a1.
destruct H0.
destruct H1.
destruct H2.
destruct H3.
destruct H4.
destruct H5.
destruct H6.
all_i t1_strict.
all_i t2_strict.
all_i t3_strict.
all_i ss_strict.
all_i as1_strict.
all_i fs_strict.
imp_i a2.
destruct a2.
destruct H9.
destruct H10.
destruct H11.
destruct H12.
destruct H13.
destruct H14.
destruct H15.
destruct H16.
exi_e (
exists y1 : T,
y1 in [t1_strict, t2_strict)
/\
leverPulledAt y1
)
y1_strict a3
.
hyp H13.
destruct a3.
exi_e (
exists y2 : T,
y2 in [t2_strict, t3_strict)
/\
focusSelectorTurnedToAt fs_strict y2
/\
apertureSelectorTurnedToAt as1_strict y2
/\
shutterSelectorTurnedToAt ss_strict y2
)
y2_strict a4
.
hyp H15.
destruct a4.
destruct H21.
destruct H22.
exi_e (
exists d:T,
d = (t3_strict - y2_strict)
)
d_strict a5.
exi_i (t3_strict - y2_strict).
lin_solve.
imp_e (
filmRollLoadedAt t3_strict
/\
shutterSettingAt ss_strict t3_strict
/\
focusSettingAt fs_strict t3_strict
/\
apertureSettingAt as1_strict t3_strict
/\
(
forall i : T,
i in [t3_strict, t3_strict + ShutterTime ss_strict)
->
shutterOpenAt i
/\
mirrorUpAt i
/\
apertureContractedAt i
/\
focussedLensLightAt i
)
)
.
all_e (
forall (fs : FS),
filmRollLoadedAt t3_strict
/\
shutterSettingAt ss_strict t3_strict
/\
focusSettingAt fs t3_strict
/\
apertureSettingAt as1_strict t3_strict
/\
(
forall i : T,
i in [t3_strict, t3_strict + ShutterTime ss_strict)
->
shutterOpenAt i
/\
mirrorUpAt i
/\
apertureContractedAt i
/\
focussedLensLightAt i
)
->
selectedVisualWorldStateSavedOnFilm fs as1_strict ss_strict (t3_strict + ShutterTime ss_strict)
)
fs_strict.
all_e (
forall (as1 : AS)(fs : FS),
filmRollLoadedAt t3_strict
/\
shutterSettingAt ss_strict t3_strict
/\
focusSettingAt fs t3_strict
/\
apertureSettingAt as1 t3_strict
/\
(
forall i : T,
i in [t3_strict, t3_strict + ShutterTime ss_strict)
->
shutterOpenAt i
/\
mirrorUpAt i
/\
apertureContractedAt i
/\
focussedLensLightAt i
)
->
selectedVisualWorldStateSavedOnFilm fs as1 ss_strict (t3_strict + ShutterTime ss_strict)
)
as1_strict
.
all_e (
forall (ss : SS) (as1 : AS) (fs : FS),
filmRollLoadedAt t3_strict
/\
shutterSettingAt ss t3_strict
/\
focusSettingAt fs t3_strict
/\
apertureSettingAt as1 t3_strict
/\
(
forall i : T,
i in [t3_strict, t3_strict + ShutterTime ss)
->
shutterOpenAt i
/\
mirrorUpAt i
/\
apertureContractedAt i
/\
focussedLensLightAt i
)
->
selectedVisualWorldStateSavedOnFilm fs as1 ss (t3_strict + ShutterTime ss)
)
ss_strict
.
all_e (
forall (t:T) (ss : SS) (as1 : AS)(fs : FS),
filmRollLoadedAt t
/\
shutterSettingAt ss t
/\
focusSettingAt fs t
/\
apertureSettingAt as1 t
/\
(
forall i : T,
i in [t, t + ShutterTime ss)
->
shutterOpenAt i
/\
mirrorUpAt i
/\
apertureContractedAt i
/\
focussedLensLightAt i
)
->
selectedVisualWorldStateSavedOnFilm fs as1 ss (t + ShutterTime ss)
)
t3_strict
.
hyp H4.
con_i.
hyp H10.
imp_e (
apertureSettingAt as1_strict t3_strict
)
.
imp_i a_aperture_setting.
imp_e (
focusSettingAt fs_strict t3_strict
)
.
imp_i a_focus_setting.
imp_e (
shutterSettingAt ss_strict t3_strict
)
.
imp_i a_shutter_setting.
imp_e (
hasPowerAt t3_strict
)
.
imp_i a_has_power.
(*
imp_e (forall t:T, t in [t1_strict, t3_strict] -> t in [t1_strict, t3_strict + ShutterTime ss_strict]).
imp_i a_interval1.
*)
con_i.
hyp a_shutter_setting.
con_i.
hyp a_focus_setting.
con_i.
hyp a_aperture_setting.
all_i i_strict.
imp_i ab1.
con_i.
(*=============shutteropenedat=========*)
imp_e (
i_strict in [t3_strict, t3_strict + ShutterTime ss_strict]
)
.
all_e (
forall d : T,
d in [t3_strict, t3_strict + ShutterTime ss_strict]
->
shutterOpenAt d
)
i_strict.
imp_e (
shutterSettingAt ss_strict t3_strict
/\
hasPowerAt t3_strict
/\
shutterButtonPressedAt t3_strict
)
.
all_e (
forall (ss : SS),
shutterSettingAt ss t3_strict
/\
hasPowerAt t3_strict
/\
shutterButtonPressedAt t3_strict
->
(
forall d : T,
d in [t3_strict, t3_strict + ShutterTime ss]
->
shutterOpenAt d
)
)
ss_strict
.
all_e (
forall (t : T) (ss : SS),
shutterSettingAt ss t
/\
hasPowerAt t
/\
shutterButtonPressedAt t
->
(
forall d : T,
d in [t, t + ShutterTime ss]
->
shutterOpenAt d
)
)
t3_strict
.
hyp H3.
con_i.
hyp a_shutter_setting.
con_i.
hyp a_has_power.
hyp H17.
interval.
imp_e (
t3_strict <= i_strict
/\
i_strict < t3_strict + ShutterTime ss_strict
)
.
imp_i ab2.
con_i.
con_e1 (
i_strict < t3_strict + ShutterTime ss_strict
)
.
hyp ab2.
imp_e (
i_strict < t3_strict + ShutterTime ss_strict
)
.
imp_i abc1.
lin_solve.
con_e2 (
t3_strict <= i_strict
)
.
hyp ab2.
hyp ab1.
(*===============mirrorupat=================*)
con_i.
imp_e (
i_strict in [t3_strict, t3_strict + ShutterTime ss_strict]
)
.
all_e (
forall d : T,
d in [t3_strict, t3_strict + ShutterTime ss_strict]
->
mirrorUpAt d
)
i_strict.
imp_e (
shutterSettingAt ss_strict t3_strict
/\
hasPowerAt t3_strict
/\
shutterButtonPressedAt t3_strict
)
.
all_e (
forall (ss : SS),
shutterSettingAt ss t3_strict
/\
hasPowerAt t3_strict
/\
shutterButtonPressedAt t3_strict
->
(
forall d : T,
d in [t3_strict, t3_strict + ShutterTime ss]
->
mirrorUpAt d
)
)
ss_strict
.
all_e (
forall (t : T) (ss : SS),
shutterSettingAt ss t
/\
hasPowerAt t
/\
shutterButtonPressedAt t
->
(
forall d : T,
d in [t, t + ShutterTime ss]
->
mirrorUpAt d
)
)
t3_strict
.
hyp H1.
con_i.
hyp a_shutter_setting.
con_i.
hyp a_has_power.
hyp H17.
interval.
imp_e (
t3_strict <= i_strict
/\
i_strict < t3_strict + ShutterTime ss_strict
)
.
imp_i ab2.
con_i.
con_e1 (
i_strict < t3_strict + ShutterTime ss_strict
)
.
hyp ab2.
imp_e (
i_strict < t3_strict + ShutterTime ss_strict
)
.
imp_i abc1.
lin_solve.
con_e2 (
t3_strict <= i_strict
)
.
hyp ab2.
hyp ab1.
(*=======aperturecontractedat========*)
con_i.
imp_e (
i_strict in [t3_strict, t3_strict + ShutterTime ss_strict]
)
.
all_e (
forall d : T,
d in [t3_strict, t3_strict + ShutterTime ss_strict]
->
apertureContractedAt d
)
i_strict.
imp_e (
shutterSettingAt ss_strict t3_strict
/\
hasPowerAt t3_strict
/\
shutterButtonPressedAt t3_strict
/\
apertureSettingAt as1_strict t3_strict
)
.
all_e (
forall (as1 : AS),
shutterSettingAt ss_strict t3_strict
/\
hasPowerAt t3_strict
/\
shutterButtonPressedAt t3_strict
/\
apertureSettingAt as1 t3_strict
->
(
forall d : T,
d in [t3_strict, t3_strict + ShutterTime ss_strict]
->
apertureContractedAt d
)
)
as1_strict
.
all_e (
forall (ss:SS) (as1 : AS),
shutterSettingAt ss t3_strict
/\
hasPowerAt t3_strict
/\
shutterButtonPressedAt t3_strict
/\
apertureSettingAt as1 t3_strict
->
(
forall d : T,
d in [t3_strict, t3_strict + ShutterTime ss]
->
apertureContractedAt d
)
)
ss_strict
.
all_e (
forall (t:T) (ss:SS) (as1 : AS),
shutterSettingAt ss t
/\
hasPowerAt t
/\
shutterButtonPressedAt t
/\
apertureSettingAt as1 t
->
(
forall d : T,
d in [t, t + ShutterTime ss]
->
apertureContractedAt d
)
)
t3_strict
.
hyp H2.
con_i.
hyp a_shutter_setting.
con_i.
hyp a_has_power.
con_i.
hyp H17.
hyp a_aperture_setting.
interval.
imp_e (
t3_strict <= i_strict
/\
i_strict < t3_strict + ShutterTime ss_strict
)
.
imp_i ab2.
con_i.
con_e1 (
i_strict < t3_strict + ShutterTime ss_strict
)
.
hyp ab2.
imp_e (
i_strict < t3_strict + ShutterTime ss_strict
)
.
imp_i abc1.
lin_solve.
con_e2 (
t3_strict <= i_strict
)
.
hyp ab2.
hyp ab1.
(*=========focussedlenslightat======*)
imp_e (
focusSettingAt fs_strict i_strict
/\
visualWorldStateInScopeAt i_strict
)
.
all_e (
forall (fs : FS),
focusSettingAt fs i_strict
/\
visualWorldStateInScopeAt i_strict
->
focussedLensLightAt i_strict
)
fs_strict
.
all_e (
forall (t : T) (fs : FS),
focusSettingAt fs t
/\
visualWorldStateInScopeAt t
->
focussedLensLightAt t
)
i_strict.
hyp H0.
con_i.
exi_e (
exists d:T,
y2_strict + d = i_strict
)
d2_strict ab3.
exi_i (i_strict - y2_strict).
lin_solve.
replace i_strict with (y2_strict + d2_strict).
imp_e (
d2_strict >= 0
/\
(
forall fs2 : FS,
fs_strict <> fs2
->
forall x : T,
x in [y2_strict, (y2_strict + d2_strict)]
->
~focusSelectorTurnedToAt fs2 x
)
)
.
all_e (
forall d : T,
d >= 0
/\
(
forall fs2 : FS,
fs_strict <> fs2
->
forall x : T,
x in [y2_strict, y2_strict + d]
->
~focusSelectorTurnedToAt fs2 x
)
->
focusSettingAt fs_strict (y2_strict + d)
)
d2_strict
.
imp_e (
focusSelectorTurnedToAt fs_strict y2_strict
)
.
all_e (
forall (fs : FS),
focusSelectorTurnedToAt fs y2_strict
->
(
forall d: T,
d >= 0
(* possibly replace by t2 where t2 = t+d ?????????????????????????????*)
/\
(
forall fs2 : FS,
fs <> fs2
->
(
forall x: T,
x in [y2_strict, y2_strict + d]
->
~focusSelectorTurnedToAt fs2 x
)
)
->
focusSettingAt fs (y2_strict + d)
)
)
fs_strict
.
all_e (
forall (t : T) (fs : FS),
focusSelectorTurnedToAt fs t
->
(
forall d: T,
d >= 0
(* possibly replace by t2 where t2 = t+d ?????????????????????????????*)
/\
(
forall fs2 : FS,
fs <> fs2
->
(
forall x: T,
x in [t, t + d]
->
~focusSelectorTurnedToAt fs2 x
)
)
->
focusSettingAt fs (t + d)
)
)
y2_strict
.
hyp H7.
hyp H21.
con_i.
replace d2_strict with (i_strict - y2_strict).
imp_e (
i_strict >= y2_strict
)
.
imp_i ab4.
lin_solve.
imp_e (
t3_strict <= i_strict
/\
y2_strict <= t3_strict
)
.
imp_i ab4.
imp_e (
t3_strict <= i_strict
)
.
imp_i ab5.
imp_e (
y2_strict <= t3_strict
)
.
imp_i ab6.
lin_solve.
con_e2 (
t3_strict <= i_strict
)
.
hyp ab4.
con_e1 (
y2_strict <= t3_strict
)
.
hyp ab4.
con_i.
con_e1 (
i_strict < t3_strict + ShutterTime ss_strict
)
.
hyp ab1.
imp_e (
y2_strict < t3_strict
)
.
imp_i ab4.
lin_solve.
con_e2 (
t2_strict <= y2_strict
)
.
hyp H20.
replace i_strict with (y2_strict + d2_strict).
lin_solve.
all_i fs2_strict.
imp_i ab5.
all_i x_strict.
imp_i ab6.
imp_e (
fs_strict <> fs2_strict
)
.
all_e (
forall fs2 : FS,
fs_strict <> fs2
->
~focusSelectorTurnedToAt fs2 x_strict
)
fs2_strict.
con_e2 (
forall ss2 : SS,
ss_strict <> ss2
->
~shutterSelectorTurnedToAt ss2 x_strict
)
.
con_e2 (
forall as2 : AS,
as1_strict <> as2
->
~apertureSelectorTurnedToAt as2 x_strict
)
.
imp_e (
x_strict in [t1_strict, t3_strict + ShutterTime ss_strict]
)
.
all_e (
forall t : T,
t in [t1_strict, t3_strict + ShutterTime ss_strict]
->
(
forall as2 : AS,
as1_strict <> as2
->
~apertureSelectorTurnedToAt as2 t
)
/\
(
forall ss2 : SS,
ss_strict <> ss2
->
~shutterSelectorTurnedToAt ss2 t
)
/\
(
forall fs2 : FS,
fs_strict <> fs2
->
~focusSelectorTurnedToAt fs2 t
)
)
x_strict.
hyp H12.
interval.
imp_e (
y2_strict <= x_strict
/\
x_strict <= y2_strict + d2_strict
)
.
imp_i ab7.
imp_e (
t3_strict <= i_strict
/\
i_strict < t3_strict + ShutterTime ss_strict
)
.
imp_i ab8.
imp_e (
t2_strict <= y2_strict
/\
y2_strict < t3_strict
)
.
imp_i ab9.
imp_e (
t2_strict <= y2_strict
)
.
imp_i ab10.
imp_e (
y2_strict <= x_strict
)
.
imp_i ab11.
imp_e (
x_strict <= y2_strict + d2_strict
)
.
imp_i ab12.
imp_e (
t3_strict <= i_strict
/\
i_strict < t3_strict + ShutterTime ss_strict
)
.
imp_i ab13.
imp_e (
i_strict < t3_strict + ShutterTime ss_strict
)
.
imp_i ab14.
con_i.
lin_solve.
lin_solve.
con_e2 (
t3_strict <= i_strict
)
.
hyp ab8.
hyp ab1.
con_e2 (
y2_strict <= x_strict
)
.
hyp ab7.
con_e1 (
x_strict <= y2_strict + d2_strict
)
.
hyp ab6.
con_e1 (
y2_strict < t3_strict
)
.
hyp ab9.
hyp H20.
hyp ab1.
hyp ab6.
hyp ab5.
imp_e (
i_strict in [t3_strict, t3_strict + ShutterTime ss_strict)
)
.
all_e (
forall y3 : T,
y3 in [t3_strict, t3_strict + ShutterTime ss_strict)
->
visualWorldStateInScopeAt y3
)
i_strict.
hyp H16.
hyp ab1.
(*==============haspowerat===============*)
exi_e (
exists d3:T,
y1_strict + d3 = t3_strict
)
d3_strict ab1.
exi_i (t3_strict - y1_strict).
lin_solve.
replace t3_strict with (y1_strict + d3_strict).
imp_e (
(y1_strict + d3_strict) in [y1_strict, t3_strict]
)
.
all_e all_e (forall i : T, i in [y1_strict, t3_strict] -> hasPowerAt i) (y1_strict + d3_strict).
imp_e (
shutterButtonPressedAt (y1_strict + d3_strict)
/\
d3_strict > 0
/\
(
forall d2 : T,
d2 in [y1_strict, (y1_strict + d3_strict))
->
~shutterButtonPressedAt d2
)
)
.
replace t3_strict with (y1_strict + d3_strict).
all_e (
forall d : T,
shutterButtonPressedAt (y1_strict + d)
/\
d > 0
/\
(
forall d2 : T,
d2 in [y1_strict, y1_strict + d)
->
~shutterButtonPressedAt d2
)
->
(
forall i : T,
i in [y1_strict, y1_strict + d]
->
hasPowerAt i
)
)
d3_strict
.
imp_e (
leverPulledAt y1_strict
)
.
all_e (
forall t : T,
leverPulledAt t
->
forall d : T,
shutterButtonPressedAt (t + d)
/\
d > 0
/\
(
forall d2 : T,
d2 in [t, t + d)
->
~shutterButtonPressedAt d2
)
->
forall i : T,
i in [t, t + d]
->
hasPowerAt i
)
y1_strict
.
hyp H.
hyp H19.
con_i.
replace (y1_strict + d3_strict) with t3_strict.
hyp H17.
con_i.
imp_e (
t1_strict <= y1_strict
/\
y1_strict < t2_strict
)
.
imp_i ab2.
imp_e (
y1_strict < t2_strict
)
.
imp_i ab4.
lin_solve.
con_e2 (
t1_strict <= y1_strict
)
.
hyp ab2.
hyp H18.
all_i d2_strict.
imp_i ab2.
imp_e (
d2_strict in [t1_strict, t3_strict)
)
.
all_e (
forall t : T,
t in [t1_strict, t3_strict)
->
~shutterButtonPressedAt t
)
d2_strict.
hyp H14.
interval.
(*need ab2, H18, *)
imp_e (
y1_strict <= d2_strict
/\
d2_strict < y1_strict + d3_strict
)
.
imp_i ab3.
imp_e (
t1_strict <= y1_strict
/\
y1_strict < t2_strict
)
.
imp_i ab4.
imp_e (
y1_strict <= d2_strict
)
.
imp_i ab5.
imp_e (
t1_strict <= y1_strict
)
.
imp_i ab6.
imp_e (
d2_strict < y1_strict + d3_strict
)
.
imp_i ab7.
con_i.
lin_solve.
replace t3_strict with (y1_strict + d3_strict).
lin_solve.
con_e2 (
y1_strict <= d2_strict
)
.
hyp ab3.
con_e1 (
y1_strict < t2_strict
)
.
hyp ab4.
con_e1 (
d2_strict < y1_strict + d3_strict
)
.
hyp ab3.
hyp H18.
hyp ab2.
replace (y1_strict + d3_strict) with t3_strict.
imp_e (
t1_strict <= y1_strict
/\
y1_strict < t2_strict
)
.
imp_i ab2.
interval.
con_i.
imp_e (
y1_strict < t2_strict
)
.
imp_i ab3.
lin_solve.
con_e2 (
t1_strict <= y1_strict
)
.
hyp ab2.
lin_solve.
hyp H18.
(* ======================= *)
(* SHUTTERSETTING *)
(* ======================= *)
replace t3_strict with ((t3_strict - d_strict) + d_strict).
imp_e (
d_strict >= 0
/\
(
forall ss2 : SS,
ss_strict <> ss2
->
forall x : T,
x in [(t3_strict - d_strict), (t3_strict - d_strict) + d_strict]
->
~shutterSelectorTurnedToAt ss2 x
)
)
.
all_e (
forall d : T,
d >= 0
/\
(
forall ss2 : SS,
ss_strict <> ss2
->
forall x : T,
x in [(t3_strict - d_strict), (t3_strict - d_strict) + d]
->
~shutterSelectorTurnedToAt ss2 x
)
->
shutterSettingAt ss_strict ((t3_strict - d_strict) + d)
)
d_strict
.
imp_e (
shutterSelectorTurnedToAt ss_strict (t3_strict - d_strict)
)
.
all_e (
forall (ss : SS),
shutterSelectorTurnedToAt ss (t3_strict - d_strict)
->
(
forall d: T,
d >= 0
(* possibly replace by t2 where t2 = t+d ?????????????????????????????*)
/\
(
forall ss2 : SS,
ss <> ss2
->
(
forall x: T,
x in [(t3_strict - d_strict), (t3_strict - d_strict) + d]
->
~shutterSelectorTurnedToAt ss2 x
)
)
->
shutterSettingAt ss ((t3_strict - d_strict) + d)
)
)
ss_strict
.
all_e (
forall (t : T) (ss : SS),
shutterSelectorTurnedToAt ss t
->
(
forall d: T,
d >= 0
(* possibly replace by t2 where t2 = t+d ?????????????????????????????*)
/\
(
forall ss2 : SS,
ss <> ss2
->
(
forall x: T,
x in [t, t + d]
->
~shutterSelectorTurnedToAt ss2 x
)
)
->
shutterSettingAt ss (t + d)
)
)
(t3_strict - d_strict)
.
hyp H5.
replace (t3_strict - d_strict) with y2_strict.
hyp H23.
replace d_strict with (t3_strict - y2_strict).
lin_solve.
con_i.
replace d_strict with (t3_strict - y2_strict).
imp_e (
t2_strict <= y2_strict
/\
y2_strict < t3_strict
)
.
imp_i a6.
imp_e (
t2_strict <= y2_strict
)
.
imp_i ab7.
imp_e (
y2_strict < t3_strict
)
.
imp_i ab8.
lin_solve.
con_e2 (
t2_strict <= y2_strict
)
.
hyp a6.
con_e1 (
y2_strict < t3_strict
)
.
hyp a6.
hyp H20.
replace (t3_strict - d_strict + d_strict) with t3_strict.
replace (t3_strict - d_strict) with y2_strict.
all_i ss2_strict.
imp_i a7.
all_i x_strict.
imp_i a8.
imp_e (
ss_strict <> ss2_strict
)
.
all_e (
forall ss2 : SS,
ss_strict <> ss2
->
~shutterSelectorTurnedToAt ss2 x_strict
)
ss2_strict.
con_e1 (
forall fs2 : FS,
fs_strict <> fs2
->
~focusSelectorTurnedToAt fs2 x_strict
)
.
con_e2 (
forall as2 : AS,
as1_strict <> as2
->
~apertureSelectorTurnedToAt as2 x_strict
)
.
imp_e (
x_strict in [t1_strict, t3_strict + ShutterTime ss_strict]
)
.
all_e (
forall t : T,
t in [t1_strict, t3_strict + ShutterTime ss_strict]
->
(
forall as2 : AS,
as1_strict <> as2
->
~apertureSelectorTurnedToAt as2 t
)
/\
(
forall ss2 : SS,
ss_strict <> ss2
->
~shutterSelectorTurnedToAt ss2 t
)
/\
(
forall fs2 : FS,
fs_strict <> fs2
->
~focusSelectorTurnedToAt fs2 t
)
)
x_strict.
hyp H12.
interval.
imp_e (
y2_strict <= x_strict
/\
x_strict <= t3_strict
)
.
imp_i ab7.
imp_e (
t2_strict <= y2_strict
/\
y2_strict < t3_strict
)
.
imp_i ab9.
imp_e (
t2_strict <= y2_strict
)
.
imp_i ab10.
imp_e (
y2_strict <= x_strict
)
.
imp_i ab11.
imp_e (
x_strict <= t3_strict
)
.
imp_i ab12.
imp_e (
x_strict < t3_strict + ShutterTime ss_strict
)
.
imp_i ab14.
con_i.
lin_solve.
lin_solve.
lin_solve.
con_e2 (
y2_strict <= x_strict
)
.
hyp ab7.
con_e1 (
x_strict <= t3_strict
)
.
hyp a8.
con_e1 (
y2_strict < t3_strict
)
.
hyp ab9.
hyp H20.
hyp a8.
hyp a7.
replace d_strict with (t3_strict - y2_strict).
lin_solve.
lin_solve.
lin_solve.
(* =========================================== *)
(* FOCUSSETTING *)
(* =========================================== *)
replace t3_strict with ((t3_strict - d_strict) + d_strict).
imp_e (
d_strict >= 0
/\
(
forall fs2 : FS,
fs_strict <> fs2
->
forall x : T,
x in [(t3_strict - d_strict), (t3_strict - d_strict) + d_strict]
->
~focusSelectorTurnedToAt fs2 x
)
)
.
all_e (
forall d : T,
d >= 0
/\
(
forall fs2 : FS,
fs_strict <> fs2
->
forall x : T,
x in [(t3_strict - d_strict), (t3_strict - d_strict) + d]
->
~focusSelectorTurnedToAt fs2 x
)
->
focusSettingAt fs_strict ((t3_strict - d_strict) + d)
)
d_strict
.
imp_e (
focusSelectorTurnedToAt fs_strict (t3_strict - d_strict)
)
.
all_e (
forall (fs : FS),
focusSelectorTurnedToAt fs (t3_strict - d_strict)
->
(
forall d: T,
d >= 0
(* possibly replace by t2 where t2 = t+d ?????????????????????????????*)
/\
(
forall fs2 : FS,
fs <> fs2
->
(
forall x: T,
x in [(t3_strict - d_strict), (t3_strict - d_strict) + d]
->
~focusSelectorTurnedToAt fs2 x
)
)
->
focusSettingAt fs ((t3_strict - d_strict) + d)
)
)
fs_strict
.
all_e (
forall (t : T) (fs : FS),
focusSelectorTurnedToAt fs t
->
(
forall d: T,
d >= 0
(* possibly replace by t2 where t2 = t+d ?????????????????????????????*)
/\
(
forall fs2 : FS,
fs <> fs2
->
(
forall x: T,
x in [t, t + d]
->
~focusSelectorTurnedToAt fs2 x
)
)
->
focusSettingAt fs (t + d)
)
)
(t3_strict - d_strict)
.
hyp H7.
replace (t3_strict - d_strict) with y2_strict.
hyp H21.
replace d_strict with (t3_strict - y2_strict).
lin_solve.
con_i.
replace d_strict with (t3_strict - y2_strict).
imp_e (
t2_strict <= y2_strict
/\
y2_strict < t3_strict
)
.
imp_i a6.
imp_e (
t2_strict <= y2_strict
)
.
imp_i ab7.
imp_e (
y2_strict < t3_strict
)
.
imp_i ab8.
lin_solve.
con_e2 (
t2_strict <= y2_strict
)
.
hyp a6.
con_e1 (
y2_strict < t3_strict
)
.
hyp a6.
hyp H20.
replace (t3_strict - d_strict + d_strict) with t3_strict.
replace (t3_strict - d_strict) with y2_strict.
all_i fs2_strict.
imp_i a7.
all_i x_strict.
imp_i a8.
imp_e (
fs_strict <> fs2_strict
)
.
all_e (
forall fs2 : FS,
fs_strict <> fs2
->
~focusSelectorTurnedToAt fs2 x_strict
)
fs2_strict.
con_e2 (
forall ss2 : SS,
ss_strict <> ss2
->
~shutterSelectorTurnedToAt ss2 x_strict
)
.
con_e2 (
forall as2 : AS,
as1_strict <> as2
->
~apertureSelectorTurnedToAt as2 x_strict
)
.
imp_e (
x_strict in [t1_strict, t3_strict + ShutterTime ss_strict]
)
.
all_e (
forall t : T,
t in [t1_strict, t3_strict + ShutterTime ss_strict]
->
(
forall as2 : AS,
as1_strict <> as2
->
~apertureSelectorTurnedToAt as2 t
)
/\
(
forall ss2 : SS,
ss_strict <> ss2
->
~shutterSelectorTurnedToAt ss2 t
)
/\
(
forall fs2 : FS,
fs_strict <> fs2
->
~focusSelectorTurnedToAt fs2 t
)
)
x_strict.
hyp H12.
interval.
imp_e (
y2_strict <= x_strict
/\
x_strict <= t3_strict
)
.
imp_i ab7.
imp_e (
t2_strict <= y2_strict
/\
y2_strict < t3_strict
)
.
imp_i ab9.
imp_e (
t2_strict <= y2_strict
)
.
imp_i ab10.
imp_e (
y2_strict <= x_strict
)
.
imp_i ab11.
imp_e (
x_strict <= t3_strict
)
.
imp_i ab12.
imp_e (
x_strict < t3_strict + ShutterTime ss_strict
)
.
imp_i ab14.
con_i.
lin_solve.
lin_solve.
lin_solve.
con_e2 (
y2_strict <= x_strict
)
.
hyp ab7.
con_e1 (
x_strict <= t3_strict
)
.
hyp a8.
con_e1 (
y2_strict < t3_strict
)
.
hyp ab9.
hyp H20.
hyp a8.
hyp a7.
replace d_strict with (t3_strict - y2_strict).
lin_solve.
lin_solve.
lin_solve.
(* =========================================== *)
(* APERTURESETTING *)
(* =========================================== *)
replace t3_strict with ((t3_strict - d_strict) + d_strict).
imp_e (
d_strict >= 0
/\
(
forall as2 : AS,
as1_strict <> as2
->
forall x : T,
x in [(t3_strict - d_strict), (t3_strict - d_strict) + d_strict]
->
~apertureSelectorTurnedToAt as2 x
)
)
.
all_e (
forall d : T,
d >= 0
/\
(
forall as2 : AS,
as1_strict <> as2
->
forall x : T,
x in [(t3_strict - d_strict), (t3_strict - d_strict) + d]
->
~apertureSelectorTurnedToAt as2 x
)
->
apertureSettingAt as1_strict ((t3_strict - d_strict) + d)
)
d_strict
.
imp_e (
apertureSelectorTurnedToAt as1_strict (t3_strict - d_strict)
)
.
all_e (
forall (as1 : AS),
apertureSelectorTurnedToAt as1 (t3_strict - d_strict)
->
(
forall d: T,
d >= 0
(* possibly replace by t2 where t2 = t+d ?????????????????????????????*)
/\
(
forall as2 : AS,
as1 <> as2
->
(
forall x: T,
x in [(t3_strict - d_strict), (t3_strict - d_strict) + d]
->
~apertureSelectorTurnedToAt as2 x
)
)
->
apertureSettingAt as1 ((t3_strict - d_strict) + d)
)
)
as1_strict
.
all_e (
forall (t : T) (as1 : AS),
apertureSelectorTurnedToAt as1 t
->
(
forall d: T,
d >= 0
(* possibly replace by t2 where t2 = t+d ?????????????????????????????*)
/\
(
forall as2 : AS,
as1 <> as2
->
(
forall x: T,
x in [t, t + d]
->
~apertureSelectorTurnedToAt as2 x
)
)
->
apertureSettingAt as1 (t + d)
)
)
(t3_strict - d_strict)
.
hyp H6.
replace (t3_strict - d_strict) with y2_strict.
hyp H22.
replace d_strict with (t3_strict - y2_strict).
lin_solve.
con_i.
replace d_strict with (t3_strict - y2_strict).
imp_e (
t2_strict <= y2_strict
/\
y2_strict < t3_strict
)
.
imp_i a6.
imp_e (
t2_strict <= y2_strict
)
.
imp_i ab7.
imp_e (
y2_strict < t3_strict
)
.
imp_i ab8.
lin_solve.
con_e2 (
t2_strict <= y2_strict
)
.
hyp a6.
con_e1 (
y2_strict < t3_strict
)
.
hyp a6.
hyp H20.
replace (t3_strict - d_strict + d_strict) with t3_strict.
replace (t3_strict - d_strict) with y2_strict.
all_i as2_strict.
imp_i a7.
all_i x_strict.
imp_i a8.
imp_e (
as1_strict <> as2_strict
)
.
all_e (
forall as2 : AS,
as1_strict <> as2
->
~apertureSelectorTurnedToAt as2 x_strict
)
as2_strict.
con_e1 (
(
forall ss2 : SS,
ss_strict <> ss2
->
~shutterSelectorTurnedToAt ss2 x_strict
)
/\
(
forall fs2 : FS,
fs_strict <> fs2
->
~focusSelectorTurnedToAt fs2 x_strict
)
)
.
imp_e (
x_strict in [t1_strict, t3_strict + ShutterTime ss_strict]
)
.
all_e (
forall t : T,
t in [t1_strict, t3_strict + ShutterTime ss_strict]
->
(
forall as2 : AS,
as1_strict <> as2
->
~apertureSelectorTurnedToAt as2 t
)
/\
(
forall ss2 : SS,
ss_strict <> ss2
->
~shutterSelectorTurnedToAt ss2 t
)
/\
(
forall fs2 : FS,
fs_strict <> fs2
->
~focusSelectorTurnedToAt fs2 t
)
)
x_strict.
hyp H12.
interval.
imp_e (
y2_strict <= x_strict
/\
x_strict <= t3_strict
)
.
imp_i ab7.
imp_e (
t2_strict <= y2_strict
/\
y2_strict < t3_strict
)
.
imp_i ab9.
imp_e (
t2_strict <= y2_strict
)
.
imp_i ab10.
imp_e (
y2_strict <= x_strict
)
.
imp_i ab11.
imp_e (
x_strict <= t3_strict
)
.
imp_i ab12.
imp_e (
x_strict < t3_strict + ShutterTime ss_strict
)
.
imp_i ab14.
con_i.
lin_solve.
lin_solve.
lin_solve.
con_e2 (
y2_strict <= x_strict
)
.
hyp ab7.
con_e1 (
x_strict <= t3_strict
)
.
hyp a8.
con_e1 (
y2_strict < t3_strict
)
.
hyp ab9.
hyp H20.
hyp a8.
hyp a7.
replace d_strict with (t3_strict - y2_strict).
lin_solve.
lin_solve.
lin_solve.
Qed.
|
{"author": "NilsHasNoGithub", "repo": "coq_formatter", "sha": "88c4480e87834c334beee09f69170c9e0ae491fa", "save_path": "github-repos/coq/NilsHasNoGithub-coq_formatter", "path": "github-repos/coq/NilsHasNoGithub-coq_formatter/coq_formatter-88c4480e87834c334beee09f69170c9e0ae491fa/out.v"}
|
import matplotlib.pyplot as plt
import random as ran
import numpy as np
import os
from scipy import stats
# --- Simulation parameters and state (module-level globals) ---
result=0          # appears unused in this script
choice=0          # appears unused in this script
rate=0            # appears unused in this script
resultado=False   # appears unused in this script
exit=False        # appears unused; NOTE(review): shadows the builtin exit()
min = 0           # lowest wheel pocket; NOTE(review): shadows the builtin min()
maximo = 36       # highest wheel pocket (inclusive in GirarRuleta's randint)
cantidadTiradas = 80   # spins per game
cantidadJuegos = 5     # number of games simulated by Dalembert()
ruleta = []            # pocket numbers, filled by CrearRuleta()
capital = 1000         # starting bankroll in pesos
def CrearRuleta():
    """Populate the global `ruleta` with every pocket number and print it.

    Bug fix: ``range(min, maximo)`` omitted the last pocket (36), while
    GirarRuleta() draws inclusively from [min, maximo] via randint; using
    ``maximo + 1`` makes the printed wheel match the numbers that can
    actually come up.
    """
    ruleta.extend(range(min, maximo + 1))
    print("La ruleta es la siguiente:", ruleta)
def GirarRuleta():
    # Uniformly random pocket in [min, maximo]; both global bounds inclusive
    # (note `min` here is the module-level lower bound, not the builtin).
    return ran.randint(min,maximo)
def RealizarTiradas(cantidadTiradas):
    """Spin the wheel `cantidadTiradas` times and return the results already
    mapped to their bet region (0, 1 or 2) by AssignSpace."""
    return [AssignSpace(GirarRuleta()) for _ in range(cantidadTiradas)]
def AssignSpace(result):
    """Map a roulette pocket number to its bet region.

    Returns 0 for the zero pocket, 1 for the low half (1-18) and 2 for the
    high half (19-36). Any other value falls through and yields None.
    """
    if result == 0:
        return 0
    if 1 <= result <= 18:
        return 1
    if 19 <= result <= 36:
        return 2
def ChooseSide():
    """Pick, uniformly at random, which region to bet on: 1 or 2."""
    return ran.choice((1, 2))
def calcularFrCadaElemento():
    """Return the relative frequency of each distinct outcome in the global
    `tiradas` list (ordered ascending, following np.unique).

    Bug fix: the original also built an unused local
    ``fkn = np.asarray((ruleta, fk)).T`` from two sequences of different
    lengths, which raises ValueError on modern NumPy (ragged-array creation
    is disallowed); that dead code and the unused ``unicos`` were removed.
    """
    _, cuenta = np.unique(tiradas, return_counts=True)
    return cuenta / cantidadTiradas
def Dalembert(capital):
    """Simulate `cantidadJuegos` games of the D'Alembert betting strategy.

    Bets on the globally chosen side (`lado`); the stake drops by one after a
    win (never below 1) and rises by one after a loss. Each game plays
    `cantidadTiradas` spins starting from `capital`, then a series of
    matplotlib figures is shown for the last game and for all games together.
    """
    capital_inicial = capital
    historico = []   # per-game outcome codes: 0 = ruined, 1 = ended >= start
    cincoFRs = []    # per-game running win-frequency series
    cincoHCs = []    # per-game running capital series
    for i in range(0,cantidadJuegos):
        frecuenciasRelativas = []
        historioCapital = []
        apuesta = 1                   # current stake (D'Alembert unit)
        capital_inicial = capital     # bankroll resets every game
        indice = 0
        vecesGanadas = 0
        # NOTE(review): the inner loop reuses `i`, shadowing the game counter;
        # harmless here because `i` is not read after either loop.
        for i in range(0,cantidadTiradas):
            indice += 1
            tirada = AssignSpace(GirarRuleta())
            if lado == tirada:
                # Win: collect the stake, then reduce it by one (floor of 1).
                capital_inicial += apuesta
                vecesGanadas += 1
                print(tirada,apuesta, capital_inicial)
                if apuesta > 1:
                    apuesta = apuesta - 1
            else:
                # Loss: pay the stake, then raise it by one.
                capital_inicial -= apuesta
                print(tirada,apuesta, capital_inicial)
                apuesta = apuesta + 1
            frecuenciasRelativas.append(vecesGanadas/indice)
            historioCapital.append(capital_inicial)
        cincoFRs.append(frecuenciasRelativas)
        cincoHCs.append(historioCapital)
        if capital_inicial <= 0:
            historico.append(0)
        elif capital_inicial >= capital:
            historico.append(1)
    # The first three plots describe the LAST game only: frecuenciasRelativas,
    # historioCapital and indice keep their values from the final iteration.
    #Grafica frecuencia relativa
    plt.title('Frecuencia relativa apuesta favorable con '+str(indice)+' tiradas')
    plt.bar(range(0,indice), frecuenciasRelativas,1, color="red",edgecolor="blue")
    plt.ylim(0,1)
    plt.xlim(-1,indice)
    # Dotted reference line at 18/37 (one side's share of the 37 pockets).
    plt.axhline(18/37, color='k',ls="dotted", xmax=indice)
    plt.grid(True)
    plt.show()
    plt.clf()
    #Grafica frecuencia relativa
    plt.title('Frecuencia relativa de apuesta favorable')
    plt.plot(range(0,indice),frecuenciasRelativas)
    plt.xlabel("Tiradas")
    plt.ylabel("Frecuencia Relativa")
    plt.axhline(18/37, color='k',ls="dotted", xmax=indice)
    plt.ylim(0,1)
    plt.xlim(0,indice)
    plt.show()
    #Grafica fluctuacion de capital
    plt.title('Capital en cada tirada')
    plt.plot(range(0,indice),historioCapital)
    plt.xlabel("Tiradas")
    plt.ylabel("Cantidad de capital en pesos")
    plt.axhline(capital, color='k',ls="dotted", xmax=indice)
    plt.ylim(600,(capital*2)-600)
    plt.xlim(0,indice)
    plt.show()
    # The next two figures overlay all games stored in cincoFRs / cincoHCs.
    #Grafica frecuencia relativa de 5
    plt.title('Frecuencia relativa de apuesta favorable')
    for fr in cincoFRs:
        plt.plot(range(0,indice),fr)
    plt.xlabel("Tiradas")
    plt.ylabel("Frecuencia Relativa")
    plt.axhline(18/37, color='k',ls="dotted", xmax=indice)
    plt.ylim(0,1)
    plt.xlim(0,indice)
    plt.show()
    #Grafica fluctuacion de capital de 5
    plt.title('Capital en cada tirada')
    for hc in cincoHCs:
        plt.plot(range(0,indice),hc)
    plt.plot(range(0,indice),historioCapital)
    plt.xlabel("Tiradas")
    plt.ylabel("Cantidad de capital en pesos")
    plt.axhline(capital, color='k',ls="dotted", xmax=indice)
    plt.ylim(600,(capital*2)-600)
    plt.xlim(0,indice)
    plt.show()
    #Grafica fluctuacion de capital vs fr
    plt.title('Porcentaje de capital con \n respecto a la freceuncia \n en cada tirada')
    plt.plot(range(0,indice),frecuenciasRelativas)
    # Normalise capital to [0, 1] so it can share axes with the frequency.
    historioCapital = np.asarray(historioCapital)/max(historioCapital)
    plt.plot(range(0,indice),historioCapital)
    plt.xlabel("Tiradas")
    plt.ylabel("Cantidad de capital en pesos")
    plt.axhline(capital, color='k',ls="dotted", xmax=indice)
    plt.ylim(-0.5,1.5)
    plt.xlim(0,indice)
    plt.show()
    '''print(historico)
    unicos, cuenta = np.unique(historico, return_counts=True)
    print(unicos,cuenta)
    if cuenta[0] and cuenta[1]:
        print("perdida: ", cuenta[0]*capital)
        print("ganancia: ", cuenta[1]*10)
        print("neto: ", cuenta[1]*10 - cuenta[0]*capital)'''
# Script entry point: build the wheel, pre-draw some spins, pick a side at
# random, then run the D'Alembert simulation.
if __name__ == "__main__":
    CrearRuleta()
    tiradas = RealizarTiradas(cantidadTiradas) # spins already mapped to 0/1/2
    lado = ChooseSide() # choose a random "side" to bet on
    print(lado)
    Dalembert(capital)
|
{"hexsha": "2156263feb1d9c56305110f89123fdc4b2d2b934", "size": 4902, "ext": "py", "lang": "Python", "max_stars_repo_path": "TP1_2Estrategias-dlambert_v2.py", "max_stars_repo_name": "NicolasBologna/Simulacion", "max_stars_repo_head_hexsha": "d407379b182a373d9f28ca74c25472e711b5fc0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TP1_2Estrategias-dlambert_v2.py", "max_issues_repo_name": "NicolasBologna/Simulacion", "max_issues_repo_head_hexsha": "d407379b182a373d9f28ca74c25472e711b5fc0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TP1_2Estrategias-dlambert_v2.py", "max_forks_repo_name": "NicolasBologna/Simulacion", "max_forks_repo_head_hexsha": "d407379b182a373d9f28ca74c25472e711b5fc0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8352941176, "max_line_length": 88, "alphanum_fraction": 0.633618931, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1418}
|
[STATEMENT]
lemma (in Ring) prime_nprod_exc:"\<lbrakk>prime_ideal R P; \<forall>i \<le> n. f i \<in> carrier R;
\<forall>l \<le> n. f l \<notin> P\<rbrakk> \<Longrightarrow> nprod R f n \<notin> P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>prime_ideal R P; \<forall>i\<le>n. f i \<in> carrier R; \<forall>l\<le>n. f l \<notin> P\<rbrakk> \<Longrightarrow> e\<Pi>\<^bsub>R,n\<^esub> f \<notin> P
[PROOF STEP]
by (simp add:nprod_excTr)
|
{"llama_tokens": 200, "file": "Group-Ring-Module_Algebra4", "length": 1}
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright(c) 2019 Aalborg University
# Joakim Bruslund Haurum, May 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import json
import yaml
import numpy as np
import matplotlib.pyplot as plt
def load_labels(filename):
    """
    Loads a JSON file containing the labels.

    Input:
        filename: name of the JSON file
    Output:
        label_dict: Dict containing the label dicts for each video
    """
    # Context manager guarantees the handle is closed even on a parse error.
    with open(filename) as data_file:
        label_dict = json.load(data_file)
    # Bug fix: the original also peeked at label_dict[<first key>]["labels"]
    # into unused locals (`keys`, `tmp`); that dead code crashed on empty or
    # differently-shaped files and has been removed.
    return label_dict
def read_yaml(file):
    """
    Takes a YAML formatted file as input, and returns it as dictionary.

    Input:
        file : File path to the input file (Assumed to have '%YAML:1.0' as its first line)

    Output:
        Returns a python dictionary containing the elements in the YAML input file
    """
    with open(file, 'r') as fi:
        fi.readline()  # Skip the %YAML:1.0 header on the first line
        # Bug fix: PyYAML >= 6.0 requires an explicit Loader argument, so the
        # bare yaml.load(fi) call raised TypeError there. yaml.Loader keeps
        # the old default behaviour; prefer yaml.SafeLoader for untrusted
        # input if the file's tags allow it.
        return yaml.load(fi, Loader=yaml.Loader)
def plot_graph(x, y, x_label, y_label, title, x_range = (-0.05, 1.05), y_range = (-0.05, 1.05), legend = None, output_dir="", output_filename=""):
    '''
    Draws one or more line series on a fresh figure and saves it to disk.

    Input:
        x: shared x-axis values
        y: y-axis values; a 2-D numpy array is treated as several series,
           one per row, all plotted against the same x values
        x_label: label for the x axis
        y_label: label for the y axis
        title: figure title
        x_range: (low, high) limits of the x axis
        y_range: (low, high) limits of the y axis
        legend: optional sequence of legend entries, placed outside the axes
        output_dir: directory the figure is written into
        output_filename: filename of the saved figure
    '''
    plt.clf()
    plt.figure()
    # type() (not isinstance) mirrors the original exact-type check.
    if type(y) is np.ndarray:
        for series in y:
            plt.plot(x, series)
    else:
        plt.plot(x, y)
    plt.grid()
    plt.title(title)
    plt.ylim(*y_range)
    plt.xlim(*x_range)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    if legend:
        # Anchor the legend just outside the top-right corner of the axes.
        plt.legend(legend, bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.savefig(os.path.join(output_dir, output_filename), bbox_inches='tight')
    plt.close()
def make_metrics_plots(metric_dict, thresholds, output_folder, output_filename):
    '''
    Takes a dictionary containing the different binary-classification metrics, and creates a set of plots for each of them.

    Input:
        metric_dict: Contains the different binary classification metrics
        thresholds: Threshold values used to calculate the different metric values
        output_folder: Folder where the output should be saved
        output_filename: Base filename. Assumed to end on .pdf
    '''
    # Unpack the per-threshold tuples stored in metric_dict into flat series.
    TPR = [x[0] for x in metric_dict["Type Rates"]]
    TNR = [x[1] for x in metric_dict["Type Rates"]]
    FPR = [x[2] for x in metric_dict["Type Rates"]]
    FNR = [x[3] for x in metric_dict["Type Rates"]]
    NPV = [x[1] for x in metric_dict["Predictive Values"]]
    Precision = [x[0] for x in metric_dict["Precision-Recall"]]
    Recall = [x[1] for x in metric_dict["Precision-Recall"]]
    f_score = [x[0] for x in metric_dict["F1-score"]]
    inv_f_score = [x[1] for x in metric_dict["F1-score"]]
    # These metrics are stored as flat per-threshold sequences already.
    acc = metric_dict["Accuracy"]
    inf = metric_dict["Informedness"]
    marked = metric_dict["Markedness"]
    MCC = metric_dict["MCC"]
    # One figure per metric; each reuses the base filename with a suffix.
    plot_graph(FPR, TPR, "FPR", "TPR", "Receiver Operating Charateristic", output_dir = output_folder, output_filename = output_filename.replace(".pdf","_ROC.pdf"))
    plot_graph(Recall, Precision, "Recall", "Precision", "Precision-Recall (TP)", output_dir = output_folder, output_filename =output_filename.replace(".pdf","_PR.pdf"))
    plot_graph(TNR, NPV, "Inv. Recall", "Inv. Precision", "Precision-Recall (TN)", output_dir = output_folder, output_filename =output_filename.replace(".pdf","_PRInv.pdf"))
    plot_graph(thresholds, f_score, "Threshold", "F1-Score (TP)", "F1-Score (TP)", output_dir = output_folder, output_filename =output_filename.replace(".pdf","_F1S.pdf"))
    plot_graph(thresholds, inv_f_score, "Threshold", "F1-Score (TN)", "F1-Score (TN)", output_dir = output_folder, output_filename =output_filename.replace(".pdf","_F1SInv.pdf"))
    plot_graph(thresholds, acc, "Threshold", "Accuracy", "Accuracy", output_dir = output_folder, output_filename =output_filename.replace(".pdf","_Acc.pdf"))
    plot_graph(thresholds, inf, "Threshold", "Informedness", "Informedness", output_dir = output_folder, output_filename =output_filename.replace(".pdf","_Inf.pdf"))
    plot_graph(thresholds, marked, "Threshold", "Markedness", "Markedness", output_dir = output_folder, output_filename =output_filename.replace(".pdf","_Mark.pdf"))
    # MCC ranges over [-1, 1], hence the custom y_range below.
    plot_graph(thresholds, MCC, "Threshold", "MCC", "Matthews Correlation Coefficient", (-0.05, 1.05), (-1.05, 1.05), output_dir = output_folder, output_filename = output_filename.replace(".pdf","_MCC.pdf"))
    # Paired series share one figure each (2-D array input to plot_graph).
    plot_graph(thresholds, np.asarray([TPR, TNR]), "Threshold", "True Rate", "Type Rates", legend = ("TPR", "TNR"), output_dir = output_folder, output_filename =output_filename.replace(".pdf","_TR.pdf"))
    plot_graph(thresholds, np.asarray([FPR, FNR]), "Threshold", "False Rate", "Type Rates", legend = ("FPR", "FNR"), output_dir = output_folder, output_filename =output_filename.replace(".pdf","_FR.pdf"))
def get_frame_label(labels, offset, FPM, frame_num):
    """Retrieve the per-minute label for a given frame.

    Input:
        labels: list of per-minute labels for the video
        offset: number of frames belonging to the partial starting minute;
                those frames all map to the first label
        FPM: frames per minute in the video
        frame_num: frame number to look up
    Output:
        the label for that frame, and the label index (the minute)
    """
    if offset > 0 and frame_num <= offset:
        # Still inside the partial first minute.
        ind = 0
    elif offset > 0:
        # Past the offset: count whole/partial minutes after it, rounding up.
        # e.g. offset=300, frame_num=400, FPM=1800 -> ceil(100/1800) = 1
        ind = int(np.ceil((frame_num - offset) / FPM))
    else:
        # No offset: plain minute index, rounding down.
        # e.g. frame_num=400, FPM=1800 -> floor(400/1800) = 0
        ind = int(np.floor(frame_num / FPM))
    # Clamp to the last available label.
    ind = min(ind, len(labels) - 1)
    return labels[ind], ind
|
{"hexsha": "ae9b0ebd55edacc2499ed118741f09d6d41de239", "size": 8456, "ext": "py", "lang": "Python", "max_stars_repo_path": "Analysis/utils.py", "max_stars_repo_name": "chrisbahnsen/aau-virada", "max_stars_repo_head_hexsha": "9c5a65fdbbba6d80a6d8058bf520664e8663350d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-21T12:13:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-07T07:24:47.000Z", "max_issues_repo_path": "Analysis/utils.py", "max_issues_repo_name": "chrisbahnsen/aau-virada", "max_issues_repo_head_hexsha": "9c5a65fdbbba6d80a6d8058bf520664e8663350d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-06-29T06:23:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-02T09:12:40.000Z", "max_forks_repo_path": "Analysis/utils.py", "max_forks_repo_name": "chrisbahnsen/aau-virada", "max_forks_repo_head_hexsha": "9c5a65fdbbba6d80a6d8058bf520664e8663350d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-23T02:05:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-23T02:05:10.000Z", "avg_line_length": 43.8134715026, "max_line_length": 207, "alphanum_fraction": 0.6686376537, "include": true, "reason": "import numpy", "num_tokens": 2136}
|
(* An infinite stream of natural numbers. *)
CoInductive stream : Set :=
| Cons : nat -> stream -> stream.

(* The constant stream 1, 1, 1, ...
   Bug fix: the constructor was declared lowercase [cons] but referenced as
   [Cons] in [ones], so the file did not compile; the names now agree
   (capitalised, matching the usage site). *)
CoFixpoint ones : stream := Cons 1 ones.
|
{"author": "chemouna", "repo": "coq-experiment", "sha": "a9736d13a042a334422412a1d4d41b0d557cc1f4", "save_path": "github-repos/coq/chemouna-coq-experiment", "path": "github-repos/coq/chemouna-coq-experiment/coq-experiment-a9736d13a042a334422412a1d4d41b0d557cc1f4/experiment1.v"}
|
# Announce the demo configuration before any heavy imports run.
print("Running area coverage example 1\nUsing source files for package imports\nPARAMETERS:\
Using default hardcoded weight_dicts (listed in swarm_tasks/logs)")
import sys,os
# Make the repository root importable so `swarm_tasks` resolves from source.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),'../../..')))
print(sys.path)
import swarm_tasks
#Set demo parameters directly
import numpy as np
import random
# Override robot defaults for this demo.
swarm_tasks.utils.robot.DEFAULT_NEIGHBOURHOOD_VAL = 6
swarm_tasks.utils.robot.DEFAULT_SIZE = 0.4
swarm_tasks.utils.robot.MAX_SPEED = 1.5
# Bug fix: the original wrote `MAX_ANGULAR: 1.0` — an annotation expression
# that evaluates to nothing — so the limit was never actually set.
swarm_tasks.utils.robot.MAX_ANGULAR = 1.0
# Fixed seeds for reproducible runs.
np.random.seed(42)
random.seed(42)
from swarm_tasks.simulation import simulation as sim
from swarm_tasks.simulation import visualizer as viz
import swarm_tasks.utils as utils
import swarm_tasks.envs as envs
import swarm_tasks.controllers as ctrl
import swarm_tasks.controllers.potential_field as potf
import swarm_tasks.controllers.base_control as base_control
from swarm_tasks.tasks import area_coverage as cvg
import numpy as np
# Build a 10-robot simulation in the 'rectangles' environment with attractors,
# and draw the initial state.
s = sim.Simulation(num_bots=10, env_name='rectangles', contents_file='attractors')
gui = viz.Gui(s)
gui.show_env()
gui.show_bots()
gui.show_grid()
# Main loop: every tick each robot executes one area-coverage command, then
# the occupancy grid and display are refreshed.
while 1:
    for b in s.swarm:
        cmd = cvg.disp_exp_area_cvg(b)
        cmd.exec(b)
    s.update_grid()
    gui.show_grid()
    gui.update()
    s.time_elapsed+=1
# NOTE(review): unreachable — the `while 1` loop above never terminates.
gui.run()
|
{"hexsha": "3838d4f527d8c9570acef4f74bca934e26952b72", "size": 1284, "ext": "py", "lang": "Python", "max_stars_repo_path": "swarm_tasks/Examples/full_tasks/area_cvg-1.py", "max_stars_repo_name": "rmvanarse/swarm_tasks", "max_stars_repo_head_hexsha": "3335297ba8fcdbff756ae519002bcce919d54a84", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-03-13T12:54:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T12:12:28.000Z", "max_issues_repo_path": "swarm_tasks/Examples/full_tasks/area_cvg-1.py", "max_issues_repo_name": "rmvanarse/swarm_tasks", "max_issues_repo_head_hexsha": "3335297ba8fcdbff756ae519002bcce919d54a84", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "swarm_tasks/Examples/full_tasks/area_cvg-1.py", "max_forks_repo_name": "rmvanarse/swarm_tasks", "max_forks_repo_head_hexsha": "3335297ba8fcdbff756ae519002bcce919d54a84", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-06T15:02:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T12:11:30.000Z", "avg_line_length": 23.7777777778, "max_line_length": 92, "alphanum_fraction": 0.7897196262, "include": true, "reason": "import numpy", "num_tokens": 326}
|
//=======================================================================
// Copyright 2001 Jeremy G. Siek, Andrew Lumsdaine, Lie-Quan Lee,
//
// This file is part of the Boost Graph Library
//
// You should have received a copy of the License Agreement for the
// Boost Graph Library along with the software; see the file LICENSE.
// If not, contact Office of Research, Indiana University,
// Bloomington, IN 47405.
//
// Permission to modify the code and to distribute the code is
// granted, provided the text of this NOTICE is retained, a notice if
// the code was modified is included with the above COPYRIGHT NOTICE
// and with the COPYRIGHT NOTICE in the LICENSE file, and that the
// LICENSE file is distributed with the modified code.
//
// LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED.
// By way of example, but not limitation, Licensor MAKES NO
// REPRESENTATIONS OR WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY
// PARTICULAR PURPOSE OR THAT THE USE OF THE LICENSED SOFTWARE COMPONENTS
// OR DOCUMENTATION WILL NOT INFRINGE ANY PATENTS, COPYRIGHTS, TRADEMARKS
// OR OTHER RIGHTS.
//=======================================================================
#include <boost/config.hpp>
#include <iostream>
#include <fstream>
#include <string>
#include <boost/tokenizer.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/visitors.hpp>
#include <boost/graph/breadth_first_search.hpp>
#include <map>
using namespace boost;
template < typename DistanceMap > class bacon_number_recorder:public default_bfs_visitor
{
public:
bacon_number_recorder(DistanceMap dist):d(dist) {
}
template < typename Edge, typename Graph >
void tree_edge(Edge e, const Graph & g) const
{
typename graph_traits < Graph >::vertex_descriptor
u = source(e, g), v = target(e, g);
d[v] = d[u] + 1;
}
private:
DistanceMap d;
};
// Convenience function
// Convenience function: deduces DistanceMap from the argument so callers
// need not spell out the visitor's template parameter.
template < typename DistanceMap >
bacon_number_recorder < DistanceMap > record_bacon_number(DistanceMap d)
{
  return bacon_number_recorder < DistanceMap > (d);
}
int
main()
{
  // Dataset: one "actor1;movie;actor2" record per line.
  std::ifstream datafile("./kevin-bacon.dat");
  if (!datafile) {
    std::cerr << "No ./kevin-bacon.dat file" << std::endl;
    return EXIT_FAILURE;
  }

  // Undirected graph: vertices carry actor names, edges carry movie titles.
  typedef adjacency_list < vecS, vecS, undirectedS, property < vertex_name_t,
    std::string >, property < edge_name_t, std::string > > Graph;
  Graph g;

  typedef property_map < Graph, vertex_name_t >::type actor_name_map_t;
  actor_name_map_t actor_name = get(vertex_name, g);
  typedef property_map < Graph, edge_name_t >::type movie_name_map_t;
  movie_name_map_t connecting_movie = get(edge_name, g);

  typedef graph_traits < Graph >::vertex_descriptor Vertex;
  // Name -> vertex lookup so each actor gets exactly one vertex.
  typedef std::map < std::string, Vertex > NameVertexMap;
  NameVertexMap actors;

  for (std::string line; std::getline(datafile, line);) {
    // Split the record on ';' into actor, movie, actor.
    char_delimiters_separator < char >sep(false, "", ";");
    tokenizer <> line_toks(line, sep);
    tokenizer <>::iterator i = line_toks.begin();
    std::string actors_name = *i++;
    NameVertexMap::iterator pos;
    bool inserted;
    Vertex u, v;
    // First actor: insert a fresh vertex only if the name is new.
    tie(pos, inserted) = actors.insert(std::make_pair(actors_name, Vertex()));
    if (inserted) {
      u = add_vertex(g);
      actor_name[u] = actors_name;
      pos->second = u;
    } else
      u = pos->second;

    std::string movie_name = *i++;

    // Second actor: same insert-or-lookup, `*i` is now the second name.
    tie(pos, inserted) = actors.insert(std::make_pair(*i, Vertex()));
    if (inserted) {
      v = add_vertex(g);
      actor_name[v] = *i;
      pos->second = v;
    } else
      v = pos->second;

    // Connect the two actors; label the edge with the shared movie
    // (only if this edge did not already exist).
    graph_traits < Graph >::edge_descriptor e;
    tie(e, inserted) = add_edge(u, v, g);
    if (inserted)
      connecting_movie[e] = movie_name;
  }

  // BFS from Kevin Bacon; the visitor fills in each actor's distance.
  std::vector < int >bacon_number(num_vertices(g));

  Vertex src = actors["Kevin Bacon"];
  bacon_number[src] = 0;

  breadth_first_search(g, src,
                       visitor(record_bacon_number(&bacon_number[0])));

  graph_traits < Graph >::vertex_iterator i, end;
  for (tie(i, end) = vertices(g); i != end; ++i) {
    std::cout << actor_name[*i] << " has a Bacon number of "
      << bacon_number[*i] << std::endl;
  }

  return 0;
}
|
{"hexsha": "4a47c8c4480f65ef4f847bd219fb7c5df4d5c545", "size": 4318, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "sdk/boost_1_30_0/libs/graph/example/kevin-bacon.cpp", "max_stars_repo_name": "acidicMercury8/xray-1.0", "max_stars_repo_head_hexsha": "65e85c0e31e82d612c793d980dc4b73fa186c76c", "max_stars_repo_licenses": ["Linux-OpenIB"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-01-30T12:51:49.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-31T08:36:49.000Z", "max_issues_repo_path": "sdk/boost_1_30_0/libs/graph/example/kevin-bacon.cpp", "max_issues_repo_name": "acidicMercury8/xray-1.0", "max_issues_repo_head_hexsha": "65e85c0e31e82d612c793d980dc4b73fa186c76c", "max_issues_repo_licenses": ["Linux-OpenIB"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sdk/boost_1_30_0/libs/graph/example/kevin-bacon.cpp", "max_forks_repo_name": "acidicMercury8/xray-1.0", "max_forks_repo_head_hexsha": "65e85c0e31e82d612c793d980dc4b73fa186c76c", "max_forks_repo_licenses": ["Linux-OpenIB"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4661654135, "max_line_length": 89, "alphanum_fraction": 0.6440481704, "num_tokens": 1037}
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converters for jax2tf."""
import functools
import tempfile
from jax.experimental import jax2tf
from jax.experimental.jax2tf.examples import saved_model_lib
from jax.experimental.jax2tf.examples_eval import examples_converter
import tensorflow as tf
from tensorflowjs.converters import converter as tfjs_converter
TempDir = tempfile.TemporaryDirectory
def jax2tf_to_tfjs(module: examples_converter.ModuleToConvert):
  """Converts the given `module` to TF.js via an intermediate SavedModel."""
  input_spec = tf.TensorSpec(
      shape=module.input_shape, dtype=module.dtype, name='input')
  with TempDir() as saved_model_path, TempDir() as converted_model_path:
    # The SavedModel must be written with with_gradient=True: otherwise
    # jax2tf emits a "PreventGradient" op, which the TF.js converter does
    # not support.
    saved_model_lib.convert_and_save_model(
        module.apply,
        module.variables,
        saved_model_path,
        input_signatures=[input_spec],
        with_gradient=True,
        compile_model=False,
        enable_xla=False)
    tfjs_converter.convert([saved_model_path, converted_model_path])
def jax2tf_to_tflite(module: examples_converter.ModuleToConvert):
  """Converts the given `module` to a TFLite model via jax2tf."""
  bound_apply = functools.partial(module.apply, module.variables)
  input_spec = tf.TensorSpec(
      shape=module.input_shape, dtype=module.dtype, name='input')
  tf_predict = tf.function(
      jax2tf.convert(bound_apply, enable_xla=False),
      input_signature=[input_spec],
      autograph=False)
  # Build the TFLite converter from the traced concrete function.
  converter = tf.lite.TFLiteConverter.from_concrete_functions(
      [tf_predict.get_concrete_function()])
  converter.target_spec.supported_ops = [
      tf.lite.OpsSet.TFLITE_BUILTINS,  # enable TensorFlow Lite ops.
      tf.lite.OpsSet.SELECT_TF_OPS  # enable TensorFlow ops.
  ]
  converter.convert()
|
{"hexsha": "c3a53b381ec79c397c325dbea739240be51d0e61", "size": 2536, "ext": "py", "lang": "Python", "max_stars_repo_path": "jax/experimental/jax2tf/examples_eval/converters.py", "max_stars_repo_name": "iolloj/jax", "max_stars_repo_head_hexsha": "1b80feea6acf758fd9dc3e616e8efcb8db831ce9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 17375, "max_stars_repo_stars_event_min_datetime": "2018-11-18T02:15:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T23:49:46.000Z", "max_issues_repo_path": "jax/experimental/jax2tf/examples_eval/converters.py", "max_issues_repo_name": "iolloj/jax", "max_issues_repo_head_hexsha": "1b80feea6acf758fd9dc3e616e8efcb8db831ce9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5018, "max_issues_repo_issues_event_min_datetime": "2018-11-22T17:04:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:36:25.000Z", "max_forks_repo_path": "jax/experimental/jax2tf/examples_eval/converters.py", "max_forks_repo_name": "dan-zheng/jax", "max_forks_repo_head_hexsha": "752823e568cd080419d3d6d38ff51ecde16f7421", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1805, "max_forks_repo_forks_event_min_datetime": "2018-11-21T10:13:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T23:49:19.000Z", "avg_line_length": 36.2285714286, "max_line_length": 78, "alphanum_fraction": 0.7204258675, "include": true, "reason": "import jax,from jax", "num_tokens": 564}
|
import numpy as np
from tqdm import *
from utils import DataLoaderX
from dataset import collate
from math import *
def prediction(data, model, batch_size, cuda):
    """Run `model` over `data` batch by batch and return the collected outputs.

    When `cuda` is set, each batch's output is moved back to the CPU
    before being accumulated.
    """
    loader = DataLoaderX(data, batch_size=batch_size, collate_fn=collate,
                         num_workers=0)
    # NOTE(review): setting the attribute directly skips submodules;
    # model.eval() is the usual idiom -- confirm this is intended.
    model.training = False
    results = []
    for batch in tqdm(loader):
        batch['data'] = batch['data'].float()
        output = model(batch['data'])
        results += output.cpu() if cuda else output
    return results
def recovery(ori_shape, output, size):
    """Reassemble a full-size image from network output patches.

    Args:
        ori_shape: (channels, height, width) of the original image.
        output: list of model outputs (torch-like tensors; each element has
            `.detach().numpy()` called on it).
        size: (patch_height, patch_width) used when the image was padded
            or tiled.

    Returns:
        numpy array of shape `ori_shape`.
    """
    if size[0] >= ori_shape[1] or size[1] >= ori_shape[2]:
        # The image was padded up to `size`: strip the symmetric padding.
        result = output[0].detach().numpy()
        diff_x = size[0] - ori_shape[1]
        diff_y = size[1] - ori_shape[2]
        # Use explicit end indices instead of negative ones so a zero-width
        # padding (size equal to the original extent, reachable because the
        # branch condition is `>=`) does not produce an empty `[0:-0]` slice.
        x0, y0 = diff_x // 2, diff_y // 2
        x1 = result.shape[1] - (diff_x - x0)
        y1 = result.shape[2] - (diff_y - y0)
        return result[:, x0:x1, y0:y1]
    # Otherwise the image was tiled: stitch the patches back together.
    h, w = size[0], size[1]
    cols = ceil(ori_shape[2] / w)
    rows = ceil(ori_shape[1] / h)
    assert rows * cols == len(output)
    results = np.zeros((ori_shape[0], rows * size[0], cols * size[1]))
    for i, out in enumerate(output):
        out = out.detach().numpy()
        # Each patch carries an 8-pixel border overlap that is discarded.
        out = out[:, 8:-8, 8:-8]
        end_col = (i + 1) % cols * size[1] if (i + 1) % cols > 0 else cols * size[1]
        results[:, i // cols * size[0]:(i // cols + 1) * size[0],
                i % cols * size[1]:end_col] = out
    # Crop away the tiling overshoot on the right/bottom.
    return results[:, 0:ori_shape[1], 0:ori_shape[2]]
if __name__ == '__main__':
    # Quick sanity check of the numpy slicing used in `recovery`.
    demo = np.zeros((4, 3, 3))
    print(demo[:, 0:-1, 0:-1].shape)
|
{"hexsha": "d187fe47d5524b63a0f74b45076d6dc9c23a3d02", "size": 1556, "ext": "py", "lang": "Python", "max_stars_repo_path": "predict.py", "max_stars_repo_name": "SuperbTUM/RAW-image-denoising", "max_stars_repo_head_hexsha": "9f81be8da6a576f641022707d98b8c37f5c599ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-10-18T04:13:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T14:10:46.000Z", "max_issues_repo_path": "predict.py", "max_issues_repo_name": "SuperbTUM/computational-photography", "max_issues_repo_head_hexsha": "9f81be8da6a576f641022707d98b8c37f5c599ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-12-10T02:59:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T03:32:09.000Z", "max_forks_repo_path": "predict.py", "max_forks_repo_name": "SuperbTUM/computational-photography", "max_forks_repo_head_hexsha": "9f81be8da6a576f641022707d98b8c37f5c599ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-10T02:57:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-10T02:57:34.000Z", "avg_line_length": 32.4166666667, "max_line_length": 93, "alphanum_fraction": 0.5616966581, "include": true, "reason": "import numpy", "num_tokens": 476}
|
c..............................................................................
      subroutine SetUniOutPres(BC)
c
c.... Apply the uniform pressure outPres1 (from common.h) to every node
c.... of the boundary surface whose ID is isetOutPres. BC(:,1) holds the
c.... pressure boundary-condition slot.
c
      include "common.h"
      include "mpif.h"
      include "auxmpi.h"
      integer BCfaceNode(nshg)
      integer nBCfaceNode
      integer isfID
      real*8 BC(nshg,ndofBC)
c.... announce the value being applied (rank 0 only)
      if(myrank.eq.0)write(*,*)'Outlet pressure:',outPres1
c.... gather the node numbers belonging to surface isfID
      isfID=isetOutPres
      call sfID2np(isfID,nBCfaceNode,BCfaceNode)
c.... write the uniform pressure into BC slot 1 for each surface node
      if(nBCfaceNode .gt. 0)then
        do i=1,nBCfaceNode
          nn=BCfaceNode(i)
          BC(nn,1)=outPres1
        enddo
      endif
      return
      end
c..............................................................................
      subroutine setInlet_Duct(x,BC,iTurbWall)
c
c.... Set temperature and x-velocity boundary conditions on the duct
c.... inlet surface (isetInlet_Duct), blending both to wall values within
c.... a 1.0e-4 m layer next to the side walls. Nodes flagged as turbulence
c.... wall nodes (iTurbWall .ne. 0) are skipped.
c
      include "common.h"
      include "mpif.h"
      include "auxmpi.h"
      real*8 BC(nshg,ndofBC)
      real*8 x(nshg,nsd)
      integer i,nn
      real*8 xcoor,ycoor,zcoor
      real ry,rz
      real*8 Temp,xVel
      integer BCfaceNode(nshg)
      integer nBCfaceNode
      integer isfID
      integer iTurbWall(nshg)
      if(myrank.eq.0)write(*,*)'Inlet surf:',isetInlet_Duct
c.... gather the node numbers of the inlet surface
      isfID=isetInlet_Duct
      call sfID2np(isfID,nBCfaceNode,BCfaceNode)
      if(nBCfaceNode .gt. 0)then
        do i=1,nBCfaceNode
          nn=BCfaceNode(i)
          if(iTurbWall(nn).eq.0)then
            xcoor=x(nn,1)
            ycoor=x(nn,2)
            zcoor=x(nn,3)
c...........................
c Contraction Square Length is 46.25 inch, 0.587375 = 46.25/2*0.0254
c Inlet Vel is computed based on throat Mach 0.43
c Inlet Temp is 330K, wall temp is 317K
c.... ry, rz ramp linearly from 0 at the walls (|y|,|z| = 0.587375)
c.... to 1 over a 1.0e-4 layer; their product blends the BC values.
            if(ycoor<(-0.587375+1.0e-4))then
              ry=(ycoor+0.587375)/1.0e-4
            elseif(ycoor>(0.587375-1.0e-4))then
              ry=(0.587375-ycoor)/1.0e-4
            else
              ry=1.0
            endif
            if(zcoor<(-0.587375+1.0e-4))then
              rz=(zcoor+0.587375)/1.0e-4
            elseif(zcoor>(0.587375-1.0e-4))then
              rz=(0.587375-zcoor)/1.0e-4
            else
              rz=1.0
            endif
c.... clamp to zero outside the duct cross-section
            ry=max(0.0,ry)
            rz=max(0.0,rz)
            xVel = 1.513158*ry*rz
c            xVel = 0.1*ry*rz
            Temp = 317.0+13.0*ry*rz
c..........................................
            BC(nn,2) = Temp !Temp
            BC(nn,3) = xVel ! set and scale x velocity
            BC(nn,4) = 0
            BC(nn,5) = 0
          endif
        enddo
      endif
      return
      end
|
{"hexsha": "2121a836dcc42f0c1a0f19e37f0c1127cdbcf010", "size": 2706, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "phSolver/common/BCprofile2.f", "max_stars_repo_name": "yangf4/phasta", "max_stars_repo_head_hexsha": "a096094f33b98047de0a2e28225c4d74875a88d8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 49, "max_stars_repo_stars_event_min_datetime": "2015-04-16T13:45:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T01:02:49.000Z", "max_issues_repo_path": "phSolver/common/BCprofile2.f", "max_issues_repo_name": "yangf4/phasta", "max_issues_repo_head_hexsha": "a096094f33b98047de0a2e28225c4d74875a88d8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2015-10-06T19:50:43.000Z", "max_issues_repo_issues_event_max_datetime": "2017-12-17T03:47:51.000Z", "max_forks_repo_path": "phSolver/common/BCprofile2.f", "max_forks_repo_name": "yangf4/phasta", "max_forks_repo_head_hexsha": "a096094f33b98047de0a2e28225c4d74875a88d8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 38, "max_forks_repo_forks_event_min_datetime": "2015-04-21T12:13:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-12T19:38:00.000Z", "avg_line_length": 27.8969072165, "max_line_length": 79, "alphanum_fraction": 0.4345898004, "num_tokens": 817}
|
# Degree-centrality checks on a 4-node directed graph with
# edges 1->2, 2->3, 1->3, 3->4.
@testset "$TEST $G" begin
    g5 = DG(4)
    add_edge!(g5,1,2); add_edge!(g5,2,3); add_edge!(g5,1,3); add_edge!(g5,3,4)
    # Default: total (in+out) degree, normalized by n-1 = 3.
    @test degree_centrality(g5) == [0.6666666666666666, 0.6666666666666666, 1.0, 0.3333333333333333]
    # Raw in/out degree counts when normalization is disabled.
    @test in_degree_centrality(g5, normalize=false) == [0.0, 1.0, 2.0, 1.0]
    @test out_degree_centrality(g5; normalize=false) == [2.0, 1.0, 1.0, 0.0]
end # testset
|
{"hexsha": "a2e9c026f6a2b7dd27195744427990e4034033a4", "size": 371, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/centrality/degree.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/Erdos.jl-90d7349d-81aa-5495-813a-883243abfe31", "max_stars_repo_head_hexsha": "2eb248772a05eac35823a07373dd5644913c6dbe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2017-02-24T15:54:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T19:59:23.000Z", "max_issues_repo_path": "test/centrality/degree.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/Erdos.jl-90d7349d-81aa-5495-813a-883243abfe31", "max_issues_repo_head_hexsha": "2eb248772a05eac35823a07373dd5644913c6dbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 76, "max_issues_repo_issues_event_min_datetime": "2017-02-23T09:31:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-27T09:10:31.000Z", "max_forks_repo_path": "test/centrality/degree.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/Erdos.jl-90d7349d-81aa-5495-813a-883243abfe31", "max_forks_repo_head_hexsha": "2eb248772a05eac35823a07373dd5644913c6dbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2017-03-04T21:05:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-03T12:54:44.000Z", "avg_line_length": 33.7272727273, "max_line_length": 96, "alphanum_fraction": 0.6819407008, "num_tokens": 171}
|
from __future__ import print_function
from builtins import str, input, object
from past.builtins import basestring
from copy import copy
from datetime import datetime, date, timedelta
from dateutil.relativedelta import relativedelta # for doctest
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
import errno
from functools import wraps
import imp
import inspect
import json
import logging
import os
import re
import shutil
import signal
import smtplib
from tempfile import mkdtemp
from alembic.config import Config
from alembic import command
from alembic.migration import MigrationContext
from contextlib import contextmanager
from sqlalchemy import event, exc
from sqlalchemy.pool import Pool
import numpy as np
from airflow import settings
from airflow.configuration import conf
class AirflowException(Exception):
    """Base exception for Airflow errors raised in this module
    (e.g. by validate_key, date_range and apply_defaults)."""
    pass
class AirflowSensorTimeout(Exception):
    # Exception type for sensor timeouts; defined here but raised by
    # sensor operators elsewhere (presumably -- no raiser in this module).
    pass
class TriggerRule(object):
    """String constants naming the available task trigger rules."""
    ALL_SUCCESS = 'all_success'
    ALL_FAILED = 'all_failed'
    ALL_DONE = 'all_done'
    ONE_SUCCESS = 'one_success'
    ONE_FAILED = 'one_failed'
    DUMMY = 'dummy'
class State(object):
    """
    Static class with task instance states constants and color method to
    avoid hardcoding.
    """
    QUEUED = "queued"
    RUNNING = "running"
    SUCCESS = "success"
    SHUTDOWN = "shutdown"  # External request to shut down
    FAILED = "failed"
    UP_FOR_RETRY = "up_for_retry"
    UPSTREAM_FAILED = "upstream_failed"
    SKIPPED = "skipped"
    # Display color used for each state (see color()).
    state_color = {
        QUEUED: 'gray',
        RUNNING: 'lime',
        SUCCESS: 'green',
        SHUTDOWN: 'blue',
        FAILED: 'red',
        UP_FOR_RETRY: 'gold',
        UPSTREAM_FAILED: 'orange',
        SKIPPED: 'pink',
    }
    @classmethod
    def color(cls, state):
        # Raises KeyError for unknown states.
        return cls.state_color[state]
    @classmethod
    def runnable(cls):
        # States (including None, i.e. no recorded state) treated as runnable.
        return [
            None, cls.FAILED, cls.UP_FOR_RETRY, cls.UPSTREAM_FAILED,
            cls.SKIPPED]
def pessimistic_connection_handling():
    """Install a pool 'checkout' listener that pings connections.

    Every connection handed out by the SQLAlchemy pool is first tested
    with ``SELECT 1``; a failing ping raises DisconnectionError so the
    pool discards the stale connection and retries with a fresh one.
    """
    @event.listens_for(Pool, "checkout")
    def ping_connection(dbapi_connection, connection_record, connection_proxy):
        '''
        Disconnect Handling - Pessimistic, taken from:
        http://docs.sqlalchemy.org/en/rel_0_9/core/pooling.html
        '''
        cursor = dbapi_connection.cursor()
        try:
            cursor.execute("SELECT 1")
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not converted into a DisconnectionError.
            raise exc.DisconnectionError()
        finally:
            # Close the cursor even when the ping fails (previously leaked).
            cursor.close()
def initdb():
    """Create/upgrade the DB schema, seed the default connections and
    known event types, then sync the DagBag to the database."""
    from airflow import models

    upgradedb()

    session = settings.Session()
    C = models.Connection

    def merge_conn(conn):
        # Insert `conn` only when no connection with its conn_id exists yet.
        if not session.query(C).filter(C.conn_id == conn.conn_id).first():
            session.add(conn)
            session.commit()

    home = conf.get('core', 'AIRFLOW_HOME')
    merge_conn(
        models.Connection(
            conn_id='local_mysql', conn_type='mysql',
            host='localhost', login='airflow', password='airflow',
            schema='airflow'))
    merge_conn(
        models.Connection(
            conn_id='presto_default', conn_type='presto',
            host='localhost',
            schema='hive', port=3400))
    merge_conn(
        models.Connection(
            conn_id='hive_cli_default', conn_type='hive_cli',
            schema='default',))
    merge_conn(
        models.Connection(
            conn_id='hiveserver2_default', conn_type='hiveserver2',
            host='localhost',
            schema='default', port=10000))
    merge_conn(
        models.Connection(
            conn_id='metastore_default', conn_type='hive_metastore',
            host='localhost',
            port=10001))
    merge_conn(
        models.Connection(
            conn_id='mysql_default', conn_type='mysql',
            host='localhost'))
    merge_conn(
        models.Connection(
            conn_id='sqlite_default', conn_type='sqlite',
            host='{}/sqlite_default.db'.format(home)))
    merge_conn(
        models.Connection(
            conn_id='http_default', conn_type='http',
            host='http://www.google.com'))
    merge_conn(
        models.Connection(
            conn_id='mssql_default', conn_type='mssql',
            host='localhost', port=1433))
    merge_conn(
        models.Connection(
            conn_id='vertica_default', conn_type='vertica',
            host='localhost', port=5433))

    # Seed the known event types, skipping any that already exist.
    KET = models.KnownEventType
    for event_type in ('Holiday', 'Outage', 'Natural Disaster',
                       'Marketing Campaign'):
        if not session.query(KET).filter(
                KET.know_event_type == event_type).first():
            session.add(KET(know_event_type=event_type))
    session.commit()
    session.close()

    # Populate the DAG table from the DAGs folder.
    models.DagBag(sync_to_db=True)
def upgradedb():
    """Run the bundled alembic migrations up to 'head' against the
    database configured as core/SQL_ALCHEMY_CONN."""
    logging.info("Creating tables")
    package_dir = os.path.abspath(os.path.dirname(__file__))
    config = Config(os.path.join(package_dir, 'alembic.ini'))
    config.set_main_option(
        'script_location', os.path.join(package_dir, 'migrations'))
    config.set_main_option(
        'sqlalchemy.url', conf.get('core', 'SQL_ALCHEMY_CONN'))
    command.upgrade(config, 'head')
def resetdb():
    '''
    Clear out the database
    '''
    from airflow import models
    logging.info("Dropping tables that exist")
    models.Base.metadata.drop_all(settings.engine)
    # Drop alembic's version bookkeeping table too, then rebuild from scratch.
    mc = MigrationContext.configure(settings.engine)
    version_table = mc._version
    if version_table.exists(settings.engine):
        version_table.drop(settings.engine)
    initdb()
def validate_key(k, max_length=250):
    """Validate a key string: type, length and allowed character set.

    Raises TypeError for non-strings and AirflowException when the key is
    too long or contains characters outside [A-Za-z0-9_.-]; returns True
    otherwise.
    """
    if not isinstance(k, basestring):
        raise TypeError("The key has to be a string")
    if len(k) > max_length:
        raise AirflowException(
            "The key has to be less than {0} characters".format(max_length))
    if not re.match(r'^[A-Za-z0-9_\-\.]+$', k):
        raise AirflowException(
            "The key ({k}) has to be made of alphanumeric characters, dashes, "
            "dots and underscores exclusively".format(**locals()))
    return True
def date_range(start_date, end_date=None, delta=timedelta(1)):
    """Return [start_date, start_date+delta, ...] up to and including end_date.

    `end_date` defaults to "now", evaluated at call time -- the previous
    default of `datetime.now()` in the signature was evaluated once at
    import time, silently freezing the range end.

    Raises AirflowException when end_date precedes start_date.
    """
    if end_date is None:
        end_date = datetime.now()
    if end_date < start_date:
        raise AirflowException("start_date can't be after end_date")
    dates = []
    while start_date <= end_date:
        dates.append(start_date)
        start_date += delta
    return dates
def json_ser(obj):
    """
    json serializer that deals with dates
    usage: json.dumps(object, default=utils.json_ser)
    """
    return obj.isoformat() if isinstance(obj, datetime) else obj
def alchemy_to_dict(obj):
    """
    Transforms a SQLAlchemy model instance into a dictionary,
    rendering datetime column values as ISO-8601 strings.
    Returns None for a falsy input.
    """
    if not obj:
        return None

    def render(value):
        # Only exact datetime instances are converted (matches the
        # original `type(value) == datetime` check).
        return value.isoformat() if type(value) == datetime else value

    return {c.name: render(getattr(obj, c.name))
            for c in obj.__table__.columns}
def readfile(filepath):
    """Return the entire contents of the file at `filepath` as a string.

    Uses a context manager so the handle is closed even when reading
    fails (the previous version leaked the handle on error).
    """
    with open(filepath) as f:
        return f.read()
def provide_session(func):
    """
    Function decorator that provides a session if it isn't provided.
    If you want to reuse a session or run the function as part of a
    database transaction, you pass it to the function, if not this wrapper
    will create one and close it for you.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        owns_session = 'session' not in kwargs
        if owns_session:
            kwargs['session'] = settings.Session()
        result = func(*args, **kwargs)
        if owns_session:
            # Only a session created here is finalized here; a
            # caller-supplied session is left untouched.
            session = kwargs['session']
            session.expunge_all()
            session.commit()
            session.close()
        return result
    return wrapper
def apply_defaults(func):
    """
    Function decorator that Looks for an argument named "default_args", and
    fills the unspecified arguments from it.
    Since python2.* isn't clear about which arguments are missing when
    calling a function, and that this can be quite confusing with multi-level
    inheritance and argument defaults, this decorator also alerts with
    specific information about the missing arguments.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Operators must be constructed with keyword arguments only
        # (args[0] is `self`).
        if len(args) > 1:
            raise AirflowException(
                "Use keyword arguments when initializing operators")
        dag_args = {}
        dag_params = {}
        # Seed defaults/params from the DAG when one was passed.
        if 'dag' in kwargs and kwargs['dag']:
            dag = kwargs['dag']
            dag_args = copy(dag.default_args) or {}
            dag_params = copy(dag.params) or {}
        params = {}
        if 'params' in kwargs:
            params = kwargs['params']
        # Explicit per-task params override DAG-level params.
        dag_params.update(params)
        default_args = {}
        if 'default_args' in kwargs:
            default_args = kwargs['default_args']
            # A 'params' entry inside default_args merges into params
            # instead of being treated as a constructor argument.
            if 'params' in default_args:
                dag_params.update(default_args['params'])
                del default_args['params']
        # Explicit default_args win over the DAG's default_args.
        dag_args.update(default_args)
        default_args = dag_args
        # Determine func's parameters that have no default value.
        # NOTE(review): when func has zero defaults, args[:-0] is the
        # EMPTY list, so the required-argument check below is skipped
        # entirely -- confirm this is intended.
        arg_spec = inspect.getargspec(func)
        num_defaults = len(arg_spec.defaults) if arg_spec.defaults else 0
        non_optional_args = arg_spec.args[:-num_defaults]
        if 'self' in non_optional_args:
            non_optional_args.remove('self')
        # Fill any constructor parameter the caller omitted from default_args.
        for arg in func.__code__.co_varnames:
            if arg in default_args and arg not in kwargs:
                kwargs[arg] = default_args[arg]
        missing_args = list(set(non_optional_args) - set(kwargs))
        if missing_args:
            msg = "Argument {0} is required".format(missing_args)
            raise AirflowException(msg)
        kwargs['params'] = dag_params
        result = func(*args, **kwargs)
        return result
    return wrapper
def ask_yesno(question):
    """Print `question` and read stdin until the user answers yes/y or
    no/n (case-insensitive); return the answer as a bool."""
    affirmative = {'yes', 'y'}
    negative = {'no', 'n'}
    print(question)
    while True:
        answer = input().lower()
        if answer in affirmative:
            return True
        if answer in negative:
            return False
        print("Please respond by yes or no.")
def send_email(to, subject, html_content, files=None):
    """Build and send a multipart HTML email via the configured SMTP server.

    `to` may be a single address, a comma- or semicolon-separated string,
    or a list of addresses. `files` is an optional list of file paths to
    attach. Delivery is delegated to send_MIME_email.
    """
    SMTP_MAIL_FROM = conf.get('smtp', 'SMTP_MAIL_FROM')
    # Normalize `to` into a list of addresses.
    if isinstance(to, basestring):
        if ',' in to:
            to = to.split(',')
        elif ';' in to:
            to = to.split(';')
        else:
            to = [to]
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = SMTP_MAIL_FROM
    msg['To'] = ", ".join(to)
    mime_text = MIMEText(html_content, 'html')
    msg.attach(mime_text)
    # Attach each requested file under its basename.
    for fname in files or []:
        basename = os.path.basename(fname)
        with open(fname, "rb") as f:
            msg.attach(MIMEApplication(
                f.read(),
                Content_Disposition='attachment; filename="%s"' % basename,
                Name=basename
            ))
    send_MIME_email(SMTP_MAIL_FROM, to, msg)
def send_MIME_email(e_from, e_to, mime_msg):
    """Send `mime_msg` from `e_from` to `e_to` via the configured SMTP host.

    Negotiates STARTTLS and authenticates when the corresponding smtp
    settings are configured. The connection is always closed, and the
    success log line is emitted only after the message has actually been
    handed to the server (previously it was logged BEFORE sending, so a
    failed sendmail still logged success).
    """
    SMTP_HOST = conf.get('smtp', 'SMTP_HOST')
    SMTP_PORT = conf.getint('smtp', 'SMTP_PORT')
    SMTP_USER = conf.get('smtp', 'SMTP_USER')
    SMTP_PASSWORD = conf.get('smtp', 'SMTP_PASSWORD')
    SMTP_STARTTLS = conf.getboolean('smtp', 'SMTP_STARTTLS')
    s = smtplib.SMTP(SMTP_HOST, SMTP_PORT)
    try:
        if SMTP_STARTTLS:
            s.starttls()
        if SMTP_USER and SMTP_PASSWORD:
            s.login(SMTP_USER, SMTP_PASSWORD)
        s.sendmail(e_from, e_to, mime_msg.as_string())
        logging.info("Sent an alert email to " + str(e_to))
    finally:
        s.quit()
def import_module_attrs(parent_module_globals, module_attrs_dict):
    '''
    Attempts to import a set of modules and specified attributes in the
    form of a dictionary. The attributes are copied in the parent module's
    namespace. The function returns a list of attributes names that can be
    affected to __all__.
    This is used in the context of ``operators`` and ``hooks`` and
    silence the import errors for when libraries are missing. It makes
    for a clean package abstracting the underlying modules and only
    brings functional operators to those namespaces.
    '''
    imported_attrs = []
    for mod, attrs in list(module_attrs_dict.items()):
        try:
            # Look for the module next to the calling package's __init__.
            path = os.path.realpath(parent_module_globals['__file__'])
            folder = os.path.dirname(path)
            f, filename, description = imp.find_module(mod, [folder])
            try:
                module = imp.load_module(mod, f, filename, description)
            finally:
                # imp.find_module returns an open file handle that the
                # caller must close (it was leaked before). It is None
                # for packages.
                if f:
                    f.close()
            for attr in attrs:
                parent_module_globals[attr] = getattr(module, attr)
                imported_attrs += [attr]
        except Exception as err:
            # Deliberately best-effort: missing libraries are logged,
            # not raised.
            logging.debug("Error importing module {mod}: {err}".format(
                mod=mod, err=err))
    return imported_attrs
def is_in(obj, l):
    """
    Checks whether an object is one of the item in the list.
    This is different from ``in`` because ``in`` uses __cmp__ when
    present. Here we change based on the object itself
    """
    return any(item is obj for item in l)
@contextmanager
def TemporaryDirectory(suffix='', prefix=None, dir=None):
    """Context manager yielding a fresh temporary directory that is
    removed on exit. A directory already deleted by the caller is not an
    error; any other OSError from the cleanup is re-raised."""
    path = mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
    try:
        yield path
    finally:
        try:
            shutil.rmtree(path)
        except OSError as e:
            # ENOENT - no such file or directory
            if e.errno != errno.ENOENT:
                raise e
class AirflowTaskTimeout(Exception):
    """Raised by the `timeout` context manager when its alarm fires."""
    pass
class timeout(object):
    """
    To be used in a ``with`` block and timeout its content.

    Raises AirflowTaskTimeout if the block runs longer than `seconds`.
    Relies on signal.SIGALRM, so it presumably only works on Unix and in
    the main thread -- confirm before reuse elsewhere.
    """
    def __init__(self, seconds=1, error_message='Timeout'):
        # seconds: alarm delay; error_message: text for AirflowTaskTimeout.
        self.seconds = seconds
        self.error_message = error_message
    def handle_timeout(self, signum, frame):
        # Signal handler installed by __enter__; fires when the alarm expires.
        logging.error("Process timed out")
        raise AirflowTaskTimeout(self.error_message)
    def __enter__(self):
        signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)
    def __exit__(self, type, value, traceback):
        # Cancel the pending alarm regardless of how the block exited.
        signal.alarm(0)
def is_container(obj):
    """
    Test if an object is a container (iterable) but not a string
    """
    if isinstance(obj, basestring):
        return False
    return hasattr(obj, '__iter__')
def as_tuple(obj):
    """
    If obj is a container, returns obj as a tuple.
    Otherwise, returns a tuple containing obj.
    """
    return tuple(obj) if is_container(obj) else (obj,)
def round_time(dt, delta, start_date=datetime.min):
    """
    Returns the datetime of the form start_date + i * delta
    which is closest to dt for any non-negative integer i.
    Note that delta may be a datetime.timedelta or a dateutil.relativedelta
    >>> round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
    datetime.datetime(2015, 1, 1, 0, 0)
    >>> round_time(datetime(2015, 1, 2), relativedelta(months=1))
    datetime.datetime(2015, 1, 1, 0, 0)
    >>> round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
    datetime.datetime(2015, 9, 16, 0, 0)
    >>> round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
    datetime.datetime(2015, 9, 15, 0, 0)
    >>> round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
    datetime.datetime(2015, 9, 14, 0, 0)
    >>> round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
    datetime.datetime(2015, 9, 14, 0, 0)
    """
    # Ignore the microseconds of dt
    dt -= timedelta(microseconds = dt.microsecond)
    # We are looking for a datetime in the form start_date + i * delta
    # which is as close as possible to dt. Since delta could be a relative
    # delta we don't know it's exact length in seconds so we cannot rely on
    # division to find i. Instead we employ a binary search algorithm, first
    # finding an upper and lower limit and then disecting the interval until
    # we have found the closest match.
    # We first search an upper limit for i for which start_date + upper * delta
    # exceeds dt.
    upper = 1
    while start_date + upper*delta < dt:
        # To speed up finding an upper limit we grow this exponentially by a
        # factor of 2
        upper *= 2
    # Since upper is the first value for which start_date + upper * delta
    # exceeds dt, upper // 2 is below dt and therefore forms a lower limited
    # for the i we are looking for
    lower = upper // 2
    # We now continue to intersect the interval between
    # start_date + lower * delta and start_date + upper * delta
    # until we find the closest value
    while True:
        # Invariant: start + lower * delta < dt <= start + upper * delta
        # If start_date + (lower + 1)*delta exceeds dt, then either lower or
        # lower+1 has to be the solution we are searching for
        if start_date + (lower + 1)*delta >= dt:
            # Check if start_date + (lower + 1)*delta or
            # start_date + lower*delta is closer to dt and return the solution
            if (start_date + (lower + 1)*delta) - dt <= dt - (start_date + lower*delta):
                return start_date + (lower + 1)*delta
            else:
                return start_date + lower*delta
        # We intersect the interval and either replace the lower or upper
        # limit with the candidate
        candidate = lower + (upper - lower) // 2
        if start_date + candidate*delta >= dt:
            upper = candidate
        else:
            lower = candidate
    # in the special case when start_date > dt the search for upper will
    # immediately stop for upper == 1 which results in lower = upper // 2 = 0
    # and this function returns start_date.
def chain(*tasks):
    """
    Given a number of tasks, builds a dependency chain.
    chain(task_1, task_2, task_3, task_4)
    is equivalent to
    task_1.set_downstream(task_2)
    task_2.set_downstream(task_3)
    task_3.set_downstream(task_4)
    """
    for i in range(len(tasks) - 1):
        tasks[i].set_downstream(tasks[i + 1])
class AirflowJsonEncoder(json.JSONEncoder):
    """JSONEncoder that additionally handles datetimes, dates and numpy
    scalar types."""

    def default(self, obj):
        # Dates are rendered as fixed-format strings.
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
        elif isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        # Use numpy's abstract base classes instead of enumerating the
        # concrete aliases: this covers every integer/float width and keeps
        # working on numpy >= 2.0, where the np.int_/np.float_/np.complex_
        # style aliases used previously were removed.
        elif isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.bool_):
            return bool(obj)
        elif isinstance(obj, (np.floating, np.complexfloating)):
            # float() on a complex scalar raises TypeError, matching the
            # original behavior for np.complex64/np.complex128.
            return float(obj)
        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)
|
{"hexsha": "0f74532e355a9c3a296ffc035da46d4835257b84", "size": 20676, "ext": "py", "lang": "Python", "max_stars_repo_path": "airflow/utils.py", "max_stars_repo_name": "dtardoin/airflow", "max_stars_repo_head_hexsha": "4d7f413c7db3ffdb1236e7799e4fe647842e5dbd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-02T20:08:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-02T20:08:53.000Z", "max_issues_repo_path": "airflow/utils.py", "max_issues_repo_name": "dtardoin/airflow", "max_issues_repo_head_hexsha": "4d7f413c7db3ffdb1236e7799e4fe647842e5dbd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "airflow/utils.py", "max_forks_repo_name": "dtardoin/airflow", "max_forks_repo_head_hexsha": "4d7f413c7db3ffdb1236e7799e4fe647842e5dbd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-14T16:19:12.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-14T16:19:12.000Z", "avg_line_length": 32.1555209953, "max_line_length": 90, "alphanum_fraction": 0.6210098665, "include": true, "reason": "import numpy", "num_tokens": 4837}
|
from multiprocessing import set_start_method, cpu_count
#set_start_method('forkserver')
import os
os.environ["OMP_NUM_THREADS"] = str(cpu_count()) # or to whatever you want
from argparse import ArgumentParser
from datetime import datetime
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.utils import shuffle
from tqdm import tqdm
import pandas as pd
import numpy as np
import tensorflow as tf
import nalu
time_now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
def str2bool(v):
    """argparse-friendly boolean parser mapping common yes/no spellings.

    Returns True for yes/true/t/y/1 and False for no/false/f/n/0
    (case-insensitive). Raises ArgumentTypeError otherwise so argparse
    reports a clean usage error -- the original raised
    `argparse.ArgumentTypeError`, but only `ArgumentParser` was imported
    from argparse, so rejecting a value crashed with NameError instead.
    """
    from argparse import ArgumentTypeError
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise ArgumentTypeError('Boolean value expected.')
def chisq(y_true, y_pred, y_error): return np.sum(((y_true-y_pred)/y_error)**2.)
# ---- Command-line interface -------------------------------------------------
# Every option has a default so the script can run with no arguments at all.
ap = ArgumentParser()
ap.add_argument('-d', '--directory', type=str, required=False, default='nalu_tf_save_dir/saves_{}'.format(time_now), help='The tensorflow ckpt save file.')
ap.add_argument('-nnl', '--n_nalu_layers', type=int, required=False, default=1, help='Whether to use 1 (default), 2, or ... N NALU layers.')
ap.add_argument('-nnn', '--n_nalu_neurons', type=int, required=False, default=0, help='How many features on the second NALU layer.')
ap.add_argument('-ne', '--n_epochs', type=int, required=False, default=200, help='Number of N_EPOCHS to train the network with.')
ap.add_argument('-nc', '--n_classes', type=int, required=False, default=1, help='n_classes == 1 for Regression (default); > 1 for Classification.')
ap.add_argument('-bs', '--batch_size', type=int, required=False, default=32, help='Batch size: number of samples per batch.')
ap.add_argument('-lr', '--learning_rate', type=float, required=False, default=1e-3, help='Learning rate: how fast the optimizer moves up/down the gradient.')
ap.add_argument('-ts', '--test_size', type=float, required=False, default=0.75, help='How much to split the train / test ratio.')
ap.add_argument('-rs', '--random_state', type=int, required=False, default=42, help='Integer value to initialize train/test splitting randomization.')
ap.add_argument('-pp', '--pre_process', type=str2bool, nargs='?', required=False, default=True, help='Toggle whether to MinMax-preprocess the features.')
ap.add_argument('-pca', '--pca_transform', type=str2bool, nargs='?', required=False, default=True, help='Toggle whether to PCA-pretransform the features.')
ap.add_argument('-v', '--verbose', type=str2bool, nargs='?', required=False, default=False, help='Whether to set verbosity = True or False (default).')
ap.add_argument('-ds', '--data_set', type=str, required=False, default='', help='The csv file containing the data with which to train.')
try:
    args = vars(ap.parse_args())
except Exception as e:
    # Fall back to the declared defaults if parsing fails (e.g. when the
    # script is run inside an environment that injects its own argv).
    print('Error: {}'.format(e))
    args = {}
    args['directory'] = ap.get_default('directory')
    args['n_nalu_layers'] = ap.get_default('n_nalu_layers')
    args['n_nalu_neurons'] = ap.get_default('n_nalu_neurons')
    args['n_epochs'] = ap.get_default('n_epochs')
    args['n_classes'] = ap.get_default('n_classes')
    args['batch_size'] = ap.get_default('batch_size')
    args['learning_rate'] = ap.get_default('learning_rate')
    args['test_size'] = ap.get_default('test_size')
    args['random_state'] = ap.get_default('random_state')
    args['pre_process'] = ap.get_default('pre_process')
    args['pca_transform'] = ap.get_default('pca_transform')
    args['verbose'] = ap.get_default('verbose')
    args['data_set'] = ap.get_default('data_set')
# Frequently used flags pulled out into module-level names.
DO_PP = args['pre_process']
DO_PCA = args['pca_transform']
verbose = args['verbose']
data_set_fname = args['data_set']
'''
print("loading pipelines on disk vis joblib.")
full_pipe = joblib.load('pmap_full_pipe_transformer_16features.joblib.save')
std_scaler_from_raw = joblib.load('pmap_standard_scaler_transformer_16features.joblib.save')
pca_transformer_from_std_scaled = joblib.load('pmap_pca_transformer_from_stdscaler_16features.joblib.save')
minmax_scaler_transformer_raw = joblib.load('pmap_minmax_scaler_transformer_from_raw_16features.joblib.save')
minmax_scaler_transformer_pca = joblib.load('pmap_minmax_scaler_transformer_from_pca_16features.joblib.save')
'''
# ---- Load labels (flux) and their per-sample uncertainties ----
label_n_error_filename = 'pmap_raw_labels_and_errors.csv'
print("Loading in raw labels and errors from {}".format(label_n_error_filename))
labels_df = pd.read_csv(label_n_error_filename)
labels = labels_df['Flux'].values[:, None]  # column vector for the regressor
labels_err = labels_df['Flux_err'].values
# Feature File Switch: pick the pre-transformed feature file matching the
# requested preprocessing / PCA combination.
if DO_PP and DO_PCA:
    features_input_filename = 'pmap_full_pipe_transformed_16features.csv'
elif DO_PP:
    features_input_filename = 'pmap_minmax_transformed_from_raw_16features.csv'
elif DO_PCA:
    features_input_filename = 'pmap_pca_transformed_from_stdscaler_16features.csv'
else:
    features_input_filename = 'pmap_raw_16features.csv'
print("Loading in pre-processed features from {}".format(features_input_filename))
# BUG FIX: the original read `feature_input_filename` (singular), an undefined
# name, which raised NameError at import time.
features_input = pd.read_csv(features_input_filename).drop(['idx'], axis=1).values
def nalu(input_layer, num_outputs):
    """ Neural Arithmetic Logic Unit tensorflow layer (TF1 graph API).

    Builds the NALU cell: a learned gate blends an additive linear path with
    a multiplicative path computed in log space.

    Arguments:
        input_layer - A Tensor representing previous layer
        num_outputs - number of output units
    Returns:
        A tensor representing the output of NALU
    """
    shape = (int(input_layer.shape[-1]), num_outputs)
    # define variables (truncated-normal initialization, stddev 0.02)
    W_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
    M_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
    G = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
    # operations according to paper
    # W is softly constrained toward {-1, 0, 1} via tanh * sigmoid.
    W = tf.tanh(W_hat) * tf.sigmoid(M_hat)
    # Multiplicative path computed in log space; 1e-7 guards against log(0).
    m = tf.exp(tf.matmul(tf.log(tf.abs(input_layer) + 1e-7), W))
    # g gates between the additive path `a` and the multiplicative path `m`.
    g = tf.sigmoid(tf.matmul(input_layer, G))
    a = tf.matmul(input_layer, W)
    out = g * a + (1 - g) * m
    return out
if __name__ == "__main__":
    # ---- Hyperparameters pulled from the parsed CLI arguments ----
    N_FEATURES = features_input.shape[-1]
    EXPORT_DIR = args['directory']
    N_NALU_LAYERS = args['n_nalu_layers']
    # 0 means "same width as the input features".
    N_NALU_NEURONS = args['n_nalu_neurons'] if args['n_nalu_neurons'] > 0 else N_FEATURES
    N_CLASSES = args['n_classes'] # = 1 for regression
    TEST_SIZE = args['test_size']
    RANDOM_STATE = args['random_state']
    N_EPOCHS = args['n_epochs']
    LEARNING_RATE = args['learning_rate']
    BATCH_SIZE = args['batch_size']
    # Encode the full configuration in the export directory name.
    EXPORT_DIR = EXPORT_DIR + '_nnl{}_nnn{}_nc{}_bs{}_lr{}_ne{}_ts{}_rs{}_PP{}_PCA{}/'.format(N_NALU_LAYERS, N_NALU_NEURONS, N_CLASSES,
                                                                                              BATCH_SIZE, LEARNING_RATE, N_EPOCHS,
                                                                                              TEST_SIZE, RANDOM_STATE,
                                                                                              {True:1, False:0}[DO_PP], {True:1, False:0}[DO_PCA])
    print("Saving models to path: {}".format(EXPORT_DIR))
    # Split on *indices* so labels and errors can be indexed consistently later.
    idx_train, idx_test = train_test_split(np.arange(labels.size), test_size=TEST_SIZE, random_state=RANDOM_STATE)
    X_data, Y_data = features_input[idx_train], labels[idx_train]#[:,None]
    LAST_BIT = X_data.shape[0]-BATCH_SIZE*(X_data.shape[0]//BATCH_SIZE)
    # Force integer number of batches total by dropping last "<BATCH_SIEZ" number of samples
    # NOTE(review): if LAST_BIT == 0 the slice [:-0] is *empty* and all data is
    # dropped -- confirm the sample count is never a multiple of BATCH_SIZE.
    X_data_use = X_data[:-LAST_BIT].copy()
    Y_data_use = Y_data[:-LAST_BIT].copy()
    # Per-epoch metric history (only filled in by the commented-out code below).
    output_dict = {}
    output_dict['loss'] = np.zeros(N_EPOCHS)
    output_dict['accuracy'] = np.zeros(N_EPOCHS)
    output_dict['R2_train'] = np.zeros(N_EPOCHS)
    output_dict['R2_test'] = np.zeros(N_EPOCHS)
    output_dict['chisq_train'] = np.zeros(N_EPOCHS)
    output_dict['chisq_test'] = np.zeros(N_EPOCHS)
    # ---- Build the TF1 graph, pinned to CPU ----
    with tf.device("/cpu:0"):
        # tf.reset_default_graph()
        # define placeholders and network
        X = tf.placeholder(tf.float32, shape=[None, N_FEATURES])
        Y_true = tf.placeholder(tf.float32, shape=[None, 1])
        # Setup NALU Layers: a stack of N_NALU_LAYERS cells, each feeding the next.
        nalu_layers = {'nalu0':nalu(X,N_NALU_NEURONS)}
        for kn in range(1, N_NALU_LAYERS):
            #with tf.name_scope('nalu{}'.format(kn)):
            nalu_layers['nalu{}'.format(kn)] = nalu(nalu_layers['nalu{}'.format(kn-1)], N_NALU_NEURONS)
        # with tf.name_scope("output"):
        Y_pred = nalu(nalu_layers['nalu{}'.format(N_NALU_LAYERS-1)], N_CLASSES) # N_CLASSES = 1 for regression
        #with tf.name_scope('loss'):
        # loss and train operations
        loss = tf.nn.l2_loss(Y_pred - Y_true) # NALU uses mse
        optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
        train_op = optimizer.minimize(loss)
        # Add an op to initialize the variables.
        init_op = tf.global_variables_initializer()
        # Add ops to save and restore all the variables.
        saver = tf.train.Saver()#max_to_keep=N_EPOCHS)
        # Use all CPU cores for both inter- and intra-op parallelism.
        sess_config = tf.ConfigProto(
            device_count={"CPU": cpu_count()},
            inter_op_parallelism_threads=cpu_count(),
            intra_op_parallelism_threads=cpu_count())
    # Disabled TensorBoard instrumentation kept for reference.
    """
with tf.name_scope("eval"):
''' Tensorboard Redouts'''
''' Training R-Squared Score'''
total_error = tf.reduce_sum(tf.square(tf.subtract(Y_true, tf.reduce_mean(Y_true))))
unexplained_error = tf.reduce_sum(tf.square(tf.subtract(Y_true, Y_pred)))
R_squared = tf.subtract(1.0, tf.div(unexplained_error, total_error))
# ''' Testing R-Squared Score'''
# Y_pred_test = Y_pred.eval(feed_dict={X: features[idx_test]})
# total_error_test = tf.reduce_sum(tf.square(tf.subtract(Y_data_use, tf.reduce_mean(Y_data_use))))
# unexplained_error_test = tf.reduce_sum(tf.square(tf.subtract(Y_data_use, Y_pred_test)))
# R_squared_test = tf.subtract(1, tf.div(unexplained_error, total_error))
''' Loss and RMSE '''
squared_error = tf.square(tf.subtract(Y_true, Y_pred))
loss = tf.reduce_sum(tf.sqrt(tf.cast(squared_error, tf.float32)))
rmse = tf.sqrt(tf.reduce_mean(tf.cast(squared_error, tf.float32)))
with tf.name_scope("summary"):
''' Declare Scalar Tensorboard Terms'''
tf.summary.scalar('loss', loss)
tf.summary.scalar('RMSE', rmse)
tf.summary.scalar('R_sqrd', R_squared)
''' Declare Histogram Tensorboard Terms'''
# Squared Error Histogram
tf.summary.histogram('SqErr Hist', squared_error)
# NALU Layers Histogram
for kn in range(N_NALU_LAYERS):
tf.summary.histogram('NALU{}'.format(kn), nalu_layers['nalu{}'.format(kn)])
''' Merge all the summaries and write them out to `export_dir` + `/logs_train_`time_now`` '''
merged = tf.summary.merge_all()
    """
    # ---- Training loop ----
    with tf.Session(config=sess_config) as sess:
        ''' Output all summaries to `export_dir` + `/logs_train_`time_now`` '''
        train_writer = tf.summary.FileWriter(EXPORT_DIR + '/logs_train_{}'.format(time_now),sess.graph)
        ''' END Tensorboard Readout Step'''
        sess.run(init_op)
        best_test_r2 = 0
        for ep in tqdm(range(N_EPOCHS)):
            # NOTE(review): `i` is initialized but never advanced; the
            # commented-out chisq bookkeeping below relies on it.
            i = 0
            gts = 0  # count of "good enough" predictions this epoch
            # Reshuffle features and labels together
            X_data_use, Y_data_use = shuffle(X_data_use, Y_data_use)
            for k in tqdm(range(len(X_data_use)//BATCH_SIZE)):
                batch_now = range(k*BATCH_SIZE, (k+1)*BATCH_SIZE)
                # xs, ys = X_data_use[i:i+BATCH_SIZE], Y_data_use[i:i+BATCH_SIZE]
                xs, ys = X_data_use[batch_now], Y_data_use[batch_now]
                _, ys_pred, l = sess.run([train_op, Y_pred, loss],
                                         feed_dict={X: xs, Y_true: ys})
                # calculate number of correct predictions from batch
                gts += np.sum(np.isclose(ys, ys_pred, atol=1e-4, rtol=1e-4))
            # Evaluate on the held-out test indices once per epoch.
            ytest_pred = Y_pred.eval(feed_dict={X: features_input[idx_test]})
            # NOTE(review): `median_sub_nan` is not defined anywhere in this
            # file -- this branch raises NameError if NaNs ever appear.
            if np.isnan(ytest_pred).any(): ytest_pred = median_sub_nan(ytest_pred)
            test_r2 = r2_score(labels[idx_test], ytest_pred)#[:,None]
            # print("Test R2 Score: {}".format(test_r2_score))
            acc = gts/len(Y_data_use)
            # R2 of the *last* batch only, not the whole training set.
            train_r2 = r2_score(ys, ys_pred)
            print('\nepoch {}, loss: {:.5}, accuracy: {:.5}, Batch R2: {:.5}, Test R2: {:.5}'.format(ep, l, acc, train_r2, test_r2))
            # Disabled per-epoch metric recording kept for reference.
            """
output_dict['loss'][ep] = l
output_dict['accuracy'][ep] = acc
output_dict['R2_train'][ep] = train_r2
output_dict['R2_test'][ep] = test_r2
output_dict['chisq_train'][ep] = chisq(ys.flatten(), ys_pred.flatten(), labels_err[i:i+BATCH_SIZE])
output_dict['chisq_test'][ep] = chisq(labels[idx_test], ytest_pred.flatten(), labels_err[idx_test])
            """
            # Checkpoint every epoch, and separately track the best test R2.
            if verbose: print('Saving Default to Disk')
            now_save_name = EXPORT_DIR + "model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format( ep, l, acc, train_r2, test_r2)
            save_path = saver.save(sess, now_save_name)
            if test_r2 >= best_test_r2:
                if verbose: print('Saving Best to Disk')
                best_test_r2 = test_r2
                ''' Store the Best Scored Test-R2 '''
                best_save_name = EXPORT_DIR + "best_test_r2/model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format( ep, l, acc, train_r2, test_r2)
                save_path = saver.save(sess, best_save_name)
        # Final checkpoint after all epochs.
        ep = '_FINAL'
        final_save_name = EXPORT_DIR+ "model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format( ep, l, acc, train_r2, test_r2)
        save_path = saver.save(sess, final_save_name)
        print("Model saved in path: {}".format(save_path))
        # Disabled metric-history export kept for reference.
        """
try:
pd.DataFrame(output_dict, index=range(N_EPOCHS)).to_csv(EXPORT_DIR+ "model_loss_acc_BatchR2_TestR2_DataFrame.csv")
except Exception as e:
print('DataFrame to CSV broke because', str(e))
        """
    # Dead code from an earlier negative-log-likelihood experiment.
    '''
with tf.name_scope("loss"):
def tf_nll(labels, output, uncs, coeff=1):
error = output - labels
return tf.reduce_sum(tf.divide(tf.squared_difference(output, labels) , tf.square(uncs)))# + tf.log(tf.square(uncs))
#return tf.reduce_sum(1 * (coeff * np.log(2*np.pi) + coeff * tf.log(uncs) + (0.5/uncs) * tf.pow(error, 2)))
negloglike = tf_nll(labels=y, output=output, uncs=unc)
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = tf.add_n([negloglike] + reg_losses, name="chisq")
with tf.name_scope("eval"):
accuracy = tf.reduce_mean(tf.squared_difference(output, y, name="accuracy"))
SqErrRatio= tf.divide(accuracy, tf.reduce_mean(tf.squared_difference(y, tf.reduce_mean(y))))
r2_acc = 1.0 - SqErrRatio
chsiqMean = tf_nll(labels=y, output=tf.reduce_mean(y), uncs=unc)
chisqModel= tf_nll(labels=y, output=output, uncs=unc)
rho2_acc = 1.0 - chisqModel / chsiqMean"
mse_summary = tf.summary.scalar('train_acc' , accuracy )
loss_summary = tf.summary.scalar('loss' , loss )
nll_summary = tf.summary.scalar('negloglike', negloglike)
r2s_summary = tf.summary.scalar('r2_acc' , r2_acc )
p2s_summary = tf.summary.scalar('rho2_acc' , rho2_acc )
val_summary = tf.summary.scalar('val_acc' , accuracy )
# hid1_hist = tf.summary.histogram('hidden1', hidden1)
# hid2_hist = tf.summary.histogram('hidden1', hidden1)
# hid3_hist = tf.summary.histogram('hidden1', hidden1)
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
    '''
|
{"hexsha": "b74a2208d65ec54ba76e1cfbd3263fb8ed023157", "size": 15910, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python Scripts/spitzer_cal_NALU_train.py", "max_stars_repo_name": "exowanderer/SpitzerDeepLearningNetwork", "max_stars_repo_head_hexsha": "37f3ca1731b05f64ec6080bf9e333c7a491840f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-24T10:25:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T10:25:17.000Z", "max_issues_repo_path": "Python Scripts/spitzer_cal_NALU_train.py", "max_issues_repo_name": "exowanderer/SpitzerDeepLearningNetwork", "max_issues_repo_head_hexsha": "37f3ca1731b05f64ec6080bf9e333c7a491840f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python Scripts/spitzer_cal_NALU_train.py", "max_forks_repo_name": "exowanderer/SpitzerDeepLearningNetwork", "max_forks_repo_head_hexsha": "37f3ca1731b05f64ec6080bf9e333c7a491840f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.3848396501, "max_line_length": 159, "alphanum_fraction": 0.6443117536, "include": true, "reason": "import numpy", "num_tokens": 4080}
|
# coding: utf-8
# Jupyter-notebook export: exploratory analysis of the Home Depot
# product-search relevance data set (train.csv + product_descriptions.csv).

# In[6]:
import numpy as np
import pandas as pd
import nltk
from nltk.collocations import *
import scipy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cross_validation import train_test_split
from sklearn import linear_model
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')

# In[4]:
train_df = pd.read_csv("train.csv", encoding="ISO-8859-1")
# BUG FIX: the original lower-cased `product_title` into `search_term`,
# silently overwriting the actual search terms with product titles.
train_df["search_term"] = train_df["search_term"].str.lower()
train_df["product_title"] = train_df["product_title"].str.lower()

# In[5]:
des = pd.read_csv("product_descriptions.csv", encoding="ISO-8859-1")
des["product_description"] = des["product_description"].str.lower()
train_df = train_df.merge(des, on="product_uid")
# BUG FIX: the original concatenated with the literal list
# ['product_description'] instead of the column x['product_description'].
train_df = train_df.assign(prod_complete=lambda x: (x['product_title'] + ' ' + x['product_description']))

# In[8]:
# Distribution of relevance scores; pull out the extreme classes.
s = train_df["relevance"]
notrelevant = train_df[s == 1.00]
relevant = train_df[s == 3.00]
s.hist()

# In[10]:
# Bag-of-ngrams (1-3 grams) over search terms, titles and descriptions.
vectorizer = CountVectorizer(ngram_range=(1, 3), min_df=1)
search_counts = vectorizer.fit_transform(train_df["search_term"])
distinct_title_counts = vectorizer.transform(train_df["product_title"].drop_duplicates())
distinct_descr_counts = vectorizer.transform(
    des["product_description"].drop_duplicates())
feature_counts = scipy.sparse.vstack(
    [search_counts, distinct_title_counts, distinct_descr_counts])

# In[11]:
notrelevant.sample(n=5)

# In[12]:
nrl = notrelevant[notrelevant["id"] == 24058]
nrl

# In[13]:
train_df[train_df["product_title"].str.contains("hydronic")]

# In[15]:
prods = pd.read_csv("product_descriptions.csv", encoding="ISO-8859-1")
prods.sample(n=5)

# In[16]:
hydronics = prods[prods["product_description"].str.contains("hydronic")]
hydronics[hydronics["product_description"].str.contains("heater")]

# In[17]:
bm = nltk.collocations.BigramAssocMeasures()
# NOTE(review): `.str.cat()` yields one long string, and from_words iterates
# it character-by-character, so the collocations are *character* bigrams;
# consider tokenizing into words first.
finder = BigramCollocationFinder.from_words(prods["product_description"].str.cat())

# In[18]:
score = finder.score_ngram(bm.pmi, "hydronic", "heater")
score
|
{"hexsha": "de35e3dce593d1026e170987d99b9cae4bc94093", "size": 2329, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/ali/ali06/phi_ali06.py", "max_stars_repo_name": "Iolaum/Phi1337", "max_stars_repo_head_hexsha": "c73b01cb85c0187ed5c23c672d4f3d05a6934a9f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/ali/ali06/phi_ali06.py", "max_issues_repo_name": "Iolaum/Phi1337", "max_issues_repo_head_hexsha": "c73b01cb85c0187ed5c23c672d4f3d05a6934a9f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/ali/ali06/phi_ali06.py", "max_forks_repo_name": "Iolaum/Phi1337", "max_forks_repo_head_hexsha": "c73b01cb85c0187ed5c23c672d4f3d05a6934a9f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.843537415, "max_line_length": 157, "alphanum_fraction": 0.6839845427, "include": true, "reason": "import numpy,import scipy", "num_tokens": 608}
|
[STATEMENT]
lemma generate_valid_stateful_policy_IFSACS_2_all_security_requirements_fulfilled_IFS: assumes validReqs: "valid_reqs M"
and wfG: "wf_graph G"
and high_level_policy_valid: "all_security_requirements_fulfilled M G"
and edgesList: "(set edgesList) \<subseteq> edges G"
shows "all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))
[PROOF STEP]
have subseteq: "set (filter_IFS_no_violations G M edgesList) \<inter> set (filter_compliant_stateful_ACS G M edgesList) \<subseteq> set (filter_IFS_no_violations G M edgesList)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set (filter_IFS_no_violations G M edgesList) \<inter> set (filter_compliant_stateful_ACS G M edgesList) \<subseteq> set (filter_IFS_no_violations G M edgesList)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
set (filter_IFS_no_violations G M edgesList) \<inter> set (filter_compliant_stateful_ACS G M edgesList) \<subseteq> set (filter_IFS_no_violations G M edgesList)
goal (1 subgoal):
1. all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))
[PROOF STEP]
from wfG filter_IFS_no_violations_subseteq_input edgesList
[PROOF STATE]
proof (chain)
picking this:
wf_graph G
set (filter_IFS_no_violations ?G ?M ?Es) \<subseteq> set ?Es
set edgesList \<subseteq> edges G
[PROOF STEP]
have wfG': "wf_graph \<lparr>nodes = nodes G, edges = edges G \<union> set (filter_IFS_no_violations G M edgesList)\<rparr>"
[PROOF STATE]
proof (prove)
using this:
wf_graph G
set (filter_IFS_no_violations ?G ?M ?Es) \<subseteq> set ?Es
set edgesList \<subseteq> edges G
goal (1 subgoal):
1. wf_graph \<lparr>nodes = nodes G, edges = edges G \<union> set (filter_IFS_no_violations G M edgesList)\<rparr>
[PROOF STEP]
by (metis graph_eq_intro Un_absorb2 graph.select_convs(1) graph.select_convs(2) order.trans)
[PROOF STATE]
proof (state)
this:
wf_graph \<lparr>nodes = nodes G, edges = edges G \<union> set (filter_IFS_no_violations G M edgesList)\<rparr>
goal (1 subgoal):
1. all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))
[PROOF STEP]
from high_level_policy_valid
[PROOF STATE]
proof (chain)
picking this:
all_security_requirements_fulfilled M G
[PROOF STEP]
have "all_security_requirements_fulfilled (get_IFS M) G"
[PROOF STATE]
proof (prove)
using this:
all_security_requirements_fulfilled M G
goal (1 subgoal):
1. all_security_requirements_fulfilled (get_IFS M) G
[PROOF STEP]
by(simp add: all_security_requirements_fulfilled_def get_IFS_def)
[PROOF STATE]
proof (state)
this:
all_security_requirements_fulfilled (get_IFS M) G
goal (1 subgoal):
1. all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))
[PROOF STEP]
from filter_IFS_no_violations_correct[OF valid_reqs_IFS_D[OF validReqs] wfG this edgesList]
[PROOF STATE]
proof (chain)
picking this:
all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph \<lparr>hosts = nodes G, flows_fix = edges G, flows_state = set (filter_IFS_no_violations G M edgesList)\<rparr>)
[PROOF STEP]
have
"all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph \<lparr>hosts = nodes G, flows_fix = edges G, flows_state = set (filter_IFS_no_violations G M edgesList)\<rparr>)"
[PROOF STATE]
proof (prove)
using this:
all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph \<lparr>hosts = nodes G, flows_fix = edges G, flows_state = set (filter_IFS_no_violations G M edgesList)\<rparr>)
goal (1 subgoal):
1. all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph \<lparr>hosts = nodes G, flows_fix = edges G, flows_state = set (filter_IFS_no_violations G M edgesList)\<rparr>)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph \<lparr>hosts = nodes G, flows_fix = edges G, flows_state = set (filter_IFS_no_violations G M edgesList)\<rparr>)
goal (1 subgoal):
1. all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))
[PROOF STEP]
from all_security_requirements_fulfilled_mono_stateful_policy_to_network_graph[OF valid_reqs_IFS_D[OF validReqs] subseteq wfG' this]
[PROOF STATE]
proof (chain)
picking this:
all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph \<lparr>hosts = nodes G, flows_fix = edges G, flows_state = set (filter_IFS_no_violations G M edgesList) \<inter> set (filter_compliant_stateful_ACS G M edgesList)\<rparr>)
[PROOF STEP]
have "all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))"
[PROOF STATE]
proof (prove)
using this:
all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph \<lparr>hosts = nodes G, flows_fix = edges G, flows_state = set (filter_IFS_no_violations G M edgesList) \<inter> set (filter_compliant_stateful_ACS G M edgesList)\<rparr>)
goal (1 subgoal):
1. all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))
[PROOF STEP]
by(simp add: generate_valid_stateful_policy_IFSACS_2_def)
[PROOF STATE]
proof (state)
this:
all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))
goal (1 subgoal):
1. all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))
goal (1 subgoal):
1. all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
all_security_requirements_fulfilled (get_IFS M) (stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2626, "file": "Network_Security_Policy_Verification_TopoS_Stateful_Policy_Algorithm", "length": 18}
|
import time
import pandas as pd
from selenium import webdriver
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import numpy

start = time.time()
# initialize the list of all the data you need through lists
urls = []
name = []
types = []
duration = []
difficulty_level = []
Course_description = []
platforms = []
# NOTE(review): hard-coded absolute chromedriver path -- only works on the
# author's machine.
driver = webdriver.Chrome(
    '/home/paritosh/PycharmProjects/Youtube/chromedriver')  # this is chrome you have to download it from google
# this is scrolling code if you need more data add this code in main function
"""
while True:
scroll_height = 3000
document_height_before = driver.execute_script("return document.documentElement.scrollHeight")
driver.execute_script(f"window.scrollTo(0, {document_height_before + scroll_height});")
time.sleep(1.5)
document_height_after = driver.execute_script("return document.documentElement.scrollHeight")
if document_height_after == document_height_before:
break
"""
# Number of catalog pages to fetch.
page = 8
def Data():
    # Fetch each Stanford Online catalog page and parse its results table.
    # NOTE(review): the parsed `link_class` results are never appended to any
    # of the module-level lists, so `urls` printed below stays empty.
    for i in range(0, page):
        # this is the main link dont touch this
        print("Loading Page number ", i)
        driver.get(
            'https://online.stanford.edu/search-catalog?type=All&free_or_paid[free]=free&free_or_paid[paid]=paid&page={}'.format(i))
        content = driver.page_source.encode('utf-8').strip()  # this get content of a page in normal way
        soup = BeautifulSoup(content, 'lxml')  # this is used to find the things we need in the source code
        link_class = soup.findAll("div", class_="search-results--table")  # titles of youtube videos is stored here
Data()
print(urls)
driver.close()
|
{"hexsha": "19405bf77884540846d90068e8f9053c6aac7f5d", "size": 1708, "ext": "py", "lang": "Python", "max_stars_repo_path": "Data/Stanford/StanFord_main.py", "max_stars_repo_name": "paritoshtripathi935/Savo", "max_stars_repo_head_hexsha": "40f41912d4d59765d8e4b60cdd76035f0406cc1f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-28T06:38:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-28T06:38:17.000Z", "max_issues_repo_path": "Data/Stanford/StanFord_main.py", "max_issues_repo_name": "paritoshtripathi935/Savo", "max_issues_repo_head_hexsha": "40f41912d4d59765d8e4b60cdd76035f0406cc1f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Data/Stanford/StanFord_main.py", "max_forks_repo_name": "paritoshtripathi935/Savo", "max_forks_repo_head_hexsha": "40f41912d4d59765d8e4b60cdd76035f0406cc1f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9491525424, "max_line_length": 132, "alphanum_fraction": 0.6838407494, "include": true, "reason": "import numpy", "num_tokens": 371}
|
      LOGICAL FUNCTION DELCTG( PAR1, PAR2, PAR3 )
C
C     SLICOT RELEASE 5.5.
C
C     Copyright (c) 2002-2012 NICONET e.V.
C
C     PURPOSE
C
C     Void logical function for DGGES.
C
C     DGGES accepts an eigenvalue-selector callback for ordering the
C     generalized Schur form; this dummy selector accepts every
C     eigenvalue pair.  All three arguments are ignored.
C
C     .. Scalar Arguments ..
      DOUBLE PRECISION PAR1, PAR2, PAR3
C
C     Unconditionally select the eigenvalue pair.
      DELCTG = .TRUE.
      RETURN
      END
|
{"hexsha": "2d9d5fbfcf22c1785f1a9be701f059e6b2359b09", "size": 286, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "External/SLICOT/delctg.f", "max_stars_repo_name": "bgin/MissileSimulation", "max_stars_repo_head_hexsha": "90adcbf1c049daafb939f3fe9f9dfe792f26d5df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2016-08-28T23:20:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T14:43:58.000Z", "max_issues_repo_path": "External/SLICOT/delctg.f", "max_issues_repo_name": "bgin/MissileSimulation", "max_issues_repo_head_hexsha": "90adcbf1c049daafb939f3fe9f9dfe792f26d5df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-06-02T21:29:51.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-05T05:59:31.000Z", "max_forks_repo_path": "External/SLICOT/delctg.f", "max_forks_repo_name": "bgin/MissileSimulation", "max_forks_repo_head_hexsha": "90adcbf1c049daafb939f3fe9f9dfe792f26d5df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-04T22:38:22.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-04T22:38:22.000Z", "avg_line_length": 17.875, "max_line_length": 51, "alphanum_fraction": 0.5769230769, "num_tokens": 101}
|
import open3d as o3d
from sys import argv, exit
from PIL import Image
import math
import numpy as np
import copy
import os
import re
import cv2
import matplotlib.pyplot as plt
from shapely.geometry import Point, Polygon
from scipy.spatial.transform import Rotation as R
def natural_sort(l):
    """Sort strings so that embedded integers compare numerically.

    e.g. ['img10', 'img2'] -> ['img2', 'img10'].  Non-digit runs are
    compared case-insensitively.
    """
    def sort_key(text):
        return [int(chunk) if chunk.isdigit() else chunk.lower()
                for chunk in re.split('([0-9]+)', text)]
    return sorted(l, key=sort_key)
def readPoses(file):
    """Read poses (one per line: px py pz qx qy qz qw, space-separated)
    into a list of 4x4 homogeneous transform matrices.

    Arguments:
        file - path to the text file of poses
    Returns:
        list of (4, 4) numpy arrays, one per non-blank input line
    """
    # Idiom fix: context manager guarantees the file is closed.
    with open(file, 'r') as f:
        A = f.readlines()
    poses = []
    for line in A:
        # Robustness: skip blank/trailing-newline lines instead of crashing.
        if not line.strip():
            continue
        T = np.identity(4)
        row = line.split(' ')
        px, py, pz = float(row[0]), float(row[1]), float(row[2])
        qx, qy, qz, qw = float(row[3]), float(row[4]), float(row[5]), float(row[6])
        Rot = R.from_quat([qx, qy, qz, qw])
        # BUG FIX: Rotation.as_dcm() was deprecated in SciPy 1.4 and removed
        # in SciPy 1.6; prefer as_matrix(), falling back for old versions.
        if hasattr(Rot, 'as_matrix'):
            T[0:3, 0:3] = Rot.as_matrix()
        else:
            T[0:3, 0:3] = Rot.as_dcm()
        T[0, 3] = px
        T[1, 3] = py
        T[2, 3] = pz
        poses.append(T)
    return poses
def display(pcd, T=np.identity(4)):
    """Render a point cloud together with a unit coordinate frame posed at T.

    Blocks until the Open3D visualization window is closed.
    """
    axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0, 0, 0])
    axis.transform(T)
    o3d.visualization.draw_geometries([pcd, axis])
def draw_registration_result(source, target, transformation):
    """Visualize `source` aligned onto `target` by `transformation`.

    Deep-copies both clouds so the caller's geometry is not modified, and
    draws coordinate frames at the origin and at the transformed pose.
    Blocks until the Open3D window is closed.
    """
    geometries = []
    source_temp = copy.deepcopy(source)
    target_temp = copy.deepcopy(target)
    # source_temp.paint_uniform_color([1, 0.706, 0])
    # target_temp.paint_uniform_color([0, 0.651, 0.929])
    # X_target = Ttarget_source @ X_source
    # Ttarget_source : source wrt target
    source_temp.transform(transformation)
    geometries.append(source_temp); geometries.append(target_temp)
    axis1 = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5, origin=[0, 0, 0])
    axis2 = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5, origin=[0, 0, 0])
    axis2.transform(transformation)
    geometries.append(axis1); geometries.append(axis2)
    o3d.visualization.draw_geometries(geometries)
def readDepth(depthFile):
    """Load a depth image stored in PIL integer-intensity ("I") mode.

    Raises:
        Exception: if the image is not in "I" mode.
    Returns:
        numpy array of raw (unscaled) depth values.
    """
    depth = Image.open(depthFile)
    if depth.mode != "I":
        raise Exception("Depth image is not in intensity format")
    return np.asarray(depth)
def getPointCloud(rgbFile, depthFile):
    """Back-project an RGB-D image pair into a colored Open3D point cloud.

    Reads the pinhole intrinsics (focalX, focalY, centerX, centerY) and
    scalingFactor from module-level globals, so this must be called after
    they are assigned in __main__.

    Returns:
        (pcd, srcPxs) - the point cloud and a 2xN array of the (u, v)
        pixel coordinates that produced each point.
    """
    # pts = [(275, 462), (435, 275), (465, 27), (41, 197)]
    # poly = Polygon(pts)
    # thresh = 5.6
    # Maximum depth kept in the cloud (same units as Z below).
    thresh = 15.0
    depth = readDepth(depthFile)
    rgb = Image.open(rgbFile)
    # cv2.imshow("Image", np.array(cv2.cvtColor(np.array(rgb), cv2.COLOR_BGR2RGB)))
    # cv2.waitKey(0)
    # print(type(depth))
    # print(np.unique(depth))
    # print(rgb.size, depth.shape)
    # exit(1)
    points = []
    colors = []
    srcPxs = []
    # Per-pixel back-projection via the pinhole model.
    for v in range(depth.shape[0]):
        for u in range(depth.shape[1]):
            # p = Point(u, v)
            # if not (p.within(poly)):
            #     continue
            Z = depth[v, u] / scalingFactor
            # Skip invalid (zero) and too-distant depth readings.
            if Z==0: continue
            if (Z > thresh): continue
            X = (u - centerX) * Z / focalX
            Y = (v - centerY) * Z / focalY
            srcPxs.append((u, v))
            points.append((X, Y, Z))
            colors.append(rgb.getpixel((u, v)))
    srcPxs = np.asarray(srcPxs).T
    points = np.asarray(points)
    colors = np.asarray(colors)
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points)
    # Colors scaled from 0-255 to the 0-1 range Open3D expects.
    pcd.colors = o3d.utility.Vector3dVector(colors/255)
    return pcd, srcPxs
def right2left():
	"""Return the 4x4 change-of-basis matrix TR2L (right frame wrt left).

	Maps the right-handed ROS convention (X forward, Y left, Z up) onto the
	left-handed CV convention (Z forward, X right, Y down) by rotating
	+90 degrees about X followed by -90 degrees about Y.
	"""
	angle_x = math.radians(90)
	angle_y = math.radians(-90)
	cx, sx = math.cos(angle_x), math.sin(angle_x)
	cy, sy = math.cos(angle_y), math.sin(angle_y)
	rot_x = np.array([[1, 0, 0],
					  [0, cx, -sx],
					  [0, sx, cx]])
	rot_y = np.array([[cy, 0, sy],
					  [0, 1, 0],
					  [-sy, 0, cy]])
	TR2L = np.identity(4)
	TR2L[0:3, 0:3] = rot_y @ rot_x
	return TR2L
def getRelativeT(poses, srcIdx, trgIdx):
	"""Relative pose of source wrt target, expressed in the left-handed frame.

	Args:
		poses: sequence of 4x4 pose matrices in the right-handed ROS
			convention (X forward, Y left, Z up).
		srcIdx: index of the source pose.
		trgIdx: index of the target pose.

	Returns:
		4x4 transform Ttrg_src in the left-handed CV convention
		(Z forward, X right, Y down).
	"""
	# Relative transform in the right-handed frame: Ttrg^-1 @ Tsrc.
	rel_right = np.linalg.inv(poses[trgIdx]) @ poses[srcIdx]
	# Conjugate by the axis-swap matrix to re-express it left-handed.
	swap = right2left()
	return swap @ rel_right @ np.linalg.inv(swap)
def getHomo(srcPcd, Ttrg_src, srcPxs):
	# NOTE(review): looks unfinished -- it only transforms srcPcd in place
	# (mutating the caller's cloud) and returns nothing; srcPxs is unused.
	# Confirm intent before relying on this (it is commented out in main).
	srcPcd.transform(Ttrg_src)
if __name__ == '__main__':
	# Usage: <script> <gt_poses_file> <rgb_dir> <depth_dir>
	gtPoses = argv[1]
	rgbDir = argv[2]
	depthDir = argv[3]
	# natural_sort / readPoses are defined elsewhere in this file.
	rgbImgs = natural_sort(os.listdir(rgbDir))
	depthImgs = natural_sort(os.listdir(depthDir))
	poses = readPoses(gtPoses)
	# Keep only image files, prefixed with their directory.
	rgbImgs = [os.path.join(rgbDir, img) for img in rgbImgs if ".jpg" in img]
	depthImgs = [os.path.join(depthDir, img) for img in depthImgs if ".png" in img]
	# Frame pair to relate.
	srcIdx = 0
	trgIdx = 1399
	# Realsense D455 pinhole intrinsics (pixels); scalingFactor converts the
	# stored depth values to meters. These are read as globals by getPointCloud.
	focalX = 382.1996765136719
	focalY = 381.8395690917969
	centerX = 312.7102355957031
	centerY = 247.72047424316406
	scalingFactor = 1000.0
	# Source wrt target in Left hand
	Ttrg_src = getRelativeT(poses, srcIdx, trgIdx)
	srcPcd, srcPxs = getPointCloud(rgbImgs[srcIdx], depthImgs[srcIdx])
	trgPcd, trgPxs = getPointCloud(rgbImgs[trgIdx], depthImgs[trgIdx])
	# display(srcPcd)
	# display(trgPcd)
	# draw_registration_result(source=srcPcd, target=trgPcd, transformation=Ttrg_src)
	print(Ttrg_src)
	# getHomo(srcPcd, Ttrg_src, srcPxs)
|
{"hexsha": "7bc29592c391c77ceeeb5b441fc25c99a7292fc4", "size": 4979, "ext": "py", "lang": "Python", "max_stars_repo_path": "quantitative/getGTCorr.py", "max_stars_repo_name": "UditSinghParihar/d2-net", "max_stars_repo_head_hexsha": "b3592beebe6759cf4cc1acdfd23d603ef059ef30", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-09-04T04:06:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-29T08:41:37.000Z", "max_issues_repo_path": "quantitative/getGTCorr.py", "max_issues_repo_name": "UditSinghParihar/d2-net", "max_issues_repo_head_hexsha": "b3592beebe6759cf4cc1acdfd23d603ef059ef30", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "quantitative/getGTCorr.py", "max_forks_repo_name": "UditSinghParihar/d2-net", "max_forks_repo_head_hexsha": "b3592beebe6759cf4cc1acdfd23d603ef059ef30", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-09-24T04:36:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-12T08:40:49.000Z", "avg_line_length": 24.4068627451, "max_line_length": 134, "alphanum_fraction": 0.6878891344, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1639}
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to use variables as resources."""
# pylint: disable=g-bad-name
import contextlib
import functools
import weakref
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.compat import compat as forward_compat
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import auto_control_deps_utils as acd
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import handle_data_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_resource_variable_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.types import core
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
# Ops registered here only read (never mutate) their resource input, so
# automatic control dependencies need not serialize them against each other.
acd.register_read_only_resource_op("ReadVariableOp")
acd.register_read_only_resource_op("VariableShape")
acd.register_read_only_resource_op("ResourceGather")
acd.register_read_only_resource_op("ResourceGatherNd")
acd.register_read_only_resource_op("_ReadVariablesOp")
# TODO(allenl): Remove this alias and migrate callers.
get_resource_handle_data = handle_data_util.get_resource_handle_data
def get_eager_safe_handle_data(handle):
  """Get the data handle from the Tensor `handle`.

  EagerTensors carry their handle data directly; graph tensors recover it
  from the shape-inference metadata via `get_resource_handle_data`.
  """
  assert isinstance(handle, ops.Tensor)
  if not isinstance(handle, ops.EagerTensor):
    return get_resource_handle_data(handle)
  return handle._handle_data  # pylint: disable=protected-access
def _set_handle_shapes_and_types(tensor, handle_data, graph_mode):
  """Sets the shape inference result HandleData on tensor.

  Args:
    tensor: A `Tensor` or `EagerTensor`.
    handle_data: A `CppShapeInferenceResult.HandleData`.
    graph_mode: A python bool.
  """
  tensor._handle_data = handle_data  # pylint: disable=protected-access
  if not graph_mode:
    return
  # Not an EagerTensor, so a graph tensor.
  shapes, types = zip(
      *[(pair.shape, pair.dtype) for pair in handle_data.shape_and_type])
  # Wire encoding: rank -1 and shape None both mean "unknown".
  ranks = [len(s.dim) if not s.unknown_rank else -1 for s in shapes]
  shapes = [
      [d.size for d in s.dim]  # pylint: disable=g-complex-comprehension
      if not s.unknown_rank else None for s in shapes
  ]
  # Mirror the handle data into the underlying C graph so C++ shape
  # inference for downstream ops can see it.
  pywrap_tf_session.TF_GraphSetOutputHandleShapesAndTypes_wrapper(
      tensor._op._graph._c_graph,  # pylint: disable=protected-access
      tensor._as_tf_output(),  # pylint: disable=protected-access
      shapes,
      ranks,
      types)
def _combine_handle_data(handle, initial_value):
  """Concats HandleData from tensors `handle` and `initial_value`.

  Args:
    handle: A `Tensor` of dtype `resource`.
    initial_value: A `Tensor`.

  Returns:
    A `CppShapeInferenceResult.HandleData`.  If `initial_value` has dtype
    `variant`, the `HandleData` contains the concatenation of the
    shape_and_type from both `handle` and `initial_value`.

  Raises:
    RuntimeError: If handle, which was returned by VarHandleOp, either has
      no handle data, or its len(handle_data.shape_and_type) != 1.
  """
  assert handle.dtype == dtypes.resource
  variable_handle_data = get_eager_safe_handle_data(handle)
  # Only variant initial values carry extra handle data worth merging.
  if initial_value.dtype != dtypes.variant:
    return variable_handle_data
  extra_handle_data = get_eager_safe_handle_data(initial_value)
  if extra_handle_data is not None and extra_handle_data.is_set:
    # The variable's own entry must be exactly one shape/type pair before
    # the variant's entries are appended after it.
    if (variable_handle_data is None or not variable_handle_data.is_set or
        len(variable_handle_data.shape_and_type) != 1):
      raise RuntimeError(
          "Expected VarHandleOp to return a length==1 shape_and_type, "
          f"but saw: '{variable_handle_data}'")
    variable_handle_data.shape_and_type.extend(extra_handle_data.shape_and_type)
  return variable_handle_data
def _variable_handle_from_shape_and_dtype(shape,
                                          dtype,
                                          shared_name,
                                          name,
                                          graph_mode,
                                          initial_value=None):
  """Create a variable handle, copying in handle data from `initial_value`.

  Args:
    shape: Anything convertible to `TensorShape` for the variable.
    dtype: Anything convertible to `DType` for the variable.
    shared_name: A string; must be None in eager mode (an anonymous name is
      generated instead).
    name: Op name for the created VarHandleOp.
    graph_mode: A python bool; selects how handle data is attached.
    initial_value: Optional `Tensor` whose variant handle data (if any) is
      merged into the handle's.

  Returns:
    A `resource`-dtype `Tensor` handle with shape/type metadata attached.
  """
  container = ops.get_default_graph()._container  # pylint: disable=protected-access
  if container is None:
    container = ""
  shape = tensor_shape.as_shape(shape)
  dtype = dtypes.as_dtype(dtype)
  if not graph_mode:
    if shared_name is not None:
      raise errors.InternalError(  # pylint: disable=no-value-for-parameter
          "Using an explicit shared_name is not allowed when executing eagerly."
      )
    # Eager variables are anonymous: each handle refers to its own resource.
    shared_name = context.anonymous_name()
  handle = gen_resource_variable_ops.var_handle_op(
      shape=shape,
      dtype=dtype,
      shared_name=shared_name,
      name=name,
      container=container)
  if initial_value is None:
    initial_value = handle
  if graph_mode:
    # Graph mode: shape inference already populated the handle's data; just
    # merge in any variant data from the initial value.
    full_handle_data = _combine_handle_data(handle, initial_value)
    _set_handle_shapes_and_types(handle, full_handle_data, graph_mode)
    return handle
  else:
    # Eager mode: no shape inference ran, so build the HandleData proto by
    # hand from the requested shape/dtype.
    handle_data = cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData()
    handle_data.is_set = True
    handle_data.shape_and_type.append(
        cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(
            shape=shape.as_proto(), dtype=dtype.as_datatype_enum))
    if initial_value is not None and initial_value.dtype == dtypes.variant:
      extra_handle_data = get_eager_safe_handle_data(initial_value)
      if extra_handle_data is not None and extra_handle_data.is_set:
        if (not handle_data.is_set or len(handle_data.shape_and_type) != 1):
          raise RuntimeError(
              "Expected VarHandleOp to return a length==1 shape_and_type, "
              f"but saw: '{handle_data}'")
        handle_data.shape_and_type.extend(extra_handle_data.shape_and_type)
    _set_handle_shapes_and_types(handle, handle_data, graph_mode)
    return handle
def eager_safe_variable_handle(initial_value, shape, shared_name, name,
                               graph_mode):
  """Creates a variable handle with information to do shape inference.

  The dtype is read from `initial_value` and stored in the returned
  resource tensor's handle data.

  If `initial_value.dtype == tf.variant`, we additionally extract the handle
  data (if any) from `initial_value` and append it to the `handle_data`.
  In this case, the returned tensor's handle data is in the form

  ```
  is_set: true
  shape_and_type {
    shape {
      // initial_value.shape
    }
    dtype: DT_VARIANT
  }
  shape_and_type {
    // handle_data(initial_value).shape_and_type[0]
  }
  shape_and_type {
    // handle_data(initial_value).shape_and_type[1]
  }
  ...
  ```

  Ops that read from this tensor, such as `ReadVariableOp` and
  `AssignVariableOp`, know that `handle_data(handle).shape_and_type[1:]`
  correspond to the handle data of the variant(s) stored in the Variable.

  Args:
    initial_value: A `Tensor`.
    shape: The shape of the handle data. Can be `TensorShape(None)` (i.e.
      unknown shape).
    shared_name: A string.
    name: A string.
    graph_mode: A python bool.

  Returns:
    The handle, a `Tensor` of type `resource`.
  """
  # base_dtype strips any reference qualifier before creating the handle.
  dtype = initial_value.dtype.base_dtype
  return _variable_handle_from_shape_and_dtype(shape, dtype, shared_name, name,
                                               graph_mode, initial_value)
@contextlib.contextmanager
def _handle_graph(handle):
  """Context manager entering `handle`'s graph when one is needed.

  Note: might have an eager tensor but not be executing eagerly when
  building functions.
  """
  eager_like = (context.executing_eagerly() or
                isinstance(handle, ops.EagerTensor) or
                ops.has_default_graph())
  if eager_like:
    yield
    return
  with handle.graph.as_default():
    yield
class EagerResourceDeleter:
  """An object which cleans up a resource handle.

  An alternative to defining a __del__ method on an object. The intended use
  is that ResourceVariables or other objects with resource handles will
  maintain a single reference to this object. When the parent object is
  collected, this object will be too. Even if the parent object is part of a
  reference cycle, the cycle will be collectable.
  """

  # __slots__ avoids a per-instance __dict__ and accidental attribute growth.
  __slots__ = ["_handle", "_handle_device", "_context"]

  def __init__(self, handle, handle_device):
    """Args:
      handle: The resource `Tensor` to destroy on collection.
      handle_device: Device string the resource lives on.
    """
    if not isinstance(handle, ops.Tensor):
      raise ValueError(
          (f"Passed handle={handle} to EagerResourceDeleter. Was expecting "
           f"the handle to be a `tf.Tensor`."))
    self._handle = handle
    self._handle_device = handle_device
    # This is held since the __del__ function runs an op, and if the context()
    # is collected before this object, there will be a segfault when running
    # the op.
    self._context = context.context()

  def __del__(self):
    # Resources follow object-identity when executing eagerly, so it is safe
    # to delete the resource we have a handle to.
    try:
      # A packed EagerTensor doesn't own any resource.
      if isinstance(self._handle, ops.EagerTensor) and self._handle.is_packed:
        return
      # This resource was created in eager mode. However, this destructor may
      # be running in graph mode (especially during unit tests). To clean up
      # successfully, we switch back into eager mode temporarily.
      with context.eager_mode():
        with ops.device(self._handle_device):
          # ignore_lookup_error: the resource may already be gone.
          gen_resource_variable_ops.destroy_resource_op(
              self._handle, ignore_lookup_error=True)
    except TypeError:
      # Suppress some exceptions, mainly for the case when we're running on
      # module deletion. Things that can go wrong include the context module
      # already being unloaded, self._handle._handle_data no longer being
      # valid, and so on. Printing warnings in these cases is silly
      # (exceptions raised from __del__ are printed as warnings to stderr).
      pass  # 'NoneType' object is not callable when the handle has been
      # partially unloaded.
    except AttributeError:
      pass  # 'NoneType' object has no attribute 'eager_mode' when context has
      # been unloaded. Will catch other module unloads as well.
def shape_safe_assign_variable_handle(handle, shape, value, name=None):
  """Helper that checks shape compatibility and assigns variable."""
  with _handle_graph(handle):
    tensor_value = ops.convert_to_tensor(value)
    # Fail fast in Python on incompatible shapes instead of deferring to
    # the assign kernel.
    shape.assert_is_compatible_with(tensor_value.shape)
    return gen_resource_variable_ops.assign_variable_op(
        handle, tensor_value, name=name)
def _maybe_set_handle_data(dtype, handle, tensor):
  """Copies variant handle data from `handle` onto `tensor`, when present."""
  if dtype != dtypes.variant:
    return
  # For DT_VARIANT types, the handle's shape_and_type[1:] stores the
  # variant's handle data. Extract it.
  handle_data = get_eager_safe_handle_data(handle)
  if handle_data.is_set and len(handle_data.shape_and_type) > 1:
    tensor._handle_data = (  # pylint: disable=protected-access
        cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData(
            is_set=True, shape_and_type=handle_data.shape_and_type[1:]))
def variable_accessed(variable):
  """Records that `variable` was accessed for the tape and FuncGraph."""
  graph = ops.get_default_graph()
  # Only FuncGraphs expose watch_variable; plain graphs are skipped.
  if hasattr(graph, "watch_variable"):
    graph.watch_variable(variable)
  if variable.trainable:
    tape.variable_accessed(variable)
class BaseResourceVariable(variables.VariableV1, core.Tensor):
"""A python variable from an existing handle."""
# TODO(wangpeng): Deprecate `constraint` when callers no long pass it in.
  def __init__(  # pylint: disable=super-init-not-called
      self,
      trainable=None,
      shape=None,
      dtype=None,
      handle=None,
      constraint=None,
      synchronization=None,
      aggregation=None,
      distribute_strategy=None,
      name=None,
      unique_id=None,
      handle_name=None,
      graph_element=None,
      initial_value=None,
      initializer_op=None,
      is_initialized_op=None,
      cached_value=None,
      save_slice_info=None,
      caching_device=None,
      in_graph_mode=None,
      validate_shape=True,
      **unused_kwargs):
    """Creates a variable from a handle.

    Args:
      trainable: If `True`, GradientTapes automatically watch uses of this
        Variable.
      shape: The variable's shape. This shape can be set to tf.TensorShape(None)
        in order to assign values of different shapes to this variable.
        Otherwise (i.e. if the shape is fully determined), it will trigger run
        time checks to ensure that each assignment is of the same shape.
      dtype: The variable's dtype.
      handle: The variable's handle
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
      synchronization: Indicates when a distributed a variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses when to
        synchronize.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      distribute_strategy: The distribution strategy this variable was created
        under.
      name: The name for this variable.
      unique_id: Internal. Unique ID for this variable's handle.
      handle_name: The name for the variable's handle.
      graph_element: Optional, required only in session.run-mode. Pre-created
        tensor which reads this variable's value.
      initial_value: Optional. Variable's initial value.
      initializer_op: Operation which assigns the variable's initial value.
      is_initialized_op: Pre-created operation to check whether this variable is
        initialized.
      cached_value: Pre-created operation to read this variable in a specific
        device.
      save_slice_info: Metadata for variable partitioning.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading. Defaults to the Variable's
        device. If not `None`, caches on another device. Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      in_graph_mode: whether we are executing in TF1 graph mode. If None, will
        detect within the function. This is to avoid repeated init_scope()
        context entrances which can add up.
      validate_shape: If `False`, allows the variable to be initialized with a
        value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
    """
    if in_graph_mode is None:
      # init_scope lifts us out of any function-building graph so the mode
      # check reflects the outermost context.
      with ops.init_scope():
        self._in_graph_mode = not context.executing_eagerly()
    else:
      self._in_graph_mode = in_graph_mode
    # Normalizes the three options together (e.g. ON_READ implies a default
    # for trainable) and validates the combination.
    synchronization, aggregation, trainable = (
        variables.validate_synchronization_aggregation_trainable(
            synchronization, aggregation, trainable, name))
    self._trainable = trainable
    self._synchronization = synchronization
    self._aggregation = aggregation
    self._save_slice_info = save_slice_info
    self._initial_value = initial_value
    self._initializer_op = initializer_op
    self._is_initialized_op = is_initialized_op
    self._graph_element = graph_element
    self._caching_device = caching_device
    self._cached_value = cached_value
    self._distribute_strategy = distribute_strategy
    # Store the graph key so optimizers know how to only retrieve variables
    # from this graph. Guaranteed to be the same as the eager graph_key.
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    self._shape = tensor_shape.as_shape(shape)
    self._dtype = dtypes.as_dtype(dtype)
    self._handle = handle
    self._unique_id = unique_id
    # Tensor-style ":0" suffix matches the handle's first output name.
    if handle_name is None:
      self._handle_name = "Variable:0"
    else:
      self._handle_name = handle_name + ":0"
    self._constraint = constraint
    self._cached_shape_as_list = None
    self._validate_shape = validate_shape
  def __repr__(self):
    """Repr with the current value when it can be read eagerly."""
    if context.executing_eagerly() and not self._in_graph_mode:
      # If we cannot read the value for any reason (e.g. variable uninitialized
      # during tf.function tracing), still produce a __repr__. Note that for
      # async eager, errors due to uninitialized variables will raise in
      # ops.value_text when the handle is resolved, so we need to keep that
      # under the try...except if we want to suppress them.
      try:
        with ops.device(self.device):
          value_text = ops.value_text(self.read_value(), is_repr=True)
      except:  # pylint: disable=bare-except
        value_text = "numpy=<unavailable>"
      return "<tf.Variable '%s' shape=%s dtype=%s, %s>" % (
          self.name, self.get_shape(), self.dtype.name, value_text)
    else:
      # Graph mode: no value to show without running a session.
      return "<tf.Variable '%s' shape=%s dtype=%s>" % (
          self.name, self.get_shape(), self.dtype.name)

  def __tf_tracing_type__(self, signature_context):
    # Trace by handle identity so retracing distinguishes distinct variables
    # with the same shape/dtype.
    return signature_context.make_reference_type(
        VariableSpec(self.shape, self.dtype), self._handle._id)  # pylint:disable=protected-access
  @contextlib.contextmanager
  def _assign_dependencies(self):
    """Makes assignments depend on the cached value, if any.

    This prevents undefined behavior with reads not ordered wrt writes.

    Yields:
      None.
    """
    if self._cached_value is not None:
      with ops.control_dependencies([self._cached_value]):
        yield
    else:
      yield

  def __array__(self, dtype=None):
    """Allows direct conversion to a numpy array.

    >>> np.array(tf.Variable([1.0]))
    array([1.], dtype=float32)

    Returns:
      The variable value as a numpy array.
    """
    # You can't return `self.numpy()` here because for scalars
    # that raises:
    # ValueError: object __array__ method not producing an array
    # Even `self.read_value().__array__()` and `self.read_value()._numpy()`
    # give the same error. The `EagerTensor` class must be doing something
    # behind the scenes to make `np.array(tf.constant(1))` work.
    return np.asarray(self.numpy(), dtype=dtype)

  def __nonzero__(self):
    # Python 2 truthiness alias; delegates to __bool__.
    return self.__bool__()

  def __bool__(self):
    # Reads the value; raises in graph mode like any Tensor would.
    return bool(self.read_value())

  def __copy__(self):
    # Shallow copy shares the underlying resource on purpose.
    return self

  def __deepcopy__(self, memo):
    """Eager-only: creates a new variable with a copy of this one's value."""
    if not context.executing_eagerly():
      raise NotImplementedError(
          "__deepcopy__() is only available when eager execution is enabled.")
    copied_variable = ResourceVariable(
        initial_value=self.read_value(),
        trainable=self._trainable,
        constraint=self._constraint,
        dtype=self._dtype,
        name=self._shared_name,
        distribute_strategy=self._distribute_strategy,
        synchronization=self.synchronization,
        aggregation=self.aggregation)
    # Record in memo so aliases inside the copied structure stay aliases.
    memo[self._unique_id] = copied_variable
    return copied_variable
  @property
  def dtype(self):
    """The dtype of this variable."""
    return self._dtype

  @property
  def device(self):
    """The device this variable is on."""
    return self.handle.device

  @property
  def graph(self):
    """The `Graph` of this variable."""
    return self.handle.graph

  @property
  def name(self):
    """The name of the handle for this variable."""
    return self._handle_name

  @property
  def shape(self):
    """The shape of this variable."""
    return self._shape

  def set_shape(self, shape):
    """Refines the static shape; raises if `shape` is incompatible."""
    self._shape = self._shape.merge_with(shape)

  def _shape_as_list(self):
    # None for unknown rank; unknown dimensions appear as None entries.
    if self.shape.ndims is None:
      return None
    return [dim.value for dim in self.shape.dims]

  def _shape_tuple(self):
    # Tuple form of _shape_as_list, or None for unknown rank.
    shape = self._shape_as_list()
    if shape is None:
      return None
    return tuple(shape)

  @property
  def create(self):
    """The op responsible for initializing this variable."""
    if not self._in_graph_mode:
      raise RuntimeError("This operation is not supported "
                         "when eager execution is enabled.")
    return self._initializer_op

  @property
  def handle(self):
    """The handle by which this variable can be accessed."""
    return self._handle

  def value(self):
    """A cached operation which reads the value of this variable."""
    if self._cached_value is not None:
      return self._cached_value
    # Read may be placed freely rather than pinned next to the variable.
    with ops.colocate_with(None, ignore_existing=True):
      return self._read_variable_op()

  def _as_graph_element(self):
    """Conversion function for Graph.as_graph_element()."""
    return self._graph_element

  @property
  def initializer(self):
    """The op responsible for initializing this variable."""
    return self._initializer_op

  @property
  def initial_value(self):
    """Returns the Tensor used as the initial value for the variable."""
    if context.executing_eagerly():
      raise RuntimeError("This property is not supported "
                         "when eager execution is enabled.")
    return self._initial_value

  @property
  def constraint(self):
    """Returns the constraint function associated with this variable.

    Returns:
      The constraint function that was passed to the variable constructor.
      Can be `None` if no constraint was passed.
    """
    return self._constraint

  @property
  def op(self):
    """The op for this variable."""
    return self.handle.op

  @property
  def trainable(self):
    """Whether gradient tapes watch this variable automatically."""
    return self._trainable

  @property
  def synchronization(self):
    """The `tf.VariableSynchronization` setting for this variable."""
    return self._synchronization

  @property
  def aggregation(self):
    """The `tf.VariableAggregation` setting for this variable."""
    return self._aggregation
  def eval(self, session=None):
    """Evaluates and returns the value of this variable."""
    if context.executing_eagerly():
      raise RuntimeError("This operation is not supported "
                         "when eager execution is enabled.")
    # Delegates to the pre-created graph read tensor.
    return self._graph_element.eval(session=session)

  def numpy(self):
    """Eager-only: the variable's value as a numpy array."""
    if context.executing_eagerly():
      return self.read_value().numpy()
    raise NotImplementedError(
        "numpy() is only available when eager execution is enabled.")

  @deprecated(None, "Prefer Dataset.range instead.")
  def count_up_to(self, limit):
    """Increments this variable until it reaches `limit`.

    When that Op is run it tries to increment the variable by `1`. If
    incrementing the variable would bring it above `limit` then the Op raises
    the exception `OutOfRangeError`.

    If no error is raised, the Op outputs the value of the variable before
    the increment.

    This is essentially a shortcut for `count_up_to(self, limit)`.

    Args:
      limit: value at which incrementing the variable raises an error.

    Returns:
      A `Tensor` that will hold the variable value before the increment. If no
      other Op modifies this variable, the values produced will all be
      distinct.
    """
    return gen_state_ops.resource_count_up_to(
        self.handle, limit=limit, T=self.dtype)
  def _map_resources(self, save_options):
    """For implementing `Trackable`.

    Returns (obj_map, resource_map) mapping this variable and its handle to
    an uninitialized graph copy, for use when exporting a SavedModel.
    """
    new_variable = None
    if save_options.experimental_variable_policy._save_variable_devices():  # pylint:disable=protected-access
      # Pin the copy to this variable's device when the policy asks for it.
      with ops.device(self.device):
        new_variable = copy_to_graph_uninitialized(self)
    else:
      new_variable = copy_to_graph_uninitialized(self)
    obj_map = {self: new_variable}
    resource_map = {self.handle: new_variable.handle}
    return obj_map, resource_map
  def _read_variable_op(self, no_copy=False):
    """Reads the value of the variable.

    If the variable is in copy-on-read mode and `no_copy` is True, the
    variable is converted to copy-on-write mode before it is read.

    Args:
      no_copy: Whether to prevent a copy of the variable.

    Returns:
      The value of the variable.
    """
    variable_accessed(self)

    def read_and_set_handle(no_copy):
      # disable_copy_on_read is only available past the forward-compat
      # horizon (2022-05-03).
      if no_copy and forward_compat.forward_compatible(2022, 5, 3):
        gen_resource_variable_ops.disable_copy_on_read(self.handle)
      result = gen_resource_variable_ops.read_variable_op(
          self.handle, self._dtype)
      _maybe_set_handle_data(self._dtype, self.handle, result)
      return result

    if getattr(self, "_caching_device", None) is not None:
      # Place the read on the caching device rather than the variable's.
      with ops.colocate_with(None, ignore_existing=True):
        with ops.device(self._caching_device):
          result = read_and_set_handle(no_copy)
    else:
      result = read_and_set_handle(no_copy)

    if not context.executing_eagerly():
      # Note that if a control flow context is active the input of the read op
      # might not actually be the handle. This line bypasses it.
      tape.record_operation(
          "ReadVariableOp", [result], [self.handle],
          backward_function=lambda x: [x],
          forward_function=lambda x: [x])
    return result
def read_value(self):
"""Constructs an op which reads the value of this variable.
Should be used when there are multiple reads, or when it is desirable to
read the value only after some condition is true.
Returns:
The value of the variable.
"""
with ops.name_scope("Read"):
value = self._read_variable_op()
# Return an identity so it can get placed on whatever device the context
# specifies instead of the device where the variable is.
return array_ops.identity(value)
def read_value_no_copy(self):
"""Constructs an op which reads the value of this variable without copy.
The variable is read without making a copy even when it has been sparsely
accessed. Variables in copy-on-read mode will be converted to copy-on-write
mode.
Returns:
The value of the variable.
"""
with ops.name_scope("Read"):
value = self._read_variable_op(no_copy=True)
# Return an identity so it can get placed on whatever device the context
# specifies instead of the device where the variable is.
return array_ops.identity(value)
def sparse_read(self, indices, name=None):
"""Reads the value of this variable sparsely, using `gather`."""
with ops.name_scope("Gather" if name is None else name) as name:
variable_accessed(self)
value = gen_resource_variable_ops.resource_gather(
self.handle, indices, dtype=self._dtype, name=name)
if self._dtype == dtypes.variant:
# For DT_VARIANT types, the handle's shape_and_type[1:] stores the
# variant's handle data. Extract it.
handle_data = get_eager_safe_handle_data(self.handle)
if handle_data.is_set and len(handle_data.shape_and_type) > 1:
value._handle_data = ( # pylint: disable=protected-access
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData(
is_set=True, shape_and_type=handle_data.shape_and_type[1:]))
return array_ops.identity(value)
def gather_nd(self, indices, name=None):
"""Reads the value of this variable sparsely, using `gather_nd`."""
with ops.name_scope("GatherNd" if name is None else name) as name:
if self.trainable:
variable_accessed(self)
value = gen_resource_variable_ops.resource_gather_nd(
self.handle, indices, dtype=self._dtype, name=name)
return array_ops.identity(value)
  def to_proto(self, export_scope=None):
    """Converts a `ResourceVariable` to a `VariableDef` protocol buffer.

    Args:
      export_scope: Optional `string`. Name scope to remove.

    Raises:
      RuntimeError: If run in EAGER mode.

    Returns:
      A `VariableDef` protocol buffer, or `None` if the `Variable` is not
      in the specified name scope.
    """
    if context.executing_eagerly():
      raise RuntimeError("This operation is not supported "
                         "when eager execution is enabled.")
    if export_scope is None or self.handle.name.startswith(export_scope):
      var_def = variable_pb2.VariableDef()
      var_def.variable_name = ops.strip_name_scope(self.handle.name,
                                                   export_scope)
      if self._initial_value is not None:
        # This is inside an if-statement for backwards compatibility, since
        # self._initial_value might be None for variables constructed from old
        # protos.
        var_def.initial_value_name = ops.strip_name_scope(
            self._initial_value.name, export_scope)
      var_def.initializer_name = ops.strip_name_scope(self.initializer.name,
                                                      export_scope)
      if self._cached_value is not None:
        var_def.snapshot_name = ops.strip_name_scope(self._cached_value.name,
                                                     export_scope)
      else:
        # Store the graph_element here
        var_def.snapshot_name = ops.strip_name_scope(self._graph_element.name,
                                                     export_scope)
      var_def.is_resource = True
      var_def.trainable = self.trainable
      var_def.synchronization = self.synchronization.value
      var_def.aggregation = self.aggregation.value
      if self._save_slice_info:
        # Preserve partitioning metadata for partitioned variables.
        var_def.save_slice_info_def.MergeFrom(
            self._save_slice_info.to_proto(export_scope=export_scope))
      return var_def
    else:
      return None

  @staticmethod
  def from_proto(variable_def, import_scope=None):
    """Reconstructs a `ResourceVariable` from a `VariableDef` proto."""
    if context.executing_eagerly():
      raise RuntimeError("This operation is not supported "
                         "when eager execution is enabled.")
    return ResourceVariable(
        variable_def=variable_def, import_scope=import_scope)

  # Make numpy prefer our __array_priority__ over plain ndarrays in mixed
  # arithmetic.
  __array_priority__ = 100
  def is_initialized(self, name=None):
    """Checks whether a resource variable has been initialized.

    Outputs boolean scalar indicating whether the tensor has been initialized.

    Args:
      name: A name for the operation (optional).

    Returns:
      A `Tensor` of type `bool`.
    """
    return gen_resource_variable_ops.var_is_initialized_op(self.handle, name)

  def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
    """Subtracts a value from this variable.

    Args:
      delta: A `Tensor`. The value to subtract from this variable.
      use_locking: If `True`, use locking during the operation.
      name: The name to use for the operation.
      read_value: A `bool`. Whether to read and return the new value of the
        variable or not.

    Returns:
      If `read_value` is `True`, this method will return the new value of the
      variable after the assignment has completed. Otherwise, when in graph
      mode it will return the `Operation` that does the assignment, and when
      in eager mode it will return `None`.
    """
    # TODO(apassos): this here and below is not atomic. Consider making it
    # atomic if there's a way to do so without a performance cost for those
    # who don't need it.
    with _handle_graph(self.handle), self._assign_dependencies():
      assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op(
          self.handle,
          ops.convert_to_tensor(delta, dtype=self.dtype),
          name=name)
    if read_value:
      return self._lazy_read(assign_sub_op)
    return assign_sub_op

  def assign_add(self, delta, use_locking=None, name=None, read_value=True):
    """Adds a value to this variable.

    Args:
      delta: A `Tensor`. The value to add to this variable.
      use_locking: If `True`, use locking during the operation.
      name: The name to use for the operation.
      read_value: A `bool`. Whether to read and return the new value of the
        variable or not.

    Returns:
      If `read_value` is `True`, this method will return the new value of the
      variable after the assignment has completed. Otherwise, when in graph
      mode it will return the `Operation` that does the assignment, and when
      in eager mode it will return `None`.
    """
    with _handle_graph(self.handle), self._assign_dependencies():
      assign_add_op = gen_resource_variable_ops.assign_add_variable_op(
          self.handle,
          ops.convert_to_tensor(delta, dtype=self.dtype),
          name=name)
    if read_value:
      return self._lazy_read(assign_add_op)
    return assign_add_op
  def _lazy_read(self, op):
    """Wraps an update op so the variable's new value is read on demand.

    Args:
      op: The update `Operation` whose result should be observable when the
        returned value is read.

    Returns:
      An `_UnreadVariable` that defers the actual read until it is used.
    """
    # NOTE(review): `variable_accessed` appears to record this access for
    # dependency/gradient tracking — confirm against its definition.
    variable_accessed(self)
    return _UnreadVariable(
        handle=self.handle,
        dtype=self.dtype,
        shape=self._shape,
        in_graph_mode=self._in_graph_mode,
        parent_op=op,
        unique_id=self._unique_id)
def assign(self, value, use_locking=None, name=None, read_value=True):
"""Assigns a new value to this variable.
Args:
value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
name: The name to use for the assignment.
read_value: A `bool`. Whether to read and return the new value of the
variable or not.
Returns:
If `read_value` is `True`, this method will return the new value of the
variable after the assignment has completed. Otherwise, when in graph mode
it will return the `Operation` that does the assignment, and when in eager
mode it will return `None`.
"""
# Note: not depending on the cached value here since this can be used to
# initialize the variable.
with _handle_graph(self.handle):
value_tensor = ops.convert_to_tensor(value, dtype=self.dtype)
if not self._shape.is_compatible_with(value_tensor.shape):
if self.name is None:
tensor_name = ""
else:
tensor_name = " " + str(self.name)
raise ValueError(
(f"Cannot assign value to variable '{tensor_name}': Shape mismatch."
f"The variable shape {self._shape}, and the "
f"assigned value shape {value_tensor.shape} are incompatible."))
kwargs = {}
if forward_compat.forward_compatible(2022, 3, 23):
# If the shape is fully defined, we do a runtime check with the shape of
# value.
validate_shape = self._validate_shape and self._shape.is_fully_defined()
kwargs["validate_shape"] = validate_shape
assign_op = gen_resource_variable_ops.assign_variable_op(
self.handle, value_tensor, name=name, **kwargs)
if read_value:
return self._lazy_read(assign_op)
return assign_op
  def __reduce__(self):
    """Supports pickling by recreating the variable from its current value."""
    # The implementation mirrors that of __deepcopy__.
    # NOTE(review): `self.numpy()` reads the value eagerly; pickling a
    # graph-mode variable is presumably unsupported — confirm.
    return functools.partial(
        ResourceVariable,
        initial_value=self.numpy(),
        trainable=self.trainable,
        name=self._shared_name,
        dtype=self.dtype,
        constraint=self.constraint,
        distribute_strategy=self._distribute_strategy), ()
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
"""Subtracts `tf.IndexedSlices` from this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be subtracted from this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_sub(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_add(self, sparse_delta, use_locking=False, name=None):
"""Adds `tf.IndexedSlices` to this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be added to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_add(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_max(self, sparse_delta, use_locking=False, name=None):
"""Updates this variable with the max of `tf.IndexedSlices` and itself.
Args:
sparse_delta: `tf.IndexedSlices` to use as an argument of max with this
variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_max(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_min(self, sparse_delta, use_locking=False, name=None):
"""Updates this variable with the min of `tf.IndexedSlices` and itself.
Args:
sparse_delta: `tf.IndexedSlices` to use as an argument of min with this
variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_min(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
"""Multiply this variable by `tf.IndexedSlices`.
Args:
sparse_delta: `tf.IndexedSlices` to multiply this variable by.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_mul(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_div(self, sparse_delta, use_locking=False, name=None):
"""Divide this variable by `tf.IndexedSlices`.
Args:
sparse_delta: `tf.IndexedSlices` to divide this variable by.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_div(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_update(self, sparse_delta, use_locking=False, name=None):
"""Assigns `tf.IndexedSlices` to this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_update(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
  def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):
    """Assigns `tf.IndexedSlices` to this variable batch-wise.

    Analogous to `batch_gather`. This assumes that this variable and the
    sparse_delta IndexedSlices have a series of leading dimensions that are the
    same for all of them, and the updates are performed on the last dimension of
    indices. In other words, the dimensions should be the following:

    `num_prefix_dims = sparse_delta.indices.ndims - 1`
    `batch_dim = num_prefix_dims + 1`
    `sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[
         batch_dim:]`

    where

    `sparse_delta.updates.shape[:num_prefix_dims]`
    `== sparse_delta.indices.shape[:num_prefix_dims]`
    `== var.shape[:num_prefix_dims]`

    And the operation performed can be expressed as:

    `var[i_1, ..., i_n,
         sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[
            i_1, ..., i_n, j]`

    When sparse_delta.indices is a 1D tensor, this operation is equivalent to
    `scatter_update`.

    To avoid this operation one can loop over the first `ndims` of the
    variable and use `scatter_update` on the subtensors that result from
    slicing the first dimension. This is a valid option for `ndims = 1`, but
    less efficient than this implementation.

    Args:
      sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.

    Returns:
      The updated variable.

    Raises:
      TypeError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
      raise TypeError(f"Argument `sparse_delta` must be a "
                      f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
    return self._lazy_read(
        state_ops.batch_scatter_update(
            self,
            sparse_delta.indices,
            sparse_delta.values,
            use_locking=use_locking,
            name=name))
  def scatter_nd_sub(self, indices, updates, name=None):
    """Applies sparse subtraction to individual values or slices in a Variable.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to subtract 4 scattered elements from a rank-1
    tensor with 8 elements. In Python, that update would look like this:

    ```python
        ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        indices = tf.constant([[4], [3], [1] ,[7]])
        updates = tf.constant([9, 10, 11, 12])
        op = ref.scatter_nd_sub(indices, updates)
        with tf.compat.v1.Session() as sess:
          print(sess.run(op))
    ```

    The resulting update to ref would look like this:

        [1, -9, 3, -6, -6, 6, 7, -4]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      The updated variable.
    """
    return self._lazy_read(
        gen_state_ops.resource_scatter_nd_sub(
            self.handle,
            indices,
            ops.convert_to_tensor(updates, self.dtype),
            name=name))
  def scatter_nd_add(self, indices, updates, name=None):
    """Applies sparse addition to individual values or slices in a Variable.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to add 4 scattered elements to a rank-1 tensor
    with 8 elements. In Python, that update would look like this:

    ```python
        ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        indices = tf.constant([[4], [3], [1] ,[7]])
        updates = tf.constant([9, 10, 11, 12])
        add = ref.scatter_nd_add(indices, updates)
        with tf.compat.v1.Session() as sess:
          print(sess.run(add))
    ```

    The resulting update to ref would look like this:

        [1, 13, 3, 14, 14, 6, 7, 20]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      The updated variable.
    """
    return self._lazy_read(
        gen_state_ops.resource_scatter_nd_add(
            self.handle,
            indices,
            ops.convert_to_tensor(updates, self.dtype),
            name=name))
  def scatter_nd_update(self, indices, updates, name=None):
    """Applies sparse assignment to individual values or slices in a Variable.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to assign 4 scattered elements of a rank-1 tensor
    with 8 elements. In Python, that update would look like this:

    ```python
        ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        indices = tf.constant([[4], [3], [1] ,[7]])
        updates = tf.constant([9, 10, 11, 12])
        op = ref.scatter_nd_update(indices, updates)
        with tf.compat.v1.Session() as sess:
          print(sess.run(op))
    ```

    The resulting update to ref would look like this:

        [1, 11, 3, 10, 9, 6, 7, 12]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      The updated variable.
    """
    return self._lazy_read(
        gen_state_ops.resource_scatter_nd_update(
            self.handle,
            indices,
            ops.convert_to_tensor(updates, self.dtype),
            name=name))
  def scatter_nd_max(self, indices, updates, name=None):
    """Updates this variable with the element-wise max of `updates` and itself.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      The updated variable.
    """
    return self._lazy_read(
        gen_state_ops.resource_scatter_nd_max(
            self.handle,
            indices,
            ops.convert_to_tensor(updates, self.dtype),
            name=name))
  def scatter_nd_min(self, indices, updates, name=None):
    """Updates this variable with the element-wise min of `updates` and itself.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      The updated variable.
    """
    return self._lazy_read(
        gen_state_ops.resource_scatter_nd_min(
            self.handle,
            indices,
            ops.convert_to_tensor(updates, self.dtype),
            name=name))
  def _write_object_proto(self, proto, options):
    """Writes additional information of the variable into the SavedObject proto.

    Subclasses of ResourceVariables could choose to override this method to
    customize extra information to provide when saving a SavedModel.

    Ideally, this should contain the logic in
    write_object_proto_for_resource_variable but `DistributedValue` is an
    outlier at the moment. Once `DistributedValue` becomes a proper
    ResourceVariable, we should remove the helper method below.

    Args:
      proto: `SavedObject` proto to update.
      options: A `SaveOption` instance that configures save behavior.
    """
    write_object_proto_for_resource_variable(self, proto, options)
  def _strided_slice_assign(self, begin, end, strides, value, name, begin_mask,
                            end_mask, ellipsis_mask, new_axis_mask,
                            shrink_axis_mask):
    """Assigns `value` to a strided slice of this variable.

    Backs sliced assignment such as `var[begin:end:strides].assign(value)`;
    the mask arguments mirror those of the strided-slice op.

    Args:
      begin: Start indices of the slice.
      end: End indices of the slice.
      strides: Stride for each dimension of the slice.
      value: The value to assign; converted to this variable's dtype.
      name: Name for the op.
      begin_mask: Bitmask for dimensions whose `begin` is ignored.
      end_mask: Bitmask for dimensions whose `end` is ignored.
      ellipsis_mask: Bitmask marking an ellipsis position.
      new_axis_mask: Bitmask marking inserted axes.
      shrink_axis_mask: Bitmask marking axes to shrink.

    Returns:
      The updated variable (read lazily).
    """
    with _handle_graph(self.handle), self._assign_dependencies():
      return self._lazy_read(
          gen_array_ops.resource_strided_slice_assign(
              ref=self.handle,
              begin=begin,
              end=end,
              strides=strides,
              value=ops.convert_to_tensor(value, dtype=self.dtype),
              name=name,
              begin_mask=begin_mask,
              end_mask=end_mask,
              ellipsis_mask=ellipsis_mask,
              new_axis_mask=new_axis_mask,
              shrink_axis_mask=shrink_axis_mask))
  # Python scalar conversions: each reads the current value via `.numpy()`.
  def __complex__(self):
    return complex(self.value().numpy())
  def __int__(self):
    return int(self.value().numpy())
  def __long__(self):
    # NOTE(review): `long` exists only in Python 2; under Python 3 this body
    # would raise NameError, but Python 3 never invokes `__long__`, so this
    # is effectively dead code kept for Python 2 compatibility.
    return long(self.value().numpy())
  def __float__(self):
    return float(self.value().numpy())
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
del name
if dtype is not None and not dtype.is_compatible_with(self.dtype):
raise ValueError(
f"Incompatible type conversion requested to type {dtype.name} for "
f"`tf.Variable of type {self.dtype.name}. (Variable: {self})")
if as_ref:
return self.read_value().op.inputs[0]
else:
return self.value()
  def __iadd__(self, unused_other):
    """Rejects `variable += value`; users must call `assign_add` instead."""
    raise RuntimeError("`variable += value` with `tf.Variable`s is not "
                       "supported. Use `variable.assign_add(value)` to modify "
                       "the variable, or `out = variable + value` if you "
                       "need to get a new output Tensor.")
def __isub__(self, unused_other):
raise RuntimeError("`variable -= value` with `tf.Variable`s is not "
"supported. Use `variable.assign_sub(value)` to modify "
"the variable, or `out = variable * value` if you "
"need to get a new output Tensor.")
  # The remaining in-place operators are likewise rejected; each message
  # points the user at the equivalent assign-based or pure-Tensor form.
  def __imul__(self, unused_other):
    """Rejects `var *= value`; see the error message for alternatives."""
    raise RuntimeError("`var *= value` with `tf.Variable`s is not "
                       "supported. Use `var.assign(var * value)` to modify "
                       "the variable, or `out = var * value` if you "
                       "need to get a new output Tensor.")
  def __idiv__(self, unused_other):
    """Rejects `var /= value` (Python 2 division protocol)."""
    raise RuntimeError("`var /= value` with `tf.Variable`s is not "
                       "supported. Use `var.assign(var / value)` to modify "
                       "the variable, or `out = var / value` if you "
                       "need to get a new output Tensor.")
  def __itruediv__(self, unused_other):
    """Rejects `var /= value` (true division)."""
    raise RuntimeError("`var /= value` with `tf.Variable`s is not "
                       "supported. Use `var.assign(var / value)` to modify "
                       "the variable, or `out = var / value` if you "
                       "need to get a new output Tensor.")
  def __irealdiv__(self, unused_other):
    """Rejects in-place real division."""
    raise RuntimeError("`var /= value` with `tf.Variable`s is not "
                       "supported. Use `var.assign(var / value)` to modify "
                       "the variable, or `out = var / value` if you "
                       "need to get a new output Tensor.")
  def __ipow__(self, unused_other):
    """Rejects `var **= value`."""
    raise RuntimeError("`var **= value` with `tf.Variable`s is not "
                       "supported. Use `var.assign(var ** value)` to modify "
                       "the variable, or `out = var ** value` if you "
                       "need to get a new output Tensor.")
class ResourceVariable(BaseResourceVariable):
"""Variable based on resource handles.
See the [Variables How To](https://tensorflow.org/guide/variables)
for a high level overview.
A `ResourceVariable` allows you to maintain state across subsequent calls to
session.run.
The `ResourceVariable` constructor requires an initial value for the variable,
which can be a `Tensor` of any type and shape. The initial value defines the
type and shape of the variable. After construction, the type and shape of
the variable are fixed. The value can be changed using one of the assign
methods.
Just like any `Tensor`, variables created with
`tf.Variable(use_resource=True)` can be used as inputs for other Ops in the
graph. Additionally, all the operators overloaded for the `Tensor` class are
carried over to variables, so you can also add nodes to the graph by just
doing arithmetic on variables.
Unlike ref-based variable, a ResourceVariable has well-defined semantics. Each
usage of a ResourceVariable in a TensorFlow graph adds a read_value operation
to the graph. The Tensors returned by a read_value operation are guaranteed to
see all modifications to the value of the variable which happen in any
operation on which the read_value depends on (either directly, indirectly, or
via a control dependency) and guaranteed to not see any modification to the
value of the variable from operations that depend on the read_value operation.
Updates from operations that have no dependency relationship to the read_value
operation might or might not be visible to read_value.
For example, if there is more than one assignment to a ResourceVariable in
a single session.run call there is a well-defined value for each operation
which uses the variable's value if the assignments and the read are connected
by edges in the graph. Consider the following example, in which two writes
can cause tf.Variable and tf.ResourceVariable to behave differently:
```python
a = tf.Variable(1.0, use_resource=True)
a.initializer.run()
assign = a.assign(2.0)
with tf.control_dependencies([assign]):
b = a.read_value()
with tf.control_dependencies([b]):
other_assign = a.assign(3.0)
with tf.control_dependencies([other_assign]):
# Will print 2.0 because the value was read before other_assign ran. If
# `a` was a tf.Variable instead, 2.0 or 3.0 could be printed.
tf.compat.v1.Print(b, [b]).eval()
```
"""
  def __init__(
      self,  # pylint: disable=super-init-not-called
      initial_value=None,
      trainable=None,
      collections=None,
      validate_shape=True,
      caching_device=None,
      name=None,
      dtype=None,
      variable_def=None,
      import_scope=None,
      constraint=None,
      distribute_strategy=None,
      synchronization=None,
      aggregation=None,
      shape=None):
    """Creates a variable.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. Can also be a callable with
        no argument that returns the initial value when called. (Note that
        initializer functions from init_ops.py must first be bound to a shape
        before being used here.)
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
        the default list of variables to use by the `Optimizer` classes.
        Defaults to `True`, unless `synchronization` is set to `ON_READ`, in
        which case it defaults to `False`.
      collections: List of graph collections keys. The new variable is added to
        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
      validate_shape: If `False`, allows the variable to be initialized with a
        value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading. Defaults to the Variable's
        device. If not `None`, caches on another device. Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type. If None,
        either the datatype will be kept (if initial_value is a Tensor) or
        float32 will be used (if it is a Python object convertible to a Tensor).
      variable_def: `VariableDef` protocol buffer. If not None, recreates the
        `ResourceVariable` object with its contents. `variable_def` and other
        arguments (except for import_scope) are mutually exclusive.
      import_scope: Optional `string`. Name scope to add to the
        ResourceVariable. Only used when `variable_def` is provided.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
      distribute_strategy: The tf.distribute.Strategy this variable is being
        created inside of.
      synchronization: Indicates when a distributed a variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses when to
        synchronize.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      shape: (optional) The shape of this variable. If None, the shape of
        `initial_value` will be used. When setting this argument to
        `tf.TensorShape(None)` (representing an unspecified shape), the variable
        can be assigned with values of different shapes.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.

    @compatibility(eager)
    When Eager Execution is enabled, the default for the `collections` argument
    is `None`, which signifies that this `Variable` will not be added to any
    collections.
    @end_compatibility
    """
    if variable_def:
      # Rebuild an existing variable from its serialized `VariableDef`; this
      # path is graph-mode only and mutually exclusive with `initial_value`.
      if initial_value is not None:
        raise ValueError(f"The variable_def and initial_value args to "
                         f"`tf.Variable` are mutually exclusive, but got both: "
                         f"variable_def={variable_def},\n"
                         f"initial_value={initial_value}")
      if context.executing_eagerly():
        raise ValueError(f"Creating a `tf.Variable` with a `variable_def` arg "
                         f"is not supported when eager execution is enabled. "
                         f"Got: variable_def={variable_def}")
      self._init_from_proto(variable_def, import_scope=import_scope,
                            validate_shape=validate_shape)
    else:
      # Fresh variable: forward every construction argument unchanged.
      self._init_from_args(
          initial_value=initial_value,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          name=name,
          dtype=dtype,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation,
          shape=shape,
          distribute_strategy=distribute_strategy,
          validate_shape=validate_shape,
      )
def _init_from_args(self,
initial_value=None,
trainable=None,
collections=None,
caching_device=None,
name=None,
dtype=None,
constraint=None,
synchronization=None,
aggregation=None,
distribute_strategy=None,
shape=None,
validate_shape=True,
):
"""Creates a variable.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called.
(Note that initializer functions from init_ops.py must first be bound to
a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
Defaults to `True`, unless `synchronization` is set to `ON_READ`, in
which case it defaults to `False`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type. If None,
either the datatype will be kept (if initial_value is a Tensor) or
float32 will be used (if it is a Python object convertible to a Tensor).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
synchronization: Indicates when a distributed a variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
distribute_strategy: DistributionStrategy under which this variable was
created.
shape: (optional) The shape of this variable. If None, the shape of
`initial_value` will be used. When setting this argument to
`tf.TensorShape(None)` (representing an unspecified shape), the variable
can be assigned with values of different shapes.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
@compatibility(eager)
When Eager Execution is enabled, variables are never added to collections.
It is not implicitly added to the `GLOBAL_VARIABLES` or
`TRAINABLE_VARIABLES` collections, and the `collections` argument is
ignored.
@end_compatibility
"""
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
synchronization, aggregation, trainable, name))
if initial_value is None:
raise ValueError("The `initial_value` arg to `tf.Variable` must "
"be specified except when you are not providing a "
"`variable_def`. You provided neither.")
init_from_fn = callable(initial_value)
if isinstance(initial_value, ops.Tensor) and hasattr(
initial_value, "graph") and initial_value.graph.building_function:
raise ValueError(f"Argument `initial_value` ({initial_value}) could not "
"be lifted out of a `tf.function`. "
f"(Tried to create variable with name='{name}'). "
"To avoid this error, when constructing `tf.Variable`s "
"inside of `tf.function` you can create the "
"`initial_value` tensor in a "
"`tf.init_scope` or pass a callable `initial_value` "
"(e.g., `tf.Variable(lambda : "
"tf.truncated_normal([10, 40]))`). "
"Please file a feature request if this "
"restriction inconveniences you.")
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if not isinstance(collections, (list, tuple, set)):
raise ValueError(
f"collections argument to Variable constructor must be a list, "
f"tuple, or set. Got {collections} of type {type(collections)}")
if constraint is not None and not callable(constraint):
raise ValueError(f"Argument `constraint` must be None or a callable. "
f"a callable. Got a {type(constraint)}: {constraint}")
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
with ops.init_scope():
self._in_graph_mode = not context.executing_eagerly()
with ops.name_scope(
name,
"Variable", [] if init_from_fn else [initial_value],
skip_on_eager=False) as name:
# pylint: disable=protected-access
handle_name = ops.name_from_scope_name(name)
if self._in_graph_mode:
shared_name = handle_name
unique_id = shared_name
else:
# When in eager mode use a uid for the shared_name, to prevent
# accidental sharing.
unique_id = "%s_%d" % (handle_name, ops.uid())
shared_name = None # Never shared
# Use attr_scope and device(None) to simulate the behavior of
# colocate_with when the variable we want to colocate with doesn't
# yet exist.
device_context_manager = (
ops.device if self._in_graph_mode else ops.NullContextmanager)
attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
s=[compat.as_bytes("loc:@%s" % handle_name)]))
with ops.get_default_graph()._attr_scope({"_class": attr}):
with ops.name_scope("Initializer"), device_context_manager(None):
if init_from_fn:
initial_value = initial_value()
if isinstance(initial_value, trackable.CheckpointInitialValue):
self._maybe_initialize_trackable()
self._update_uid = initial_value.checkpoint_position.restore_uid
initial_value = initial_value.wrapped_value
initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
if shape is not None:
if not initial_value.shape.is_compatible_with(shape):
raise ValueError(
f"In this `tf.Variable` creation, the initial value's shape "
f"({initial_value.shape}) is not compatible with "
f"the explicitly supplied `shape` argument ({shape}).")
else:
shape = initial_value.shape
handle = eager_safe_variable_handle(
initial_value=initial_value,
shape=shape,
shared_name=shared_name,
name=name,
graph_mode=self._in_graph_mode)
handle._parent_trackable = weakref.ref(self)
# pylint: disable=protected-access
if (self._in_graph_mode and initial_value is not None and
initial_value.op._get_control_flow_context() is not None):
raise ValueError(
f"The `initial_value` passed to `tf.Variable` {name} is from "
f"inside a control-flow construct, such as a loop or "
f"conditional. When creating a "
f"`tf.Variable` inside a loop or conditional, use a lambda as "
f"the `initial_value`. Got: initial_value=({initial_value})")
# pylint: enable=protected-access
dtype = initial_value.dtype.base_dtype
if self._in_graph_mode:
with ops.name_scope("IsInitialized"):
is_initialized_op = (
gen_resource_variable_ops.var_is_initialized_op(handle))
if initial_value is not None:
# pylint: disable=g-backslash-continuation
with ops.name_scope("Assign") as n, \
ops.colocate_with(None, ignore_existing=True), \
ops.device(handle.device):
# pylint: disable=protected-access
initializer_op = (
gen_resource_variable_ops.assign_variable_op(
handle,
variables._try_guard_against_uninitialized_dependencies(
name, initial_value),
name=n))
# pylint: enable=protected-access
# pylint: enable=g-backslash-continuation
with ops.name_scope("Read"):
# Manually assign reads to the handle's device to avoid log
# messages.
with ops.device(handle.device):
value = gen_resource_variable_ops.read_variable_op(handle, dtype)
_maybe_set_handle_data(dtype, handle, value)
graph_element = value
if caching_device is not None:
# Variables may be created in a tf.device() or ops.colocate_with()
# context. At the same time, users would expect caching device to
# be independent of this context, and/or would not expect the
# current device context to be merged with the caching device
# spec. Therefore we reset the colocation stack before creating
# the cached value. Note that resetting the colocation stack will
# also reset the device stack.
with ops.colocate_with(None, ignore_existing=True):
with ops.device(caching_device):
cached_value = array_ops.identity(value)
else:
cached_value = None
else:
gen_resource_variable_ops.assign_variable_op(handle, initial_value)
is_initialized_op = None
initializer_op = None
graph_element = None
if caching_device:
with ops.device(caching_device):
cached_value = gen_resource_variable_ops.read_variable_op(
handle, dtype)
_maybe_set_handle_data(dtype, handle, cached_value)
else:
cached_value = None
if cached_value is not None:
# Store the variable object so that the original variable can be
# accessed to generate functions that are compatible with SavedModel.
cached_value._cached_variable = weakref.ref(self) # pylint: disable=protected-access
if not context.executing_eagerly():
# Eager variables are only added to collections if they are part of an
# eager variable store (otherwise in an interactive session they would
# hog memory and cause OOM). This is done in ops/variable_scope.py.
ops.add_to_collections(collections, self)
elif ops.GraphKeys.GLOBAL_STEP in collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, self)
initial_value = initial_value if self._in_graph_mode else None
super(ResourceVariable, self).__init__(
trainable=trainable,
shape=shape,
dtype=dtype,
handle=handle,
synchronization=synchronization,
constraint=constraint,
aggregation=aggregation,
distribute_strategy=distribute_strategy,
name=name,
unique_id=unique_id,
handle_name=handle_name,
graph_element=graph_element,
initial_value=initial_value,
initializer_op=initializer_op,
is_initialized_op=is_initialized_op,
cached_value=cached_value,
caching_device=caching_device,
validate_shape=validate_shape,
)
def _init_from_proto(self, variable_def, import_scope=None,
validate_shape=True):
"""Initializes from `VariableDef` proto."""
# Note that init_from_proto is currently not supported in Eager mode.
assert not context.executing_eagerly()
self._in_graph_mode = True
assert isinstance(variable_def, variable_pb2.VariableDef)
if not variable_def.is_resource:
raise ValueError(f"The `variable_def` you passed to `tf.Variable` is "
f"Trying to restore a TF 1.x Reference Variable "
f"as a TF 2.x ResourceVariable. This is unsupported. "
f"Got variable_def={variable_def}")
# Create from variable_def.
g = ops.get_default_graph()
self._handle = g.as_graph_element(
ops.prepend_name_scope(
variable_def.variable_name, import_scope=import_scope))
self._shape = tensor_shape.TensorShape(self._handle.op.get_attr("shape"))
self._handle_name = self._handle.name
self._unique_id = self._handle_name
self._initializer_op = g.as_graph_element(
ops.prepend_name_scope(
variable_def.initializer_name, import_scope=import_scope))
# Check whether initial_value_name exists for backwards compatibility.
if (hasattr(variable_def, "initial_value_name") and
variable_def.initial_value_name):
self._initial_value = g.as_graph_element(
ops.prepend_name_scope(
variable_def.initial_value_name, import_scope=import_scope))
else:
self._initial_value = None
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
variable_def.synchronization, variable_def.aggregation,
variable_def.trainable, variable_def.variable_name))
self._synchronization = synchronization
self._aggregation = aggregation
self._trainable = trainable
if variable_def.snapshot_name:
snapshot = g.as_graph_element(
ops.prepend_name_scope(
variable_def.snapshot_name, import_scope=import_scope))
if snapshot.op.type != "ReadVariableOp":
self._cached_value = snapshot
else:
self._cached_value = None
while snapshot.op.type != "ReadVariableOp":
snapshot = snapshot.op.inputs[0]
self._graph_element = snapshot
else:
self._cached_value = None
# Legacy case for protos without the snapshot name; assume it's the
# following.
self._graph_element = g.get_tensor_by_name(self._handle.op.name +
"/Read/ReadVariableOp:0")
if variable_def.HasField("save_slice_info_def"):
self._save_slice_info = variables.Variable.SaveSliceInfo(
save_slice_info_def=variable_def.save_slice_info_def,
import_scope=import_scope)
else:
self._save_slice_info = None
self._caching_device = None
self._dtype = dtypes.as_dtype(self._handle.op.get_attr("dtype"))
self._constraint = None
self._validate_shape = validate_shape
class UninitializedVariable(BaseResourceVariable):
  """A variable with no initializer.

  Only creates the variable handle (no initial value is assigned); used e.g.
  by `copy_to_graph_uninitialized` below.
  """
  def __init__(self,  # pylint: disable=super-init-not-called
               trainable=None,
               caching_device=None,
               name=None,
               shape=None,
               dtype=None,
               constraint=None,
               synchronization=None,
               aggregation=None,
               extra_handle_data=None,
               distribute_strategy=None,
               **unused_kwargs):
    """Creates the variable handle.
    Args:
      trainable: If `True`, GradientTapes automatically watch uses of this
        Variable.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading. Defaults to the Variable's
        device. If not `None`, caches on another device. Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      shape: The variable's shape.
      dtype: The variable's dtype.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses when to
        synchronize.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      extra_handle_data: Optional, another resource handle or Tensor with handle
        data to merge with `shape` and `dtype`.
      distribute_strategy: The tf.distribute.Strategy this variable is being
        created inside of.
    """
    with ops.init_scope():
      # Here we are detecting eagerness within an init_scope, so this will only
      # be true when we are running in TF1 graph mode.
      self._in_graph_mode = not context.executing_eagerly()
      with ops.name_scope(name, "Variable", skip_on_eager=False) as name:
        handle_name = ops.name_from_scope_name(name)
        if self._in_graph_mode:
          # In graph mode the handle's name doubles as the shared name.
          shared_name = handle_name
          unique_id = shared_name
        else:
          # Eager mode: a uid prevents accidental sharing.
          unique_id = "%s_%d" % (handle_name, ops.uid())
          shared_name = None  # Never shared
        handle = _variable_handle_from_shape_and_dtype(
            shape=shape,
            dtype=dtype,
            shared_name=shared_name,
            name=name,
            graph_mode=self._in_graph_mode,
            initial_value=extra_handle_data)
        handle._parent_trackable = weakref.ref(self)
        if self._in_graph_mode:
          # We only need to add the read_variable_op in TF1.
          with ops.name_scope("Read"):
            # Manually assign reads to the handle's device to avoid log
            # messages.
            with ops.device(handle.device):
              value = gen_resource_variable_ops.read_variable_op(handle, dtype)
              _maybe_set_handle_data(dtype, handle, value)
            graph_element = value
          ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, self)
          # Do *not* add to TRAINABLE_VARIABLES here, even if self._trainable,
          # because retraining or frozen use of imported SavedModels is
          # controlled at higher levels of model building.
        else:
          graph_element = None
    super(UninitializedVariable, self).__init__(
        distribute_strategy=distribute_strategy,
        shape=shape,
        dtype=dtype,
        unique_id=unique_id,
        handle_name=handle_name,
        constraint=constraint,
        handle=handle,
        graph_element=graph_element,
        trainable=trainable,
        synchronization=synchronization,
        aggregation=aggregation,
        in_graph_mode=self._in_graph_mode)
# Register ResourceVariable with the pywrap type registry and make it the
# type math_ops uses to detect resource variables.
_pywrap_utils.RegisterType("ResourceVariable", ResourceVariable)
math_ops._resource_variable_type = ResourceVariable  # pylint: disable=protected-access
def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors (e.g. passed directly
# to ops expecting a Tensor input).
ops.register_tensor_conversion_function(BaseResourceVariable,
                                        _dense_var_to_tensor)
class _UnreadVariable(BaseResourceVariable):
  """Represents a future for a read of a variable.

  Pretends to be the tensor if anyone looks.
  """

  def __init__(self, handle, dtype, shape, in_graph_mode, parent_op, unique_id):
    # Eager handles carry no graph name.
    handle_name = "" if isinstance(handle, ops.EagerTensor) else handle.name
    # Only create a graph_element if we're in session.run-land as only
    # session.run requires a preexisting tensor to evaluate. Otherwise we can
    # avoid accidentally reading the variable.
    if context.executing_eagerly() or ops.inside_function():
      graph_element = None
    else:
      with ops.control_dependencies([parent_op]):
        graph_element = gen_resource_variable_ops.read_variable_op(
            handle, dtype)
        _maybe_set_handle_data(dtype, handle, graph_element)
    super().__init__(
        handle=handle,
        shape=shape,
        handle_name=handle_name,
        unique_id=unique_id,
        dtype=dtype,
        graph_element=graph_element)
    self._parent_op = parent_op

  @property
  def name(self):
    # In graph mode the read is named after the op that produced it.
    return self._parent_op.name if self._in_graph_mode else "UnreadVariable"

  def value(self):
    return self._read_variable_op()

  def read_value(self):
    return self._read_variable_op()

  def _read_variable_op(self):
    # Sequence the read after the pending parent op.
    with ops.control_dependencies([self._parent_op]):
      result = gen_resource_variable_ops.read_variable_op(
          self._handle, self._dtype)
      _maybe_set_handle_data(self._dtype, self._handle, result)
      return result

  # Every mutating method below first sequences itself after the parent op,
  # then delegates to the base-class implementation.

  def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
    with ops.control_dependencies([self._parent_op]):
      return super().assign_sub(delta, use_locking, name, read_value)

  def assign_add(self, delta, use_locking=None, name=None, read_value=True):
    with ops.control_dependencies([self._parent_op]):
      return super().assign_add(delta, use_locking, name, read_value)

  def assign(self, value, use_locking=None, name=None, read_value=True):
    with ops.control_dependencies([self._parent_op]):
      return super().assign(value, use_locking, name, read_value)

  def scatter_sub(self, sparse_delta, use_locking=False, name=None):
    with ops.control_dependencies([self._parent_op]):
      return super().scatter_sub(sparse_delta, use_locking, name)

  def scatter_add(self, sparse_delta, use_locking=False, name=None):
    with ops.control_dependencies([self._parent_op]):
      return super().scatter_add(sparse_delta, use_locking, name)

  def scatter_max(self, sparse_delta, use_locking=False, name=None):
    with ops.control_dependencies([self._parent_op]):
      return super().scatter_max(sparse_delta, use_locking, name)

  def scatter_min(self, sparse_delta, use_locking=False, name=None):
    with ops.control_dependencies([self._parent_op]):
      return super().scatter_min(sparse_delta, use_locking, name)

  def scatter_mul(self, sparse_delta, use_locking=False, name=None):
    with ops.control_dependencies([self._parent_op]):
      return super().scatter_mul(sparse_delta, use_locking, name)

  def scatter_div(self, sparse_delta, use_locking=False, name=None):
    with ops.control_dependencies([self._parent_op]):
      return super().scatter_div(sparse_delta, use_locking, name)

  def scatter_update(self, sparse_delta, use_locking=False, name=None):
    with ops.control_dependencies([self._parent_op]):
      return super().scatter_update(sparse_delta, use_locking, name)

  def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):
    with ops.control_dependencies([self._parent_op]):
      return super().batch_scatter_update(sparse_delta, use_locking, name)

  def scatter_nd_sub(self, indices, updates, name=None):
    with ops.control_dependencies([self._parent_op]):
      return super().scatter_nd_sub(indices, updates, name)

  def scatter_nd_add(self, indices, updates, name=None):
    with ops.control_dependencies([self._parent_op]):
      return super().scatter_nd_add(indices, updates, name)

  def scatter_nd_update(self, indices, updates, name=None):
    with ops.control_dependencies([self._parent_op]):
      return super().scatter_nd_update(indices, updates, name)

  def scatter_nd_max(self, indices, updates, name=None):
    with ops.control_dependencies([self._parent_op]):
      return super().scatter_nd_max(indices, updates, name)

  def scatter_nd_min(self, indices, updates, name=None):
    with ops.control_dependencies([self._parent_op]):
      return super().scatter_nd_min(indices, updates, name)

  @property
  def op(self):
    """The op for this variable."""
    return self._parent_op
@ops.RegisterGradient("ReadVariableOp")
def _ReadGrad(_, grad):
  """Gradient for read op."""
  # Reading a variable is the identity on its value, so the incoming gradient
  # passes straight through.
  return grad
def variable_shape(handle, out_type=dtypes.int32):
  """Returns the shape of the variable referenced by `handle` as a tensor.

  Uses statically known handle data when it fully defines the shape; falls
  back to the `VariableShape` op otherwise.
  """
  handle_data = get_eager_safe_handle_data(handle)
  if handle_data is not None and handle_data.is_set:
    shape_proto = handle_data.shape_and_type[0].shape
    fully_defined = (not shape_proto.unknown_rank and
                     all(dim.size != -1 for dim in shape_proto.dim))
    if fully_defined:
      return constant_op.constant([dim.size for dim in shape_proto.dim],
                                  dtype=out_type)
  return gen_resource_variable_ops.variable_shape(handle, out_type=out_type)
@ops.RegisterGradient("ResourceGather")
def _GatherGrad(op, grad):
  """Gradient for gather op."""
  # Build appropriately shaped IndexedSlices: the gradient of a gather is a
  # sparse update at the gathered indices.
  var_handle = op.inputs[0]
  gathered_indices = op.inputs[1]
  dense_shape = variable_shape(var_handle)
  num_indices = array_ops.expand_dims(array_ops.size(gathered_indices), 0)
  grad_values = array_ops.reshape(
      grad, array_ops.concat([num_indices, dense_shape[1:]], 0))
  flat_indices = array_ops.reshape(gathered_indices, num_indices)
  return (indexed_slices.IndexedSlices(grad_values, flat_indices, dense_shape),
          None)
def _to_proto_fn(v, export_scope=None):
"""Converts Variable and ResourceVariable to VariableDef for collections."""
return v.to_proto(export_scope=export_scope)
def _from_proto_fn(v, import_scope=None):
  """Creates Variable or ResourceVariable from VariableDef as needed."""
  # Pick the class matching the proto's resource flag, then deserialize.
  variable_cls = ResourceVariable if v.is_resource else variables.Variable
  return variable_cls.from_proto(v, import_scope=import_scope)
# Register (de)serialization of variables for every collection key that can
# contain them.
_VARIABLE_COLLECTION_KEYS = (
    ops.GraphKeys.GLOBAL_VARIABLES,
    ops.GraphKeys.TRAINABLE_VARIABLES,
    ops.GraphKeys.MOVING_AVERAGE_VARIABLES,
    ops.GraphKeys.LOCAL_VARIABLES,
    ops.GraphKeys.MODEL_VARIABLES,
    ops.GraphKeys.GLOBAL_STEP,
    ops.GraphKeys.METRIC_VARIABLES,
)
for _collection_key in _VARIABLE_COLLECTION_KEYS:
  ops.register_proto_function(
      _collection_key,
      proto_type=variable_pb2.VariableDef,
      to_proto=_to_proto_fn,
      from_proto=_from_proto_fn)
@tf_export("__internal__.ops.is_resource_variable", v1=[])
def is_resource_variable(var):
  """Returns True if `var` is to be considered a ResourceVariable."""
  # Duck-typing escape hatch: objects may opt in by defining a
  # `_should_act_as_resource_variable` attribute.
  return isinstance(var, BaseResourceVariable) or hasattr(
      var, "_should_act_as_resource_variable")
def copy_to_graph_uninitialized(var):
  """Copies an existing variable to a new graph, with no initializer."""
  # Like ResourceVariable.__deepcopy__, but does not set an initializer on the
  # new variable.
  # pylint: disable=protected-access
  copy_kwargs = dict(
      name=var._shared_name,
      shape=var.shape,
      dtype=var.dtype,
      trainable=var.trainable,
      constraint=var._constraint,
      synchronization=var.synchronization,
      aggregation=var.aggregation,
      extra_handle_data=var.handle)
  copied = UninitializedVariable(**copy_kwargs)
  copied._maybe_initialize_trackable()
  # pylint: enable=protected-access
  return copied
# These ops have no meaningful gradient; mark them as not differentiable.
ops.NotDifferentiable("Assert")
ops.NotDifferentiable("VarIsInitializedOp")
ops.NotDifferentiable("VariableShape")
class VariableSpec(tensor_spec.DenseSpec):
  """Describes a tf.Variable."""

  __slots__ = ["trainable"]

  @property
  def value_type(self):
    return BaseResourceVariable

  def __init__(self, shape, dtype=dtypes.float32, trainable=True):
    super().__init__(shape, dtype=dtype)
    self.trainable = trainable

  def is_compatible_with(self, spec_or_value):
    # Compatible with another spec of this class or a concrete variable, as
    # long as shape, dtype and trainability agree.
    if not isinstance(spec_or_value, (type(self), self.value_type)):
      return False
    return (self.shape.is_compatible_with(spec_or_value.shape)
            and self.dtype == spec_or_value.dtype
            and self.trainable == spec_or_value.trainable)

  @classmethod
  def from_value(cls, value):
    return cls(value.shape, dtype=value.dtype, trainable=value.trainable)

  def _to_components(self, value):
    return value.handle

  def _from_components(self, components):
    return BaseResourceVariable(
        trainable=self.trainable,
        shape=self.shape,
        dtype=self.dtype,
        handle=components)

  @property
  def _component_specs(self):
    return tensor_spec.TensorSpec(self.shape, dtypes.resource)

  def _from_compatible_tensor_list(self, tensor_list):
    assert len(tensor_list) == 1
    return tensor_list[0]

  def _serialize(self):
    return self.shape, self.dtype, self.trainable

  def __tf_tracing_type__(self, signature_context):
    return signature_context.make_reference_type(self, id(self))

  def __repr__(self):
    return (f"{type(self).__name__}(shape={self.shape}, dtype={self.dtype}, "
            f"trainable={self.trainable})")

  def __hash__(self):
    return hash((self.shape, self.dtype, self.trainable))

  def __eq__(self, other):
    return (type(self) is type(other)
            and self.shape == other.shape
            and self.dtype == other.dtype
            and self.trainable == other.trainable)
_pywrap_utils.RegisterType("VariableSpec", VariableSpec)
def write_object_proto_for_resource_variable(resource_variable,
                                             proto,
                                             options,
                                             enforce_naming=True):
  """Writes additional information of the variable into the SavedObject proto.

  This allows users to define a `hook` to provide extra information of the
  variable to the SavedObject.

  For example, DistributedVariable class would fill in components in the
  distributed context.

  Args:
    resource_variable: A `ResourceVariable` or `DistributedValue` that has the
      information to be saved into the proto.
    proto: `SavedObject` proto to update.
    options: A `SaveOption` instance that configures save behavior.
    enforce_naming: A bool determining whether to check that names end in the
      expected string ':0'

  Raises:
    ValueError: If `enforce_naming` is true and the variable's name does not
      end in ':0'.
  """
  proto.variable.SetInParent()
  if enforce_naming and not resource_variable.name.endswith(":0"):
    # Fixed: the message fragments previously joined without a space,
    # producing "...(expected ':0')which won't be restored.".
    raise ValueError(f"Cowardly refusing to save variable "
                     f"{resource_variable.name} because of "
                     f"unexpected suffix in the name (expected ':0') "
                     f"which won't be restored.")
  proto.variable.name = meta_graph._op_name(resource_variable.name)  # pylint: disable=protected-access
  proto.variable.trainable = resource_variable.trainable
  proto.variable.dtype = resource_variable.dtype.as_datatype_enum
  proto.variable.synchronization = resource_variable.synchronization.value
  proto.variable.aggregation = resource_variable.aggregation.value
  proto.variable.shape.CopyFrom(resource_variable.shape.as_proto())
  if options.experimental_variable_policy._save_variable_devices(  # pylint: disable=protected-access
  ):
    if hasattr(resource_variable, "device"):
      proto.variable.device = resource_variable.device
|
{"hexsha": "d9dc1b1ddad4e6599491fdedbb4300665412e5fe", "size": 99307, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/python/ops/resource_variable_ops.py", "max_stars_repo_name": "TheRakeshPurohit/tensorflow", "max_stars_repo_head_hexsha": "bee6d5a268122df99e1e55a7b92517e84ad25bab", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2016-09-27T05:37:33.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-22T06:41:12.000Z", "max_issues_repo_path": "tensorflow/python/ops/resource_variable_ops.py", "max_issues_repo_name": "TheRakeshPurohit/tensorflow", "max_issues_repo_head_hexsha": "bee6d5a268122df99e1e55a7b92517e84ad25bab", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow/python/ops/resource_variable_ops.py", "max_forks_repo_name": "TheRakeshPurohit/tensorflow", "max_forks_repo_head_hexsha": "bee6d5a268122df99e1e55a7b92517e84ad25bab", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4015459723, "max_line_length": 109, "alphanum_fraction": 0.6783106931, "include": true, "reason": "import numpy", "num_tokens": 21605}
|
import os
import numpy as np
# Each input tile is a TILE_LENGTH x TILE_LENGTH square of pixels.
TILE_LENGTH = 10
# 2d array of single-character pixels describing the sea monster pattern
# that part 2 searches for in the assembled image.
SEA_MONSTER = np.array([
    [" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "#", " "],
    ["#", " ", " ", " ", " ", "#", "#", " ", " ", " ", " ", "#", "#", " ", " ", " ", " ", "#", "#", "#"],
    [" ", "#", " ", " ", "#", " ", " ", "#", " ", " ", "#", " ", " ", "#", " ", " ", "#", " ", " ", " "],
])
# The two possible values tried for the flipX and flipY orientation flags.
FLIP_VALUES = [False, True]
class Tile:
    """A puzzle tile: its number plus the numbers of tiles matching each side."""

    def __init__(self, number):
        self.number = number
        # Numbers of the tiles that can be attached on each of the four sides.
        self.top, self.right, self.bottom, self.left = set(), set(), set(), set()
class Transformation:
    """A flip/rotation combination that orients one tile against another."""

    def __init__(self, flipX, flipY, numRot):
        # numRot is the number of 90-degree rotations applied after flipping.
        self.flipX, self.flipY, self.numRot = flipX, flipY, numRot
def areTilesAdjacent(tilePixels, otherTilePixels, getTargetArray, getComparisonArray):
    """Checks whether otherTilePixels can be oriented to border tilePixels.

    Tries every flip/rotation combination of otherTilePixels and compares the
    edge selected by getComparisonArray against the edge of tilePixels
    selected by getTargetArray.

    Returns:
        (True, Transformation) for the first matching orientation, otherwise
        (False, identity Transformation).
    """
    targetEdge = getTargetArray(tilePixels)
    for flipX in FLIP_VALUES:
        for flipY in FLIP_VALUES:
            oriented = np.copy(otherTilePixels)
            if flipX:
                oriented = np.fliplr(oriented)
            if flipY:
                oriented = np.flipud(oriented)
            # Try the 0, 90, 180 and 270 degree rotations of the flipped tile.
            for numRot in range(4):
                candidate = np.rot90(oriented, numRot)
                if (targetEdge == getComparisonArray(candidate)).all():
                    return True, Transformation(flipX, flipY, numRot)
    return False, Transformation(False, False, 0)
def tileMatchesTop(tilePixels, otherTilePixels):
    """Top edge of tilePixels vs. bottom edge of the oriented candidate."""
    return areTilesAdjacent(tilePixels, otherTilePixels,
                            lambda pixels: pixels[0],
                            lambda pixels: pixels[-1])
def tileMatchesRight(tilePixels, otherTilePixels):
    """Right edge of tilePixels vs. left edge of the oriented candidate."""
    return areTilesAdjacent(tilePixels, otherTilePixels,
                            lambda pixels: pixels[:, -1],
                            lambda pixels: pixels[:, 0])
def tileMatchesBottom(tilePixels, otherTilePixels):
    """Bottom edge of tilePixels vs. top edge of the oriented candidate."""
    return areTilesAdjacent(tilePixels, otherTilePixels,
                            lambda pixels: pixels[-1],
                            lambda pixels: pixels[0])
def tileMatchesLeft(tilePixels, otherTilePixels):
    """Left edge of tilePixels vs. right edge of the oriented candidate."""
    return areTilesAdjacent(tilePixels, otherTilePixels,
                            lambda pixels: pixels[:, 0],
                            lambda pixels: pixels[:, -1])
def populateTileMatches(tileObjs):
    """Fills in the top/right/bottom/left match sets of every tile object.

    For each ordered pair of distinct tiles, records the other tile's number
    on every side it can attach to. Reads pixel data from the module-level
    `tiles` mapping (tile number -> pixel array) — same as the original code.
    """
    # One (matcher function, attribute name) pair per side; this replaces four
    # copy-pasted check stanzas.
    sideMatchers = (
        (tileMatchesTop, "top"),
        (tileMatchesRight, "right"),
        (tileMatchesBottom, "bottom"),
        (tileMatchesLeft, "left"),
    )
    for tileIdx, tileObj in enumerate(tileObjs):
        for otherTileIdx, otherTileObj in enumerate(tileObjs):
            if tileIdx == otherTileIdx:
                continue
            for matchFunction, sideName in sideMatchers:
                tileMatches, _ = matchFunction(tiles[tileObj.number],
                                               tiles[otherTileObj.number])
                if tileMatches:
                    getattr(tileObj, sideName).add(otherTileObj.number)
def getCornerTiles(tileObjs):
    """Returns the tiles that have two adjacent sides with no matching tile."""

    def isCorner(tileObj):
        # A corner has an empty match set on two adjacent sides, e.g. top+right.
        sides = [tileObj.top, tileObj.right, tileObj.bottom, tileObj.left]
        adjacentPairs = zip(sides, sides[1:] + sides[:1])
        return any(len(a) == 0 and len(b) == 0 for a, b in adjacentPairs)

    return [tileObj for tileObj in tileObjs if isCorner(tileObj)]
def setPixelsOnImage(image, rowOffset, colOffset, tilePixels):
    """Copies tilePixels into image at the given row/column offset."""
    for rowIdx in range(len(tilePixels)):
        rowValues = tilePixels[rowIdx]
        for colIdx in range(len(rowValues)):
            image[rowOffset + rowIdx, colOffset + colIdx] = rowValues[colIdx]
def getMatchingTile(matchFunction, currentTilePixels, tiles, usedTilesNumbers):
    """Finds the first unused tile that fits next to currentTilePixels.

    matchFunction is one of the tileMatches* helpers and decides on which side
    the candidate must fit. Returns (tileNumber, orientedPixels) where
    orientedPixels already has the matching flips and rotation applied.

    Raises:
        Exception: if no unused tile fits.
    """
    for candidateNumber, candidatePixels in tiles.items():
        if candidateNumber in usedTilesNumbers:
            continue
        # Ask the matcher whether (and in which orientation) this tile fits.
        isMatch, transformation = matchFunction(currentTilePixels, candidatePixels)
        if not isMatch:
            continue
        oriented = candidatePixels
        if transformation.flipX:
            oriented = np.fliplr(oriented)
        if transformation.flipY:
            oriented = np.flipud(oriented)
        oriented = np.rot90(oriented, transformation.numRot)
        return candidateNumber, oriented
    raise Exception("No matching tiles found.")
def generateImageFromTopLeft(tiles, currentTileNumber, currentTilePixels):
    """Assembles the full image starting from the given top-left tile.

    Walks the grid left-to-right, top-to-bottom: within a row, each next tile
    must match the previous tile's right edge; a new row starts with the tile
    matching the bottom edge of the previous row's first tile.

    Returns:
        (image, imageNoBorders): the stitched image with tile borders kept,
        and the same image with each tile's 1-pixel border stripped.
    """
    # Determine size of final image
    imageLength = int(np.ceil(np.sqrt(len(tiles))))
    imagePixelLength = imageLength * TILE_LENGTH
    image = np.empty((imagePixelLength, imagePixelLength), str)
    # Border-free version: each tile contributes (TILE_LENGTH - 2) pixels.
    imageNoBordersPixelLength = imageLength * (TILE_LENGTH - 2)
    imageNoBorders = np.empty((imageNoBordersPixelLength, imageNoBordersPixelLength), str)
    # Track used tiles
    usedTilesNumbers = []
    rowOffset = 0
    rowOffsetNoBorders = 0
    while rowOffset < imagePixelLength:
        # Remember the row's first tile; the next row is matched against it.
        rowTilePixels = currentTilePixels
        colOffset = 0
        colOffsetNoBorders = 0
        while colOffset < imagePixelLength:
            setPixelsOnImage(image, rowOffset, colOffset, currentTilePixels)
            setPixelsOnImage(imageNoBorders, rowOffsetNoBorders, colOffsetNoBorders, currentTilePixels[1:-1, 1:-1])
            usedTilesNumbers.append(currentTileNumber)
            colOffset += TILE_LENGTH
            colOffsetNoBorders += (TILE_LENGTH - 2)
            if colOffset < imagePixelLength:
                currentTileNumber, currentTilePixels = getMatchingTile(tileMatchesRight, currentTilePixels, tiles, usedTilesNumbers)
        rowOffset += TILE_LENGTH
        rowOffsetNoBorders += (TILE_LENGTH - 2)
        if rowOffset < imagePixelLength:
            currentTileNumber, currentTilePixels = getMatchingTile(tileMatchesBottom, rowTilePixels, tiles, usedTilesNumbers)
    return image, imageNoBorders
def hasSeaMonster(imageSegment):
    """Return True if imageSegment contains the sea monster.

    Every cell where the SEA_MONSTER template is '#' must also be '#'
    in imageSegment; cells where the template is not '#' are ignored.
    """
    return all(
        pixel == "#"
        for r, segmentRow in enumerate(imageSegment)
        for c, pixel in enumerate(segmentRow)
        if SEA_MONSTER[r, c] == "#"
    )
def countSeaMonsters(image):
    """Count how many positions in image contain the SEA_MONSTER template.

    Slides a SEA_MONSTER-sized window over every valid top-left position
    and tests each window with hasSeaMonster. Overlapping matches are all
    counted.
    """
    monsterRows, monsterCols = SEA_MONSTER.shape
    total = 0
    for top in range(image.shape[0] - monsterRows + 1):
        for left in range(image.shape[1] - monsterCols + 1):
            window = image[top:top + monsterRows, left:left + monsterCols]
            if hasSeaMonster(window):
                total += 1
    return total
def printImage(image, tileInterval):
    """Pretty-print a 2-D array of single-character pixels.

    A blank line is emitted before every tileInterval-th row and a space
    before every tileInterval-th column, visually separating tiles.
    Empty-string pixels (unfilled cells) are rendered as a space.
    """
    for rowIdx, pixelRow in enumerate(image):
        if rowIdx % tileInterval == 0:
            print("")
        rendered = []
        for colIdx, pixel in enumerate(pixelRow):
            if colIdx % tileInterval == 0:
                rendered.append(" ")
            rendered.append(pixel if pixel != "" else " ")
        print("".join(rendered))
def part1(tiles):
    """Solve part 1: print the product of the four corner tile numbers."""
    tileObjects = []
    for number in tiles:
        tileObjects.append(Tile(number))
    populateTileMatches(tileObjects)
    corners = getCornerTiles(tileObjects)
    answer = np.prod([corner.number for corner in corners])
    print(f"Part 1 - Solution: {answer}")
def part2(tiles):
    """Solve part 2: assemble the image, hunt for sea monsters in every
    orientation, and print the number of '#' pixels that belong to no
    sea monster.

    Parameters:
        tiles: dict mapping tile number -> TILE_LENGTH-square pixel array.
    """
    tileNumberToObjs = {}
    for tileNumber in tiles:
        tileObj = Tile(tileNumber)
        tileNumberToObjs[tileNumber] = tileObj
    populateTileMatches(list(tileNumberToObjs.values()))
    cornerTiles = getCornerTiles(list(tileNumberToObjs.values()))
    # Get corner tiles
    # Classify each corner by which two sides have no matching neighbour.
    topRightTiles = [cornerTile for cornerTile in cornerTiles if (len(cornerTile.top) == 0 and len(cornerTile.right) == 0)]
    bottomRightTiles = [cornerTile for cornerTile in cornerTiles if (len(cornerTile.bottom) == 0 and len(cornerTile.right) == 0)]
    bottomLeftTiles = [cornerTile for cornerTile in cornerTiles if (len(cornerTile.bottom) == 0 and len(cornerTile.left) == 0)]
    topLeftTiles = [cornerTile for cornerTile in cornerTiles if (len(cornerTile.top) == 0 and len(cornerTile.left) == 0)]
    # Pick one corner and flip it so it can serve as the top-left tile.
    currentTileNumber = None
    currentTilePixels = None
    if len(topLeftTiles) == 1:
        currentTileNumber = topLeftTiles[0].number
        currentTilePixels = tiles[currentTileNumber]
    elif len(topRightTiles) == 1:
        currentTileNumber = topRightTiles[0].number
        currentTilePixels = tiles[currentTileNumber]
        # Make top right into top left... (flip X)
        currentTilePixels = np.fliplr(currentTilePixels)
    elif len(bottomRightTiles) == 1:
        currentTileNumber = bottomRightTiles[0].number
        currentTilePixels = tiles[currentTileNumber]
        # Make bottom right into top left... (flip X and flip Y)
        currentTilePixels = np.fliplr(currentTilePixels)
        currentTilePixels = np.flipud(currentTilePixels)
    elif len(bottomLeftTiles) == 1:
        currentTileNumber = bottomLeftTiles[0].number
        currentTilePixels = tiles[currentTileNumber]
        # Make bottom left into top left... (flip Y)
        currentTilePixels = np.flipud(currentTilePixels)
    # NOTE(review): if none of the branches above matched, currentTilePixels
    # is still None and generateImageFromTopLeft will fail before the
    # `image is None` check below is reached -- that check can never trigger
    # as written.
    image, imageNoBorders = generateImageFromTopLeft(tiles, currentTileNumber, currentTilePixels)
    if image is None:
        raise Exception("Could not find a supported corner to generate the image")
    print("\n------------- IMAGE -------------")
    printImage(image, TILE_LENGTH)
    print("\n------------- NO BORDERS -------------")
    printImage(imageNoBorders, TILE_LENGTH - 2)
    # Attempt to find sea monsters for every transformation of imageNoBorders
    for flipX in FLIP_VALUES:
        for flipY in FLIP_VALUES:
            imageToLookUp = np.copy(imageNoBorders)
            if flipX:
                imageToLookUp = np.fliplr(imageToLookUp)
            if flipY:
                imageToLookUp = np.flipud(imageToLookUp)
            # Rotate the image 0, 90, 180, and 270 degrees
            numRot = 0
            while numRot < 4:
                rotatedImage = np.rot90(imageToLookUp, numRot)
                numSeaMonsters = countSeaMonsters(rotatedImage)
                # If we find any sea monsters, this is a solution and we'll return
                if numSeaMonsters > 0:
                    # Water roughness: total '#' minus those covered by monsters.
                    nonSeaMonsterPoundPixels = np.sum(rotatedImage == "#") - np.sum(SEA_MONSTER == "#") * numSeaMonsters
                    print(f"\nPart 2 - Solution: {nonSeaMonsterPoundPixels}")
                    return
                numRot += 1
if __name__ == "__main__":
    # Read the puzzle input from input.txt next to this script.
    dirname = os.path.dirname(__file__)
    filename = os.path.join(dirname, "input.txt")
    with open(filename, "r") as fileInput:
        lines = [line.strip() for line in fileInput.readlines()]
    # Parse the input into a dict of tile number -> TILE_LENGTH x TILE_LENGTH
    # array of single-character pixels.
    tiles = {}
    currentTileNumber = None
    currentTile = np.empty((TILE_LENGTH, TILE_LENGTH), str)
    tileRow = 0
    for line in lines:
        if len(line) == 0:
            # An empty line terminates the current tile: save it (if any)
            # and reset the parsing state.
            if currentTileNumber is not None:
                # Save tile
                tiles[currentTileNumber] = currentTile
                # Clear state
                currentTileNumber = None
                currentTile = np.empty((TILE_LENGTH, TILE_LENGTH), str)
                tileRow = 0
        elif line.startswith("Tile"):
            # Header line "Tile 1234:" -> store the tile number for processing.
            currentTileNumber = int(line.split(" ")[1][:-1])
        else:
            # Parse one pixel row of the current tile.
            for tileCol, pixel in enumerate(line):
                currentTile[tileRow, tileCol] = pixel
            tileRow += 1
    # Bug fix: if the input does not end with an empty line, the loop above
    # never saved the final tile -- save it now.
    if currentTileNumber is not None:
        tiles[currentTileNumber] = currentTile
    part1(tiles)
    part2(tiles)
|
{"hexsha": "9269dde9f6202e3cedcbeb7e1c3e57a3e5493bc0", "size": 11973, "ext": "py", "lang": "Python", "max_stars_repo_path": "20/solution.py", "max_stars_repo_name": "poscar/advent-of-code-2020", "max_stars_repo_head_hexsha": "adcded980ab6bb103f498029f7c40d44f16c62a8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-09T15:39:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-09T15:39:33.000Z", "max_issues_repo_path": "20/solution.py", "max_issues_repo_name": "poscar/advent-of-code-2020", "max_issues_repo_head_hexsha": "adcded980ab6bb103f498029f7c40d44f16c62a8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "20/solution.py", "max_forks_repo_name": "poscar/advent-of-code-2020", "max_forks_repo_head_hexsha": "adcded980ab6bb103f498029f7c40d44f16c62a8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9537037037, "max_line_length": 128, "alphanum_fraction": 0.6840390879, "include": true, "reason": "import numpy", "num_tokens": 3124}
|
import unittest
import numpy as np
from pyrepo import weighting_methods as mcda_weights
# Test for CRITIC weighting
# Test for CRITIC weighting
class Test_CRITIC(unittest.TestCase):
    """Tests for the CRITIC criteria-weighting method."""

    def test_critic(self):
        """Test based on paper Tuş, A., & Aytaç Adalı, E. (2019). The new combination with CRITIC and WASPAS methods
        for the time and attendance software selection problem. Opsearch, 56(2), 528-538."""
        matrix = np.array([[5000, 3, 3, 4, 3, 2],
                           [680, 5, 3, 2, 2, 1],
                           [2000, 3, 2, 3, 4, 3],
                           [600, 4, 3, 1, 2, 2],
                           [5800, 2, 4, 3, 3, 4]])
        # The `types` vector from the paper was assigned here but never used
        # (critic_weighting takes only the matrix), so it has been removed.
        test_result = mcda_weights.critic_weighting(matrix)
        real_result = np.array([0.157, 0.249, 0.168, 0.121, 0.154, 0.151])
        # Compare to the published weights, rounded to 3 decimal places.
        self.assertEqual(list(np.round(test_result, 3)), list(real_result))
# Test for Entropy weighting
# Test for Entropy weighting
class Test_Entropy(unittest.TestCase):
    """Tests for the entropy criteria-weighting method, against three
    published reference results.

    Note: the `types` vectors given in the source papers were assigned but
    never used (entropy_weighting takes only the matrix), so they have been
    removed.
    """

    def test_Entropy(self):
        """Test based on paper Xu, X. (2004). A note on the subjective and objective integrated approach to
        determine attribute weights. European Journal of Operational Research, 156(2),
        530-532."""
        matrix = np.array([[30, 30, 38, 29],
                           [19, 54, 86, 29],
                           [19, 15, 85, 28.9],
                           [68, 70, 60, 29]])
        test_result = mcda_weights.entropy_weighting(matrix)
        real_result = np.array([0.4630, 0.3992, 0.1378, 0.0000])
        self.assertEqual(list(np.round(test_result, 4)), list(real_result))

    def test_Entropy2(self):
        """Test based on paper Zavadskas, E. K., & Podvezko, V. (2016). Integrated determination of objective
        criteria weights in MCDM. International Journal of Information Technology & Decision
        Making, 15(02), 267-283."""
        matrix = np.array([[3.0, 100, 10, 7],
                           [2.5, 80, 8, 5],
                           [1.8, 50, 20, 11],
                           [2.2, 70, 12, 9]])
        test_result = mcda_weights.entropy_weighting(matrix)
        real_result = np.array([0.1146, 0.1981, 0.4185, 0.2689])
        self.assertEqual(list(np.round(test_result, 4)), list(real_result))

    def test_Entropy3(self):
        """Ersoy, Y. (2021). Equipment selection for an e-commerce company using entropy-based
        topsis, edas and codas methods during the COVID-19. LogForum, 17(3)."""
        matrix = np.array([[256, 8, 41, 1.6, 1.77, 7347.16],
                           [256, 8, 32, 1.0, 1.8, 6919.99],
                           [256, 8, 53, 1.6, 1.9, 8400],
                           [256, 8, 41, 1.0, 1.75, 6808.9],
                           [512, 8, 35, 1.6, 1.7, 8479.99],
                           [256, 4, 35, 1.6, 1.7, 7499.99]])
        # (The original also declared a 4-element `types` vector here, which
        # did not even match this 6-column matrix -- further evidence it was
        # dead code.)
        test_result = mcda_weights.entropy_weighting(matrix)
        real_result = np.array([0.405, 0.221, 0.134, 0.199, 0.007, 0.034])
        self.assertEqual(list(np.round(test_result, 3)), list(real_result))
# Test for Standard Deviation weighting
# Test for Standard Deviation weighting
class Test_STD(unittest.TestCase):
    """Tests for the standard-deviation criteria-weighting method."""

    def test_std(self):
        """Test based on paper Sałabun, W., Wątróbski, J., & Shekhovtsov, A. (2020). Are mcda methods benchmarkable?
        a comparative study of topsis, vikor, copras, and promethee ii methods. Symmetry, 12(9),
        1549."""
        matrix = np.array([[0.619, 0.449, 0.447],
                           [0.862, 0.466, 0.006],
                           [0.458, 0.698, 0.771],
                           [0.777, 0.631, 0.491],
                           [0.567, 0.992, 0.968]])
        # The unused `types` vector has been removed (std_weighting takes
        # only the matrix).
        test_result = mcda_weights.std_weighting(matrix)
        real_result = np.array([0.217, 0.294, 0.488])
        self.assertEqual(list(np.round(test_result, 3)), list(real_result))
def main():
    """Invoke each test method directly, bypassing the unittest runner."""
    Test_CRITIC().test_critic()
    entropy_case = Test_Entropy()
    entropy_case.test_Entropy()
    entropy_case.test_Entropy2()
    entropy_case.test_Entropy3()
    Test_STD().test_std()
if __name__ == '__main__':
    # Allow running the tests directly (python test_weights.py).
    main()
|
{"hexsha": "31df4101c7747d50aa165fb378314bf743841781", "size": 3940, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_weights.py", "max_stars_repo_name": "energyinpython/pre-pyrepo", "max_stars_repo_head_hexsha": "92e44594e12d1110247f011e51734e5ce1fe0b8e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_weights.py", "max_issues_repo_name": "energyinpython/pre-pyrepo", "max_issues_repo_head_hexsha": "92e44594e12d1110247f011e51734e5ce1fe0b8e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_weights.py", "max_forks_repo_name": "energyinpython/pre-pyrepo", "max_forks_repo_head_hexsha": "92e44594e12d1110247f011e51734e5ce1fe0b8e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3898305085, "max_line_length": 117, "alphanum_fraction": 0.595177665, "include": true, "reason": "import numpy", "num_tokens": 1325}
|
! eggx.f90
!
! Fortran 2003 ISO C binding interfaces to the EGGX graphics library.
! For more information on EGGX/ProCALL, see:
!
! https://www.ir.isas.jaxa.jp/~cyamauch/eggx_procall/
!
! Author: Philipp Engel
! Licence: ISC
module eggx
    ! Only the two wrapped C symbols are exported; everything else private.
    use, intrinsic :: iso_c_binding
    implicit none
    private
    public :: eggx_gputimage
    public :: eggx_winname
    interface
        ! int eggx_gputimage(int win, double x, double y, unsigned char *buf, int width, int height, int mask)
        ! Puts an image buffer `buf` of size nw x nh into window `nwin` at
        ! position (x, y). Returns the C function's int status.
        ! NOTE(review): exact pixel layout of `buf` and semantics of `mask`
        ! are defined by the EGGX C library -- confirm against its docs.
        function eggx_gputimage(nwin, x, y, buf, nw, nh, mask) bind(c, name='eggx_gputimage')
            import :: c_char, c_double, c_int
            integer(kind=c_int), intent(in), value :: nwin
            real(kind=c_double), intent(in), value :: x
            real(kind=c_double), intent(in), value :: y
            character(kind=c_char), intent(in) :: buf(*)
            integer(kind=c_int), intent(in), value :: nw
            integer(kind=c_int), intent(in), value :: nh
            integer(kind=c_int), intent(in), value :: mask
            integer(kind=c_int) :: eggx_gputimage
        end function eggx_gputimage
        ! int winname(int wn, const char *argsformat, ...)
        ! Sets the title of window `nwin`. The C function is variadic; this
        ! binding exposes only the fixed format-string argument, so callers
        ! must pass a fully-formatted, null-terminated string.
        function eggx_winname(nwin, argsformat) bind(c, name='eggx_winname')
            import :: c_char, c_int
            integer(kind=c_int), intent(in), value :: nwin
            character(kind=c_char), intent(in) :: argsformat
            integer(kind=c_int) :: eggx_winname
        end function eggx_winname
    end interface
end module eggx
|
{"hexsha": "dd9f012b9de0161041e2b0b4bc3eb658b01fa649", "size": 1604, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/eggx.f90", "max_stars_repo_name": "interkosmos/eggx-procall-2003", "max_stars_repo_head_hexsha": "9741f8237c07041dfffde566d9e04604885ecc37", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-01-05T07:24:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-09T09:12:36.000Z", "max_issues_repo_path": "src/eggx.f90", "max_issues_repo_name": "interkosmos/eggx-procall-2003", "max_issues_repo_head_hexsha": "9741f8237c07041dfffde566d9e04604885ecc37", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-23T01:31:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-23T17:49:51.000Z", "max_forks_repo_path": "src/eggx.f90", "max_forks_repo_name": "interkosmos/eggx-procall-2003", "max_forks_repo_head_hexsha": "9741f8237c07041dfffde566d9e04604885ecc37", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1219512195, "max_line_length": 110, "alphanum_fraction": 0.5947630923, "num_tokens": 418}
|
Block <- java.type('net.minecraft.block.Block')
AbstractBlock <- java.type('net.minecraft.block.AbstractBlock')
AbstractBlockSettingsArray <- java.type('net.minecraft.block.AbstractBlock$Settings[]')
ClassArray <- java.type('java.lang.Class[]')
Material <- java.type('net.minecraft.block.Material')
BlockItem <- java.type('net.minecraft.item.BlockItem')
ItemGroup <- java.type('net.minecraft.item.ItemGroup')
Registry <- java.type('net.minecraft.util.registry.Registry')
Identifier <- java.type('net.minecraft.util.Identifier')
ActionResult <- java.type('net.minecraft.util.ActionResult')
LiteralText <- java.type('net.minecraft.text.LiteralText')
FabricBlockSettings <- java.type('net.fabricmc.fabric.api.object.builder.v1.block.FabricBlockSettings')
FabricItemSettings <- java.type('net.fabricmc.fabric.api.item.v1.FabricItemSettings')
IncludeMethodFilter <- java.type('dev.vriska.quiltlangpolyglot.IncludeMethodFilter')
ProxyFactory <- java.type('javassist.util.proxy.ProxyFactory')
ArrayList <- java.type('java.util.ArrayList')
# Block use handler: greets the player (server side only) and reports success.
onUseRBlock <- function(block, state, world, pos, player, hand, hit) {
  serverSide <- !world$isClient
  if (serverSide) {
    greeting <- new(LiteralText, "Hello from R!")
    player$sendMessage(greeting, FALSE)
  }
  ActionResult$SUCCESS
}
# Javassist method-handler bridge: forwards an intercepted Java method call
# (the proxy instance plus the original arguments) to onUseRBlock.
# `method` and `proceed` are part of the handler signature but unused here.
rBlockHandler <- function(self, method, proceed, args) {
  return(do.call(onUseRBlock, as.list(c(self, args))))
}
# Mod entry point: registers the javassist-proxied R block and its item form.
onInitialize <- function() {
  # Build a proxy subclass of Block whose onUse calls are routed to R
  # via rBlockHandler.
  blockFactory <- new(ProxyFactory)
  blockFactory$setSuperclass(Block$class)
  filteredMethods <- new(ArrayList, 1)
  filteredMethods$add("onUse")
  blockFactory$setFilter(new(IncludeMethodFilter, Block$class, filteredMethods))
  rBlockSettings <- FabricBlockSettings$of(Material$METAL)$strength(4)
  # Proxy constructor arguments: a single AbstractBlock$Settings parameter.
  paramTypes <- new(ClassArray, 1)
  paramTypes[1] <- AbstractBlock$Settings$class
  params <- new(AbstractBlockSettingsArray, 1)
  params[1] <- rBlockSettings
  # Consistency fix: use `<-` like every other assignment in this file
  # (the original used `=` here only).
  R_BLOCK <- blockFactory$create(paramTypes, params, rBlockHandler)
  Registry$register(Registry$BLOCK, new(Identifier, "quilt_lang_polyglot", "r_block"), R_BLOCK)
  # Register the corresponding BlockItem under the Misc creative tab.
  rItemSettings <- new(FabricItemSettings)["group(net.minecraft.item.ItemGroup)"](ItemGroup$MISC)
  blockItem <- new(BlockItem, R_BLOCK, rItemSettings)
  Registry$register(Registry$ITEM, new(Identifier, "quilt_lang_polyglot", "r_block"), blockItem)
}
|
{"hexsha": "7b5f1083572e56c9b8e92af5fed4b77ddb2ad834", "size": 2301, "ext": "r", "lang": "R", "max_stars_repo_path": "src/main/resources/main.r", "max_stars_repo_name": "leo60228/QuiltLangJS", "max_stars_repo_head_hexsha": "61b2ef1f4409a0fb10e46ec23e85be21cf6cb857", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-26T03:44:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T03:44:09.000Z", "max_issues_repo_path": "src/main/resources/main.r", "max_issues_repo_name": "leo60228/QuiltLangPolyglot", "max_issues_repo_head_hexsha": "61b2ef1f4409a0fb10e46ec23e85be21cf6cb857", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/main/resources/main.r", "max_forks_repo_name": "leo60228/QuiltLangPolyglot", "max_forks_repo_head_hexsha": "61b2ef1f4409a0fb10e46ec23e85be21cf6cb857", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.8363636364, "max_line_length": 103, "alphanum_fraction": 0.7505432421, "num_tokens": 546}
|
\subsection{Project Management Summary}
\subsubsection{Time Management Record}
To effectively manage time over the course of the project, the group constructed
a Gantt chart to track the component tasks of the project and to fit those tasks into the
tight schedule defined by the project guidelines. To build the Gantt chart, the
group first plotted the milestones of the project (shown as diamonds on the
Gantt chart): submitting a brief to the
customer, the two progress seminars, the report hand-in and the final
presentation. The time between the milestones formed the development cycles
used in the project, with two iterations of development of four weeks in length
each before each progress seminar. One week at the end of each iteration was
left for preparation of the progress seminar.
At the end of the first iteration, the group was on time with the deliverables
set in the plan, and there was time during the preparation of the first progress
seminar to integrate the code produced and present a demo of the framework
retrieving trends and URLs. After the progress seminar the group started to work on
the malware scanners, having researched scanning methods during the first
iteration. When preparing for the second progress seminar, the group started to
suspect that the task of creating and integrating malware scanners into the
framework was harder than originally estimated, resulting in development effort
continuing after the second progress seminar in contrast to the plan. The
consequence of this was reduction in scope for the amount of results
collected, and the delay of the web reporting interface to an additional task
completed after the delivery of the rest of the project but still in time for
presenting to the customer at the final presentation.
The Gantt charts can be found in Appendix B.
\subsubsection{Writing the Report}
Although the first words of the report were written in early November, writing
the report started in earnest at the beginning of December. To aid the group
with assessing current progress with the report, a python script was written
that shows a day by day cumulative word count drawn on a stacked bar chart with
one segment per group member. The output of the python script is shown in Figure
\ref{fig:rep-1}.
This script uses the github API and texcount.pl script to count words in each
commit in the repository.
\begin{figure}[htb]
\centering
\includegraphics[width=0.8\textwidth]{img/reportificate.png}
\caption{Final Report Word Count Breakdown}
\label{fig:rep-1}
\end{figure}
Sections of the report were allocated to group members using a spreadsheet that
can be found in the accompanying data package under
\verb`doc/tools/word-org.ods`. The spreadsheet provided useful validation for
the sum of words allocated, and ensured that group members were not given an
unfair amount of report to write.
\subsubsection{Achievements \& Lessons Learnt}
The total length of time available to complete the project in was 11 weeks, in
this time the group had to fit in a customer brief, two presentations, and a
24000 word report. This meant that the effective development time for the
project was in the region of 4-6 weeks. In this time the group managed to
construct a working framework that could collect trends from a number of
sources, retrieve URLs related to those trends, and conduct rudimentary analysis
of the URLs for malware. As of writing the only working malware scanners are
those which involve the very lowest levels of interaction. Whilst the
Capture-HPC scanner was successfully integrated into the framework, further
configuration of the system is needed before it can be used to process URLs in
an effective way. The other malware scanners are not yet fully integrated into
the framework. Given the time constraints, the group managed to achieve most of
the work needed for a fully featured prototype and it is expected that it will be
possible to demo a completed version of the project at the final presentation to
the customer. In retrospect the scope of the project was very wide, and perhaps
a more complete product could have been delivered if the scope had been reduced
somewhat.
Another lesson learned during the course of the project relates to the division
of work. The style of development was intended to allow a reasonable degree of
autonomy, but still provide ample opportunity for the group members to assist
each other with tasks in case of difficulty. An unintended consequence of this
was that no one group member had complete knowledge of all of the technical
aspects of the system, making integration more complicated and hence causing
delays.
|
{"hexsha": "aacb551c3719bf0dc792d2b9724723531953fadc", "size": 4647, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/report/control.tex", "max_stars_repo_name": "chrissorchard/malucrawl", "max_stars_repo_head_hexsha": "cb9633baff8752f3e043a2cfdb91cde868666ae2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-12-09T03:02:05.000Z", "max_stars_repo_stars_event_max_datetime": "2016-12-09T03:02:05.000Z", "max_issues_repo_path": "doc/report/control.tex", "max_issues_repo_name": "chrissorchard/malucrawl", "max_issues_repo_head_hexsha": "cb9633baff8752f3e043a2cfdb91cde868666ae2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/report/control.tex", "max_forks_repo_name": "chrissorchard/malucrawl", "max_forks_repo_head_hexsha": "cb9633baff8752f3e043a2cfdb91cde868666ae2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 55.3214285714, "max_line_length": 90, "alphanum_fraction": 0.8142887885, "num_tokens": 958}
|
function svgClipping(s, path)
% Attaches an SVG clipping path to each plot object handle.
% PRELIMINARY IMPLEMENTATION (Parameters may change)
%
% svgClipping(s, path)
% Parameters:
%   s    : Array of plot object handles
%   path : Clipping path nx3 or nx2.
for idx = 1:length(s)
    % Store the path under UserData.svg.ClippingPath on each handle.
    ud = get(s(idx), 'UserData');
    ud.svg.ClippingPath = path;
    set(s(idx), 'UserData', ud);
end
|
{"author": "marianux", "repo": "ecg-kit", "sha": "c8e3de47c54a9214138143676d2aa546b0540dd2", "save_path": "github-repos/MATLAB/marianux-ecg-kit", "path": "github-repos/MATLAB/marianux-ecg-kit/ecg-kit-c8e3de47c54a9214138143676d2aa546b0540dd2/common/plot2svg/svgClipping.m"}
|
#! /usr/bin/env python
"""U.S. Strike Duration Data"""
__all__ = ['COPYRIGHT','TITLE','SOURCE','DESCRSHORT','DESCRLONG','NOTE', 'load']
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
This is a subset of the data used in Kennan (1985). It was originally
published by the Bureau of Labor Statistics.
::
Kennan, J. 1985. "The duration of contract strikes in US manufacturing.
`Journal of Econometrics` 28.1, 5-28.
"""
DESCRSHORT = """Contains data on the length of strikes in US manufacturing and
unanticipated industrial production."""
DESCRLONG = """Contains data on the length of strikes in US manufacturing and
unanticipated industrial production. The data is a subset of the data originally
used by Kennan. The data here is data for the months of June only to avoid
seasonal issues."""
#suggested notes
NOTE = """
Number of observations - 62
Number of variables - 2
Variable name definitions::
duration - duration of the strike in days
iprod - unanticipated industrial production
"""
from numpy import recfromtxt, column_stack, array
from scikits.statsmodels.datasets import Dataset
from os.path import dirname, abspath
def load():
    """
    Load the strikes data and return a Dataset class instance.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    filepath = dirname(abspath(__file__))
    ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
    # Use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(filepath + '/strikes.csv', 'rb') as csvfile:
        data = recfromtxt(csvfile, delimiter=",", names=True, dtype=float)
    names = list(data.dtype.names)
    ##### SET THE INDEX #####
    # First column is the endogenous variable (strike duration).
    endog = array(data[names[0]], dtype=float)
    endog_name = names[0]
    ##### SET THE INDEX #####
    # column_stack expects a sequence of arrays; passing a generator is not
    # supported by modern numpy, hence the list comprehension.
    exog = column_stack([data[i] for i in names[1:]]).astype(float)
    exog_name = names[1:]
    dataset = Dataset(data=data, names=names, endog=endog, exog=exog,
                      endog_name=endog_name, exog_name=exog_name)
    return dataset
|
{"hexsha": "4f6617757b5b767a8be8110655a0e91b95367409", "size": 2058, "ext": "py", "lang": "Python", "max_stars_repo_path": "scikits/statsmodels/datasets/strikes/data.py", "max_stars_repo_name": "matthew-brett/statsmodels", "max_stars_repo_head_hexsha": "915c9dc2d762c5592ac17a7cf5f1cc957fcbde1c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scikits/statsmodels/datasets/strikes/data.py", "max_issues_repo_name": "matthew-brett/statsmodels", "max_issues_repo_head_hexsha": "915c9dc2d762c5592ac17a7cf5f1cc957fcbde1c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scikits/statsmodels/datasets/strikes/data.py", "max_forks_repo_name": "matthew-brett/statsmodels", "max_forks_repo_head_hexsha": "915c9dc2d762c5592ac17a7cf5f1cc957fcbde1c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7164179104, "max_line_length": 80, "alphanum_fraction": 0.6812439261, "include": true, "reason": "from numpy", "num_tokens": 497}
|
import numpy as np
import sys
import matplotlib.ticker as mticker
def file2stats(filename):
    """Read one float per line from results/<filename> and return summary stats.

    Parameters:
        filename: name of the results file, looked up under the results/
            directory.

    Returns:
        (mean, std, max, min) of the values in the file.
    """
    print('WARNING: Results read have not been regenerated')
    with open('results/' + filename) as f:
        # Bug fix: the original parsed float(line[:-1]), which silently
        # truncated the last digit of the final value when the file did not
        # end with a newline. float() tolerates surrounding whitespace, so
        # parse each (non-blank) line directly.
        A = np.array([float(line) for line in f if line.strip()])
    mean = np.mean(A)
    std = np.std(A)
    maxVal = np.amax(A)
    minVal = np.amin(A)
    return mean, std, maxVal, minVal
# Matplotlib tick formatters that render a number as LaTeX scientific
# notation via ScalarFormatter.
# NOTE(review): _formatSciNotation is private matplotlib API -- may break
# across matplotlib versions.
f = mticker.ScalarFormatter(useOffset=False, useMathText=True)
g = lambda x,pos : "{}".format(f._formatSciNotation('%1.2e' % x))
fmt = mticker.FuncFormatter(g)
# Bold variants: fmtbs opens a \boldsymbol{ group around the number...
gbs = lambda x,pos : r"\boldsymbol{"+"{}".format(f._formatSciNotation('%1.2e' % x))
fmtbs = mticker.FuncFormatter(gbs)
# ...and fmtbe appends the closing brace after the number.
gbe = lambda x,pos : "{}".format(f._formatSciNotation('%1.2e' % x)+r"}")
fmtbe = mticker.FuncFormatter(gbe)
def appendOptToString(string, eps, metric):
    """Append the optimum-value column (and the LaTeX row terminator) for
    the given eps/metric combination to a table row string."""
    # Reference optimum values for the 'Max' metric at eps = 1..4.
    maxOptima = {1: 5.3632, 2: 10.8079, 3: 16.1125, 4: 21.5276}
    string += " & "
    if metric == 'Max':
        if eps == 0:
            string += r'$ 0 $'
        elif eps in maxOptima:
            string += r'$' + fmt(maxOptima[eps]) + '$'
    elif metric == 'Mean':
        # Analytic mean value: 2*eps/sqrt(2*pi).
        string += r'$' + fmt(2 * eps / np.sqrt(2 * np.pi)) + '$'
    string += r' \\ '
    return string
def appendResultToString(string, n, eps, mode, metric):
    """Append "mean +/- std" columns for the three sampling strategies
    (phase, random, kmeans), bolding the one with the lowest mean, then
    append the optimum column, and return the extended row string.
    """
    if mode == 'GP':
        suffix = ''
    elif mode == 'NN':
        suffix = 'NN'
    stats = []
    for strategy in ('phase', 'random', 'kmeans'):
        fname = '%s%s_%s_n%d_eps%d' % (metric, suffix, strategy, n, eps)
        m, s, _, _ = file2stats(fname)
        stats.append((m, s))
    # Index of the smallest mean (argsort kept from the original so tie
    # resolution is unchanged); that column is typeset in bold.
    best = np.argsort(np.array([m for m, _ in stats]))[0]
    out = string
    for idx, (m, s) in enumerate(stats):
        if idx == best:
            out += r" & $" + fmtbs(m) + r" \pm " + fmtbe(s) + '$'
        else:
            out += r" & $" + fmt(m) + r" \pm " + fmt(s) + '$'
    return appendOptToString(out, eps, metric)
nSampleList = [1000]
epsilonList = [0, 1, 2, 3, 4]
i_iter = 0


def _print_table(caption, label, mode, sections):
    """Print one complete LaTeX results table to stdout.

    caption  -- caption text inserted after \\color{red}
    label    -- LaTeX \\label key
    mode     -- 'GP' or 'NN', forwarded to appendResultToString
    sections -- list of (metric, n) pairs; each expands to five rows
                (eps = 0..4), the last row terminated with \\hline

    Replaces ~120 copy-pasted print statements; the emitted text is
    identical to the original hand-unrolled version.
    """
    print(r"\begin{table}[h]")
    print(r"\caption{{\color{red}" + caption + r"}}")
    print(r"\label{" + label + r"}")
    print(r"\begin{center}")
    print(r"\begin{tabular}{ |c|c|c|c|c|c|c| }")
    print(r"\hline")
    print(r" Metric & n & $\varepsilon$ & Algo.~\ref{algo:iterative} (2 iter.) & Random & Stratified & Optimum \\ \hline ")
    for metric, n in sections:
        for eps in epsilonList:
            if eps == 0:
                # First row of a section carries the multirow headers;
                # format(n, ",d") renders 1000 as "1,000".
                row = (r"\multirow{5}{*}{" + metric
                       + r"} & \multirow{5}{*}{$" + format(n, ",d")
                       + r"$} & 0 ")
            else:
                row = r" & & " + str(eps) + " "
            row = appendResultToString(row, n, eps, mode, metric)
            if eps == epsilonList[-1]:
                row += r"\hline"
            print(row)
    print(r"\end{tabular}")
    print(r"\end{center}")
    print(r"\end{table}")


# GP Results (n = 1,000 only)
_print_table("GP results", "tab:GPResults", "GP",
             [("Mean", 1000), ("Max", 1000)])
print("\n\n\n")
# NN Results (n = 1,000 and n = 10,000)
_print_table("NN results", "tab:NNResults", "NN",
             [("Mean", 1000), ("Max", 1000), ("Mean", 10000), ("Max", 10000)])
|
{"hexsha": "5655bba723e5a197e20d61cd9fe7f3055c7eb86f", "size": 6952, "ext": "py", "lang": "Python", "max_stars_repo_path": "data-efficientML/artificialCase/generateLatexTable.py", "max_stars_repo_name": "NREL/Phase-space-sampling", "max_stars_repo_head_hexsha": "b3b65ce4fad962f2204c4452c844a6b284e048ae", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data-efficientML/artificialCase/generateLatexTable.py", "max_issues_repo_name": "NREL/Phase-space-sampling", "max_issues_repo_head_hexsha": "b3b65ce4fad962f2204c4452c844a6b284e048ae", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data-efficientML/artificialCase/generateLatexTable.py", "max_forks_repo_name": "NREL/Phase-space-sampling", "max_forks_repo_head_hexsha": "b3b65ce4fad962f2204c4452c844a6b284e048ae", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-23T13:47:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T13:47:21.000Z", "avg_line_length": 32.7924528302, "max_line_length": 119, "alphanum_fraction": 0.6008342923, "include": true, "reason": "import numpy", "num_tokens": 2370}
|
Require Import Privilege.
Require Import Axioms.
Require Import Ctl.BinaryRelations.
Require Import Glib.Glib.
Open Scope string_scope.
(* Dynamic environments (partial maps from variables to arbitrary types) *)
(* TODO: abstract as a section variable? *)
(* Variables and component (principal) names are plain strings. *)
Definition var := string.
Definition comp := string.

(* An environment partially maps a variable to an access policy paired
   with a dependently packaged value (a type V together with a V). *)
Definition env := var -> option (access × Σ V, V).

Declare Scope env_scope.
(* Delimit Scope env_scope with env. *)
Bind Scope env_scope with env.
Open Scope env_scope.

(* The empty environment: no variable is bound. *)
Definition env_empty: env := λ _, None.
Notation "•" := env_empty : env_scope.

(* Singleton environment binding [var] to [v] under policy [acc]. *)
Definition env_singleton {V} var (acc: access) (v: V): env := λ lookup,
  if lookup =? var then Some (acc, existT _ V v) else None.

(* [var ↦ v] binds using [allAcc] — presumably the fully-permissive
   policy from Privilege; confirm against its definition. *)
Notation "var ↦ v" := (env_singleton var allAcc v)
  (at level 55, right associativity) : env_scope.

(* Replace the access policy of every binding in [Γ] by [acc]. *)
Definition map_access (Γ: env) (acc: access): env := λ lookup,
  match Γ lookup with
  | Some (_, v) => Some (acc, v)
  | None => None
  end.

Notation "acc ? Γ" := (map_access Γ acc)
  (at level 60) : env_scope.
(* (at level 60, format "acc ? Γ") : env_scope. *)

(* First env shadows definitions in second *)
Definition env_concat (Γ1 Γ2: env) : env := λ lookup,
  match Γ1 lookup with
  | Some v => Some v
  | None => Γ2 lookup
  end.

Notation "Γ1 ;; Γ2" := (env_concat Γ1 Γ2)
  (at level 65, right associativity) : env_scope.

(* [read Γ c name v]: [name] is bound in [Γ] to value [v] under a
   (unique) policy granting component [c] the p_read privilege. *)
Definition read {V} (Γ: env) (c: comp) (name: var) (v: V) : Prop :=
  exists! acc: access,
    Γ name = Some (acc, existT _ V v) /\
    acc c p_read.

(* [write Γ c name v Γ']: [name] is bound in [Γ] under a policy granting
   [c] p_write, and [Γ'] rebinds [name] to [v] keeping that policy. *)
Definition write {V} (Γ: env) (c: comp) (name: var) (v: V) (Γ': env) : Prop :=
  exists! (acc: access) curr,
    Γ name = Some (acc, curr) /\
    acc c p_write /\
    Γ' = acc ? name ↦ v ;; Γ.

(* Remove the binding of [var] (if any) from [Γ]. *)
Definition remove_var (Γ: env) var : env := λ lookup,
  if lookup =? var then None else Γ lookup.
(* Mapping an access policy over a singleton just rebuilds the singleton
   with the new policy. *)
Lemma map_acc_env_singleton : forall acc x V (v: V),
  (acc ? x ↦ v) = env_singleton x acc v.
Proof using.
  intros *.
  extensionality y.
  unfold env_singleton.
  replace (
    acc ? (λ lookup,
      if lookup =? x
      then Some (allAcc, existT _ V v)
      else None))
  with
    (λ lookup,
      if lookup =? x
      then Some (acc, existT _ V v)
      else None
    ).
  - reflexivity.
  - extensionality lookup.
    unfold map_access.
    now destruct (lookup =? x).
Qed.

(* Boolean string equality reflects propositional equality. *)
Lemma refl_string_eq : forall x y,
  x =? y ->
  x = y.
Proof using.
  intros * ?.
  now destruct (eqb_spec x y).
Qed.

(* Inversion: a successful lookup in a (policy-mapped) singleton pins
   down both the key and the stored entry. *)
Theorem env_singleton_inv : forall acc x y V (v: V) v',
  (acc ? x ↦ v) y = Some v' ->
  x = y /\ v' = (acc, existT _ V v).
Proof using.
  intros * H.
  rewrite map_acc_env_singleton in H.
  unfold env_singleton in H.
  destruct (y =? x) eqn:case; [|discriminate].
  apply refl_string_eq in case as ->.
  now inv H.
Qed.

(* Prepending a singleton for [x] leaves lookups of any other key
   unchanged. *)
Theorem env_prepend_singleton_unchanged : forall (Γ: env) x y acc V (v: V),
  x <> y ->
  Γ y = (acc ? x ↦ v ;; Γ) y.
Proof using.
  intros * Hneq.
  unfold env_concat.
  destruct ((acc ? x ↦ v) y) eqn:case.
  - apply env_singleton_inv in case as [-> _].
    contradiction.
  - reflexivity.
Qed.
(* A write to [x] does not disturb the binding of any other variable. *)
Theorem write_unchanged : forall Γ Γ' x y c V (v: V),
  write Γ c x v Γ' ->
  x <> y ->
  Γ y = Γ' y.
Proof using.
  intros * Hwrite Hneq.
  destruct Hwrite as [acc [[curr [(Hwrite1 & Hwrite2 & ->) _]] _]].
  now apply env_prepend_singleton_unchanged.
Qed.

(* Reads of a different variable are preserved forward across a write. *)
Theorem write_unchanged_read : forall Γ Γ' x y c c' V (v: V) V' (v': V'),
  write Γ c x v Γ' ->
  x <> y ->
  read Γ c' y v' ->
  read Γ' c' y v'.
Proof using.
  intros * Hwrite Hneq Hread.
  unfold read in *.
  destruct Hread as (acc & (Hlookup & Hacc) & Hunique).
  exists acc.
  max split.
  - rewrite <- Hlookup.
    symmetry.
    enow eapply write_unchanged.
  - assumption.
  - intros * [H _].
    erewrite <- write_unchanged in H; [|eassumption|eassumption].
    rewrite Hlookup in H.
    now inv H.
Qed.

(* Converse direction: a read in the post-write environment of a
   different variable already held in the pre-write environment. *)
Theorem write_unchanged_read' : forall Γ Γ' x y c c' V (v: V) V' (v': V'),
  write Γ c x v Γ' ->
  x <> y ->
  read Γ' c' y v' ->
  read Γ c' y v'.
Proof using.
  intros * Hwrite Hneq Hread.
  unfold read in *.
  destruct Hread as (acc & (Hlookup & Hacc) & Hunique).
  exists acc.
  max split.
  - rewrite <- Hlookup.
    enow eapply write_unchanged.
  - assumption.
  - intros * [H _].
    erewrite <- write_unchanged in Hlookup; [|eassumption|eassumption].
    rewrite Hlookup in H.
    now inv H.
Qed.

(* An unbound variable cannot be read. *)
Theorem no_lookup_no_read : forall Γ c x V (v: V),
  Γ x = None -> ~ read Γ c x v.
Proof using.
  intros * Heq Hread.
  destruct Hread as (acc & (Heq' & _) & _).
  rewrite Heq in Heq'.
  discriminate Heq'.
Qed.

Close Scope env_scope.
Close Scope string_scope.
|
{"author": "gjurgensen", "repo": "thesis", "sha": "fee5e9e2ba728f3707eee7ad9d90837c25cf7764", "save_path": "github-repos/coq/gjurgensen-thesis", "path": "github-repos/coq/gjurgensen-thesis/thesis-fee5e9e2ba728f3707eee7ad9d90837c25cf7764/src/TransitionSystems.v"}
|
// Defining BOOST_TEST_MAIN before including the single-header Boost.Test
// makes this translation unit generate the test runner's main() entry
// point; the actual test cases live in the other .t.cc files linked in.
#define BOOST_TEST_MAIN
#include <boost/test/included/unit_test.hpp>
// just a blank file to get the unit test main function going
|
{"hexsha": "a112035a257edb423a2e3a2115b2d7b8fcd5c1d6", "size": 132, "ext": "cc", "lang": "C++", "max_stars_repo_path": "t/all.t.cc", "max_stars_repo_name": "dru/libroutez", "max_stars_repo_head_hexsha": "ef1b564db024b10b83eb531ce3ff925a5a32fa21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2016-05-08T23:08:51.000Z", "max_stars_repo_stars_event_max_datetime": "2016-05-08T23:08:51.000Z", "max_issues_repo_path": "t/all.t.cc", "max_issues_repo_name": "dru/libroutez", "max_issues_repo_head_hexsha": "ef1b564db024b10b83eb531ce3ff925a5a32fa21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "t/all.t.cc", "max_forks_repo_name": "dru/libroutez", "max_forks_repo_head_hexsha": "ef1b564db024b10b83eb531ce3ff925a5a32fa21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4, "max_line_length": 61, "alphanum_fraction": 0.7878787879, "num_tokens": 29}
|
import gym
import os
import numpy as np
from stable_baselines3 import SAC, PPO, A2C
from testEnv import TestEnvironment
from sac import create_model_SAC
from ppo import create_model_PPO
from a2c import create_model_A2C
# Shared simulation environment instance, reused for training (create_model_*)
# and for evaluation (run_test) of all three algorithms below.
env = TestEnvironment()
def load_model(algorithm, model_name):
    """Load and return a saved model via the algorithm class's ``load``.

    ``algorithm`` is a stable-baselines3 algorithm class (SAC/PPO/A2C);
    ``model_name`` is the saved model path without extension.
    """
    return algorithm.load(model_name)
def run_test(env, model, model_name):
    """Evaluate ``model`` on ``env`` and print the mean/std episode cost.

    Runs 10 evaluation runs of exactly 10,000 environment steps each; the
    env is reset whenever an episode terminates so every run covers the
    full 10,000 steps.  Rewards are negated, i.e. reported as costs.

    Fixes: the original also accumulated discounted rewards and episode
    lengths that were never reported (dead work, and the only use of
    env.gamma), and bound unused ``done``/``new_states`` variables.
    """
    episode_costs = []
    for _ in range(10):
        obs = env.reset()
        episode_cost = 0.0
        for _ in range(10000):
            # Deterministic policy for reproducible evaluation.
            action, _ = model.predict(obs, deterministic=True)
            obs, reward, done, _ = env.step(action)
            episode_cost += -reward
            if done:
                obs = env.reset()
        episode_costs.append(episode_cost)
    mean_cost = np.mean(episode_costs)
    std_cost = np.std(episode_costs)
    print("****** ", model_name, " ******")
    print('mean cost is %.2f' % mean_cost, 'std_cost %.3f' % std_cost)
# For each algorithm: train and save a model only if no saved model exists
# yet, then load it and evaluate it on the shared environment.
isSACModelAvailable = os.path.isfile('sac_gym_anm_model.zip')
isPPOModelAvailable = os.path.isfile('ppo_gym_anm_model.zip')
isA2CModelAvailable = os.path.isfile('a2c_gym_anm_model.zip')

# SAC model
if not isSACModelAvailable:
    # Fix: the original printed "SAC Model saved" BEFORE create_model_SAC
    # ran, inconsistent with the PPO/A2C branches and misleading if
    # training fails.  Announce only after training completes.
    create_model_SAC(env)
    print("SAC Model saved")
sac_model = load_model(SAC, 'sac_gym_anm_model')
run_test(env, sac_model, 'SAC')

# PPO model
if not isPPOModelAvailable:
    create_model_PPO(env)
    print("PPO Model saved")
ppo_model = load_model(PPO, 'ppo_gym_anm_model')
run_test(env, ppo_model, 'PPO')

# A2C model
if not isA2CModelAvailable:
    create_model_A2C(env)
    print("A2C Model saved")
a2c_model = load_model(A2C, 'a2c_gym_anm_model')
run_test(env, a2c_model, 'A2C')
|
{"hexsha": "ab59a899ed5b21fdd7863ae61d979fdea7fbe834", "size": 2266, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/rl_techniques_baseline3/main.py", "max_stars_repo_name": "anushaihalapathirana/RL-Gym-ANM-tool", "max_stars_repo_head_hexsha": "2dee2da9be26f512179d313c985832718a34042b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/rl_techniques_baseline3/main.py", "max_issues_repo_name": "anushaihalapathirana/RL-Gym-ANM-tool", "max_issues_repo_head_hexsha": "2dee2da9be26f512179d313c985832718a34042b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/rl_techniques_baseline3/main.py", "max_forks_repo_name": "anushaihalapathirana/RL-Gym-ANM-tool", "max_forks_repo_head_hexsha": "2dee2da9be26f512179d313c985832718a34042b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4722222222, "max_line_length": 80, "alphanum_fraction": 0.7012356575, "include": true, "reason": "import numpy", "num_tokens": 597}
|
from textwrap import dedent
from numpy.testing import assert_array_equal
import pytest
from svmlight_loader import (
InvalidSVMLight,
classification_from_lines,
multilabel_classification_from_lines,
regression_from_lines,
)
# Parametrize a test over all three loader entry points, so each invariant
# is checked for classification, multilabel, and regression loading alike.
all_loaders = pytest.mark.parametrize(
    "from_lines", [
        classification_from_lines,
        multilabel_classification_from_lines,
        regression_from_lines,
    ],
)
def test_simple():
    """A single line with three sparse features parses into the expected
    label and dense row."""
    X, y = classification_from_lines([b"-1 1:0.43 3:0.12 9:0.2"])
    dense = X.toarray()
    assert_array_equal(y, [-1])
    assert_array_equal(dense, [[0.43, 0, 0.12, 0, 0, 0, 0, 0, 0.2]])
def test_multiple_rows():
    """Three examples yield three labels and three dense rows."""
    lines = [
        b"1 1:0.43 3:0.12 9:0.2",
        b"0 2:0.12 8:0.2",
        b"1 3:0.01 4:0.3",
    ]
    X, y = classification_from_lines(lines)
    expected = [
        [0.43, 0, 0.12, 0, 0, 0, 0, 0, 0.2],
        [0, 0.12, 0, 0, 0, 0, 0, 0.2, 0],
        [0, 0, 0.01, 0.3, 0, 0, 0, 0, 0],
    ]
    assert_array_equal(y, [1, 0, 1])
    assert_array_equal(X.toarray(), expected)
def test_crazy_whitespace():
    # Leading/trailing blanks and repeated separators must be tolerated.
    # NOTE(review): the exact space runs inside these byte literals are the
    # point of the test -- confirm they survived any file reformatting.
    X, y = classification_from_lines(
        (
            b"   1 1:0.43 3:0.12 9:0.2   \n"
            b"0 2:0.12 8:0.2 \n"
            b" 1 3:0.01   4:0.3 "
        ).splitlines(),
    )
    assert_array_equal(y, [1, 0, 1])
    assert_array_equal(
        X.toarray(), [
            [0.43, 0, 0.12, 0, 0, 0, 0, 0, 0.2],
            [0, 0.12, 0, 0, 0, 0, 0, 0.2, 0],
            [0, 0, 0.01, 0.3, 0, 0, 0, 0, 0],
        ],
    )
def test_multilabel():
    # Comma-separated label lists per line; the expected () for the fourth
    # row means that line has no label field at all.
    # NOTE(review): the fourth data line's original leading whitespace (which
    # makes its label list empty) may not have survived reformatting of this
    # file -- confirm against the repository copy.
    X, y = multilabel_classification_from_lines(
        dedent(
            """\
            1,2 1:0.43 3:0.12 9:0.2
            2 2:0.12 8:0.2
            2,3,4 3:0.01 4:0.3
            6:0.01 7:0.3
            """
        ).encode().splitlines(),
    )
    assert_array_equal(y, [(1, 2), (2,), (2, 3, 4), ()])
    assert_array_equal(
        X.toarray(), [
            [0.43, 0, 0.12, 0, 0, 0, 0, 0, 0.2],
            [0, 0.12, 0, 0, 0, 0, 0, 0.2, 0],
            [0, 0, 0.01, 0.3, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0.01, 0.3, 0, 0],
        ],
    )
def test_crazy_multilabel_whitespace():
    # Multilabel variant of the whitespace-tolerance test.
    # NOTE(review): exact space runs inside the byte literals are
    # significant -- confirm they survived any file reformatting.
    X, y = multilabel_classification_from_lines(
        (
            b"  1,2 1:0.43 3:0.12 9:0.2  \n"
            b"2 2:0.12 8:0.2 \n"
            b" 2,3 3:0.01   4:0.3 "
        ).splitlines(),
    )
    assert_array_equal(y, [(1, 2), (2,), (2, 3)])
    assert_array_equal(
        X.toarray(), [
            [0.43, 0, 0.12, 0, 0, 0, 0, 0, 0.2],
            [0, 0.12, 0, 0, 0, 0, 0, 0.2, 0],
            [0, 0, 0.01, 0.3, 0, 0, 0, 0, 0],
        ],
    )
def test_regression():
    """Regression targets are parsed as floats."""
    lines = [
        b"0.2 1:0.43 3:0.12 9:0.2",
        b"3000.7 2:0.12 8:0.2",
        b"240.234 3:0.01 4:0.3",
        b"0.001 6:0.01 7:0.3",
    ]
    X, y = regression_from_lines(lines)
    expected = [
        [0.43, 0, 0.12, 0, 0, 0, 0, 0, 0.2],
        [0, 0.12, 0, 0, 0, 0, 0, 0.2, 0],
        [0, 0, 0.01, 0.3, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0.01, 0.3, 0, 0],
    ]
    assert_array_equal(y, [0.2, 3000.7, 240.234, 0.001])
    assert_array_equal(X.toarray(), expected)
def test_crazy_regression_whitespace():
    # Regression variant of the whitespace-tolerance test.
    # NOTE(review): exact space runs inside the byte literals are
    # significant -- confirm they survived any file reformatting.
    X, y = regression_from_lines(
        (
            b" 1.7 1:0.43 3:0.12 9:0.2   \n"
            b"0.3 2:0.12 8:0.2 \n"
            b" 1 3:0.01   4:0.3 "
        ).splitlines(),
    )
    assert_array_equal(y, [1.7, 0.3, 1])
    assert_array_equal(
        X.toarray(), [
            [0.43, 0, 0.12, 0, 0, 0, 0, 0, 0.2],
            [0, 0.12, 0, 0, 0, 0, 0, 0.2, 0],
            [0, 0, 0.01, 0.3, 0, 0, 0, 0, 0],
        ],
    )
@all_loaders
def test_invalid_order(from_lines):
    """Non-increasing feature indices are rejected, reporting the
    offending example number."""
    bad_lines = [
        b"0 2:0.12 8:0.2",
        b"1 3:0.43 1:0.12 9:0.2",  # indices 3, 1 are out of order
    ]
    with pytest.raises(InvalidSVMLight) as excinfo:
        from_lines(bad_lines)
    assert "example 2" in str(excinfo.value)
@all_loaders
def test_zero_index_in_nonzero_based_file(from_lines):
    """Feature index 0 is invalid when the file is declared one-based."""
    with pytest.raises(InvalidSVMLight) as excinfo:
        from_lines([b"-1 0:0.12 9:0.2"], zero_based=False)
    assert "example 1" in str(excinfo.value)
@all_loaders
def test_empty_line(from_lines):
    """A label-only line produces an all-zero feature row."""
    X, y = from_lines([
        b"1 1:0.43 3:0.12 9:0.2",
        b"0",
        b"0 3:0.12",
    ])
    expected = [
        [0.43, 0, 0.12, 0, 0, 0, 0, 0, 0.2],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0.12, 0, 0, 0, 0, 0, 0],
    ]
    assert_array_equal(X.toarray(), expected)
@all_loaders
def test_empty_lines_at_end(from_lines):
    """Trailing label-only lines still count toward the matrix width."""
    X, y = from_lines([
        b"1 1:0.43 3:0.12 9:0.2",
        b"0",
        b"1",
    ])
    expected = [[0.43, 0, 0.12, 0, 0, 0, 0, 0, 0.2]] + [[0] * 9] * 2
    assert_array_equal(X.toarray(), expected)
@all_loaders
def test_all_empty_lines(from_lines):
    """Only label-only lines: the matrix has zero columns."""
    X, y = from_lines([b"1", b"0", b"1"])
    assert_array_equal(X.toarray(), [[], [], []])
@all_loaders
def test_query_ids_are_ignored_by_default(from_lines):
    """qid fields are skipped unless explicitly requested."""
    data = [
        b"1 qid:1 1:0.43 3:0.12 9:0.2",
        b"0 qid:2 2:0.12 8:0.2",
        b"1 qid:1 3:0.01 4:0.3",
    ]
    X, _ = from_lines(data)
    expected = [
        [0.43, 0, 0.12, 0, 0, 0, 0, 0, 0.2],
        [0, 0.12, 0, 0, 0, 0, 0, 0.2, 0],
        [0, 0, 0.01, 0.3, 0, 0, 0, 0, 0],
    ]
    assert_array_equal(X.toarray(), expected)
@all_loaders
def test_query_ids_are_returned_if_requested(from_lines):
    """With query_id=True the loader returns a third array of qids."""
    data = [
        b"1 qid:1 1:0.43 3:0.12 9:0.2",
        b"0 qid:2 2:0.12 8:0.2",
        b"1 qid:1 3:0.01 4:0.3",
    ]
    X, _, query_id = from_lines(data, query_id=True)
    expected = [
        [0.43, 0, 0.12, 0, 0, 0, 0, 0, 0.2],
        [0, 0.12, 0, 0, 0, 0, 0, 0.2, 0],
        [0, 0, 0.01, 0.3, 0, 0, 0, 0, 0],
    ]
    assert_array_equal(query_id, [1, 2, 1])
    assert_array_equal(X.toarray(), expected)
|
{"hexsha": "41999d4f3610f505a8327e348061cdaaeed6a518", "size": 6535, "ext": "py", "lang": "Python", "max_stars_repo_path": "svmlight_loader/tests/test_api.py", "max_stars_repo_name": "Julian/svmlight-loader", "max_stars_repo_head_hexsha": "bbd470063d6b0376fd03e827fc1ac09e4632a939", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "svmlight_loader/tests/test_api.py", "max_issues_repo_name": "Julian/svmlight-loader", "max_issues_repo_head_hexsha": "bbd470063d6b0376fd03e827fc1ac09e4632a939", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-01-20T16:09:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-20T16:09:26.000Z", "max_forks_repo_path": "svmlight_loader/tests/test_api.py", "max_forks_repo_name": "Julian/svmlight-loader", "max_forks_repo_head_hexsha": "bbd470063d6b0376fd03e827fc1ac09e4632a939", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6603773585, "max_line_length": 72, "alphanum_fraction": 0.4335118592, "include": true, "reason": "from numpy", "num_tokens": 2659}
|
[STATEMENT]
theorem justify_fifo_push_relabel_prep_run_split:
"fifo_push_relabel el s t =
do {
pr \<leftarrow> fifo_push_relabel_prepare_impl el s t;
case pr of
None \<Rightarrow> return None
| Some (N,ami,c,cf) \<Rightarrow> do {
cf \<leftarrow> fifo_push_relabel_run_impl s t N ami cf;
return (Some (c,ami,N,cf))
}
}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fifo_push_relabel el s t = fifo_push_relabel_prepare_impl el s t \<bind> (\<lambda>pr. case pr of None \<Rightarrow> return None | Some (N, ami, c, cf) \<Rightarrow> fifo_push_relabel_run_impl s t N ami cf \<bind> (\<lambda>cf. return (Some (c, ami, N, cf))))
[PROOF STEP]
unfolding fifo_push_relabel_def fifo_push_relabel_prepare_impl_def
fifo_push_relabel_impl_tab_am_def fifo_push_relabel_impl_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (case prepareNet el s t of None \<Rightarrow> return None | Some (c, am, N) \<Rightarrow> Array.make N am \<bind> (\<lambda>ami. fifo_push_relabel_init_impl c N \<bind> fifo_push_relabel_run_impl s t N ami \<bind> (\<lambda>cfi. return (ami, cfi))) \<bind> (\<lambda>(ami, cf). return (Some (c, ami, N, cf)))) = (case prepareNet el s t of None \<Rightarrow> return None | Some (c, am, N) \<Rightarrow> Array.make N am \<bind> (\<lambda>ami. fifo_push_relabel_init_impl c N \<bind> (\<lambda>cfi. return (Some (N, ami, c, cfi))))) \<bind> (\<lambda>pr. case pr of None \<Rightarrow> return None | Some (N, ami, c, cf) \<Rightarrow> fifo_push_relabel_run_impl s t N ami cf \<bind> (\<lambda>cf. return (Some (c, ami, N, cf))))
[PROOF STEP]
by (auto split: option.split)
|
{"llama_tokens": 620, "file": "Prpu_Maxflow_Fifo_Push_Relabel_Impl", "length": 2}
|
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2019 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from math import exp, log
import pytest
import numpy as np
from fluids.constants import calorie, R
from chemicals.rachford_rice import *
from thermo.mixture import Mixture
from thermo.uniquac import UNIQUAC
from random import random
from thermo import *
import numpy as np
from fluids.numerics import jacobian, hessian, derivative, normalize, assert_close, assert_close1d, assert_close2d, assert_close3d
def test_UNIQUAC_functional():
    """Spot-check UNIQUAC_gammas against literature examples and a direct
    transcription of the original UNIQUAC activity-coefficient equations."""
    # P05.01c VLE Behavior of Ethanol - Water Using UNIQUAC
    # http://chemthermo.ddbst.com/Problems_Solutions/Mathcad_Files/P05.01c%20VLE%20Behavior%20of%20Ethanol%20-%20Water%20Using%20UNIQUAC.xps
    gammas = UNIQUAC_gammas(xs=[0.252, 0.748], rs=[2.1055, 0.9200], qs=[1.972, 1.400], taus=[[1.0, 1.0919744384510301], [0.37452902779205477, 1.0]])
    assert_close1d(gammas, [2.35875137797083, 1.2442093415968987])

    # Example 8.3 in [2]_ for solubility of benzene (2) in ethanol (1) at 260 K.
    # Worked great here
    gammas = UNIQUAC_gammas(xs=[.7566, .2434], rs=[2.1055, 3.1878], qs=[1.972, 2.4], taus=[[1.0, 1.17984681869376], [0.22826016391070073, 1.0]])
    assert_close1d(gammas, [1.0826343452263132, 3.0176007269546083])

    # Example 7.3 in [2], for electrolytes
    gammas = UNIQUAC_gammas(xs=[0.05, 0.025, 0.925], rs=[1., 1., 0.92], qs=[1., 1., 1.4], taus=[[1.0, 0.4052558731309731, 2.7333668483468143], [21.816716876191823, 1.0, 0.06871094878791346], [0.4790878929721784, 3.3901086879605944, 1.0]])
    assert_close1d(gammas, [0.3838177662072466, 0.49469915162858774, 1.0204435746722416])

    def UNIQUAC_original_form(xs, rs, qs, taus):
        # This works too - just slower.
        # Literal transcription of the combinatorial + residual UNIQUAC
        # equations, used here as an independent reference implementation.
        cmps = range(len(xs))
        rsxs = sum([rs[i]*xs[i] for i in cmps])
        qsxs = sum([qs[i]*xs[i] for i in cmps])
        Phis = [rs[i]*xs[i]/rsxs for i in cmps]
        thetas = [qs[i]*xs[i]/qsxs for i in cmps]
        ls = [5*(ri - qi) - (ri - 1.) for ri, qi in zip(rs, qs)]
        gammas = []
        for i in cmps:
            lngamma = (log(Phis[i]/xs[i]) + 5*qs[i]*log(thetas[i]/Phis[i]) + ls[i]
                       - Phis[i]/xs[i]*sum([xs[j]*ls[j] for j in cmps])
                       - qs[i]*log(sum([thetas[j]*taus[j][i] for j in cmps]))
                       + qs[i]
                       - qs[i]*sum([thetas[j]*taus[i][j]/sum([thetas[k]*taus[k][j] for k in cmps]) for j in cmps]))
            gammas.append(exp(lngamma))
        return gammas

    # The reference implementation must reproduce all three examples above.
    gammas = UNIQUAC_original_form(xs=[.7566, .2434], rs=[2.1055, 3.1878], qs=[1.972, 2.4], taus=[[1.0, 1.17984681869376], [0.22826016391070073, 1.0]])
    assert_close1d(gammas, [1.0826343452263132, 3.0176007269546083])

    gammas = UNIQUAC_original_form(xs=[0.252, 0.748], rs=[2.1055, 0.9200], qs=[1.972, 1.400], taus=[[1.0, 1.0919744384510301], [0.37452902779205477, 1.0]])
    assert_close1d(gammas, [2.35875137797083, 1.2442093415968987])

    gammas = UNIQUAC_original_form(xs=[0.05, 0.025, 0.925], rs=[1., 1., 0.92], qs=[1., 1., 1.4], taus=[[1.0, 0.4052558731309731, 2.7333668483468143], [21.816716876191823, 1.0, 0.06871094878791346], [0.4790878929721784, 3.3901086879605944, 1.0]])
    assert_close1d(gammas, [0.3838177662072466, 0.49469915162858774, 1.0204435746722416])
def make_rsqs(N):
    """Random UNIQUAC r (size) and q (surface) parameter lists for N
    components, each value rounded to three significant figures."""
    rs = [float('%.3g' % (random()*2.5)) for _ in range(N)]
    qs = [float('%.3g' % (random()*1.3)) for _ in range(N)]
    return rs, qs
def make_taus(N):
    """Random N x N table of 6-coefficient tau entries; the diagonal
    (i == j) is all zeros."""
    base = [1e-4, 200.0, -5e-4, -7e-5, 300, 9e-8]
    table = []
    for i in range(N):
        row = []
        for j in range(N):
            if i == j:
                coeffs = [0.0]*6
            else:
                coeffs = [float('%.3g' % (random()*b)) for b in base]
            row.append(coeffs)
        table.append(row)
    return table
def test_madeup_20():
    """Smoke test: a 20-component UNIQUAC object constructs without error
    from randomly generated parameters."""
    n_comp = 20
    rs, qs = make_rsqs(n_comp)
    taus = make_taus(n_comp)
    xs = normalize([random() for _ in range(n_comp)])
    T = 350.0
    GE = UNIQUAC(T=T, xs=xs, rs=rs, qs=qs, tau_coeffs=taus)
def test_UNIQUAC_madeup_ternary():
    """Check the analytical derivatives of a made-up ternary UNIQUAC model
    against numerical differentiation and hard-coded reference values
    (the values labeled sympy were verified symbolically)."""
    N = 3
    T = 331.42
    xs = [0.229, 0.175, 0.596]
    rs = [2.5735, 2.87, 1.4311]
    qs = [2.336, 2.41, 1.432]

    # madeup numbers to match Wilson example roughly
    tausA = [[0.0, -1.05e-4, -2.5e-4], [3.9e-4, 0.0, 1.6e-4], [-1.123e-4, 6.5e-4, 0]]
    tausB = [[0.0, 235.0, -169.0], [-160, 0.0, -715.0], [11.2, 144.0, 0.0]]
    tausC = [[0.0, -4.23e-4, 2.9e-4], [6.1e-4, 0.0, 8.2e-5], [-7.8e-4, 1.11e-4, 0]]
    tausD = [[0.0, -3.94e-5, 2.22e-5], [8.5e-5, 0.0, 4.4e-5], [-7.9e-5, 3.22e-5, 0]]
    tausE = [[0.0, -4.2e2, 8.32e2], [2.7e2, 0.0, 6.8e2], [3.7e2, 7.43e2, 0]]
    tausF = [[0.0, 9.64e-8, 8.94e-8], [1.53e-7, 0.0, 1.11e-7], [7.9e-8, 2.276e-8, 0]]
    ABCDEF = (tausA, tausB, tausC, tausD, tausE, tausF)
    GE = UNIQUAC(T=T, xs=xs, rs=rs, qs=qs, ABCDEF=ABCDEF)

    # repr and JSON serialization must round-trip the object state
    assert eval(str(GE)).GE() == GE.GE()
    GE2 = UNIQUAC.from_JSON(GE.as_JSON())
    assert GE2.__dict__ == GE.__dict__

    # GE: analytical value vs. pinned reference, and vs. the gamma identity
    GE_expect = 415.5805110962149
    GE_analytical = GE.GE()
    assert_close(GE_expect, GE_analytical, rtol=1e-13)
    gammas = UNIQUAC_gammas(taus=GE.taus(), rs=rs, qs=qs, xs=xs)
    # GE = R*T*sum(x_i ln gamma_i) must agree with the direct computation
    GE_identity = R*T*sum(xi*log(gamma) for xi, gamma in zip(xs, gammas))
    assert_close(GE_identity, GE_analytical, rtol=1e-12)

    # dGE_dT
    dGE_dT_expect = 0.9907140284750982
    dGE_dT_analytical = GE.dGE_dT()
    dGE_dT_numerical = derivative(lambda T: GE.to_T_xs(T, xs).GE(), T, order=7, dx=T*1e-3)
    assert_close(dGE_dT_analytical, dGE_dT_numerical, rtol=1e-12)
    assert_close(dGE_dT_expect, dGE_dT_analytical, rtol=1e-13)

    # d2GE_dT2
    d2GE_dT2_expect = -0.007148011229475758
    d2GE_dT2_analytical = GE.d2GE_dT2()
    d2GE_dT2_numerical = derivative(lambda T: GE.to_T_xs(T, xs).dGE_dT(), T, order=7, dx=T*1e-3)
    assert_close(d2GE_dT2_expect, d2GE_dT2_analytical, rtol=1e-12)
    assert_close(d2GE_dT2_analytical, d2GE_dT2_numerical, rtol=1e-12)

    # d3GE_dT3
    d3GE_dT3_expect = 2.4882477326368877e-05
    d3GE_dT3_analytical = GE.d3GE_dT3()
    assert_close(d3GE_dT3_expect, d3GE_dT3_analytical, rtol=1e-13)
    d3GE_dT3_numerical = derivative(lambda T: GE.to_T_xs(T, xs).d2GE_dT2(), T, order=11, dx=T*1e-2)
    assert_close(d3GE_dT3_analytical, d3GE_dT3_numerical, rtol=1e-12)

    # dphis_dxs: Jacobian of segment fractions w.r.t. mole fractions
    dphis_dxs_analytical = GE.dphis_dxs()
    dphis_dxs_expect = [[0.9223577846000854, -0.4473196931643269, -0.2230519905531248],
                        [-0.3418381934661886, 1.094722540086528, -0.19009311780433752],
                        [-0.5805195911338968, -0.6474028469222008, 0.41314510835746243]]
    assert_close2d(dphis_dxs_expect, dphis_dxs_analytical, rtol=1e-12)
    dphis_dxs_numerical = jacobian(lambda xs: GE.to_T_xs(T, xs).phis(), xs, scalar=False, perturbation=2e-8)
    assert_close2d(dphis_dxs_numerical, dphis_dxs_analytical, rtol=3e-8)

    # d2phis_dxixjs - checked to the last decimal with sympy
    d2phis_dxixjs_expect = [[[-2.441416183656415, 0.9048216556030662, 1.536594528053349],
                             [-0.7693373390462084, -0.9442924629794809, 1.7136298020256895],
                             [-0.3836232285397313, 0.5031631130108988, -0.11953988447116741]],
                            [[-0.7693373390462084, -0.9442924629794809, 1.7136298020256895],
                             [1.3204383950972896, -3.231500191022578, 1.9110617959252876],
                             [0.658424873597119, -0.5251124708645561, -0.13331240273256284]],
                            [[-0.3836232285397313, 0.5031631130108987, -0.11953988447116741],
                             [0.6584248735971189, -0.5251124708645561, -0.13331240273256284],
                             [0.32831771310273056, 0.27980444182238084, -0.6081221549251116]]]
    d2phis_dxixjs_analytical = GE.d2phis_dxixjs()
    assert_close3d(d2phis_dxixjs_analytical, d2phis_dxixjs_expect, rtol=1e-12)
    d2phis_dxixjs_numerical = hessian(lambda xs: GE.to_T_xs(T, xs).phis(), xs, scalar=False, perturbation=1e-5)
    assert_close3d(d2phis_dxixjs_numerical, d2phis_dxixjs_analytical, rtol=8e-5)

    # d2thetas_dxixjs: second derivatives of area fractions
    d2thetas_dxixjs_expect = [[[-2.346422740416712, 0.7760247163009644, 1.5703980241157476],
                               [-0.7026345706138027, -0.9175106511836936, 1.6201452217974965],
                               [-0.4174990477672056, 0.47571378156805694, -0.05821473380085118]],
                              [[-0.7026345706138027, -0.9175106511836936, 1.6201452217974965],
                               [1.0476523499983839, -2.7191206652946023, 1.6714683152962189],
                               [0.6225054627376287, -0.5624465978146614, -0.06005886492296719]],
                              [[-0.4174990477672056, 0.47571378156805694, -0.05821473380085118],
                               [0.6225054627376287, -0.5624465978146614, -0.06005886492296719],
                               [0.3698870633362176, 0.2916190647283637, -0.6615061280645813]]]
    d2thetas_dxixjs_analytical = GE.d2thetas_dxixjs()
    assert_close3d(d2thetas_dxixjs_analytical, d2thetas_dxixjs_expect, rtol=1e-12)
    d2thetas_dxixjs_numerical = hessian(lambda xs: GE.to_T_xs(T, xs).thetas(), xs, scalar=False, perturbation=2e-5)
    assert_close3d(d2thetas_dxixjs_numerical, d2thetas_dxixjs_analytical, rtol=1e-4)

    def to_jac(xs):
        return GE.to_T_xs(T, xs).GE()

    # Obtained 12 decimals of precision with numdifftools
    dGE_dxs_analytical = GE.dGE_dxs()
    dGE_dxs_expect = [-2651.3181821109024, -2085.574403592012, -2295.0860830203587]
    assert_close1d(dGE_dxs_analytical, dGE_dxs_expect, rtol=1e-12)
    dGE_dxs_numerical = jacobian(to_jac, xs, perturbation=1e-8)
    assert_close1d(dGE_dxs_numerical, dGE_dxs_analytical, rtol=1e-6)

    # d2GE_dxixjs
    def to_hess(xs):
        return GE.to_T_xs(T, xs).GE()

    d2GE_dxixjs_numerical = hessian(to_hess, xs, perturbation=1e-4)
    d2GE_dxixjs_sympy = [[-2890.4327598108343, -6687.099054095988, -1549.3754436994557],
                         [-6687.099054095988, -2811.283290487096, -1228.622385377738],
                         [-1549.3754436994557, -1228.622385377738, -3667.3880987585053]]
    d2GE_dxixjs_analytical = GE.d2GE_dxixjs()
    assert_close2d(d2GE_dxixjs_numerical, d2GE_dxixjs_analytical, rtol=1e-4)
    assert_close2d(d2GE_dxixjs_analytical, d2GE_dxixjs_sympy, rtol=1e-12)

    # Check json storage again, with some results
    GE2 = UNIQUAC.from_JSON(GE.as_JSON())
    assert GE2.__dict__ == GE.__dict__
{"hexsha": "2acab3dc29cec0910fb935157e696bef801d454f", "size": 11037, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_uniquac.py", "max_stars_repo_name": "brunokiyoshi/thermo", "max_stars_repo_head_hexsha": "5b31d21fd087dd0fc3302f023c5f3c52d9cbee3b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_uniquac.py", "max_issues_repo_name": "brunokiyoshi/thermo", "max_issues_repo_head_hexsha": "5b31d21fd087dd0fc3302f023c5f3c52d9cbee3b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_uniquac.py", "max_forks_repo_name": "brunokiyoshi/thermo", "max_forks_repo_head_hexsha": "5b31d21fd087dd0fc3302f023c5f3c52d9cbee3b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.1965065502, "max_line_length": 245, "alphanum_fraction": 0.6880492888, "include": true, "reason": "import numpy", "num_tokens": 4394}
|
import numpy as np
class EMatch:
    """
    Evaluate E_Match (formula 10 of the paper) for personalized blendshapes
    expressed directly in delta space (delta_p / dp).

    Dimensions:
        k := num_of_blendshapes
        f := num_frames
        n := num_features
    """

    def __init__(self, tckf, uk, daf):
        self.tilda_ckf = tckf
        self.uk = uk
        self.delta_af = daf
        # cache problem dimensions
        self.F = np.shape(daf)[0]
        self.K = np.shape(uk)[0]
        self.N = np.shape(uk)[1]

    def _ematch(self, dp):
        """
        Compute E_Match as in formula 10.

        :param dp: delta p, shape (k, n) or flattened (k*n,)
        :return: scalar e_match value
        """
        # accept a flattened vector (as handed over by scipy optimizers)
        if np.ndim(dp) < 2:
            dp = np.reshape(dp, (self.K, self.N))
        # weighted mask w[k, i, f] = uk[k, i] * delta_af[f, i];
        # equivalent to diag(uk[k]) @ delta_af.T without building the
        # diagonal matrices explicitly
        weighted = self.uk[:, :, None] * self.delta_af.T[None, :, :]
        # squared Euclidean distance per (blendshape, frame) pair
        sq_dist = np.power(np.linalg.norm(dp[:, :, None] - weighted, axis=1), 2)
        # trust-weighted average over the frames
        return np.sum(self.tilda_ckf * sq_dist) / self.F

    def get_eMatch(self):
        """
        return ematch as a function
        :return:
        """
        print("[Warning] Using this function for optimization may be very slow ")
        return self._ematch

    def get_dEmatch(self):
        """
        Compute the derivative of E_Match (formula 10) at delta_p as to minimize delta_p -> E_match' = 0
        equation: (2/F) * sum_f(c_{k,f}) * delta_p_k - (2/F) * sum_f[(c_{k,f}) * diag(u_k) * delta_a_f]

        It splits the equation in a diagonal matrix A and a vector b as to solve the equation Ax = b, with x = delta_p
        Since the equations are separable in xyz, the data is split per coordinate and one system is returned for each
        dimension, resulting in 3*(kMxkM) systems instead of one (3kMx3kM) -> section 4.6 of the paper

        M := num_markers = self.N / 3
        A* := (kM x kM) diag matrix with coef = (2/F) * sum_f(c_{k,f})
        b* := (kM,) vector with value = (2/F) * sum_f[(c_{k,f}) * diag(u_k) * delta_a_f]

        :return: AX, AY, AZ, bX, bY, bZ
        """
        # the split below only makes sense when features come as xyz triples
        if self.N % 3 != 0:
            raise ValueError("Number of features ({}) is not a multiple of 3 (xyz)".format(self.N))
        n_markers = int(self.N / 3)
        coef = 2 / self.F
        # A is shared by x, y and z: diagonal entries coef * sum_f(c[k, f]),
        # each repeated once per marker
        A = coef * np.diag(np.repeat(np.sum(self.tilda_ckf, axis=1), n_markers))
        # build the right-hand side vector of each coordinate axis
        rhs = []
        for axis in range(3):
            idx = np.arange(start=axis, stop=self.N, step=3)
            u_axis = self.uk[:, idx]
            a_axis = self.delta_af[:, idx]
            b = np.zeros((self.K, n_markers))
            for k in range(self.K):
                # term: tilda_c[k, :] * diag(u[k]) * delta_af[:]
                b[k] = coef * self.tilda_ckf[k] @ (np.diag(u_axis[k]) @ a_axis.T).T
            rhs.append(b.reshape(-1))
        bX, bY, bZ = rhs
        # Ax = Ay = Az = A
        return A, A, A, bX, bY, bZ
if __name__ == '__main__':
    """
    test E_Match function
    1) test that E_Match is computed correctly
    2) test optimization of the E_Match function

    run: python -m src.EMatch
    """
    np.random.seed(0)
    np.set_printoptions(precision=4, linewidth=200)

    # declare variables
    n_k = 2
    n_f = 3
    n_n = 12  # = 4 markers
    tckf = np.random.rand(n_k, n_f)  # (k, f)
    uk = np.random.rand(n_k, n_n)
    da = np.random.rand(n_f, n_n)
    dp = np.random.rand(n_k, n_n)

    print("----- EMatch Function -----")
    # control compute e_match (naive double loop as reference)
    ematch_ctrl = 0
    for f in range(n_f):
        for k in range(n_k):
            norm = np.linalg.norm(dp[k] - np.diag(uk[k]) @ da[f])
            ematch_ctrl += tckf[k, f] * norm**2
    ematch_ctrl /= n_f
    print("ematch_ctrl")
    print(ematch_ctrl)

    # compute e_match through the vectorized class implementation
    e_match_fn = EMatch(tckf, uk, da).get_eMatch()
    ematch = e_match_fn(dp)
    print("ematch")
    print(ematch)

    # test if value matches (up to 6 decimals)
    assert np.around(ematch, 6) == np.around(ematch_ctrl, 6)
    print("ematch values are equal")
    print()

    print("----- Minimization ------")
    import time

    print("try optimizer")
    from scipy import optimize
    start = time.time()
    opt = optimize.minimize(e_match_fn, dp, method="BFGS")
    print("solved in:", time.time() - start)
    print(opt.x[:10])  # print only 10 first

    from scipy.linalg import solve
    print("try solver")
    AX, AY, AZ, bX, bY, bZ = EMatch(tckf, uk, da).get_dEmatch()
    start = time.time()
    solX = solve(AX, bX)
    solY = solve(AY, bY)
    solZ = solve(AZ, bZ)
    # interleave per-axis solutions back to (x0, y0, z0, x1, ...) ordering,
    # matching the flattened dp layout used by the optimizer
    sol = np.vstack((solX, solY, solZ)).reshape(-1, order='F')
    print("solved in:", time.time() - start)
    print(sol[:10])  # print only 10 first

    # test if values match.
    # NOTE: the previous check `opt.x.all() == sol.all()` only compared the
    # truthiness of the two arrays (bool == bool), which passes for almost
    # any pair of vectors; compare the actual solution values instead.
    assert np.allclose(opt.x, sol, rtol=1e-3, atol=1e-3)
    print("Reached same value!")
|
{"hexsha": "4bca9789a7fd0a14ed4b378112c0432f88330415", "size": 5832, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/EMatch.py", "max_stars_repo_name": "johndpope/FacialRetargeting", "max_stars_repo_head_hexsha": "5fb0c1da6af6c3d59aef264f567bfa7a244d0764", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2020-08-19T02:52:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T12:35:04.000Z", "max_issues_repo_path": "src/EMatch.py", "max_issues_repo_name": "johndpope/FacialRetargeting", "max_issues_repo_head_hexsha": "5fb0c1da6af6c3d59aef264f567bfa7a244d0764", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-10-16T07:11:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-30T10:26:04.000Z", "max_forks_repo_path": "src/EMatch.py", "max_forks_repo_name": "johndpope/FacialRetargeting", "max_forks_repo_head_hexsha": "5fb0c1da6af6c3d59aef264f567bfa7a244d0764", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-08-24T08:30:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T15:55:24.000Z", "avg_line_length": 33.710982659, "max_line_length": 133, "alphanum_fraction": 0.5728737997, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1758}
|
/* Copyright (c) 2018, 2019, 2020 BlinkTrade, Inc.
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */
#ifndef TRIAL_IOFIBER_FIBER_H
#define TRIAL_IOFIBER_FIBER_H
#include <type_traits>
#include <stdexcept>
#include <atomic>
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/async_result.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/strand.hpp>
#include <boost/context/fiber.hpp>
#include <boost/intrusive/slist.hpp>
#include <boost/mp11/integer_sequence.hpp>
#include <boost/core/ignore_unused.hpp>
namespace trial {
namespace iofiber {
// Exception thrown inside a fiber to unwind its stack when it has been
// interrupted (see basic_fiber::interrupt()).
class fiber_interrupted
{};

// Trait mapping a set of argument types to an interruption handler; used by
// this_fiber::with_intr() (which calls assign()/on_result() on it).
// Forward declaration only — specializations are expected to be provided
// elsewhere.
template<class... Args>
struct interrupter_for;
namespace detail {
// Trait detecting whether a type is an Asio strand; the fiber classes
// require strand executors (serialized execution) and static_assert on it.
template<class>
struct is_strand: std::false_type {};

template<>
struct is_strand<boost::asio::io_context::strand>: std::true_type {};

template<class T>
struct is_strand<boost::asio::strand<T>>: std::true_type {};

// Monotonic token pairing a suspension with the resumption intended for it:
// stale wakeups compare their captured token against the current one and
// bail out instead of double-resuming the coroutine.
using resume_token_type = std::uint32_t;

template<class Strand, class IntrTrait>
struct with_intr_token;
// Type-erased base for one fiber-local storage slot. Slots are linked into
// the owning fiber's intrusive slist and identified by `type_index`, which
// points at a per-type tag object (see local_data<T>::id).
class local_data_interface
    : public boost::intrusive::slist_base_hook<
#ifdef NDEBUG
        boost::intrusive::link_mode<boost::intrusive::normal_link>
#else
        boost::intrusive::link_mode<boost::intrusive::safe_link>
#endif
    >
{
public:
    local_data_interface(void* type_index)
        : type_index(type_index)
    {}

    // Pointer to the stored value, erased to void*.
    virtual void* get() = 0;

    virtual ~local_data_interface() noexcept = default;

    void* type_index;
};
// Concrete fiber-local storage slot holding a value of type T.
template<class T>
class local_data: public local_data_interface
{
public:
    // Slots are destroyed in ~impl() (noexcept), so T's destructor must not
    // throw.
    static_assert(std::is_nothrow_destructible<T>::value,
                  "Fiber local storage destructors can't throw");

    template<class... Args>
    local_data(Args&&... args)
        : local_data_interface(&id)
        , value(std::forward<Args>(args)...)
    {}

    void* get() override
    {
        return &value;
    }

    T& operator*()
    {
        return value;
    }

    // The address of this object uniquely identifies T program-wide; it is
    // stored as local_data_interface::type_index.
    static char id;

private:
    T value;
};

template<class T>
char local_data<T>::id;
// Handle given to code running inside a fiber (the "yield object"). Grants
// access to the fiber's strand executor, cooperative suspension (yield()),
// interruption control and fiber-local storage. Copyable; all copies share
// the same underlying fiber state (pimpl_).
template<class Strand>
class basic_this_fiber
{
public:
    // Shared state of one fiber, referenced by the fiber handle, the yield
    // object and any scheduled completion handlers.
    struct impl
    {
        impl(Strand executor) : executor(std::move(executor)) {}

        ~impl() noexcept
        {
            local_data.clear_and_dispose([](local_data_interface* p) { delete p; });
        }

        // Thread-safe. {{{
        Strand executor;
        // This variable may only be set once and its value doesn't revert back
        // to false. Therefore this pattern is safe. If a synchronized action is
        // required, use the strand.
        std::atomic_bool interrupted{false};
        // }}}

        // Strand-safe. {{{
        boost::context::fiber coro;
        std::function<void()> interrupter;
        bool interruption_enabled = true;
        // Only useful when fiber finishes.
        bool interruption_caught = false;
        // Increment and store this token before you suspend the coroutine and
        // free its strand and check whether its value is same just before
        // resuming it. If values differ, do not resume the coroutine and
        // discard any would-deliver value.
        resume_token_type resume_token = 0;
        // Must NOT block/suspend the calling fiber/thread. IOW, the joiner
        // fiber must be scheduled as if `joiner_pimpl->executor.post()` has
        // been called. This property simplifies critical/synchronization
        // sections.
        std::function<void()> joiner_executor;
        // Fiber-local storage slots; disposed in ~impl().
        boost::intrusive::slist<
            local_data_interface,
            boost::intrusive::constant_time_size<false>,
            boost::intrusive::linear<true>,
            boost::intrusive::cache_last<false>
        > local_data;
        // }}}
    };

    class restore_interruption;

    // Scoped guard suppressing interruption delivery for this fiber
    // (restores the previous state on destruction).
    class disable_interruption
    {
    public:
        disable_interruption(const basic_this_fiber<Strand> &this_fiber)
            noexcept
            : pimpl(*this_fiber.pimpl_)
            , previous_state(pimpl.interruption_enabled)
        {
#ifndef NDEBUG
            // You already assume there will be no suspensions so it doesn't
            // make sense to worry about interruptions. Refactor your code.
            assert(this_fiber.suspension_disallowed == 0);
#endif // NDEBUG
            pimpl.interruption_enabled = false;
        }

        ~disable_interruption() noexcept
        {
            pimpl.interruption_enabled = previous_state;
        }

        disable_interruption(const disable_interruption&) = delete;
        disable_interruption& operator=(const disable_interruption&) = delete;

    private:
        impl& pimpl;
        bool previous_state;

        friend class restore_interruption;
    };

    // Scoped guard temporarily re-enabling interruption inside a
    // disable_interruption region.
    class restore_interruption
    {
    public:
        explicit restore_interruption(disable_interruption& disabler) noexcept
            : pimpl(disabler.pimpl)
        {
            pimpl.interruption_enabled = disabler.previous_state;
        }

        ~restore_interruption() noexcept
        {
            pimpl.interruption_enabled = false;
        }

        restore_interruption(const restore_interruption&) = delete;
        restore_interruption& operator=(const restore_interruption&) = delete;

    private:
        impl& pimpl;
    };

    basic_this_fiber(std::shared_ptr<impl> pimpl)
        : interrupter(pimpl->interrupter)
        , pimpl_(std::move(pimpl))
    {}

#ifdef NDEBUG
    basic_this_fiber(const basic_this_fiber<Strand>&) = default;
#else
    basic_this_fiber(const basic_this_fiber<Strand>& o)
        : interrupter(o.interrupter)
        , pimpl_(o.pimpl_)
        , out_ec_(o.out_ec_)
    {
        assert(o.suspension_disallowed == 0);
    }
#endif // NDEBUG

    basic_this_fiber& operator=(const basic_this_fiber<Strand>& o)
    {
        // Only the error-output channel may differ between copies.
        assert(pimpl_ == o.pimpl_);
        out_ec_ = o.out_ec_;
        return *this;
    }

    // NOTE(review): out_ec_ appears to select non-throwing error delivery
    // for suspending operations (errors written into `ec`) — confirm against
    // the async_result glue further below.
    basic_this_fiber<Strand> operator[](boost::system::error_code &ec)
    {
        basic_this_fiber<Strand> ret{*this};
        ret.out_ec_ = &ec;
        return std::move(ret);
    }

    // Reverts to the default error-delivery mode (out_ec_ == nullptr).
    basic_this_fiber<Strand> operator[](std::nullptr_t)
    {
        basic_this_fiber<Strand> ret{*this};
        ret.out_ec_ = nullptr;
        return std::move(ret);
    }

    Strand get_executor() const
    {
        return pimpl_->executor;
    }

    // Cooperatively reschedules this fiber through defer() and suspends
    // until the scheduled resumption runs. Acts as an interruption point.
    void yield()
    {
#ifndef NDEBUG
        assert(suspension_disallowed == 0);
#endif // NDEBUG
        if (pimpl_->interruption_enabled && pimpl_->interrupted.load()) {
            throw fiber_interrupted();
        }
        {
            auto token = ++pimpl_->resume_token;
            auto pimpl = this->pimpl_;
            pimpl_->executor.defer([pimpl,token]() {
                // Stale wakeup (the fiber was resumed through another path
                // in the meantime): drop it.
                if (token != pimpl->resume_token)
                    return;

                pimpl->coro = std::move(pimpl->coro).resume();
            }, std::allocator<void>{});
        }
        pimpl_->coro = std::move(pimpl_->coro).resume();
    }

    template<class... Args>
    with_intr_token<
        Strand,
        trial::iofiber::interrupter_for<typename std::decay<Args>::type...>
    >
    with_intr(Args&&... args);

    // Debug-only bracket marking a region where suspending is a logic error
    // (checked by asserts in yield()/with_intr()/disable_interruption).
    void forbid_suspend()
    {
#ifndef NDEBUG
        ++suspension_disallowed;
#endif // NDEBUG
    }

    void allow_suspend()
    {
#ifndef NDEBUG
        assert(suspension_disallowed > 0);
        --suspension_disallowed;
#endif // NDEBUG
    }

    // Having some “global” state to be used by the next call resembles OpenGL
    // state machine, but the actual inspiration came from a realization that
    // this style of API can be simpler to configure suspending calls after
    // reading Giacomo Tesio's awake syscall idea:
    // <http://jehanne.io/2018/11/15/simplicity-awakes.html>.
    std::function<void()>& interrupter;

    // Fiber-local storage: returns the existing slot for T, or constructs
    // one from `args` on first use.
    template<class T, class... Args>
    T& local(Args&&... args)
    {
        for (auto& e: pimpl_->local_data) {
            if (e.type_index == &local_data<T>::id)
                return *reinterpret_cast<T*>(e.get());
        }
        auto& e = *new local_data<T>(std::forward<Args>(args)...);
        pimpl_->local_data.push_front(e);
        return *e;
    }

    // Private {{{
    std::shared_ptr<impl> pimpl_;
    boost::system::error_code *out_ec_ = nullptr;
#ifndef NDEBUG
    int suspension_disallowed = 0;
#endif // NDEBUG
    // }}}
};
// Completion token produced by this_fiber::with_intr(): carries the fiber's
// shared state, the error-output channel, and — in its type — the
// interruption trait to apply around the suspending call.
template<class Strand, class IntrTrait>
struct with_intr_token
{
    with_intr_token(
        std::shared_ptr<typename basic_this_fiber<Strand>::impl> pimpl,
        boost::system::error_code* out_ec
    )
        : pimpl(std::move(pimpl))
        , out_ec(out_ec)
    {}

    with_intr_token(const with_intr_token&) = default;
    with_intr_token(with_intr_token&&) = default;

    std::shared_ptr<typename basic_this_fiber<Strand>::impl> pimpl;
    boost::system::error_code* out_ec;
};
// Binds the interruption trait selected by interrupter_for<Args...> to this
// fiber (via its assign() hook) and returns a completion token carrying
// that trait for the next suspending call.
template<class Strand>
template<class... Args>
inline
with_intr_token<
    Strand, trial::iofiber::interrupter_for<typename std::decay<Args>::type...>
>
basic_this_fiber<Strand>::with_intr(Args&&... args)
{
#ifndef NDEBUG
    assert(suspension_disallowed == 0);
#endif // NDEBUG
    using IntrTrait = trial::iofiber::interrupter_for<
        typename std::decay<Args>::type...>;
    IntrTrait::assign(*this, std::forward<Args>(args)...);
    return with_intr_token<Strand, IntrTrait>{pimpl_, out_ec_};
}
// Marker service installed into an execution_context right before it is
// force-stopped because a joinable fiber handle was dropped (see
// basic_fiber's destructor/move-assignment). Its presence is queried through
// context_aborted().
template<class T = void>
class service_aborted: public boost::asio::execution_context::service
{
public:
    using key_type = service_aborted<T>;

    explicit service_aborted(boost::asio::execution_context& ctx)
        : boost::asio::execution_context::service(ctx)
    {}

    static boost::asio::execution_context::id id;

private:
    virtual void shutdown() noexcept override {}
};

template<class T>
boost::asio::execution_context::id service_aborted<T>::id;
// Entry point executed on the new fiber's stack: runs the user functor,
// records a caught fiber_interrupted in interruption_caught, and wakes the
// joiner (if one registered) when the fiber finishes.
template<class Strand, class F>
struct SpawnFunctor
{
    using impl = typename basic_this_fiber<Strand>::impl;

    template<class F2>
    SpawnFunctor(F2&& f, std::shared_ptr<impl> pimpl)
        : f(std::forward<F2>(f))
        , pimpl(std::move(pimpl))
    {}

    boost::context::fiber operator()(boost::context::fiber&& sink)
    {
        // Store the caller's context so the fiber can suspend back into it.
        pimpl->coro.swap(sink);
        try {
            f(basic_this_fiber<Strand>{pimpl});
        } catch (const fiber_interrupted&) {
            pimpl->interruption_caught = true;
        }
        if (pimpl->joiner_executor) {
            pimpl->joiner_executor();
        }
        return std::move(pimpl->coro);
    }

    F f;
    std::shared_ptr<impl> pimpl;
};
} // namespace detail {
// Returns whether `ctx` was stopped because a joinable fiber handle was
// destroyed or overwritten (the abort paths install service_aborted).
template<class ExecutionContext>
bool context_aborted(ExecutionContext& ctx)
{
    return boost::asio::has_service<detail::service_aborted<>>(ctx);
}
// Owning handle to a spawned fiber (analogous to std::thread): it must be
// joined or detached before destruction and supports cooperative
// interruption.
template<class Strand>
class [[nodiscard]] basic_fiber
{
public:
    static_assert(detail::is_strand<Strand>::value, "");

    using this_fiber = detail::basic_this_fiber<Strand>;
    using impl = typename this_fiber::impl;

    // Default-constructed handle owns nothing (detached state).
    basic_fiber()
        : joinable_state(joinable_type::DETACHED)
    {}

    // Spawns `f` on `executor`; the first resumption is scheduled via
    // post() so construction itself never runs user code.
    template<class F, class StackAllocator = boost::context::default_stack>
    basic_fiber(Strand executor, F&& f,
                StackAllocator salloc = StackAllocator())
        : pimpl_{[&]() {
            auto pimpl = std::make_shared<impl>(executor);
            pimpl->coro = boost::context::fiber{
                std::allocator_arg,
                salloc,
                detail::SpawnFunctor<Strand, F>{std::forward<F>(f), pimpl}
            };
            executor.post([pimpl]() {
                pimpl->coro = std::move(pimpl->coro).resume();
            }, std::allocator<void>{});
            // Keep the context alive while the fiber exists; matched by
            // on_work_finished() in join()/detach().
            executor.on_work_started();
            return std::move(pimpl);
        }()}
        , joinable_state(joinable_type::JOINABLE)
    {}

    // Spawn on the same executor as the given yield object.
    template<class F, class StackAllocator = boost::context::default_stack>
    basic_fiber(detail::basic_this_fiber<Strand> this_fiber, F&& f,
                StackAllocator salloc = StackAllocator())
        : basic_fiber{
            this_fiber.get_executor(), std::forward<F>(f),
            std::move(salloc)
        }
    {}

    // Spawn on a fresh strand over the given execution context.
    template<class F, class StackAllocator = boost::context::default_stack>
    basic_fiber(decltype(std::declval<Strand>().context())& ctx, F&& f,
                StackAllocator salloc = StackAllocator())
        : basic_fiber{Strand{ctx}, std::forward<F>(f), std::move(salloc)}
    {}

    basic_fiber(basic_fiber&& o)
        : pimpl_(std::move(o.pimpl_))
        , joinable_state(o.joinable_state)
    {
        o.joinable_state = joinable_type::DETACHED;
    }

    // Dropping a joinable fiber is a logic error. Instead of std::thread's
    // process-wide terminate(), the whole execution context is marked
    // (service_aborted, queryable via context_aborted()) and stopped.
    ~basic_fiber()
    {
        if (joinable_state == joinable_type::JOINABLE) {
            boost::asio::use_service<detail::service_aborted<>>(
                pimpl_->executor.context());
            pimpl_->executor.context().stop();
        }
    }

    basic_fiber& operator=(basic_fiber&& o)
    {
        // Overwriting a joinable handle would leak the running fiber: abort
        // the context and report the programming error.
        if (joinable_state == joinable_type::JOINABLE) {
            boost::asio::use_service<detail::service_aborted<>>(
                pimpl_->executor.context());
            pimpl_->executor.context().stop();
            throw std::logic_error{"fiber handle leak"};
        }
        pimpl_ = std::move(o.pimpl_);
        joinable_state = o.joinable_state;
        o.joinable_state = joinable_type::DETACHED;
        return *this;
    }

    basic_fiber(const basic_fiber&) = delete;
    basic_fiber& operator=(const basic_fiber&) = delete;

    bool joinable() const
    {
        return joinable_state == joinable_type::JOINABLE;
    }

    // Join from a fiber using the same Strand type. Interruption point:
    // throws fiber_interrupted if the joiner was already interrupted.
    void join(this_fiber this_fiber)
    {
#ifndef NDEBUG
        assert(this_fiber.suspension_disallowed == 0);
#endif // NDEBUG
        assert(joinable());
        if (this_fiber.pimpl_->interruption_enabled
            && this_fiber.pimpl_->interrupted.load()) {
            throw fiber_interrupted();
        }
        // Same Strand type may still mean different strand instances.
        if (pimpl_->executor != this_fiber.get_executor()) {
            inter_strand_join(this_fiber);
            joinable_state = joinable_type::JOINED;
            pimpl_->executor.on_work_finished();
            return;
        }
        same_strand_join(this_fiber.pimpl_);
        joinable_state = joinable_type::JOINED;
        pimpl_->executor.on_work_finished();
    }

    // Join from a fiber using a different Strand type.
    template<class Strand2>
    void join(typename basic_fiber<Strand2>::this_fiber this_fiber)
    {
#ifndef NDEBUG
        assert(this_fiber.suspension_disallowed == 0);
#endif // NDEBUG
        assert(joinable());
        if (this_fiber.pimpl_->interruption_enabled
            && this_fiber.pimpl_->interrupted.load()) {
            throw fiber_interrupted();
        }
        inter_strand_join(this_fiber);
        joinable_state = joinable_type::JOINED;
        pimpl_->executor.on_work_finished();
    }

    void detach()
    {
        assert(joinable());
        joinable_state = joinable_type::DETACHED;
        pimpl_->executor.on_work_finished();
    }

    // Request cooperative interruption. Only the first call schedules the
    // delivery, which runs on the fiber's strand: either through the
    // fiber's registered interrupter or by resuming the suspended coroutine
    // with fiber_interrupted thrown inside it.
    void interrupt()
    {
        if (!joinable())
            return;

        bool interruption_in_progress = pimpl_->interrupted.exchange(true);

        if (!interruption_in_progress) {
            auto pimpl = pimpl_;
            pimpl_->executor.post([pimpl]() {
                if (!pimpl->coro || !pimpl->interruption_enabled)
                    return;

                if (pimpl->interrupter)
                    return pimpl->interrupter();

                // We must prevent double-resuming the coroutine. If we're here,
                // then there is a second scheduled resumption in progress. We
                // use a token approach where only the matched token gets to
                // resume the suspended coroutine and the competitors just
                // discard their result.
                ++pimpl->resume_token;
                pimpl->coro = std::move(pimpl->coro).resume_with(
                    [pimpl](boost::context::fiber&& sink)
                        -> boost::context::fiber {
                        pimpl->coro.swap(sink);
                        throw fiber_interrupted();
                        return {};
                    }
                );
            }, std::allocator<void>{});
        }
    }

    // Equivalent to `PTHREAD_CANCELED`, but name inspired by Python's trio:
    // https://trio.readthedocs.io/en/latest/reference-core.html#trio.The%20cancel%20scope%20interface.cancelled_caught
    bool interruption_caught() const
    {
        assert(joinable_state == joinable_type::JOINED);
        return pimpl_->interruption_caught;
    }

    std::shared_ptr<impl> pimpl_;

private:
    // Joiner and joinee share a strand: register a wakeup hook on the
    // joinee and suspend the joiner directly.
    void same_strand_join(std::shared_ptr<impl> &active_coro)
    {
        // Fiber already finished: nothing to wait for.
        if (!pimpl_->coro)
            return;

        auto token = ++active_coro->resume_token;
        pimpl_->joiner_executor = [active_coro,token]() {
            // This isn't strand-migration. This is just to let the joining
            // fiber die and perform any shutdown sequence it should.
            active_coro->executor.defer([active_coro,token]() {
                if (token != active_coro->resume_token)
                    return;

                active_coro->coro = std::move(active_coro->coro).resume();
            }, std::allocator<void>{});
        };
        active_coro->coro = std::move(active_coro->coro).resume();
    }

    // Joiner lives on another strand: install the wakeup through the
    // joinee's strand and keep the joiner's context alive (work guard)
    // while its fiber is suspended.
    template<class Strand2>
    void inter_strand_join(detail::basic_this_fiber<Strand2> this_fiber)
    {
        auto joiner_pimpl = this_fiber.pimpl_;
        auto token = ++joiner_pimpl->resume_token;
        pimpl_->executor.post([joiner_pimpl,token,this]() {
            auto awaker = [joiner_pimpl,token]() {
                joiner_pimpl->executor.post([joiner_pimpl,token]() {
                    if (token != joiner_pimpl->resume_token)
                        return;

                    joiner_pimpl->coro = std::move(joiner_pimpl->coro).resume();
                }, std::allocator<void>{});
            };

            // Same-strand-join
            if (!pimpl_->coro)
                awaker();
            else
                pimpl_->joiner_executor = std::move(awaker);
        }, std::allocator<void>{});
        auto _w{boost::asio::make_work_guard(joiner_pimpl->executor)};
        boost::ignore_unused(_w);
        joiner_pimpl->coro = std::move(joiner_pimpl->coro).resume();
    }

    enum class joinable_type: std::uint_fast8_t
    {
        JOINABLE,
        DETACHED,
        JOINED
    } joinable_state;
};
using fiber = basic_fiber<boost::asio::io_context::strand>;

// Scope-end policy: detach the fiber when the owning scope ends.
struct detach
{
    template<class Fiber, class Yield>
    void operator()(Fiber& fib, Yield&)
    {
        if (fib.joinable())
            fib.detach();
    }
};
// Scope-end policy: join the fiber. If the joining fiber is itself
// interrupted while waiting, propagate the interruption to the joinee and
// finish the join with interruptions disabled so it cannot be interrupted
// again.
struct join_if_joinable
{
    template<class Fiber, class Yield>
    void operator()(Fiber& fib, Yield& this_fiber)
    {
        try {
            if (fib.joinable())
                fib.join(this_fiber);
        } catch (const fiber_interrupted&) {
            fib.interrupt();
            typename Yield::disable_interruption di(this_fiber);
            boost::ignore_unused(di);
            fib.join(this_fiber);
        }
    }
};
// Scope-end policy: request interruption first, then join with
// interruptions disabled so the wait itself cannot be interrupted.
struct interrupt_and_join_if_joinable
{
    template<class Fiber, class Yield>
    void operator()(Fiber& fib, Yield& this_fiber)
    {
        fib.interrupt();
        typename Yield::disable_interruption di(this_fiber);
        boost::ignore_unused(di);
        if (fib.joinable())
            fib.join(this_fiber);
    }
};
// RAII wrapper that unconditionally applies CallableFiber (join by default)
// on destruction. "Strict" variant: the wrapped fiber cannot be accessed or
// replaced after construction.
template<class CallableFiber = join_if_joinable,
         class JoinerStrand = boost::asio::io_context::strand,
         class JoineeStrand = boost::asio::io_context::strand>
class strict_scoped_fiber: private CallableFiber
{
public:
    strict_scoped_fiber(
        basic_fiber<JoineeStrand> &&fib,
        typename basic_fiber<JoinerStrand>::this_fiber this_fiber
    )
        : fib(std::move(fib))
        , yield(std::move(this_fiber))
    {}

    ~strict_scoped_fiber()
    {
        static_cast<CallableFiber&>(*this)(fib, yield);
    }

private:
    basic_fiber<JoineeStrand> fib;
    typename basic_fiber<JoinerStrand>::this_fiber yield;
};
// Same-strand specialization: additionally allows spawning the fiber
// directly from the joiner's this_fiber in one step.
template<class CallableFiber, class Strand>
class strict_scoped_fiber<CallableFiber, Strand, Strand>: private CallableFiber
{
public:
    strict_scoped_fiber(
        basic_fiber<Strand> &&fib,
        typename basic_fiber<Strand>::this_fiber this_fiber
    )
        : fib(std::move(fib))
        , yield(std::move(this_fiber))
    {}

    // Spawns `f` on the joiner's executor and guards it immediately.
    template<class F, class StackAllocator = boost::context::default_stack>
    strict_scoped_fiber(
        typename basic_fiber<Strand>::this_fiber this_fiber,
        F&& f, StackAllocator salloc = StackAllocator()
    )
        : fib(this_fiber, std::forward<F>(f), salloc)
        , yield(std::move(this_fiber))
    {}

    ~strict_scoped_fiber()
    {
        static_cast<CallableFiber&>(*this)(fib, yield);
    }

private:
    basic_fiber<Strand> fib;
    typename basic_fiber<Strand>::this_fiber yield;
};
// Scope guard owning an optional fiber; applies CallableFiber (join by
// default) on destruction and before taking over another fiber on
// move-assignment. Unlike strict_scoped_fiber, the wrapped fiber remains
// accessible (join/detach/interrupt) and replaceable.
template<class CallableFiber = join_if_joinable,
         class JoinerStrand = boost::asio::io_context::strand,
         class JoineeStrand = boost::asio::io_context::strand>
class scoped_fiber: private CallableFiber
{
public:
    // Starts empty: `fib` is a default-constructed (detached) handle.
    scoped_fiber(typename basic_fiber<JoinerStrand>::this_fiber this_fiber)
        : yield(std::move(this_fiber))
    {}

    scoped_fiber(
        basic_fiber<JoineeStrand> &&fib,
        typename basic_fiber<JoinerStrand>::this_fiber this_fiber
    )
        : fib(std::move(fib))
        , yield(std::move(this_fiber))
    {}

    scoped_fiber(scoped_fiber&& o)
        : fib(std::move(o.fib))
        , yield(std::move(o.yield))
    {}

    ~scoped_fiber()
    {
        static_cast<CallableFiber&>(*this)(fib, yield);
    }

    // Applies the scope policy to the currently held fiber before taking
    // ownership of o's fiber; `yield` is intentionally kept.
    scoped_fiber& operator=(scoped_fiber&& o)
    {
        static_cast<CallableFiber&>(*this)(fib, yield);
        fib = std::move(o.fib);
        return *this;
    }

    bool joinable() const
    {
        return fib.joinable();
    }

    // Overload taking the caller's this_fiber only to assert it matches the
    // one captured at construction.
    template<class T>
    void join(const T& this_fiber)
    {
        static_assert(
            std::is_same<
                T, typename basic_fiber<JoinerStrand>::this_fiber
            >::value,
            ""
        );
        assert(this_fiber.pimpl_ == this->yield.pimpl_);
        boost::ignore_unused(this_fiber);
        join();
    }

    void join()
    {
        fib.join(yield);
    }

    void detach()
    {
        fib.detach();
    }

    void interrupt()
    {
        fib.interrupt();
    }

    bool interruption_caught() const
    {
        return fib.interruption_caught();
    }

private:
    basic_fiber<JoineeStrand> fib;
    typename basic_fiber<JoinerStrand>::this_fiber yield;
};
// Same-strand specialization of scoped_fiber: additionally allows spawning
// the fiber directly from the joiner's this_fiber.
template<class CallableFiber, class Strand>
class scoped_fiber<CallableFiber, Strand, Strand>: private CallableFiber
{
public:
    // Starts empty: `fib` is a default-constructed (detached) handle.
    scoped_fiber(typename basic_fiber<Strand>::this_fiber this_fiber)
        : yield(std::move(this_fiber))
    {}

    scoped_fiber(
        basic_fiber<Strand> &&fib,
        typename basic_fiber<Strand>::this_fiber this_fiber
    )
        : fib(std::move(fib))
        , yield(std::move(this_fiber))
    {}

    // Spawns `f` on the joiner's executor and guards it immediately.
    template<class F, class StackAllocator = boost::context::default_stack>
    scoped_fiber(
        typename basic_fiber<Strand>::this_fiber this_fiber,
        F&& f, StackAllocator salloc = StackAllocator()
    )
        : fib(this_fiber, std::forward<F>(f), salloc)
        , yield(std::move(this_fiber))
    {}

    scoped_fiber(scoped_fiber&& o)
        : fib(std::move(o.fib))
        , yield(std::move(o.yield))
    {}

    ~scoped_fiber()
    {
        static_cast<CallableFiber&>(*this)(fib, yield);
    }

    // Applies the scope policy to the currently held fiber before taking
    // ownership of o's fiber; `yield` is intentionally kept.
    scoped_fiber& operator=(scoped_fiber&& o)
    {
        static_cast<CallableFiber&>(*this)(fib, yield);
        fib = std::move(o.fib);
        return *this;
    }

    bool joinable() const
    {
        return fib.joinable();
    }

    // Overload taking the caller's this_fiber only to assert it matches the
    // one captured at construction.
    template<class T>
    void join(const T& this_fiber)
    {
        static_assert(
            std::is_same<
                T, typename basic_fiber<Strand>::this_fiber
            >::value,
            ""
        );
        assert(this_fiber.pimpl_ == this->yield.pimpl_);
        boost::ignore_unused(this_fiber);
        join();
    }

    void join()
    {
        fib.join(yield);
    }

    void detach()
    {
        fib.detach();
    }

    void interrupt()
    {
        fib.interrupt();
    }

    bool interruption_caught() const
    {
        return fib.interruption_caught();
    }

private:
    basic_fiber<Strand> fib;
    typename basic_fiber<Strand>::this_fiber yield;
};
// Debug aid: while this wrapper references `o`, the holding fiber is
// forbidden from suspending (enforced through forbid_suspend()/
// allow_suspend(), i.e. assert-only in debug builds). It documents and
// checks that the reference is exclusive within the strand; it does NOT
// manage `o`'s lifetime.
template<class T, class Strand = boost::asio::io_context::strand>
class assert_excl_strand_ref
{
public:
    assert_excl_strand_ref(
        T& o,
        typename basic_fiber<Strand>::this_fiber& this_fiber
    )
        : obj(nullptr)
        , this_fiber(this_fiber)
    {
        reset(o);
    }

    ~assert_excl_strand_ref()
    {
        release();
    }

    // This wrapper is always tied to a finite lexical scope. Its purpose is
    // *NOT* to manage lifetimes.
    assert_excl_strand_ref(const assert_excl_strand_ref&) = delete;
    assert_excl_strand_ref& operator=(const assert_excl_strand_ref&) = delete;

    T& operator*() const
    {
        assert(obj);
        return *obj;
    }

    T* operator->() const
    {
        assert(obj);
        return obj;
    }

    // Drops the reference and leaves the no-suspend region.
    void release()
    {
        if (obj)
            this_fiber.allow_suspend();

        obj = nullptr;
    }

    // Rebinds to another object, re-entering the no-suspend region.
    void reset(T& o)
    {
        release();
        this_fiber.forbid_suspend();
        obj = &o;
    }

private:
    T* obj;
    typename basic_fiber<Strand>::this_fiber& this_fiber;
};
// Object-less specialization: only brackets a no-suspend region for the
// fiber (reset()/release() enter and leave it), with no referenced object.
template<class Strand>
class assert_excl_strand_ref<void, Strand>
{
public:
    assert_excl_strand_ref(typename basic_fiber<Strand>::this_fiber& this_fiber)
        : obj(false)
        , this_fiber(this_fiber)
    {
        reset();
    }

    ~assert_excl_strand_ref()
    {
        release();
    }

    assert_excl_strand_ref(const assert_excl_strand_ref&) = delete;
    assert_excl_strand_ref& operator=(const assert_excl_strand_ref&) = delete;

    // Leaves the no-suspend region (no-op if already released).
    void release()
    {
        if (obj)
            this_fiber.allow_suspend();

        obj = false;
    }

    // (Re-)enters the no-suspend region.
    void reset()
    {
        release();
        this_fiber.forbid_suspend();
        obj = true;
    }

private:
    // Whether we currently hold the no-suspend bracket.
    bool obj;
    typename basic_fiber<Strand>::this_fiber& this_fiber;
};
namespace detail {
// Executor adaptor that forwards everything to the wrapped executor except
// post(), which is remapped to defer() — completion handlers resuming
// fibers are scheduled as continuations of the current handler.
template<class Executor>
class remap_post_to_defer: private Executor
{
public:
    remap_post_to_defer(const remap_post_to_defer&) = default;
    remap_post_to_defer(remap_post_to_defer&&) = default;

    explicit remap_post_to_defer(const Executor& ex)
        : Executor(ex)
    {}

    explicit remap_post_to_defer(Executor&& ex)
        : Executor(std::move(ex))
    {}

    bool operator==(const remap_post_to_defer& o) const noexcept
    {
        return static_cast<const Executor&>(*this) ==
            static_cast<const Executor&>(o);
    }

    bool operator!=(const remap_post_to_defer& o) const noexcept
    {
        return static_cast<const Executor&>(*this) !=
            static_cast<const Executor&>(o);
    }

    decltype(std::declval<Executor>().context())
    context() const noexcept
    {
        return Executor::context();
    }

    void on_work_started() const noexcept
    {
        Executor::on_work_started();
    }

    void on_work_finished() const noexcept
    {
        Executor::on_work_finished();
    }

    template<class F, class A>
    void dispatch(F&& f, const A& a) const
    {
        Executor::dispatch(std::forward<F>(f), a);
    }

    // Deliberate remap: post() behaves as defer().
    template<class F, class A>
    void post(F&& f, const A& a) const
    {
        Executor::defer(std::forward<F>(f), a);
    }

    template<class F, class A>
    void defer(F&& f, const A& a) const
    {
        Executor::defer(std::forward<F>(f), a);
    }
};
// Maps a completion signature's argument list to the value type delivered
// to the suspended fiber: no args -> void, one arg -> the arg itself,
// several args -> std::tuple of them.
template<class... Args>
struct ReturnType
{
    using type = std::tuple<Args...>;
};

template<class T>
struct ReturnType<T>
{
    using type = T;
};

template<>
struct ReturnType<>
{
    using type = void;
};

static_assert(std::is_same<typename ReturnType<>::type, void>::value, "");
static_assert(std::is_same<typename ReturnType<int>::type, int>::value, "");
static_assert(std::is_same<
    typename ReturnType<int, int>::type, std::tuple<int, int>
>::value, "");
// Moves the final result out of its storage; the void case (stored as an
// empty tuple) is a no-op.
template<class T>
struct GetImpl
{
    static T get(T &t)
    {
        return std::move(t);
    }
};

template<>
struct GetImpl<void>
{
    static void get(const std::tuple<>&) {}
};
// bind + apply {{{
template<class IntrTrait, class Strand, class T, std::size_t... Idxs>
void apply_on_result_impl(basic_this_fiber<Strand>& this_fiber,
boost::system::error_code& ec, T& args,
boost::mp11::index_sequence<Idxs...>)
{
IntrTrait::on_result(this_fiber, ec, std::get<Idxs>(args)...);
}
// Tuple-shaped result: build an index sequence and expand it.
template<class IntrTrait, class Strand, class... Args>
void apply_on_result(basic_this_fiber<Strand> this_fiber,
                     boost::system::error_code& ec, std::tuple<Args...>& args)
{
    using Idxs = boost::mp11::make_index_sequence<sizeof...(Args)>;
    apply_on_result_impl<IntrTrait>(this_fiber, ec, args, Idxs{});
}
// Scalar result: pass the value straight through to the trait.
template<class IntrTrait, class Strand, class T>
void apply_on_result(basic_this_fiber<Strand> this_fiber,
                     boost::system::error_code& ec, T& val)
{
    IntrTrait::on_result(this_fiber, ec, val);
}
// Same expansion for handler signatures without an error_code argument.
template<class IntrTrait, class Strand, class T, std::size_t... Idxs>
void apply_on_result_impl(basic_this_fiber<Strand>& this_fiber, T& args,
                          boost::mp11::index_sequence<Idxs...>)
{
    IntrTrait::on_result(this_fiber, std::get<Idxs>(args)...);
}
// Tuple-shaped result, no error_code.
template<class IntrTrait, class Strand, class... Args>
void apply_on_result(basic_this_fiber<Strand> this_fiber,
                     std::tuple<Args...>& args)
{
    using Idxs = boost::mp11::make_index_sequence<sizeof...(Args)>;
    apply_on_result_impl<IntrTrait>(this_fiber, args, Idxs{});
}
// Scalar result, no error_code.
template<class IntrTrait, class Strand, class T>
void apply_on_result(basic_this_fiber<Strand> this_fiber, T& val)
{
    IntrTrait::on_result(this_fiber, val);
}
// }}}
// No-op interruption trait used when the caller did not request one.
struct dummy_intr_trait
{
    template<class... Args>
    static void on_result(Args&&...)
    {}
};
// Primary template is only declared; the two signature-based partial
// specializations below provide the implementations.
template<class, class, class>
class fiber_async_result;
// async_result glue for handler signatures whose first argument is an
// error_code. The fiber suspends in get(); the completion handler stores
// the error_code plus remaining arguments into packed_args_type and
// resumes the fiber.
template<class Strand, class IntrTrait, class R, class... Args>
class fiber_async_result<
    Strand, IntrTrait, R(boost::system::error_code, Args...)>
{
public:
    static_assert(std::is_same<R, void>::value,
                  "completion handler return type must be void");
    using return_type = typename ReturnType<
        typename std::decay<Args>::type...>::type;
    struct completion_handler_type
    {
        // Storage the handler fills in before resuming the fiber.
        struct packed_args_type
        {
            boost::system::error_code ec;
            typename std::conditional<
                std::is_same<return_type, void>::value,
                std::tuple<>,
                return_type
            >::type ret;
        };
        using executor_type = remap_post_to_defer<Strand>;
        completion_handler_type(
            const typename trial::iofiber::basic_fiber<Strand>::this_fiber& tkn
        )
            : pimpl(tkn.pimpl_)
            , token(++tkn.pimpl_->resume_token)
            , out_ec(tkn.out_ec_)
            , executor(tkn.get_executor())
        {
#ifndef NDEBUG
            assert(tkn.suspension_disallowed == 0);
#endif // NDEBUG
            // Fail fast if the fiber was interrupted before initiating.
            if (pimpl->interruption_enabled && pimpl->interrupted.load()) {
                pimpl->interrupter = nullptr;
                throw trial::iofiber::fiber_interrupted();
            }
        }
        completion_handler_type(
            const trial::iofiber::detail::with_intr_token<Strand, IntrTrait>&
                token
        )
            : pimpl(token.pimpl)
            , token(++token.pimpl->resume_token)
            , out_ec(token.out_ec)
            , executor(token.pimpl->executor)
        {
            if (pimpl->interruption_enabled && pimpl->interrupted.load()) {
                pimpl->interrupter = nullptr;
                throw trial::iofiber::fiber_interrupted();
            }
        }
        // Multi-value completion: pack the tail arguments into a tuple.
        template<class... Tail>
        void operator()(const boost::system::error_code &ec, Tail&&... args)
        {
            assert(this->args != NULL);
            // A stale token means the fiber was already resumed by another
            // path (e.g. an interrupter); drop this completion.
            if (token != pimpl->resume_token)
                return;
            pimpl->interrupter = nullptr;
            this->args->ec = ec;
            this->args->ret = std::forward_as_tuple(args...);
            pimpl->coro = std::move(pimpl->coro).resume();
        }
        // Single-value completion: store the argument directly.
        template<class T>
        void operator()(const boost::system::error_code &ec, T &&arg)
        {
            assert(this->args != NULL);
            if (token != pimpl->resume_token)
                return;
            pimpl->interrupter = nullptr;
            this->args->ec = ec;
            this->args->ret = std::forward<T>(arg);
            pimpl->coro = std::move(pimpl->coro).resume();
        }
        executor_type get_executor() const
        {
            return executor;
        }
        std::shared_ptr<typename basic_this_fiber<Strand>::impl> pimpl;
        resume_token_type token;
        boost::system::error_code* out_ec;
        executor_type executor;
        packed_args_type* args;
    };
    explicit fiber_async_result(completion_handler_type& handler)
        : pimpl(handler.pimpl)
        , out_ec(handler.out_ec)
    {
        // Point the handler at our storage for the packed result.
        handler.args = &args;
    }
    return_type get()
    {
        // Suspend until the completion handler resumes the fiber.
        pimpl->coro = std::move(pimpl->coro).resume();
        apply_on_result<IntrTrait>(
            basic_this_fiber<Strand>{pimpl}, args.ec, args.ret);
        // Report the error through out_ec if the caller provided one,
        // otherwise convert it into an exception.
        if (out_ec)
            *out_ec = args.ec;
        else if (args.ec)
            throw boost::system::system_error(args.ec);
        return GetImpl<return_type>::get(args.ret);
    }
private:
    std::shared_ptr<typename basic_this_fiber<Strand>::impl> pimpl;
    boost::system::error_code* out_ec;
    typename completion_handler_type::packed_args_type args;
};
// async_result glue for handler signatures without a leading error_code:
// the packed result is just the (possibly tuple-shaped) value.
template<class Strand, class IntrTrait, class R, class... Args>
class fiber_async_result<Strand, IntrTrait, R(Args...)>
{
public:
    static_assert(std::is_same<R, void>::value,
                  "completion handler return type must be void");
    using return_type = typename ReturnType<
        typename std::decay<Args>::type...>::type;
    struct completion_handler_type
    {
        // No error_code slot here; void maps to an empty tuple.
        using packed_args_type = typename std::conditional<
            std::is_same<return_type, void>::value,
            std::tuple<>,
            return_type
        >::type;
        using executor_type = remap_post_to_defer<Strand>;
        completion_handler_type(
            const typename trial::iofiber::basic_fiber<Strand>::this_fiber &tkn
        )
            : pimpl(tkn.pimpl_)
            , token(++tkn.pimpl_->resume_token)
            , executor(tkn.get_executor())
        {
#ifndef NDEBUG
            assert(tkn.suspension_disallowed == 0);
#endif // NDEBUG
            // Fail fast if the fiber was interrupted before initiating.
            if (pimpl->interruption_enabled && pimpl->interrupted.load()) {
                pimpl->interrupter = nullptr;
                throw trial::iofiber::fiber_interrupted();
            }
        }
        completion_handler_type(
            const trial::iofiber::detail::with_intr_token<Strand, IntrTrait>&
                token
        )
            : pimpl(token.pimpl)
            , token(++token.pimpl->resume_token)
            , executor(token.pimpl->executor)
        {
            if (pimpl->interruption_enabled && pimpl->interrupted.load()) {
                pimpl->interrupter = nullptr;
                throw trial::iofiber::fiber_interrupted();
            }
        }
        // Multi-value completion: pack all arguments into the tuple slot.
        template<class... Args2>
        void operator()(Args2&&... args)
        {
            assert(this->args != NULL);
            // Stale token: the fiber was already resumed by another path.
            if (token != pimpl->resume_token)
                return;
            pimpl->interrupter = nullptr;
            *this->args = std::forward_as_tuple(args...);
            pimpl->coro = std::move(pimpl->coro).resume();
        }
        // Single-value completion: store the argument directly.
        template<class T>
        void operator()(T &&arg)
        {
            assert(this->args != NULL);
            if (token != pimpl->resume_token)
                return;
            pimpl->interrupter = nullptr;
            *this->args = std::forward<T>(arg);
            pimpl->coro = std::move(pimpl->coro).resume();
        }
        executor_type get_executor() const
        {
            return executor;
        }
        std::shared_ptr<typename basic_this_fiber<Strand>::impl> pimpl;
        resume_token_type token;
        executor_type executor;
        packed_args_type* args;
    };
    explicit fiber_async_result(completion_handler_type& handler)
        : pimpl(handler.pimpl)
    {
        // Point the handler at our storage for the result.
        handler.args = &args;
    }
    return_type get()
    {
        // Suspend until the completion handler resumes the fiber.
        pimpl->coro = std::move(pimpl->coro).resume();
        apply_on_result<IntrTrait>(basic_this_fiber<Strand>{pimpl}, args);
        return GetImpl<return_type>::get(args);
    }
private:
    std::shared_ptr<typename basic_this_fiber<Strand>::impl> pimpl;
    typename completion_handler_type::packed_args_type args;
};
} // namespace detail {
} // namespace iofiber {
} // namespace trial {
namespace boost {
namespace asio {
#ifndef TRIAL_IOFIBER_DISABLE_DEFAULT_INTERRUPTER
// Hook the plain fiber token into Asio's async_result machinery using the
// no-op (dummy) interruption trait.
template<class Strand, class T>
class async_result<trial::iofiber::detail::basic_this_fiber<Strand>, T>
    : public trial::iofiber::detail::fiber_async_result<
        Strand, trial::iofiber::detail::dummy_intr_trait, T>
{
public:
    explicit async_result(
        typename trial::iofiber::detail::fiber_async_result<
            Strand, trial::iofiber::detail::dummy_intr_trait, T
        >::completion_handler_type&
            handler
    )
        : trial::iofiber::detail::fiber_async_result<
            Strand, trial::iofiber::detail::dummy_intr_trait, T>(handler)
    {}
};
#endif // !defined(TRIAL_IOFIBER_DISABLE_DEFAULT_INTERRUPTER)
// Hook the interruption-aware token into Asio's async_result machinery;
// the user-supplied trait T receives the on_result callbacks.
template<class Strand, class T, class U>
class async_result<trial::iofiber::detail::with_intr_token<Strand, T>, U>
    : public trial::iofiber::detail::fiber_async_result<Strand, T, U>
{
public:
    explicit async_result(
        typename trial::iofiber::detail::fiber_async_result<
            Strand, T, U>::completion_handler_type&
            handler
    )
        : trial::iofiber::detail::fiber_async_result<Strand, T, U>(handler)
    {}
};
} // namespace asio {
} // namespace boost {
#endif // TRIAL_IOFIBER_FIBER_H
|
{"hexsha": "9a0e533f38c01ed0c31c44a17c90d1da4fc431b1", "size": 38445, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/trial/iofiber/fiber.hpp", "max_stars_repo_name": "blinktrade/iofiber", "max_stars_repo_head_hexsha": "c1d883ee0c5b4d90432a4af993469455d75dc626", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2018-12-17T14:44:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-17T01:35:17.000Z", "max_issues_repo_path": "include/trial/iofiber/fiber.hpp", "max_issues_repo_name": "vinipsmaker/iofiber", "max_issues_repo_head_hexsha": "c1d883ee0c5b4d90432a4af993469455d75dc626", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 7.0, "max_issues_repo_issues_event_min_datetime": "2018-12-13T20:31:55.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-13T21:09:45.000Z", "max_forks_repo_path": "include/trial/iofiber/fiber.hpp", "max_forks_repo_name": "blinktrade/iofiber", "max_forks_repo_head_hexsha": "c1d883ee0c5b4d90432a4af993469455d75dc626", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2019-02-05T20:35:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-20T09:42:48.000Z", "avg_line_length": 26.7722841226, "max_line_length": 119, "alphanum_fraction": 0.604786058, "num_tokens": 9349}
|
import os
import uvicorn
from fastapi import FastAPI
from fastapi.logger import logger
from pydantic import BaseModel
import numpy as np
import vaex
# Instantiate the web application; routes are registered via decorators below.
app = FastAPI()
class Data(BaseModel):
    """Request body for /predict.

    ``instances`` holds either a list of feature rows (list of lists) or a
    list of column dicts. ``parameters`` carries extra options; it is not
    read by the handlers in this file.
    """

    instances: list = []
    parameters: dict
# These will be some global parameters, essentially filling them up at startup
# (holds the deserialized vaex model state under the 'state' key).
global_items = {}
# Run at startup - basically downloads the state file
@app.on_event("startup")
def startup():
    """Load the vaex model state file once at application startup.

    Reads ``state.json`` from AIP_STORAGE_URI when set (the AI Platform
    convention), otherwise from a hard-coded GCS path for local testing.
    """
    path = os.getenv('AIP_STORAGE_URI')
    # GCS auth option passed through to vaex — presumably picks up the
    # ambient cloud credentials; verify against the vaex fs docs.
    fs_options = {'token': 'cloud'}
    if path is not None:
        logger.info(f'Loading model state from: {path}')
        path = os.path.join(path, 'state.json')
        global_items['state'] = vaex.utils.read_json_or_yaml(path, fs_options=fs_options)
    else: # for local testing
        logger.info(f'AIP_STORAGE_URI was not set - loading state from a default location')
        path ='gs://vaex-data/models/har_phones_accelerometer_2021-03-13T14:10:54/state.json'
        global_items['state'] = vaex.utils.read_json_or_yaml(path, fs_options=fs_options)
    logger.info('State successfully retrieved from GCP')
# Healthcheck
@app.get('/health')
def health():
    """Liveness probe: always responds 200 with an empty body."""
    empty_body = ''
    return empty_body
@app.post('/predict')
def predict(data: Data):
    """Run inference on the posted instances.

    Accepts either a list of rows ordered as
    [Arrival_Time, Creation_Time, x, y, z] or a list of column dicts,
    builds a vaex DataFrame, applies the model state loaded at startup,
    and returns the predictions.
    """
    instances = data.instances
    # Fix: an empty payload used to raise IndexError on instances[0];
    # treat it as invalid input like any other unrecognized format.
    if not instances:
        return {'predictions': 'invalid input format'}
    if isinstance(instances[0], list):
        # Row-major input: transpose so each row of `columns` is a feature.
        # (renamed from `data`, which shadowed the request parameter)
        columns = np.asarray(instances).T
        df = vaex.from_arrays(Arrival_Time=columns[0],
                              Creation_Time=columns[1],
                              x=columns[2],
                              y=columns[3],
                              z=columns[4])
    elif isinstance(instances[0], dict):
        # Column-dict input: one frame per instance, concatenated.
        dfs = [vaex.from_dict(instance) for instance in instances]
        df = vaex.concat(dfs)
    else:
        return {'predictions': 'invalid input format'}
    # Apply the trained state (transformations + model) without its filter.
    df.state_set(global_items['state'], set_filter=False)
    return {'predictions': df.pred_name.tolist()}
if __name__ == '__main__':
    # Run the API with uvicorn when executed as a script.
    uvicorn.run("app:app", host='0.0.0.0', port=8000, reload=False)
|
{"hexsha": "310fe5909b405e6c17d39a1d346cfe396b1e6c0f", "size": 2046, "ext": "py", "lang": "Python", "max_stars_repo_path": "medium-gcp-ai-platform/ai-platform-deploy/docker/app.py", "max_stars_repo_name": "triper1022/vaex-examples", "max_stars_repo_head_hexsha": "16c6abc9be3e12889f0bcbee5d91410a72977a81", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 65, "max_stars_repo_stars_event_min_datetime": "2019-12-09T11:46:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T08:19:11.000Z", "max_issues_repo_path": "medium-gcp-ai-platform/ai-platform-deploy/docker/app.py", "max_issues_repo_name": "triper1022/vaex-examples", "max_issues_repo_head_hexsha": "16c6abc9be3e12889f0bcbee5d91410a72977a81", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-10-16T19:53:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T13:28:10.000Z", "max_forks_repo_path": "medium-gcp-ai-platform/ai-platform-deploy/docker/app.py", "max_forks_repo_name": "triper1022/vaex-examples", "max_forks_repo_head_hexsha": "16c6abc9be3e12889f0bcbee5d91410a72977a81", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 37, "max_forks_repo_forks_event_min_datetime": "2019-12-02T08:48:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T22:24:32.000Z", "avg_line_length": 26.9210526316, "max_line_length": 93, "alphanum_fraction": 0.6363636364, "include": true, "reason": "import numpy", "num_tokens": 479}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 21 11:27:40 2019
@author: ott
"""
#%% Load stuff
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tg_set_globalplotting import tg_set_globalplotting
dat = pd.read_csv('../Results/preprocessed_results.csv')
tg_set_globalplotting(style='frontiers')
#%% Offers statistics
# Number of main-task trials (phases > 1) for subject 1.
n_trials = np.sum( (dat['phase'] > 1) & (dat['subject'] == 1) )
# whole experiment
n_offer = np.zeros(4)
for o in range(4):
    n_offer[o] = np.sum( (dat['offer'] == o+1) & (dat['phase'] > 1) & (dat['subject'] == 1) )
# trialwise
n_offer_t = np.zeros((4,15))
for o in range(4):
    for t in range(15):
        n_offer_t[o,t] = np.sum( (dat['offer'] == o+1) & (dat['phase'] > 1) & (dat['subject'] == 1) & (dat['trial'] == t+1))
# miniblockwise
# 60 miniblocks = 20 blocks x 3 phases (phases 2-4).
n_offer_b = np.zeros((4,60))
for o in range(4):
    idx_b = 0
    for b in range(20):
        for p in range(3):
            n_offer_b[o,idx_b] = np.sum( (dat['offer'] == o+1) & (dat['phase'] == p+2) & (dat['subject'] == 1) & (dat['block'] == b+1))
            idx_b+=1
# difficulty-wise
# j indexes start_condition pairs (1/2, 3/4, 5/6); plotted later as
# easy/medium/hard.
n_offer_j = np.zeros((4,3))
for o in range(4):
    for j in range(2,-1,-1):
        n_offer_j[o,j] = np.sum( (dat['offer'] == o+1) & (dat['phase'] > 1) & (dat['subject'] == 1) & ( (dat['start_condition'] == 1 + 2*j)|(dat['start_condition'] == 2 + 2*j)) )
# difficulty-wise & trialwise
n_offer_jt = np.zeros((4,3,15))
for o in range(4):
    for j in range(2,-1,-1):
        for t in range(15):
            n_offer_jt[o,j,t] = np.sum( (dat['offer'] == o+1) & (dat['phase'] > 1) & (dat['subject'] == 1) & ( (dat['start_condition'] == 1 + 2*j)|(dat['start_condition'] == 2 + 2*j)) & (dat['trial'] == t+1) )
# difficulty-wise & blockwise
# idx_z only advances for non-empty (block, phase) cells, compacting each
# difficulty's counts into the first slots of the 20-wide axis.
n_offer_jb = np.zeros((4,3,20))
for o in range(4):
    for j in range(2,-1,-1):
        idx_b = 0
        idx_z = 0
        for b in range(20):
            for p in range(3):
                if np.sum( (dat['offer'] == o+1) & (dat['phase'] == p + 2) & (dat['subject'] == 1) & ( (dat['start_condition'] == 1 + 2*j)|(dat['start_condition'] == 2 + 2*j)) & (dat['block'] == b+1) ) > 0:
                    n_offer_jb[o,j,idx_z] = np.sum( (dat['offer'] == o+1) & (dat['phase'] == p + 2) & (dat['subject'] == 1) & ( (dat['start_condition'] == 1 + 2*j)|(dat['start_condition'] == 2 + 2*j)) & (dat['block'] == b+1) )
                    idx_z+=1
                idx_b+=1
#%% Draw histograms
#%% n_offer
# Total count per offer type over the whole experiment.
titles = ['A','B','Ab','aB']
fig,axes = plt.subplots(ncols = 1,nrows = 1, figsize = (3.3,2.3))
axes.bar(range(4), n_offer)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.xaxis.set_tick_params(top='off', direction='out', width=1)
axes.yaxis.set_tick_params(right='off', direction='out', width=1)
axes.set_ylim(0,250)
axes.set_yticks(range(0,250,25))
axes.set_xticks(range(4))
axes.set_xticklabels(titles )
axes.set_ylabel('Number')
axes.set_xlabel('Offer')
plt.tight_layout()
#fig.savefig('n_offer.png', dpi=300, bbox_inches='tight', transparent=True)
#%% n_offer_t
# One panel per offer type; counts across the 15 trials of a miniblock.
titles = ['A','B','Ab','aB']
fig,ax = plt.subplots(ncols = 2,nrows = 2, figsize = (7,4))
for i, axes in enumerate(ax.flat):
    axes.bar(range(15), n_offer_t[i,:])
    axes.set_title(titles[i])
    axes.spines['top'].set_visible(False)
    axes.spines['right'].set_visible(False)
    axes.xaxis.set_tick_params(top='off', direction='out', width=1)
    axes.yaxis.set_tick_params(right='off', direction='out', width=1)
    axes.set_ylim(0,25)
    axes.set_xticks(range(0,15,2))
    axes.set_xticklabels(range(1,16,2) )
    axes.set_ylabel('Number')
    axes.set_xlabel('Trial')
plt.tight_layout()
#fig.savefig('n_offer_t.png', dpi=300, bbox_inches='tight', transparent=True)
#%% n_offer_b
# One panel per offer type; counts across the 60 miniblocks.
titles = ['A','B','Ab','aB']
k=1
fig,ax = plt.subplots(ncols = 2,nrows = 2, figsize = (7*k,4*k))
for i, axes in enumerate(ax.flat):
    axes.bar(range(60), n_offer_b[i,:],width = 0.6)
    axes.set_title(titles[i])
    axes.spines['top'].set_visible(False)
    axes.spines['right'].set_visible(False)
    axes.xaxis.set_tick_params(top='off', direction='out', width=1)
    axes.yaxis.set_tick_params(right='off', direction='out', width=1)
    axes.set_ylim(0,8)
    axes.set_xlim(0,61)
    axes.set_xticks(range(4,64,5))
    axes.set_xticklabels(range(5,65,5) )
    axes.set_ylabel('Number')
    axes.set_xlabel('Miniblock')
plt.tight_layout()
#fig.savefig('n_offer_b.png', dpi=300, bbox_inches='tight', transparent=True)
#%% n_offer_j
# One panel per difficulty; counts per offer type.
titles = ['A','B','Ab','aB']
conds = ['easy', 'medium', 'hard']
k=1
fig,ax = plt.subplots(ncols = 3,nrows = 1, figsize = (7*k,1.8*k))
for i, axes in enumerate(ax.flat):
    axes.bar(range(4), n_offer_j[:,i])
    axes.set_title(conds[i])
    axes.spines['top'].set_visible(False)
    axes.spines['right'].set_visible(False)
    axes.xaxis.set_tick_params(top='off', direction='out', width=1)
    axes.yaxis.set_tick_params(right='off', direction='out', width=1)
    axes.set_ylim(0,85)
    axes.set_xticks(range(4))
    axes.set_xticklabels(titles)
    axes.set_ylabel('Number')
    axes.set_xlabel('Offer')
plt.tight_layout()
#fig.savefig('n_offer_j.png', dpi=300, bbox_inches='tight', transparent=True)
#%% n_offer_jt
# 3x4 grid: rows = difficulty (easy/medium/hard), columns = offer type,
# counts per trial.
titles = ['A','B','Ab','aB']
colors =['red', 'green', 'blue']
fig,ax = plt.subplots(ncols = 4,nrows = 3, figsize = (7,4))
idx = 0
for i, axes in enumerate(ax.flat):
    if i < 4:
        axes.bar(range(15), n_offer_jt[i,0,:])
        axes.set_title(titles[i])
    elif (i >=4) & (i < 8) :
        axes.bar(range(15), n_offer_jt[i-4,1,:])
    else:
        axes.bar(range(15), n_offer_jt[i-8,2,:])
        axes.set_xlabel('Trial')
    if i == 0:
        axes.set_ylabel('easy\nNumber')
    elif i == 4:
        axes.set_ylabel('medium\nNumber')
    elif i == 8:
        axes.set_ylabel('hard\nNumber')
    axes.spines['top'].set_visible(False)
    axes.spines['right'].set_visible(False)
    axes.xaxis.set_tick_params(top='off', direction='out', width=1)
    axes.yaxis.set_tick_params(right='off', direction='out', width=1)
    axes.set_ylim(0,12)
    axes.set_xticks(range(0,15,2))
    axes.set_xticklabels(range(1,16,2) )
plt.tight_layout()
#fig.savefig('n_offer_jt.png', dpi=300, bbox_inches='tight', transparent=True)
#%% n_offer_jb
# Same layout as above but counts per (compacted) miniblock.
titles = ['A','B','Ab','aB']
colors =['red', 'green', 'blue']
fig,ax = plt.subplots(ncols = 4,nrows = 3, figsize = (7,4))
idx = 0
for i, axes in enumerate(ax.flat):
    if i < 4:
        axes.bar(range(20), n_offer_jb[i,0,:])
        axes.set_title(titles[i])
    elif (i >=4) & (i < 8) :
        axes.bar(range(20), n_offer_jb[i-4,1,:])
    else:
        axes.bar(range(20), n_offer_jb[i-8,2,:])
        axes.set_xlabel('Miniblock')
    if i == 0:
        axes.set_ylabel('easy\nNumber')
    elif i == 4:
        axes.set_ylabel('medium\nNumber')
    elif i == 8:
        axes.set_ylabel('hard\nNumber')
    axes.spines['top'].set_visible(False)
    axes.spines['right'].set_visible(False)
    axes.xaxis.set_tick_params(top='off', direction='out', width=1)
    axes.yaxis.set_tick_params(right='off', direction='out', width=1)
    axes.set_ylim(0,12)
    axes.set_xticks(range(0,20,2))
    axes.set_xticklabels(range(1,21,2) )
plt.tight_layout()
#fig.savefig('n_offer_jb.png', dpi=300, bbox_inches='tight', transparent=True)
|
{"hexsha": "ef78f8b7e365309673a677f9012562ffce66a72e", "size": 7430, "ext": "py", "lang": "Python", "max_stars_repo_path": "Code/tg_offer_stats.py", "max_stars_repo_name": "fmott/two_goal_task", "max_stars_repo_head_hexsha": "b2a7948ced432b48566d8d00e97e83a65fba7ca5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Code/tg_offer_stats.py", "max_issues_repo_name": "fmott/two_goal_task", "max_issues_repo_head_hexsha": "b2a7948ced432b48566d8d00e97e83a65fba7ca5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Code/tg_offer_stats.py", "max_forks_repo_name": "fmott/two_goal_task", "max_forks_repo_head_hexsha": "b2a7948ced432b48566d8d00e97e83a65fba7ca5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.731277533, "max_line_length": 239, "alphanum_fraction": 0.5959623149, "include": true, "reason": "import numpy", "num_tokens": 2400}
|
# Detect whether we are running inside a DomTerm terminal by invoking the
# `domterm is-domterm` CLI probe; sets the global `isDomterm` flag.
function check_domterm()
    global isDomterm
    isDomterm = true
    try
        if Sys.isunix()
            run(`domterm is-domterm`)
        else
            # NOTE(review): on non-Unix no probe is run, so `isDomterm`
            # stays true here — confirm this is intended.
            println("Domterm can only run in Unix or WSL")
        end
    catch err
        # `run` throws when the command exits nonzero, i.e. not in DomTerm.
        println("Not running in domterm\n")
        isDomterm = false
    end
end
"""add a CSS style rule to domterm

Example ::
```Julia
dt_addstyle("table {border: 1px solid}")
dt_addstyle("table td{border-color: red}")
```
The above functions set all tables in the current DomTerm session
to have 1px-width border and red for the inner-border color.
"""
function dt_addst(rule1::String, rules...)
    # Concatenate all rules into one space-separated argument string.
    args = rule1*" "*join(rules, ' ')
    # `read` runs the command and captures its (unused) output.
    r = String(read(`domterm add-style $args`))
    return nothing
end
# Run `domterm` with the given arguments joined into one string.
#
# Fix: the original called `args.join(' ')`, which is Python syntax and
# raises an error in Julia (tuples have no `join` field); `join(args, ' ')`
# is the Julia equivalent.
# NOTE(review): `$arg` is handed to `domterm` as a single shell word
# (Cmd interpolation does not word-split) — confirm the CLI expects that.
function dt_settings(args...)
    arg = join(args, ' ')
    run(`domterm $arg`)
    return nothing
end
# Toggle DomTerm's reverse-video mode on (default) or off.
function dt_reversevideo(turn_on::Bool=true)
    run(`domterm reverse-video $(turn_on ? "on" : "off")`)
    return nothing
end
# Return DomTerm's status report as a String.
function dt_status()
    return read(`domterm status`, String)
end
# Return DomTerm's session list as a String.
function dt_list()
    return read(`domterm list`, String)
end
# Return the list of loaded stylesheets as a String.
function dt_listst()
    return read(`domterm list-stylesheets`, String)
end
# Return the contents of stylesheet number `i` as a String.
function dt_printst(i)
    return read(`domterm print-stylesheet $i`, String)
end
using Base64: Base64EncodePipe
# Default inline CSS applied to every image rendered by `imagecat`.
const img_style="display:block; margin:auto;"
# Display an image file inline in DomTerm.
#
# Reads `fn`, base64-encodes its bytes, and emits DomTerm's OSC 72 escape
# sequence containing an <img> data URI of the given MIME type. Extra CSS
# can be appended to the default style via `attr`.
function imagecat(mime::AbstractString, fn::AbstractString; attr="")
    at = img_style
    if !isempty(attr); at *= attr; end
    x = open(fn) do io
        read(io)
    end # x is Vector{UInt8}
    buf = IOBuffer()
    b64 = Base64EncodePipe(buf)
    write(b64, x)
    close(b64)
    uri = String(take!(buf))
    print("\e]72;<img style=\" $(at) \" src='data:$(mime);base64,", uri, "'/>\a")
end
# Convenience wrappers for common image MIME types.
svgcat(fn::AbstractString; attr="") = imagecat("image/svg+xml", fn; attr)
pngcat(fn::AbstractString; attr="") = imagecat("image/png", fn; attr)
jpegcat(fn::AbstractString; attr="") = imagecat("image/jpeg", fn; attr)
gifcat(fn::AbstractString; attr="") = imagecat("image/gif", fn; attr)
# Emit raw HTML through DomTerm's OSC 72 escape; `hput` is an alias.
raw_htmlcat(html_content::String) = print("\e]72;"*html_content*"\a")
hput(html_content::String) = raw_htmlcat(html_content)
# Display an HTML file inline in DomTerm, stripping constructs DomTerm
# does not recognize (XML prolog, doctype, MathML <mfenced> tags).
function htmlcat(fn::AbstractString)
    x = String(open(io -> read(io), fn))
    # remove elements not recognized by domterm
    x = replace(x, r"<\?xml.*?>"s=>"")
    x = replace(x, r"<!DOCTYPE.*?>"s=>"")
    x = replace(x, r"<!doctype.*?>"s=>"")
    x = replace(x, r"<mfenced.*?>"s=>"")
    x = replace(x, r"</mfenced>"s=>"")
    #x = replace(x, r"\n"=>"") # strip extra spaces off the output
    raw_htmlcat(x)
end
# Clear the display; ED parameter 7 is presumably a DomTerm extension of
# erase-in-display — confirm against the DomTerm docs.
dt_clear() = println("\e[7J")
# Copied from DomTerm documentation
# "\e]721;" key ";" html-text "\a"
#
# Replace previously-inserted HTML. Looks for the latest element (in document
# order) whose class includes can-replace-children and that has a
# replace-key="key" attribute. Essentially sets the innerHTML to html-text
# (after safety-scrubbing).
# Dispatch table used by the `@dt` macro: command symbol => implementation.
const dt_cmd = Dict(
    :reversevid => dt_reversevideo,
    :clear => dt_clear,
    :status => dt_status,
    :list => dt_list,
    :listst => dt_listst,
    :printst => dt_printst,
    :addst => dt_addst,
    :set => dt_settings,
    :jpegcat => jpegcat,
    :pngcat => pngcat,
    :svgcat => svgcat,
    :gifcat => gifcat,
    :htmlcat => htmlcat
)
# `@dt cmd args...` looks up `cmd` in `dt_cmd` and invokes it, printing a
# non-`nothing` result.
#
# NOTE(review): the call is `eval`ed at macro-expansion time instead of
# being returned as an expression, which only behaves as expected for
# top-level/REPL use — confirm this is intentional.
macro dt(cmd, args...)
    if isempty(args)
        expr = Expr(:call, dt_cmd[cmd])
    else
        expr = Expr(:call, dt_cmd[cmd], args...)
    end
    r = eval(expr)
    r !== nothing && println(r)
    return nothing
end
# get the size in pixel of the current terminal
# Thin wrappers over the Julia runtime's global I/O lock.
iolock_begin() = ccall(:jl_iolock_begin, Cvoid, ())
iolock_end() = ccall(:jl_iolock_end, Cvoid, ())
# Switch the TTY between raw and cooked mode (while holding the I/O lock),
# via the runtime's jl_tty_set_mode. The Int32 result is ignored.
function tty_set_raw!(io::IO, isRaw::Bool)
    iolock_begin()
    ccall(:jl_tty_set_mode, Int32, (Ptr{Cvoid},Int32), io.handle::Ptr{Cvoid}, isRaw)
    iolock_end()
end
# Query the terminal for its window size in pixels using the xterm
# "report window size" control (CSI 14 t) and parse the reply of the form
# `ESC [ 4 ; <field1> ; <field2> t`.
#
# Returns `(h, w)` parsed from the second and first fields respectively.
# NOTE(review): per the xterm ctlseqs spec the reply is
# `CSI 4 ; height ; width t`, so the first field (stored in `vw`) is the
# height — verify the returned (h, w) order against callers.
#
# Fix: the original dereferenced `findfirst(...).start` BEFORE checking
# for `nothing`, so a malformed reply crashed with a MethodError instead
# of raising the intended "malformed xterm report" error.
function dt_winsizex()
    tty_set_raw!(stdin, true)
    write(stdout, "\e[14t")
    out = Vector{UInt8}()
    # Collect the reply byte-by-byte until the terminating 't'.
    while true
        ch = read(stdin, Char)
        push!(out, ch)
        ch == 't' && break
    end
    tty_set_raw!(stdin, false)
    vh = Vector{UInt8}()
    vw = Vector{UInt8}()
    sep = findfirst([UInt8(';')], out)
    sep === nothing && error("malformed xterm report")
    i = first(sep) + 1
    # First numeric field (up to the second ';').
    while out[i] != UInt8(';')
        push!(vw, out[i])
        i += 1
    end
    i += 1
    # Second numeric field (up to the final 't').
    while out[i] != UInt8('t')
        push!(vh, out[i])
        i += 1
    end
    w = parse(Int, String(vw), base=10)
    h = parse(Int, String(vh), base=10)
    return h, w
end
|
{"hexsha": "c6f0be8d6a9fedbde08e8491c3a89fe43272fd04", "size": 4616, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/utilities.jl", "max_stars_repo_name": "jingyuewang/DomTerm.jl", "max_stars_repo_head_hexsha": "c5d169644227873dea6a58731884f133ddcdafa2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/utilities.jl", "max_issues_repo_name": "jingyuewang/DomTerm.jl", "max_issues_repo_head_hexsha": "c5d169644227873dea6a58731884f133ddcdafa2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utilities.jl", "max_forks_repo_name": "jingyuewang/DomTerm.jl", "max_forks_repo_head_hexsha": "c5d169644227873dea6a58731884f133ddcdafa2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.167539267, "max_line_length": 84, "alphanum_fraction": 0.6087521664, "num_tokens": 1368}
|
import numpy as np
import tensorflow as tf
from libs.utils.calc_ious import bbox_giou, bbox_iou
__all__ = ['get_losses']
def get_losses(pred_raw, pred_decoded, label, bboxes, stride, iou_loss_thr, num_classes):
    """Compute the three YOLOv3 loss terms for one detection scale.

    Args:
        pred_raw: raw network output; reshaped below to
            (batch, grid, grid, 3, 5 + num_classes).
        pred_decoded: decoded yolo output (xywh, conf, class probs) with
            the same grid layout.
        label: ground-truth tensor with the same (..., 5 + num_classes)
            layout as pred_decoded.
        bboxes: per-image ground-truth boxes,
            shape (batch_size, max_num_bboxes_per_scale, 4).
        stride: downsampling factor of this scale (input = stride * grid).
        iou_loss_thr: max-IoU threshold below which an unmatched
            prediction is treated as background.
        num_classes: number of object classes.

    Returns:
        Tuple (giou_loss, conf_loss, prob_loss) of scalar tensors, each a
        batch mean of per-image sums.
    """
    batch_size, grid_size = pred_raw.shape[0], pred_raw.shape[1]
    input_size = tf.cast(stride * grid_size, tf.float32)
    pred_raw = tf.reshape(pred_raw, (batch_size, grid_size, grid_size, 3, 5+num_classes))
    # Raw logits feed the cross-entropy terms; decoded values feed the
    # IoU-based terms.
    pred_raw_conf = pred_raw[:, :, :, :, 4:5]
    pred_raw_prob = pred_raw[:, :, :, :, 5:]
    pred_decoded_xywh = pred_decoded[:, :, :, :, 0:4]
    pred_decoded_conf = pred_decoded[:, :, :, :, 4:5]
    label_xywh = label[:, :, :, :, 0:4]
    label_conf = label[:, :, :, :, 4:5]
    label_prob = label[:, :, :, :, 5:]
    giou = tf.expand_dims(bbox_giou(pred_decoded_xywh, label_xywh), axis=-1)
    # Weight small boxes more heavily: 2 - (box area / image area).
    bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (input_size ** 2)
    giou_loss = label_conf * bbox_loss_scale * (1 - giou)
    # For each predicted box, find its highest IoU with any ground-truth box.
    ## pred_decoded_xywh.shape: [batch_size, y_idx, x_idx, num_scales, 4]
    ## bboxes.shape: [batch_size, max_num_bboxes_per_scale, 4]
    iou = bbox_iou(pred_decoded_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])
    ## iou.shape: [batch_size, y_idx, x_idx, num_scales, max_num_bboxes_per_scale]
    max_iou = tf.expand_dims(tf.reduce_max(iou, axis=-1), axis=-1)
    # If the largest IoU is below the threshold, the prediction box is
    # considered to contain no object, i.e. background.
    respond_bg = (1.0 - label_conf) * tf.cast(max_iou < iou_loss_thr, tf.float32)
    # Focal-style modulation of the confidence loss.
    conf_focal = tf.pow(label_conf - pred_decoded_conf, 2)
    # Confidence loss: push confidence toward 1 where an object is present
    # and toward 0 for background cells.
    conf_loss = conf_focal * (
        label_conf * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_conf, logits=pred_raw_conf)
        +
        respond_bg * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_conf, logits=pred_raw_conf)
    )
    # Classification loss, only where an object is present.
    prob_loss = label_conf * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_prob, logits=pred_raw_prob)
    # Sum over grid cells/anchors, then average over the batch.
    giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis=[1, 2, 3, 4]))
    conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis=[1, 2, 3, 4]))
    prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis=[1, 2, 3, 4]))
    return giou_loss, conf_loss, prob_loss
|
{"hexsha": "5a62dec31daa22b5e5093a4b232c09db330b5df1", "size": 2694, "ext": "py", "lang": "Python", "max_stars_repo_path": "libs/losses.py", "max_stars_repo_name": "devbruce/yolov3-tf2", "max_stars_repo_head_hexsha": "2361685cc8a12f341de441bb3462eb3e7a825bc4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libs/losses.py", "max_issues_repo_name": "devbruce/yolov3-tf2", "max_issues_repo_head_hexsha": "2361685cc8a12f341de441bb3462eb3e7a825bc4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/losses.py", "max_forks_repo_name": "devbruce/yolov3-tf2", "max_forks_repo_head_hexsha": "2361685cc8a12f341de441bb3462eb3e7a825bc4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.6610169492, "max_line_length": 140, "alphanum_fraction": 0.6744617669, "include": true, "reason": "import numpy", "num_tokens": 780}
|
import numpy as np
from scipy.io import savemat
from pandas.api.types import is_datetime64_any_dtype
from neslter.parsing.utils import datetime_to_datenum
def df_to_mat(df, filename, convert_dates=True):
    """Write a DataFrame to a MATLAB .mat file, one variable per column.

    Datetime columns are converted to MATLAB datenums when
    ``convert_dates`` is True; every other column is stored as an array.
    """
    variables = {}
    for column in df.columns:
        series = df[column]
        if convert_dates and is_datetime64_any_dtype(series):
            converted = [datetime_to_datenum(value) for value in series]
        else:
            converted = series
        variables[column] = np.array(converted)
    savemat(filename, variables)
|
{"hexsha": "6024f2b7e995f78619977129323379bc5bac12ee", "size": 473, "ext": "py", "lang": "Python", "max_stars_repo_path": "nlweb/api/utils.py", "max_stars_repo_name": "WHOIGit/nes-lter-ims", "max_stars_repo_head_hexsha": "d4cc96c10da56ca33286af84d669625b67170522", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-01-24T16:32:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-05T02:18:12.000Z", "max_issues_repo_path": "nlweb/api/utils.py", "max_issues_repo_name": "WHOIGit/nes-lter-ims", "max_issues_repo_head_hexsha": "d4cc96c10da56ca33286af84d669625b67170522", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 45, "max_issues_repo_issues_event_min_datetime": "2019-05-23T15:15:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T14:09:20.000Z", "max_forks_repo_path": "nlweb/api/utils.py", "max_forks_repo_name": "WHOIGit/nes-lter-ims", "max_forks_repo_head_hexsha": "d4cc96c10da56ca33286af84d669625b67170522", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5625, "max_line_length": 62, "alphanum_fraction": 0.6828752643, "include": true, "reason": "import numpy,from scipy", "num_tokens": 114}
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))
os.environ["PYOPENGL_PLATFORM"] = "egl" #opengl seems to only work with TPU
sys.path.insert(0,'third_party')
import subprocess
import imageio
import glob
from ext_utils.badja_data import BADJAData
from ext_utils.joint_catalog import SMALJointInfo
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
import pdb
import soft_renderer as sr
import argparse
import trimesh
from nnutils.geom_utils import obj_to_cam, pinhole_cam
import pyrender
from pyrender import IntrinsicsCamera,Mesh, Node, Scene,OffscreenRenderer
import configparser
# Command-line interface for the mesh-sequence renderer.  All flags are read
# at import time into the module-level `args`, which main() consumes.
parser = argparse.ArgumentParser(description='render mesh')
parser.add_argument('--testdir', default='',
                    help='path to test dir')
parser.add_argument('--seqname', default='camel',
                    help='sequence to test')
parser.add_argument('--watertight', default='no',
                    help='watertight remesh')
parser.add_argument('--outpath', default='/data/gengshay/output.gif',
                    help='output path')
parser.add_argument('--cam_type', default='perspective',
                    help='camera model, orthographic or perspective')
parser.add_argument('--append_img', default='no',
                    help='whether append images before the seq')
parser.add_argument('--append_render', default='yes',
                    help='whether append renderings')
parser.add_argument('--nosmooth', dest='smooth', action='store_false',
                    help='whether to smooth vertex colors and positions')
parser.add_argument('--gray', dest='gray', action='store_true',
                    help='whether to render with gray texture')
parser.add_argument('--overlay', dest='overlay',action='store_true',
                    help='whether to overlay with the input')
parser.add_argument('--vis_bones', dest='vis_bones',action='store_true',
                    help='whether show transparent surface and vis bones')
parser.add_argument('--freeze', dest='freeze', action='store_true',
                    help='freeze object at frist frame')
args = parser.parse_args()
# SoftRas soft renderer created once at import time; main() overwrites
# renderer_softflf.rasterizer.image_size per frame before use.
renderer_softflf = sr.SoftRenderer(image_size=256,dist_func='hard' ,aggr_func_alpha='hard',
               camera_mode='look_at',perspective=False, aggr_func_rgb='hard',
               light_mode='vertex', light_intensity_ambient=1.,light_intensity_directionals=0.)
def preprocess_image(img,mask,imgsize):
    """Derive a square crop around the mask's foreground.

    Returns (alp, refpp, center, half_size): the crop scale relative to
    `imgsize`, the crop centre normalised to [-1, 1], the (x, y) centre in
    pixels, and the half side length of the square crop.
    """
    # Promote a grayscale image to three channels.
    if len(img.shape) == 2:
        img = np.repeat(np.expand_dims(img, 2), 3, axis=2)
    # Bring the mask to the image resolution if it differs; nearest-neighbour
    # resampling keeps it binary.  Keep a single channel afterwards.
    if mask.shape[0] != img.shape[0] or mask.shape[1] != img.shape[1]:
        mask = cv2.resize(mask, img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)[:, :, None]
        mask = mask[:, :, :1]
    # Bounding box of the foreground pixels (rows = y, cols = x).
    yid, xid = np.where(mask > 0)[:2]
    center = ((xid.max() + xid.min()) // 2, (yid.max() + yid.min()) // 2)
    # Square half-size: 1.2x the larger half-extent of the bounding box.
    half = int(1.2 * max((xid.max() - xid.min()) // 2, (yid.max() - yid.min()) // 2))
    # Crop scale relative to the target image size.
    alp = 2 * half / float(imgsize)
    # Crop centre ("principal point") normalised to [-1, 1].
    refpp = np.asarray(center) / (imgsize / 2.) - 1
    return alp, refpp, center, half
def draw_joints_on_image(rgb_img, joints, visibility, region_colors, marker_types):
    """Return a copy of rgb_img with a marker drawn at every visible joint.

    `joints` is given in (i, j) = (row, col) order and is flipped to the
    (x, y) order OpenCV drawing functions expect.
    """
    xy_joints = joints[:, ::-1]
    canvas = rgb_img.copy()
    for pt, is_visible, colour, marker in zip(xy_joints, visibility, region_colors, marker_types):
        if not is_visible:
            continue
        pt = pt.astype(int)
        cv2.drawMarker(canvas, tuple(pt), colour.tolist(), marker, 30, thickness = 10)
    return canvas
def remesh(mesh):
    """Re-mesh via the external Manifold tool (used when --watertight yes).

    Exports the mesh to tmp/input.obj, runs ./Manifold/build/manifold with a
    10000-face target, and reloads the result.  Requires a writable ./tmp
    directory and the compiled manifold binary; raises CalledProcessError if
    the tool fails.  Presumably produces a watertight mesh — confirm with the
    Manifold tool's docs.
    """
    mesh.export('tmp/input.obj')
    # Echo the tool's stdout so remeshing problems show up in the log.
    print(subprocess.check_output(['./Manifold/build/manifold', 'tmp/input.obj', 'tmp/output.obj', '10000']))
    mesh = trimesh.load('tmp/output.obj',process=False)
    return mesh
def main():
    """Render the reconstructed mesh sequence to an animated GIF at args.outpath.

    Per-frame inputs read from args.testdir: predicted meshes (pred<fr>.ply or
    .obj), cameras (cam<fr>.txt) and bone proxies (gauss<fr>.ply).  Frame
    selection (datapath, init/end frame, stride) comes from
    configs/<seqname>.config.  Depending on flags, renders overlay/gray/
    transparent-with-bones variants, optionally with a floor mesh, and with
    --freeze spins the first frame's object through 360 degrees.
    """
    print(args.testdir)
    # store all the data
    all_anno = []
    all_mesh = []
    all_bone = []
    all_cam = []
    all_fr = []
    config = configparser.RawConfigParser()
    config.read('configs/%s.config'%args.seqname)
    datapath = str(config.get('data', 'datapath'))
    init_frame = int(config.get('data', 'init_frame'))
    end_frame = int(config.get('data', 'end_frame'))
    dframe = int(config.get('data', 'dframe'))
    # Load each selected frame: RGB, silhouette, and (when present) the
    # predicted mesh, camera and bones for that frame index.
    for name in sorted(glob.glob('%s/*'%datapath))[init_frame:end_frame][::dframe]:
        rgb_img = cv2.imread(name)
        sil_img = cv2.imread(name.replace('JPEGImages', 'Annotations').replace('.jpg', '.png'),0)[:,:,None]
        all_anno.append([rgb_img,sil_img,0,0,name])
        seqname = name.split('/')[-2]
        fr = int(name.split('/')[-1].split('.')[-2])
        all_fr.append(fr)
        print('%s/%d'%(seqname, fr))
        try:
            try:
                mesh = trimesh.load('%s/pred%d.ply'%(args.testdir, fr),process=False)
            except:
                mesh = trimesh.load('%s/pred%d.obj'%(args.testdir, fr),process=False)
            trimesh.repair.fix_inversion(mesh)
            if args.watertight=='yes':
                mesh = remesh(mesh)
            if args.gray:
                mesh.visual.vertex_colors[:,:3]=64
            if args.overlay:
                # Pure blue texture so the render is distinguishable on top
                # of the input image.
                mesh.visual.vertex_colors[:,:2]=0
                mesh.visual.vertex_colors[:,2]=255
            all_mesh.append(mesh)
            cam = np.loadtxt('%s/cam%d.txt'%(args.testdir,fr))
            all_cam.append(cam)
            all_bone.append(trimesh.load('%s/gauss%d.ply'%(args.testdir, fr),process=False))
        except: print('no mesh found')
    # add bones?
    # When visualising bones, make the surface translucent and append the
    # bone mesh; remember the original vertex/face counts so the two parts
    # can be split apart again at render time.
    num_original_verts = []
    num_original_faces = []
    if args.vis_bones:
        for i in range(len(all_mesh)):
            all_mesh[i].visual.vertex_colors[:,-1]=192 # alpha
            num_original_verts.append( all_mesh[i].vertices.shape[0])
            num_original_faces.append( all_mesh[i].faces.shape[0] )
            all_mesh[i] = trimesh.util.concatenate([all_mesh[i], all_bone[i]])
    # store all the results
    input_size = all_anno[0][0].shape[:2]
    output_size = (int(input_size[0] * 480/input_size[1]), 480)# 270x480
    frames=[]
    # Optionally prepend raw input frames before the rendered sequence.
    if args.append_img=="yes":
        if args.append_render=='yes':
            if args.freeze: napp_fr = 30
            else: napp_fr = int(len(all_anno)//5)
            for i in range(napp_fr):
                frames.append(cv2.resize(all_anno[0][0],output_size[::-1])[:,:,::-1])
        else:
            for i in range(len(all_anno)):
                #frames.append(cv2.resize(all_anno[i][1],output_size[::-1])*255) # silhouette
                frames.append(cv2.resize(all_anno[i][0],output_size[::-1])[:,:,::-1]) # frame
                #strx = sorted(glob.glob('%s/*'%datapath))[init_frame:end_frame][::dframe][i]# flow
                #strx = strx.replace('JPEGImages', 'FlowBW')
                #flowimg = cv2.imread('%s/vis-%s'%(strx.rsplit('/',1)[0],strx.rsplit('/',1)[1]))
                #frames.append(cv2.resize(flowimg,output_size[::-1])[:,:,::-1])
    # Directional light tilted by theta around the x-axis.
    theta = 7*np.pi/9
    light_pose = np.asarray([[1,0,0,0],[0,np.cos(theta),-np.sin(theta),0],[0,np.sin(theta),np.cos(theta),0],[0,0,0,1]])
    if args.freeze:
        size = 150
    else:
        size = len(all_anno)
    for i in range(size):
        if args.append_render=='no':break
        # render flow between mesh 1 and 2
        if args.freeze:
            # Turntable mode: normalise the first mesh and rotate the camera
            # around it by 2*pi*i/size.
            print(i)
            refimg = all_anno[0][0]
            img_size = max(refimg.shape)
            refmesh = all_mesh[0]
            refmesh.vertices -= refmesh.vertices.mean(0)[None]
            refmesh.vertices /= 1.2*np.abs(refmesh.vertices).max()
            refcam = all_cam[0].copy()
            refcam[:3,:3] = refcam[:3,:3].dot(cv2.Rodrigues(np.asarray([0.,-i*2*np.pi/size,0.]))[0])
            refcam[:2,3] = 0  # trans xy
            refcam[2,3] = 20 # depth
            if args.cam_type=='perspective':
                refcam[3,2] = refimg.shape[1]/2 # px py
                refcam[3,3] = refimg.shape[0]/2 # px py
                refcam[3,:2] = 8*img_size/2 # fl
            else:
                refcam[3,2] = refimg.shape[1]/2 # px py
                refcam[3,3] = refimg.shape[1]/2 # px py
                refcam[3,:2] =0.5 * img_size/2 # fl
        else:
            refimg, refsil, refkp, refvis, refname = all_anno[i]
            print('%s'%(refname))
            img_size = max(refimg.shape)
            renderer_softflf.rasterizer.image_size = img_size
            refmesh = all_mesh[i]
            refcam = all_cam[i]
        currcam = np.concatenate([refcam[:3,:4],np.asarray([[0,0,0,1]])],0)
        if i==0:
            initcam = currcam.copy()
        # Transform vertices into the camera frame on the GPU.
        refface = torch.Tensor(refmesh.faces[None]).cuda()
        verts = torch.Tensor(refmesh.vertices[None]).cuda()
        Rmat = torch.Tensor(refcam[None,:3,:3]).cuda()
        Tmat = torch.Tensor(refcam[None,:3,3]).cuda()
        ppoint =refcam[3,2:4]
        scale = refcam[3,0]
        verts = obj_to_cam(verts, Rmat, Tmat,nmesh=1,n_hypo=1,skin=None)
        if args.cam_type != 'perspective':
            # Orthographic: apply scale/principal point in-plane and push the
            # mesh in front of the camera.
            verts[:,:,1] = ppoint[1]+verts[:,:, 1]*scale[0]
            verts[:,:,0] = ppoint[0]+verts[:,:, 0]*scale[0]
            verts[:,:,2] += (5+verts[:,:,2].min())
        # Build a fresh pyrender scene for this frame.
        r = OffscreenRenderer(img_size, img_size)
        if args.overlay:
            bgcolor=[0., 0., 0.]
        else:
            bgcolor=[1.,1.,1.]
        scene = Scene(ambient_light=0.4*np.asarray([1.,1.,1.,1.]), bg_color=bgcolor)
        direc_l = pyrender.DirectionalLight(color=np.ones(3), intensity=6.0)
        colors = refmesh.visual.vertex_colors
        colors= np.concatenate([0.6*colors[:,:3].astype(np.uint8), colors[:,3:]],-1) # avoid overexposure
        smooth=args.smooth
        if args.freeze:
            tbone = 0
        else:
            tbone = i
        if args.vis_bones:
            # Split the concatenated surface+bone mesh back into its parts
            # using the counts recorded above.
            mesh = trimesh.Trimesh(vertices=np.asarray(verts[0,:num_original_verts[tbone],:3].cpu()), faces=np.asarray(refface[0,:num_original_faces[tbone]].cpu()),vertex_colors=colors[:num_original_verts[tbone]])
            meshr = Mesh.from_trimesh(mesh,smooth=smooth)
            meshr._primitives[0].material.RoughnessFactor=.5
            scene.add_node( Node(mesh=meshr ))
            mesh2 = trimesh.Trimesh(vertices=np.asarray(verts[0,num_original_verts[tbone]:,:3].cpu()), faces=np.asarray(refface[0,num_original_faces[tbone]:].cpu()-num_original_verts[tbone]),vertex_colors=colors[num_original_verts[tbone]:])
            mesh2=Mesh.from_trimesh(mesh2,smooth=smooth)
            mesh2._primitives[0].material.RoughnessFactor=.5
            scene.add_node( Node(mesh=mesh2))
        else:
            mesh = trimesh.Trimesh(vertices=np.asarray(verts[0,:,:3].cpu()), faces=np.asarray(refface[0].cpu()),vertex_colors=colors)
            meshr = Mesh.from_trimesh(mesh,smooth=smooth)
            meshr._primitives[0].material.RoughnessFactor=.5
            scene.add_node( Node(mesh=meshr ))
        if not args.overlay:
            # Ground plane: load a textured quad, swap its y/z axes, then
            # stretch it to generously cover the mesh footprint.
            floor_mesh = trimesh.load('./database/misc/wood.obj',process=False)
            floor_mesh.vertices = np.concatenate([floor_mesh.vertices[:,:1], floor_mesh.vertices[:,2:3], floor_mesh.vertices[:,1:2]],-1 )
            xfloor = 10*mesh.vertices[:,0].min() + (10*mesh.vertices[:,0].max()-10*mesh.vertices[:,0].min())*(floor_mesh.vertices[:,0:1] - floor_mesh.vertices[:,0].min())/(floor_mesh.vertices[:,0].max()-floor_mesh.vertices[:,0].min())
            yfloor = floor_mesh.vertices[:,1:2]; yfloor[:] = (mesh.vertices[:,1].max())
            zfloor = 0.5*mesh.vertices[:,2].min() + (10*mesh.vertices[:,2].max()-0.5*mesh.vertices[:,2].min())*(floor_mesh.vertices[:,2:3] - floor_mesh.vertices[:,2].min())/(floor_mesh.vertices[:,2].max()-floor_mesh.vertices[:,2].min())
            floor_mesh.vertices = np.concatenate([xfloor,yfloor,zfloor],-1)
            floor_mesh = trimesh.Trimesh(floor_mesh.vertices, floor_mesh.faces, vertex_colors=255*np.ones((4,4), dtype=np.uint8))
            scene.add_node( Node(mesh=Mesh.from_trimesh(floor_mesh))) # overrides the prev. one
        if args.cam_type=='perspective':
            cam = IntrinsicsCamera(
                    scale,
                    scale,
                    ppoint[0],
                    ppoint[1],
                    znear=1e-3,zfar=1000)
        else:
            cam = pyrender.OrthographicCamera(xmag=1., ymag=1.)
        cam_pose = -np.eye(4); cam_pose[0,0]=1; cam_pose[-1,-1]=1
        cam_node = scene.add(cam, pose=cam_pose)
        direc_l_node = scene.add(direc_l, pose=light_pose)
        if args.vis_bones:
            color, depth = r.render(scene,flags=pyrender.RenderFlags.SHADOWS_DIRECTIONAL)
        else:
            color, depth = r.render(scene,flags=pyrender.RenderFlags.SHADOWS_DIRECTIONAL | pyrender.RenderFlags.SKIP_CULL_FACES)
        r.delete()
        # Crop back to the input image extent, optionally blend with the
        # input, and resize to the output resolution.
        color = color[:refimg.shape[0],:refimg.shape[1],:3]
        if args.overlay:
            color = cv2.addWeighted(color, 0.5, refimg[:,:,::-1], 0.5, 0)
        color = cv2.resize(color, output_size[::-1])
        frames.append(color)
    imageio.mimsave('%s'%args.outpath, frames, duration=5./len(frames))
if __name__ == '__main__':
    # CLI entry point: render the sequence configured by the flags above.
    main()
|
{"hexsha": "3b376beb69017aec16f93e56e13a9aa3704e77e0", "size": 13780, "ext": "py", "lang": "Python", "max_stars_repo_path": "render_vis.py", "max_stars_repo_name": "isabella232/lasr", "max_stars_repo_head_hexsha": "bedc8be2bb37d954fdd9e8b8aaddcfda6495cf22", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 128, "max_stars_repo_stars_event_min_datetime": "2021-06-21T19:01:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T17:36:54.000Z", "max_issues_repo_path": "render_vis.py", "max_issues_repo_name": "ecmjohnson/lasr", "max_issues_repo_head_hexsha": "1d8404232d2e51c330bdf83416343eda0290bcc0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2021-06-29T06:23:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-12T01:46:33.000Z", "max_forks_repo_path": "render_vis.py", "max_forks_repo_name": "ecmjohnson/lasr", "max_forks_repo_head_hexsha": "1d8404232d2e51c330bdf83416343eda0290bcc0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2021-06-21T19:38:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-06T12:44:03.000Z", "avg_line_length": 47.0307167235, "max_line_length": 240, "alphanum_fraction": 0.606095791, "include": true, "reason": "import numpy", "num_tokens": 3646}
|
import os
import platform
import sys
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from imio.load import load_any
from brainreg.cli import main as brainreg_run
# Input data and platform-specific expected registration output shipped
# with the test suite.
test_data_dir = Path(os.getcwd()) / "tests" / "data"
brain_data_dir = test_data_dir / "brain data"
expected_niftyreg_output_dir = (
    test_data_dir / "registration_output" / platform.system()
)
# Voxel sizes passed (as strings) to brainreg's -v flag — presumably in
# microns; confirm against the brainreg CLI docs.
x_pix = "40"
y_pix = "40"
z_pix = "50"
# Tolerances for the image/volume comparisons below.
relative_tolerance = 0.01
absolute_tolerance = 10
check_less_precise_pd = 1
# This will do a single run of brainreg when pytest is run
# The outputs are then tested in a separate test below
@pytest.fixture(scope="session")
def niftyreg_output_path(tmp_path_factory):
    """Run brainreg once per session; yield the output directory it wrote."""
    test_output_dir = tmp_path_factory.mktemp("output_dir")
    # Build the argv the brainreg CLI entry point expects and invoke it
    # in-process (it reads sys.argv directly).
    brainreg_args = [
        "brainreg",
        str(brain_data_dir),
        str(test_output_dir),
        "-v",
        z_pix,
        y_pix,
        x_pix,
        "--orientation",
        "psl",
        "--n-free-cpus",
        "0",
        "--atlas",
        "allen_mouse_100um",
        "-d",
        str(brain_data_dir),
    ]
    sys.argv = brainreg_args
    brainreg_run()
    return test_output_dir
# Every registration output image is compared against the expected image
# for the current platform, within the module-level tolerances.
@pytest.mark.parametrize(
    "image",
    [
        "boundaries.tiff",
        "deformation_field_0.tiff",
        "deformation_field_1.tiff",
        "deformation_field_2.tiff",
        "downsampled.tiff",
        "downsampled_brain data.tiff",
        "downsampled_standard.tiff",
        "downsampled_standard_brain data.tiff",
        "registered_atlas.tiff",
        "registered_hemispheres.tiff",
    ],
)
def test_images_output(niftyreg_output_path, image):
    are_images_equal(image, niftyreg_output_path, expected_niftyreg_output_dir)
def test_volumes_output(niftyreg_output_path):
    """Check the regional volumes CSV against the expected platform output."""
    produced = pd.read_csv(os.path.join(niftyreg_output_path, "volumes.csv"))
    expected = pd.read_csv(os.path.join(expected_niftyreg_output_dir, "volumes.csv"))
    # Cell-by-cell DataFrame comparison (dtype- and value-exact by default).
    pd.testing.assert_frame_equal(produced, expected)
def are_images_equal(image_name, output_directory, test_output_directory):
    """Assert that two identically named images match within the module tolerances."""
    produced_path = os.path.join(output_directory, image_name)
    expected_path = os.path.join(test_output_directory, image_name)
    produced = load_any(produced_path)
    expected = load_any(expected_path)
    # Element-wise comparison with both relative and absolute tolerance.
    np.testing.assert_allclose(
        produced, expected, rtol=relative_tolerance, atol=absolute_tolerance
    )
|
{"hexsha": "dccc12d5443b52a47affa82524a772d5b085a5e3", "size": 2324, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/tests/test_integration/test_registration.py", "max_stars_repo_name": "WyssCenter/brainreg", "max_stars_repo_head_hexsha": "ab93567bae211f3d569478b2191c1a82aef22b6b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-28T06:42:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-28T06:42:46.000Z", "max_issues_repo_path": "tests/tests/test_integration/test_registration.py", "max_issues_repo_name": "WyssCenter/brainreg", "max_issues_repo_head_hexsha": "ab93567bae211f3d569478b2191c1a82aef22b6b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/tests/test_integration/test_registration.py", "max_forks_repo_name": "WyssCenter/brainreg", "max_forks_repo_head_hexsha": "ab93567bae211f3d569478b2191c1a82aef22b6b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9892473118, "max_line_length": 79, "alphanum_fraction": 0.6790017212, "include": true, "reason": "import numpy", "num_tokens": 573}
|
#!/usr/bin/env python
"""This file calculates the cylinder geometry given the top dead center volume the sweept volume and the bore and stroke
and the location within the engine cylce given the crank angle Depictions of the geometry and equations can by found in
Internal Combustion Engine Fundementals, Heywood, page 43."""
import numpy as np
from matplotlib import pyplot
class Cylinder_Geometry:
    """Slider-crank cylinder geometry of a reciprocating engine.

    Implements the cylinder-volume, piston-position and piston-speed
    relations from Heywood, "Internal Combustion Engine Fundamentals",
    pp. 43-44.  By convention volumes are in cubic centimetres and lengths
    in mm; the default crank-angle array spans two revolutions (0-720 deg)
    at 1/10th-degree resolution.
    """

    def __init__(self, compression_ratio=12.75, bore=9.5, stroke=6.34,
                 tdc_volume=0.0, swept_volume=449.9, connecting_rod_length=15.0,
                 connecting_rod_ratio=3.388, crank_angle=np.arange(0, 720, 0.1)):
        """Store the base geometry, converting inputs to numpy arrays for speed.

        A tdc_volume of 0.0 (the default) means "derive the clearance volume
        from the compression ratio" in cylinder_volume_func().
        Raises ValueError for negative inputs.
        """
        if (compression_ratio < 0 or bore < 0 or stroke < 0 or tdc_volume < 0
                or swept_volume < 0 or connecting_rod_length < 0):
            raise ValueError('Inputs must be positive')
        # NOTE: this attribute shadows the compression_ratio() method below,
        # making the method unreachable through instances.
        self.compression_ratio = compression_ratio
        self.bore = np.array(bore)
        self.stroke = np.array(stroke)
        self.tdc_volume = np.array(tdc_volume)
        self.swept_volume = np.array(swept_volume)
        self.crank_radius = np.array(stroke / 2.0)
        self.connecting_rod_length = np.array(connecting_rod_length)
        self.crank_angle = np.array(crank_angle)
        self.connectrod_crankrad = np.array(connecting_rod_length / (stroke * 0.5))
        self.connecting_rod_ratio = np.array(connecting_rod_ratio)

    def cylinder_volume_func(self):
        """Return the cylinder volume at each crank angle as a list.

        Implements V(theta) = V_c + (pi b^2 / 4)(l + a - s) with
        s = a cos(theta) + sqrt(l^2 - a^2 sin^2(theta))  (Heywood p. 43).
        Resolution follows self.crank_angle.
        """
        theta_c = np.deg2rad(self.crank_angle)
        a = self.crank_radius
        l = self.connecting_rod_length
        b = self.bore
        v_c = self.tdc_volume
        # Clearance volume of 0 means "not supplied": derive it from the
        # compression ratio.  (Bug fix: previously a fresh default-constructed
        # instance was used here, ignoring this instance's parameters.)
        if v_c == 0:
            v_c = self.tdc_volume_calc()
        cylinder_volume = []
        for theta in theta_c:
            # Piston-pin to crank-axis distance for this crank angle.
            s = (a * np.cos(theta)) + (np.sqrt(l ** 2 - (a ** 2) * (np.sin(theta)) ** 2))
            cylinder_volume.append(v_c + ((np.pi * (b ** 2)) / 4.0) * (l + a - s))
        return cylinder_volume

    def areaCylinder(self):
        """Instantaneous in-cylinder surface area (head + piston crown + exposed wall).

        Bug fix: previously the piston position was evaluated on a fresh
        default-constructed instance instead of self.
        """
        piston_pos = self.piston_position(self.crank_angle)
        piston_area = np.pi * (self.bore ** 2) * 0.25
        # Exposed liner area grows as the piston moves away from TDC (pos -> 0).
        wall_area = np.pi * self.bore * self.stroke * (1 - piston_pos)
        return wall_area + (piston_area * 2)

    def tdc_volume_calc(self):
        """Clearance (TDC) volume from compression ratio and swept volume.

        V_c = V_d / (r_c - 1)  (Heywood p. 44).
        """
        tdc_volume = self.swept_volume / (self.compression_ratio - 1)
        return tdc_volume

    def compression_ratio(self):
        """Compression ratio from clearance and swept volume (Heywood p. 44).

        NOTE: unreachable through instances because __init__ assigns an
        attribute of the same name; kept for backward compatibility.
        """
        compression_ratio = (self.tdc_volume + self.swept_volume) / self.tdc_volume
        return compression_ratio

    def piston_velocity(self, n):
        """Mean and instantaneous piston speed at rotational speed n.

        Returns (mean_speed, instantaneous_speed) following Heywood:
        Sp / Sp_mean = (pi/2) sin(theta) [1 + cos(theta) / sqrt(R^2 - sin^2(theta))].
        """
        theta_c = np.deg2rad(self.crank_angle)
        ave_pist_velocity = 2 * self.stroke * n
        # Bug fix: sin(theta) under the square root must be squared
        # (Heywood uses R^2 - sin^2(theta)).
        actual_pist_velocity = ave_pist_velocity * (np.pi * 0.5 * np.sin(theta_c)) * (
            1 + (np.cos(theta_c) / np.sqrt(self.connectrod_crankrad ** 2 - np.sin(theta_c) ** 2)))
        return ave_pist_velocity, actual_pist_velocity

    def piston_position(self, crank_angle):
        """ Relative position of the piston, =1 at TDC and =0 at BDC, regarding
        to the crank angle in degres. """
        # Angle in radians
        radangle = np.radians(crank_angle)
        # Ratio of the crank radius on the connecting rod length
        ratio = 1 / self.connecting_rod_ratio
        piston_pos = 1 - 0.5 * ((1 - np.cos(radangle)) + ratio * (1 - np.sqrt(1 - pow(ratio * np.sin(radangle), 2))))
        return piston_pos
if __name__=="__main__":
    # Smoke test: build a default geometry and print the volume trace
    # (one value per 1/10th-degree crank-angle step over 720 degrees).
    c=Cylinder_Geometry()
    tdcv= c.cylinder_volume_func()
    print (tdcv)
    #print (len(tdcv))
    #print (np.array2string(tdcv))
|
{"hexsha": "109442d1e8346b067e1b62a92efddef04a48edb8", "size": 4463, "ext": "py", "lang": "Python", "max_stars_repo_path": "pypow/thermodynamics/Cylinder_Geometry.py", "max_stars_repo_name": "johnkittelman/pypow", "max_stars_repo_head_hexsha": "388932556b2c3acd893edc3f4b5943f36998afce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pypow/thermodynamics/Cylinder_Geometry.py", "max_issues_repo_name": "johnkittelman/pypow", "max_issues_repo_head_hexsha": "388932556b2c3acd893edc3f4b5943f36998afce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pypow/thermodynamics/Cylinder_Geometry.py", "max_forks_repo_name": "johnkittelman/pypow", "max_forks_repo_head_hexsha": "388932556b2c3acd893edc3f4b5943f36998afce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5042016807, "max_line_length": 193, "alphanum_fraction": 0.6804839794, "include": true, "reason": "import numpy", "num_tokens": 1132}
|
%
% lellipf(phi, k, errtol)
%
% Inputs:
%
% phi Input angle vector size 1 or 1xN.
% k Input parameter vector size 1 or 1xN.
% errtol Error tolerance for Carlson's algorithms.
%
% Matlab function to compute Legendre's (incomplete) elliptic integral
% F(phi, k). Uses a vectorized implementation of Carlson's Duplication Algorithms
% for symmetric elliptic integrals as found in "Computing Elliptic
% Integrals by Duplication," by B. C. Carlson, Numer. Math. 33, 1-16 (1979)
% and also found in ACM TOMS Algorithm 577. Section 4 in the paper cited
% here describes how to convert between the symmetric elliptic integrals
% and Legendre's elliptic integrals.
%
% Returns NaN's for any argument values outside input range.
%
function f = lellipf(phi, k, errtol)
% Expand a scalar phi or k so both inputs have matching 1xN shapes;
% flag an error (and return NaN below) if both are non-scalar vectors
% of different lengths.
nphi = length(phi);
nk = length(k);
errflag = false;
if (nphi ~= nk)
    if (nphi == 1)
        phivec = phi * ones(1, nk);
        kvec = k;
    elseif (nk == 1)
        kvec = k * ones(1, nphi);
        phivec = phi;
    else
        disp('Incompatible input vector dimensions in lellipf!');
        errflag = true;
    end
else
    phivec = phi;
    kvec = k;
end

if errflag
    f = NaN;
else
    % Legendre -> Carlson reduction (Carlson 1979, Sec. 4):
    %   F(phi, k) = sin(phi) * RF(cos^2 phi, 1 - k^2 sin^2 phi, 1)
    snphi = sin(phivec);
    csphi = cos(phivec);
    csphi2 = csphi .* csphi;
    onesvec = ones(1, length(phivec));
    y = onesvec - kvec .* kvec .* snphi .* snphi;
    f = snphi .* rf(csphi2, y, onesvec, errtol);
end
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/3705-ellipticintegrals-zip/Elliptic_Integrals/lellipf.m"}
|
//
// Copyright (c) 2000-2010
// Joerg Walter, Mathias Koch, Gunter Winkler, David Bellot
// Copyright (c) 2014, Athanasios Iliopoulos
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// GeNeSys mbH & Co. KG in producing this work.
//
#ifndef _BOOST_UBLAS_MATRIX_
#define _BOOST_UBLAS_MATRIX_
#include <boost/config.hpp>
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/matrix_expression.hpp>
#include <boost/numeric/ublas/detail/matrix_assign.hpp>
#include <boost/serialization/collection_size_type.hpp>
#include <boost/serialization/array.hpp>
#include <boost/serialization/nvp.hpp>
// Iterators based on ideas of Jeremy Siek
namespace geofeatures_boost {} namespace boost = geofeatures_boost; namespace geofeatures_boost { namespace numeric {
/** \brief main namespace of uBLAS.
*
* Use this namespace for all operations with uBLAS. It can also be abbreviated with
* \code namespace ublas = geofeatures_boost::numeric::ublas; \endcode
*
* A common practice is to bring this namespace into the current scope with
* \code using namespace geofeatures_boost::numeric::ublas; \endcode.
*
* However, be warned that using the ublas namespace and the std::vector at the same time can lead to the compiler to confusion.
* The solution is simply to prefix each ublas vector like \c geofeatures_boost::numeric::ublas::vector<T>. If you think it's too long to
* write, you can define a new namespace like \c namespace ublas = geofeatures_boost::numeric::ublas and then just declare your vectors
* with \c ublas::vector<T>. STL vectors will be declared as vector<T>. No need to prefix with \c std::
*/
namespace ublas {
namespace detail {
using namespace geofeatures_boost::numeric::ublas;
// Matrix resizing algorithm
        // Resize matrix `m` to the dimensions carried by `temporary`,
        // preserving the elements common to the old and new extents, then
        // swap the temporary into `m`.  `L` selects the layout
        // (row_major / column_major) used to map (i, j) onto flat storage.
        template <class L, class M>
        BOOST_UBLAS_INLINE
        void matrix_resize_preserve (M& m, M& temporary) {
            typedef L layout_type;
            typedef typename M::size_type size_type;
            const size_type msize1 (m.size1 ()); // original size
            const size_type msize2 (m.size2 ());
            const size_type size1 (temporary.size1 ()); // new size is specified by temporary
            const size_type size2 (temporary.size2 ());
            // Common elements to preserve: the overlap of old and new extents
            const size_type size1_min = (std::min) (size1, msize1);
            const size_type size2_min = (std::min) (size2, msize2);
            // Order for major and minor sizes according to the layout
            const size_type major_size = layout_type::size_M (size1_min, size2_min);
            const size_type minor_size = layout_type::size_m (size1_min, size2_min);
            // Indexing copy over major (outer loop walks the layout's major
            // dimension so the destination is written mostly sequentially)
            for (size_type major = 0; major != major_size; ++major) {
                for (size_type minor = 0; minor != minor_size; ++minor) {
                    // find indexes - use invertability of element_ functions
                    const size_type i1 = layout_type::index_M(major, minor);
                    const size_type i2 = layout_type::index_m(major, minor);
                    // Offsets are computed against each matrix's own extents,
                    // so old and new storage are addressed correctly.
                    temporary.data () [layout_type::element (i1, size1, i2, size2)] =
                        m.data() [layout_type::element (i1, msize1, i2, msize2)];
                }
            }
            // Hand the filled temporary to m (swap-based assignment).
            m.assign_temporary (temporary);
        }
}
/** \brief A dense matrix of values of type \c T.
*
* For a \f$(m \times n)\f$-dimensional matrix and \f$ 0 \leq i < m, 0 \leq j < n\f$, every element \f$ m_{i,j} \f$ is mapped to
* the \f$(i.n + j)\f$-th element of the container for row major orientation or the \f$ (i + j.m) \f$-th element of
* the container for column major orientation. In a dense matrix all elements are represented in memory in a
* contiguous chunk of memory by definition.
*
* Orientation and storage can also be specified, otherwise a \c row_major and \c unbounded_array are used. It is \b not
* required by the storage to initialize elements of the matrix.
*
* \tparam T the type of object stored in the matrix (like double, float, complex, etc...)
* \tparam L the storage organization. It can be either \c row_major or \c column_major. Default is \c row_major
* \tparam A the type of Storage array. Default is \c unbounded_array
*/
template<class T, class L, class A>
class matrix:
public matrix_container<matrix<T, L, A> > {
typedef T *pointer;
typedef L layout_type;
typedef matrix<T, L, A> self_type;
public:
#ifdef BOOST_UBLAS_ENABLE_PROXY_SHORTCUTS
using matrix_container<self_type>::operator ();
#endif
typedef typename A::size_type size_type;
typedef typename A::difference_type difference_type;
typedef T value_type;
typedef const T &const_reference;
typedef T &reference;
typedef A array_type;
typedef const matrix_reference<const self_type> const_closure_type;
typedef matrix_reference<self_type> closure_type;
typedef vector<T, A> vector_temporary_type;
typedef self_type matrix_temporary_type;
typedef dense_tag storage_category;
// This could be better for performance,
// typedef typename unknown_orientation_tag orientation_category;
// but others depend on the orientation information...
typedef typename L::orientation_category orientation_category;
// Construction and destruction
/// Default dense matrix constructor. Make a dense matrix of size (0,0)
BOOST_UBLAS_INLINE
matrix ():
matrix_container<self_type> (),
size1_ (0), size2_ (0), data_ () {}
/** Dense matrix constructor with defined size
* \param size1 number of rows
* \param size2 number of columns
*/
BOOST_UBLAS_INLINE
matrix (size_type size1, size_type size2):
matrix_container<self_type> (),
size1_ (size1), size2_ (size2), data_ (layout_type::storage_size (size1, size2)) {
}
/** Dense matrix constructor with defined size a initial value for all the matrix elements
* \param size1 number of rows
* \param size2 number of columns
* \param init initial value assigned to all elements
*/
matrix (size_type size1, size_type size2, const value_type &init):
matrix_container<self_type> (),
size1_ (size1), size2_ (size2), data_ (layout_type::storage_size (size1, size2), init) {
}
/** Dense matrix constructor with defined size and an initial data array
* \param size1 number of rows
* \param size2 number of columns
* \param data array to copy into the matrix. Must have the same dimension as the matrix
*/
BOOST_UBLAS_INLINE
matrix (size_type size1, size_type size2, const array_type &data):
matrix_container<self_type> (),
size1_ (size1), size2_ (size2), data_ (data) {}
/** Copy-constructor of a dense matrix
* \param m is a dense matrix
*/
BOOST_UBLAS_INLINE
matrix (const matrix &m):
matrix_container<self_type> (),
size1_ (m.size1_), size2_ (m.size2_), data_ (m.data_) {}
/** Copy-constructor of a dense matrix from a matrix expression
* \param ae is a matrix expression
*/
template<class AE>
BOOST_UBLAS_INLINE
matrix (const matrix_expression<AE> &ae):
matrix_container<self_type> (),
size1_ (ae ().size1 ()), size2_ (ae ().size2 ()), data_ (layout_type::storage_size (size1_, size2_)) {
matrix_assign<scalar_assign> (*this, ae);
}
// Accessors
/** Return the number of rows of the matrix
* You can also use the free size<>() function in operation/size.hpp as size<1>(m) where m is a matrix
*/
BOOST_UBLAS_INLINE
size_type size1 () const {
return size1_;
}
/** Return the number of colums of the matrix
* You can also use the free size<>() function in operation/size.hpp as size<2>(m) where m is a matrix
*/
BOOST_UBLAS_INLINE
size_type size2 () const {
return size2_;
}
// Storage accessors
/** Return a constant reference to the internal storage of a dense matrix, i.e. the raw data
* It's type depends on the type used by the matrix to store its data
*/
BOOST_UBLAS_INLINE
const array_type &data () const {
return data_;
}
/** Return a reference to the internal storage of a dense matrix, i.e. the raw data
* It's type depends on the type used by the matrix to store its data
*/
BOOST_UBLAS_INLINE
array_type &data () {
return data_;
}
// Resizing
/** Resize a matrix to new dimensions
* If data are preserved, then if the size if bigger at least on one dimension, extra values are filled with zeros.
* If data are not preserved, then nothing has to be assumed regarding the content of the matrix after resizing.
* \param size1 the new number of rows
* \param size2 the new number of colums
* \param preserve a boolean to say if one wants the data to be preserved during the resizing. Default is true.
*/
BOOST_UBLAS_INLINE
void resize (size_type size1, size_type size2, bool preserve = true) {
if (preserve) {
self_type temporary (size1, size2);
detail::matrix_resize_preserve<layout_type> (*this, temporary);
}
else {
data ().resize (layout_type::storage_size (size1, size2));
size1_ = size1;
size2_ = size2;
}
}
// Element access
/** Access a matrix element. Here we return a const reference
* \param i the first coordinate of the element. By default it's the row
* \param j the second coordinate of the element. By default it's the column
* \return a const reference to the element
*/
BOOST_UBLAS_INLINE
const_reference operator () (size_type i, size_type j) const {
return data () [layout_type::element (i, size1_, j, size2_)];
}
/** Access a matrix element. Here we return a reference
* \param i the first coordinate of the element. By default it's the row
* \param j the second coordinate of the element. By default it's the column
* \return a reference to the element
*/
BOOST_UBLAS_INLINE
reference at_element (size_type i, size_type j) {
return data () [layout_type::element (i, size1_, j, size2_)];
}
/** Access a matrix element. Here we return a reference
* \param i the first coordinate of the element. By default it's the row
* \param j the second coordinate of the element. By default it's the column
* \return a reference to the element
*/
BOOST_UBLAS_INLINE
reference operator () (size_type i, size_type j) {
return at_element (i, j);
}
        // Element assignment
        /** Change the value of a matrix element. Return back a reference to it
         * \param i the first coordinate of the element. By default it's the row
         * \param j the second coordinate of the element. By default it's the column
         * \param t the new value of the element
         * \return a reference to the newly changed element
         */
        BOOST_UBLAS_INLINE
        reference insert_element (size_type i, size_type j, const_reference t) {
            return (at_element (i, j) = t);
        }
        /** Erase the element
         * For most types (int, double, etc...) it means setting the element to 0 (zero).
         * For user-defined types, it could be another value if you decided it. Your type in that case must
         * contain a default null value.
         * \param i the first coordinate of the element. By default it's the row
         * \param j the second coordinate of the element. By default it's the column
         */
        void erase_element (size_type i, size_type j) {
            // value_type () is the value-initialized "zero" of the element type.
            at_element (i, j) = value_type/*zero*/();
        }
        // Zeroing
        /** Erase all elements in the matrix
         * For most types (int, double, etc...) it means writing 0 (zero) everywhere.
         * For user-defined types, it could be another value if you decided it. Your type in that case must
         * contain a default null value.
         */
        BOOST_UBLAS_INLINE
        void clear () {
            std::fill (data ().begin (), data ().end (), value_type/*zero*/());
        }
        // Assignment
#ifdef BOOST_UBLAS_MOVE_SEMANTICS
        /*! @note "pass by value" the key idea to enable move semantics */
        BOOST_UBLAS_INLINE
        matrix &operator = (matrix m) {
            // The by-value parameter already holds the copy (or move); we just
            // swap our state with it via assign_temporary.
            assign_temporary(m);
            return *this;
        }
#else
        BOOST_UBLAS_INLINE
        matrix &operator = (const matrix &m) {
            // Classic copy assignment: copy sizes, then the storage array.
            size1_ = m.size1_;
            size2_ = m.size2_;
            data () = m.data ();
            return *this;
        }
#endif
        template<class C> // Container assignment without temporary
        BOOST_UBLAS_INLINE
        matrix &operator = (const matrix_container<C> &m) {
            // Containers have known sizes, so we can resize first and assign
            // element-wise without materializing a temporary.
            resize (m ().size1 (), m ().size2 (), false);
            assign (m);
            return *this;
        }
        BOOST_UBLAS_INLINE
        matrix &assign_temporary (matrix &m) {
            // Steal the temporary's state; m is left holding our old contents.
            swap (m);
            return *this;
        }
        /** Assign from a matrix expression.
         * Evaluates the expression into a temporary first, so aliasing between
         * *this and the expression is safe.
         */
        template<class AE>
        BOOST_UBLAS_INLINE
        matrix &operator = (const matrix_expression<AE> &ae) {
            self_type temporary (ae);
            return assign_temporary (temporary);
        }
        /** Assign from a matrix expression without an intermediate temporary.
         * Faster than operator=, but the caller must guarantee no aliasing.
         */
        template<class AE>
        BOOST_UBLAS_INLINE
        matrix &assign (const matrix_expression<AE> &ae) {
            matrix_assign<scalar_assign> (*this, ae);
            return *this;
        }
        /** Add a matrix expression to *this (alias-safe, uses a temporary). */
        template<class AE>
        BOOST_UBLAS_INLINE
        matrix& operator += (const matrix_expression<AE> &ae) {
            self_type temporary (*this + ae);
            return assign_temporary (temporary);
        }
        /** Add a matrix container to *this without a temporary. */
        template<class C> // Container assignment without temporary
        BOOST_UBLAS_INLINE
        matrix &operator += (const matrix_container<C> &m) {
            plus_assign (m);
            return *this;
        }
        /** In-place addition of a matrix expression; caller must avoid aliasing. */
        template<class AE>
        BOOST_UBLAS_INLINE
        matrix &plus_assign (const matrix_expression<AE> &ae) {
            matrix_assign<scalar_plus_assign> (*this, ae);
            return *this;
        }
        /** Subtract a matrix expression from *this (alias-safe, uses a temporary). */
        template<class AE>
        BOOST_UBLAS_INLINE
        matrix& operator -= (const matrix_expression<AE> &ae) {
            self_type temporary (*this - ae);
            return assign_temporary (temporary);
        }
        /** Subtract a matrix container from *this without a temporary. */
        template<class C> // Container assignment without temporary
        BOOST_UBLAS_INLINE
        matrix &operator -= (const matrix_container<C> &m) {
            minus_assign (m);
            return *this;
        }
        /** In-place subtraction of a matrix expression; caller must avoid aliasing. */
        template<class AE>
        BOOST_UBLAS_INLINE
        matrix &minus_assign (const matrix_expression<AE> &ae) {
            matrix_assign<scalar_minus_assign> (*this, ae);
            return *this;
        }
        /** Multiply every element by a scalar. */
        template<class AT>
        BOOST_UBLAS_INLINE
        matrix& operator *= (const AT &at) {
            matrix_assign_scalar<scalar_multiplies_assign> (*this, at);
            return *this;
        }
        /** Divide every element by a scalar. */
        template<class AT>
        BOOST_UBLAS_INLINE
        matrix& operator /= (const AT &at) {
            matrix_assign_scalar<scalar_divides_assign> (*this, at);
            return *this;
        }
        // Swapping
        /** Swap contents with another matrix.
         * Swaps the two size fields and the storage arrays; self-swap is a no-op.
         */
        BOOST_UBLAS_INLINE
        void swap (matrix &m) {
            if (this != &m) {
                std::swap (size1_, m.size1_);
                std::swap (size2_, m.size2_);
                data ().swap (m.data ());
            }
        }
        /** Free-function swap (ADL-friendly), delegating to the member swap. */
        BOOST_UBLAS_INLINE
        friend void swap (matrix &m1, matrix &m2) {
            m1.swap (m2);
        }
        // Iterator types
    private:
        // Use the storage array iterator
        typedef typename A::const_iterator const_subiterator_type;
        typedef typename A::iterator subiterator_type;
    public:
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Index-based iterators: carry (i, j) explicitly instead of a raw
        // storage iterator.
        typedef indexed_iterator1<self_type, dense_random_access_iterator_tag> iterator1;
        typedef indexed_iterator2<self_type, dense_random_access_iterator_tag> iterator2;
        typedef indexed_const_iterator1<self_type, dense_random_access_iterator_tag> const_iterator1;
        typedef indexed_const_iterator2<self_type, dense_random_access_iterator_tag> const_iterator2;
#else
        // Handcrafted iterators defined as nested classes below.
        class const_iterator1;
        class iterator1;
        class const_iterator2;
        class iterator2;
#endif
        // Reverse adaptors over the forward iterators (suffix 1 = first/row
        // index, suffix 2 = second/column index).
        typedef reverse_iterator_base1<const_iterator1> const_reverse_iterator1;
        typedef reverse_iterator_base1<iterator1> reverse_iterator1;
        typedef reverse_iterator_base2<const_iterator2> const_reverse_iterator2;
        typedef reverse_iterator_base2<iterator2> reverse_iterator2;
        // Element lookup
        /** Locate a const iterator along the first (row) index at (i, j).
         * The rank parameter is ignored for dense matrices.
         */
        BOOST_UBLAS_INLINE
        const_iterator1 find1 (int /* rank */, size_type i, size_type j) const {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
            return const_iterator1 (*this, i, j);
#else
            return const_iterator1 (*this, data ().begin () + layout_type::address (i, size1_, j, size2_));
#endif
        }
        /** Locate a mutable iterator along the first (row) index at (i, j). */
        BOOST_UBLAS_INLINE
        iterator1 find1 (int /* rank */, size_type i, size_type j) {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
            return iterator1 (*this, i, j);
#else
            return iterator1 (*this, data ().begin () + layout_type::address (i, size1_, j, size2_));
#endif
        }
        /** Locate a const iterator along the second (column) index at (i, j). */
        BOOST_UBLAS_INLINE
        const_iterator2 find2 (int /* rank */, size_type i, size_type j) const {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
            return const_iterator2 (*this, i, j);
#else
            return const_iterator2 (*this, data ().begin () + layout_type::address (i, size1_, j, size2_));
#endif
        }
        /** Locate a mutable iterator along the second (column) index at (i, j). */
        BOOST_UBLAS_INLINE
        iterator2 find2 (int /* rank */, size_type i, size_type j) {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
            return iterator2 (*this, i, j);
#else
            return iterator2 (*this, data ().begin () + layout_type::address (i, size1_, j, size2_));
#endif
        }
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Const iterator over the first (row) index. Wraps a raw iterator
        // (it_) into the flat storage array; stepping and recovery of (i, j)
        // are delegated to the layout policy (layout_type).
        class const_iterator1:
            public container_const_reference<matrix>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               const_iterator1, value_type> {
        public:
            typedef typename matrix::value_type value_type;
            typedef typename matrix::difference_type difference_type;
            typedef typename matrix::const_reference reference;
            typedef const typename matrix::pointer pointer;
            typedef const_iterator2 dual_iterator_type;
            typedef const_reverse_iterator2 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            const_iterator1 ():
                container_const_reference<self_type> (), it_ () {}
            BOOST_UBLAS_INLINE
            const_iterator1 (const self_type &m, const const_subiterator_type &it):
                container_const_reference<self_type> (m), it_ (it) {}
            // Implicit conversion from the mutable iterator1.
            BOOST_UBLAS_INLINE
            const_iterator1 (const iterator1 &it):
                container_const_reference<self_type> (it ()), it_ (it.it_) {}
            // Arithmetic: all stepping goes through the layout policy so the
            // same iterator code works for row- and column-major storage.
            BOOST_UBLAS_INLINE
            const_iterator1 &operator ++ () {
                layout_type::increment_i (it_, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator -- () {
                layout_type::decrement_i (it_, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator += (difference_type n) {
                layout_type::increment_i (it_, n, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator -= (difference_type n) {
                layout_type::decrement_i (it_, n, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            difference_type operator - (const const_iterator1 &it) const {
                // Both iterators must refer to the same matrix.
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return layout_type::distance_i (it_ - it.it_, (*this) ().size1 (), (*this) ().size2 ());
            }
            // Dereference
            BOOST_UBLAS_INLINE
            const_reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return *it_;
            }
            BOOST_UBLAS_INLINE
            const_reference operator [] (difference_type n) const {
                return *(*this + n);
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            // Dual iteration: begin()/end() traverse the columns of the
            // current row via the find2 lookup of the owning matrix.
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 begin () const {
                const self_type &m = (*this) ();
                return m.find2 (1, index1 (), 0);
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 cbegin () const {
                return begin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 end () const {
                const self_type &m = (*this) ();
                return m.find2 (1, index1 (), m.size2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 cend () const {
                return end ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 rbegin () const {
                return const_reverse_iterator2 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 crbegin () const {
                return rbegin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 rend () const {
                return const_reverse_iterator2 (begin ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 crend () const {
                return rend ();
            }
#endif
            // Indices: recovered from the flat offset relative to begin1 ().
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                const self_type &m = (*this) ();
                return layout_type::index_i (it_ - m.begin1 ().it_, m.size1 (), m.size2 ());
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                const self_type &m = (*this) ();
                return layout_type::index_j (it_ - m.begin1 ().it_, m.size1 (), m.size2 ());
            }
            // Assignment
            BOOST_UBLAS_INLINE
            const_iterator1 &operator = (const const_iterator1 &it) {
                container_const_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }
            // Comparison
            BOOST_UBLAS_INLINE
            bool operator == (const const_iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ == it.it_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const const_iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ < it.it_;
            }
        private:
            const_subiterator_type it_;  // raw iterator into the flat storage
            friend class iterator1;      // allows iterator1 -> const_iterator1 conversion
        };
#endif
BOOST_UBLAS_INLINE
const_iterator1 begin1 () const {
return find1 (0, 0, 0);
}
BOOST_UBLAS_INLINE
const_iterator1 cbegin1 () const {
return begin1 ();
}
BOOST_UBLAS_INLINE
const_iterator1 end1 () const {
return find1 (0, size1_, 0);
}
BOOST_UBLAS_INLINE
const_iterator1 cend1 () const {
return end1 ();
}
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Mutable iterator over the first (row) index; mirrors
        // const_iterator1 but dereferences to a mutable reference.
        class iterator1:
            public container_reference<matrix>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               iterator1, value_type> {
        public:
            typedef typename matrix::value_type value_type;
            typedef typename matrix::difference_type difference_type;
            typedef typename matrix::reference reference;
            typedef typename matrix::pointer pointer;
            typedef iterator2 dual_iterator_type;
            typedef reverse_iterator2 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            iterator1 ():
                container_reference<self_type> (), it_ () {}
            BOOST_UBLAS_INLINE
            iterator1 (self_type &m, const subiterator_type &it):
                container_reference<self_type> (m), it_ (it) {}
            // Arithmetic: stepping is delegated to the layout policy.
            BOOST_UBLAS_INLINE
            iterator1 &operator ++ () {
                layout_type::increment_i (it_, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator1 &operator -- () {
                layout_type::decrement_i (it_, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator1 &operator += (difference_type n) {
                layout_type::increment_i (it_, n, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator1 &operator -= (difference_type n) {
                layout_type::decrement_i (it_, n, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            difference_type operator - (const iterator1 &it) const {
                // Both iterators must refer to the same matrix.
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return layout_type::distance_i (it_ - it.it_, (*this) ().size1 (), (*this) ().size2 ());
            }
            // Dereference
            BOOST_UBLAS_INLINE
            reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ())
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return *it_;
            }
            BOOST_UBLAS_INLINE
            reference operator [] (difference_type n) const {
                return *(*this + n);
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            // Dual iteration: begin()/end() traverse the columns of the
            // current row via the owning matrix's find2.
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            iterator2 begin () const {
                self_type &m = (*this) ();
                return m.find2 (1, index1 (), 0);
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            iterator2 end () const {
                self_type &m = (*this) ();
                return m.find2 (1, index1 (), m.size2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            reverse_iterator2 rbegin () const {
                return reverse_iterator2 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            reverse_iterator2 rend () const {
                return reverse_iterator2 (begin ());
            }
#endif
            // Indices: recovered from the flat offset relative to begin1 ().
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                self_type &m = (*this) ();
                return layout_type::index_i (it_ - m.begin1 ().it_, m.size1 (), m.size2 ());
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                self_type &m = (*this) ();
                return layout_type::index_j (it_ - m.begin1 ().it_, m.size1 (), m.size2 ());
            }
            // Assignment
            BOOST_UBLAS_INLINE
            iterator1 &operator = (const iterator1 &it) {
                container_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }
            // Comparison
            BOOST_UBLAS_INLINE
            bool operator == (const iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ == it.it_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ < it.it_;
            }
        private:
            subiterator_type it_;          // raw iterator into the flat storage
            friend class const_iterator1;  // grants access for the converting ctor
        };
#endif
        /** Mutable iterator over the first (row) index, positioned at row 0. */
        BOOST_UBLAS_INLINE
        iterator1 begin1 () {
            return find1 (0, 0, 0);
        }
        /** Mutable iterator positioned one past the last row. */
        BOOST_UBLAS_INLINE
        iterator1 end1 () {
            return find1 (0, size1_, 0);
        }
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Const iterator over the second (column) index; the column-wise twin
        // of const_iterator1, using the layout policy's _j operations.
        class const_iterator2:
            public container_const_reference<matrix>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               const_iterator2, value_type> {
        public:
            typedef typename matrix::value_type value_type;
            typedef typename matrix::difference_type difference_type;
            typedef typename matrix::const_reference reference;
            typedef const typename matrix::pointer pointer;
            typedef const_iterator1 dual_iterator_type;
            typedef const_reverse_iterator1 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            const_iterator2 ():
                container_const_reference<self_type> (), it_ () {}
            BOOST_UBLAS_INLINE
            const_iterator2 (const self_type &m, const const_subiterator_type &it):
                container_const_reference<self_type> (m), it_ (it) {}
            // Implicit conversion from the mutable iterator2.
            BOOST_UBLAS_INLINE
            const_iterator2 (const iterator2 &it):
                container_const_reference<self_type> (it ()), it_ (it.it_) {}
            // Arithmetic: stepping is delegated to the layout policy.
            BOOST_UBLAS_INLINE
            const_iterator2 &operator ++ () {
                layout_type::increment_j (it_, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator -- () {
                layout_type::decrement_j (it_, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator += (difference_type n) {
                layout_type::increment_j (it_, n, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator -= (difference_type n) {
                layout_type::decrement_j (it_, n, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            difference_type operator - (const const_iterator2 &it) const {
                // Both iterators must refer to the same matrix.
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return layout_type::distance_j (it_ - it.it_, (*this) ().size1 (), (*this) ().size2 ());
            }
            // Dereference
            BOOST_UBLAS_INLINE
            const_reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return *it_;
            }
            BOOST_UBLAS_INLINE
            const_reference operator [] (difference_type n) const {
                return *(*this + n);
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            // Dual iteration: begin()/end() traverse the rows of the current
            // column via the owning matrix's find1.
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 begin () const {
                const self_type &m = (*this) ();
                return m.find1 (1, 0, index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 cbegin () const {
                return begin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 end () const {
                const self_type &m = (*this) ();
                return m.find1 (1, m.size1 (), index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 cend () const {
                return end ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 rbegin () const {
                return const_reverse_iterator1 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 crbegin () const {
                return rbegin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 rend () const {
                return const_reverse_iterator1 (begin ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 crend () const {
                return rend ();
            }
#endif
            // Indices: recovered from the flat offset relative to begin2 ().
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                const self_type &m = (*this) ();
                return layout_type::index_i (it_ - m.begin2 ().it_, m.size1 (), m.size2 ());
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                const self_type &m = (*this) ();
                return layout_type::index_j (it_ - m.begin2 ().it_, m.size1 (), m.size2 ());
            }
            // Assignment
            BOOST_UBLAS_INLINE
            const_iterator2 &operator = (const const_iterator2 &it) {
                container_const_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }
            // Comparison
            BOOST_UBLAS_INLINE
            bool operator == (const const_iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ == it.it_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const const_iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ < it.it_;
            }
        private:
            const_subiterator_type it_;  // raw iterator into the flat storage
            friend class iterator2;      // allows iterator2 -> const_iterator2 conversion
        };
#endif
BOOST_UBLAS_INLINE
const_iterator2 begin2 () const {
return find2 (0, 0, 0);
}
BOOST_UBLAS_INLINE
const_iterator2 cbegin2 () const {
return begin2 ();
}
BOOST_UBLAS_INLINE
const_iterator2 end2 () const {
return find2 (0, 0, size2_);
}
BOOST_UBLAS_INLINE
const_iterator2 cend2 () const {
return end2 ();
}
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Mutable iterator over the second (column) index; mirrors
        // const_iterator2 but dereferences to a mutable reference.
        class iterator2:
            public container_reference<matrix>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               iterator2, value_type> {
        public:
            typedef typename matrix::value_type value_type;
            typedef typename matrix::difference_type difference_type;
            typedef typename matrix::reference reference;
            typedef typename matrix::pointer pointer;
            typedef iterator1 dual_iterator_type;
            typedef reverse_iterator1 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            iterator2 ():
                container_reference<self_type> (), it_ () {}
            BOOST_UBLAS_INLINE
            iterator2 (self_type &m, const subiterator_type &it):
                container_reference<self_type> (m), it_ (it) {}
            // Arithmetic: stepping is delegated to the layout policy.
            BOOST_UBLAS_INLINE
            iterator2 &operator ++ () {
                layout_type::increment_j (it_, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator2 &operator -- () {
                layout_type::decrement_j (it_, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator2 &operator += (difference_type n) {
                layout_type::increment_j (it_, n, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator2 &operator -= (difference_type n) {
                layout_type::decrement_j (it_, n, (*this) ().size1 (), (*this) ().size2 ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            difference_type operator - (const iterator2 &it) const {
                // Both iterators must refer to the same matrix.
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return layout_type::distance_j (it_ - it.it_, (*this) ().size1 (), (*this) ().size2 ());
            }
            // Dereference
            BOOST_UBLAS_INLINE
            reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return *it_;
            }
            BOOST_UBLAS_INLINE
            reference operator [] (difference_type n) const {
                return *(*this + n);
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            // Dual iteration: begin()/end() traverse the rows of the current
            // column via the owning matrix's find1.
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            iterator1 begin () const {
                self_type &m = (*this) ();
                return m.find1 (1, 0, index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            iterator1 end () const {
                self_type &m = (*this) ();
                return m.find1 (1, m.size1 (), index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            reverse_iterator1 rbegin () const {
                return reverse_iterator1 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            reverse_iterator1 rend () const {
                return reverse_iterator1 (begin ());
            }
#endif
            // Indices: recovered from the flat offset relative to begin2 ().
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                self_type &m = (*this) ();
                return layout_type::index_i (it_ - m.begin2 ().it_, m.size1 (), m.size2 ());
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                self_type &m = (*this) ();
                return layout_type::index_j (it_ - m.begin2 ().it_, m.size1 (), m.size2 ());
            }
            // Assignment
            BOOST_UBLAS_INLINE
            iterator2 &operator = (const iterator2 &it) {
                container_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }
            // Comparison
            BOOST_UBLAS_INLINE
            bool operator == (const iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ == it.it_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ < it.it_;
            }
        private:
            subiterator_type it_;          // raw iterator into the flat storage
            friend class const_iterator2;  // grants access for the converting ctor
        };
#endif
        /** Mutable iterator over the second (column) index, positioned at column 0. */
        BOOST_UBLAS_INLINE
        iterator2 begin2 () {
            return find2 (0, 0, 0);
        }
        /** Mutable iterator positioned one past the last column. */
        BOOST_UBLAS_INLINE
        iterator2 end2 () {
            return find2 (0, 0, size2_);
        }
// Reverse iterators
BOOST_UBLAS_INLINE
const_reverse_iterator1 rbegin1 () const {
return const_reverse_iterator1 (end1 ());
}
BOOST_UBLAS_INLINE
const_reverse_iterator1 crbegin1 () const {
return rbegin1 ();
}
BOOST_UBLAS_INLINE
const_reverse_iterator1 rend1 () const {
return const_reverse_iterator1 (begin1 ());
}
BOOST_UBLAS_INLINE
const_reverse_iterator1 crend1 () const {
return rend1 ();
}
BOOST_UBLAS_INLINE
reverse_iterator1 rbegin1 () {
return reverse_iterator1 (end1 ());
}
BOOST_UBLAS_INLINE
reverse_iterator1 rend1 () {
return reverse_iterator1 (begin1 ());
}
BOOST_UBLAS_INLINE
const_reverse_iterator2 rbegin2 () const {
return const_reverse_iterator2 (end2 ());
}
BOOST_UBLAS_INLINE
const_reverse_iterator2 crbegin2 () const {
return rbegin2 ();
}
BOOST_UBLAS_INLINE
const_reverse_iterator2 rend2 () const {
return const_reverse_iterator2 (begin2 ());
}
BOOST_UBLAS_INLINE
const_reverse_iterator2 crend2 () const {
return rend2 ();
}
BOOST_UBLAS_INLINE
reverse_iterator2 rbegin2 () {
return reverse_iterator2 (end2 ());
}
BOOST_UBLAS_INLINE
reverse_iterator2 rend2 () {
return reverse_iterator2 (begin2 ());
}
        // Serialization
        /** Serialize or deserialize this matrix with Boost.Serialization.
         * Stores size1, size2 and the flat data array under named NVPs.
         */
        template<class Archive>
        void serialize(Archive & ar, const unsigned int /* file_version */){
            // we need to copy to a collection_size_type to get a portable
            // and efficient serialization
            serialization::collection_size_type s1 (size1_);
            serialization::collection_size_type s2 (size2_);
            // serialize the sizes
            ar & serialization::make_nvp("size1",s1)
               & serialization::make_nvp("size2",s2);
            // copy the values back if loading
            if (Archive::is_loading::value) {
                size1_ = s1;
                size2_ = s2;
            }
            ar & serialization::make_nvp("data",data_);
        }
    private:
        size_type size1_;    // number of rows
        size_type size2_;    // number of columns
        array_type data_;    // flat element storage, ordered per layout_type
    };
#ifdef BOOST_UBLAS_CPP_GE_2011
/** \brief A fixed size dense matrix of values of type \c T. Equivalent to a c-style 2 dimensional array.
*
* For a \f$(m \times n)\f$-dimensional fixed_matrix and \f$ 0 \leq i < m, 0 \leq j < n\f$, every element \f$ m_{i,j} \f$ is mapped to
* the \f$(i.n + j)\f$-th element of the container for row major orientation or the \f$ (i + j.m) \f$-th element of
* the container for column major orientation. In a dense matrix all elements are represented in memory in a
* contiguous chunk of memory by definition.
*
* Orientation and storage can also be specified, otherwise \c row_major and \c std::array are used. It is \b not
* required by the storage container to initialize elements of the matrix.
*
* \tparam T the type of object stored in the matrix (like double, float, std::complex<double>, etc...)
* \tparam L the storage organization. It can be either \c row_major or \c column_major. Default is \c row_major
* \tparam A the type of Storage array. Default is \c std::array<T, M*N>
*/
    template<class T, std::size_t M, std::size_t N, class L, class A>
    class fixed_matrix:
        public matrix_container<fixed_matrix<T, M, N, L, A> > {

        typedef T *pointer;                           // raw element pointer
        typedef L layout_type;                        // row_major / column_major policy
        typedef fixed_matrix<T, M, N, L, A> self_type;
    public:
#ifdef BOOST_UBLAS_ENABLE_PROXY_SHORTCUTS
        using matrix_container<self_type>::operator ();
#endif
        typedef typename A::size_type size_type;
        typedef typename A::difference_type difference_type;
        typedef T value_type;
        typedef const T &const_reference;
        typedef T &reference;
        typedef A array_type;
        typedef const matrix_reference<const self_type> const_closure_type;
        typedef matrix_reference<self_type> closure_type;
        typedef vector<T, A> vector_temporary_type;
        typedef self_type matrix_temporary_type;
        typedef dense_tag storage_category;
        // This could be better for performance,
        // typedef typename unknown_orientation_tag orientation_category;
        // but others depend on the orientation information...
        typedef typename L::orientation_category orientation_category;
        // Construction and destruction
        /// Default dense fixed_matrix constructor. Make a dense fixed_matrix of size M x N
        BOOST_UBLAS_INLINE
        fixed_matrix ():
            matrix_container<self_type> (),
            data_ () {}
        /// \brief Construct a fixed_matrix from a list of values
        /// The list may be included in curly braces. Typical choices are:
        /// fixed_matrix<double,2,2> v = { 1, 2, 3, 4 } or fixed_matrix<double,2,2> v( {1, 2, 3, 4} ) or fixed_matrix<double,2,2> v( 1, 2, 3, 4 )
        template <typename... Types>
        fixed_matrix(value_type v0, Types... vrest) :
            matrix_container<self_type> (),
            data_{ { v0, vrest... } } {}
        /** Dense fixed_matrix constructor with defined initial value for all the matrix elements
         * \param init initial value assigned to all elements
         */
        fixed_matrix (const value_type &init):
            matrix_container<self_type> (),
            data_ ( ) {
            data_.fill(init);
        }
        /** Dense matrix constructor with defined initial data array
         * \param data array to copy into the matrix. Must have the same dimension as the matrix
         */
        BOOST_UBLAS_INLINE
        fixed_matrix (const array_type &data):
            matrix_container<self_type> (),
            data_ (data) {}
        /** Copy-constructor of a dense fixed_matrix
         * \param m is a dense fixed_matrix
         */
        BOOST_UBLAS_INLINE
        fixed_matrix (const fixed_matrix &m):
            matrix_container<self_type> (),
            data_ (m.data_) {}
        /** Copy-constructor of a dense matrix from a matrix expression
         * \param ae is a matrix expression
         */
        template<class AE>
        BOOST_UBLAS_INLINE
        fixed_matrix (const matrix_expression<AE> &ae):
            matrix_container<self_type> (),
            data_ () {
            // Evaluate the expression directly into our storage.
            matrix_assign<scalar_assign> (*this, ae);
        }
        // Accessors
        /** Return the number of rows of the fixed_matrix (the compile-time constant M)
         * You can also use the free size<>() function in operation/size.hpp as size<1>(m) where m is a fixed_matrix
         */
        BOOST_UBLAS_INLINE
        BOOST_CONSTEXPR size_type size1 () const {
            return M;
        }
        /** Return the number of columns of the fixed_matrix (the compile-time constant N)
         * You can also use the free size<>() function in operation/size.hpp as size<2>(m) where m is a fixed_matrix
         */
        BOOST_UBLAS_INLINE
        BOOST_CONSTEXPR size_type size2 () const {
            return N;
        }
        // Storage accessors
        /** Return a constant reference to the internal storage of a dense matrix, i.e. the raw data
         * Its type depends on the type used by the matrix to store its data
         */
        BOOST_UBLAS_INLINE
        const array_type &data () const {
            return data_;
        }
        /** Return a reference to the internal storage of a dense fixed_matrix, i.e. the raw data
         * Its type depends on the type used by the fixed_matrix to store its data
         */
        BOOST_UBLAS_INLINE
        array_type &data () {
            return data_;
        }
        // Element access
        /** Access a fixed_matrix element. Here we return a const reference
         * \param i the first coordinate of the element. By default it's the row
         * \param j the second coordinate of the element. By default it's the column
         * \return a const reference to the element
         */
        BOOST_UBLAS_INLINE
        const_reference operator () (size_type i, size_type j) const {
            // No bounds checking; the layout maps (i, j) onto the flat array.
            return data () [layout_type::element (i, M, j, N)]; // Fixme: add static lookup for element(...) i.e.: element<M, N>(i,j)
        }
        /** Access a fixed_matrix element. Here we return a reference
         * \param i the first coordinate of the element. By default it's the row
         * \param j the second coordinate of the element. By default it's the column
         * \return a reference to the element
         */
        BOOST_UBLAS_INLINE
        reference at_element (size_type i, size_type j) {
            return data () [layout_type::element (i, M, j, N)];
        }
        /** Access a fixed_matrix element. Here we return a reference
         * \param i the first coordinate of the element. By default it's the row
         * \param j the second coordinate of the element. By default it's the column
         * \return a reference to the element
         */
        BOOST_UBLAS_INLINE
        reference operator () (size_type i, size_type j) {
            return at_element (i, j);
        }
        // Element assignment
        /** Change the value of a fixed_matrix element. Return back a reference to it
         * \param i the first coordinate of the element. By default it's the row
         * \param j the second coordinate of the element. By default it's the column
         * \param t the new value of the element
         * \return a reference to the newly changed element
         */
        BOOST_UBLAS_INLINE
        reference insert_element (size_type i, size_type j, const_reference t) {
            return (at_element (i, j) = t);
        }
        /** Erase the element
         * For most types (int, double, etc...) it means setting the element to 0 (zero).
         * For user-defined types, it could be another value if you decided it. Your type in that case must
         * contain a default null value.
         * \param i the first coordinate of the element. By default it's the row
         * \param j the second coordinate of the element. By default it's the column
         */
        void erase_element (size_type i, size_type j) {
            // value_type () is the value-initialized "zero" of the element type.
            at_element (i, j) = value_type/*zero*/();
        }
        // Zeroing
        /** Erase all elements in the fixed_matrix
         * For most types (int, double, etc...) it means writing 0 (zero) everywhere.
         * For user-defined types, it could be another value if you decided it. Your type in that case must
         * contain a default null value.
         */
        BOOST_UBLAS_INLINE
        void clear () {
            std::fill (data ().begin (), data ().end (), value_type/*zero*/());
        }
// Assignment
#ifdef BOOST_UBLAS_MOVE_SEMANTICS
/*! @note "pass by value" the key idea to enable move semantics */
BOOST_UBLAS_INLINE
fixed_matrix &operator = (matrix m) {
assign_temporary(m);
return *this;
}
#else
BOOST_UBLAS_INLINE
fixed_matrix &operator = (const fixed_matrix &m) {
data () = m.data ();
return *this;
}
#endif
template<class C> // Container assignment without temporary
BOOST_UBLAS_INLINE
fixed_matrix &operator = (const matrix_container<C> &m) {
resize (m ().size1 (), m ().size2 (), false);
assign (m);
return *this;
}
        /** Swap-based assignment helper used by the move-enabled operator=. */
        BOOST_UBLAS_INLINE
        fixed_matrix &assign_temporary (fixed_matrix &m) {
            // Steal the temporary's state; m is left holding our old contents.
            swap (m);
            return *this;
        }
        /** Assign from a matrix expression (alias-safe: evaluates into a temporary first). */
        template<class AE>
        BOOST_UBLAS_INLINE
        fixed_matrix &operator = (const matrix_expression<AE> &ae) {
            self_type temporary (ae);
            return assign_temporary (temporary);
        }
        /** Assign from a matrix expression without an intermediate temporary.
         * Faster than operator=, but the caller must guarantee no aliasing.
         */
        template<class AE>
        BOOST_UBLAS_INLINE
        fixed_matrix &assign (const matrix_expression<AE> &ae) {
            matrix_assign<scalar_assign> (*this, ae);
            return *this;
        }
        /** Add a matrix expression to *this (alias-safe, uses a temporary). */
        template<class AE>
        BOOST_UBLAS_INLINE
        fixed_matrix& operator += (const matrix_expression<AE> &ae) {
            self_type temporary (*this + ae);
            return assign_temporary (temporary);
        }
        /** Add a matrix container to *this without a temporary. */
        template<class C> // Container assignment without temporary
        BOOST_UBLAS_INLINE
        fixed_matrix &operator += (const matrix_container<C> &m) {
            plus_assign (m);
            return *this;
        }
        /** In-place addition of a matrix expression; caller must avoid aliasing. */
        template<class AE>
        BOOST_UBLAS_INLINE
        fixed_matrix &plus_assign (const matrix_expression<AE> &ae) {
            matrix_assign<scalar_plus_assign> (*this, ae);
            return *this;
        }
        /** Subtract a matrix expression from *this (alias-safe, uses a temporary). */
        template<class AE>
        BOOST_UBLAS_INLINE
        fixed_matrix& operator -= (const matrix_expression<AE> &ae) {
            self_type temporary (*this - ae);
            return assign_temporary (temporary);
        }
        /** Subtract a matrix container from *this without a temporary. */
        template<class C> // Container assignment without temporary
        BOOST_UBLAS_INLINE
        fixed_matrix &operator -= (const matrix_container<C> &m) {
            minus_assign (m);
            return *this;
        }
        /** In-place subtraction of a matrix expression; caller must avoid aliasing. */
        template<class AE>
        BOOST_UBLAS_INLINE
        fixed_matrix &minus_assign (const matrix_expression<AE> &ae) {
            matrix_assign<scalar_minus_assign> (*this, ae);
            return *this;
        }
        /** Multiply every element by a scalar. */
        template<class AT>
        BOOST_UBLAS_INLINE
        fixed_matrix& operator *= (const AT &at) {
            matrix_assign_scalar<scalar_multiplies_assign> (*this, at);
            return *this;
        }
        /** Divide every element by a scalar. */
        template<class AT>
        BOOST_UBLAS_INLINE
        fixed_matrix& operator /= (const AT &at) {
            matrix_assign_scalar<scalar_divides_assign> (*this, at);
            return *this;
        }
// Swapping
/** Exchange the contents of two fixed matrices by swapping their storage. */
BOOST_UBLAS_INLINE
void swap (fixed_matrix &m) {
    // Self-swap is a no-op; otherwise delegate to the storage array's swap.
    if (&m == this)
        return;
    data ().swap (m.data ());
}
/** Free-function swap so ADL (and generic algorithms) find the member swap. */
BOOST_UBLAS_INLINE
friend void swap (fixed_matrix &m1, fixed_matrix &m2) {
    m1.swap (m2);
}
// Iterator types
private:
// Use the storage array iterator
typedef typename A::const_iterator const_subiterator_type;
typedef typename A::iterator subiterator_type;
public:
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
// Index-based iterators: the position is kept as (i, j) and the element is
// looked up again on each dereference.
typedef indexed_iterator1<self_type, dense_random_access_iterator_tag> iterator1;
typedef indexed_iterator2<self_type, dense_random_access_iterator_tag> iterator2;
typedef indexed_const_iterator1<self_type, dense_random_access_iterator_tag> const_iterator1;
typedef indexed_const_iterator2<self_type, dense_random_access_iterator_tag> const_iterator2;
#else
// Pointer-like iterators wrapping the storage iterator (defined below).
class const_iterator1;
class iterator1;
class const_iterator2;
class iterator2;
#endif
typedef reverse_iterator_base1<const_iterator1> const_reverse_iterator1;
typedef reverse_iterator_base1<iterator1> reverse_iterator1;
typedef reverse_iterator_base2<const_iterator2> const_reverse_iterator2;
typedef reverse_iterator_base2<iterator2> reverse_iterator2;
// Element lookup
// find1/find2 position an iterator on element (i, j). The rank argument is
// ignored for dense storage. M and N are the compile-time extents the
// layout uses to compute the linear address into the flat storage.
BOOST_UBLAS_INLINE
const_iterator1 find1 (int /* rank */, size_type i, size_type j) const {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
return const_iterator1 (*this, i, j);
#else
return const_iterator1 (*this, data ().begin () + layout_type::address (i, M, j, N));
#endif
}
BOOST_UBLAS_INLINE
iterator1 find1 (int /* rank */, size_type i, size_type j) {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
return iterator1 (*this, i, j);
#else
return iterator1 (*this, data ().begin () + layout_type::address (i, M, j, N));
#endif
}
BOOST_UBLAS_INLINE
const_iterator2 find2 (int /* rank */, size_type i, size_type j) const {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
return const_iterator2 (*this, i, j);
#else
return const_iterator2 (*this, data ().begin () + layout_type::address (i, M, j, N));
#endif
}
BOOST_UBLAS_INLINE
iterator2 find2 (int /* rank */, size_type i, size_type j) {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
return iterator2 (*this, i, j);
#else
return iterator2 (*this, data ().begin () + layout_type::address (i, M, j, N));
#endif
}
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
// Row-direction (index 1) const iterator: ++ moves to the next row in the
// same column. It wraps a raw storage iterator; all stepping is delegated
// to layout_type so that it works for row_major and column_major alike.
class const_iterator1:
public container_const_reference<fixed_matrix>,
public random_access_iterator_base<dense_random_access_iterator_tag,
const_iterator1, value_type> {
public:
typedef typename fixed_matrix::value_type value_type;
typedef typename fixed_matrix::difference_type difference_type;
typedef typename fixed_matrix::const_reference reference;
typedef const typename fixed_matrix::pointer pointer;
typedef const_iterator2 dual_iterator_type;
typedef const_reverse_iterator2 dual_reverse_iterator_type;
// Construction and destruction
BOOST_UBLAS_INLINE
const_iterator1 ():
container_const_reference<self_type> (), it_ () {}
BOOST_UBLAS_INLINE
const_iterator1 (const self_type &m, const const_subiterator_type &it):
container_const_reference<self_type> (m), it_ (it) {}
// Implicit conversion from the mutable iterator1.
BOOST_UBLAS_INLINE
const_iterator1 (const iterator1 &it):
container_const_reference<self_type> (it ()), it_ (it.it_) {}
// Arithmetic: stepping depends on the storage layout, hence layout_type.
BOOST_UBLAS_INLINE
const_iterator1 &operator ++ () {
layout_type::increment_i (it_, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
BOOST_UBLAS_INLINE
const_iterator1 &operator -- () {
layout_type::decrement_i (it_, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
BOOST_UBLAS_INLINE
const_iterator1 &operator += (difference_type n) {
layout_type::increment_i (it_, n, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
BOOST_UBLAS_INLINE
const_iterator1 &operator -= (difference_type n) {
layout_type::decrement_i (it_, n, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
// Distance in rows; both iterators must refer to the same matrix.
BOOST_UBLAS_INLINE
difference_type operator - (const const_iterator1 &it) const {
BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
return layout_type::distance_i (it_ - it.it_, (*this) ().size1 (), (*this) ().size2 ());
}
// Dereference (bounds-checked only when BOOST_UBLAS_CHECK is active).
BOOST_UBLAS_INLINE
const_reference operator * () const {
BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
return *it_;
}
BOOST_UBLAS_INLINE
const_reference operator [] (difference_type n) const {
return *(*this + n);
}
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
// Dual iteration: begin()/end() iterate along the current row (index 2).
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_iterator2 begin () const {
const self_type &m = (*this) ();
return m.find2 (1, index1 (), 0);
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_iterator2 cbegin () const {
return begin ();
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_iterator2 end () const {
const self_type &m = (*this) ();
return m.find2 (1, index1 (), m.size2 ());
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_iterator2 cend () const {
return end ();
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_reverse_iterator2 rbegin () const {
return const_reverse_iterator2 (end ());
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_reverse_iterator2 crbegin () const {
return rbegin ();
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_reverse_iterator2 rend () const {
return const_reverse_iterator2 (begin ());
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_reverse_iterator2 crend () const {
return rend ();
}
#endif
// Indices: recovered from the offset of it_ relative to the matrix begin.
BOOST_UBLAS_INLINE
size_type index1 () const {
const self_type &m = (*this) ();
return layout_type::index_i (it_ - m.begin1 ().it_, m.size1 (), m.size2 ());
}
BOOST_UBLAS_INLINE
size_type index2 () const {
const self_type &m = (*this) ();
return layout_type::index_j (it_ - m.begin1 ().it_, m.size1 (), m.size2 ());
}
// Assignment: rebind to the other iterator's matrix and position.
BOOST_UBLAS_INLINE
const_iterator1 &operator = (const const_iterator1 &it) {
container_const_reference<self_type>::assign (&it ());
it_ = it.it_;
return *this;
}
// Comparison: only valid between iterators into the same matrix.
BOOST_UBLAS_INLINE
bool operator == (const const_iterator1 &it) const {
BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
return it_ == it.it_;
}
BOOST_UBLAS_INLINE
bool operator < (const const_iterator1 &it) const {
BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
return it_ < it.it_;
}
private:
const_subiterator_type it_;
friend class iterator1;
};
#endif
// begin1/end1 walk down the rows of column 0; end1 is positioned at row M
// (one past the last). cbegin1/cend1 are the C++11-style const aliases.
BOOST_UBLAS_INLINE
const_iterator1 begin1 () const {
return find1 (0, 0, 0);
}
BOOST_UBLAS_INLINE
const_iterator1 cbegin1 () const {
return begin1 ();
}
BOOST_UBLAS_INLINE
const_iterator1 end1 () const {
return find1 (0, M, 0);
}
BOOST_UBLAS_INLINE
const_iterator1 cend1 () const {
return end1 ();
}
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
// Mutable row-direction (index 1) iterator; mirrors const_iterator1 but
// yields modifiable references. No c*-prefixed members are needed here.
class iterator1:
public container_reference<fixed_matrix>,
public random_access_iterator_base<dense_random_access_iterator_tag,
iterator1, value_type> {
public:
typedef typename fixed_matrix::value_type value_type;
typedef typename fixed_matrix::difference_type difference_type;
typedef typename fixed_matrix::reference reference;
typedef typename fixed_matrix::pointer pointer;
typedef iterator2 dual_iterator_type;
typedef reverse_iterator2 dual_reverse_iterator_type;
// Construction and destruction
BOOST_UBLAS_INLINE
iterator1 ():
container_reference<self_type> (), it_ () {}
BOOST_UBLAS_INLINE
iterator1 (self_type &m, const subiterator_type &it):
container_reference<self_type> (m), it_ (it) {}
// Arithmetic: row stepping is delegated to the layout.
BOOST_UBLAS_INLINE
iterator1 &operator ++ () {
layout_type::increment_i (it_, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
BOOST_UBLAS_INLINE
iterator1 &operator -- () {
layout_type::decrement_i (it_, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
BOOST_UBLAS_INLINE
iterator1 &operator += (difference_type n) {
layout_type::increment_i (it_, n, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
BOOST_UBLAS_INLINE
iterator1 &operator -= (difference_type n) {
layout_type::decrement_i (it_, n, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
// Distance in rows; both iterators must belong to the same matrix.
BOOST_UBLAS_INLINE
difference_type operator - (const iterator1 &it) const {
BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
return layout_type::distance_i (it_ - it.it_, (*this) ().size1 (), (*this) ().size2 ());
}
// Dereference (bounds-checked only when BOOST_UBLAS_CHECK is active).
BOOST_UBLAS_INLINE
reference operator * () const {
BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
return *it_;
}
BOOST_UBLAS_INLINE
reference operator [] (difference_type n) const {
return *(*this + n);
}
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
// Dual iteration: begin()/end() iterate along the current row (index 2).
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
iterator2 begin () const {
self_type &m = (*this) ();
return m.find2 (1, index1 (), 0);
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
iterator2 end () const {
self_type &m = (*this) ();
return m.find2 (1, index1 (), m.size2 ());
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
reverse_iterator2 rbegin () const {
return reverse_iterator2 (end ());
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
reverse_iterator2 rend () const {
return reverse_iterator2 (begin ());
}
#endif
// Indices: recovered from the offset of it_ relative to the matrix begin.
BOOST_UBLAS_INLINE
size_type index1 () const {
self_type &m = (*this) ();
return layout_type::index_i (it_ - m.begin1 ().it_, m.size1 (), m.size2 ());
}
BOOST_UBLAS_INLINE
size_type index2 () const {
self_type &m = (*this) ();
return layout_type::index_j (it_ - m.begin1 ().it_, m.size1 (), m.size2 ());
}
// Assignment: rebind to the other iterator's matrix and position.
BOOST_UBLAS_INLINE
iterator1 &operator = (const iterator1 &it) {
container_reference<self_type>::assign (&it ());
it_ = it.it_;
return *this;
}
// Comparison: only valid between iterators into the same matrix.
BOOST_UBLAS_INLINE
bool operator == (const iterator1 &it) const {
BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
return it_ == it.it_;
}
BOOST_UBLAS_INLINE
bool operator < (const iterator1 &it) const {
BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
return it_ < it.it_;
}
private:
subiterator_type it_;
// const_iterator1's converting constructor reads it_ directly.
friend class const_iterator1;
};
#endif
// Mutable counterparts of begin1/end1 (rows of column 0; end at row M).
BOOST_UBLAS_INLINE
iterator1 begin1 () {
return find1 (0, 0, 0);
}
BOOST_UBLAS_INLINE
iterator1 end1 () {
return find1 (0, M, 0);
}
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
// Column-direction (index 2) const iterator: ++ moves to the next column
// within the same row. Stepping is delegated to layout_type's *_j helpers.
class const_iterator2:
public container_const_reference<fixed_matrix>,
public random_access_iterator_base<dense_random_access_iterator_tag,
const_iterator2, value_type> {
public:
typedef typename fixed_matrix::value_type value_type;
typedef typename fixed_matrix::difference_type difference_type;
typedef typename fixed_matrix::const_reference reference;
typedef const typename fixed_matrix::pointer pointer;
typedef const_iterator1 dual_iterator_type;
typedef const_reverse_iterator1 dual_reverse_iterator_type;
// Construction and destruction
BOOST_UBLAS_INLINE
const_iterator2 ():
container_const_reference<self_type> (), it_ () {}
BOOST_UBLAS_INLINE
const_iterator2 (const self_type &m, const const_subiterator_type &it):
container_const_reference<self_type> (m), it_ (it) {}
// Implicit conversion from the mutable iterator2.
BOOST_UBLAS_INLINE
const_iterator2 (const iterator2 &it):
container_const_reference<self_type> (it ()), it_ (it.it_) {}
// Arithmetic: column stepping is delegated to the layout.
BOOST_UBLAS_INLINE
const_iterator2 &operator ++ () {
layout_type::increment_j (it_, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
BOOST_UBLAS_INLINE
const_iterator2 &operator -- () {
layout_type::decrement_j (it_, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
BOOST_UBLAS_INLINE
const_iterator2 &operator += (difference_type n) {
layout_type::increment_j (it_, n, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
BOOST_UBLAS_INLINE
const_iterator2 &operator -= (difference_type n) {
layout_type::decrement_j (it_, n, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
// Distance in columns; both iterators must refer to the same matrix.
BOOST_UBLAS_INLINE
difference_type operator - (const const_iterator2 &it) const {
BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
return layout_type::distance_j (it_ - it.it_, (*this) ().size1 (), (*this) ().size2 ());
}
// Dereference (bounds-checked only when BOOST_UBLAS_CHECK is active).
BOOST_UBLAS_INLINE
const_reference operator * () const {
BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
return *it_;
}
BOOST_UBLAS_INLINE
const_reference operator [] (difference_type n) const {
return *(*this + n);
}
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
// Dual iteration: begin()/end() iterate down the current column (index 1).
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_iterator1 begin () const {
const self_type &m = (*this) ();
return m.find1 (1, 0, index2 ());
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_iterator1 cbegin () const {
return begin ();
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_iterator1 end () const {
const self_type &m = (*this) ();
return m.find1 (1, m.size1 (), index2 ());
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_iterator1 cend () const {
return end ();
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_reverse_iterator1 rbegin () const {
return const_reverse_iterator1 (end ());
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_reverse_iterator1 crbegin () const {
return rbegin ();
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_reverse_iterator1 rend () const {
return const_reverse_iterator1 (begin ());
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
const_reverse_iterator1 crend () const {
return rend ();
}
#endif
// Indices: recovered from the offset of it_ relative to the matrix begin.
BOOST_UBLAS_INLINE
size_type index1 () const {
const self_type &m = (*this) ();
return layout_type::index_i (it_ - m.begin2 ().it_, m.size1 (), m.size2 ());
}
BOOST_UBLAS_INLINE
size_type index2 () const {
const self_type &m = (*this) ();
return layout_type::index_j (it_ - m.begin2 ().it_, m.size1 (), m.size2 ());
}
// Assignment: rebind to the other iterator's matrix and position.
BOOST_UBLAS_INLINE
const_iterator2 &operator = (const const_iterator2 &it) {
container_const_reference<self_type>::assign (&it ());
it_ = it.it_;
return *this;
}
// Comparison: only valid between iterators into the same matrix.
BOOST_UBLAS_INLINE
bool operator == (const const_iterator2 &it) const {
BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
return it_ == it.it_;
}
BOOST_UBLAS_INLINE
bool operator < (const const_iterator2 &it) const {
BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
return it_ < it.it_;
}
private:
const_subiterator_type it_;
friend class iterator2;
};
#endif
// begin2/end2 walk along the columns of row 0; end2 is positioned at
// column N (one past the last). cbegin2/cend2 are the const aliases.
BOOST_UBLAS_INLINE
const_iterator2 begin2 () const {
return find2 (0, 0, 0);
}
BOOST_UBLAS_INLINE
const_iterator2 cbegin2 () const {
return begin2 ();
}
BOOST_UBLAS_INLINE
const_iterator2 end2 () const {
return find2 (0, 0, N);
}
BOOST_UBLAS_INLINE
const_iterator2 cend2 () const {
return end2 ();
}
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
// Mutable column-direction (index 2) iterator; mirrors const_iterator2 but
// yields modifiable references.
class iterator2:
public container_reference<fixed_matrix>,
public random_access_iterator_base<dense_random_access_iterator_tag,
iterator2, value_type> {
public:
typedef typename fixed_matrix::value_type value_type;
typedef typename fixed_matrix::difference_type difference_type;
typedef typename fixed_matrix::reference reference;
typedef typename fixed_matrix::pointer pointer;
typedef iterator1 dual_iterator_type;
typedef reverse_iterator1 dual_reverse_iterator_type;
// Construction and destruction
BOOST_UBLAS_INLINE
iterator2 ():
container_reference<self_type> (), it_ () {}
BOOST_UBLAS_INLINE
iterator2 (self_type &m, const subiterator_type &it):
container_reference<self_type> (m), it_ (it) {}
// Arithmetic: column stepping is delegated to the layout.
BOOST_UBLAS_INLINE
iterator2 &operator ++ () {
layout_type::increment_j (it_, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
BOOST_UBLAS_INLINE
iterator2 &operator -- () {
layout_type::decrement_j (it_, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
BOOST_UBLAS_INLINE
iterator2 &operator += (difference_type n) {
layout_type::increment_j (it_, n, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
BOOST_UBLAS_INLINE
iterator2 &operator -= (difference_type n) {
layout_type::decrement_j (it_, n, (*this) ().size1 (), (*this) ().size2 ());
return *this;
}
// Distance in columns; both iterators must belong to the same matrix.
BOOST_UBLAS_INLINE
difference_type operator - (const iterator2 &it) const {
BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
return layout_type::distance_j (it_ - it.it_, (*this) ().size1 (), (*this) ().size2 ());
}
// Dereference (bounds-checked only when BOOST_UBLAS_CHECK is active).
BOOST_UBLAS_INLINE
reference operator * () const {
BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
return *it_;
}
BOOST_UBLAS_INLINE
reference operator [] (difference_type n) const {
return *(*this + n);
}
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
// Dual iteration: begin()/end() iterate down the current column (index 1).
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
iterator1 begin () const {
self_type &m = (*this) ();
return m.find1 (1, 0, index2 ());
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
iterator1 end () const {
self_type &m = (*this) ();
return m.find1 (1, m.size1 (), index2 ());
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
reverse_iterator1 rbegin () const {
return reverse_iterator1 (end ());
}
BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
typename self_type::
#endif
reverse_iterator1 rend () const {
return reverse_iterator1 (begin ());
}
#endif
// Indices: recovered from the offset of it_ relative to the matrix begin.
BOOST_UBLAS_INLINE
size_type index1 () const {
self_type &m = (*this) ();
return layout_type::index_i (it_ - m.begin2 ().it_, m.size1 (), m.size2 ());
}
BOOST_UBLAS_INLINE
size_type index2 () const {
self_type &m = (*this) ();
return layout_type::index_j (it_ - m.begin2 ().it_, m.size1 (), m.size2 ());
}
// Assignment: rebind to the other iterator's matrix and position.
BOOST_UBLAS_INLINE
iterator2 &operator = (const iterator2 &it) {
container_reference<self_type>::assign (&it ());
it_ = it.it_;
return *this;
}
// Comparison: only valid between iterators into the same matrix.
BOOST_UBLAS_INLINE
bool operator == (const iterator2 &it) const {
BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
return it_ == it.it_;
}
BOOST_UBLAS_INLINE
bool operator < (const iterator2 &it) const {
BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
return it_ < it.it_;
}
private:
subiterator_type it_;
// const_iterator2's converting constructor reads it_ directly.
friend class const_iterator2;
};
#endif
// Mutable counterparts of begin2/end2 (columns of row 0; end at column N).
BOOST_UBLAS_INLINE
iterator2 begin2 () {
return find2 (0, 0, 0);
}
BOOST_UBLAS_INLINE
iterator2 end2 () {
return find2 (0, 0, N);
}
// Reverse iterators
// Adapters over the forward iterators: rbegin wraps end, rend wraps begin,
// in const, c-prefixed const-alias, and mutable flavours.
BOOST_UBLAS_INLINE
const_reverse_iterator1 rbegin1 () const {
return const_reverse_iterator1 (end1 ());
}
BOOST_UBLAS_INLINE
const_reverse_iterator1 crbegin1 () const {
return rbegin1 ();
}
BOOST_UBLAS_INLINE
const_reverse_iterator1 rend1 () const {
return const_reverse_iterator1 (begin1 ());
}
BOOST_UBLAS_INLINE
const_reverse_iterator1 crend1 () const {
return rend1 ();
}
BOOST_UBLAS_INLINE
reverse_iterator1 rbegin1 () {
return reverse_iterator1 (end1 ());
}
BOOST_UBLAS_INLINE
reverse_iterator1 rend1 () {
return reverse_iterator1 (begin1 ());
}
BOOST_UBLAS_INLINE
const_reverse_iterator2 rbegin2 () const {
return const_reverse_iterator2 (end2 ());
}
BOOST_UBLAS_INLINE
const_reverse_iterator2 crbegin2 () const {
return rbegin2 ();
}
BOOST_UBLAS_INLINE
const_reverse_iterator2 rend2 () const {
return const_reverse_iterator2 (begin2 ());
}
BOOST_UBLAS_INLINE
const_reverse_iterator2 crend2 () const {
return rend2 ();
}
BOOST_UBLAS_INLINE
reverse_iterator2 rbegin2 () {
return reverse_iterator2 (end2 ());
}
BOOST_UBLAS_INLINE
reverse_iterator2 rend2 () {
return reverse_iterator2 (begin2 ());
}
// Serialization
// Archive only the flat element storage; the extents are compile-time
// template parameters and therefore need no archiving.
template<class Archive>
void serialize(Archive & ar, const unsigned int /* file_version */){
ar & serialization::make_nvp("data",data_);
}
private:
// Flat storage holding all elements in layout order.
array_type data_;
};
#endif // BOOST_UBLAS_CPP_GE_2011
/** \brief A dense matrix of values of type \c T with a variable size bounded to a maximum of \f$M\f$ by \f$N\f$.
*
* For a \f$(m \times n)\f$-dimensional matrix and \f$ 0 \leq i < m, 0 \leq j < n\f$, every element \f$m_{i,j}\f$ is mapped
* to the \f$(i.n + j)\f$-th element of the container for row major orientation or the \f$(i + j.m)\f$-th element
* of the container for column major orientation. Finally in a dense matrix all elements are represented in memory
* in a contiguous chunk of memory.
*
* Orientation can be specified. Default is \c row_major
* The default constructor creates the matrix with size \f$M\f$ by \f$N\f$. Elements are constructed by the storage
* type \c bounded_array, which need not initialise their value.
*
* \tparam T the type of object stored in the matrix (like double, float, complex, etc...)
* \tparam M maximum and default number of rows (if not specified at construction)
* \tparam N maximum and default number of columns (if not specified at construction)
* \tparam L the storage organization. It can be either \c row_major or \c column_major. Default is \c row_major
*/
template<class T, std::size_t M, std::size_t N, class L>
class bounded_matrix:
public matrix<T, L, bounded_array<T, M * N> > {
typedef matrix<T, L, bounded_array<T, M * N> > matrix_type;
public:
typedef typename matrix_type::size_type size_type;
// Compile-time upper bounds on the two dimensions.
static const size_type max_size1 = M;
static const size_type max_size2 = N;
// Construction and destruction
// Default construction uses the maximum size M x N.
BOOST_UBLAS_INLINE
bounded_matrix ():
matrix_type (M, N) {}
// Construct with explicit sizes (expected within the M x N bound —
// enforcement, if any, lives in the bounded_array storage).
BOOST_UBLAS_INLINE
bounded_matrix (size_type size1, size_type size2):
matrix_type (size1, size2) {}
BOOST_UBLAS_INLINE
bounded_matrix (const bounded_matrix &m):
matrix_type (m) {}
template<class A2> // Allow matrix<T, L, bounded_array<M,N> > construction
BOOST_UBLAS_INLINE
bounded_matrix (const matrix<T, L, A2> &m):
matrix_type (m) {}
// Construct by evaluating an arbitrary matrix expression.
template<class AE>
BOOST_UBLAS_INLINE
bounded_matrix (const matrix_expression<AE> &ae):
matrix_type (ae) {}
BOOST_UBLAS_INLINE
~bounded_matrix () {}
// Assignment — all overloads simply forward to the matrix base class.
#ifdef BOOST_UBLAS_MOVE_SEMANTICS
/*! @note "pass by value" the key idea to enable move semantics */
BOOST_UBLAS_INLINE
bounded_matrix &operator = (bounded_matrix m) {
matrix_type::operator = (m);
return *this;
}
#else
BOOST_UBLAS_INLINE
bounded_matrix &operator = (const bounded_matrix &m) {
matrix_type::operator = (m);
return *this;
}
#endif
template<class L2, class A2> // Generic matrix assignment
BOOST_UBLAS_INLINE
bounded_matrix &operator = (const matrix<T, L2, A2> &m) {
matrix_type::operator = (m);
return *this;
}
template<class C> // Container assignment without temporary
BOOST_UBLAS_INLINE
bounded_matrix &operator = (const matrix_container<C> &m) {
matrix_type::operator = (m);
return *this;
}
template<class AE>
BOOST_UBLAS_INLINE
bounded_matrix &operator = (const matrix_expression<AE> &ae) {
matrix_type::operator = (ae);
return *this;
}
};
/** \brief A dense matrix of values of type \c T stored as a vector of vectors.
*
* The data is stored as a vector of vectors, so while the data inside each row (or column) is contiguous,
* the rows (or columns) themselves are not stored in one contiguous chunk of memory.
* Orientation and storage can also be specified, otherwise a row major and unbounded arrays are used.
* The storage type defaults to \c unbounded_array<unbounded_array<T>> and orientation is \c row_major. It is \b not
* required by the storage to initialize elements of the matrix. For a \f$(m \times n)\f$-dimensional matrix and
* \f$ 0 \leq i < m, 0 \leq j < n\f$, every element \f$m_{i,j}\f$ is mapped to the \f$(i.n + j)\f$-th element of the
* container for row major orientation or the \f$(i + j.m)\f$-th element of the container for column major orientation.
*
* \tparam T the type of object stored in the matrix (like double, float, complex, etc...)
* \tparam L the storage organization. It can be either \c row_major or \c column_major. By default it is \c row_major
* \tparam A the type of Storage array. By default, it is an \c unbounded_array<unbounded_array<T> >
*/
template<class T, class L, class A>
class vector_of_vector:
public matrix_container<vector_of_vector<T, L, A> > {
typedef T *pointer;
typedef L layout_type;
typedef vector_of_vector<T, L, A> self_type;
public:
#ifdef BOOST_UBLAS_ENABLE_PROXY_SHORTCUTS
using matrix_container<self_type>::operator ();
#endif
typedef typename A::size_type size_type;
typedef typename A::difference_type difference_type;
typedef T value_type;
typedef const T &const_reference;
typedef T &reference;
// A is a vector-of-vectors storage type; A::value_type is the inner vector.
typedef A array_type;
typedef const matrix_reference<const self_type> const_closure_type;
typedef matrix_reference<self_type> closure_type;
typedef vector<T, typename A::value_type> vector_temporary_type;
typedef self_type matrix_temporary_type;
typedef dense_tag storage_category;
// This could be better for performance,
// typedef typename unknown_orientation_tag orientation_category;
// but others depend on the orientation information...
typedef typename L::orientation_category orientation_category;
// Construction and destruction
// An empty matrix still allocates one inner vector (data_ (1)) —
// NOTE(review): this extra vector appears to serve as an end sentinel for
// the iterators (resize below also allocates size_M + 1); confirm before
// changing.
BOOST_UBLAS_INLINE
vector_of_vector ():
matrix_container<self_type> (),
size1_ (0), size2_ (0), data_ (1) {}
// Sized construction: delegate the actual allocation to resize.
BOOST_UBLAS_INLINE
vector_of_vector (size_type size1, size_type size2):
matrix_container<self_type> (),
size1_ (size1), size2_ (size2), data_ (1) {
resize (size1, size2, true);
}
BOOST_UBLAS_INLINE
vector_of_vector (const vector_of_vector &m):
matrix_container<self_type> (),
size1_ (m.size1_), size2_ (m.size2_), data_ (m.data_) {}
// Construct by evaluating a matrix expression: allocate size_M + 1 inner
// vectors, size the first size_M of them, then copy the elements in.
template<class AE>
BOOST_UBLAS_INLINE
vector_of_vector (const matrix_expression<AE> &ae):
matrix_container<self_type> (),
size1_ (ae ().size1 ()), size2_ (ae ().size2 ()), data_ (layout_type::size_M (size1_, size2_) + 1) {
for (size_type k = 0; k < layout_type::size_M (size1_, size2_); ++ k)
data ()[k].resize (layout_type::size_m (size1_, size2_));
matrix_assign<scalar_assign> (*this, ae);
}
// Accessors
/// Number of rows.
BOOST_UBLAS_INLINE
size_type size1 () const {
return size1_;
}
/// Number of columns.
BOOST_UBLAS_INLINE
size_type size2 () const {
return size2_;
}
// Storage accessors
/// Read-only access to the underlying vector-of-vectors storage.
BOOST_UBLAS_INLINE
const array_type &data () const {
return data_;
}
/// Mutable access to the underlying vector-of-vectors storage.
BOOST_UBLAS_INLINE
array_type &data () {
return data_;
}
// Resizing
// Resize to size1 x size2. When preserve is true a fill value is forwarded
// to the underlying resize — presumably keeping existing data and
// value-initializing any growth (NOTE(review): confirm against the storage
// type's resize semantics). One extra major-direction vector beyond size_M
// is allocated, apparently as an end sentinel for the iterators.
BOOST_UBLAS_INLINE
void resize (size_type size1, size_type size2, bool preserve = true) {
size1_ = size1;
size2_ = size2;
if (preserve)
data ().resize (layout_type::size_M (size1, size2) + 1, typename array_type::value_type ());
else
data ().resize (layout_type::size_M (size1, size2) + 1);
for (size_type k = 0; k < layout_type::size_M (size1, size2); ++ k) {
if (preserve)
data () [k].resize (layout_type::size_m (size1, size2), value_type ());
else
data () [k].resize (layout_type::size_m (size1, size2));
}
}
// Element access
// (i, j) is mapped by the layout to an outer index (index_M) selecting the
// inner vector and an inner index (index_m) within it.
BOOST_UBLAS_INLINE
const_reference operator () (size_type i, size_type j) const {
return data () [layout_type::index_M (i, j)] [layout_type::index_m (i, j)];
}
BOOST_UBLAS_INLINE
reference at_element (size_type i, size_type j) {
return data () [layout_type::index_M (i, j)] [layout_type::index_m (i, j)];
}
BOOST_UBLAS_INLINE
reference operator () (size_type i, size_type j) {
return at_element (i, j);
}
// Element assignment
/// Store t at (i, j) and return a reference to the stored element.
BOOST_UBLAS_INLINE
reference insert_element (size_type i, size_type j, const_reference t) {
return (at_element (i, j) = t);
}
/// Reset the element at (i, j) to the default-constructed ("zero") value.
BOOST_UBLAS_INLINE
void erase_element (size_type i, size_type j) {
at_element (i, j) = value_type/*zero*/();
}
// Zeroing
/** Reset every element to the default-constructed ("zero") value.
* Each of the size_M inner vectors is overwritten in turn; the extra
* trailing vector allocated by resize is left untouched, exactly as before.
*/
BOOST_UBLAS_INLINE
void clear () {
    const size_type rows = layout_type::size_M (size1_, size2_);
    const value_type zero = value_type ();
    for (size_type k = 0; k < rows; ++ k) {
        std::fill (data () [k].begin (), data () [k].end (), zero);
    }
}
// Assignment
// Copy assignment duplicates sizes and storage directly.
BOOST_UBLAS_INLINE
vector_of_vector &operator = (const vector_of_vector &m) {
size1_ = m.size1_;
size2_ = m.size2_;
data () = m.data ();
return *this;
}
// Steal the contents of a temporary by swapping with it.
BOOST_UBLAS_INLINE
vector_of_vector &assign_temporary (vector_of_vector &m) {
swap (m);
return *this;
}
// Assignment from a general expression: evaluate into a temporary first,
// because `ae` may alias *this.
template<class AE>
BOOST_UBLAS_INLINE
vector_of_vector &operator = (const matrix_expression<AE> &ae) {
self_type temporary (ae);
return assign_temporary (temporary);
}
// Containers cannot alias an expression involving *this, so resize and
// assign directly without a temporary.
template<class C> // Container assignment without temporary
BOOST_UBLAS_INLINE
vector_of_vector &operator = (const matrix_container<C> &m) {
resize (m ().size1 (), m ().size2 (), false);
assign (m);
return *this;
}
// Direct (alias-unsafe) assignment from an expression.
template<class AE>
BOOST_UBLAS_INLINE
vector_of_vector &assign (const matrix_expression<AE> &ae) {
matrix_assign<scalar_assign> (*this, ae);
return *this;
}
// Compound assignment. Expression overloads go through a temporary (the
// expression may alias *this); container overloads update in place.
template<class AE>
BOOST_UBLAS_INLINE
vector_of_vector& operator += (const matrix_expression<AE> &ae) {
self_type temporary (*this + ae);
return assign_temporary (temporary);
}
template<class C> // Container assignment without temporary
BOOST_UBLAS_INLINE
vector_of_vector &operator += (const matrix_container<C> &m) {
plus_assign (m);
return *this;
}
template<class AE>
BOOST_UBLAS_INLINE
vector_of_vector &plus_assign (const matrix_expression<AE> &ae) {
matrix_assign<scalar_plus_assign> (*this, ae);
return *this;
}
template<class AE>
BOOST_UBLAS_INLINE
vector_of_vector& operator -= (const matrix_expression<AE> &ae) {
self_type temporary (*this - ae);
return assign_temporary (temporary);
}
template<class C> // Container assignment without temporary
BOOST_UBLAS_INLINE
vector_of_vector &operator -= (const matrix_container<C> &m) {
minus_assign (m);
return *this;
}
template<class AE>
BOOST_UBLAS_INLINE
vector_of_vector &minus_assign (const matrix_expression<AE> &ae) {
matrix_assign<scalar_minus_assign> (*this, ae);
return *this;
}
// Scalar compound assignment: multiply/divide every element by `at`.
template<class AT>
BOOST_UBLAS_INLINE
vector_of_vector& operator *= (const AT &at) {
matrix_assign_scalar<scalar_multiplies_assign> (*this, at);
return *this;
}
template<class AT>
BOOST_UBLAS_INLINE
vector_of_vector& operator /= (const AT &at) {
matrix_assign_scalar<scalar_divides_assign> (*this, at);
return *this;
}
// Swapping
/// Exchange sizes and storage with m; self-swap is a no-op.
BOOST_UBLAS_INLINE
void swap (vector_of_vector &m) {
if (this != &m) {
std::swap (size1_, m.size1_);
std::swap (size2_, m.size2_);
data ().swap (m.data ());
}
}
/// Free-function swap so ADL (and generic algorithms) find the member swap.
BOOST_UBLAS_INLINE
friend void swap (vector_of_vector &m1, vector_of_vector &m2) {
m1.swap (m2);
}
// Iterator types
private:
// Use the vector iterator
typedef typename A::value_type::const_iterator const_subiterator_type;
typedef typename A::value_type::iterator subiterator_type;
public:
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
typedef indexed_iterator1<self_type, dense_random_access_iterator_tag> iterator1;
typedef indexed_iterator2<self_type, dense_random_access_iterator_tag> iterator2;
typedef indexed_const_iterator1<self_type, dense_random_access_iterator_tag> const_iterator1;
typedef indexed_const_iterator2<self_type, dense_random_access_iterator_tag> const_iterator2;
#else
class const_iterator1;
class iterator1;
class const_iterator2;
class iterator2;
#endif
typedef reverse_iterator_base1<const_iterator1> const_reverse_iterator1;
typedef reverse_iterator_base1<iterator1> reverse_iterator1;
typedef reverse_iterator_base2<const_iterator2> const_reverse_iterator2;
typedef reverse_iterator_base2<iterator2> reverse_iterator2;
// Element lookup
        // find1: create a row-direction (index-1) iterator positioned at
        // element (i, j). The rank argument is ignored for dense storage.
        BOOST_UBLAS_INLINE
        const_iterator1 find1 (int /*rank*/, size_type i, size_type j) const {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
            return const_iterator1 (*this, i, j);
#else
            // Locate the inner vector via the layout's major index and
            // offset into it with the minor index.
            return const_iterator1 (*this, i, j, data () [layout_type::index_M (i, j)].begin () + layout_type::index_m (i, j));
#endif
        }
        BOOST_UBLAS_INLINE
        iterator1 find1 (int /*rank*/, size_type i, size_type j) {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
            return iterator1 (*this, i, j);
#else
            return iterator1 (*this, i, j, data () [layout_type::index_M (i, j)].begin () + layout_type::index_m (i, j));
#endif
        }
        // find2: same as find1, but for column-direction (index-2) iterators.
        BOOST_UBLAS_INLINE
        const_iterator2 find2 (int /*rank*/, size_type i, size_type j) const {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
            return const_iterator2 (*this, i, j);
#else
            return const_iterator2 (*this, i, j, data () [layout_type::index_M (i, j)].begin () + layout_type::index_m (i, j));
#endif
        }
        BOOST_UBLAS_INLINE
        iterator2 find2 (int /*rank*/, size_type i, size_type j) {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
            return iterator2 (*this, i, j);
#else
            return iterator2 (*this, i, j, data () [layout_type::index_M (i, j)].begin () + layout_type::index_m (i, j));
#endif
        }
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Dense random-access const iterator over index 1 (rows) at a fixed
        // column j_. Tracks the logical position (i_, j_) plus a cached
        // iterator it_ into the underlying inner vector.
        class const_iterator1:
            public container_const_reference<vector_of_vector>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               const_iterator1, value_type> {
        public:
            typedef typename vector_of_vector::value_type value_type;
            typedef typename vector_of_vector::difference_type difference_type;
            typedef typename vector_of_vector::const_reference reference;
            typedef const typename vector_of_vector::pointer pointer;
            typedef const_iterator2 dual_iterator_type;
            typedef const_reverse_iterator2 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            const_iterator1 ():
                container_const_reference<self_type> (), i_ (), j_ (), it_ () {}
            BOOST_UBLAS_INLINE
            const_iterator1 (const self_type &m, size_type i, size_type j, const const_subiterator_type &it):
                container_const_reference<self_type> (m), i_ (i), j_ (j), it_ (it) {}
            // Implicit conversion from the mutable iterator1.
            BOOST_UBLAS_INLINE
            const_iterator1 (const iterator1 &it):
                container_const_reference<self_type> (it ()), i_ (it.i_), j_ (it.j_), it_ (it.it_) {}
            // Arithmetic
            BOOST_UBLAS_INLINE
            const_iterator1 &operator ++ () {
                ++ i_;
                const self_type &m = (*this) ();
                // If the layout advances fast along i the cached subiterator
                // can be stepped directly; otherwise re-resolve via find1.
                if (layout_type::fast_i ())
                    ++ it_;
                else
                    it_ = m.find1 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator -- () {
                -- i_;
                const self_type &m = (*this) ();
                if (layout_type::fast_i ())
                    -- it_;
                else
                    it_ = m.find1 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator += (difference_type n) {
                i_ += n;
                const self_type &m = (*this) ();
                // Random jumps always re-resolve the subiterator.
                it_ = m.find1 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator -= (difference_type n) {
                i_ -= n;
                const self_type &m = (*this) ();
                it_ = m.find1 (1, i_, j_).it_;
                return *this;
            }
            // Distance between two iterators on the same column.
            BOOST_UBLAS_INLINE
            difference_type operator - (const const_iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (index2 () == it.index2 (), bad_index ());
                return index1 () - it.index1 ();
            }
            // Dereference
            BOOST_UBLAS_INLINE
            const_reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return *it_;
            }
            BOOST_UBLAS_INLINE
            const_reference operator [] (difference_type n) const {
                return *(*this + n);
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            // Dual iteration: begin/end of the current row (index-2 range).
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 begin () const {
                const self_type &m = (*this) ();
                return m.find2 (1, index1 (), 0);
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 cbegin () const {
                return begin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 end () const {
                const self_type &m = (*this) ();
                return m.find2 (1, index1 (), m.size2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 cend () const {
                return end ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 rbegin () const {
                return const_reverse_iterator2 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 crbegin () const {
                return rbegin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 rend () const {
                return const_reverse_iterator2 (begin ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 crend () const {
                return rend ();
            }
#endif
            // Indices
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                return i_;
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                return j_;
            }
            // Assignment
            BOOST_UBLAS_INLINE
            const_iterator1 &operator = (const const_iterator1 &it) {
                container_const_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }
            // Comparison (only valid for iterators on the same container/column)
            BOOST_UBLAS_INLINE
            bool operator == (const const_iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (index2 () == it.index2 (), bad_index ());
                return it_ == it.it_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const const_iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (index2 () == it.index2 (), bad_index ());
                return it_ < it.it_;
            }
        private:
            size_type i_;
            size_type j_;
            const_subiterator_type it_;
            friend class iterator1;
        };
#endif
        // Row-direction const range over the whole matrix: [begin1, end1).
        BOOST_UBLAS_INLINE
        const_iterator1 begin1 () const {
            return find1 (0, 0, 0);
        }
        BOOST_UBLAS_INLINE
        const_iterator1 cbegin1 () const {
            return begin1 ();
        }
        BOOST_UBLAS_INLINE
        const_iterator1 end1 () const {
            return find1 (0, size1_, 0);
        }
        BOOST_UBLAS_INLINE
        const_iterator1 cend1 () const {
            return end1 ();
        }
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Mutable counterpart of const_iterator1: dense random-access
        // iteration over index 1 (rows) at a fixed column j_.
        class iterator1:
            public container_reference<vector_of_vector>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               iterator1, value_type> {
        public:
            typedef typename vector_of_vector::value_type value_type;
            typedef typename vector_of_vector::difference_type difference_type;
            typedef typename vector_of_vector::reference reference;
            typedef typename vector_of_vector::pointer pointer;
            typedef iterator2 dual_iterator_type;
            typedef reverse_iterator2 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            iterator1 ():
                container_reference<self_type> (), i_ (), j_ (), it_ () {}
            BOOST_UBLAS_INLINE
            iterator1 (self_type &m, size_type i, size_type j, const subiterator_type &it):
                container_reference<self_type> (m), i_ (i), j_ (j), it_ (it) {}
            // Arithmetic
            BOOST_UBLAS_INLINE
            iterator1 &operator ++ () {
                ++ i_;
                self_type &m = (*this) ();
                // Step the cached subiterator when the layout is fast along
                // i; otherwise re-resolve it via find1.
                if (layout_type::fast_i ())
                    ++ it_;
                else
                    it_ = m.find1 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator1 &operator -- () {
                -- i_;
                self_type &m = (*this) ();
                if (layout_type::fast_i ())
                    -- it_;
                else
                    it_ = m.find1 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator1 &operator += (difference_type n) {
                i_ += n;
                self_type &m = (*this) ();
                it_ = m.find1 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator1 &operator -= (difference_type n) {
                i_ -= n;
                self_type &m = (*this) ();
                it_ = m.find1 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            difference_type operator - (const iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (index2 () == it.index2 (), bad_index ());
                return index1 () - it.index1 ();
            }
            // Dereference (yields a mutable reference)
            BOOST_UBLAS_INLINE
            reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return *it_;
            }
            BOOST_UBLAS_INLINE
            reference operator [] (difference_type n) const {
                return *(*this + n);
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            // Dual iteration: begin/end of the current row (index-2 range).
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            iterator2 begin () const {
                self_type &m = (*this) ();
                return m.find2 (1, index1 (), 0);
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            iterator2 end () const {
                self_type &m = (*this) ();
                return m.find2 (1, index1 (), m.size2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            reverse_iterator2 rbegin () const {
                return reverse_iterator2 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            reverse_iterator2 rend () const {
                return reverse_iterator2 (begin ());
            }
#endif
            // Indices
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                return i_;
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                return j_;
            }
            // Assignment
            BOOST_UBLAS_INLINE
            iterator1 &operator = (const iterator1 &it) {
                container_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }
            // Comparison (only valid for iterators on the same container/column)
            BOOST_UBLAS_INLINE
            bool operator == (const iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (index2 () == it.index2 (), bad_index ());
                return it_ == it.it_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (index2 () == it.index2 (), bad_index ());
                return it_ < it.it_;
            }
        private:
            size_type i_;
            size_type j_;
            subiterator_type it_;
            // const_iterator1's converting constructor reads our privates.
            friend class const_iterator1;
        };
#endif
        // Row-direction mutable range over the whole matrix: [begin1, end1).
        BOOST_UBLAS_INLINE
        iterator1 begin1 () {
            return find1 (0, 0, 0);
        }
        BOOST_UBLAS_INLINE
        iterator1 end1 () {
            return find1 (0, size1_, 0);
        }
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Dense random-access const iterator over index 2 (columns) at a
        // fixed row i_. Tracks the logical position (i_, j_) plus a cached
        // iterator it_ into the underlying inner vector.
        class const_iterator2:
            public container_const_reference<vector_of_vector>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               const_iterator2, value_type> {
        public:
            typedef typename vector_of_vector::value_type value_type;
            typedef typename vector_of_vector::difference_type difference_type;
            typedef typename vector_of_vector::const_reference reference;
            typedef const typename vector_of_vector::pointer pointer;
            typedef const_iterator1 dual_iterator_type;
            typedef const_reverse_iterator1 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            const_iterator2 ():
                container_const_reference<self_type> (), i_ (), j_ (), it_ () {}
            BOOST_UBLAS_INLINE
            const_iterator2 (const self_type &m, size_type i, size_type j, const const_subiterator_type &it):
                container_const_reference<self_type> (m), i_ (i), j_ (j), it_ (it) {}
            // Implicit conversion from the mutable iterator2.
            BOOST_UBLAS_INLINE
            const_iterator2 (const iterator2 &it):
                container_const_reference<self_type> (it ()), i_ (it.i_), j_ (it.j_), it_ (it.it_) {}
            // Arithmetic
            BOOST_UBLAS_INLINE
            const_iterator2 &operator ++ () {
                ++ j_;
                const self_type &m = (*this) ();
                // If the layout advances fast along j the cached subiterator
                // can be stepped directly; otherwise re-resolve via find2.
                if (layout_type::fast_j ())
                    ++ it_;
                else
                    it_ = m.find2 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator -- () {
                -- j_;
                const self_type &m = (*this) ();
                if (layout_type::fast_j ())
                    -- it_;
                else
                    it_ = m.find2 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator += (difference_type n) {
                j_ += n;
                const self_type &m = (*this) ();
                // Random jumps always re-resolve the subiterator.
                it_ = m.find2 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator -= (difference_type n) {
                j_ -= n;
                const self_type &m = (*this) ();
                it_ = m.find2 (1, i_, j_).it_;
                return *this;
            }
            // Distance between two iterators on the same row.
            BOOST_UBLAS_INLINE
            difference_type operator - (const const_iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (index1 () == it.index1 (), bad_index ());
                return index2 () - it.index2 ();
            }
            // Dereference
            BOOST_UBLAS_INLINE
            const_reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return *it_;
            }
            BOOST_UBLAS_INLINE
            const_reference operator [] (difference_type n) const {
                return *(*this + n);
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            // Dual iteration: begin/end of the current column (index-1 range).
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 begin () const {
                const self_type &m = (*this) ();
                return m.find1 (1, 0, index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 cbegin () const {
                return begin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 end () const {
                const self_type &m = (*this) ();
                return m.find1 (1, m.size1 (), index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 cend () const {
                return end ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 rbegin () const {
                return const_reverse_iterator1 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 crbegin () const {
                return rbegin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 rend () const {
                return const_reverse_iterator1 (begin ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            // FIX: crend must return the same type as rend()
            // (const_reverse_iterator1, the dual index-1 reverse iterator);
            // it was declared as const_reverse_iterator2, which cannot be
            // constructed from rend()'s result and would not compile when
            // instantiated. Mirrors const_iterator1::crend.
            const_reverse_iterator1 crend () const {
                return rend ();
            }
#endif
            // Indices
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                return i_;
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                return j_;
            }
            // Assignment
            BOOST_UBLAS_INLINE
            const_iterator2 &operator = (const const_iterator2 &it) {
                container_const_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }
            // Comparison (only valid for iterators on the same container/row)
            BOOST_UBLAS_INLINE
            bool operator == (const const_iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (index1 () == it.index1 (), bad_index ());
                return it_ == it.it_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const const_iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (index1 () == it.index1 (), bad_index ());
                return it_ < it.it_;
            }
        private:
            size_type i_;
            size_type j_;
            const_subiterator_type it_;
            friend class iterator2;
        };
#endif
        // Column-direction const range over the whole matrix: [begin2, end2).
        BOOST_UBLAS_INLINE
        const_iterator2 begin2 () const {
            return find2 (0, 0, 0);
        }
        BOOST_UBLAS_INLINE
        const_iterator2 cbegin2 () const {
            return begin2 ();
        }
        BOOST_UBLAS_INLINE
        const_iterator2 end2 () const {
            return find2 (0, 0, size2_);
        }
        BOOST_UBLAS_INLINE
        const_iterator2 cend2 () const {
            return end2 ();
        }
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Mutable counterpart of const_iterator2: dense random-access
        // iteration over index 2 (columns) at a fixed row i_.
        class iterator2:
            public container_reference<vector_of_vector>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               iterator2, value_type> {
        public:
            typedef typename vector_of_vector::value_type value_type;
            typedef typename vector_of_vector::difference_type difference_type;
            typedef typename vector_of_vector::reference reference;
            typedef typename vector_of_vector::pointer pointer;
            typedef iterator1 dual_iterator_type;
            typedef reverse_iterator1 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            iterator2 ():
                container_reference<self_type> (), i_ (), j_ (), it_ () {}
            BOOST_UBLAS_INLINE
            iterator2 (self_type &m, size_type i, size_type j, const subiterator_type &it):
                container_reference<self_type> (m), i_ (i), j_ (j), it_ (it) {}
            // Arithmetic
            BOOST_UBLAS_INLINE
            iterator2 &operator ++ () {
                ++ j_;
                self_type &m = (*this) ();
                // Step the cached subiterator when the layout is fast along
                // j; otherwise re-resolve it via find2.
                if (layout_type::fast_j ())
                    ++ it_;
                else
                    it_ = m.find2 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator2 &operator -- () {
                -- j_;
                self_type &m = (*this) ();
                if (layout_type::fast_j ())
                    -- it_;
                else
                    it_ = m.find2 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator2 &operator += (difference_type n) {
                j_ += n;
                self_type &m = (*this) ();
                it_ = m.find2 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator2 &operator -= (difference_type n) {
                j_ -= n;
                self_type &m = (*this) ();
                it_ = m.find2 (1, i_, j_).it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            difference_type operator - (const iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (index1 () == it.index1 (), bad_index ());
                return index2 () - it.index2 ();
            }
            // Dereference (yields a mutable reference)
            BOOST_UBLAS_INLINE
            reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return *it_;
            }
            BOOST_UBLAS_INLINE
            reference operator [] (difference_type n) const {
                return *(*this + n);
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            // Dual iteration: begin/end of the current column (index-1 range).
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            iterator1 begin () const {
                self_type &m = (*this) ();
                return m.find1 (1, 0, index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            iterator1 end () const {
                self_type &m = (*this) ();
                return m.find1 (1, m.size1 (), index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            reverse_iterator1 rbegin () const {
                return reverse_iterator1 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            reverse_iterator1 rend () const {
                return reverse_iterator1 (begin ());
            }
#endif
            // Indices
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                return i_;
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                return j_;
            }
            // Assignment
            BOOST_UBLAS_INLINE
            iterator2 &operator = (const iterator2 &it) {
                container_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }
            // Comparison (only valid for iterators on the same container/row)
            BOOST_UBLAS_INLINE
            bool operator == (const iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (index1 () == it.index1 (), bad_index ());
                return it_ == it.it_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (index1 () == it.index1 (), bad_index ());
                return it_ < it.it_;
            }
        private:
            size_type i_;
            size_type j_;
            subiterator_type it_;
            // const_iterator2's converting constructor reads our privates.
            friend class const_iterator2;
        };
#endif
        // Column-direction mutable range over the whole matrix: [begin2, end2).
        BOOST_UBLAS_INLINE
        iterator2 begin2 () {
            return find2 (0, 0, 0);
        }
        BOOST_UBLAS_INLINE
        iterator2 end2 () {
            return find2 (0, 0, size2_);
        }
// Reverse iterators
        // Reverse ranges: each r*/cr* accessor adapts the corresponding
        // forward range via the reverse_iterator_base adaptors.
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 rbegin1 () const {
            return const_reverse_iterator1 (end1 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 crbegin1 () const {
            return rbegin1 ();
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 rend1 () const {
            return const_reverse_iterator1 (begin1 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 crend1 () const {
            return rend1 ();
        }
        BOOST_UBLAS_INLINE
        reverse_iterator1 rbegin1 () {
            return reverse_iterator1 (end1 ());
        }
        BOOST_UBLAS_INLINE
        reverse_iterator1 rend1 () {
            return reverse_iterator1 (begin1 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 rbegin2 () const {
            return const_reverse_iterator2 (end2 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 crbegin2 () const {
            return rbegin2 ();
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 rend2 () const {
            return const_reverse_iterator2 (begin2 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 crend2 () const {
            return rend2 ();
        }
        BOOST_UBLAS_INLINE
        reverse_iterator2 rbegin2 () {
            return reverse_iterator2 (end2 ());
        }
        BOOST_UBLAS_INLINE
        reverse_iterator2 rend2 () {
            return reverse_iterator2 (begin2 ());
        }
// Serialization
        // Boost.Serialization hook: serializes the two dimensions followed
        // by the storage array; restores the sizes when loading.
        template<class Archive>
        void serialize(Archive & ar, const unsigned int /* file_version */){
            // we need to copy to a collection_size_type to get a portable
            // and efficient serialization
            serialization::collection_size_type s1 (size1_);
            serialization::collection_size_type s2 (size2_);
            // serialize the sizes
            ar & serialization::make_nvp("size1",s1)
               & serialization::make_nvp("size2",s2);
            // copy the values back if loading
            if (Archive::is_loading::value) {
                size1_ = s1;
                size2_ = s2;
            }
            ar & serialization::make_nvp("data",data_);
        }
    private:
        // Matrix dimensions; data_ holds the inner vectors addressed through
        // the layout's major/minor index mapping.
        size_type size1_;
        size_type size2_;
        array_type data_;
};
/** \brief A matrix with all values of type \c T equal to zero
*
* Changing values does not affect the matrix, however assigning it to a normal matrix will put zero
* everywhere in the target matrix. All accesses are constant time, due to the trivial value.
*
* \tparam T the type of object stored in the matrix (like double, float, complex, etc...)
     * \tparam ALLOC an allocator for storing the zero element. By default, a standard allocator is used.
*/
    template<class T, class ALLOC>
    class zero_matrix:
        public matrix_container<zero_matrix<T, ALLOC> > {
        typedef const T *const_pointer;
        typedef zero_matrix<T, ALLOC> self_type;
    public:
#ifdef BOOST_UBLAS_ENABLE_PROXY_SHORTCUTS
        using matrix_container<self_type>::operator ();
#endif
        typedef typename ALLOC::size_type size_type;
        typedef typename ALLOC::difference_type difference_type;
        typedef T value_type;
        typedef const T &const_reference;
        typedef T &reference;
        typedef const matrix_reference<const self_type> const_closure_type;
        typedef matrix_reference<self_type> closure_type;
        typedef sparse_tag storage_category;
        typedef unknown_orientation_tag orientation_category;
        // Construction and destruction
        // Only the dimensions are stored; there is no element storage.
        BOOST_UBLAS_INLINE
        zero_matrix ():
            matrix_container<self_type> (),
            size1_ (0), size2_ (0) {}
        BOOST_UBLAS_INLINE
        zero_matrix (size_type size):
            matrix_container<self_type> (),
            size1_ (size), size2_ (size) {}
        BOOST_UBLAS_INLINE
        zero_matrix (size_type size1, size_type size2):
            matrix_container<self_type> (),
            size1_ (size1), size2_ (size2) {}
        BOOST_UBLAS_INLINE
        zero_matrix (const zero_matrix &m):
            matrix_container<self_type> (),
            size1_ (m.size1_), size2_ (m.size2_) {}
        // Accessors
        BOOST_UBLAS_INLINE
        size_type size1 () const {
            return size1_;
        }
        BOOST_UBLAS_INLINE
        size_type size2 () const {
            return size2_;
        }
        // Resizing
        // Only the reported dimensions change; nothing is (re)allocated.
        BOOST_UBLAS_INLINE
        void resize (size_type size, bool /*preserve*/ = true) {
            size1_ = size;
            size2_ = size;
        }
        BOOST_UBLAS_INLINE
        void resize (size_type size1, size_type size2, bool /*preserve*/ = true) {
            size1_ = size1;
            size2_ = size2;
        }
        // Element access
        // Every element reads as the shared static zero_ constant.
        BOOST_UBLAS_INLINE
        const_reference operator () (size_type /* i */, size_type /* j */) const {
            return zero_;
        }
        // Assignment
        BOOST_UBLAS_INLINE
        zero_matrix &operator = (const zero_matrix &m) {
            size1_ = m.size1_;
            size2_ = m.size2_;
            return *this;
        }
        BOOST_UBLAS_INLINE
        zero_matrix &assign_temporary (zero_matrix &m) {
            swap (m);
            return *this;
        }
        // Swapping
        BOOST_UBLAS_INLINE
        void swap (zero_matrix &m) {
            if (this != &m) {
                std::swap (size1_, m.size1_);
                std::swap (size2_, m.size2_);
            }
        }
        BOOST_UBLAS_INLINE
        friend void swap (zero_matrix &m1, zero_matrix &m2) {
            m1.swap (m2);
        }
        // Iterator types
    public:
        class const_iterator1;
        class const_iterator2;
        typedef reverse_iterator_base1<const_iterator1> const_reverse_iterator1;
        typedef reverse_iterator_base2<const_iterator2> const_reverse_iterator2;
        // Element lookup
        // There are no stored (non-zero) elements, so every lookup yields
        // the same position-less iterator.
        BOOST_UBLAS_INLINE
        const_iterator1 find1 (int /*rank*/, size_type /*i*/, size_type /*j*/) const {
            return const_iterator1 (*this);
        }
        BOOST_UBLAS_INLINE
        const_iterator2 find2 (int /*rank*/, size_type /*i*/, size_type /*j*/) const {
            return const_iterator2 (*this);
        }
        // Sparse bidirectional iterator over the (empty) set of stored
        // elements in direction 1; begin and end always compare equal.
        class const_iterator1:
            public container_const_reference<zero_matrix>,
            public bidirectional_iterator_base<sparse_bidirectional_iterator_tag,
                                               const_iterator1, value_type> {
        public:
            typedef typename zero_matrix::value_type value_type;
            typedef typename zero_matrix::difference_type difference_type;
            typedef typename zero_matrix::const_reference reference;
            typedef typename zero_matrix::const_pointer pointer;
            typedef const_iterator2 dual_iterator_type;
            typedef const_reverse_iterator2 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            const_iterator1 ():
                container_const_reference<self_type> () {}
            BOOST_UBLAS_INLINE
            const_iterator1 (const self_type &m):
                container_const_reference<self_type> (m) {}
            // Arithmetic
            // The sequence is empty, so stepping is always an error in
            // checked builds.
            BOOST_UBLAS_INLINE
            const_iterator1 &operator ++ () {
                BOOST_UBLAS_CHECK_FALSE (bad_index ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator -- () {
                BOOST_UBLAS_CHECK_FALSE (bad_index ());
                return *this;
            }
            // Dereference
            BOOST_UBLAS_INLINE
            const_reference operator * () const {
                BOOST_UBLAS_CHECK_FALSE (bad_index ());
                return zero_; // arbitrary return value
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            // Dual ranges are empty too: begin and end are constructed
            // identically.
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 begin () const {
                return const_iterator2 ((*this) ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 cbegin () const {
                return begin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 end () const {
                return const_iterator2 ((*this) ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 cend () const {
                return end ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 rbegin () const {
                return const_reverse_iterator2 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 crbegin () const {
                return rbegin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 rend () const {
                return const_reverse_iterator2 (begin ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 crend () const {
                return rend ();
            }
#endif
            // Indices
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                BOOST_UBLAS_CHECK_FALSE (bad_index ());
                return 0; // arbitrary return value
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                BOOST_UBLAS_CHECK_FALSE (bad_index ());
                return 0; // arbitrary return value
            }
            // Assignment
            BOOST_UBLAS_INLINE
            const_iterator1 &operator = (const const_iterator1 &it) {
                container_const_reference<self_type>::assign (&it ());
                return *this;
            }
            // Comparison
            // All iterators on the same container are equal (no position).
            BOOST_UBLAS_INLINE
            bool operator == (const const_iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                detail::ignore_unused_variable_warning(it);
                return true;
            }
        };
        typedef const_iterator1 iterator1;
        BOOST_UBLAS_INLINE
        const_iterator1 begin1 () const {
            return const_iterator1 (*this);
        }
        BOOST_UBLAS_INLINE
        const_iterator1 cbegin1 () const {
            return begin1 ();
        }
        BOOST_UBLAS_INLINE
        const_iterator1 end1 () const {
            return const_iterator1 (*this);
        }
        BOOST_UBLAS_INLINE
        const_iterator1 cend1 () const {
            return end1 ();
        }
        // Sparse bidirectional iterator over the (empty) set of stored
        // elements in direction 2; mirrors const_iterator1.
        class const_iterator2:
            public container_const_reference<zero_matrix>,
            public bidirectional_iterator_base<sparse_bidirectional_iterator_tag,
                                               const_iterator2, value_type> {
        public:
            typedef typename zero_matrix::value_type value_type;
            typedef typename zero_matrix::difference_type difference_type;
            typedef typename zero_matrix::const_reference reference;
            typedef typename zero_matrix::const_pointer pointer;
            typedef const_iterator1 dual_iterator_type;
            typedef const_reverse_iterator1 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            const_iterator2 ():
                container_const_reference<self_type> () {}
            BOOST_UBLAS_INLINE
            const_iterator2 (const self_type &m):
                container_const_reference<self_type> (m) {}
            // Arithmetic
            BOOST_UBLAS_INLINE
            const_iterator2 &operator ++ () {
                BOOST_UBLAS_CHECK_FALSE (bad_index ());
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator -- () {
                BOOST_UBLAS_CHECK_FALSE (bad_index ());
                return *this;
            }
            // Dereference
            BOOST_UBLAS_INLINE
            const_reference operator * () const {
                BOOST_UBLAS_CHECK_FALSE (bad_index ());
                return zero_; // arbitrary return value
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 begin () const {
                return const_iterator1 ((*this) ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 cbegin () const {
                return begin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 end () const {
                return const_iterator1 ((*this) ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 cend () const {
                return end ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 rbegin () const {
                return const_reverse_iterator1 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 crbegin () const {
                return rbegin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 rend () const {
                return const_reverse_iterator1 (begin ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 crend () const {
                return rend ();
            }
#endif
            // Indices
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                BOOST_UBLAS_CHECK_FALSE (bad_index ());
                return 0; // arbitrary return value
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                BOOST_UBLAS_CHECK_FALSE (bad_index ());
                return 0; // arbitrary return value
            }
            // Assignment
            BOOST_UBLAS_INLINE
            const_iterator2 &operator = (const const_iterator2 &it) {
                container_const_reference<self_type>::assign (&it ());
                return *this;
            }
            // Comparison
            BOOST_UBLAS_INLINE
            bool operator == (const const_iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                detail::ignore_unused_variable_warning(it);
                return true;
            }
        };
        typedef const_iterator2 iterator2;
        BOOST_UBLAS_INLINE
        const_iterator2 begin2 () const {
            return find2 (0, 0, 0);
        }
        BOOST_UBLAS_INLINE
        const_iterator2 cbegin2 () const {
            return begin2 ();
        }
        BOOST_UBLAS_INLINE
        const_iterator2 end2 () const {
            return find2 (0, 0, size2_);
        }
        BOOST_UBLAS_INLINE
        const_iterator2 cend2 () const {
            return end2 ();
        }
        // Reverse iterators
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 rbegin1 () const {
            return const_reverse_iterator1 (end1 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 crbegin1 () const {
            return rbegin1 ();
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 rend1 () const {
            return const_reverse_iterator1 (begin1 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 crend1 () const {
            return rend1 ();
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 rbegin2 () const {
            return const_reverse_iterator2 (end2 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 crbegin2 () const {
            return rbegin2 ();
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 rend2 () const {
            return const_reverse_iterator2 (begin2 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 crend2 () const {
            return rend2 ();
        }
        // Serialization
        // Only the two dimensions are serialized; there is no element data.
        template<class Archive>
        void serialize(Archive & ar, const unsigned int /* file_version */){
            // we need to copy to a collection_size_type to get a portable
            // and efficient serialization
            serialization::collection_size_type s1 (size1_);
            serialization::collection_size_type s2 (size2_);
            // serialize the sizes
            ar & serialization::make_nvp("size1",s1)
               & serialization::make_nvp("size2",s2);
            // copy the values back if loading
            if (Archive::is_loading::value) {
                size1_ = s1;
                size2_ = s2;
            }
        }
    private:
        size_type size1_;
        size_type size2_;
        // Shared constant returned by every element access.
        static const value_type zero_;
    };
    // Definition of the class-wide zero constant (value-initialized T).
    template<class T, class ALLOC>
    const typename zero_matrix<T, ALLOC>::value_type zero_matrix<T, ALLOC>::zero_ = T(/*zero*/);
/** \brief An identity matrix with values of type \c T
*
     * Elements at coordinates \f$(i,i)\f$ are equal to 1 (one) and all others to 0 (zero).
     * Changing values does not affect the matrix, however assigning it to a normal matrix will
     * make the matrix equal to an identity matrix. All accesses are constant time, due to the trivial values.
*
* \tparam T the type of object stored in the matrix (like double, float, complex, etc...)
     * \tparam ALLOC an allocator for storing the zero and one elements. By default, a standard allocator is used.
*/
    template<class T, class ALLOC>
    class identity_matrix:
        public matrix_container<identity_matrix<T, ALLOC> > {
        typedef const T *const_pointer;
        typedef identity_matrix<T, ALLOC> self_type;
    public:
#ifdef BOOST_UBLAS_ENABLE_PROXY_SHORTCUTS
        using matrix_container<self_type>::operator ();
#endif
        typedef typename ALLOC::size_type size_type;
        typedef typename ALLOC::difference_type difference_type;
        typedef T value_type;
        typedef const T &const_reference;
        typedef T &reference;
        typedef const matrix_reference<const self_type> const_closure_type;
        typedef matrix_reference<self_type> closure_type;
        typedef sparse_tag storage_category;
        typedef unknown_orientation_tag orientation_category;
        // Construction and destruction
        // size_common_ caches min(size1_, size2_): the number of diagonal
        // (i.e. non-zero) elements, which is also the iteration range.
        BOOST_UBLAS_INLINE
        identity_matrix ():
            matrix_container<self_type> (),
            size1_ (0), size2_ (0), size_common_ (0) {}
        BOOST_UBLAS_INLINE
        identity_matrix (size_type size):
            matrix_container<self_type> (),
            size1_ (size), size2_ (size), size_common_ ((std::min) (size1_, size2_)) {}
        BOOST_UBLAS_INLINE
        identity_matrix (size_type size1, size_type size2):
            matrix_container<self_type> (),
            size1_ (size1), size2_ (size2), size_common_ ((std::min) (size1_, size2_)) {}
        BOOST_UBLAS_INLINE
        identity_matrix (const identity_matrix &m):
            matrix_container<self_type> (),
            size1_ (m.size1_), size2_ (m.size2_), size_common_ ((std::min) (size1_, size2_)) {}
        // Accessors
        BOOST_UBLAS_INLINE
        size_type size1 () const {
            return size1_;
        }
        BOOST_UBLAS_INLINE
        size_type size2 () const {
            return size2_;
        }
        // Resizing
        // No element storage exists, so resizing only updates the bookkeeping.
        BOOST_UBLAS_INLINE
        void resize (size_type size, bool /*preserve*/ = true) {
            size1_ = size;
            size2_ = size;
            size_common_ = ((std::min)(size1_, size2_));
        }
        BOOST_UBLAS_INLINE
        void resize (size_type size1, size_type size2, bool /*preserve*/ = true) {
            size1_ = size1;
            size2_ = size2;
            size_common_ = ((std::min)(size1_, size2_));
        }
        // Element access
        // Returns a reference to one of the two static constants; the matrix
        // itself is immutable element-wise.
        BOOST_UBLAS_INLINE
        const_reference operator () (size_type i, size_type j) const {
            if (i == j)
                return one_;
            else
                return zero_;
        }
        // Assignment
        BOOST_UBLAS_INLINE
        identity_matrix &operator = (const identity_matrix &m) {
            size1_ = m.size1_;
            size2_ = m.size2_;
            size_common_ = m.size_common_;
            return *this;
        }
        BOOST_UBLAS_INLINE
        identity_matrix &assign_temporary (identity_matrix &m) {
            swap (m);
            return *this;
        }
        // Swapping
        BOOST_UBLAS_INLINE
        void swap (identity_matrix &m) {
            if (this != &m) {
                std::swap (size1_, m.size1_);
                std::swap (size2_, m.size2_);
                std::swap (size_common_, m.size_common_);
            }
        }
        BOOST_UBLAS_INLINE
        friend void swap (identity_matrix &m1, identity_matrix &m2) {
            m1.swap (m2);
        }
        // Iterator types
    private:
        // Use an index
        // Iterators store only the diagonal index; row and column coincide.
        typedef size_type const_subiterator_type;
    public:
        class const_iterator1;
        class const_iterator2;
        typedef reverse_iterator_base1<const_iterator1> const_reverse_iterator1;
        typedef reverse_iterator_base2<const_iterator2> const_reverse_iterator2;
        // Element lookup
        // For rank-1 lookups the only structural non-zero in row/column k is
        // the diagonal element (k, k), so the sought index is clamped to the
        // range [j, j + 1] (resp. [i, i + 1]) around the diagonal.
        BOOST_UBLAS_INLINE
        const_iterator1 find1 (int rank, size_type i, size_type j) const {
            if (rank == 1) {
                i = (std::max) (i, j);
                i = (std::min) (i, j + 1);
            }
            return const_iterator1 (*this, i);
        }
        BOOST_UBLAS_INLINE
        const_iterator2 find2 (int rank, size_type i, size_type j) const {
            if (rank == 1) {
                j = (std::max) (j, i);
                j = (std::min) (j, i + 1);
            }
            return const_iterator2 (*this, j);
        }
        class const_iterator1:
            public container_const_reference<identity_matrix>,
            public bidirectional_iterator_base<sparse_bidirectional_iterator_tag,
                                               const_iterator1, value_type> {
        public:
            typedef typename identity_matrix::value_type value_type;
            typedef typename identity_matrix::difference_type difference_type;
            typedef typename identity_matrix::const_reference reference;
            typedef typename identity_matrix::const_pointer pointer;
            typedef const_iterator2 dual_iterator_type;
            typedef const_reverse_iterator2 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            const_iterator1 ():
                container_const_reference<self_type> (), it_ () {}
            BOOST_UBLAS_INLINE
            const_iterator1 (const self_type &m, const const_subiterator_type &it):
                container_const_reference<self_type> (m), it_ (it) {}
            // Arithmetic
            BOOST_UBLAS_INLINE
            const_iterator1 &operator ++ () {
                // NOTE(review): this debug check bounds it_ by size1 () while
                // const_iterator2::operator++ uses size_common_; end1 () is at
                // size_common_, so the two checks are asymmetric — confirm
                // whether this is intentional upstream.
                BOOST_UBLAS_CHECK (it_ < (*this) ().size1 (), bad_index ());
                ++it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator -- () {
                BOOST_UBLAS_CHECK (it_ > 0, bad_index ());
                --it_;
                return *this;
            }
            // Dereference
            // Row iterators visit only diagonal elements, which are all one.
            BOOST_UBLAS_INLINE
            const_reference operator * () const {
                return one_;
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 begin () const {
                return const_iterator2 ((*this) (), it_);
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 cbegin () const {
                return begin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 end () const {
                return const_iterator2 ((*this) (), it_ + 1);
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 cend () const {
                return end ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 rbegin () const {
                return const_reverse_iterator2 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 crbegin () const {
                return rbegin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 rend () const {
                return const_reverse_iterator2 (begin ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 crend () const {
                return rend ();
            }
#endif
            // Indices
            // Both indices equal the stored diagonal position.
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                return it_;
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                return it_;
            }
            // Assignment
            BOOST_UBLAS_INLINE
            const_iterator1 &operator = (const const_iterator1 &it) {
                container_const_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }
            // Comparison
            BOOST_UBLAS_INLINE
            bool operator == (const const_iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ == it.it_;
            }
        private:
            const_subiterator_type it_;
        };
        typedef const_iterator1 iterator1;
        BOOST_UBLAS_INLINE
        const_iterator1 begin1 () const {
            return const_iterator1 (*this, 0);
        }
        BOOST_UBLAS_INLINE
        const_iterator1 cbegin1 () const {
            return begin1 ();
        }
        BOOST_UBLAS_INLINE
        const_iterator1 end1 () const {
            // Iteration stops after the last diagonal element.
            return const_iterator1 (*this, size_common_);
        }
        BOOST_UBLAS_INLINE
        const_iterator1 cend1 () const {
            return end1 ();
        }
        class const_iterator2:
            public container_const_reference<identity_matrix>,
            public bidirectional_iterator_base<sparse_bidirectional_iterator_tag,
                                               const_iterator2, value_type> {
        public:
            typedef typename identity_matrix::value_type value_type;
            typedef typename identity_matrix::difference_type difference_type;
            typedef typename identity_matrix::const_reference reference;
            typedef typename identity_matrix::const_pointer pointer;
            typedef const_iterator1 dual_iterator_type;
            typedef const_reverse_iterator1 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            const_iterator2 ():
                container_const_reference<self_type> (), it_ () {}
            BOOST_UBLAS_INLINE
            const_iterator2 (const self_type &m, const const_subiterator_type &it):
                container_const_reference<self_type> (m), it_ (it) {}
            // Arithmetic
            BOOST_UBLAS_INLINE
            const_iterator2 &operator ++ () {
                BOOST_UBLAS_CHECK (it_ < (*this) ().size_common_, bad_index ());
                ++it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator -- () {
                BOOST_UBLAS_CHECK (it_ > 0, bad_index ());
                --it_;
                return *this;
            }
            // Dereference
            // Column iterators visit only diagonal elements, which are all one.
            BOOST_UBLAS_INLINE
            const_reference operator * () const {
                return one_;
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 begin () const {
                return const_iterator1 ((*this) (), it_);
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 cbegin () const {
                return begin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 end () const {
                return const_iterator1 ((*this) (), it_ + 1);
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 cend () const {
                return end ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 rbegin () const {
                return const_reverse_iterator1 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 crbegin () const {
                return rbegin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 rend () const {
                return const_reverse_iterator1 (begin ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 crend () const {
                return rend ();
            }
#endif
            // Indices
            // Both indices equal the stored diagonal position.
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                return it_;
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                return it_;
            }
            // Assignment
            BOOST_UBLAS_INLINE
            const_iterator2 &operator = (const const_iterator2 &it) {
                container_const_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }
            // Comparison
            BOOST_UBLAS_INLINE
            bool operator == (const const_iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ == it.it_;
            }
        private:
            const_subiterator_type it_;
        };
        typedef const_iterator2 iterator2;
        BOOST_UBLAS_INLINE
        const_iterator2 begin2 () const {
            return const_iterator2 (*this, 0);
        }
        BOOST_UBLAS_INLINE
        const_iterator2 cbegin2 () const {
            return begin2 ();
        }
        BOOST_UBLAS_INLINE
        const_iterator2 end2 () const {
            // Iteration stops after the last diagonal element.
            return const_iterator2 (*this, size_common_);
        }
        BOOST_UBLAS_INLINE
        const_iterator2 cend2 () const {
            return end2 ();
        }
        // Reverse iterators
        // Thin adaptors over the forward iterators above.
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 rbegin1 () const {
            return const_reverse_iterator1 (end1 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 crbegin1 () const {
            return rbegin1 ();
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 rend1 () const {
            return const_reverse_iterator1 (begin1 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 crend1 () const {
            return rend1 ();
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 rbegin2 () const {
            return const_reverse_iterator2 (end2 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 crbegin2 () const {
            return rbegin2 ();
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 rend2 () const {
            return const_reverse_iterator2 (begin2 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 crend2 () const {
            return rend2 ();
        }
        // Serialization
        // Only the two sizes are serialized; the element values are the
        // static one_/zero_ constants.
        template<class Archive>
        void serialize(Archive & ar, const unsigned int /* file_version */){
            // we need to copy to a collection_size_type to get a portable
            // and efficient serialization
            serialization::collection_size_type s1 (size1_);
            serialization::collection_size_type s2 (size2_);
            // serialize the sizes
            ar & serialization::make_nvp("size1",s1)
              & serialization::make_nvp("size2",s2);
            // copy the values back if loading
            if (Archive::is_loading::value) {
                size1_ = s1;
                size2_ = s2;
                // size_common_ is derived state: recompute it after loading
                size_common_ = ((std::min)(size1_, size2_));
            }
        }
    private:
        size_type size1_;
        size_type size2_;
        size_type size_common_;                 // cached min(size1_, size2_)
        static const value_type zero_;          // off-diagonal element
        static const value_type one_;           // diagonal element
    };
    // Out-of-class definitions of identity_matrix's shared elements.
    template<class T, class ALLOC>
    const typename identity_matrix<T, ALLOC>::value_type identity_matrix<T, ALLOC>::zero_ = T(/*zero*/);
    template<class T, class ALLOC>
    const typename identity_matrix<T, ALLOC>::value_type identity_matrix<T, ALLOC>::one_ (1); // ISSUE: need 'one'-traits here
    /** \brief A matrix with all values of type \c T equal to the same value
     *
     * Changing one value has the effect of changing all the values. Assigning it to a normal matrix will copy
     * the same value everywhere in this matrix. All accesses are constant time, due to the trivial value.
     *
     * \tparam T the type of object stored in the matrix (like double, float, complex, etc...)
     * \tparam ALLOC an allocator for storing the unique value. By default, a standard allocator is used.
     */
    template<class T, class ALLOC>
    class scalar_matrix:
        public matrix_container<scalar_matrix<T, ALLOC> > {
        typedef const T *const_pointer;
        typedef scalar_matrix<T, ALLOC> self_type;
    public:
#ifdef BOOST_UBLAS_ENABLE_PROXY_SHORTCUTS
        using matrix_container<self_type>::operator ();
#endif
        typedef std::size_t size_type;
        typedef std::ptrdiff_t difference_type;
        typedef T value_type;
        typedef const T &const_reference;
        typedef T &reference;
        typedef const matrix_reference<const self_type> const_closure_type;
        typedef matrix_reference<self_type> closure_type;
        typedef dense_tag storage_category;
        typedef unknown_orientation_tag orientation_category;
        // Construction and destruction
        BOOST_UBLAS_INLINE
        scalar_matrix ():
            matrix_container<self_type> (),
            size1_ (0), size2_ (0), value_ () {}
        BOOST_UBLAS_INLINE
        scalar_matrix (size_type size1, size_type size2, const value_type &value = value_type(1)):
            matrix_container<self_type> (),
            size1_ (size1), size2_ (size2), value_ (value) {}
        BOOST_UBLAS_INLINE
        scalar_matrix (const scalar_matrix &m):
            matrix_container<self_type> (),
            size1_ (m.size1_), size2_ (m.size2_), value_ (m.value_) {}
        // Accessors
        BOOST_UBLAS_INLINE
        size_type size1 () const {
            return size1_;
        }
        BOOST_UBLAS_INLINE
        size_type size2 () const {
            return size2_;
        }
        // Resizing
        // Only bookkeeping: there is no element storage, so 'preserve' is moot.
        BOOST_UBLAS_INLINE
        void resize (size_type size1, size_type size2, bool /*preserve*/ = true) {
            size1_ = size1;
            size2_ = size2;
        }
        // Element access
        // Every position yields the same single stored value.
        BOOST_UBLAS_INLINE
        const_reference operator () (size_type /*i*/, size_type /*j*/) const {
            return value_;
        }
        // Assignment
        BOOST_UBLAS_INLINE
        scalar_matrix &operator = (const scalar_matrix &m) {
            size1_ = m.size1_;
            size2_ = m.size2_;
            value_ = m.value_;
            return *this;
        }
        BOOST_UBLAS_INLINE
        scalar_matrix &assign_temporary (scalar_matrix &m) {
            swap (m);
            return *this;
        }
        // Swapping
        BOOST_UBLAS_INLINE
        void swap (scalar_matrix &m) {
            if (this != &m) {
                std::swap (size1_, m.size1_);
                std::swap (size2_, m.size2_);
                std::swap (value_, m.value_);
            }
        }
        BOOST_UBLAS_INLINE
        friend void swap (scalar_matrix &m1, scalar_matrix &m2) {
            m1.swap (m2);
        }
        // Iterator types
    private:
        // Use an index
        typedef size_type const_subiterator_type;
    public:
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
        typedef indexed_const_iterator1<self_type, dense_random_access_iterator_tag> iterator1;
        typedef indexed_const_iterator2<self_type, dense_random_access_iterator_tag> iterator2;
        typedef indexed_const_iterator1<self_type, dense_random_access_iterator_tag> const_iterator1;
        typedef indexed_const_iterator2<self_type, dense_random_access_iterator_tag> const_iterator2;
#else
        class const_iterator1;
        class const_iterator2;
#endif
        typedef reverse_iterator_base1<const_iterator1> const_reverse_iterator1;
        typedef reverse_iterator_base2<const_iterator2> const_reverse_iterator2;
        // Element lookup
        // Dense storage category: the rank hint is irrelevant, every (i, j)
        // is directly addressable.
        BOOST_UBLAS_INLINE
        const_iterator1 find1 (int /*rank*/, size_type i, size_type j) const {
            return const_iterator1 (*this, i, j);
        }
        BOOST_UBLAS_INLINE
        const_iterator2 find2 (int /*rank*/, size_type i, size_type j) const {
            return const_iterator2 (*this, i, j);
        }
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        class const_iterator1:
            public container_const_reference<scalar_matrix>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               const_iterator1, value_type> {
        public:
            typedef typename scalar_matrix::value_type value_type;
            typedef typename scalar_matrix::difference_type difference_type;
            typedef typename scalar_matrix::const_reference reference;
            typedef typename scalar_matrix::const_pointer pointer;
            typedef const_iterator2 dual_iterator_type;
            typedef const_reverse_iterator2 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            const_iterator1 ():
                container_const_reference<scalar_matrix> (), it1_ (), it2_ () {}
            BOOST_UBLAS_INLINE
            const_iterator1 (const scalar_matrix &m, const const_subiterator_type &it1, const const_subiterator_type &it2):
                container_const_reference<scalar_matrix> (m), it1_ (it1), it2_ (it2) {}
            // Arithmetic
            // Row iterators advance the row index it1_ only.
            BOOST_UBLAS_INLINE
            const_iterator1 &operator ++ () {
                ++ it1_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator -- () {
                -- it1_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator += (difference_type n) {
                it1_ += n;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator -= (difference_type n) {
                it1_ -= n;
                return *this;
            }
            BOOST_UBLAS_INLINE
            difference_type operator - (const const_iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (it2_ == it.it2_, external_logic ());
                return it1_ - it.it1_;
            }
            // Dereference
            BOOST_UBLAS_INLINE
            const_reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return (*this) () (index1 (), index2 ());
            }
            BOOST_UBLAS_INLINE
            const_reference operator [] (difference_type n) const {
                return *(*this + n);
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 begin () const {
                const scalar_matrix &m = (*this) ();
                return m.find2 (1, index1 (), 0);
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 cbegin () const {
                return begin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 end () const {
                const scalar_matrix &m = (*this) ();
                return m.find2 (1, index1 (), m.size2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 cend () const {
                return end ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 rbegin () const {
                return const_reverse_iterator2 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 crbegin () const {
                return rbegin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 rend () const {
                return const_reverse_iterator2 (begin ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 crend () const {
                return rend ();
            }
#endif
            // Indices
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                return it1_;
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                return it2_;
            }
            // Assignment
            BOOST_UBLAS_INLINE
            const_iterator1 &operator = (const const_iterator1 &it) {
                container_const_reference<scalar_matrix>::assign (&it ());
                it1_ = it.it1_;
                it2_ = it.it2_;
                return *this;
            }
            // Comparison
            BOOST_UBLAS_INLINE
            bool operator == (const const_iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (it2_ == it.it2_, external_logic ());
                return it1_ == it.it1_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const const_iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (it2_ == it.it2_, external_logic ());
                return it1_ < it.it1_;
            }
        private:
            const_subiterator_type it1_;
            const_subiterator_type it2_;
        };
        typedef const_iterator1 iterator1;
#endif
        BOOST_UBLAS_INLINE
        const_iterator1 begin1 () const {
            return find1 (0, 0, 0);
        }
        BOOST_UBLAS_INLINE
        const_iterator1 cbegin1 () const {
            return begin1 ();
        }
        BOOST_UBLAS_INLINE
        const_iterator1 end1 () const {
            return find1 (0, size1_, 0);
        }
        BOOST_UBLAS_INLINE
        const_iterator1 cend1 () const {
            return end1 ();
        }
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        class const_iterator2:
            public container_const_reference<scalar_matrix>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               const_iterator2, value_type> {
        public:
            typedef typename scalar_matrix::value_type value_type;
            typedef typename scalar_matrix::difference_type difference_type;
            typedef typename scalar_matrix::const_reference reference;
            typedef typename scalar_matrix::const_pointer pointer;
            typedef const_iterator1 dual_iterator_type;
            typedef const_reverse_iterator1 dual_reverse_iterator_type;
            // Construction and destruction
            BOOST_UBLAS_INLINE
            const_iterator2 ():
                container_const_reference<scalar_matrix> (), it1_ (), it2_ () {}
            BOOST_UBLAS_INLINE
            const_iterator2 (const scalar_matrix &m, const const_subiterator_type &it1, const const_subiterator_type &it2):
                container_const_reference<scalar_matrix> (m), it1_ (it1), it2_ (it2) {}
            // Arithmetic
            // Column iterators advance the column index it2_ only.
            BOOST_UBLAS_INLINE
            const_iterator2 &operator ++ () {
                ++ it2_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator -- () {
                -- it2_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator += (difference_type n) {
                it2_ += n;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator -= (difference_type n) {
                it2_ -= n;
                return *this;
            }
            BOOST_UBLAS_INLINE
            difference_type operator - (const const_iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (it1_ == it.it1_, external_logic ());
                return it2_ - it.it2_;
            }
            // Dereference
            BOOST_UBLAS_INLINE
            const_reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return (*this) () (index1 (), index2 ());
            }
            BOOST_UBLAS_INLINE
            const_reference operator [] (difference_type n) const {
                return *(*this + n);
            }
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 begin () const {
                const scalar_matrix &m = (*this) ();
                return m.find1 (1, 0, index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 cbegin () const {
                return begin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 end () const {
                const scalar_matrix &m = (*this) ();
                return m.find1 (1, m.size1 (), index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 cend () const {
                return end ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 rbegin () const {
                return const_reverse_iterator1 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 crbegin () const {
                return rbegin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 rend () const {
                return const_reverse_iterator1 (begin ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 crend () const {
                return rend ();
            }
#endif
            // Indices
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                return it1_;
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                return it2_;
            }
            // Assignment
            BOOST_UBLAS_INLINE
            const_iterator2 &operator = (const const_iterator2 &it) {
                container_const_reference<scalar_matrix>::assign (&it ());
                it1_ = it.it1_;
                it2_ = it.it2_;
                return *this;
            }
            // Comparison
            BOOST_UBLAS_INLINE
            bool operator == (const const_iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (it1_ == it.it1_, external_logic ());
                return it2_ == it.it2_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const const_iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                BOOST_UBLAS_CHECK (it1_ == it.it1_, external_logic ());
                return it2_ < it.it2_;
            }
        private:
            const_subiterator_type it1_;
            const_subiterator_type it2_;
        };
        typedef const_iterator2 iterator2;
#endif
        BOOST_UBLAS_INLINE
        const_iterator2 begin2 () const {
            return find2 (0, 0, 0);
        }
        BOOST_UBLAS_INLINE
        const_iterator2 cbegin2 () const {
            return begin2 ();
        }
        BOOST_UBLAS_INLINE
        const_iterator2 end2 () const {
            return find2 (0, 0, size2_);
        }
        BOOST_UBLAS_INLINE
        const_iterator2 cend2 () const {
            return end2 ();
        }
        // Reverse iterators
        // Thin adaptors over the forward iterators above.
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 rbegin1 () const {
            return const_reverse_iterator1 (end1 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 crbegin1 () const {
            return rbegin1 ();
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 rend1 () const {
            return const_reverse_iterator1 (begin1 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 crend1 () const {
            return rend1 ();
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 rbegin2 () const {
            return const_reverse_iterator2 (end2 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 crbegin2 () const {
            return rbegin2 ();
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 rend2 () const {
            return const_reverse_iterator2 (begin2 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 crend2 () const {
            return rend2 ();
        }
        // Serialization
        // State is the two sizes plus the single shared element value.
        template<class Archive>
        void serialize(Archive & ar, const unsigned int /* file_version */){
            // we need to copy to a collection_size_type to get a portable
            // and efficient serialization
            serialization::collection_size_type s1 (size1_);
            serialization::collection_size_type s2 (size2_);
            // serialize the sizes
            ar & serialization::make_nvp("size1",s1)
              & serialization::make_nvp("size2",s2);
            // copy the values back if loading
            if (Archive::is_loading::value) {
                size1_ = s1;
                size2_ = s2;
            }
            // the value itself round-trips directly through the archive
            ar & serialization::make_nvp("value", value_);
        }
    private:
        size_type size1_;
        size_type size2_;
        value_type value_;   // the single element returned for every (i, j)
    };
    /** \brief An array based matrix class whose size is defined at type specification or object instantiation
     *
     * This matrix is directly based on a predefined C-style array of data, thus providing the fastest
     * implementation possible. The constraint is that dimensions of the matrix must be specified at
     * instantiation or at type specification.
     *
     * For instance, \code typedef c_matrix<double,4,4> my_4by4_matrix \endcode
     * defines a 4 by 4 double-precision matrix. You can also instantiate it directly with
     * \code c_matrix<int,8,5> my_fast_matrix \endcode. This will make an 8 by 5 integer matrix. The
     * price to pay for this speed is that you cannot resize it to a size larger than the one defined
     * in the template parameters. In the previous example, a size of 4 by 5 or 3 by 2 is acceptable,
     * but a new size of 9 by 5 or even 10 by 10 will raise a bad_size() exception.
     *
     * \tparam T the type of object stored in the matrix (like double, float, complex, etc...)
     * \tparam N the default maximum number of rows
     * \tparam M the default maximum number of columns
     */
template<class T, std::size_t N, std::size_t M>
class c_matrix:
public matrix_container<c_matrix<T, N, M> > {
typedef c_matrix<T, N, M> self_type;
public:
#ifdef BOOST_UBLAS_ENABLE_PROXY_SHORTCUTS
using matrix_container<self_type>::operator ();
#endif
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef T value_type;
typedef const T &const_reference;
typedef T &reference;
typedef const T *const_pointer;
typedef T *pointer;
typedef const matrix_reference<const self_type> const_closure_type;
typedef matrix_reference<self_type> closure_type;
typedef c_vector<T, N * M> vector_temporary_type; // vector able to store all elements of c_matrix
typedef self_type matrix_temporary_type;
typedef dense_tag storage_category;
// This could be better for performance,
// typedef typename unknown_orientation_tag orientation_category;
// but others depend on the orientation information...
typedef row_major_tag orientation_category;
// Construction and destruction
BOOST_UBLAS_INLINE
c_matrix ():
size1_ (N), size2_ (M) /* , data_ () */ {
}
BOOST_UBLAS_INLINE
c_matrix (size_type size1, size_type size2):
size1_ (size1), size2_ (size2) /* , data_ () */ {
if (size1_ > N || size2_ > M)
bad_size ().raise ();
}
BOOST_UBLAS_INLINE
c_matrix (const c_matrix &m):
size1_ (m.size1_), size2_ (m.size2_) /* , data_ () */ {
if (size1_ > N || size2_ > M)
bad_size ().raise ();
assign(m);
}
template<class AE>
BOOST_UBLAS_INLINE
c_matrix (const matrix_expression<AE> &ae):
size1_ (ae ().size1 ()), size2_ (ae ().size2 ()) /* , data_ () */ {
if (size1_ > N || size2_ > M)
bad_size ().raise ();
matrix_assign<scalar_assign> (*this, ae);
}
// Accessors
BOOST_UBLAS_INLINE
size_type size1 () const {
return size1_;
}
BOOST_UBLAS_INLINE
size_type size2 () const {
return size2_;
}
BOOST_UBLAS_INLINE
const_pointer data () const {
return reinterpret_cast<const_pointer> (data_);
}
BOOST_UBLAS_INLINE
pointer data () {
return reinterpret_cast<pointer> (data_);
}
// Resizing
BOOST_UBLAS_INLINE
void resize (size_type size1, size_type size2, bool preserve = true) {
if (size1 > N || size2 > M)
bad_size ().raise ();
if (preserve) {
self_type temporary (size1, size2);
// Common elements to preserve
const size_type size1_min = (std::min) (size1, size1_);
const size_type size2_min = (std::min) (size2, size2_);
for (size_type i = 0; i != size1_min; ++i) { // indexing copy over major
for (size_type j = 0; j != size2_min; ++j) {
temporary.data_[i][j] = data_[i][j];
}
}
assign_temporary (temporary);
}
else {
size1_ = size1;
size2_ = size2;
}
}
// Element access
BOOST_UBLAS_INLINE
const_reference operator () (size_type i, size_type j) const {
BOOST_UBLAS_CHECK (i < size1_, bad_index ());
BOOST_UBLAS_CHECK (j < size2_, bad_index ());
return data_ [i] [j];
}
BOOST_UBLAS_INLINE
reference at_element (size_type i, size_type j) {
BOOST_UBLAS_CHECK (i < size1_, bad_index ());
BOOST_UBLAS_CHECK (j < size2_, bad_index ());
return data_ [i] [j];
}
BOOST_UBLAS_INLINE
reference operator () (size_type i, size_type j) {
return at_element (i, j);
}
// Element assignment
BOOST_UBLAS_INLINE
reference insert_element (size_type i, size_type j, const_reference t) {
return (at_element (i, j) = t);
}
// Zeroing
BOOST_UBLAS_INLINE
void clear () {
for (size_type i = 0; i < size1_; ++ i)
std::fill (data_ [i], data_ [i] + size2_, value_type/*zero*/());
}
// Assignment
#ifdef BOOST_UBLAS_MOVE_SEMANTICS
        /*! @note "pass by value" the key idea to enable move semantics */
        BOOST_UBLAS_INLINE
        c_matrix &operator = (c_matrix m) {
            assign_temporary(m);
            return *this;
        }
#else
        // Copy assignment: copy the logical sizes, then the used part of each
        // row.  data_ is a fixed-capacity N x M array, so no allocation occurs.
        BOOST_UBLAS_INLINE
        c_matrix &operator = (const c_matrix &m) {
            size1_ = m.size1_;
            size2_ = m.size2_;
            for (size_type i = 0; i < m.size1_; ++ i)
                std::copy (m.data_ [i], m.data_ [i] + m.size2_, data_ [i]);
            return *this;
        }
#endif
        template<class C>          // Container assignment without temporary
        BOOST_UBLAS_INLINE
        c_matrix &operator = (const matrix_container<C> &m) {
            resize (m ().size1 (), m ().size2 (), false);
            assign (m);
            return *this;
        }
        // Adopt the contents of a temporary by swapping with it.
        // NOTE(review): swap () checks that the sizes match, so in checked
        // builds this asserts when assigning a differently-sized matrix.
        BOOST_UBLAS_INLINE
        c_matrix &assign_temporary (c_matrix &m) {
            swap (m);
            return *this;
        }
        // Expression assignment: evaluate into a temporary first so that an
        // aliasing expression (one that reads *this) is handled safely.
        template<class AE>
        BOOST_UBLAS_INLINE
        c_matrix &operator = (const matrix_expression<AE> &ae) {
            self_type temporary (ae);
            return assign_temporary (temporary);
        }
        // Direct expression assignment (no temporary); the caller guarantees
        // the expression does not alias *this.
        template<class AE>
        BOOST_UBLAS_INLINE
        c_matrix &assign (const matrix_expression<AE> &ae) {
            matrix_assign<scalar_assign> (*this, ae);
            return *this;
        }
        // Add an expression; evaluated through a temporary for alias safety.
        template<class AE>
        BOOST_UBLAS_INLINE
        c_matrix& operator += (const matrix_expression<AE> &ae) {
            self_type temporary (*this + ae);
            return assign_temporary (temporary);
        }
        template<class C>          // Container assignment without temporary
        BOOST_UBLAS_INLINE
        c_matrix &operator += (const matrix_container<C> &m) {
            plus_assign (m);
            return *this;
        }
        template<class AE>
        BOOST_UBLAS_INLINE
        c_matrix &plus_assign (const matrix_expression<AE> &ae) {
            matrix_assign<scalar_plus_assign> (*this, ae);
            return *this;
        }
        // Subtract an expression; evaluated through a temporary for alias safety.
        template<class AE>
        BOOST_UBLAS_INLINE
        c_matrix& operator -= (const matrix_expression<AE> &ae) {
            self_type temporary (*this - ae);
            return assign_temporary (temporary);
        }
        template<class C>          // Container assignment without temporary
        BOOST_UBLAS_INLINE
        c_matrix &operator -= (const matrix_container<C> &m) {
            minus_assign (m);
            return *this;
        }
        template<class AE>
        BOOST_UBLAS_INLINE
        c_matrix &minus_assign (const matrix_expression<AE> &ae) {
            matrix_assign<scalar_minus_assign> (*this, ae);
            return *this;
        }
        // In-place scalar multiply.
        template<class AT>
        BOOST_UBLAS_INLINE
        c_matrix& operator *= (const AT &at) {
            matrix_assign_scalar<scalar_multiplies_assign> (*this, at);
            return *this;
        }
        // In-place scalar divide.
        template<class AT>
        BOOST_UBLAS_INLINE
        c_matrix& operator /= (const AT &at) {
            matrix_assign_scalar<scalar_divides_assign> (*this, at);
            return *this;
        }
        // Swapping
        // Element-wise swap of the used region.  Sizes must already match
        // (checked in debug builds); the std::swap of the sizes is then a
        // no-op but kept for symmetry.
        BOOST_UBLAS_INLINE
        void swap (c_matrix &m) {
            if (this != &m) {
                BOOST_UBLAS_CHECK (size1_ == m.size1_, bad_size ());
                BOOST_UBLAS_CHECK (size2_ == m.size2_, bad_size ());
                std::swap (size1_, m.size1_);
                std::swap (size2_, m.size2_);
                for (size_type i = 0; i < size1_; ++ i)
                    std::swap_ranges (data_ [i], data_ [i] + size2_, m.data_ [i]);
            }
        }
        // Free swap, found via ADL.
        BOOST_UBLAS_INLINE
        friend void swap (c_matrix &m1, c_matrix &m2) {
            m1.swap (m2);
        }
        // Iterator types
    private:
        // Use pointers for iterator: iterators stride directly through the
        // underlying data_ [N] [M] storage.
        typedef const_pointer const_subiterator_type;
        typedef pointer subiterator_type;
    public:
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Index-based iterators (carry (i, j) indices instead of a pointer).
        typedef indexed_iterator1<self_type, dense_random_access_iterator_tag> iterator1;
        typedef indexed_iterator2<self_type, dense_random_access_iterator_tag> iterator2;
        typedef indexed_const_iterator1<self_type, dense_random_access_iterator_tag> const_iterator1;
        typedef indexed_const_iterator2<self_type, dense_random_access_iterator_tag> const_iterator2;
#else
        // Pointer-based iterators, defined below.
        class const_iterator1;
        class iterator1;
        class const_iterator2;
        class iterator2;
#endif
        typedef reverse_iterator_base1<const_iterator1> const_reverse_iterator1;
        typedef reverse_iterator_base1<iterator1> reverse_iterator1;
        typedef reverse_iterator_base2<const_iterator2> const_reverse_iterator2;
        typedef reverse_iterator_base2<iterator2> reverse_iterator2;
        // Element lookup
        // find1/find2 return an iterator positioned at element (i, j).
        // The rank argument is unused for dense storage.
        BOOST_UBLAS_INLINE
        const_iterator1 find1 (int /*rank*/, size_type i, size_type j) const {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
            return const_iterator1 (*this, i, j);
#else
            return const_iterator1 (*this, &data_ [i] [j]);
#endif
        }
        BOOST_UBLAS_INLINE
        iterator1 find1 (int /*rank*/, size_type i, size_type j) {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
            return iterator1 (*this, i, j);
#else
            return iterator1 (*this, &data_ [i] [j]);
#endif
        }
        BOOST_UBLAS_INLINE
        const_iterator2 find2 (int /*rank*/, size_type i, size_type j) const {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
            return const_iterator2 (*this, i, j);
#else
            return const_iterator2 (*this, &data_ [i] [j]);
#endif
        }
        BOOST_UBLAS_INLINE
        iterator2 find2 (int /*rank*/, size_type i, size_type j) {
#ifdef BOOST_UBLAS_USE_INDEXED_ITERATOR
            return iterator2 (*this, i, j);
#else
            return iterator2 (*this, &data_ [i] [j]);
#endif
        }
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Iterator over the first (row) index for a fixed column: each step
        // advances the raw pointer by M, the compile-time row length of
        // data_ [N] [M].
        class const_iterator1:
            public container_const_reference<c_matrix>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               const_iterator1, value_type> {
        public:
            typedef typename c_matrix::difference_type difference_type;
            typedef typename c_matrix::value_type value_type;
            typedef typename c_matrix::const_reference reference;
            typedef typename c_matrix::const_pointer pointer;

            typedef const_iterator2 dual_iterator_type;
            typedef const_reverse_iterator2 dual_reverse_iterator_type;

            // Construction and destruction
            BOOST_UBLAS_INLINE
            const_iterator1 ():
                container_const_reference<self_type> (), it_ () {}
            BOOST_UBLAS_INLINE
            const_iterator1 (const self_type &m, const const_subiterator_type &it):
                container_const_reference<self_type> (m), it_ (it) {}
            // Conversion from the mutable iterator.
            BOOST_UBLAS_INLINE
            const_iterator1 (const iterator1 &it):
                container_const_reference<self_type> (it ()), it_ (it.it_) {}

            // Arithmetic: stride is M elements (one storage row) per step.
            BOOST_UBLAS_INLINE
            const_iterator1 &operator ++ () {
                it_ += M;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator -- () {
                it_ -= M;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator += (difference_type n) {
                it_ += n * M;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator1 &operator -= (difference_type n) {
                it_ -= n * M;
                return *this;
            }
            // Distance in rows; both iterators must refer to the same matrix.
            BOOST_UBLAS_INLINE
            difference_type operator - (const const_iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return (it_ - it.it_) / M;
            }

            // Dereference
            BOOST_UBLAS_INLINE
            const_reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return *it_;
            }
            BOOST_UBLAS_INLINE
            const_reference operator [] (difference_type n) const {
                return *(*this + n);
            }

#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            // Dual iterators: traverse the row this iterator points into.
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 begin () const {
                const self_type &m = (*this) ();
                return m.find2 (1, index1 (), 0);
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 cbegin () const {
                return begin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 end () const {
                const self_type &m = (*this) ();
                return m.find2 (1, index1 (), m.size2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator2 cend () const {
                return end ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 rbegin () const {
                return const_reverse_iterator2 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 crbegin () const {
                return rbegin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 rend () const {
                return const_reverse_iterator2 (begin ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator2 crend () const {
                return rend ();
            }
#endif

            // Indices: recovered from the pointer offset relative to the
            // matrix origin (row = offset / M, column = offset % M).
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                const self_type &m = (*this) ();
                return (it_ - m.begin1 ().it_) / M;
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                const self_type &m = (*this) ();
                return (it_ - m.begin1 ().it_) % M;
            }

            // Assignment
            BOOST_UBLAS_INLINE
            const_iterator1 &operator = (const const_iterator1 &it) {
                container_const_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }

            // Comparison (only valid between iterators of the same matrix)
            BOOST_UBLAS_INLINE
            bool operator == (const const_iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ == it.it_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const const_iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ < it.it_;
            }

        private:
            const_subiterator_type it_;

            friend class iterator1;
        };
#endif
        // Row-direction range over the whole matrix (const).
        BOOST_UBLAS_INLINE
        const_iterator1 begin1 () const {
            return find1 (0, 0, 0);
        }
        BOOST_UBLAS_INLINE
        const_iterator1 cbegin1 () const {
            return begin1 ();
        }
        BOOST_UBLAS_INLINE
        const_iterator1 end1 () const {
            return find1 (0, size1_, 0);
        }
        BOOST_UBLAS_INLINE
        const_iterator1 cend1 () const {
            return end1 ();
        }
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Mutable counterpart of const_iterator1: same stride-M traversal of
        // the first (row) index, dereferencing to a mutable reference.
        class iterator1:
            public container_reference<c_matrix>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               iterator1, value_type> {
        public:
            typedef typename c_matrix::difference_type difference_type;
            typedef typename c_matrix::value_type value_type;
            typedef typename c_matrix::reference reference;
            typedef typename c_matrix::pointer pointer;

            typedef iterator2 dual_iterator_type;
            typedef reverse_iterator2 dual_reverse_iterator_type;

            // Construction and destruction
            BOOST_UBLAS_INLINE
            iterator1 ():
                container_reference<self_type> (), it_ () {}
            BOOST_UBLAS_INLINE
            iterator1 (self_type &m, const subiterator_type &it):
                container_reference<self_type> (m), it_ (it) {}

            // Arithmetic: stride is M elements (one storage row) per step.
            BOOST_UBLAS_INLINE
            iterator1 &operator ++ () {
                it_ += M;
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator1 &operator -- () {
                it_ -= M;
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator1 &operator += (difference_type n) {
                it_ += n * M;
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator1 &operator -= (difference_type n) {
                it_ -= n * M;
                return *this;
            }
            // Distance in rows; both iterators must refer to the same matrix.
            BOOST_UBLAS_INLINE
            difference_type operator - (const iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return (it_ - it.it_) / M;
            }

            // Dereference
            BOOST_UBLAS_INLINE
            reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return *it_;
            }
            BOOST_UBLAS_INLINE
            reference operator [] (difference_type n) const {
                return *(*this + n);
            }

#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            // Dual iterators: traverse the row this iterator points into.
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            iterator2 begin () const {
                self_type &m = (*this) ();
                return m.find2 (1, index1 (), 0);
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            iterator2 end () const {
                self_type &m = (*this) ();
                return m.find2 (1, index1 (), m.size2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            reverse_iterator2 rbegin () const {
                return reverse_iterator2 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            reverse_iterator2 rend () const {
                return reverse_iterator2 (begin ());
            }
#endif

            // Indices: row = pointer offset / M, column = offset % M.
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                const self_type &m = (*this) ();
                return (it_ - m.begin1 ().it_) / M;
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                const self_type &m = (*this) ();
                return (it_ - m.begin1 ().it_) % M;
            }

            // Assignment
            BOOST_UBLAS_INLINE
            iterator1 &operator = (const iterator1 &it) {
                container_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }

            // Comparison (only valid between iterators of the same matrix)
            BOOST_UBLAS_INLINE
            bool operator == (const iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ == it.it_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const iterator1 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ < it.it_;
            }

        private:
            subiterator_type it_;

            // const_iterator1's converting constructor reads it_.
            friend class const_iterator1;
        };
#endif
        // Row-direction range over the whole matrix (mutable).
        BOOST_UBLAS_INLINE
        iterator1 begin1 () {
            return find1 (0, 0, 0);
        }
        BOOST_UBLAS_INLINE
        iterator1 end1 () {
            return find1 (0, size1_, 0);
        }
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Iterator over the second (column) index for a fixed row: each step
        // advances the raw pointer by one element within the contiguous row
        // of data_ [N] [M].
        class const_iterator2:
            public container_const_reference<c_matrix>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               const_iterator2, value_type> {
        public:
            typedef typename c_matrix::difference_type difference_type;
            typedef typename c_matrix::value_type value_type;
            typedef typename c_matrix::const_reference reference;
            // Fixed: 'pointer' previously aliased const_reference, i.e. a
            // reference type.  It must be a pointer type, consistent with
            // const_iterator1 and the iterator concept.
            typedef typename c_matrix::const_pointer pointer;

            typedef const_iterator1 dual_iterator_type;
            typedef const_reverse_iterator1 dual_reverse_iterator_type;

            // Construction and destruction
            BOOST_UBLAS_INLINE
            const_iterator2 ():
                container_const_reference<self_type> (), it_ () {}
            BOOST_UBLAS_INLINE
            const_iterator2 (const self_type &m, const const_subiterator_type &it):
                container_const_reference<self_type> (m), it_ (it) {}
            // Conversion from the mutable iterator.
            BOOST_UBLAS_INLINE
            const_iterator2 (const iterator2 &it):
                container_const_reference<self_type> (it ()), it_ (it.it_) {}

            // Arithmetic: unit stride along a row.
            BOOST_UBLAS_INLINE
            const_iterator2 &operator ++ () {
                ++ it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator -- () {
                -- it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator += (difference_type n) {
                it_ += n;
                return *this;
            }
            BOOST_UBLAS_INLINE
            const_iterator2 &operator -= (difference_type n) {
                it_ -= n;
                return *this;
            }
            // Distance in columns; both iterators must refer to the same matrix.
            BOOST_UBLAS_INLINE
            difference_type operator - (const const_iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ - it.it_;
            }

            // Dereference
            BOOST_UBLAS_INLINE
            const_reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return *it_;
            }
            BOOST_UBLAS_INLINE
            const_reference operator [] (difference_type n) const {
                return *(*this + n);
            }

#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            // Dual iterators: traverse the column this iterator points into.
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 begin () const {
                const self_type &m = (*this) ();
                return m.find1 (1, 0, index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 cbegin () const {
                return begin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 end () const {
                const self_type &m = (*this) ();
                return m.find1 (1, m.size1 (), index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_iterator1 cend () const {
                return end ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 rbegin () const {
                return const_reverse_iterator1 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 crbegin () const {
                return rbegin ();
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 rend () const {
                return const_reverse_iterator1 (begin ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            const_reverse_iterator1 crend () const {
                return rend ();
            }
#endif

            // Indices: row = pointer offset / M, column = offset % M.
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                const self_type &m = (*this) ();
                return (it_ - m.begin2 ().it_) / M;
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                const self_type &m = (*this) ();
                return (it_ - m.begin2 ().it_) % M;
            }

            // Assignment
            BOOST_UBLAS_INLINE
            const_iterator2 &operator = (const const_iterator2 &it) {
                container_const_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }

            // Comparison (only valid between iterators of the same matrix)
            BOOST_UBLAS_INLINE
            bool operator == (const const_iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ == it.it_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const const_iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ < it.it_;
            }

        private:
            const_subiterator_type it_;

            friend class iterator2;
        };
#endif
        // Column-direction range over the whole matrix (const).
        BOOST_UBLAS_INLINE
        const_iterator2 begin2 () const {
            return find2 (0, 0, 0);
        }
        BOOST_UBLAS_INLINE
        const_iterator2 cbegin2 () const {
            return begin2 ();
        }
        BOOST_UBLAS_INLINE
        const_iterator2 end2 () const {
            return find2 (0, 0, size2_);
        }
        BOOST_UBLAS_INLINE
        const_iterator2 cend2 () const {
            return end2 ();
        }
#ifndef BOOST_UBLAS_USE_INDEXED_ITERATOR
        // Mutable counterpart of const_iterator2: unit-stride traversal of
        // the second (column) index, dereferencing to a mutable reference.
        class iterator2:
            public container_reference<c_matrix>,
            public random_access_iterator_base<dense_random_access_iterator_tag,
                                               iterator2, value_type> {
        public:
            typedef typename c_matrix::difference_type difference_type;
            typedef typename c_matrix::value_type value_type;
            typedef typename c_matrix::reference reference;
            typedef typename c_matrix::pointer pointer;

            typedef iterator1 dual_iterator_type;
            typedef reverse_iterator1 dual_reverse_iterator_type;

            // Construction and destruction
            BOOST_UBLAS_INLINE
            iterator2 ():
                container_reference<self_type> (), it_ () {}
            BOOST_UBLAS_INLINE
            iterator2 (self_type &m, const subiterator_type &it):
                container_reference<self_type> (m), it_ (it) {}

            // Arithmetic: unit stride along a row.
            BOOST_UBLAS_INLINE
            iterator2 &operator ++ () {
                ++ it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator2 &operator -- () {
                -- it_;
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator2 &operator += (difference_type n) {
                it_ += n;
                return *this;
            }
            BOOST_UBLAS_INLINE
            iterator2 &operator -= (difference_type n) {
                it_ -= n;
                return *this;
            }
            // Distance in columns; both iterators must refer to the same matrix.
            BOOST_UBLAS_INLINE
            difference_type operator - (const iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ - it.it_;
            }

            // Dereference
            BOOST_UBLAS_INLINE
            reference operator * () const {
                BOOST_UBLAS_CHECK (index1 () < (*this) ().size1 (), bad_index ());
                BOOST_UBLAS_CHECK (index2 () < (*this) ().size2 (), bad_index ());
                return *it_;
            }
            BOOST_UBLAS_INLINE
            reference operator [] (difference_type n) const {
                return *(*this + n);
            }

#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            // Dual iterators: traverse the column this iterator points into.
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            iterator1 begin () const {
                self_type &m = (*this) ();
                return m.find1 (1, 0, index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            iterator1 end () const {
                self_type &m = (*this) ();
                return m.find1 (1, m.size1 (), index2 ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            reverse_iterator1 rbegin () const {
                return reverse_iterator1 (end ());
            }
            BOOST_UBLAS_INLINE
#ifdef BOOST_UBLAS_MSVC_NESTED_CLASS_RELATION
            typename self_type::
#endif
            reverse_iterator1 rend () const {
                return reverse_iterator1 (begin ());
            }
#endif

            // Indices: row = pointer offset / M, column = offset % M.
            BOOST_UBLAS_INLINE
            size_type index1 () const {
                const self_type &m = (*this) ();
                return (it_ - m.begin2 ().it_) / M;
            }
            BOOST_UBLAS_INLINE
            size_type index2 () const {
                const self_type &m = (*this) ();
                return (it_ - m.begin2 ().it_) % M;
            }

            // Assignment
            BOOST_UBLAS_INLINE
            iterator2 &operator = (const iterator2 &it) {
                container_reference<self_type>::assign (&it ());
                it_ = it.it_;
                return *this;
            }

            // Comparison (only valid between iterators of the same matrix)
            BOOST_UBLAS_INLINE
            bool operator == (const iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ == it.it_;
            }
            BOOST_UBLAS_INLINE
            bool operator < (const iterator2 &it) const {
                BOOST_UBLAS_CHECK (&(*this) () == &it (), external_logic ());
                return it_ < it.it_;
            }

        private:
            subiterator_type it_;

            // const_iterator2's converting constructor reads it_.
            friend class const_iterator2;
        };
#endif
        // Column-direction range over the whole matrix (mutable).
        BOOST_UBLAS_INLINE
        iterator2 begin2 () {
            return find2 (0, 0, 0);
        }
        BOOST_UBLAS_INLINE
        iterator2 end2 () {
            return find2 (0, 0, size2_);
        }
        // Reverse iterators
        // Thin adaptors over the forward ranges above; rbegin wraps end and
        // rend wraps begin, as usual for reverse iterators.
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 rbegin1 () const {
            return const_reverse_iterator1 (end1 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 crbegin1 () const {
            return rbegin1 ();
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 rend1 () const {
            return const_reverse_iterator1 (begin1 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator1 crend1 () const {
            return rend1 ();
        }
        BOOST_UBLAS_INLINE
        reverse_iterator1 rbegin1 () {
            return reverse_iterator1 (end1 ());
        }
        BOOST_UBLAS_INLINE
        reverse_iterator1 rend1 () {
            return reverse_iterator1 (begin1 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 rbegin2 () const {
            return const_reverse_iterator2 (end2 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 crbegin2 () const {
            return rbegin2 ();
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 rend2 () const {
            return const_reverse_iterator2 (begin2 ());
        }
        BOOST_UBLAS_INLINE
        const_reverse_iterator2 crend2 () const {
            return rend2 ();
        }
        BOOST_UBLAS_INLINE
        reverse_iterator2 rbegin2 () {
            return reverse_iterator2 (end2 ());
        }
        BOOST_UBLAS_INLINE
        reverse_iterator2 rend2 () {
            return reverse_iterator2 (begin2 ());
        }
        // Serialization
        // Serializes the logical sizes followed by the storage.  Note that
        // make_array(data_, N) archives all N fixed-capacity rows, not just
        // the size1_ rows in use.
        template<class Archive>
        void serialize(Archive & ar, const unsigned int /* file_version */){
            // we need to copy to a collection_size_type to get a portable
            // and efficient serialization
            serialization::collection_size_type s1 (size1_);
            serialization::collection_size_type s2 (size2_);

            // serialize the sizes
            ar & serialization::make_nvp("size1",s1)
               & serialization::make_nvp("size2",s2);

            // copy the values back if loading
            if (Archive::is_loading::value) {
                size1_ = s1;
                size2_ = s2;
            }
            // could probably use make_array( &(data[0][0]), N*M )
            ar & serialization::make_array(data_, N);
        }
    private:
        size_type size1_;            // logical number of rows in use
        size_type size2_;            // logical number of columns in use
        value_type data_ [N] [M];    // fixed-capacity row-major storage
};
}}}
#endif
|
{"hexsha": "882997aed2bfad776a37bd9343f0f0aff8e98b29", "size": 215376, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "Pods/Headers/Private/GeoFeatures/boost/numeric/ublas/matrix.hpp", "max_stars_repo_name": "xarvey/Yuuuuuge", "max_stars_repo_head_hexsha": "9f4ec32f81cf813ea630ba2c44eb03970c56dad3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Pods/Headers/Private/GeoFeatures/boost/numeric/ublas/matrix.hpp", "max_issues_repo_name": "xarvey/Yuuuuuge", "max_issues_repo_head_hexsha": "9f4ec32f81cf813ea630ba2c44eb03970c56dad3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Pods/Headers/Private/GeoFeatures/boost/numeric/ublas/matrix.hpp", "max_forks_repo_name": "xarvey/Yuuuuuge", "max_forks_repo_head_hexsha": "9f4ec32f81cf813ea630ba2c44eb03970c56dad3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9619302054, "max_line_length": 144, "alphanum_fraction": 0.5594448778, "num_tokens": 46950}
|
#
# gemini_python
#
# primitives_gmos_longslit.py
# ------------------------------------------------------------------------------
from copy import copy, deepcopy
from importlib import import_module
import astrodata
import numpy as np
from astrodata.provenance import add_provenance
from astropy import visualization as vis
from astropy.modeling import models, fitting
from astropy.stats import sigma_clip
from geminidr.core.primitives_spect import _transpose_if_needed
from geminidr.gemini.lookups import DQ_definitions as DQ
from gempy.gemini import gemini_tools as gt
from gempy.library import astrotools as at
from gempy.library import astromodels, transform
from gwcs import coordinate_frames
from gwcs.wcs import WCS as gWCS
from matplotlib import gridspec
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from recipe_system.utils.decorators import parameter_override
from recipe_system.utils.md5 import md5sum
from .primitives_gmos_spect import GMOSSpect
from .primitives_gmos_nodandshuffle import GMOSNodAndShuffle
from . import parameters_gmos_longslit
# ------------------------------------------------------------------------------
@parameter_override
class GMOSLongslit(GMOSSpect, GMOSNodAndShuffle):
"""
This is the class containing all of the preprocessing primitives
for the GMOSLongslit level of the type hierarchy tree. It inherits all
the primitives from the level above
"""
tagset = {"GEMINI", "GMOS", "SPECT", "LS"}
    def __init__(self, adinputs, **kwargs):
        """
        Initialize the primitive class and overlay the GMOS longslit
        parameter definitions on top of those inherited from the parents.
        """
        super().__init__(adinputs, **kwargs)
        self._param_update(parameters_gmos_longslit)
    def addIllumMaskToDQ(self, adinputs=None, suffix=None, illum_mask=None):
        """
        Adds an illumination mask to the DQ plane of each AD object.

        If an explicit mask is supplied, it is clipped to each extension and
        OR-ed into the DQ plane. Otherwise, for data that include the
        inter-CCD bridge regions, unilluminated rows are found by fitting a
        degree-3 polynomial to the row-by-row 95th-percentile flux and
        flagging rows that fall significantly below the fit.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        illum_mask: str/None
            name of illumination mask (None -> use default)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad, illum in zip(*gt.make_lists(adinputs, illum_mask, force_ad=True)):
            # Skip inputs already processed by this primitive.
            if ad.phu.get(timestamp_key):
                log.warning('No changes will be made to {}, since it has '
                            'already been processed by addIllumMaskToDQ'.
                            format(ad.filename))
                continue

            # Geometry tests in detector coordinates:
            # no_bridges -> every section lies strictly between rows 1600 and
            #               2900 (so no bridge rows fall inside the data);
            # has_48rows -> full-height Hamamatsu readout (y2 == 4224), whose
            #               bottom 48 unbinned rows are masked below.
            ad_detsec = ad.detector_section()
            no_bridges = all(detsec.y1 > 1600 and detsec.y2 < 2900
                             for detsec in ad_detsec)
            has_48rows = (all(detsec.y2 == 4224 for detsec in ad_detsec) and
                          'Hamamatsu' in ad.detector_name(pretty=True))

            if illum:
                log.fullinfo("Using {} as illumination mask".format(illum.filename))
                final_illum = gt.clip_auxiliary_data(ad, aux=illum, aux_type='bpm',
                                                     return_dtype=DQ.datatype)

                for ext, illum_ext in zip(ad, final_illum):
                    if illum_ext is not None:
                        # Ensure we're only adding the unilluminated bit
                        iext = np.where(illum_ext.data > 0, DQ.unilluminated,
                                        0).astype(DQ.datatype)
                        ext.mask = iext if ext.mask is None else ext.mask | iext
            elif not no_bridges:  # i.e. there are bridges.
                # Default operation for GMOS full-frame LS
                # The 95% cut should ensure that we're sampling something
                # bright (even for an arc)
                # The max is intended to handle R150 data, where many of
                # the extensions are unilluminated
                row_medians = np.max(np.array([np.percentile(ext.data, 95, axis=1)
                                               for ext in ad]), axis=0)
                rows = np.arange(len(row_medians))
                m_init = models.Polynomial1D(degree=3)
                # Iterative fit with asymmetric sigma clipping (tighter on
                # the high side: sigma_upper=1 vs sigma_lower=3).
                fit_it = fitting.FittingWithOutlierRemoval(fitting.LinearLSQFitter(),
                                                           outlier_func=sigma_clip,
                                                           sigma_upper=1, sigma_lower=3)
                m_final, _ = fit_it(m_init, rows, row_medians)
                model_fit = m_final(rows)
                # Find points which are significantly below the smooth illumination fit
                # First ensure we don't worry about single rows
                row_mask = at.boxcar(model_fit - row_medians > 0.1 * model_fit,
                                     operation=np.logical_and, size=1)
                # ...then dilate the flagged region with a wider OR boxcar.
                row_mask = at.boxcar(row_mask, operation=np.logical_or, size=3)
                for ext in ad:
                    # NOTE(review): assumes ext.mask is not None here (unlike
                    # the explicit-mask branch above) — confirm upstream
                    # always attaches a DQ plane before this primitive runs.
                    ext.mask |= (row_mask * DQ.unilluminated).astype(DQ.datatype)[:, np.newaxis]

            if has_48rows:
                # Convert the 48 unbinned rows to binned pixels.
                actual_rows = 48 // ad.detector_y_bin()
                for ext in ad:
                    ext.mask[:actual_rows] |= DQ.unilluminated

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)

        return adinputs
def makeSlitIllum(self, adinputs=None, **params):
"""
Makes the processed Slit Illumination Function by binning a 2D
spectrum along the dispersion direction, fitting a smooth function
for each bin, fitting a smooth 2D model, and reconstructing the 2D
array using this last model.
Its implementation based on the IRAF's `noao.twodspec.longslit.illumination`
task following the algorithm described in [Valdes, 1968].
It expects an input calibration image to be an a dispersed image of the
slit without illumination problems (e.g, twilight flat). The spectra is
not required to be smooth in wavelength and may contain strong emission
and absorption lines. The image should contain a `.mask` attribute in
each extension, and it is expected to be overscan and bias corrected.
Parameters
----------
adinputs : list
List of AstroData objects containing the dispersed image of the
slit of a source free of illumination problems. The data needs to
have been overscan and bias corrected and is expected to have a
Data Quality mask.
bins : {None, int}, optional
Total number of bins across the dispersion axis. If None,
the number of bins will match the number of extensions on each
input AstroData object. It it is an int, it will create N bins
with the same size.
border : int, optional
Border size that is added on every edge of the slit illumination
image before cutting it down to the input AstroData frame.
smooth_order : int, optional
Order of the spline that is used in each bin fitting to smooth
the data (Default: 3)
x_order : int, optional
Order of the x-component in the Chebyshev2D model used to
reconstruct the 2D data from the binned data.
y_order : int, optional
Order of the y-component in the Chebyshev2D model used to
reconstruct the 2D data from the binned data.
Return
------
List of AstroData : containing an AstroData with the Slit Illumination
Response Function for each of the input object.
References
----------
.. [Valdes, 1968] Francisco Valdes "Reduction Of Long Slit Spectra With
IRAF", Proc. SPIE 0627, Instrumentation in Astronomy VI,
(13 October 1986); https://doi.org/10.1117/12.968155
"""
log = self.log
log.debug(gt.log_message("primitive", self.myself(), "starting"))
timestamp_key = self.timestamp_keys[self.myself()]
suffix = params["suffix"]
bins = params["bins"]
border = params["border"]
debug_plot = params["debug_plot"]
smooth_order = params["smooth_order"]
cheb2d_x_order = params["x_order"]
cheb2d_y_order = params["y_order"]
ad_outputs = []
for ad in adinputs:
if len(ad) > 1 and "mosaic" not in ad[0].wcs.available_frames:
log.info('Add "mosaic" gWCS frame to input data')
geotable = import_module('.geometry_conf', self.inst_lookups)
# deepcopy prevents modifying input `ad` inplace
ad = transform.add_mosaic_wcs(deepcopy(ad), geotable)
log.info("Temporarily mosaicking multi-extension file")
mosaicked_ad = transform.resample_from_wcs(
ad, "mosaic", attributes=None, order=1, process_objcat=False)
else:
log.info('Input data already has one extension and has a '
'"mosaic" frame.')
# deepcopy prevents modifying input `ad` inplace
mosaicked_ad = deepcopy(ad)
log.info("Transposing data if needed")
dispaxis = 2 - mosaicked_ad[0].dispersion_axis() # python sense
should_transpose = dispaxis == 1
data, mask, variance = _transpose_if_needed(
mosaicked_ad[0].data, mosaicked_ad[0].mask,
mosaicked_ad[0].variance, transpose=should_transpose)
log.info("Masking data")
data = np.ma.masked_array(data, mask=mask)
variance = np.ma.masked_array(variance, mask=mask)
std = np.sqrt(variance) # Easier to work with
log.info("Creating bins for data and variance")
height = data.shape[0]
width = data.shape[1]
if bins is None:
nbins = max(len(ad), 12)
bin_limits = np.linspace(0, height, nbins + 1, dtype=int)
elif isinstance(bins, int):
nbins = bins
bin_limits = np.linspace(0, height, nbins + 1, dtype=int)
else:
# ToDo: Handle input bins as array
raise TypeError("Expected None or Int for `bins`. "
"Found: {}".format(type(bins)))
bin_top = bin_limits[1:]
bin_bot = bin_limits[:-1]
binned_data = np.zeros_like(data)
binned_std = np.zeros_like(std)
log.info("Smooth binned data and variance, and normalize them by "
"smoothed central value")
for bin_idx, (b0, b1) in enumerate(zip(bin_bot, bin_top)):
rows = np.arange(width)
avg_data = np.ma.mean(data[b0:b1], axis=0)
model_1d_data = astromodels.UnivariateSplineWithOutlierRemoval(
rows, avg_data, order=smooth_order)
avg_std = np.ma.mean(std[b0:b1], axis=0)
model_1d_std = astromodels.UnivariateSplineWithOutlierRemoval(
rows, avg_std, order=smooth_order)
slit_central_value = model_1d_data(rows)[width // 2]
binned_data[b0:b1] = model_1d_data(rows) / slit_central_value
binned_std[b0:b1] = model_1d_std(rows) / slit_central_value
log.info("Reconstruct 2D mosaicked data")
bin_center = np.array(0.5 * (bin_bot + bin_top), dtype=int)
cols_fit, rows_fit = np.meshgrid(np.arange(width), bin_center)
fitter = fitting.SLSQPLSQFitter()
model_2d_init = models.Chebyshev2D(
x_degree=cheb2d_x_order, x_domain=(0, width),
y_degree=cheb2d_y_order, y_domain=(0, height))
model_2d_data = fitter(model_2d_init, cols_fit, rows_fit,
binned_data[rows_fit, cols_fit])
model_2d_std = fitter(model_2d_init, cols_fit, rows_fit,
binned_std[rows_fit, cols_fit])
rows_val, cols_val = \
np.mgrid[-border:height+border, -border:width+border]
slit_response_data = model_2d_data(cols_val, rows_val)
slit_response_mask = np.pad(mask, border, mode='edge') # ToDo: any update to the mask?
slit_response_std = model_2d_std(cols_val, rows_val)
slit_response_var = slit_response_std ** 2
del cols_fit, cols_val, rows_fit, rows_val
_data, _mask, _variance = _transpose_if_needed(
slit_response_data, slit_response_mask, slit_response_var,
transpose=dispaxis == 1)
log.info("Update slit response data and data_section")
slit_response_ad = deepcopy(mosaicked_ad)
slit_response_ad[0].data = _data
slit_response_ad[0].mask = _mask
slit_response_ad[0].variance = _variance
if "mosaic" in ad[0].wcs.available_frames:
log.info("Map coordinates between slit function and mosaicked data") # ToDo: Improve message?
slit_response_ad = _split_mosaic_into_extensions(
ad, slit_response_ad, border_size=border)
elif len(ad) == 1:
log.info("Trim out borders")
slit_response_ad[0].data = \
slit_response_ad[0].data[border:-border, border:-border]
slit_response_ad[0].mask = \
slit_response_ad[0].mask[border:-border, border:-border]
slit_response_ad[0].variance = \
slit_response_ad[0].variance[border:-border, border:-border]
log.info("Update metadata and filename")
gt.mark_history(
slit_response_ad, primname=self.myself(), keyword=timestamp_key)
slit_response_ad.update_filename(suffix=suffix, strip=True)
ad_outputs.append(slit_response_ad)
# Plotting ------
if debug_plot:
log.info("Creating plots")
palette = copy(plt.cm.cividis)
palette.set_bad('r', 0.75)
norm = vis.ImageNormalize(data[~data.mask],
stretch=vis.LinearStretch(),
interval=vis.PercentileInterval(97))
fig = plt.figure(
num="Slit Response from MEF - {}".format(ad.filename),
figsize=(12, 9), dpi=110)
gs = gridspec.GridSpec(nrows=2, ncols=3, figure=fig)
# Display raw mosaicked data and its bins ---
ax1 = fig.add_subplot(gs[0, 0])
im1 = ax1.imshow(data, cmap=palette, origin='lower',
vmin=norm.vmin, vmax=norm.vmax)
ax1.set_title("Mosaicked Data\n and Spectral Bins", fontsize=10)
ax1.set_xlim(-1, data.shape[1])
ax1.set_xticks([])
ax1.set_ylim(-1, data.shape[0])
ax1.set_yticks(bin_center)
ax1.tick_params(axis=u'both', which=u'both', length=0)
ax1.set_yticklabels(
["Bin {}".format(i) for i in range(len(bin_center))],
fontsize=6)
_ = [ax1.spines[s].set_visible(False) for s in ax1.spines]
_ = [ax1.axhline(b, c='w', lw=0.5) for b in bin_limits]
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im1, cax=cax1)
# Display non-smoothed bins ---
ax2 = fig.add_subplot(gs[0, 1])
im2 = ax2.imshow(binned_data, cmap=palette, origin='lower')
ax2.set_title("Binned, smoothed\n and normalized data ", fontsize=10)
ax2.set_xlim(0, data.shape[1])
ax2.set_xticks([])
ax2.set_ylim(0, data.shape[0])
ax2.set_yticks(bin_center)
ax2.tick_params(axis=u'both', which=u'both', length=0)
ax2.set_yticklabels(
["Bin {}".format(i) for i in range(len(bin_center))],
fontsize=6)
_ = [ax2.spines[s].set_visible(False) for s in ax2.spines]
_ = [ax2.axhline(b, c='w', lw=0.5) for b in bin_limits]
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im2, cax=cax2)
# Display reconstructed slit response ---
vmin = slit_response_data.min()
vmax = slit_response_data.max()
ax3 = fig.add_subplot(gs[1, 0])
im3 = ax3.imshow(slit_response_data, cmap=palette,
origin='lower', vmin=vmin, vmax=vmax)
ax3.set_title("Reconstructed\n Slit response", fontsize=10)
ax3.set_xlim(0, data.shape[1])
ax3.set_xticks([])
ax3.set_ylim(0, data.shape[0])
ax3.set_yticks([])
ax3.tick_params(axis=u'both', which=u'both', length=0)
_ = [ax3.spines[s].set_visible(False) for s in ax3.spines]
divider = make_axes_locatable(ax3)
cax3 = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im3, cax=cax3)
# Display extensions ---
ax4 = fig.add_subplot(gs[1, 1])
ax4.set_xticks([])
ax4.set_yticks([])
_ = [ax4.spines[s].set_visible(False) for s in ax4.spines]
sub_gs4 = gridspec.GridSpecFromSubplotSpec(
nrows=len(ad), ncols=1, subplot_spec=gs[1, 1], hspace=0.03)
# The [::-1] is needed to put the fist extension in the bottom
for i, ext in enumerate(slit_response_ad[::-1]):
ext_data, ext_mask, ext_variance = _transpose_if_needed(
ext.data, ext.mask, ext.variance, transpose=dispaxis == 1)
ext_data = np.ma.masked_array(ext_data, mask=ext_mask)
sub_ax = fig.add_subplot(sub_gs4[i])
im4 = sub_ax.imshow(ext_data, origin="lower", vmin=vmin,
vmax=vmax, cmap=palette)
sub_ax.set_xlim(0, ext_data.shape[1])
sub_ax.set_xticks([])
sub_ax.set_ylim(0, ext_data.shape[0])
sub_ax.set_yticks([ext_data.shape[0] // 2])
sub_ax.set_yticklabels(
["Ext {}".format(len(slit_response_ad) - i - 1)],
fontsize=6)
_ = [sub_ax.spines[s].set_visible(False) for s in sub_ax.spines]
if i == 0:
sub_ax.set_title("Multi-extension\n Slit Response Function")
divider = make_axes_locatable(ax4)
cax4 = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im4, cax=cax4)
# Display Signal-To-Noise Ratio ---
snr = data / np.sqrt(variance)
norm = vis.ImageNormalize(snr[~snr.mask],
stretch=vis.LinearStretch(),
interval=vis.PercentileInterval(97))
ax5 = fig.add_subplot(gs[0, 2])
im5 = ax5.imshow(snr, cmap=palette, origin='lower',
vmin=norm.vmin, vmax=norm.vmax)
ax5.set_title("Mosaicked Data SNR", fontsize=10)
ax5.set_xlim(-1, data.shape[1])
ax5.set_xticks([])
ax5.set_ylim(-1, data.shape[0])
ax5.set_yticks(bin_center)
ax5.tick_params(axis=u'both', which=u'both', length=0)
ax5.set_yticklabels(
["Bin {}".format(i) for i in range(len(bin_center))],
fontsize=6)
_ = [ax5.spines[s].set_visible(False) for s in ax5.spines]
_ = [ax5.axhline(b, c='w', lw=0.5) for b in bin_limits]
divider = make_axes_locatable(ax5)
cax5 = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im5, cax=cax5)
# Display Signal-To-Noise Ratio of Slit Illumination ---
slit_response_snr = np.ma.masked_array(
slit_response_data / np.sqrt(slit_response_var),
mask=slit_response_mask)
ax6 = fig.add_subplot(gs[1, 2])
im6 = ax6.imshow(slit_response_snr, origin="lower",
vmin=norm.vmin, vmax=norm.vmax, cmap=palette)
ax6.set_xlim(0, slit_response_snr.shape[1])
ax6.set_xticks([])
ax6.set_ylim(0, slit_response_snr.shape[0])
ax6.set_yticks([])
ax6.set_title("Reconstructed\n Slit Response SNR")
_ = [ax6.spines[s] .set_visible(False) for s in ax6.spines]
divider = make_axes_locatable(ax6)
cax6 = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im6, cax=cax6)
# Save plots ---
fig.tight_layout(rect=[0, 0, 0.95, 1], pad=0.5)
fname = slit_response_ad.filename.replace(".fits", ".png")
log.info("Saving plots to {}".format(fname))
plt.savefig(fname)
return ad_outputs
def normalizeFlat(self, adinputs=None, **params):
    """
    This primitive normalizes a GMOS Longslit spectroscopic flatfield
    in a manner similar to that performed by gsflat in Gemini-IRAF.
    A cubic spline is fitted along the dispersion direction of each
    row, separately for each CCD.

    As this primitive is GMOS-specific, we know the dispersion direction
    will be along the rows, and there will be 3 CCDs.

    For Hamamatsu CCDs, the 21 unbinned columns at each CCD edge are
    masked out, following the procedure in gsflat.
    TODO: Should we add these in the BPM?

    Parameters
    ----------
    adinputs : list of AstroData
        Flatfield frames, normalized in place.
    suffix: str
        suffix to be added to output files
    spectral_order: int/str
        order of fit in spectral direction
    threshold : float
        fraction of the per-row peak below which a fitted pixel is
        flagged as unilluminated

    Returns
    -------
    list of AstroData
        The input list, with each frame divided by its fitted response.
    """
    log = self.log
    log.debug(gt.log_message("primitive", self.myself(), "starting"))
    timestamp_key = self.timestamp_keys[self.myself()]

    # For flexibility, the code is going to pass whatever validated
    # parameters it gets (apart from suffix and spectral_order) to
    # the spline fitter
    spline_kwargs = params.copy()
    suffix = spline_kwargs.pop("suffix")
    spectral_order = spline_kwargs.pop("spectral_order")
    threshold = spline_kwargs.pop("threshold")

    # Parameter validation should ensure we get an int or a list of 3 ints
    try:
        orders = [int(x) for x in spectral_order]
    except TypeError:
        orders = [spectral_order] * 3

    for ad in adinputs:
        xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()
        array_info = gt.array_information(ad)
        is_hamamatsu = 'Hamamatsu' in ad.detector_name(pretty=True)
        # Tile the extensions so each CCD becomes a single extension,
        # allowing one spline fit per CCD row
        ad_tiled = self.tileArrays([ad], tile_all=False)[0]
        ad_fitted = astrodata.create(ad.phu)
        for ext, order, indices in zip(ad_tiled, orders, array_info.extensions):
            # If the entire row is unilluminated, we want to fit
            # the pixels but still keep the edges masked
            try:
                ext.mask ^= (np.bitwise_and.reduce(ext.mask, axis=1) & DQ.unilluminated)[:, None]
            except TypeError:  # ext.mask is None
                pass
            else:
                if is_hamamatsu:
                    # NOTE(review): -21 // xbin floors (e.g. -11 for xbin=2),
                    # so one extra binned column may be masked at the right
                    # edge compared with -(21 // xbin) -- confirm intended
                    ext.mask[:, :21 // xbin] = 1
                    ext.mask[:, -21 // xbin:] = 1

            fitted_data = np.empty_like(ext.data)
            pixels = np.arange(ext.shape[1])
            # Fit each row independently along the dispersion direction
            for i, row in enumerate(ext.nddata):
                masked_data = np.ma.masked_array(row.data, mask=row.mask)
                # Inverse-variance weights; zero weight where variance <= 0
                weights = np.sqrt(np.where(row.variance > 0, 1. / row.variance, 0.))
                spline = astromodels.UnivariateSplineWithOutlierRemoval(pixels, masked_data,
                                                                        order=order, w=weights, **spline_kwargs)
                fitted_data[i] = spline(pixels)
            # Copy header so we have the _section() descriptors
            ad_fitted.append(fitted_data, header=ext.hdr)

        # Find the largest spline value for each row across all extensions
        # and mask pixels below the requested fraction of the peak
        row_max = np.array([ext_fitted.data.max(axis=1)
                            for ext_fitted in ad_fitted]).max(axis=0)

        # Prevent runtime error in division
        row_max[row_max == 0] = np.inf

        for ext_fitted in ad_fitted:
            ext_fitted.mask = np.where(
                (ext_fitted.data.T / row_max).T < threshold,
                DQ.unilluminated, DQ.good)

        # Map each fitted (tiled) extension back onto the original,
        # untiled extensions and divide in place
        for ext_fitted, indices in zip(ad_fitted, array_info.extensions):
            tiled_arrsec = ext_fitted.array_section()
            for i in indices:
                ext = ad[i]
                arrsec = ext.array_section()
                # Pixel slice of this extension within the tiled array
                slice_ = (slice((arrsec.y1 - tiled_arrsec.y1) // ybin, (arrsec.y2 - tiled_arrsec.y1) // ybin),
                          slice((arrsec.x1 - tiled_arrsec.x1) // xbin, (arrsec.x2 - tiled_arrsec.x1) // xbin))
                # Suppress warnings to do with fitted_data==0
                # (which create NaNs in variance)
                with np.errstate(invalid='ignore', divide='ignore'):
                    ext.divide(ext_fitted.nddata[slice_])
                np.nan_to_num(ext.data, copy=False, posinf=0, neginf=0)
                np.nan_to_num(ext.variance, copy=False)

        # Timestamp and update filename
        gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
        ad.update_filename(suffix=suffix, strip=True)

    return adinputs
def slitIllumCorrect(self, adinputs=None, slit_illum=None,
                     do_illum=True, suffix="_illumCorrected"):
    """
    This primitive will divide each SCI extension of the inputs by those
    of the corresponding slit illumination image. If the inputs contain
    VAR or DQ frames, those will also be updated accordingly due to the
    division on the data.

    Parameters
    ----------
    adinputs : list of AstroData
        Data to be corrected.
    slit_illum : str or AstroData
        Slit illumination path or AstroData object.
    do_illum: bool, optional
        Perform slit illumination correction? (Default: True)
    suffix : str, optional
        Suffix appended to the output filename.

    Returns
    -------
    list of AstroData
        Corrected frames (input list returned unchanged if do_illum is False).
    """
    log = self.log
    log.debug(gt.log_message("primitive", self.myself(), "starting"))
    timestamp_key = self.timestamp_keys[self.myself()]
    qecorr_key = self.timestamp_keys['QECorrect']

    if not do_illum:
        log.warning("Slit Illumination correction has been turned off.")
        return adinputs

    if slit_illum is None:
        # Automatic retrieval of the calibration is not supported yet
        raise NotImplementedError(
            "Automatic slit illumination retrieval is not implemented; "
            "a slit_illum frame must be provided")
    else:
        slit_illum_list = slit_illum

    # Provide a Slit Illum Ad object for every science frame
    ad_outputs = []
    for ad, slit_illum_ad in zip(*gt.make_lists(adinputs, slit_illum_list, force_ad=True)):

        if ad.phu.get(timestamp_key):
            # BUGFIX: message previously claimed the frame was processed by
            # "flatCorrect" (copy-paste from another primitive); report the
            # current primitive instead.
            log.warning(
                "No changes will be made to {}, since it has "
                "already been processed by {}".format(ad.filename, self.myself()))
            continue

        if slit_illum_ad is None:
            if self.mode in ['sq']:
                # Science-quality reduction requires the calibration
                raise OSError(
                    "No processed slit illumination listed for {}".format(
                        ad.filename))
            else:
                log.warning(
                    "No changes will be made to {}, since no slit "
                    "illumination has been specified".format(ad.filename))
                continue

        gt.check_inputs_match(ad, slit_illum_ad, check_shape=False)

        # Clip the calibration onto the science extensions if shapes differ
        if not all([e1.shape == e2.shape for (e1, e2) in zip(ad, slit_illum_ad)]):
            slit_illum_ad = gt.clip_auxiliary_data(
                adinput=[ad], aux=[slit_illum_ad])[0]

        log.info("Dividing the input AstroData object {} by this \n"
                 "slit illumination file: \n{}".format(ad.filename, slit_illum_ad.filename))

        ad_out = deepcopy(ad)
        ad_out.divide(slit_illum_ad)

        # Update the header and filename, copying QECORR keyword from flat
        ad_out.phu.set("SLTILLIM", slit_illum_ad.filename,
                       self.keyword_comments["SLTILLIM"])

        try:
            qecorr_value = slit_illum_ad.phu[qecorr_key]
        except KeyError:
            # Calibration was never QE-corrected: nothing to propagate
            pass
        else:
            log.fullinfo("Copying {} keyword from slit illumination".format(qecorr_key))
            ad_out.phu.set(qecorr_key, qecorr_value,
                           slit_illum_ad.phu.comments[qecorr_key])

        gt.mark_history(ad_out, primname=self.myself(), keyword=timestamp_key)
        ad_out.update_filename(suffix=suffix, strip=True)

        if slit_illum_ad.path:
            add_provenance(ad_out, slit_illum_ad.filename,
                           md5sum(slit_illum_ad.path) or "", self.myself())

        ad_outputs.append(ad_out)

    return ad_outputs
def _split_mosaic_into_extensions(ref_ad, mos_ad, border_size=0):
    """
    Split the `mos_ad` mosaicked data into multiple extensions using
    coordinate frames and transformations stored in the `ref_ad` object.

    Right now, the pixels at the border of each extensions might not
    match the expected values. The mosaicking and de-mosaicking is an
    interpolation, because there's a small rotation. This will only interpolate,
    not extrapolate beyond the boundaries of the input data, so you lose some
    information at the edges when you perform both operations and consequently
    the edges of the input frame get lost.

    Parameters
    ----------
    ref_ad : AstroData
        Reference multi-extension-file object containing a gWCS.
    mos_ad : AstroData
        Mosaicked data that will be split containing a single extension.
    border_size : int
        Number of pixels to be trimmed out from each border.

    Returns
    -------
    AstroData : Split multi-extension-file object.

    See Also
    --------
    - :func:`gempy.library.transform.add_mosaic_wcs`
    - :func:`gempy.library.transform.resample_from_wcs`
    """
    # Check input data
    if len(mos_ad) > 1:
        raise ValueError("Expected number of extensions of `mos_ad` to be 1. "
                         "Found {:d}".format(len(mos_ad)))

    if len(mos_ad[0].shape) != 2:
        raise ValueError("Expected ndim for `mos_ad` to be 2. "
                         "Found {:d}".format(len(mos_ad[0].shape)))

    # Get original relative shift
    origin_shift_y, origin_shift_x = mos_ad[0].nddata.meta['transform']['origin']

    # Create shift transformation, compensating for the trimmed border
    shift_x = models.Shift(origin_shift_x - border_size)
    shift_y = models.Shift(origin_shift_y - border_size)

    # Create empty AD
    ad_out = astrodata.create(ref_ad.phu)

    # Update data_section to be able to resample WCS frames
    datasec_kw = mos_ad._keyword_for('data_section')
    mos_ad[0].hdr[datasec_kw] = '[1:{},1:{}]'.format(*mos_ad[0].shape[::-1])

    # Loop across all extensions (enumerate index removed: it was unused)
    for ref_ext in ref_ad:

        # Create new transformation pipeline mapping the mosaic frame back
        # onto this extension's pixel frame
        in_frame = ref_ext.wcs.input_frame
        mos_frame = coordinate_frames.Frame2D(name="mosaic")
        mosaic_to_pixel = ref_ext.wcs.get_transform(mos_frame, in_frame)
        pipeline = [(mos_frame, mosaic_to_pixel),
                    (in_frame, None)]
        mos_ad[0].wcs = gWCS(pipeline)

        # Shift mosaic in order to set reference (0, 0) on Detector 2
        mos_ad[0].wcs.insert_transform(mos_frame, shift_x & shift_y, after=True)

        # Apply transformation
        temp_ad = transform.resample_from_wcs(
            mos_ad, in_frame.name, origin=(0, 0), output_shape=ref_ext.shape)

        # Update data_section
        datasec_kw = ref_ad._keyword_for('data_section')
        temp_ad[0].hdr[datasec_kw] = \
            '[1:{:d},1:{:d}]'.format(*temp_ad[0].shape[::-1])

        # If detector_section returned something, set an appropriate value
        det_sec_kw = ref_ext._keyword_for('detector_section')
        det_sec = ref_ext.detector_section()
        if det_sec:
            temp_ad[0].hdr[det_sec_kw] = \
                '[{}:{},{}:{}]'.format(
                    det_sec.x1 + 1, det_sec.x2, det_sec.y1 + 1, det_sec.y2)
        else:
            del temp_ad[0].hdr[det_sec_kw]

        # If array_section returned something, set an appropriate value
        arr_sec_kw = ref_ext._keyword_for('array_section')
        arr_sec = ref_ext.array_section()
        if arr_sec:
            temp_ad[0].hdr[arr_sec_kw] = \
                '[{}:{},{}:{}]'.format(
                    arr_sec.x1 + 1, arr_sec.x2, arr_sec.y1 + 1, arr_sec.y2)
        else:
            del temp_ad[0].hdr[arr_sec_kw]

        ad_out.append(temp_ad[0])

    return ad_out
|
{"hexsha": "82e5c493520f2ec26db6d2b7133b28afe80c9409", "size": 34203, "ext": "py", "lang": "Python", "max_stars_repo_path": "geminidr/gmos/primitives_gmos_longslit.py", "max_stars_repo_name": "Luke-Ludwig/DRAGONS", "max_stars_repo_head_hexsha": "d4ff4e462f64d547fbb727627875b72664255d1e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "geminidr/gmos/primitives_gmos_longslit.py", "max_issues_repo_name": "Luke-Ludwig/DRAGONS", "max_issues_repo_head_hexsha": "d4ff4e462f64d547fbb727627875b72664255d1e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "geminidr/gmos/primitives_gmos_longslit.py", "max_forks_repo_name": "Luke-Ludwig/DRAGONS", "max_forks_repo_head_hexsha": "d4ff4e462f64d547fbb727627875b72664255d1e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.75375, "max_line_length": 114, "alphanum_fraction": 0.5670262842, "include": true, "reason": "import numpy,from astropy", "num_tokens": 7709}
|
(* This Isabelle theory is produced using the TIP tool offered at the following website:
     https://github.com/tip-org/tools
   This file was originally provided as part of TIP benchmark at the following website:
     https://github.com/tip-org/benchmarks
   Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
   to make it compatible with Isabelle2017.
   Some proofs were added by Yutaka Nagashima.*)
theory TIP_sort_nat_HSort2Sorts
imports "../../Test_Base"
begin

(* Cons-lists and Peano naturals, as generated by TIP. *)
datatype 'a list = nil2 | cons2 "'a" "'a list"

datatype Nat = Z | S "Nat"

(* Binary heap over Nat (leaf-labelled node or empty). *)
datatype Heap = Node "Heap" "Nat" "Heap" | Nil

(* Less-than-or-equal on Peano naturals. *)
fun le :: "Nat => Nat => bool" where
"le (Z) y = True"
| "le (S z) (Z) = False"
| "le (S z) (S x2) = le z x2"

(* A list is ordered iff adjacent elements are non-decreasing. *)
fun ordered :: "Nat list => bool" where
"ordered (nil2) = True"
| "ordered (cons2 y (nil2)) = True"
| "ordered (cons2 y (cons2 y2 xs)) =
((le y y2) & (ordered (cons2 y2 xs)))"

(* Merge two heaps, keeping the smaller root on top. *)
fun hmerge :: "Heap => Heap => Heap" where
"hmerge (Node z x2 x3) (Node x4 x5 x6) =
(if le x2 x5 then Node (hmerge x3 (Node x4 x5 x6)) x2 z else
Node (hmerge (Node z x2 x3) x6) x5 x4)"
| "hmerge (Node z x2 x3) (Nil) = Node z x2 x3"
| "hmerge (Nil) y = y"

(*fun did not finish the proof: termination of toList is not automatic,
  so "function" is used and only pattern completeness is discharged. *)
function toList :: "Heap => Nat list" where
"toList (Node q y r) = cons2 y (toList (hmerge q r))"
| "toList (Nil) = nil2"
by pat_completeness auto

(* Insert by merging with a singleton heap. *)
fun hinsert :: "Nat => Heap => Heap" where
"hinsert x y = hmerge (Node Nil x Nil) y"

(* Build a heap from a list by repeated insertion. *)
fun toHeap2 :: "Nat list => Heap" where
"toHeap2 (nil2) = Nil"
| "toHeap2 (cons2 y xs) = hinsert y (toHeap2 xs)"

(* Heap sort: build a heap, then read it back in order. *)
fun hsort2 :: "Nat list => Nat list" where
"hsort2 x = toList (toHeap2 x)"

(* Benchmark property: heap sort yields an ordered list. Left unproved. *)
theorem property0 :
"ordered (hsort2 xs)"
oops

end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/UR/TIP_with_Proof/TIP15/TIP15/TIP_sort_nat_HSort2Sorts.thy"}
|
## activate project environment
# Use the Project.toml next to this script and install its dependencies.
using Pkg
Pkg.activate(@__DIR__)
Pkg.instantiate()

## compute relative performance
using DelimitedFiles
using Measurements
"""
    compute_relative_performance(filename)

Read a benchmark table from `filename` and write a companion
`*_relative.dat` file containing, per polynomial degree, the ratio of the
primitive-variables timing to the conservative-variables timing, with its
propagated uncertainty.
"""
function compute_relative_performance(filename)
    table = readdlm(filename, comments=true)
    # Columns: 1 = polydeg, 2/3 = conservative mean/std, 4/5 = primitive mean/std
    conservative = table[:, 2] .± table[:, 3]
    primitive = table[:, 4] .± table[:, 5]
    relative = primitive ./ conservative
    # assumes `filename` ends in ".dat" (exactly 4 chars stripped) -- TODO confirm
    outname = filename[1:end-4] * "_relative.dat"
    open(outname, "w") do io
        println(io, "# Polydeg Primitive/conservative-variables-mean std")
        columns = hcat(table[:, 1],
                       Measurements.value.(relative),
                       Measurements.uncertainty.(relative))
        writedlm(io, columns)
    end
end
# Generate the "*_relative.dat" companion files for each benchmark table
# shipped alongside this script.
compute_relative_performance(joinpath(@__DIR__, "primitive_2D_flux_shima_etal.dat"))
compute_relative_performance(joinpath(@__DIR__, "primitive_3D_flux_shima_etal.dat"))
compute_relative_performance(joinpath(@__DIR__, "primitive_2D_flux_ranocha.dat"))
compute_relative_performance(joinpath(@__DIR__, "primitive_3D_flux_ranocha.dat"))
|
{"hexsha": "94ac9f69fb4b44a4c003beec3349ce573325b908", "size": 980, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "code/primitive_variables/compute_ratios.jl", "max_stars_repo_name": "trixi-framework/paper-2021-EC_performance", "max_stars_repo_head_hexsha": "4c2f8ca8db56ec967de9290cd68af2a2e9a9d2df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/primitive_variables/compute_ratios.jl", "max_issues_repo_name": "trixi-framework/paper-2021-EC_performance", "max_issues_repo_head_hexsha": "4c2f8ca8db56ec967de9290cd68af2a2e9a9d2df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/primitive_variables/compute_ratios.jl", "max_forks_repo_name": "trixi-framework/paper-2021-EC_performance", "max_forks_repo_head_hexsha": "4c2f8ca8db56ec967de9290cd68af2a2e9a9d2df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6666666667, "max_line_length": 84, "alphanum_fraction": 0.7295918367, "num_tokens": 248}
|
# -*- coding: utf-8 -*-
# @Author: Theo Lemaire
# @Email: theo.lemaire@epfl.ch
# @Date: 2020-09-24 15:30:34
# @Last Modified by: Theo Lemaire
# @Last Modified time: 2021-03-23 00:36:39
import os
import pickle
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from PySONIC.core import EffectiveVariablesLookup
from ..utils import logger, timer, isWithin, si_format, rmse
from ..neurons import passiveNeuron
class SonicBenchmark:
    ''' Interface allowing to run benchmark simulations of a two-compartment model
        incorporating the SONIC paradigm, with a simplified sinusoidal capacitive drive.
    '''

    npc = 100  # number of samples per cycle
    min_ncycles = 10  # minimum number of cycles per simulation
    # Display units for each solution variable
    varunits = {
        't': 'ms',
        'Cm': 'uF/cm2',
        'Vm': 'mV',
        'Qm': 'nC/cm2'
    }
    # Multiplicative factors converting SI values into the display units above
    varfactors = {
        't': 1e3,
        'Cm': 1e2,
        'Vm': 1e0,
        'Qm': 1e5
    }
    nodelabels = ['node 1', 'node 2']  # labels for the two compartments
    ga_bounds = [1e-10, 1e10]  # S/m2, validity bounds for the axial conductance
def __init__(self, pneuron, ga, Fdrive, gammas, passive=False):
    ''' Initialization.

        :param pneuron: point-neuron object
        :param ga: axial conductance (S/m2)
        :param Fdrive: US frequency (Hz)
        :param gammas: pair of relative capacitance oscillation ranges
        :param passive: whether to restrict membrane dynamics to leakage only
    '''
    self.pneuron = pneuron
    self.ga = ga
    self.Fdrive = Fdrive
    self.gammas = gammas
    self.passive = passive
    # Pre-compute the effective-variables lookups for the current settings
    self.computeLookups()

def copy(self):
    ''' Return a new benchmark instance with identical settings. '''
    return self.__class__(self.pneuron, self.ga, self.Fdrive, self.gammas, passive=self.passive)
@property
def gammalist(self):
    ''' Gamma values formatted as 2-decimal strings. '''
    return [f'{x:.2f}' for x in self.gammas]

@property
def gammastr(self):
    ''' Parenthesized, comma-separated representation of the gamma pair. '''
    return f"({', '.join(self.gammalist)})"

@property
def fstr(self):
    ''' Human-readable drive frequency string (SI-formatted). '''
    return f'{si_format(self.Fdrive)}Hz'

@property
def gastr(self):
    ''' Human-readable axial conductance string. '''
    return f'{self.ga:.2e} S/m2'

@property
def mechstr(self):
    ''' Description of the membrane mechanism (neuron name, passive flag). '''
    dynamics = 'passive ' if self.passive else ''
    return f'{dynamics}{self.pneuron.name}'

def __repr__(self):
    ''' Full description with mechanism and drive parameters. '''
    params = [
        f'ga = {self.gastr}',
        f'f = {self.fstr}',
        f'gamma = {self.gammastr}'
    ]
    return f'{self.__class__.__name__}({self.mechstr} dynamics, {", ".join(params)})'

@property
def corecode(self):
    ''' File-system-safe identifier derived from the object representation
        (used e.g. as a cache file name in simAndSave). '''
    s = self.__repr__()
    # Replace characters that are problematic in file names
    for c in [' = ', ', ', ' ', '(', '/']:
        s = s.replace(c, '_')
    s = s.replace('))', '').replace('__', '_')
    return s
@property
def pneuron(self):
    ''' Point-neuron object defining the membrane dynamics. '''
    return self._pneuron

@pneuron.setter
def pneuron(self, value):
    # Store a copy to avoid mutating the caller's object
    self._pneuron = value.copy()
    self.states = self._pneuron.statesNames()
    # Re-compute lookups upon change (skipped during __init__, before
    # self.lkps exists)
    if hasattr(self, 'lkps'):
        self.computeLookups()

def isPassive(self):
    ''' Whether the point-neuron is intrinsically passive (by naming convention). '''
    return self.pneuron.name.startswith('pas_')

@property
def Fdrive(self):
    ''' Ultrasound drive frequency (Hz). '''
    return self._Fdrive

@Fdrive.setter
def Fdrive(self, value):
    self._Fdrive = value
    # Lookups depend on the drive frequency
    if hasattr(self, 'lkps'):
        self.computeLookups()

@property
def gammas(self):
    ''' Pair of relative capacitance oscillation amplitudes (one per node). '''
    return self._gammas

@gammas.setter
def gammas(self, value):
    self._gammas = value
    # Lookups depend on the oscillation amplitudes
    if hasattr(self, 'lkps'):
        self.computeLookups()

@property
def passive(self):
    ''' Whether to simulate only passive (leakage) membrane dynamics. '''
    return self._passive

@passive.setter
def passive(self, value):
    assert isinstance(value, bool), 'passive must be boolean typed'
    self._passive = value
    if hasattr(self, 'lkps'):
        self.computeLookups()
@property
def ga(self):
    ''' Axial conductance between the two nodes (S/m2). '''
    return self._ga

@ga.setter
def ga(self, value):
    # ga = 0 (uncoupled nodes) is allowed; otherwise enforce validity bounds
    if value != 0.:
        assert isWithin('ga', value, self.ga_bounds)
    self._ga = value

@property
def gPas(self):
    ''' Passive membrane conductance (S/m2). '''
    return self.pneuron.gLeak

@property
def Cm0(self):
    ''' Resting capacitance (F/m2). '''
    return self.pneuron.Cm0

@property
def Vm0(self):
    ''' Resting membrane potential (mV). '''
    return self.pneuron.Vm0

@property
def Qm0(self):
    ''' Resting membrane charge density (C/m2). '''
    return self.Vm0 * self.Cm0 * 1e-3

@property
def Qref(self):
    ''' Reference charge linear space. '''
    return np.arange(*self.pneuron.Qbounds, 1e-5)  # C/cm2

@property
def Cmeff(self):
    ''' Analytical solution for effective membrane capacitance (F/m2),
        one value per node. '''
    return self.Cm0 * np.sqrt(1 - np.array(self.gammas)**2 / 4)

@property
def Qminf(self):
    ''' Analytical solution for steady-state charge density (C/m2). '''
    return self.Cmeff * self.pneuron.ELeak * 1e-3
def capct(self, gamma, t):
    ''' Time-varying sinusoidal capacitance (in F/m2)

        :param gamma: relative oscillation amplitude
        :param t: time (s), scalar or array
    '''
    return self.Cm0 * (1 + 0.5 * gamma * np.sin(2 * np.pi * self.Fdrive * t))

def vCapct(self, t):
    ''' Vector of time-varying capacitance (in F/m2), one row per node. '''
    return np.array([self.capct(gamma, t) for gamma in self.gammas])

def getLookup(self, Cm):
    ''' Get a lookup object of effective variables for a given capacitance cycle vector.

        Each effective rate is obtained by averaging the rate function over
        the intra-cycle membrane potential excursion at every reference charge.
    '''
    refs = {'Q': self.Qref}  # C/m2
    Vmarray = np.array([Q / Cm for Q in self.Qref]) * 1e3  # mV
    tables = {
        k: np.array([np.mean(np.vectorize(v)(Vmvec)) for Vmvec in Vmarray])
        for k, v in self.pneuron.effRates().items()
    }
    return EffectiveVariablesLookup(refs, tables)

@property
def tcycle(self):
    ''' Time vector over 1 acoustic cycle (s). '''
    return np.linspace(0, 1 / self.Fdrive, self.npc)
@property
def dt_full(self):
    ''' Full time step (s): npc samples per acoustic cycle. '''
    return 1 / (self.npc * self.Fdrive)

@property
def dt_sparse(self):
    ''' Sparse time step (s): one sample per acoustic cycle. '''
    return 1 / self.Fdrive

def computeLookups(self):
    ''' Compute benchmark lookups (one per node; none in passive mode). '''
    self.lkps = []
    if not self.passive:
        self.lkps = [self.getLookup(Cm_cycle) for Cm_cycle in self.vCapct(self.tcycle)]

def getCmeff(self, Cm_cycle):
    ''' Compute effective capacitance from capacitance profile over 1 cycle
        (harmonic mean of the cycle profile). '''
    return 1 / np.mean(1 / Cm_cycle)  # F/m2
def iax(self, Vm, Vmother):
    ''' Compute axial current flowing in the compartment from another compartment (in mA/m2).

        [iax] = S/m2 * mV = 1e-3 A/m2 = 1 mA/m2
    '''
    return self.ga * (Vmother - Vm)

def vIax(self, Vm):
    ''' Compute array of axial currents in each compartment based on array of potentials. '''
    return np.array([self.iax(*Vm), self.iax(*Vm[::-1])])  # mA/m2

def serialize(self, y):
    ''' Flatten a state-per-node matrix into a single serialized state vector.
        (Original docstring stated the opposite direction.) '''
    return np.reshape(y.copy(), (self.npernode * 2))

def deserialize(self, y):
    ''' Reshape a serialized state vector into a (2 x npernode) state-per-node
        matrix. (Original docstring stated the opposite direction.) '''
    return np.reshape(y.copy(), (2, self.npernode))
def derivatives(self, t, y, Cm, dstates_func):
    ''' Generic derivatives method.

        :param t: time (s)
        :param y: serialized states vector covering both nodes
        :param Cm: per-node capacitance (F/m2)
        :param dstates_func: callable returning state derivatives for one node
        :return: serialized derivatives vector
    '''
    # Deserialize states vector and initialize derivatives array
    y = self.deserialize(y)
    dydt = np.empty(y.shape)

    # Extract charge density and membrane potential vectors
    Qm = y[:, 0]  # C/m2
    Vm = y[:, 0] / Cm * 1e3  # mV

    # Extract states array
    states_array = y[:, 1:]

    # Compute membrane dynamics for each node
    for i, (qm, vm, states) in enumerate(zip(Qm, Vm, states_array)):
        # If passive, compute only leakage current
        if self.passive:
            im = self.pneuron.iLeak(vm)  # mA/m2
        # Otherwise, compute states derivatives and total membrane current
        if not self.passive:
            states_dict = dict(zip(self.states, states))
            dydt[i, 1:] = dstates_func(i, qm, vm, states_dict)  # s-1
            im = self.pneuron.iNet(vm, states_dict)  # mA/m2
        # Membrane current enters the charge balance with a negative sign
        dydt[i, 0] = -im  # mA/m2

    # Add axial currents to currents column
    dydt[:, 0] += self.vIax(Vm)  # mA/m2

    # Rescale currents column into charge derivative units
    dydt[:, 0] *= 1e-3  # C/m2.s

    # Return serialized derivatives vector
    return self.serialize(dydt)
def dstatesFull(self, i, qm, vm, states):
    ''' Compute detailed states derivatives (node index unused here). '''
    return self.pneuron.getDerStates(vm, states)

def dfull(self, t, y):
    ''' Compute detailed derivatives vector, with time-varying capacitance. '''
    return self.derivatives(t, y, self.vCapct(t), self.dstatesFull)

def dstatesEff(self, i, qm, vm, states):
    ''' Compute effective states derivatives, interpolated from the node's lookup. '''
    lkp0d = self.lkps[i].interpolate1D(qm)
    return np.array([self.pneuron.derEffStates()[k](lkp0d, states) for k in self.states])

def deff(self, t, y):
    ''' Compute effective derivatives vector, with constant effective capacitance. '''
    return self.derivatives(t, y, self.Cmeff, self.dstatesEff)
@property
def y0node(self):
    ''' Get initial conditions vector (common to every node). '''
    if self.passive:
        # Passive mode: only charge density is integrated
        return [self.Qm0]
    else:
        # Full mode: charge density followed by steady-state gating variables
        return [self.Qm0, *[self.pneuron.steadyStates()[k](self.Vm0) for k in self.states]]

@property
def y0(self):
    ''' Get full initial conditions vector (duplicated ynode vector).

        Side effect: caches the per-node state count in self.npernode,
        which serialize/deserialize rely on.
    '''
    self.npernode = len(self.y0node)
    return self.y0node + self.y0node
def integrate(self, dfunc, t):
    ''' Integrate over a time vector and return charge density arrays.

        :param dfunc: derivatives callable with signature (t, y)
        :param t: time vector (s)
        :return: dict of (variable name -> per-node matrix over time)
    '''
    # Integrate system
    tolerances = {'atol': 1e-10}
    y = odeint(dfunc, self.y0, t, tfirst=True, **tolerances).T
    # Cast each solution variable as a time-per-node matrix:
    # charge density occupies every npernode-th row
    sol = {'Qm': y[::self.npernode]}
    if not self.passive:
        for i, k in enumerate(self.states):
            sol[k] = y[i + 1::self.npernode]
    # Return recast solution dictionary
    return sol
def orderedKeys(self, varkeys):
    ''' Order solution keys: charge, potential and capacitance first,
        followed by any remaining variables. '''
    primary = ['Qm', 'Vm', 'Cm']
    extras = list(set(varkeys) - set(primary))
    return primary + extras

def orderedSol(self, sol):
    ''' Return the solution dictionary re-ordered according to orderedKeys. '''
    reordered = {}
    for key in self.orderedKeys(sol.keys()):
        reordered[key] = sol[key]
    return reordered
def nsamples(self, tstop):
    ''' Compute the number of samples required to integrate over a given time interval. '''
    return self.getNCycles(tstop) * self.npc

@timer
def simFull(self, tstop):
    ''' Simulate the full system until a specific stop time.

        The @timer decorator makes the call return ((t, sol), tcomp).
    '''
    t = np.linspace(0, tstop, self.nsamples(tstop))
    sol = self.integrate(self.dfull, t)
    # Post-compute capacitance and potential profiles
    sol['Cm'] = self.vCapct(t)
    sol['Vm'] = sol['Qm'] / sol['Cm'] * 1e3
    return t, self.orderedSol(sol)

@timer
def simEff(self, tstop):
    ''' Simulate the effective system until a specific stop time.

        Uses one sample per acoustic cycle with constant effective capacitance.
    '''
    t = np.linspace(0, tstop, self.getNCycles(tstop))
    sol = self.integrate(self.deff, t)
    sol['Cm'] = np.array([np.ones(t.size) * Cmeff for Cmeff in self.Cmeff])
    sol['Vm'] = sol['Qm'] / sol['Cm'] * 1e3
    return t, self.orderedSol(sol)
@property
def methods(self):
    ''' Dictionary of simulation methods. '''
    return {'full': self.simFull, 'effective': self.simEff}

def getNCycles(self, duration):
    ''' Compute number of cycles from a duration (rounded up). '''
    return int(np.ceil(duration * self.Fdrive))

def simulate(self, mtype, tstop):
    ''' Simulate the system with a specific method for a given duration.

        :param mtype: method type ('full' or 'effective')
        :param tstop: stop time (s), rounded up to a whole number of cycles
        :return: (time vector, solution dictionary)
    '''
    # Cast tstop as a multiple of the acoustic period
    tstop = self.getNCycles(tstop) / self.Fdrive  # s
    # Retrieve simulation method
    try:
        method = self.methods[mtype]
    except KeyError:
        raise ValueError(f'"{mtype}" is not a valid method type')
    # Run simulation and return output (timer decorator yields tcomp)
    logger.debug(f'running {mtype} {si_format(tstop, 2)}s simulation')
    output, tcomp = method(tstop)
    logger.debug(f'completed in {tcomp:.2f} s')
    return output
def cycleAvg(self, y):
    ''' Cycle-average a solution vector according to the number of samples per cycle. '''
    # Reshape into (ncycles, npc) then average within each cycle
    ypercycle = np.reshape(y, (int(y.shape[0] / self.npc), self.npc))
    return np.mean(ypercycle, axis=1)

def cycleAvgSol(self, t, sol):
    ''' Cycle-average a time vector and a solution dictionary. '''
    solavg = {}
    # For each per-node-matrix in the solution
    for k, ymat in sol.items():
        # Cycle-average each node vector of the matrix
        solavg[k] = np.array([self.cycleAvg(yvec) for yvec in ymat])
    # Re-sample time vector at system periodicity
    tavg = t[::self.npc]  # + 0.5 / self.Fdrive
    # Return cycle-averaged time vector and solution dictionary
    return tavg, solavg
def g2tau(self, g):
    ''' Convert conductance per unit membrane area (S/m2) to time constant (s). '''
    return self.Cm0 / g  # s

def tau2g(self, tau):
    ''' Convert time constant (s) to conductance per unit membrane area (S/m2). '''
    return self.Cm0 / tau  # S/m2 (original comment wrongly said "s")

@property
def taum(self):
    ''' Passive membrane time constant (s). '''
    return self.pneuron.tau_pas

@taum.setter
def taum(self, value):
    ''' Update point-neuron leakage conductance to match the new membrane time constant. '''
    if not self.isPassive():
        raise ValueError('taum can only be set for passive neurons')
    # Replace the point-neuron with a passive neuron of matching conductance
    self.pneuron = passiveNeuron(
        self.pneuron.Cm0,
        self.tau2g(value),  # S/m2
        self.pneuron.ELeak)

@property
def tauax(self):
    ''' Axial time constant (s). '''
    return self.g2tau(self.ga)

@tauax.setter
def tauax(self, value):
    ''' Update axial conductance per unit area to match the new axial time constant. '''
    self.ga = self.tau2g(value)  # S/m2

@property
def taumax(self):
    ''' Maximal time constant of the model (s). '''
    return max(self.taum, self.tauax)

def setTimeConstants(self, taum, tauax):
    ''' Update benchmark according to pair of time constants (in s). '''
    self.taum = taum  # s
    self.tauax = tauax  # s

def setDrive(self, f, gammas):
    ''' Update benchmark drive to a new frequency and amplitude. '''
    self.Fdrive = f
    self.gammas = gammas

def getPassiveTstop(self, f):
    ''' Compute minimum simulation time for a passive model (s):
        long enough for convergence (5 time constants) and for a
        minimum number of acoustic cycles. '''
    return max(5 * self.taumax, self.min_ncycles / f)

@property
def passive_tstop(self):
    ''' Passive stop time at the current drive frequency (s). '''
    return self.getPassiveTstop(self.Fdrive)
def simAllMethods(self, tstop):
''' Simulate the model with both methods. '''
logger.info(f'{self}: {si_format(tstop)}s simulation')
# Simulate with full and effective systems
t, sol = {}, {}
for method in self.methods.keys():
t[method], sol[method] = self.simulate(method, tstop)
t, sol = self.postproSol(t, sol)
return t, sol
def simAndSave(self, *args, outdir=''):
fpath = os.path.join(outdir, self.corecode)
if os.path.isfile(fpath):
with open(fpath, 'rb') as fh:
out = pickle.load(fh)
else:
out = self.simAllMethods(*args)
with open(fpath, 'wb') as fh:
pickle.dump(out, fh)
return out
def computeGradient(self, sol):
''' compute the gradient of a solution array. '''
return {k: np.vstack((y, np.diff(y, axis=0))) for k, y in sol.items()}
def addOnset(self, ymat, y0):
return np.hstack((np.ones((2, 2)) * y0, ymat))
def getY0(self, k, y):
y0dict = {'Cm': self.Cm0, 'Qm': self.Qm0, 'Vm': self.Vm0}
try:
return y0dict[k]
except KeyError:
return y[0, 0]
return
    def postproSol(self, t, sol, gradient=False):
        ''' Post-process a multi-method solution in place.

            Mutates (and returns) the input dictionaries:
              1. adds a 'cycle-avg' entry holding the cycle-average of the 'full' solution;
              2. prepends an artificial onset (5% of the simulated span) to every
                 time vector and solution matrix;
              3. optionally adds '<key>-grad' entries holding across-node gradients.

            :param t: dict of time vectors, keyed by method (must contain 'full')
            :param sol: dict of {variable: per-node matrix} dicts, keyed by method
            :param gradient: whether to append across-node gradient entries
            :return: the (mutated) time and solution dictionaries
        '''
        # Add cycle-average of full solution (must run BEFORE the onset is added)
        t['cycle-avg'], sol['cycle-avg'] = self.cycleAvgSol(t['full'], sol['full'])
        # Snapshot keys now: the gradient loop below adds new entries to the dicts
        keys = list(sol.keys())
        tonset = 0.05 * np.ptp(t['full'])
        # Add onset: two leading samples at [-tonset, 0] holding initial values
        for k in keys:
            t[k] = np.hstack(([-tonset, 0], t[k]))
            sol[k] = {vk: self.addOnset(ymat, self.getY0(vk, ymat)) for vk, ymat in sol[k].items()}
        # Add gradient across nodes for each variable
        if gradient:
            for k in keys:
                t[f'{k}-grad'] = t[k]
                sol[f'{k}-grad'] = self.computeGradient(sol[k])
        return t, sol
    def plot(self, t, sol, Qonly=False, gradient=False):
        ''' Plot results of benchmark simulations of the model: one subplot per
            solution variable, one curve per (method, node) pair.

            :param t: dict of time vectors, keyed by method
            :param sol: dict of {variable: per-node matrix} dicts, keyed by method
            :param Qonly: restrict the plot to the charge density variable
            :param gradient: NOTE(review): this parameter is unused in the body —
                confirm whether gradient traces were meant to be drawn here
            :return: matplotlib figure
        '''
        # Per-method plotting styles: full, effective, cycle-avg (in sol order)
        colors = ['C0', 'C1', 'darkgrey']
        markers = ['-', '--', '-']
        alphas = [0.5, 1., 1.]
        # Reduce solution dictionary if only Q needs to be plotted
        if Qonly:
            sol = {key: {'Qm': value['Qm']} for key, value in sol.items()}
        # Extract simulation duration (from the last sample of any time vector)
        tstop = t[list(t.keys())[0]][-1]  # s
        # Gather keys of methods and variables to plot
        mkeys = list(sol.keys())
        varkeys = list(sol[mkeys[0]].keys())
        naxes = len(varkeys)
        # Get node labels
        lbls = self.nodelabels
        # Create figure (one row per variable, capped at 10 inches tall)
        fig, axes = plt.subplots(naxes, 1, sharex=True, figsize=(10, min(3 * naxes, 10)))
        if naxes == 1:
            axes = [axes]
        axes[0].set_title(f'{self} - {si_format(tstop)}s simulation')
        axes[-1].set_xlabel(f'time ({self.varunits["t"]})')
        for ax, vk in zip(axes, varkeys):
            ax.set_ylabel(f'{vk} ({self.varunits.get(vk, "-")})')
        if self.passive:
            # Add horizontal lines for node-specific SONIC steady-states on charge density plot
            Qm_ax = axes[varkeys.index('Qm')]
            for Qm, c in zip(self.Qminf, colors):
                Qm_ax.axhline(Qm * self.varfactors['Qm'], c=c, linestyle=':')
        # For each solution type
        for m, alpha, (mkey, varsdict) in zip(markers, alphas, sol.items()):
            tplt = t[mkey] * self.varfactors['t']
            # For each solution variable
            for ax, (vkey, v) in zip(axes, varsdict.items()):
                # For each node
                for y, c, lbl in zip(v, colors, lbls):
                    # Plot node variable with appropriate color and marker
                    ax.plot(tplt, y * self.varfactors.get(vkey, 1.0),
                            m, alpha=alpha, c=c, label=f'{lbl} - {mkey}')
        # Add legend below the bottom axis
        fig.subplots_adjust(bottom=0.2)
        axes[-1].legend(
            bbox_to_anchor=(0., -0.7, 1., .1), loc='upper center',
            ncol=3, mode="expand", borderaxespad=0.)
        # Return figure
        return fig
    def plotQnorm(self, t, sol, ax=None, notitle=False):
        ''' Plot normalized charge density traces from benchmark simulations of the model.

            :param t: dict of time vectors, keyed by method
            :param sol: dict of {variable: per-node matrix} dicts, keyed by method
            :param ax: optional axis to draw onto (a new figure is created otherwise)
            :param notitle: skip the figure title
            :return: matplotlib figure
        '''
        # Per-method plotting styles (sol is expected to contain up to 3 methods)
        colors = ['C0', 'C1']
        markers = ['-', '--', '-']
        alphas = [0.5, 1., 1.]
        # Normalize charge density by resting capacitance -> effective potential (V)
        V = {key: value['Qm'] / self.Cm0 for key, value in sol.items()}
        tstop = t[list(t.keys())[0]][-1]  # s
        if ax is None:
            fig, ax = plt.subplots(figsize=(10, 3))
        else:
            fig = ax.get_figure()
        if not notitle:
            ax.set_title(f'{self} - {si_format(tstop)}s simulation')
        ax.set_xlabel(f'time ({self.varunits["t"]})')
        ax.set_ylabel(f'Qm / Cm0 (mV)')
        for sk in ['top', 'right']:
            ax.spines[sk].set_visible(False)
        # Fixed physiological display range (mV)
        ax.set_ylim(-85., 55.)
        # One curve per (method, node) pair; y scaled from V to mV
        for m, alpha, (key, varsdict) in zip(markers, alphas, sol.items()):
            for y, c, lbl in zip(V[key], colors, self.nodelabels):
                ax.plot(t[key] * self.varfactors['t'], y * 1e3,
                        m, alpha=alpha, c=c, label=f'{lbl} - {key}')
        # fig.subplots_adjust(bottom=0.2)
        # ax.legend(bbox_to_anchor=(0., -0.7, 1., .1), loc='upper center', ncol=3,
        #           mode="expand", borderaxespad=0.)
        return fig
def simplot(self, *args, **kwargs):
''' Run benchmark simulation and plot results. '''
return self.plot(*self.simAllMethods(*args, **kwargs))
@property
def eval_funcs(self):
''' Different functions to evaluate the divergence between two solutions. '''
return {
'rmse': lambda y1, y2: rmse(y1, y2), # RMSE
'ss': lambda y1, y2: np.abs(y1[-1] - y2[-1]), # steady-state absolute difference
'amax': lambda y1, y2: np.max(np.abs(y1 - y2)) # max absolute difference
}
def divergencePerNode(self, t, sol, eval_mode='RMSE'):
''' Evaluate the divergence between the effective and full, cycle-averaged solutions
at a specific point in time, computing per-node differences in charge density values
divided by resting capacitance.
'''
if eval_mode not in self.eval_funcs.keys():
raise ValueError(f'{eval_mode} evaluation mode is not supported')
# Extract charge matrices from solution dictionary
Qsol = {k: sol[k]['Qm'] for k in ['effective', 'cycle-avg']} # C/m2
# Normalize matrices by resting capacitance
Qnorm = {k: v / self.Cm0 * 1e3 for k, v in Qsol.items()} # mV
# Keep only the first two rows (3rd one, if any, is a gradient)
Qnorm = {k: v[:2, :] for k, v in Qnorm.items()}
# Discard the first 3 columns (artifical onset and first cycle artefact)
Qnorm = {k: v[:, 3:] for k, v in Qnorm.items()}
eval_func = self.eval_funcs[eval_mode]
# Compute deviation across nodes saccording to evaluation mode
div_per_node = [eval_func(*[v[i] for v in Qnorm.values()]) for i in range(2)]
# Cast into dictionary and return
div_per_node = dict(zip(self.nodelabels, div_per_node))
logger.debug(f'divergence per node: ', {k: f'{v:.2e} mV' for k, v in div_per_node.items()})
return div_per_node
def divergence(self, *args, **kwargs):
div_per_node = self.divergencePerNode(*args, **kwargs) # mV
return max(list(div_per_node.values())) # mV
def logDivergences(self, t, sol):
for eval_mode in self.eval_funcs.keys():
div_per_node = self.divergencePerNode(t, sol, eval_mode=eval_mode)
div_per_node_str = {k: f'{v:.3f}' for k, v in div_per_node.items()}
logger.info(f'{eval_mode}: divergence = {div_per_node_str} mV')
    def phaseplotQnorm(self, t, sol):
        ''' Phase-plot normalized charge density traces from benchmark simulations
            of the model: Qm/Cm0 against its time derivative, one curve per
            (method, node) pair, excluding the 'full' method.

            :param t: dict of time vectors, keyed by method
            :param sol: dict of {variable: per-node matrix} dicts, keyed by method
            :return: matplotlib figure
        '''
        colors = ['C0', 'C1']
        markers = ['-', '--', '-']
        alphas = [0.5, 1., 1.]
        # Extract normalized charge density profiles
        Qnorm = {key: value['Qm'] / self.Cm0 for key, value in sol.items()}
        # Discard the first 2 indexes of artifical onset)
        t = {k: v[2:] for k, v in t.items()}
        Qnorm = {k: v[:, 2:] for k, v in Qnorm.items()}
        # Get time derivatives (first column duplicated to preserve length)
        dQnorm = {}
        for key, value in Qnorm.items():
            dQdt = np.diff(value, axis=1) / np.diff(t[key])
            dQnorm[key] = np.hstack((np.array([dQdt[:, 0]]).T, dQdt))
        fig, ax = plt.subplots(figsize=(5, 5), constrained_layout=True)
        # tstop = t[list(t.keys())[0]][-1]  # s
        # ax.set_title(f'{self} - {si_format(tstop)}s simulation', fontsize=10)
        ax.set_xlabel(f'Qm / Cm0 (mV)')
        ax.set_ylabel(f'd(Qm / Cm0) / dt (V/s)')
        # Display scalings: x from V to mV, y unchanged
        xfactor, yfactor = 1e3, 1e0
        # Resting point (start of every trajectory)
        x0 = self.pneuron.Qm0 / self.pneuron.Cm0
        y0 = 0.
        for m, alpha, (key, varsdict) in zip(markers, alphas, sol.items()):
            if key != 'full':
                for y, dydt, c, lbl in zip(Qnorm[key], dQnorm[key], colors, self.nodelabels):
                    ax.plot(np.hstack(([x0], y)) * xfactor, np.hstack(([y0], dydt)) * yfactor,
                            m, alpha=alpha, c=c, label=f'{lbl} - {key}')
        # Mark the resting point
        ax.scatter(x0 * xfactor, y0 * yfactor, c=['k'], zorder=2.5)
        # fig.subplots_adjust(bottom=0.2)
        # ax.legend(bbox_to_anchor=(0., -0.7, 1., .1), loc='upper center', ncol=3,
        #           mode="expand", borderaxespad=0.)
        return fig
|
{"hexsha": "14b57a8f1b1f55fd9815131f2cfa5f0eaa68b495", "size": 24708, "ext": "py", "lang": "Python", "max_stars_repo_path": "PySONIC/multicomp/benchmark.py", "max_stars_repo_name": "scbao/pysonic", "max_stars_repo_head_hexsha": "b4ccaf49772d55f632a0995c411d1cc042d71903", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PySONIC/multicomp/benchmark.py", "max_issues_repo_name": "scbao/pysonic", "max_issues_repo_head_hexsha": "b4ccaf49772d55f632a0995c411d1cc042d71903", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PySONIC/multicomp/benchmark.py", "max_forks_repo_name": "scbao/pysonic", "max_forks_repo_head_hexsha": "b4ccaf49772d55f632a0995c411d1cc042d71903", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8606676343, "max_line_length": 100, "alphanum_fraction": 0.5698963898, "include": true, "reason": "import numpy,from scipy", "num_tokens": 6751}
|
import numpy as np # Reading CSV ans saving vectors as binary files
import tensorflow as tf # Tensorflow
from tensorflow import keras # Simplified Tensorflow Framework
from tensorflow.keras import regularizers
from tensorflow.keras import callbacks
import matplotlib.pyplot as plt
def plotWeights(model):
    ''' Display each weight tensor of a Keras model as an image.

    1-D tensors (biases) are promoted to 2-D via np.atleast_2d so that imshow
    can render them; the original implementation crashed on bias vectors,
    since Axes.imshow only accepts 2-D (or RGB(A) 3-D) arrays.
    '''
    for layer in model.get_weights():
        fig, ax = plt.subplots()
        ax.imshow(np.atleast_2d(layer))
        plt.show()
def saveModel(model, folder="data/"):
    ''' Persist a Keras model to disk.

    Writes three artifacts into `folder`:
      - model.h5: the model weights (note: `save_weights` stores weights only,
        NOT the architecture/optimizer state — the original comment claiming a
        "complete model" was misleading)
      - modelW.npy: the raw weight arrays as a numpy file
      - model.json: the architecture as JSON
    '''
    print("In folder " + folder)
    # Saves only the weights of the model
    model.save_weights(folder + 'model.h5')
    # Saves the raw weight arrays
    np.save(folder + "modelW.npy", model.get_weights())
    print(model.get_weights())
    # Saves only the architecture of the model; the context manager guarantees
    # the file handle is closed even if writing fails
    with open(folder + 'model.json', "w") as file:
        file.write(model.to_json())
    print("Finished with saving")
def binaryClassification(train_data, train_labels, test_data, test_labels, nEpochs, lrate, layerSize, rp=0.01, columns=None):
    ''' Train and evaluate a binary-classification MLP.

    :param train_data: training feature matrix
    :param train_labels: training labels (0/1)
    :param test_data: evaluation feature matrix
    :param test_labels: evaluation labels (0/1)
    :param nEpochs: number of training epochs
    :param lrate: learning rate for the Adam optimizer
    :param layerSize: iterable of hidden-layer widths
    :param rp: L2 regularization coefficient
    :param columns: optional column subset to select from the feature matrices
    :return: (test_loss, test_accuracy)
    :raises ValueError: if the training loss diverged to NaN
    '''
    # Optionally restrict features to a column subset
    if columns is not None:
        train_data = train_data[:, columns]
        test_data = test_data[:, columns]
    ### Define Neuronal Network
    cbks = [callbacks.TerminateOnNaN()]
    layers = [keras.layers.Dense(i, activation=tf.nn.relu, kernel_regularizer=regularizers.l2(rp)) for i in layerSize]
    layers.append(keras.layers.Dense(1, activation=tf.nn.sigmoid))
    model = keras.Sequential(layers)
    # BUGFIX: the learning rate must be passed to the optimizer itself;
    # `compile` has no `lr` keyword, so the original `lr=lrate` argument was
    # never applied to training.
    model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=lrate),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    ### Execute model
    history = model.fit(train_data, train_labels, callbacks=cbks, epochs=nEpochs, verbose=0)
    test_loss, test_acc = model.evaluate(test_data, test_labels, verbose=0)
    # Abort if training diverged at any epoch
    if (np.isnan(history.history['loss']).any()):
        raise ValueError("Loss was not a number")
    # Plot training accuracy curve
    plt.plot(history.history['acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    #plt.show()
    plotWeights(model)
    return test_loss, test_acc
|
{"hexsha": "40f89e3665c79536e4de921ac5c550d9be55785f", "size": 2823, "ext": "py", "lang": "Python", "max_stars_repo_path": "metricsML/MachineLearning.py", "max_stars_repo_name": "Elscha/MetricsML", "max_stars_repo_head_hexsha": "2ecbc42ad7bd2465f4f75658f44452ea5c552c3b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "metricsML/MachineLearning.py", "max_issues_repo_name": "Elscha/MetricsML", "max_issues_repo_head_hexsha": "2ecbc42ad7bd2465f4f75658f44452ea5c552c3b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "metricsML/MachineLearning.py", "max_forks_repo_name": "Elscha/MetricsML", "max_forks_repo_head_hexsha": "2ecbc42ad7bd2465f4f75658f44452ea5c552c3b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3285714286, "max_line_length": 177, "alphanum_fraction": 0.6606447042, "include": true, "reason": "import numpy", "num_tokens": 656}
|
#
# Copyright 2021 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
#
# Authors: Mudhakar Srivatsa <msrivats@us.ibm.com>
# Raghu Ganti <rganti@us.ibm.com>
# Carlos Costa <chcost@us.ibm.com>
#
#
import graphviz
import codeflare.pipelines.Datamodel as dm
import ray
import numpy as np
def pipeline_to_graph(pipeline: dm.Pipeline) -> graphviz.Digraph:
    """
    Converts the given pipeline to a graphviz digraph for visualization.
    (The original docstring said "networkX graph", but the function builds
    and returns a graphviz.Digraph, as the type annotation shows.)

    :param pipeline: Pipeline to convert to a graphviz digraph
    :return: A directed graph representing this pipeline
    """
    graph = graphviz.Digraph()
    pipeline_nodes = pipeline.get_nodes()
    # One node per pipeline node, one edge per (pre, post) dependency.
    # graphviz deduplicates repeated node declarations, so re-declaring
    # post nodes in every iteration is harmless.
    for pre_node in pipeline_nodes.values():
        post_nodes = pipeline.get_post_nodes(pre_node)
        graph.node(pre_node.get_node_name())
        for post_node in post_nodes:
            graph.node(post_node.get_node_name())
            graph.edge(pre_node.get_node_name(), post_node.get_node_name())
    return graph
@ray.remote
def split(xy_ref: dm.XYRef, num_splits):
    """
    Takes input as XYRef, splits the X and sends back the data as chunks. This is quite
    useful when we have to break a raw array into smaller pieces. This current implementation
    requires the input X of XYRef to be a pandas dataframe.
    """
    # Fetch the underlying X/y objects from the object store
    x = ray.get(xy_ref.get_Xref())
    y = ray.get(xy_ref.get_yref())
    # Split X into num_splits chunks; mirror the split on y when present,
    # otherwise pair each X chunk with None
    x_split = np.array_split(x, num_splits)
    if y is not None:
        y_split = np.array_split(y, num_splits)
    else:
        y_split = [None] * len(x_split)
    # Put each chunk back into the object store and wrap it in an XYRef
    xy_split_refs = []
    for x_part, y_part in zip(x_split, y_split):
        xy_split_refs.append(dm.XYRef(ray.put(x_part), ray.put(y_part)))
    return xy_split_refs
|
{"hexsha": "6f7ab6c64cdada07f05535dbb6d6c2443b161e02", "size": 2717, "ext": "py", "lang": "Python", "max_stars_repo_path": "codeflare/pipelines/utils.py", "max_stars_repo_name": "snyderbrian/codeflare", "max_stars_repo_head_hexsha": "414f15c6fc52c1885df51491e9eebc3a2cb73bda", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 154, "max_stars_repo_stars_event_min_datetime": "2021-06-18T21:48:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T17:28:11.000Z", "max_issues_repo_path": "codeflare/pipelines/utils.py", "max_issues_repo_name": "snyderbrian/codeflare", "max_issues_repo_head_hexsha": "414f15c6fc52c1885df51491e9eebc3a2cb73bda", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2021-06-20T18:30:11.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-24T18:18:16.000Z", "max_forks_repo_path": "codeflare/pipelines/utils.py", "max_forks_repo_name": "snyderbrian/codeflare", "max_forks_repo_head_hexsha": "414f15c6fc52c1885df51491e9eebc3a2cb73bda", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2021-06-21T12:59:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T10:01:25.000Z", "avg_line_length": 32.734939759, "max_line_length": 93, "alphanum_fraction": 0.7011409643, "include": true, "reason": "import numpy", "num_tokens": 649}
|
import pandas as pd
import tempfile
import skbio
from skbio.sequence import Sequence
import os
import subprocess
import platform
import numpy as np
# Configure subprocess startup so that external tool invocations (MUSCLE) on
# Windows do not flash a console window; other platforms need no special
# startup info and pass None through to subprocess.call.
if platform.system() == 'Windows':
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    startupinfo.wShowWindow = subprocess.SW_HIDE
else:
    startupinfo = None
# Public API of this module
__all__ = ['muscle_align',
           'align2skbio',
           'skbio2align']
def align2skbio(align):
    ''' Convert a pd.Series of aligned sequence strings into a skbio TabularMSA,
        using the stringified Series index as sequence IDs. '''
    seqs = [Sequence(s, metadata=dict(id=str(i))) for i, s in align.items()]
    return skbio.TabularMSA(seqs)
def skbio2align(seqColl):
    ''' Convert a skbio sequence collection back into a pd.Series of strings,
        keyed by each sequence's 'id' metadata. '''
    out = {}
    for s in seqColl:
        out[s.metadata['id']] = ''.join(c.decode('utf-8') for c in s.values)
    return pd.Series(out)
def align2fasta(align, fn):
    ''' Write a pd.Series of sequences to a FASTA file, using the Series
        index values as record identifiers. '''
    with open(fn, 'w') as fh:
        for seq_id, seq in zip(align.index, align.values):
            fh.write('>%s\n' % str(seq_id))
            fh.write('%s\n' % str(seq))
def muscle_align(seqs):
    """Use MUSCLE to align AA seqs.

    muscle -in new_seqs.fa -out new_seqs.afa

    Requires the `muscle` executable to be on PATH.

    Parameters
    ----------
    seqs : list or pd.Series

    Return
    ------
    align : pd.Series()
        Aligned sequences."""
    """Create temporary file for MUSCLE"""
    # NOTE(review): tempfile.mktemp is race-prone and deprecated; consider
    # NamedTemporaryFile(delete=False) — confirm before changing
    inFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None)
    outFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None)
    if not isinstance(seqs, pd.Series):
        align = pd.Series(seqs)
    else:
        align = seqs
    # Replace the index with 0..N-1 so FASTA IDs are predictable integers.
    # NOTE(review): when `seqs` is a Series, this mutates the caller's index
    # in place (only restored on the returned copy) — confirm intent
    newIndex = np.arange(align.shape[0])
    oldIndex = align.index
    align.index = newIndex
    """Put alignment in the tempfiles"""
    align2fasta(align, inFn)
    # align2skbio(align).write(inFn, format='fasta')
    # skbio.write(obj=, into=inFn, format='fasta')
    muscleCommand = ['muscle',
                     '-in', inFn,
                     '-out', outFn]
    # startupinfo hides the console window on Windows (module-level setting)
    result = subprocess.call(muscleCommand, startupinfo=startupinfo)
    """If MUSCLE was successful"""
    if not result:
        outAlign = skbio2align(skbio.read(outFn, format='fasta'))
        # outAign = skbio2align(skbio.TabularMSA.read(outFn, format='fasta'))
    else:
        print("Error in MUSCLE!")
        raise Exception("MUSCLEError")
    """Remove the temporary files"""
    # NOTE(review): the temp files are NOT removed when MUSCLE fails (the
    # raise above skips this cleanup) — confirm whether that is acceptable
    os.remove(inFn)
    os.remove(outFn)
    """MUSCLE seqs need to be reorderd using the original index"""
    outAlign = outAlign.loc[[str(i) for i in align.index]]
    """Index was str() through FASTA files so reset index with original index"""
    outAlign.index = oldIndex
    """Check that all seqs are being returned in the correct order"""
    badSeqs = 0
    if not len(seqs) == len(outAlign):
        print('Different number of output seqs!')
        badSeqs += 1
    # Every output sequence, gaps removed, must equal its input counterpart
    for i, s1, s2 in zip(list(range(len(seqs))), seqs, outAlign):
        if not s1.replace('-', '') == s2.replace('-', ''):
            print('%d: %s != %s' % (i, s1, s2))
            badSeqs += 1
    if badSeqs > 0:
        raise Exception('Output seqs are different than input seqs! (%d)' % badSeqs)
    return outAlign
|
{"hexsha": "0d6ab030d4b74182bdf7e98eefd677d23d55294d", "size": 3047, "ext": "py", "lang": "Python", "max_stars_repo_path": "muscle.py", "max_stars_repo_name": "victorfica/utils", "max_stars_repo_head_hexsha": "b61935a860838a0e70afde7c9ecf2c68f51a2c4b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2015-12-16T01:23:07.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-27T11:41:43.000Z", "max_issues_repo_path": "muscle.py", "max_issues_repo_name": "victorfica/utils", "max_issues_repo_head_hexsha": "b61935a860838a0e70afde7c9ecf2c68f51a2c4b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-06T23:47:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-06T23:48:33.000Z", "max_forks_repo_path": "muscle.py", "max_forks_repo_name": "victorfica/utils", "max_forks_repo_head_hexsha": "b61935a860838a0e70afde7c9ecf2c68f51a2c4b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2016-04-29T14:04:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-06T23:49:34.000Z", "avg_line_length": 29.5825242718, "max_line_length": 104, "alphanum_fraction": 0.6117492616, "include": true, "reason": "import numpy", "num_tokens": 809}
|
import matplotlib.pyplot as plt
import numpy as np

from CEIT.EITPlotter import EITPlotter
from CEIT.Solver import Solver

# Build the EIT reconstruction solver and its plotting helper
solver = Solver()
plotter = EITPlotter()
# generate random signal: 240 simulated differential voltage readings
# (presumably one per electrode measurement pattern — TODO confirm)
delta_V = np.random.rand(240)
fig, ax = plt.subplots(nrows=1, ncols=1)
# Reconstruct the conductivity distribution from the voltages and draw it,
# overlaying the electrode positions
plotter.plot_detection_area_map(solver.solve(delta_V), ax, with_electrode=True)
plt.show()
|
{"hexsha": "5563d21c224493c38cb2181d17d8f7008ad85c38", "size": 352, "ext": "py", "lang": "Python", "max_stars_repo_path": "Example_04_solver.py", "max_stars_repo_name": "zehao99/CEIT", "max_stars_repo_head_hexsha": "06f5a409a93073bb7cfd22afb3a39f500e5a24d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-11-28T19:20:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T11:35:27.000Z", "max_issues_repo_path": "Example_04_solver.py", "max_issues_repo_name": "zehao99/CEIT", "max_issues_repo_head_hexsha": "06f5a409a93073bb7cfd22afb3a39f500e5a24d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Example_04_solver.py", "max_forks_repo_name": "zehao99/CEIT", "max_forks_repo_head_hexsha": "06f5a409a93073bb7cfd22afb3a39f500e5a24d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.4666666667, "max_line_length": 79, "alphanum_fraction": 0.7840909091, "include": true, "reason": "import numpy", "num_tokens": 97}
|
from builtins import print as pr
import numpy as np
from matplotlib import pyplot as plt
# ANSI SGR escape codes for bright foreground colors.
# BUGFIX: 'green' previously mapped to code 33, which is YELLOW in the ANSI
# palette (30 black, 31 red, 32 green, 33 yellow, 34 blue); green is 32.
color_coll = {'white': "\033[37;1m",
              'red': "\033[31;1m",
              'green': "\033[32;1m",
              'blue': "\033[34;1m"}
def print(value, color="default"):
    ''' Print a value wrapped in an ANSI color code, resetting to white
        afterwards. Shadows the builtin print (aliased as `pr`). '''
    key = 'white' if color == "default" else color
    pr(color_coll[key.lower()] + str(value) + color_coll['white'])
def float_f(n, decimals):
    ''' Round n to `decimals` places, going through a fixed-point string
        representation to normalize the result. '''
    fmt = '{0:.%df}' % decimals
    return float(fmt.format(round(n, decimals)))
def spin_matrix(theta):
    ''' 2-D counter-clockwise rotation matrix for angle theta (radians),
        returned as an np.matrix. '''
    c, s = np.cos(theta), np.sin(theta)
    return np.matrix([[c, -s], [s, c]])
def plot_list(*_list):
    ''' Plot each argument (a list of (x, y) pairs) as a separate curve on a
        single figure with equal axis scaling. '''
    plt.figure()
    for pairs in _list:
        xs = [p[0] for p in pairs]
        ys = [p[1] for p in pairs]
        plt.plot(xs, ys)
    plt.axis('equal')
    plt.show()
|
{"hexsha": "9bdb77ec9e9899256a52541f3e505f2f58c271da", "size": 818, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/server_design/algorithms/compressor/miscellaneous/tools.py", "max_stars_repo_name": "robertpardillo/Funnel", "max_stars_repo_head_hexsha": "f45e419f55e085bbb95e17c47b4c94a7c625ba9b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-18T16:10:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-18T16:10:49.000Z", "max_issues_repo_path": "src/server_design/algorithms/compressor/miscellaneous/tools.py", "max_issues_repo_name": "robertpardillo/Funnel", "max_issues_repo_head_hexsha": "f45e419f55e085bbb95e17c47b4c94a7c625ba9b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/server_design/algorithms/compressor/miscellaneous/tools.py", "max_forks_repo_name": "robertpardillo/Funnel", "max_forks_repo_head_hexsha": "f45e419f55e085bbb95e17c47b4c94a7c625ba9b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7878787879, "max_line_length": 87, "alphanum_fraction": 0.5550122249, "include": true, "reason": "import numpy", "num_tokens": 228}
|
import numpy as np
from . import coordinates, rotations, expansions
class CoaxialTranslation(coordinates.OwnerMixin):
_default_output_type = expansions.Expansion
def __init__(self, input_order, output_order, position=None, radius=None, wavenumber=None, defer_evaluation=False):
self._input_order = input_order
self._output_order = output_order
self._min_order = min(self.input_order, self.output_order)
self._max_order = max(self.input_order, self.output_order)
self._wavenumber = np.asarray(wavenumber)
self.coordinate = coordinates.Translation.parse_args(position=position, radius=radius)
num_unique = (
(self._min_order + 1)**2 * (self._max_order + 1)
- (self._min_order * (self._min_order + 1)) // 2 * (self._min_order + self._max_order + 2)
+ (self._min_order * (self._min_order - 1) * (self._min_order + 1)) // 6
)
self._data = np.zeros((num_unique,) + np.shape(wavenumber) + self.coordinate.shapes.radius, self._dtype)
if not defer_evaluation:
self.evaluate(position=self.coordinate)
@property
def order(self):
return (self.input_order, self.output_order)
@property
def input_order(self):
return self._input_order
@property
def output_order(self):
return self._output_order
@property
def shape(self):
return self.coordinate.shapes.radius
def copy(self, deep=False):
new_obj = super().copy(deep=deep)
new_obj._input_order = self._input_order
new_obj._output_order = self._output_order
new_obj._max_order = self._max_order
new_obj._min_order = self._min_order
if deep:
new_obj._wavenumber = self._wavenumber.copy()
new_obj._data = self._data.copy()
else:
new_obj._wavenumber = self._wavenumber
new_obj._data = self._data
return new_obj
@property
def wavenumber(self):
return self._wavenumber
@property
def _component_indices(self):
out = []
for m in range(self._min_order + 1):
for n in range(m, self._min_order + 1):
for p in range(n, self._max_order + 1):
out.append((n, p, m))
return out
def _idx(self, input_order=None, output_order=None, mode=None, index=None):
def mode_offset(m):
return (m * (self._min_order + 1) * (self._max_order + 1)
- (self._max_order + 1) * (m * (m - 1)) // 2
- m * (self._min_order * (self._min_order + 1)) // 2
+ (m * (m - 1) * (m - 2)) // 6
)
def input_order_offset(n, m):
return (n - m) * (self._max_order + 1) + (m * (m - 1) - n * (n - 1)) // 2
if index is None:
# Default mode, getting the linear index of the component indices
if abs(mode) > input_order:
raise IndexError(f'Mode {mode} is out of bounds for input order {input_order}')
if abs(mode) > output_order:
raise IndexError(f'Mode {mode} is out of bounds for output order {output_order}')
if input_order > self._min_order:
raise IndexError(f'Component {(input_order, output_order, mode)} not stored in {self.__class__.__name__} (n > min(N, P). Use getter or index the object directly.')
if output_order > self._max_order:
raise IndexError(f'Component {(input_order, output_order, mode)} not stored in {self.__class__.__name__} (p > max(N, P)). Use getter or index the object directly.')
if input_order > output_order:
raise IndexError(f'Component {(input_order, output_order, mode)} not stored in {self.__class__.__name__}. Use getter or index the object directly.')
if mode < 0:
raise IndexError(f'Component {(input_order, output_order, mode)} not stored in {self.__class__.__name__}. Use getter or index the object directly.')
# Data is stored in [mode, input_order, output_order] (m, n, p) order
return mode_offset(mode) + input_order_offset(input_order, mode) + output_order - input_order
else:
# Inverse mode, getting the component indices of a linear index.
mode = 0
while mode_offset(mode + 1) <= index:
mode += 1
index -= mode_offset(mode)
input_order = mode
while input_order_offset(input_order + 1, mode) <= index:
input_order += 1
index -= input_order_offset(input_order, mode)
output_order = index + input_order
return (input_order, output_order, mode)
def __getitem__(self, key):
n, p, m = key
if n > p:
return (-1)**(n + p) * self._data[self._idx(p, n, abs(m))]
else:
return self._data[self._idx(n, p, abs(m))]
def evaluate(self, position=None, radius=None, wavenumber=None):
if wavenumber is not None:
self._wavenumber = np.asarray(wavenumber)
self.coordinate = coordinates.Translation.parse_args(position=position, radius=radius)
# The computation is split in two domains for p, the stored and the buffered.
# The stored range is p <= P, i.e. the values we are interested in.
# The buffered range is P < p <= N + P - m, which are values needed to
# complete the recurrence to higher n and m.
# The values (n, P, m) exist in both domains, due to simplifications of
# the indexing and implementations for the buffered values.
# We can get away with only two buffers for n since we need (n-2, p, m)
# only when calculating (n, p, m), and never again after that point.
# By calculating and storing in the same statement, we can reuse the same memory directly.
# Recurrence buffers
N, P = self._min_order, self._max_order # Shorthand for clarity
m_buffer = np.zeros((N + 1,) + np.shape(self.wavenumber) + self.coordinate.shapes.radius, dtype=self._dtype)
m_minus_one_buffer = np.zeros((N + 1,) + np.shape(self.wavenumber) + self.coordinate.shapes.radius, dtype=self._dtype)
n_minus_two_buffer = np.zeros((N + 1,) + np.shape(self.wavenumber) + self.coordinate.shapes.radius, dtype=self._dtype)
n_minus_one_buffer = np.zeros((N + 1,) + np.shape(self.wavenumber) + self.coordinate.shapes.radius, dtype=self._dtype)
for m in range(N + 1): # Main loop over modes
# Buffer swap for sectorials.
m_buffer, m_minus_one_buffer = m_minus_one_buffer, m_buffer
if m == 0: # Get starting values: (0, p, 0)
# Somewhat ugly with this if-statement inside the loop,
# but the alternative is to duplicate everything else in the loop
# for m=0.
initial_values = self._recurrence_initialization(order=N + P, radius=self.coordinate.radius, wavenumber=self.wavenumber)
for p in range(P):
self._data[self._idx(0, p, 0)] = initial_values[p] * (2 * p + 1)**0.5
self._data[self._idx(0, P, 0)] = m_buffer[0] = n_minus_one_buffer[0] = initial_values[P] * (2 * P + 1)**0.5
for p in range(P + 1, N + P + 1):
m_buffer[p - P] = n_minus_one_buffer[p - P] = initial_values[p] * (2 * p + 1)**0.5
else:
for p in range(m, P): # Sectorial recurrence in the stored range
self._data[self._idx(m, p, m)] = (
self._data[self._idx(m - 1, p - 1, m - 1)] * ((p + m - 1) * (p + m) * (2 * m + 1) / ((2 * p - 1) * (2 * p + 1) * (2 * m)))**0.5
+ self._data[self._idx(m - 1, p + 1, m - 1)] * ((p - m + 1) * (p - m + 2) * (2 * m + 1) / ((2 * p + 1) * (2 * p + 3) * (2 * m)))**0.5
)
self._data[self._idx(m, P, m)] = m_buffer[0] = n_minus_one_buffer[0] = (
self._data[self._idx(m - 1, P - 1, m - 1)] * ((P + m - 1) * (P + m) * (2 * m + 1) / ((2 * P - 1) * (2 * P + 1) * (2 * m)))**0.5
+ m_minus_one_buffer[1] * ((P - m + 1) * (P - m + 2) * (2 * m + 1) / ((2 * P + 1) * (2 * P + 3) * (2 * m)))**0.5
)
for p in range(P + 1, N + P - m + 1): # Sectorial recurrence in the buffered range
m_buffer[p - P] = n_minus_one_buffer[p - P] = (
m_minus_one_buffer[p - P - 1] * ((p + m - 1) * (p + m) * (2 * m + 1) / ((2 * p - 1) * (2 * p + 1) * (2 * m)))**0.5
+ m_minus_one_buffer[p - P + 1] * ((p - m + 1) * (p - m + 2) * (2 * m + 1) / ((2 * p + 1) * (2 * p + 3) * (2 * m)))**0.5
)
# Remaining (non-sectorial) values.
# n = m + 1 is a special case since n-2 < m removes one component from the recurrence
if m < N: # Needed to prevent n = N + 1
scale = (2 * m + 3)**0.5
for p in range(m + 1, P): # n = m - 1, stored range
self._data[self._idx(m + 1, p, m)] = scale * (
self._data[self._idx(m, p - 1, m)] * ((p + m) * (p - m) / ((2 * p - 1) * (2 * p + 1)))**0.5
- self._data[self._idx(m, p + 1, m)] * ((p + m + 1) * (p - m + 1) / ((2 * p + 1) * (2 * p + 3)))**0.5
)
self._data[self._idx(m + 1, P, m)] = n_minus_two_buffer[0] = scale * (
self._data[self._idx(m, P - 1, m)] * ((P + m) * (P - m) / ((2 * P - 1) * (2 * P + 1)))**0.5
- n_minus_one_buffer[1] * ((P + m + 1) * (P - m + 1) / ((2 * P + 1) * (2 * P + 3)))**0.5
)
for p in range(P + 1, N + P - m): # n = m - 1, buffered range
n_minus_two_buffer[p - P] = scale * (
n_minus_one_buffer[p - P - 1] * ((p + m) * (p - m) / ((2 * p - 1) * (2 * p + 1)))**0.5
- n_minus_one_buffer[p - P + 1] * ((p + m + 1) * (p - m + 1) / ((2 * p + 1) * (2 * p + 3)))**0.5
)
for n in range(m + 2, N + 1): # Main loop over n.
# Buffer swap for n.
n_minus_one_buffer, n_minus_two_buffer = n_minus_two_buffer, n_minus_one_buffer
scale = ((2 * n - 1) * (2 * n + 1) / ((n + m) * (n - m)))**0.5
for p in range(n, P): # Stored range
self._data[self._idx(n, p, m)] = scale * (
self._data[self._idx(n - 2, p, m)] * ((n + m - 1) * (n - m - 1) / ((2 * n - 3) * (2 * n - 1)))**0.5
+ self._data[self._idx(n - 1, p - 1, m)] * ((p + m) * (p - m) / ((2 * p - 1) * (2 * p + 1)))**0.5
- self._data[self._idx(n - 1, p + 1, m)] * ((p + m + 1) * (p - m + 1) / ((2 * p + 1) * (2 * p + 3)))**0.5
)
self._data[self._idx(n, P, m)] = n_minus_two_buffer[0] = scale * (
self._data[self._idx(n - 2, P, m)] * ((n + m - 1) * (n - m - 1) / ((2 * n - 3) * (2 * n - 1)))**0.5
+ self._data[self._idx(n - 1, P - 1, m)] * ((P + m) * (P - m) / ((2 * P - 1) * (2 * P + 1)))**0.5
- n_minus_one_buffer[1] * ((P + m + 1) * (P - m + 1) / ((2 * P + 1) * (2 * P + 3)))**0.5
)
for p in range(P + 1, N + P - n + 1): # Buffered range
n_minus_two_buffer[p - P] = scale * (
n_minus_two_buffer[p - P] * ((n + m - 1) * (n - m - 1) / ((2 * n - 3) * (2 * n - 1)))**0.5
+ n_minus_one_buffer[p - P - 1] * ((p + m) * (p - m) / ((2 * p - 1) * (2 * p + 1)))**0.5
- n_minus_one_buffer[p - P + 1] * ((p + m + 1) * (p - m + 1) / ((2 * p + 1) * (2 * p + 3)))**0.5
)
return self
def apply(self, expansion, inverse=False, out=None):
    """Apply this coaxial translation to a spherical harmonics expansion.

    Parameters
    ----------
    expansion
        The expansion to translate. Indexed as ``expansion[n, m]``. If it has
        a ``wavenumber`` attribute, it must match this translation's.
    inverse : bool
        If True, apply the transposed (inverse-direction) sum, producing an
        expansion of ``input_order`` instead of ``output_order``.
    out
        Optional pre-allocated output expansion, indexed as ``out[p, m]``.
        Must not be the same object as ``expansion``.

    Returns
    -------
    The translated expansion (``out`` if it was given).

    Raises
    ------
    ValueError
        If the expansion carries a wavenumber different from this translation's.
    NotImplementedError
        If ``expansion is out`` (in-place application is not supported).
    """
    wavenumber = getattr(expansion, 'wavenumber', None)
    if wavenumber is not None:
        if not np.allclose(wavenumber, self.wavenumber):
            # Fixed typo in the message: 'wavenuber' -> 'wavenumber'.
            raise ValueError('Cannot apply translation to expansion of different wavenumber')
    # TODO: Limit the order when the input expansion is lower order
    # TODO: Limit the order when the output expansion is lower order
    # TODO: output type for inverse translations? If the translation is exterior to interior, what should the inverse translation do?
    if out is None:
        # Size the output by broadcasting this object's shape with the expansion's.
        self_shape = self.shape
        expansion_shape = np.shape(expansion)
        output_shape = np.broadcast(np.empty(self_shape, dtype=[]), np.empty(expansion_shape, dtype=[])).shape
        out = self._default_output_type(order=self.input_order if inverse else self.output_order, wavenumber=self.wavenumber, shape=output_shape)
    elif expansion is out:
        raise NotImplementedError('Translations cannot currently be applied in place')
    if not inverse:
        # Forward: out[p, m] = sum_n T[n, p, m] * expansion[n, m]
        for m in range(-self._min_order, self._min_order + 1):
            for p in range(abs(m), self.output_order + 1):
                value = 0
                for n in range(abs(m), self.input_order + 1):
                    value += self[n, p, m] * expansion[n, m]
                out[p, m] = value
    else:
        # Inverse: out[n, m] = sum_p T[n, p, m] * expansion[p, m]
        for m in range(-self._min_order, self._min_order + 1):
            for n in range(abs(m), self.input_order + 1):
                value = 0
                for p in range(abs(m), self.output_order + 1):
                    value += self[n, p, m] * expansion[p, m]
                out[n, m] = value
    return out
class InteriorCoaxialTranslation(CoaxialTranslation):
    # Interior-to-interior coaxial translation.
    # Coefficients are real; the recurrence is seeded with the regular radial base.
    _dtype = float
    from .bases import RegularRadialBase as _recurrence_initialization
    _default_output_type = expansions.InteriorExpansion
class ExteriorCoaxialTranslation(CoaxialTranslation):
    # Exterior-to-exterior coaxial translation.
    # Coefficients are real; seeded with the regular radial base, like the interior case.
    _dtype = float
    from .bases import RegularRadialBase as _recurrence_initialization
    _default_output_type = expansions.ExteriorExpansion
class ExteriorInteriorCoaxialTranslation(CoaxialTranslation):
    # Exterior-to-interior coaxial translation.
    # Coefficients are complex; seeded with the singular radial base instead of the regular one.
    _dtype = complex
    from .bases import SingularRadialBase as _recurrence_initialization
    _default_output_type = expansions.InteriorExpansion
class Translation(CoaxialTranslation):
    # General (non-coaxial) translation, implemented as the classic
    # rotate -> coaxial translate -> rotate back decomposition.
    def __init__(self, input_order, output_order, position=None, wavenumber=None,
                 radius=None, colatitude=None, azimuth=None, defer_evaluation=False):
        # Parse the target position either from `position` or from the
        # (radius, colatitude, azimuth) spherical components.
        coordinate = coordinates.Translation.parse_args(position=position, radius=radius, colatitude=colatitude, azimuth=azimuth)
        # The rotation aligns the translation axis with the z-axis; it is
        # evaluated lazily (defer_evaluation=True) until coordinates are known.
        self._rotation = rotations.Rotation(
            order=max(input_order, output_order), defer_evaluation=True,
            colatitude=coordinate.colatitude, azimuth=coordinate.azimuth,
        )
        super().__init__(input_order=input_order, output_order=output_order, position=coordinate, wavenumber=wavenumber, defer_evaluation=defer_evaluation)

    def evaluate(self, position=None, wavenumber=None, radius=None, colatitude=None, azimuth=None):
        # Re-evaluate only the parts affected by the changed arguments:
        # the coaxial part depends on radius/wavenumber, the rotation on angles.
        self.coordinate = coordinates.Translation.parse_args(position=position, radius=radius, colatitude=colatitude, azimuth=azimuth)
        if (position is not None) or (radius is not None) or (wavenumber is not None):
            super().evaluate(position=self.coordinate, wavenumber=wavenumber)
        if (position is not None) or (colatitude is not None) or (azimuth is not None):
            self._rotation.evaluate(colatitude=self.coordinate.colatitude, azimuth=self.coordinate.azimuth)
        return self

    def apply(self, expansion, inverse=False, _only_coaxial=False):
        # `_only_coaxial` is an internal flag used to invoke just the coaxial
        # step from within the rotate-translate-rotate chain below.
        if _only_coaxial:
            return super().apply(expansion, inverse=inverse)
        if not inverse:
            # Rotate into the coaxial frame, translate along the axis, rotate back.
            return expansion.apply(self._rotation, inverse=True).apply(self, _only_coaxial=True).apply(self._rotation)
        else:
            raise NotImplementedError('Inverse translations not implemented yet.')

    @property
    def shape(self):
        # Broadcast shape of the parsed coordinate.
        return self.coordinate.shape

    def copy(self, deep=False):
        # Also copy the internal rotation so the copies stay independent.
        new_obj = super().copy(deep=deep)
        new_obj._rotation = self._rotation.copy(deep=deep)
        return new_obj
class InteriorTranslation(Translation, InteriorCoaxialTranslation):
    # General interior-to-interior translation; behavior comes from the two bases.
    pass
class ExteriorTranslation(Translation, ExteriorCoaxialTranslation):
    # General exterior-to-exterior translation; behavior comes from the two bases.
    pass
class ExteriorInteriorTranslation(Translation, ExteriorInteriorCoaxialTranslation):
    # General exterior-to-interior translation; behavior comes from the two bases.
    pass
|
{"hexsha": "ffde7ff3dfeab4079ab980de24f867531c159951", "size": 16573, "ext": "py", "lang": "Python", "max_stars_repo_path": "shetar/translations.py", "max_stars_repo_name": "AppliedAcousticsChalmers/Spherical-Helmholtz-Translation-and-Rotation", "max_stars_repo_head_hexsha": "84c5b51dab5d7ee26886ece44945a5d887bff369", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "shetar/translations.py", "max_issues_repo_name": "AppliedAcousticsChalmers/Spherical-Helmholtz-Translation-and-Rotation", "max_issues_repo_head_hexsha": "84c5b51dab5d7ee26886ece44945a5d887bff369", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "shetar/translations.py", "max_forks_repo_name": "AppliedAcousticsChalmers/Spherical-Helmholtz-Translation-and-Rotation", "max_forks_repo_head_hexsha": "84c5b51dab5d7ee26886ece44945a5d887bff369", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.160130719, "max_line_length": 180, "alphanum_fraction": 0.5583780848, "include": true, "reason": "import numpy", "num_tokens": 4576}
|
from __future__ import print_function, absolute_import, unicode_literals, division
from collections import OrderedDict
import csv
from sklearn.metrics import accuracy_score
from amt.Test_data import get_miniclips_video_stats, print_stats_actions_miniclips, get_actions_stats
from amt.settings import *
import pandas as pd
import numpy as np
def compare_with_GT(dict_actions_hit, threshold):
    """Compare worker answers against ground-truth (GT) annotations and flag
    potential spammers per HIT.

    A worker is flagged when (a) they gave the same answer for every action in
    a HIT, or (b) their agreement with the GT answers for that HIT falls below
    ``threshold``. The flags are also written next to the original input rows
    in ``PATH_spammers_files`` ('x' = approve, 'spammer' = reject).

    :param dict_actions_hit: mapping hit -> video_name -> list of
        [action, results] entries, where results holds the 3 workers' answers.
    :param threshold: minimum accuracy against GT below which a worker is
        considered a spammer.
    :return: OrderedDict mapping each HIT number to the set of spammer worker
        indices (0, 1, 2).
    """
    csv_file = open(PATH_output_file_name, 'r')
    reader = csv.DictReader(csv_file)
    # ----- Join the AMT results with the GT answers into PATH_output_results_csv.
    with open(PATH_output_results_csv, 'w+') as csvfile3:
        fieldnames = ['HIT_nb', 'Video_name', 'Actions', 'Worker_1', 'Worker_2', 'Worker_3', 'Worker_GT']
        writer = csv.DictWriter(csvfile3, fieldnames=fieldnames)
        writer.writeheader()
        HIT_nb = ""
        Actions = ""
        Video_name = ""
        Worker_1 = ""
        Worker_2 = ""
        Worker_3 = ""
        worker_GT = ""
        for row in reader:
            ok_write = 0
            for (column_name, value) in row.items():
                # Only rows whose video-name prefix contains no 'p' are joined
                # with GT — presumably this selects GT-eligible videos;
                # TODO(review): confirm the naming scheme.
                if column_name == 'Video_name' and 'p' not in value.split("_")[0]:
                    Video_name = value
                    for (column_name, value) in row.items():
                        if column_name == 'Actions':
                            Actions = value
                        if column_name == 'HIT_nb':
                            HIT_nb = value
                        if column_name == 'Worker_1':
                            Worker_1 = value
                        if column_name == 'Worker_2':
                            Worker_2 = value
                        if column_name == 'Worker_3':
                            Worker_3 = value
                            ok_write = 1
                    if ok_write == 1:
                        break
            if ok_write == 1:
                # Look up the GT answer for this (video, action) pair.
                csv_file2 = open(PATH_GT_AMT, 'r')
                reader2 = csv.DictReader(csv_file2)
                for row2 in reader2:
                    ok = 0
                    if ('Video_name', Video_name) in row2.items() and ('Actions', Actions) in row2.items():
                        for (column_name, value) in row2.items():
                            if column_name == 'Worker_GT':
                                worker_GT = value
                                ok = 1
                                break
                    if ok == 1:
                        break
                csv_file2.close()
                writer.writerow({'HIT_nb': HIT_nb, 'Video_name': Video_name, 'Actions': Actions, 'Worker_1': Worker_1,
                                 'Worker_2': Worker_2, 'Worker_3': Worker_3, 'Worker_GT': worker_GT})
    # compute agreement GT vs. Worker1, GT vs. Worker2, GT vs. Worker3
    csv_file = open(PATH_output_results_csv, 'r')
    reader = csv.DictReader(csv_file)
    ok_1_equal_2 = 1  # when set, answer 2 is treated the same as answer 1
    list_results = []
    hit_nbs = set()
    for row in reader:
        for (column_name, value) in row.items():
            if "Worker_1" == column_name:
                value1 = int(value)
                if value1 == 2 and ok_1_equal_2 == 1:
                    value1 = 1
            if "Worker_2" == column_name:
                value2 = int(value)
                if value2 == 2 and ok_1_equal_2 == 1:
                    value2 = 1
            if "Worker_3" == column_name:
                value3 = int(value)
                if value3 == 2 and ok_1_equal_2 == 1:
                    value3 = 1
            if "Worker_GT" == column_name:
                value4 = int(value)
                if value4 == 2 and ok_1_equal_2 == 1:
                    value4 = 1
            if "HIT_nb" == column_name:
                hit_nb = value
        hit_nbs.add(hit_nb)
        list_results.append((hit_nb, value1, value2, value3, value4))
    per_hit_val_rater = dict()
    per_hit_val_rater[0] = []
    per_hit_val_rater[1] = []
    per_hit_val_rater[2] = []
    per_hit_val_rater['GT'] = []
    values_GT = OrderedDict()
    potential_spammers = OrderedDict()
    spammers_same_val = OrderedDict()
    spammers_low_GT_acc = OrderedDict()
    for key in hit_nbs:
        potential_spammers[key] = set()
        spammers_same_val[key] = set()
        spammers_low_GT_acc[key] = set()
    # ----- Verify in all the results if a worker put the same value everywhere --> spammer
    for hit in dict_actions_hit.keys():
        set_results_worker_1 = set()
        set_results_worker_2 = set()
        set_results_worker_3 = set()
        dict_video_names = dict_actions_hit[hit]
        for video_name in dict_video_names.keys():
            for action_result_list in dict_video_names[video_name]:
                result = action_result_list[1]
                if len(result) == 3:
                    set_results_worker_1.add(result[0])
                    set_results_worker_2.add(result[1])
                    set_results_worker_3.add(result[2])
        if len(set_results_worker_1) == 1:
            potential_spammers[hit].add(0)
            spammers_same_val[hit].add(0)
        if len(set_results_worker_2) == 1:
            potential_spammers[hit].add(1)
            spammers_same_val[hit].add(1)
        if len(set_results_worker_3) == 1:
            potential_spammers[hit].add(2)
            spammers_same_val[hit].add(2)
    # ----- Per HIT, flag workers whose accuracy against GT is below threshold.
    # list_results is grouped by HIT; accumulate values until the HIT changes.
    hit = list_results[0][0]
    for (hit_nb, value1, value2, value3, value4) in list_results:
        if (hit_nb == hit):
            per_hit_val_rater[0].append(value1)
            per_hit_val_rater[1].append(value2)
            per_hit_val_rater[2].append(value3)
            per_hit_val_rater['GT'].append(value4)
        else:
            for worker in range(0, 3):
                if worker in potential_spammers[hit]:
                    continue
                else:
                    accuracy_with_GT = accuracy_score(per_hit_val_rater[worker], per_hit_val_rater['GT'])
                    if accuracy_with_GT < threshold:
                        potential_spammers[hit].add(worker)
                        spammers_low_GT_acc[hit].add(worker)
            values_GT[hit] = per_hit_val_rater['GT']
            per_hit_val_rater[0] = [value1]
            per_hit_val_rater[1] = [value2]
            per_hit_val_rater[2] = [value3]
            per_hit_val_rater['GT'] = [value4]
            hit = hit_nb
    # compute for the last HIT
    for worker in range(0, 3):
        if worker in potential_spammers[hit_nb]:
            continue
        else:
            accuracy_with_GT = accuracy_score(per_hit_val_rater[worker], per_hit_val_rater['GT'])
            # BUG FIX: the last HIT previously used a hard-coded 0.2 cutoff
            # instead of the ``threshold`` parameter applied to all other HITs.
            if accuracy_with_GT < threshold:
                values_GT[hit_nb] = per_hit_val_rater['GT']
                potential_spammers[hit_nb].add(worker)
                spammers_low_GT_acc[hit_nb].add(worker)
    # ----- Count assignments per HIT in the input file; HITs that do not have
    # exactly 3 assignments are excluded from the spammer annotation below.
    list_keys = dict()
    with open(PATH_input_file_name, 'r') as csvinput:
        for row in csv.reader(csvinput):
            key = row[0]
            if key == 'HITId':
                continue
            if key not in list_keys.keys():
                list_keys[key] = 0
            list_keys[key] += 1
    csvinput.close()
    list_bad_keys = []
    for key in list_keys.keys():
        if list_keys[key] != 3:
            list_bad_keys.append(key)
    # ----- Annotate the original input rows with approve/reject columns.
    # `index` cycles 0..2 over the three assignments of each good HIT.
    index = 0
    index_row = 0
    with open(PATH_input_file_name, 'r') as csvinput:
        with open(PATH_spammers_files, 'w+') as csvoutput:
            writer = csv.writer(csvoutput)
            for row in csv.reader(csvinput):
                if index_row == 0:
                    # Copy the header row through unchanged.
                    writer.writerow(row)
                    index_row += 1
                    continue
                key = row[0]
                if key not in potential_spammers.keys() or potential_spammers[key] == set() or key in list_bad_keys:
                    row.append('x')
                    row.append('')
                else:
                    set_workers = potential_spammers[key]
                    if index in set_workers:
                        row.append('')
                        row.append('spammer')
                    else:
                        row.append('x')
                        row.append('')
                if key not in list_bad_keys:
                    index += 1
                    if index == 3:
                        index = 0
                writer.writerow(row)
    csvinput.close()
    csvoutput.close()
    return potential_spammers
def get_compromised_hits(potential_spammers):
    """Summarize the spammer flags per HIT.

    :param potential_spammers: mapping of HIT number -> set of spammer
        worker indices for that HIT.
    :return: tuple ``(compromised_hits, nb_spammers, nb_workers)`` where
        compromised_hits lists HITs with more than one spammer, nb_spammers
        is the total number of flagged workers, and nb_workers counts three
        workers per HIT.
    """
    # A HIT is compromised when more than one of its three workers is flagged.
    compromised_hits = [hit for hit, spammers in potential_spammers.items() if len(spammers) > 1]
    nb_spammers = sum(len(spammers) for spammers in potential_spammers.values())
    nb_workers = 3 * len(potential_spammers)  # every HIT has exactly 3 workers
    return compromised_hits, nb_spammers, nb_workers
def create_after_spam_filtered_results(dict_output, print_stats, threshold):
    """Write a spam-filtered copy of the AMT results and optionally print stats.

    Runs spammer detection, drops HITs with more than one spammer, and for
    HITs with exactly one spammer overwrites that worker's answer with -1 and
    recomputes the majority columns. The filtered rows go to
    ``PATH_after_spam_filter_csv``.

    :param dict_output: hit -> video_name -> list of [action, results] entries
        (forwarded to :func:`compare_with_GT`).
    :param print_stats: when 1, print summary statistics after filtering.
    :param threshold: GT-agreement cutoff forwarded to :func:`compare_with_GT`.
    :return: the per-HIT spammer dict from :func:`compare_with_GT`.
    """
    potential_spammers = compare_with_GT(dict_output, threshold)
    compromised_hits, nb_spammers, nb_workers = get_compromised_hits(potential_spammers)
    csv_file = open(PATH_output_file_name, 'r')
    reader = csv.DictReader(csv_file)
    with open(PATH_after_spam_filter_csv, 'w+') as csvfile2:
        fieldnames = ['HIT_nb', 'Video_name', 'Actions', 'Worker_1', 'Worker_2', 'Worker_3', 'All_Yes_actions',
                      'Majority_Yes_actions', 'All_No_actions', 'Majority_No_actions']
        writer = csv.DictWriter(csvfile2, fieldnames=fieldnames)
        writer.writeheader()
        HIT_nb = ""
        Actions = ""
        Video_name = ""
        Worker_1 = ""
        Worker_2 = ""
        Worker_3 = ""
        All_Yes_actions = ""
        Majority_Yes_actions = ""
        All_No_actions = ""
        for row in reader:
            # Pull the needed columns out of the current row.
            # NOTE(review): Majority_No_actions has no initial value above; if
            # the column were ever missing this would raise — confirm schema.
            for (column_name, value) in row.items():
                if column_name == 'Actions':
                    Actions = value
                if column_name == 'Video_name':
                    Video_name = value
                if column_name == 'HIT_nb':
                    HIT_nb = value
                if column_name == 'Worker_1':
                    Worker_1 = value
                if column_name == 'Worker_2':
                    Worker_2 = value
                if column_name == 'Worker_3':
                    Worker_3 = value
                if column_name == 'All_Yes_actions':
                    All_Yes_actions = value
                if column_name == 'Majority_Yes_actions':
                    Majority_Yes_actions = value
                if column_name == 'Majority_No_actions':
                    Majority_No_actions = value
            if len(potential_spammers[HIT_nb]) == 0:
                # No spammer in this HIT: copy the row through unchanged.
                writer.writerow({'HIT_nb': HIT_nb, 'Video_name': Video_name, 'Actions': Actions, 'Worker_1': Worker_1,
                                 'Worker_2': Worker_2, 'Worker_3': Worker_3, 'All_Yes_actions': All_Yes_actions,
                                 'Majority_Yes_actions': Majority_Yes_actions,
                                 'Majority_No_actions': Majority_No_actions})
            elif HIT_nb in compromised_hits:
                # More than one spammer: the whole HIT is dropped.
                continue
            else:
                # Exactly one spammer: invalidate their answer (-1) and
                # recompute the majority from the remaining two workers.
                spammer = int(list(potential_spammers[HIT_nb])[0])
                if spammer == 0:
                    Worker_1 = -1
                    if Worker_2 != '0' or Worker_3 != '0':
                        Majority_No_actions = Actions
                        Majority_Yes_actions = ''
                elif spammer == 1:
                    Worker_2 = -1
                    if Worker_1 != '0' or Worker_3 != '0':
                        Majority_No_actions = Actions
                        Majority_Yes_actions = ''
                else:
                    Worker_3 = -1
                    if Worker_2 != '0' or Worker_1 != '0':
                        Majority_No_actions = Actions
                        Majority_Yes_actions = ''
                writer.writerow({'HIT_nb': HIT_nb, 'Video_name': Video_name, 'Actions': Actions, 'Worker_1': Worker_1,
                                 'Worker_2': Worker_2, 'Worker_3': Worker_3, 'All_Yes_actions': All_Yes_actions,
                                 'Majority_Yes_actions': Majority_Yes_actions,
                                 'Majority_No_actions': Majority_No_actions})
    csv_file.close()
    csvfile2.close()
    visible_actions, all_visible_actions, not_visible_actions = get_visible_and_not_visible_actions(
        PATH_after_spam_filter_csv, PATH_visible_not_visible_actions_csv)
    if print_stats == 1:
        print("There are {0} spammers out of {1} total workers".format(nb_spammers, nb_workers))
        print("There are {0} compromised hits (more than 1 spammer) out of a total of {1} hits.".format(
            len(compromised_hits), len(potential_spammers)))
        # print "Compromised hits: " + str(compromised_hits)
        nb_non_gt_videos, nb_non_gt_miniclips, nb_gt_videos, nb_gt_miniclips = get_miniclips_video_stats(
            PATH_after_spam_filter_csv)
        nb_total_actions, nb_majority_visible_actions, nb_all_visible_actions = get_actions_stats(
            PATH_after_spam_filter_csv)
        print("##--------------- After Spam Check --------------------------##")
        print_stats_actions_miniclips(nb_total_actions, nb_majority_visible_actions, nb_all_visible_actions,
                                      nb_non_gt_videos, nb_non_gt_miniclips, nb_gt_videos, nb_gt_miniclips)
    return potential_spammers
def get_visible_and_not_visible_actions(after_spam_filter_csv, visible_not_visible_actions_csv):
    """Extract per-(video, action) visibility columns from the filtered results.

    Reads the after-spam-filter CSV, drops duplicate (Video_name, Actions)
    pairs keeping the first occurrence, writes a summary CSV with the visible
    and not-visible action columns, and returns the extracted columns.

    :param after_spam_filter_csv: path of the filtered results CSV to read.
    :param visible_not_visible_actions_csv: path of the summary CSV to write.
    :return: tuple ``(visible_actions, all_visible_actions, not_visible_actions)``
        as plain Python lists.
    """
    frame = pd.read_csv(after_spam_filter_csv)
    # One row per unique (video, action) pair; first occurrence wins.
    deduped = frame.drop_duplicates(subset=['Video_name', 'Actions'])
    video_names = deduped['Video_name'].tolist()
    visible_actions = deduped['Majority_Yes_actions'].tolist()
    all_visible_actions = deduped['All_Yes_actions'].tolist()
    not_visible_actions = deduped['Majority_No_actions'].tolist()
    summary = pd.DataFrame({'Video_name': video_names,
                            'Visible Actions': visible_actions,
                            'Not Visible Actions': not_visible_actions})
    summary.to_csv(visible_not_visible_actions_csv, sep=',', encoding='utf-8', index=False, header=True,
                   columns=["Video_name", "Visible Actions", "Not Visible Actions"])
    return visible_actions, all_visible_actions, not_visible_actions
|
{"hexsha": "c368f396e3b0ab5e812f54b8ec3c2868adca7f06", "size": 14682, "ext": "py", "lang": "Python", "max_stars_repo_path": "amt/detect_spam.py", "max_stars_repo_name": "MichiganNLP/vlog_action_recognition", "max_stars_repo_head_hexsha": "90790d4ba12e91f32704d04c725e8afb65f40372", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-23T22:27:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T22:27:54.000Z", "max_issues_repo_path": "amt/detect_spam.py", "max_issues_repo_name": "MichiganNLP/vlog_action_recognition", "max_issues_repo_head_hexsha": "90790d4ba12e91f32704d04c725e8afb65f40372", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-10T00:39:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:39:42.000Z", "max_forks_repo_path": "amt/detect_spam.py", "max_forks_repo_name": "MichiganNLP/vlog_action_recognition", "max_forks_repo_head_hexsha": "90790d4ba12e91f32704d04c725e8afb65f40372", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5920679887, "max_line_length": 119, "alphanum_fraction": 0.5409344776, "include": true, "reason": "import numpy", "num_tokens": 3239}
|
# MIT License: Copyright (c) 2016: Andy Ferris.
# See LICENSE.md for further licensing text
# 1x1 case: the single entry is the eigenvalue and the eigenbasis is the identity.
@inline function LinearAlgebra.eigen(S::SymmetricTensor{2, 1, T}) where {T}
    @inbounds Eigen(Vec{1, T}((S[1, 1],)), one(Tensor{2, 1, T}))
end
# 2x2 symmetric eigendecomposition. Eigenvalues come from the quadratic
# formula on the characteristic polynomial; eigenvectors are built directly
# from (λ - S22, S21) and normalised. Eigenvalues are returned in ascending
# order, matching the diagonal fast path.
function LinearAlgebra.eigen(S::SymmetricTensor{2, 2, T}) where {T}
    @inbounds begin
        if S[2,1] == 0 # off-diagonal entry is zero: S is already diagonal
            d1 = S[1,1]
            d2 = S[2,2]
            if d1 < d2
                λ = Vec{2}((d1, d2))
                Φ = Tensor{2,2,T}((T(1), T(0), T(0), T(1)))
            else # d2 <= d1: swap so the eigenvalues come out ascending
                λ = Vec{2}((d2, d1))
                Φ = Tensor{2,2,T}((T(0), T(1), T(1), T(0)))
            end
            return Eigen(λ, Φ)
        end
        # eigenvalues from quadratic formula
        half_trace = tr(S) / 2
        discriminant = half_trace * half_trace - det(S)
        # Clamp tiny negative discriminants caused by rounding (identity matrices, etc.)
        root = discriminant < 0 ? zero(discriminant) : sqrt(discriminant)
        λ = Vec{2}((half_trace - root, half_trace + root))
        # eigenvector for λ₁, normalised
        a1 = λ[1] - S[2,2]
        len1 = sqrt(a1 * a1 + S[2,1] * S[2,1])
        Φ11 = a1 / len1
        Φ12 = S[2,1] / len1
        # eigenvector for λ₂, normalised
        a2 = λ[2] - S[2,2]
        len2 = sqrt(a2 * a2 + S[2,1] * S[2,1])
        Φ21 = a2 / len2
        Φ22 = S[2,1] / len2
        Φ = Tensor{2, 2}((Φ11, Φ12,
                          Φ21, Φ22))
        return Eigen(λ, Φ)
    end
end
# A small part of the code in the following method was inspired by works of David
# Eberly, Geometric Tools LLC, in code released under the Boost Software
# License. See LICENSE.md
# 3x3 symmetric eigendecomposition using the trigonometric (Cardano) method
# for the eigenvalues and robust cross-product construction of eigenvectors.
# Eigenvalues are returned in ascending order.
function LinearAlgebra.eigen(S::SymmetricTensor{2, 3, T}) where {T}
    @inbounds begin
        # Work in the result type of a division so integer inputs promote.
        R = typeof((one(T)*zero(T) + zero(T))/one(T))
        SR = convert(SymmetricTensor{2, 3, R}, S)
        S11 = SR[1, 1]; S22 = SR[2, 2]; S33 = SR[3, 3]
        S12 = SR[1, 2]; S13 = SR[1, 3]; S23 = SR[2, 3]

        # p1 is the squared magnitude of the off-diagonal part.
        p1 = abs2(S12) + abs2(S13) + abs2(S23)
        if (p1 == 0) # diagonal tensor: sort the diagonal and permute the basis vectors
            v1, v2, v3 = basevec(Vec{3, R})
            if S11 < S22
                if S22 < S33
                    return Eigen(Vec{3, R}((S11, S22, S33)), Tensor{2, 3, R}((v1[1], v1[2], v1[3], v2[1], v2[2], v2[3], v3[1], v3[2], v3[3])))
                elseif S33 < S11
                    return Eigen(Vec{3, R}((S33, S11, S22)), Tensor{2, 3, R}((v3[1], v3[2], v3[3], v1[1], v1[2], v1[3], v2[1], v2[2], v2[3])))
                else
                    return Eigen(Vec{3, R}((S11, S33, S22)), Tensor{2, 3, R}((v1[1], v1[2], v1[3], v3[1], v3[2], v3[3], v2[1], v2[2], v2[3])))
                end
            else #S22 < S11
                if S11 < S33
                    return Eigen(Vec{3, R}((S22, S11, S33)), Tensor{2, 3, R}((v2[1], v2[2], v2[3], v1[1], v1[2], v1[3], v3[1], v3[2], v3[3])))
                elseif S33 < S22
                    return Eigen(Vec{3, R}((S33, S22, S11)), Tensor{2, 3, R}((v3[1], v3[2], v3[3], v2[1], v2[2], v2[3], v1[1], v1[2], v1[3])))
                else
                    return Eigen(Vec{3, R}((S22, S33, S11)), Tensor{2, 3, R}((v2[1], v2[2], v2[3], v3[1], v3[2], v3[3], v1[1], v1[2], v1[3])))
                end
            end
        end

        # B = (S - q*I) / p is a scaled, trace-free version of S whose
        # eigenvalues are obtained from acos of its (halved) determinant.
        q = (S11 + S22 + S33) / 3
        p2 = abs2(S11 - q) + abs2(S22 - q) + abs2(S33 - q) + 2 * p1
        p = sqrt(p2 / 6)
        invp = inv(p)
        b11 = (S11 - q) * invp
        b22 = (S22 - q) * invp
        b33 = (S33 - q) * invp
        b12 = S12 * invp
        b13 = S13 * invp
        b23 = S23 * invp
        B = SymmetricTensor{2, 3, R}((b11, b12, b13, b22, b23, b33))
        r = det(B) / 2

        # In exact arithmetic for a symmetric matrix -1 <= r <= 1
        # but computation error can leave it slightly outside this range.
        if (r <= -1)
            phi = R(pi) / 3
        elseif (r >= 1)
            phi = zero(R)
        else
            phi = acos(r) / 3
        end

        λ3 = q + 2 * p * cos(phi)
        λ1 = q + 2 * p * cos(phi + (2*R(pi)/3))
        λ2 = 3 * q - λ1 - λ3 # since tr(S) = λ1 + λ2 + λ3

        if r > 0 # Helps with conditioning the eigenvector calculation
            (λ1, λ3) = (λ3, λ1)
        end

        # Calculate the first eigenvector
        # This should be orthogonal to these three rows of A - λ1*I
        # Use all combinations of cross products and choose the "best" one
        r₁ = Vec{3, R}((S11 - λ1, S12, S13))
        r₂ = Vec{3, R}((S12, S22 - λ1, S23))
        r₃ = Vec{3, R}((S13, S23, S33 - λ1))
        n₁ = r₁ ⋅ r₁
        n₂ = r₂ ⋅ r₂
        n₃ = r₃ ⋅ r₃

        r₁₂ = r₁ × r₂
        r₂₃ = r₂ × r₃
        r₃₁ = r₃ × r₁
        n₁₂ = r₁₂ ⋅ r₁₂
        n₂₃ = r₂₃ ⋅ r₂₃
        n₃₁ = r₃₁ ⋅ r₃₁

        # we want best angle so we put all norms on same footing
        # (cheaper to multiply by third nᵢ rather than divide by the two involved)
        if n₁₂ * n₃ > n₂₃ * n₁
            if n₁₂ * n₃ > n₃₁ * n₂
                Φ1 = r₁₂ / sqrt(n₁₂)
            else
                Φ1 = r₃₁ / sqrt(n₃₁)
            end
        else
            if n₂₃ * n₁ > n₃₁ * n₂
                Φ1 = r₂₃ / sqrt(n₂₃)
            else
                Φ1 = r₃₁ / sqrt(n₃₁)
            end
        end

        # Calculate the second eigenvector
        # This should be orthogonal to the previous eigenvector and the three
        # rows of A - λ2*I. However, we need to "solve" the remaining 2x2 subspace
        # problem in case the cross products are identically or nearly zero

        # The remaining 2x2 subspace is spanned by two unit vectors orthogonal to Φ1:
        if abs(Φ1[1]) < abs(Φ1[2]) # safe to set one component to zero, depending on this
            orthogonal1 = Vec{3, R}((-Φ1[3], zero(R), Φ1[1])) / sqrt(abs2(Φ1[1]) + abs2(Φ1[3]))
        else
            orthogonal1 = Vec{3, R}((zero(R), Φ1[3], -Φ1[2])) / sqrt(abs2(Φ1[2]) + abs2(Φ1[3]))
        end
        orthogonal2 = Φ1 × orthogonal1

        # The projected 2x2 eigenvalue problem is C x = 0 where C is the projection
        # of (A - λ2*I) onto the subspace {orthogonal1, orthogonal2}
        a_orth1_1 = S11 * orthogonal1[1] + S12 * orthogonal1[2] + S13 * orthogonal1[3]
        a_orth1_2 = S12 * orthogonal1[1] + S22 * orthogonal1[2] + S23 * orthogonal1[3]
        a_orth1_3 = S13 * orthogonal1[1] + S23 * orthogonal1[2] + S33 * orthogonal1[3]
        a_orth2_1 = S11 * orthogonal2[1] + S12 * orthogonal2[2] + S13 * orthogonal2[3]
        a_orth2_2 = S12 * orthogonal2[1] + S22 * orthogonal2[2] + S23 * orthogonal2[3]
        a_orth2_3 = S13 * orthogonal2[1] + S23 * orthogonal2[2] + S33 * orthogonal2[3]

        c11 = orthogonal1[1]*a_orth1_1 + orthogonal1[2]*a_orth1_2 + orthogonal1[3]*a_orth1_3 - λ2
        c12 = orthogonal1[1]*a_orth2_1 + orthogonal1[2]*a_orth2_2 + orthogonal1[3]*a_orth2_3
        c22 = orthogonal2[1]*a_orth2_1 + orthogonal2[2]*a_orth2_2 + orthogonal2[3]*a_orth2_3 - λ2

        # Solve this robustly (some values might be small or zero)
        c11² = abs2(c11)
        c12² = abs2(c12)
        c22² = abs2(c22)
        if c11² >= c22²
            if c11² > 0 || c12² > 0
                if c11² >= c12²
                    tmp = c12 / c11
                    p2 = inv(sqrt(1 + abs2(tmp)))
                    p1 = tmp * p2
                else
                    tmp = c11 / c12 # TODO check for complex input
                    p1 = inv(sqrt(1 + abs2(tmp)))
                    p2 = tmp * p1
                end
                Φ2 = p1*orthogonal1 - p2*orthogonal2
            else # c11 == 0 && c12 == 0 && c22 == 0 (smaller than c11)
                Φ2 = orthogonal1
            end
        else
            if c22² >= c12²
                tmp = c12 / c22
                p1 = inv(sqrt(1 + abs2(tmp)))
                p2 = tmp * p1
            else
                tmp = c22 / c12
                p2 = inv(sqrt(1 + abs2(tmp)))
                p1 = tmp * p2
            end
            Φ2 = p1*orthogonal1 - p2*orthogonal2
        end

        # The third eigenvector is a simple cross product of the other two
        Φ3 = Φ1 × Φ2 # should be normalized already

        # Sort them back to the original ordering, if necessary
        if r > 0
            (λ1, λ3) = (λ3, λ1)
            (Φ1, Φ3) = (Φ3, Φ1)
        end

        λ = Vec{3}((λ1, λ2, λ3))
        Φ = Tensor{2, 3}((Φ1[1], Φ1[2], Φ1[3],
                          Φ2[1], Φ2[2], Φ2[3],
                          Φ3[1], Φ3[2], Φ3[3]))
        return Eigen(λ, Φ)
    end
end
|
{"hexsha": "5080ff924192e4a44809062a0ded84bc2a711080", "size": 8350, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/eigen.jl", "max_stars_repo_name": "UnofficialJuliaMirror/Tensors.jl-48a634ad-e948-5137-8d70-aa71f2a747f4", "max_stars_repo_head_hexsha": "0a881a8e1739184a0acc3a153f204460588a4746", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 94, "max_stars_repo_stars_event_min_datetime": "2017-02-09T12:23:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-16T10:57:53.000Z", "max_issues_repo_path": "src/eigen.jl", "max_issues_repo_name": "wangleiphy/Tensors.jl", "max_issues_repo_head_hexsha": "0a881a8e1739184a0acc3a153f204460588a4746", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 121, "max_issues_repo_issues_event_min_datetime": "2017-02-09T13:02:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-18T14:22:10.000Z", "max_forks_repo_path": "src/eigen.jl", "max_forks_repo_name": "wangleiphy/Tensors.jl", "max_forks_repo_head_hexsha": "0a881a8e1739184a0acc3a153f204460588a4746", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2017-02-09T13:04:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-16T10:57:55.000Z", "avg_line_length": 38.6574074074, "max_line_length": 142, "alphanum_fraction": 0.4700598802, "num_tokens": 3135}
|
# Tests for the hour-of-day (HoD) temporal transform.
@testset "temporal" begin
    hod = HoD()
    @test hod isa Transform
    @test cardinality(hod) == OneToOne()

    @testset "Basic" begin
        x = collect(DateTime(2020, 1, 1, 9, 0):Hour(1):DateTime(2020, 5, 7, 9, 0))
        # Expected result is an hour a day starting and ending on the 9th hour inclusive,
        # with 126 full days in the middle
        expected = [9:23..., repeat(0:23, 126)..., 0:9...]

        @test FeatureTransforms.apply(x, hod) == expected
        @test hod(x) == expected

        @testset "StepRange" begin
            # Same hourly range as above, but kept as a lazy StepRange
            # instead of being collected into a Vector.
            x = DateTime(2020, 1, 1, 9, 0):Hour(1):DateTime(2020, 5, 7, 9, 0)
            @test FeatureTransforms.apply(x, hod) == expected
            @test hod(x) == expected
        end

        @testset "DST" begin
            x = ZonedDateTime(2020, 3, 7, 9, 0, tz"America/New_York"):Hour(1):ZonedDateTime(2020, 3, 8, 9, 0, tz"America/New_York")

            # expected result skips the DST transition hour of 2
            expected_dst = [9:23..., 0, 1, 3:9...]

            @test FeatureTransforms.apply(x, hod) == expected_dst
            @test hod(x) == expected_dst
        end
    end
end
|
{"hexsha": "bcd142596700e44dc7fbeda912140e64d6e1060b", "size": 1143, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/temporal.jl", "max_stars_repo_name": "invenia/Transforms.jl", "max_stars_repo_head_hexsha": "d2204817f0744b1b4cd1dc02f73f19e8228f7873", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2021-04-07T19:51:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T15:29:13.000Z", "max_issues_repo_path": "test/temporal.jl", "max_issues_repo_name": "invenia/FeatureTransforms.jl", "max_issues_repo_head_hexsha": "1c86cbdbaa2431e5275774d0f3374bfa7481e1cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 75, "max_issues_repo_issues_event_min_datetime": "2021-02-25T20:01:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T13:24:23.000Z", "max_forks_repo_path": "test/temporal.jl", "max_forks_repo_name": "invenia/Transforms.jl", "max_forks_repo_head_hexsha": "d2204817f0744b1b4cd1dc02f73f19e8228f7873", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6363636364, "max_line_length": 131, "alphanum_fraction": 0.5660542432, "num_tokens": 371}
|
#!/usr/bin/env python
'''
The MIT License (MIT)
Copyright (c) 2019 Kunal Shah
kshah.kunal@gmail.com
'''
# std lib imports
import sys
import time
import numpy as np
import numpy.linalg as la
# plot
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Standard ROS message
import rospy
from std_msgs.msg import Bool, Time
from geometry_msgs.msg import Pose, PoseStamped, Twist
from quadProxy import QuadProxy
class Replay(object):
    """Interactive 3-D trajectory viewer for the quads in the manifest.

    Polls each quad's pose through a QuadProxy and scatter-plots the
    positions in a live matplotlib figure while recording the history,
    which is printed when the node shuts down.
    """
    def __init__(self):
        # self.manifest = ["rexquad3"]
        self.manifest = ["rexquad0", "rexquad3", "quad3"]
        print(self.manifest)
        # make proxy objects
        self.proxies = [QuadProxy(quad) for quad in self.manifest]
        # empty list for the positions (one history list per quad in the manifest)
        self.trajHist = [[], [], []]
        self.fig = plt.figure(facecolor='w', edgecolor='k')
        self.ax = self.fig.add_subplot(111, projection='3d')
        # one fixed plot color per quad, matched by index with self.proxies
        self.colors = ['b', 'g', 'r']
        # fixed axis limits for the flight volume (meters, presumably — confirm)
        self.ax.set_xlim([-6, 2])
        self.ax.set_ylim([-1.5, 1.5])
        self.ax.set_zlim([0, 2.5])
        plt.ion()
        plt.draw()
        # make plot timer
        # self.plotter = rospy.Timer(rospy.Duration(.05), self.drawCB)

    def drawCB(self):
        # Append each quad's current position to its history and scatter it.
        for i, (quad, color) in enumerate(zip(self.proxies, self.colors)):
            self.trajHist[i].append([quad.pose.position.x,
                                     quad.pose.position.y,
                                     quad.pose.position.z])
            self.ax.scatter(quad.pose.position.x,
                            quad.pose.position.y,
                            quad.pose.position.z,
                            color=color)
        plt.draw()

    def run(self):
        # Poll-and-plot loop; on shutdown, dump the recorded trajectories.
        rate = rospy.Rate(50)  # 50 Hz (original comment said 10 Hz, which did not match)
        while not rospy.is_shutdown():
            # usrCmd = raw_input("enter to stop a commmand: ")
            # self.plotter.shutdown()
            self.drawCB()
            plt.draw()
            plt.pause(.0000001)
            rate.sleep()
        for quadname, traj in zip(self.manifest, self.trajHist):
            print("traj for: " + quadname)
            for pt in traj:
                print(pt)
if __name__ == '__main__':
    # make tower
    # Instantiate the viewer and run the blocking poll/plot loop.
    player = Replay()
    player.run()
|
{"hexsha": "b2fc962421f31f9ce9e99e822a0de45e2e80bb21", "size": 2333, "ext": "py", "lang": "Python", "max_stars_repo_path": "mslquad/scripts/replay.py", "max_stars_repo_name": "StanfordMSL/mslquad", "max_stars_repo_head_hexsha": "c319ecf4ba1063075221b67f12f4e017992f28fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2019-02-05T22:02:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T03:26:05.000Z", "max_issues_repo_path": "mslquad/scripts/replay.py", "max_issues_repo_name": "StanfordMSL/msl_quad", "max_issues_repo_head_hexsha": "c319ecf4ba1063075221b67f12f4e017992f28fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 48, "max_issues_repo_issues_event_min_datetime": "2019-02-07T22:17:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-01T22:34:58.000Z", "max_forks_repo_path": "mslquad/scripts/replay.py", "max_forks_repo_name": "StanfordMSL/mslquad", "max_forks_repo_head_hexsha": "c319ecf4ba1063075221b67f12f4e017992f28fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-02-25T22:42:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-15T05:23:58.000Z", "avg_line_length": 29.1625, "max_line_length": 74, "alphanum_fraction": 0.560222889, "include": true, "reason": "import numpy", "num_tokens": 562}
|
#!python3
import numpy as np
from magLabUtilities.signalutilities.signals import SignalThread, Signal
from magLabUtilities.signalutilities.calculus import integralTrapQuadrature
if __name__ == '__main__':
    # Smoke test: integrate a short signal x over times t with the
    # trapezoidal-quadrature integral helper.
    x = SignalThread(np.array([1,2,4]))
    t = SignalThread(np.array([0,1,3]))
    intXDx = integralTrapQuadrature(x, t)
    print('here')
|
{"hexsha": "ade03be056b723ec98ef352b8b5ab7b649312909", "size": 360, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/integrals.py", "max_stars_repo_name": "MarkTravers/magLabUtilities", "max_stars_repo_head_hexsha": "e116c8cb627cd82c3b8ba651dd6979b66e568632", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/integrals.py", "max_issues_repo_name": "MarkTravers/magLabUtilities", "max_issues_repo_head_hexsha": "e116c8cb627cd82c3b8ba651dd6979b66e568632", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/integrals.py", "max_forks_repo_name": "MarkTravers/magLabUtilities", "max_forks_repo_head_hexsha": "e116c8cb627cd82c3b8ba651dd6979b66e568632", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6923076923, "max_line_length": 76, "alphanum_fraction": 0.7222222222, "include": true, "reason": "import numpy", "num_tokens": 92}
|
(* Title: N-Algebras
Author: Walter Guttmann
Maintainer: Walter Guttmann <walter.guttmann at canterbury.ac.nz>
*)
section \<open>N-Algebras\<close>
theory N_Algebras
imports Stone_Kleene_Relation_Algebras.Iterings Base Lattice_Ordered_Semirings
begin
(* Common base for n-algebras: a bounded idempotent left semiring whose order
   also carries a bounded distributive lattice structure, extended with the
   operations n and L (from the classes n and L).  The "AACP Theorem" text
   blocks below refer to the numbered theorems of the accompanying article. *)
class C_left_n_algebra = bounded_idempotent_left_semiring + bounded_distrib_lattice + n + L
begin
(* C x cuts x down to the part below the vector n(L) * top. *)
abbreviation C :: "'a \<Rightarrow> 'a" where "C x \<equiv> n(L) * top \<sqinter> x"
text \<open>AACP Theorem 3.38\<close>
(* C is monotone with respect to the lattice order. *)
lemma C_isotone:
"x \<le> y \<longrightarrow> C x \<le> C y"
using inf.sup_right_isotone by auto
text \<open>AACP Theorem 3.40\<close>
(* C never increases its argument. *)
lemma C_decreasing:
"C x \<le> x"
by simp
end
(* Left n-algebras: C_left_n_algebra extended by ten axioms relating the
   operations n and L to meet, join and composition.  The lemmas in this
   class derive the numbered facts of the AACP article from these axioms. *)
class left_n_algebra = C_left_n_algebra +
assumes n_dist_n_add : "n(x) \<squnion> n(y) = n(n(x) * top \<squnion> y)"
assumes n_export : "n(x) * n(y) = n(n(x) * y)"
assumes n_left_upper_bound : "n(x) \<le> n(x \<squnion> y)"
assumes n_nL_meet_L_nL0 : "n(L) * x = (x \<sqinter> L) \<squnion> n(L * bot) * x"
assumes n_n_L_split_n_n_L_L : "x * n(y) * L = x * bot \<squnion> n(x * n(y) * L) * L"
assumes n_sub_nL : "n(x) \<le> n(L)"
assumes n_L_decreasing : "n(x) * L \<le> x"
assumes n_L_T_meet_mult_combined: "C (x * y) * z \<le> C x * y * C z"
assumes n_n_top_split_n_top : "x * n(y) * top \<le> x * bot \<squnion> n(x * y) * top"
assumes n_top_meet_L_below_L : "x * top * y \<sqinter> L \<le> x * L * y"
begin
(* Every left n-algebra is in particular a lattice-ordered pre-left semiring. *)
subclass lattice_ordered_pre_left_semiring ..
(* C of a product is below the product with C applied to the first factor. *)
lemma n_L_T_meet_mult_below:
"C (x * y) \<le> C x * y"
proof -
have "C (x * y) \<le> C x * y * C 1"
by (meson order.trans mult_sub_right_one n_L_T_meet_mult_combined)
also have "... \<le> C x * y"
by (metis mult_1_right mult_left_sub_dist_inf_right)
finally show ?thesis
.
qed
text \<open>AACP Theorem 3.41\<close>
(* C propagates from the left factor of a product to the right one. *)
lemma n_L_T_meet_mult_propagate:
"C x * y \<le> x * C y"
proof -
have "C x * y \<le> C x * 1 * C y"
by (metis mult_1_right mult_assoc n_L_T_meet_mult_combined mult_1_right)
also have "... \<le> x * C y"
by (simp add: mult_right_sub_dist_inf_right)
finally show ?thesis
.
qed
text \<open>AACP Theorem 3.43\<close>
(* Products starting with an n-value are fixpoints of C. *)
lemma C_n_mult_closed:
"C (n(x) * y) = n(x) * y"
by (simp add: inf.absorb2 mult_isotone n_sub_nL)
text \<open>AACP Theorem 3.40\<close>
lemma meet_L_below_C:
"x \<sqinter> L \<le> C x"
by (simp add: le_supI1 n_nL_meet_L_nL0)
text \<open>AACP Theorem 3.42\<close>
lemma n_L_T_meet_mult:
"C (x * y) = C x * y"
apply (rule order.antisym)
apply (rule n_L_T_meet_mult_below)
by (smt (z3) C_n_mult_closed inf.boundedE inf.sup_monoid.add_assoc inf.sup_monoid.add_commute mult_right_sub_dist_inf mult_assoc)
text \<open>AACP Theorem 3.42\<close>
lemma C_mult_propagate:
"C x * y = C x * C y"
by (smt (z3) C_n_mult_closed order.eq_iff inf.left_commute inf.sup_monoid.add_commute mult_left_sub_dist_inf_right n_L_T_meet_mult_propagate)
text \<open>AACP Theorem 3.32\<close>
lemma meet_L_below_n_L:
"x \<sqinter> L \<le> n(L) * x"
by (simp add: n_nL_meet_L_nL0)
text \<open>AACP Theorem 3.27\<close>
lemma n_vector_meet_L:
"x * top \<sqinter> L \<le> x * L"
by (metis mult_1_right n_top_meet_L_below_L)
lemma n_right_upper_bound:
"n(x) \<le> n(y \<squnion> x)"
by (simp add: n_left_upper_bound sup_commute)
text \<open>AACP Theorem 3.1\<close>
(* Monotonicity of n. *)
lemma n_isotone:
"x \<le> y \<Longrightarrow> n(x) \<le> n(y)"
by (metis le_iff_sup n_left_upper_bound)
lemma n_add_left_zero:
"n(bot) \<squnion> n(x) = n(x)"
using le_iff_sup sup_bot_right sup_right_divisibility n_isotone by auto
text \<open>AACP Theorem 3.13\<close>
lemma n_mult_right_zero_L:
"n(x) * bot \<le> L"
by (meson bot_least mult_isotone n_L_decreasing n_sub_nL order_trans)
lemma n_add_left_top:
"n(top) \<squnion> n(x) = n(top)"
by (simp add: sup_absorb1 n_isotone)
text \<open>AACP Theorem 3.18\<close>
(* n is invariant under composing its argument's n-value with L. *)
lemma n_n_L:
"n(n(x) * L) = n(x)"
by (metis order.antisym n_dist_n_add n_export n_sub_nL sup_bot_right sup_commute sup_top_left n_add_left_zero n_right_upper_bound)
lemma n_mult_transitive:
"n(x) * n(x) \<le> n(x)"
by (metis mult_right_isotone n_export n_sub_nL n_n_L)
lemma n_mult_left_absorb_add_sub:
"n(x) * (n(x) \<squnion> n(y)) \<le> n(x)"
by (metis mult_right_isotone n_dist_n_add n_export n_sub_nL n_n_L)
text \<open>AACP Theorem 3.21\<close>
lemma n_mult_left_lower_bound:
"n(x) * n(y) \<le> n(x)"
by (metis mult_right_isotone n_export n_sub_nL n_n_L)
text \<open>AACP Theorem 3.20\<close>
lemma n_mult_left_zero:
"n(bot) * n(x) = n(bot)"
by (metis n_export sup_absorb1 n_add_left_zero n_mult_left_lower_bound)
lemma n_mult_right_one:
"n(x) * n(top) = n(x)"
using n_dist_n_add n_export sup_commute n_add_left_zero by fastforce
lemma n_L_increasing:
"n(x) \<le> n(n(x) * L)"
by (simp add: n_n_L)
text \<open>AACP Theorem 3.2\<close>
(* Galois-connection-style characterisation: comparing n-values amounts to
   bounding n(x) * L by y. *)
lemma n_galois:
"n(x) \<le> n(y) \<longleftrightarrow> n(x) * L \<le> y"
by (metis mult_left_isotone n_L_decreasing n_L_increasing n_isotone order_trans)
lemma n_add_n_top:
"n(x \<squnion> n(x) * top) = n(x)"
by (metis n_dist_n_add sup.idem sup_commute)
text \<open>AACP Theorem 3.6\<close>
lemma n_L_below_nL_top:
"L \<le> n(L) * top"
by (metis inf_top.left_neutral meet_L_below_n_L)
text \<open>AACP Theorem 3.4\<close>
(* Characterisation of the order in terms of L and n; both implications are
   proved separately below. *)
lemma n_less_eq_char_n:
"x \<le> y \<longleftrightarrow> x \<le> y \<squnion> L \<and> C x \<le> y \<squnion> n(y) * top"
proof
assume "x \<le> y"
thus "x \<le> y \<squnion> L \<and> C x \<le> y \<squnion> n(y) * top"
by (simp add: inf.coboundedI2 le_supI1)
next
assume 1: "x \<le> y \<squnion> L \<and> C x \<le> y \<squnion> n(y) * top"
hence "x \<le> y \<squnion> (x \<sqinter> L)"
using sup_commute sup_inf_distrib2 by force
also have "... \<le> y \<squnion> C x"
using sup_right_isotone meet_L_below_C by blast
also have "... \<le> y \<squnion> n(y) * top"
using 1 by simp
finally have "x \<le> y \<squnion> (L \<sqinter> n(y) * top)"
using 1 by (simp add: sup_inf_distrib1)
thus "x \<le> y"
by (metis inf_commute n_L_decreasing order_trans sup_absorb1 n_vector_meet_L)
qed
text \<open>AACP Theorem 3.31\<close>
lemma n_L_decreasing_meet_L:
"n(x) * L \<le> x \<sqinter> L"
using n_sub_nL n_galois by auto
text \<open>AACP Theorem 3.5\<close>
lemma n_zero_L_zero:
"n(bot) * L = bot"
by (simp add: le_bot n_L_decreasing)
(* L absorbs top on the right; together with n_L_top_L this makes L behave
   like a "vector" in the relation-algebraic sense. *)
lemma n_L_top_below_L:
"L * top \<le> L"
proof -
have "n(L * bot) * L * top \<le> L * bot"
by (metis dense_top_closed mult_isotone n_L_decreasing zero_vector mult_assoc)
hence "n(L * bot) * L * top \<le> L"
using order_lesseq_imp zero_right_mult_decreasing by blast
hence "n(L) * L * top \<le> L"
by (metis inf.absorb2 n_nL_meet_L_nL0 order.refl sup.absorb_iff1 top_right_mult_increasing mult_assoc)
thus "L * top \<le> L"
by (metis inf.absorb2 inf.sup_monoid.add_commute n_L_decreasing n_L_below_nL_top n_vector_meet_L)
qed
text \<open>AACP Theorem 3.9\<close>
lemma n_L_top_L:
"L * top = L"
by (simp add: order.antisym top_right_mult_increasing n_L_top_below_L)
text \<open>AACP Theorem 3.10\<close>
lemma n_L_below_L:
"L * x \<le> L"
by (metis mult_right_isotone top.extremum n_L_top_L)
text \<open>AACP Theorem 3.7\<close>
lemma n_nL_nT:
"n(L) = n(top)"
using order.eq_iff n_sub_nL n_add_left_top by auto
text \<open>AACP Theorem 3.8\<close>
lemma n_L_L:
"n(L) * L = L"
using order.antisym meet_L_below_n_L n_L_decreasing_meet_L by fastforce
lemma n_top_L:
"n(top) * L = L"
using n_L_L n_nL_nT by auto
text \<open>AACP Theorem 3.23\<close>
lemma n_n_L_split_n_L:
"x * n(y) * L \<le> x * bot \<squnion> n(x * y) * L"
by (metis n_n_L_split_n_n_L_L n_L_decreasing mult_assoc mult_left_isotone mult_right_isotone n_isotone sup_right_isotone)
text \<open>AACP Theorem 3.12\<close>
lemma n_L_split_n_L_L:
"x * L = x * bot \<squnion> n(x * L) * L"
apply (rule order.antisym)
apply (metis mult_assoc n_n_L_split_n_L n_L_L)
by (simp add: mult_right_isotone n_L_decreasing)
text \<open>AACP Theorem 3.11\<close>
lemma n_L_split_L:
"x * L \<le> x * bot \<squnion> L"
by (metis n_n_L_split_n_n_L_L n_sub_nL sup_right_isotone mult_assoc n_L_L n_galois)
text \<open>AACP Theorem 3.24\<close>
lemma n_split_top:
"x * n(y) * top \<le> x * y \<squnion> n(x * y) * top"
proof -
have "x * bot \<squnion> n(x * y) * top \<le> x * y \<squnion> n(x * y) * top"
by (meson bot_least mult_isotone order.refl sup_left_isotone)
thus ?thesis
using order.trans n_n_top_split_n_top by blast
qed
text \<open>AACP Theorem 3.9\<close>
lemma n_L_L_L:
"L * L = L"
by (metis inf.sup_monoid.add_commute inf_absorb1 n_L_below_L n_L_top_L n_vector_meet_L)
text \<open>AACP Theorem 3.9\<close>
lemma n_L_top_L_L:
"L * top * L = L"
by (simp add: n_L_L_L n_L_top_L)
text \<open>AACP Theorem 3.19\<close>
lemma n_n_nL:
"n(x) = n(x) * n(L)"
by (simp add: n_export n_n_L)
lemma n_L_mult_idempotent:
"n(L) * n(L) = n(L)"
using n_n_nL by auto
text \<open>AACP Theorem 3.22\<close>
lemma n_n_L_n:
"n(x * n(y) * L) \<le> n(x * y)"
by (simp add: mult_right_isotone n_L_decreasing mult_assoc n_isotone)
text \<open>AACP Theorem 3.3\<close>
lemma n_less_eq_char:
"x \<le> y \<longleftrightarrow> x \<le> y \<squnion> L \<and> x \<le> y \<squnion> n(y) * top"
by (meson inf.coboundedI2 le_supI1 n_less_eq_char_n)
text \<open>AACP Theorem 3.28\<close>
lemma n_top_meet_L_split_L:
"x * top * y \<sqinter> L \<le> x * bot \<squnion> L * y"
proof -
have "x * top * y \<sqinter> L \<le> x * bot \<squnion> n(x * L) * L * y"
by (smt n_top_meet_L_below_L mult_assoc n_L_L_L n_L_split_n_L_L mult_right_dist_sup mult_left_zero)
also have "... \<le> x * bot \<squnion> x * L * y"
using mult_left_isotone n_L_decreasing sup_right_isotone by force
also have "... \<le> x * bot \<squnion> (x * bot \<squnion> L) * y"
using mult_left_isotone sup_right_isotone n_L_split_L by blast
also have "... = x * bot \<squnion> x * bot * y \<squnion> L * y"
by (simp add: mult_right_dist_sup sup_assoc)
also have "... = x * bot \<squnion> L * y"
by (simp add: mult_assoc)
finally show ?thesis
.
qed
text \<open>AACP Theorem 3.29\<close>
lemma n_top_meet_L_L_meet_L:
"x * top * y \<sqinter> L = x * L * y \<sqinter> L"
apply (rule order.antisym)
apply (simp add: n_top_meet_L_below_L)
by (metis inf.sup_monoid.add_commute inf.sup_right_isotone mult_isotone order.refl top.extremum)
lemma n_n_top_below_n_L:
"n(x * top) \<le> n(x * L)"
by (meson order.trans n_L_decreasing_meet_L n_galois n_vector_meet_L)
text \<open>AACP Theorem 3.14\<close>
lemma n_n_top_n_L:
"n(x * top) = n(x * L)"
by (metis order.antisym mult_right_isotone n_isotone n_n_top_below_n_L top_greatest)
text \<open>AACP Theorem 3.30\<close>
lemma n_meet_L_0_below_0_meet_L:
"(x \<sqinter> L) * bot \<le> x * bot \<sqinter> L"
by (meson inf.boundedE inf.boundedI mult_right_sub_dist_inf_left zero_right_mult_decreasing)
text \<open>AACP Theorem 3.15\<close>
lemma n_n_L_below_L:
"n(x) * L \<le> x * L"
by (metis mult_assoc mult_left_isotone n_L_L_L n_L_decreasing)
lemma n_n_L_below_n_L_L:
"n(x) * L \<le> n(x * L) * L"
by (simp add: mult_left_isotone n_galois n_n_L_below_L)
text \<open>AACP Theorem 3.16\<close>
lemma n_below_n_L:
"n(x) \<le> n(x * L)"
by (simp add: n_galois n_n_L_below_L)
text \<open>AACP Theorem 3.17\<close>
lemma n_below_n_L_mult:
"n(x) \<le> n(L) * n(x)"
by (metis n_export order_trans meet_L_below_n_L n_L_decreasing_meet_L n_isotone n_n_L)
text \<open>AACP Theorem 3.33\<close>
lemma n_meet_L_below:
"n(x) \<sqinter> L \<le> x"
by (meson inf.coboundedI1 inf.coboundedI2 le_supI2 sup.cobounded1 top_right_mult_increasing n_less_eq_char)
text \<open>AACP Theorem 3.35\<close>
lemma n_meet_L_top_below_n_L:
"(n(x) \<sqinter> L) * top \<le> n(x) * L"
proof -
have "(n(x) \<sqinter> L) * top \<le> n(x) * top \<sqinter> L * top"
by (meson mult_right_sub_dist_inf)
thus ?thesis
by (metis n_L_top_L n_vector_meet_L order_trans)
qed
text \<open>AACP Theorem 3.34\<close>
lemma n_meet_L_top_below:
"(n(x) \<sqinter> L) * top \<le> x"
using order.trans n_L_decreasing n_meet_L_top_below_n_L by blast
text \<open>AACP Theorem 3.36\<close>
(* n only depends on the part of its argument below L. *)
lemma n_n_meet_L:
"n(x) = n(x \<sqinter> L)"
by (meson order.antisym inf.cobounded1 n_L_decreasing_meet_L n_galois n_isotone)
lemma n_T_below_n_meet:
"n(x) * top = n(C x) * top"
by (metis inf.absorb2 inf.sup_monoid.add_assoc meet_L_below_C n_n_meet_L)
text \<open>AACP Theorem 3.44\<close>
lemma n_C:
"n(C x) = n(x)"
by (metis n_T_below_n_meet n_export n_mult_right_one)
text \<open>AACP Theorem 3.37\<close>
lemma n_T_meet_L:
"n(x) * top \<sqinter> L = n(x) * L"
by (metis antisym_conv n_L_decreasing_meet_L n_n_L n_n_top_n_L n_vector_meet_L)
text \<open>AACP Theorem 3.39\<close>
lemma n_L_top_meet_L:
"C L = L"
by (simp add: n_L_L n_T_meet_L)
end
(* Full n-algebras: left n-algebras whose semiring additionally satisfies the
   idempotent_left_zero_semiring axioms. *)
class n_algebra = left_n_algebra + idempotent_left_zero_semiring
begin
(* independence of axioms, checked in n_algebra without the respective axiom: *)
proposition n_dist_n_add : "n(x) \<squnion> n(y) = n(n(x) * top \<squnion> y)" (* nitpick [expect=genuine,card=5] *) oops
proposition n_export : "n(x) * n(y) = n(n(x) * y)" (* nitpick [expect=genuine,card=4] *) oops
proposition n_left_upper_bound : "n(x) \<le> n(x \<squnion> y)" (* nitpick [expect=genuine,card=5] *) oops
proposition n_nL_meet_L_nL0 : "n(L) * x = (x \<sqinter> L) \<squnion> n(L * bot) * x" (* nitpick [expect=genuine,card=2] *) oops
proposition n_n_L_split_n_n_L_L : "x * n(y) * L = x * bot \<squnion> n(x * n(y) * L) * L" (* nitpick [expect=genuine,card=6] *) oops
proposition n_sub_nL : "n(x) \<le> n(L)" (* nitpick [expect=genuine,card=2] *) oops
proposition n_L_decreasing : "n(x) * L \<le> x" (* nitpick [expect=genuine,card=3] *) oops
proposition n_L_T_meet_mult_combined: "C (x * y) * z \<le> C x * y * C z" (* nitpick [expect=genuine,card=4] *) oops
proposition n_n_top_split_n_top : "x * n(y) * top \<le> x * bot \<squnion> n(x * y) * top" (* nitpick [expect=genuine,card=4] *) oops
proposition n_top_meet_L_below_L : "x * top * y \<sqinter> L \<le> x * L * y" (* nitpick [expect=genuine,card=5] *) oops
text \<open>AACP Theorem 3.25\<close>
lemma n_top_split_0:
"n(x) * top * y \<le> x * y \<squnion> n(x * bot) * top"
proof -
have 1: "n(x) * top * y \<sqinter> L \<le> x * y"
using inf.coboundedI1 mult_left_isotone n_L_decreasing_meet_L n_top_meet_L_L_meet_L by force
have "n(x) * top * y = n(x) * n(L) * top * y"
using n_n_nL by auto
also have "... = n(x) * ((top * y \<sqinter> L) \<squnion> n(L * bot) * top * y)"
by (metis mult_assoc n_nL_meet_L_nL0)
also have "... \<le> n(x) * (top * y \<sqinter> L) \<squnion> n(x) * n(L * bot) * top"
by (metis sup_right_isotone mult_assoc mult_left_dist_sup mult_right_isotone top_greatest)
also have "... \<le> (n(x) * top * y \<sqinter> L) \<squnion> n(n(x) * L * bot) * top"
by (smt sup_left_isotone order.trans inf_greatest mult_assoc mult_left_sub_dist_inf_left mult_left_sub_dist_inf_right n_export n_galois n_sub_nL)
also have "... \<le> x * y \<squnion> n(n(x) * L * bot) * top"
using 1 sup_left_isotone by blast
also have "... \<le> x * y \<squnion> n(x * bot) * top"
using mult_left_isotone n_galois n_isotone order.refl sup_right_isotone by auto
finally show ?thesis
.
qed
text \<open>AACP Theorem 3.26\<close>
lemma n_top_split:
"n(x) * top * y \<le> x * y \<squnion> n(x * y) * top"
by (metis order.trans sup_bot_right mult_assoc sup_right_isotone mult_left_isotone mult_left_sub_dist_sup_right n_isotone n_top_split_0)
(* The following properties do NOT hold in general n-algebras: nitpick finds
   genuine counterexamples of the indicated cardinality for each of them. *)
proposition n_zero: "n(bot) = bot" nitpick [expect=genuine,card=2] oops
proposition n_one: "n(1) = bot" nitpick [expect=genuine,card=2] oops
proposition n_nL_one: "n(L) = 1" nitpick [expect=genuine,card=2] oops
proposition n_nT_one: "n(top) = 1" nitpick [expect=genuine,card=2] oops
proposition n_n_zero: "n(x) = n(x * bot)" nitpick [expect=genuine,card=2] oops
proposition n_dist_add: "n(x) \<squnion> n(y) = n(x \<squnion> y)" nitpick [expect=genuine,card=4] oops
proposition n_L_split: "x * n(y) * L = x * bot \<squnion> n(x * y) * L" nitpick [expect=genuine,card=3] oops
proposition n_split: "x \<le> x * bot \<squnion> n(x * L) * top" nitpick [expect=genuine,card=2] oops
proposition n_mult_top_1: "n(x * y) \<le> n(x * n(y) * top)" nitpick [expect=genuine,card=3] oops
proposition l91_1: "n(L) * x \<le> n(x * top) * top" nitpick [expect=genuine,card=3] oops
proposition meet_domain_top: "x \<sqinter> n(y) * top = n(y) * x" nitpick [expect=genuine,card=3] oops
proposition meet_domain_2: "x \<sqinter> n(y) * top \<le> n(L) * x" nitpick [expect=genuine,card=4] oops
proposition n_nL_top_n_top_meet_L_top_2: "n(L) * x * top \<le> n(x * top \<sqinter> L) * top" nitpick [expect=genuine,card=3] oops
proposition n_nL_top_n_top_meet_L_top_1: "n(x * top \<sqinter> L) * top \<le> n(L) * x * top" nitpick [expect=genuine,card=2] oops
proposition l9: "x * bot \<sqinter> L \<le> n(x * L) * L" nitpick [expect=genuine,card=4] oops
proposition l18_2: "n(x * L) * L \<le> n(x) * L" nitpick [expect=genuine,card=3] oops
proposition l51_1: "n(x) * L \<le> (x \<sqinter> L) * bot" nitpick [expect=genuine,card=2] oops
proposition l51_2: "(x \<sqinter> L) * bot \<le> n(x) * L" nitpick [expect=genuine,card=4] oops
proposition n_split_equal: "x \<squnion> n(x * L) * top = x * bot \<squnion> n(x * L) * top" nitpick [expect=genuine,card=2] oops
proposition n_split_top: "x * top \<le> x * bot \<squnion> n(x * L) * top" nitpick [expect=genuine,card=2] oops
proposition n_mult: "n(x * n(y) * L) = n(x * y)" nitpick [expect=genuine,card=3] oops
proposition n_mult_1: "n(x * y) \<le> n(x * n(y) * L)" nitpick [expect=genuine,card=3] oops
proposition n_mult_top: "n(x * n(y) * top) = n(x * y)" nitpick [expect=genuine,card=3] oops
proposition n_mult_right_upper_bound: "n(x * y) \<le> n(z) \<longleftrightarrow> n(x) \<le> n(z) \<and> x * n(y) * L \<le> x * bot \<squnion> n(z) * L" nitpick [expect=genuine,card=2] oops
proposition meet_domain: "x \<sqinter> n(y) * z = n(y) * (x \<sqinter> z)" nitpick [expect=genuine,card=3] oops
proposition meet_domain_1: "x \<sqinter> n(y) * z \<le> n(y) * x" nitpick [expect=genuine,card=3] oops
proposition meet_domain_top_3: "x \<sqinter> n(y) * top \<le> n(y) * x" nitpick [expect=genuine,card=3] oops
proposition n_n_top_n_top_split_n_n_top_top: "n(x) * top \<squnion> x * n(y) * top = x * bot \<squnion> n(x * n(y) * top) * top" nitpick [expect=genuine,card=2] oops
proposition n_n_top_n_top_split_n_n_top_top_1: "x * bot \<squnion> n(x * n(y) * top) * top \<le> n(x) * top \<squnion> x * n(y) * top" nitpick [expect=genuine,card=5] oops
proposition n_n_top_n_top_split_n_n_top_top_2: "n(x) * top \<squnion> x * n(y) * top \<le> x * bot \<squnion> n(x * n(y) * top) * top" nitpick [expect=genuine,card=2] oops
proposition n_nL_top_n_top_meet_L_top: "n(L) * x * top = n(x * top \<sqinter> L) * top" nitpick [expect=genuine,card=2] oops
proposition l18: "n(x) * L = n(x * L) * L" nitpick [expect=genuine,card=3] oops
proposition l22: "x * bot \<sqinter> L = n(x) * L" nitpick [expect=genuine,card=2] oops
proposition l22_1: "x * bot \<sqinter> L = n(x * L) * L" nitpick [expect=genuine,card=2] oops
proposition l22_2: "x \<sqinter> L = n(x) * L" nitpick [expect=genuine,card=3] oops
proposition l22_3: "x \<sqinter> L = n(x * L) * L" nitpick [expect=genuine,card=3] oops
proposition l22_4: "x \<sqinter> L \<le> n(x) * L" nitpick [expect=genuine,card=3] oops
proposition l22_5: "x * bot \<sqinter> L \<le> n(x) * L" nitpick [expect=genuine,card=4] oops
proposition l23: "x * top \<sqinter> L = n(x) * L" nitpick [expect=genuine,card=3] oops
proposition l51: "n(x) * L = (x \<sqinter> L) * bot" nitpick [expect=genuine,card=2] oops
proposition l91: "x = x * top \<longrightarrow> n(L) * x \<le> n(x) * top" nitpick [expect=genuine,card=3] oops
proposition l92: "x = x * top \<longrightarrow> n(L) * x \<le> n(x \<sqinter> L) * top" nitpick [expect=genuine,card=3] oops
proposition "x \<sqinter> L \<le> n(x) * top" nitpick [expect=genuine,card=3] oops
proposition n_meet_comp: "n(x) \<sqinter> n(y) \<le> n(x) * n(y)" nitpick [expect=genuine,card=3] oops
(* Status unknown: neither a proof nor a counterexample is recorded for the
   remaining propositions (oops without a nitpick call). *)
proposition n_n_meet_L_n_zero: "n(x) = (n(x) \<sqinter> L) \<squnion> n(x * bot)" oops
proposition n_below_n_zero: "n(x) \<le> x \<squnion> n(x * bot)" oops
proposition n_n_top_split_n_L_n_zero_top: "n(x) * top = n(x) * L \<squnion> n(x * bot) * top" oops
proposition n_meet_L_0_0_meet_L: "(x \<sqinter> L) * bot = x * bot \<sqinter> L" oops
end
end
|
{"author": "isabelle-prover", "repo": "mirror-afp-devel", "sha": "c84055551f07621736c3eb6a1ef4fb7e8cc57dd1", "save_path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel", "path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel/mirror-afp-devel-c84055551f07621736c3eb6a1ef4fb7e8cc57dd1/thys/Correctness_Algebras/N_Algebras.thy"}
|
"""Generates a library with images in KITTI format."""
import numpy as np
from renderer.library import Library
# Helper functions that load the current library objects
# Orientation constants (radians) attached to the car entries in loadImages():
# BACK_ORIENT is attached to rear-view images, FRONT_ORIENT to front-view ones.
# Presumably these follow the KITTI rotation_y convention — TODO confirm.
BACK_ORIENT = -np.pi / 2
FRONT_ORIENT = np.pi / 2
# Paths to car and road image directories
FORE_SPACES_FILE = './renderer/imgSampSpaces.pickle' # File with foreground spaces info
LIBRARY_PATH = 'renderer/library/'
BACK_PATH = LIBRARY_PATH + 'roads/'  # background (road) images
FORE_PATH = LIBRARY_PATH + 'cars/'   # foreground (car) images
def loadImages():
    """Build the descriptor tables for background (road) and foreground (car) images.

    Returns:
        tuple: ``(roadImages, carImages)`` where
            * ``roadImages`` is a list of dicts with keys ``roadPath``, ``roadType``,
              ``roadId``, ``backgroundColor`` and ``environment``; ``roadId`` equals
              the entry's list index (0..34).
            * ``carImages`` is a list of dicts with keys ``carPath``, ``type``,
              ``carId``, ``carCategory``, ``carColor`` and ``carOrientation``;
              ``carId`` equals the entry's list index (0..36) and ``carOrientation``
              is BACK_ORIENT or FRONT_ORIENT (radians).
    """
    # Background descriptors: one entry per road scene image under BACK_PATH.
    roadImages = []
    roadImages.append({'roadPath':BACK_PATH + 'desert_kitti.png', \
        'roadType':'Desert Road', 'roadId':0, 'backgroundColor': 'brown light, blue light', 'environment': 'desert'})
    roadImages.append({'roadPath':BACK_PATH + 'city_kitti.png',\
        'roadType':'City Road', 'roadId':1, 'backgroundColor': 'brown light, gray', 'environment': 'city'})
    roadImages.append({'roadPath':BACK_PATH + 'forest_kitti.png',\
        'roadType':'Forest Road', 'roadId':2, 'backgroundColor': 'green light, green dark', 'environment': 'forest'})
    roadImages.append({'roadPath':BACK_PATH + 'big_sur_kitti.png',\
        'roadType':'Big Sur Road', 'roadId':3, 'backgroundColor': 'brown, blue', 'environment': 'city'})
    roadImages.append({'roadPath':BACK_PATH + 'mountain_kitti.jpg',\
        'roadType':'Mountain Road', 'roadId':4, 'backgroundColor': 'green', 'environment': 'forest'})
    roadImages.append({'roadPath':BACK_PATH + 'bridge_kitti.jpg',\
        'roadType':'Bridge Road', 'roadId':5, 'backgroundColor': 'green, red', 'environment': 'forest'})
    roadImages.append({'roadPath':BACK_PATH + 'tunnel_kitti.jpg',\
        'roadType':'Tunnel Road', 'roadId':6, 'backgroundColor': 'gray', 'environment': 'mountain'})
    roadImages.append({'roadPath':BACK_PATH + 'island_kitti.jpg',\
        'roadType':'Island Road', 'roadId':7, 'backgroundColor': 'blue light, green, brown light', 'environment': 'field'})
    roadImages.append({'roadPath':BACK_PATH + 'countryside_kitti.jpg',\
        'roadType':'Countryside Road', 'roadId':8, 'backgroundColor': 'green', 'environment': 'forest'})
    roadImages.append({'roadPath':BACK_PATH + 'hill_kitti.jpg',\
        'roadType':'Hill Road', 'roadId':9, 'backgroundColor': 'green, white', 'environment': 'field'})
    roadImages.append({'roadPath':BACK_PATH + 'alps_kitti.png',\
        'roadType':'Alps Road', 'roadId':10, 'backgroundColor': 'brown light, gray', 'environment': 'mountain'})
    roadImages.append({'roadPath':BACK_PATH + 'bridge_1_kitti.png',\
        'roadType':'Bridge 1 Road', 'roadId':11, 'backgroundColor': 'gray light, blue light', 'environment': 'city'})
    roadImages.append({'roadPath':BACK_PATH + 'building_kitti.png',\
        'roadType':'Building Road', 'roadId':12, 'backgroundColor': 'gray, brown light', 'environment': 'city'})
    roadImages.append({'roadPath':BACK_PATH + 'cloud_kitti.png',\
        'roadType':'Cloud Road', 'roadId':13, 'backgroundColor': 'green, brown, black', 'environment': 'field'})
    roadImages.append({'roadPath':BACK_PATH + 'downtown_kitti.png',\
        'roadType':'Downtown Road', 'roadId':14, 'backgroundColor': 'brown light, yellow, gray', 'environment': 'city'})
    roadImages.append({'roadPath':BACK_PATH + 'freeway_kitti.png',\
        'roadType':'Freeway Road', 'roadId':15, 'backgroundColor': 'gray', 'environment': 'city'})
    roadImages.append({'roadPath':BACK_PATH + 'track_kitti.jpg',\
        'roadType':'Track Road', 'roadId':16, 'backgroundColor': 'blue, blue light', 'environment': 'city'})
    roadImages.append({'roadPath':BACK_PATH + 'rainforest_kitti.png',\
        'roadType':'Rainforest Road', 'roadId':17, 'backgroundColor': 'green, brown light', 'environment': 'forest'})
    roadImages.append({'roadPath':BACK_PATH + 'tree_kitti.png',\
        'roadType':'Tree Road', 'roadId':18, 'backgroundColor': 'green, yellow', 'environment': 'forest'})
    roadImages.append({'roadPath':BACK_PATH + 'trees_kitti.png',\
        'roadType':'Trees Road', 'roadId':19, 'backgroundColor': 'green', 'environment': 'forest'})
    roadImages.append({'roadPath':BACK_PATH + 'fields_kitti.png',\
        'roadType':'Fields Road', 'roadId':20, 'backgroundColor': 'green, brown', 'environment': 'forest, fields'})
    roadImages.append({'roadPath':BACK_PATH + 'construction_kitti.png',\
        'roadType':'Construction Road', 'roadId':21, 'backgroundColor': 'gray, brown', 'environment': 'city'})
    roadImages.append({'roadPath':BACK_PATH + 'little_bridge_kitti.jpg',\
        'roadType':'Little Bridge', 'roadId':22, 'backgroundColor': 'green, gray', 'environment': 'forest'})
    roadImages.append({'roadPath':BACK_PATH + 'parking_lot_kitti.png',\
        'roadType':'Parking Lot', 'roadId':23, 'backgroundColor': 'gray', 'environment': 'city, parking'})
    roadImages.append({'roadPath':BACK_PATH + 'indoor_parking_kitti.png',\
        'roadType':'Indoor Parking Road', 'roadId':24, 'backgroundColor': 'gray', 'environment': 'city, parking'})
    # NOTE(review): 'brow' below looks like a typo for 'brown' — confirm before
    # changing, this string is runtime data consumed elsewhere.
    roadImages.append({'roadPath':BACK_PATH + 'freeway_moto_kitti.jpg',\
        'roadType':'Freeway Moto Road', 'roadId':25, 'backgroundColor': 'black, brow', 'environment': 'desert, freeway'})
    roadImages.append({'roadPath':BACK_PATH + 'freeway_kitti.jpg',\
        'roadType':'Freeway Road', 'roadId':26, 'backgroundColor': 'black, blue, green', 'environment': 'freeway'})
    roadImages.append({'roadPath':BACK_PATH + 'snow_kitti.jpg',\
        'roadType':'Snow Road', 'roadId':27, 'backgroundColor': 'white', 'environment': 'snow, forest'})
    roadImages.append({'roadPath':BACK_PATH + 'icy_kitti.jpg',\
        'roadType':'Icy Road', 'roadId':28, 'backgroundColor': 'white', 'environment': 'snow, forest'})
    roadImages.append({'roadPath':BACK_PATH + 'night_road_kitti.jpg',\
        'roadType':'Night Road', 'roadId':29, 'backgroundColor': 'black', 'environment': 'fields'})
    roadImages.append({'roadPath':BACK_PATH + 'night_bridge_kitti.jpg',\
        'roadType':'Night Bridge Road', 'roadId':30, 'backgroundColor': 'black', 'environment': 'bridge'})
    roadImages.append({'roadPath':BACK_PATH + 'in_tunnel_kitti.jpg',\
        'roadType':'In Tunnel Road', 'roadId':31, 'backgroundColor': 'gray, blue, red', 'environment': 'tunnel'})
    roadImages.append({'roadPath':BACK_PATH + 'rainy_bridge_kitti.jpg',\
        'roadType':'Rainy Bridge Road', 'roadId':32, 'backgroundColor': 'gray, blue', 'environment': 'bridge'})
    roadImages.append({'roadPath':BACK_PATH + 'joshua_tree_kitti.jpg',\
        'roadType':'Joshua Tree Road', 'roadId':33, 'backgroundColor': 'brown, green, blue', 'environment': 'desert'})
    roadImages.append({'roadPath':BACK_PATH + 'yosemite_kitti.png',\
        'roadType':'Yosemite Road', 'roadId':34, 'backgroundColor': 'gray, green, blue', 'environment': 'forest'})
    # Foreground descriptors: one entry per car image under FORE_PATH.
    # NOTE(review): entries 0-8 all carry BACK_ORIENT even though some of their
    # image names say "front" (e.g. bmw_gray_front, fiat_front) — verify against
    # the renderer before relying on carOrientation.
    carImages = [{'carPath':FORE_PATH + 'bmw_gray_front_kitti.png', 'type':'BMW Kitti', \
        'carId':0, 'carCategory': 'car', 'carColor': 'gray', 'carOrientation': BACK_ORIENT},
        {'carPath':FORE_PATH + 'suzuki_rear_kitti.png','type':'Suzuki Kitti',\
        'carId':1, 'carCategory': 'jeep', 'carColor': 'red dark', 'carOrientation': BACK_ORIENT},
        {'carPath':FORE_PATH + 'tesla_rear_kitti.png', 'type':'Tesla Kitti', \
        'carId':2, 'carCategory': 'car', 'carColor': 'white', 'carOrientation': BACK_ORIENT},
        {'carPath':FORE_PATH + 'fiat_front_kitti.png', 'type':'Fiat Kitti',\
        'carId':3, 'carCategory': 'car', 'carColor': 'green', 'carOrientation': BACK_ORIENT},
        {'carPath':FORE_PATH + 'honda_kitti.png', 'type':'Honda Kitti',\
        'carId':4, 'carCategory': 'car', 'carColor': 'gray', 'carOrientation': BACK_ORIENT},
        {'carPath':FORE_PATH + 'toyota_kitti.png', 'type':'Toyota Kitti',\
        'carId':5, 'carCategory': 'car', 'carColor': 'white', 'carOrientation': BACK_ORIENT},
        {'carPath':FORE_PATH + 'peugeot_kitti.png', 'type':'Peugeot Kitti',\
        'carId':6, 'carCategory': 'car', 'carColor': 'orange', 'carOrientation': BACK_ORIENT},
        {'carPath':FORE_PATH + 'chrysler_kitti.png', 'type':'Chrysler Kitti', \
        'carId':7, 'carCategory': 'van', 'carColor': 'gray', 'carOrientation': BACK_ORIENT},
        {'carPath':FORE_PATH + 'bmw_blue_kitti.png', 'type': 'BMW Blue Kitti', \
        'carId':8, 'carCategory': 'car', 'carColor': 'blue', 'carOrientation': BACK_ORIENT},
        {'carPath':FORE_PATH + 'honda_civic_front_kitti.png', 'type':'Honda Civic Front Kitti', \
        'carId':9, 'carCategory': 'car', 'carColor': 'gray', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'toyota_camry_front_kitti.png', 'type': 'Toyota Camry Front Kitti', \
        'carId':10, 'carCategory': 'car', 'carColor': 'cream', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'toyota_prius_front_kitti.png', 'type': 'Toyota Prius Front Kitti', \
        'carId':11, 'carCategory': 'car', 'carColor': 'white', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'benz_front_kitti.png', 'type': 'Benz Front Kitti', \
        'carId':12, 'carCategory': 'car', 'carColor': 'white', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'ford_front_kitti.png', 'type': 'Ford Front Kitti', \
        'carId':13, 'carCategory': 'car', 'carColor': 'red', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'jeep_front_kitti.png', 'type': 'Jeep Front Kitti', \
        'carId':14, 'carCategory': 'jeep', 'carColor': 'red', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'jeep_cherokee_front_kitti.png', 'type': 'Jeep Cherokee Front Kitti', \
        'carId':15, 'carCategory': 'jeep', 'carColor': 'cream', 'carOrientation': FRONT_ORIENT },
        # NOTE(review): same image as carId 3 but a different carColor ('blue'
        # here vs 'green' there) — confirm which is correct.
        {'carPath': FORE_PATH + 'fiat_front_kitti.png', 'type': 'Fiat Front Kitti', \
        'carId':16, 'carCategory': 'car', 'carColor': 'blue', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'bmw_front_kitti.png', 'type': 'BMW Front Kitti', \
        'carId':17, 'carCategory': 'car', 'carColor': 'blue dark', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'suzuki_front_kitti.png', 'type': 'Suzuki Front Kitti', \
        'carId':18, 'carCategory': 'jeep', 'carColor': 'red dark', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'volkswagen_golf_front_kitti.png', 'type': 'Volkswagen Golf Kitti', \
        'carId':19, 'carCategory': 'car', 'carColor': 'blue light', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'toyota_new_prius_front_kitti.png', 'type': 'Toyota New Prius Kitti', \
        'carId':20, 'carCategory': 'car', 'carColor': 'gray', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'volvo_rear_kitti.png', 'type': 'Volvo Kitti', \
        'carId':21, 'carCategory': 'car', 'carColor': 'brown', 'carOrientation': BACK_ORIENT },
        {'carPath': FORE_PATH + 'porche_rear_kitti.png', 'type': 'Porche Kitti', \
        'carId':22, 'carCategory': 'car', 'carColor': 'white', 'carOrientation': BACK_ORIENT },
        {'carPath': FORE_PATH + 'corvette_front_kitti.png', 'type': 'Corvette Kitti', \
        'carId':23, 'carCategory': 'car', 'carColor': 'yellow', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'ford_truck_rear_kitti.png', 'type': 'Ford Kitti', \
        'carId':24, 'carCategory': 'truck', 'carColor': 'white', 'carOrientation': BACK_ORIENT },
        {'carPath': FORE_PATH + 'chevrolet_truck_rear_kitti.png', 'type': 'Chevrolet Kitti', \
        'carId':25, 'carCategory': 'truck', 'carColor': 'red', 'carOrientation': BACK_ORIENT },
        {'carPath': FORE_PATH + 'mercedes_rear_kitti.png', 'type': 'Mercedes Kitti', \
        'carId':26, 'carCategory': 'car', 'carColor': 'black', 'carOrientation': BACK_ORIENT },
        {'carPath': FORE_PATH + 'tesla_front_kitti.png', 'type': 'Tesla Kitti', \
        'carId':27, 'carCategory': 'car', 'carColor': 'black', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'mercedes_front_kitti.png', 'type': 'Mercedes Kitti', \
        'carId':28, 'carCategory': 'jeep', 'carColor': 'gray', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'mazda_front_kitti.png', 'type': 'Mazda Kitti', \
        'carId':29, 'carCategory': 'car', 'carColor': 'blue light', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'mazda_rear_kitti.png', 'type': 'Mazda Kitti', \
        'carId':30, 'carCategory': 'car', 'carColor': 'gray', 'carOrientation': BACK_ORIENT },
        {'carPath': FORE_PATH + 'scion_rear_kitti.png', 'type': 'Scion Kitti', \
        'carId':31, 'carCategory': 'car', 'carColor': 'orange', 'carOrientation': BACK_ORIENT },
        {'carPath': FORE_PATH + 'scion_front_kitti.png', 'type': 'Scion Kitti', \
        'carId':32, 'carCategory': 'car', 'carColor': 'orange', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'fiat_abarth_front_kitti.png', 'type': 'Fiat Abarth Kitti', \
        'carId':33, 'carCategory': 'car', 'carColor': 'orange', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'volkswagen_beetle_front_kitti.png', 'type': 'Volkswagen Beetle Kitti', \
        'carId':34, 'carCategory': 'car', 'carColor': 'red dark', 'carOrientation': FRONT_ORIENT },
        {'carPath': FORE_PATH + 'smart_rear_kitti.png', 'type': 'Smart Kitti', \
        'carId':35, 'carCategory': 'car', 'carColor': 'black', 'carOrientation': BACK_ORIENT },
        {'carPath': FORE_PATH + 'smart_front_kitti.png', 'type': 'Smart Kitti', \
        'carId':36, 'carCategory': 'car', 'carColor': 'blue light', 'carOrientation': FRONT_ORIENT }
    ]
    return roadImages, carImages
def getLib():
    """Build and return the Library instance backed by the image catalogues.

    Loads the road/car image metadata via loadImages() and wires it together
    with the foreground spaces file.
    """
    roads, cars = loadImages()
    return Library(roads, cars, FORE_SPACES_FILE)
|
{"hexsha": "ddcd3d7078a71e9d576abb986942471b247cc420", "size": 14974, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/data_augmentation/renderer/kittiLib.py", "max_stars_repo_name": "BehaviorPredictionTestingPlatform/VerifAI", "max_stars_repo_head_hexsha": "db05f3573c2e7d98c03029c1b4efca93e6b08edb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 109, "max_stars_repo_stars_event_min_datetime": "2019-04-29T03:30:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T03:06:26.000Z", "max_issues_repo_path": "examples/data_augmentation/renderer/kittiLib.py", "max_issues_repo_name": "BehaviorPredictionTestingPlatform/VerifAI", "max_issues_repo_head_hexsha": "db05f3573c2e7d98c03029c1b4efca93e6b08edb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2019-03-25T00:27:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T20:29:23.000Z", "max_forks_repo_path": "examples/data_augmentation/renderer/kittiLib.py", "max_forks_repo_name": "BehaviorPredictionTestingPlatform/VerifAI", "max_forks_repo_head_hexsha": "db05f3573c2e7d98c03029c1b4efca93e6b08edb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2019-02-12T20:50:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T11:25:06.000Z", "avg_line_length": 85.5657142857, "max_line_length": 134, "alphanum_fraction": 0.598704421, "include": true, "reason": "import numpy", "num_tokens": 3961}
|
[STATEMENT]
lemma prefixToLevel_auxPrefixToLevel_auxHigherLevel:
assumes "i \<le> j"
shows "prefixToLevel_aux a i k = prefixToLevel_aux (prefixToLevel_aux a j k) i k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. prefixToLevel_aux a i k = prefixToLevel_aux (prefixToLevel_aux a j k) i k
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
i \<le> j
goal (1 subgoal):
1. prefixToLevel_aux a i k = prefixToLevel_aux (prefixToLevel_aux a j k) i k
[PROOF STEP]
by (induct a arbitrary: k) auto
|
{"llama_tokens": 204, "file": "SATSolverVerification_Trail", "length": 2}
|
# Functions related to the counting bound.
module CountingBound

export countingBound, approxNumMaximalCliques1

"""
Counting bound, based on Shannon's argument.
m: number of edges
w: number of 'wires' -- that is, log2(number of functions),
    which is the number of bits needed to specify a function
Returns: average number of NAND gates (with unbounded fan-in)
    required to compute any of those functions.
    (This may not be an integer).
"""
function countingBound(m, w)
    # BigFloat arithmetic so that very large m and w don't overflow
    m = BigFloat(m)
    w = BigFloat(w)
    b = m - 0.5
    # the "-1" here is because this is the average, not the max.
    sqrt(2*w + b*b) - b - 1
end

"""
Shared implementation for the maximal-hyperclique estimate
(approxNumMaximalCliques1 and approxNumMaximalCliques2 previously
duplicated this code verbatim, differing only in progress printing).
k: number of vertices per hyperedge
r: number of vertices in the clique
n: vertices in the larger graph
verbose: when true, print progress after each expensive step
Returns: expected number of maximal hypercliques (approximate).
"""
function approxNumMaximalCliquesImpl(k, r, n, verbose)
    k = BigInt(k)
    r = BigInt(r)
    n = BigInt(n)
    one = BigInt(1)
    # probability that one of those is not covered by a larger clique
    a = one << binomial(r, k-one)
    verbose && print("computed a\n")
    pNumerator = (a-one) ^ (n-r)
    verbose && print("computed numerator\n")
    pDenominator = a ^ (n-r)
    verbose && print("computed denominator\n")
    # expected number of r-cliques, times the probability each is maximal;
    # the precision of the BigFloat result can be set by setprecision()
    (pNumerator * binomial(n, r)) /
        (pDenominator * (one << binomial(r, k)))
end

"""
Approximate number of maximal hypercliques of some size.
Note that k < r < n .
Also, the precision of what's returned can be set by setprecision().
k: number of vertices per hyperedge
r: number of vertices in the clique
n: vertices in the larger graph
Returns: expected number of maximal hypercliques. (This is
    approximate, but presumably it's more accurate for larger
    numbers).
"""
approxNumMaximalCliques1(k, r, n) = approxNumMaximalCliquesImpl(k, r, n, false)

"""
Approximate number of maximal hypercliques of some size
(alternate take): identical estimate to approxNumMaximalCliques1,
but prints progress after each expensive step.
Note that k < r < n .
Also, the precision of what's returned can be set by setprecision().
k: number of vertices per hyperedge
r: number of vertices in the clique
n: vertices in the larger graph
Returns: expected number of maximal hypercliques (approximate).
"""
approxNumMaximalCliques2(k, r, n) = approxNumMaximalCliquesImpl(k, r, n, true)

end
|
{"hexsha": "9fd463dd534844b94af84b2ecd0983e283da9eb5", "size": 3237, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "countingBound/julia/CountingBound.jl", "max_stars_repo_name": "joshtburdick/misc", "max_stars_repo_head_hexsha": "7bb103b4f9d850e3279eb675c6df420aa7b8da22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "countingBound/julia/CountingBound.jl", "max_issues_repo_name": "joshtburdick/misc", "max_issues_repo_head_hexsha": "7bb103b4f9d850e3279eb675c6df420aa7b8da22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "countingBound/julia/CountingBound.jl", "max_forks_repo_name": "joshtburdick/misc", "max_forks_repo_head_hexsha": "7bb103b4f9d850e3279eb675c6df420aa7b8da22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8285714286, "max_line_length": 71, "alphanum_fraction": 0.6793327155, "num_tokens": 938}
|
"""
Functions relating to processing the Takahashi et al HSC simulations, available at
http://cosmo.phys.hirosaki-u.ac.jp/takahasi/allsky_raytracing/.
"""
import signal
import os.path
import time
import traceback
import urllib.request
import warnings
from collections import namedtuple
import healpy as hp
import numpy as np
import scipy.stats
import likelihood
# Z slices: names, redshifts and URL
# from http://cosmo.phys.hirosaki-u.ac.jp/takahasi/allsky_raytracing/sourceplane_redshift.html
ZSlice = namedtuple('ZSlice', ['id', 'z'])
Z_SLICES = [
ZSlice('zs1', 0.0506),
ZSlice('zs2', 0.1023),
ZSlice('zs3', 0.1553),
ZSlice('zs4', 0.2097),
ZSlice('zs5', 0.2657),
ZSlice('zs6', 0.3233),
ZSlice('zs7', 0.3827),
ZSlice('zs8', 0.4442),
ZSlice('zs9', 0.5078),
ZSlice('zs10', 0.5739),
ZSlice('zs11', 0.6425),
ZSlice('zs12', 0.7140),
ZSlice('zs13', 0.7885),
ZSlice('zs14', 0.8664),
ZSlice('zs15', 0.9479),
ZSlice('zs16', 1.0334),
ZSlice('zs17', 1.1233),
ZSlice('zs18', 1.2179),
ZSlice('zs19', 1.3176),
ZSlice('zs20', 1.4230),
ZSlice('zs21', 1.5345),
ZSlice('zs22', 1.6528),
ZSlice('zs23', 1.7784),
ZSlice('zs24', 1.9121),
ZSlice('zs25', 2.0548),
ZSlice('zs26', 2.2072),
ZSlice('zs27', 2.3704),
ZSlice('zs28', 2.5455),
ZSlice('zs29', 2.7338),
ZSlice('zs30', 2.9367),
ZSlice('zs31', 3.1559),
ZSlice('zs32', 3.3932),
ZSlice('zs33', 3.6507),
ZSlice('zs34', 3.9309),
ZSlice('zs35', 4.2367),
ZSlice('zs36', 4.5712),
ZSlice('zs37', 4.9382),
ZSlice('zs38', 5.3423)
]
SLICE_URL = ('http://cosmo.phys.hirosaki-u.ac.jp/takahasi/allsky_raytracing/{subdir}/nres12/'
'allskymap_nres12{realisation_id}.{slice_id}.mag.dat')
NSIDE = 4096
class DLTimeOutError(Exception):
    """Raised when a slice download exceeds its allotted time."""
def timeout_download(*_):
    """Signal handler that aborts a stalled download.

    Accepts (and ignores) the handler's signal-number/frame arguments.

    Raises:
        DLTimeOutError: always, to interrupt the blocked download call.
    """
    raise DLTimeOutError('Download timed out')
def read_slice(input_path, verbose=True):
    """
    Read gamma1 and gamma2 from a slice of the Takahashi et al. HSC simulations.

    Based on the script by Toshiya Namikawa / Ken Osato available at
    http://cosmo.phys.hirosaki-u.ac.jp/takahasi/allsky_raytracing/nres12.html.

    The file appears to be a Fortran-style sequential binary (4-byte record
    markers around each record) holding nside/npix followed by kappa, gamma1
    and gamma2 as npix float32 values each -- layout inferred from the fixed
    seeks below.

    Args:
        input_path (str): Path to slice.
        verbose (bool, optional): Whether to output progress (default True).

    Returns:
        (1D numpy array, 1D numpy array): (gamma1, gamma2) healpix maps.
    """
    # Open file in binary mode; all offsets below are byte counts
    with open(input_path, 'rb') as f:

        # Skip 4 bytes (leading record marker), read nside (int32) and
        # npix (int64), then skip another 2*4 bytes of markers
        f.seek(4, 1)
        nside = np.squeeze(np.fromfile(f, dtype='int32', count=1))
        npix = np.squeeze(np.fromfile(f, dtype='int64', count=1))
        # The hard-coded seek offsets below are only valid for nside 4096
        assert nside == 4096  # This logic only works for nside 4096
        assert hp.pixelfunc.nside2npix(nside) == npix
        f.seek(8, 1)
        if verbose:
            print(f'nside: {nside} npix: {npix}')

        # Next npix*4 bytes are kappa, which is not needed here, so skip those
        f.seek(4 * npix, 1)

        # Skip 2*4 bytes of record markers between kappa and gamma1
        f.seek(8, 1)

        # Next npix*4 bytes are gamma1 (float32)
        if verbose:
            print('Reading gamma1...')
        gamma1 = np.fromfile(f, dtype='float32', count=npix)

        # Skip 2*4 bytes between gamma1 and gamma2
        f.seek(8, 1)

        # Next npix*4 bytes are gamma2 (float32)
        if verbose:
            print('Reading gamma2...')
        gamma2 = np.fromfile(f, dtype='float32', count=npix)

    return gamma1, gamma2
def process_realisation(realisation_id, z_mean, z_std, mask_path, slice_path, lmax, lmin, cl_save_path,
                        delete_after_use=True):
    """
    Process a single realisation of the Takhashi et al. HSC simulations.

    Download all slices, combine into tomographic bins and measure the power spectra.

    Args:
        realisation_id (string): ID of the realisation, from 'r000' to 'r107'.
        z_mean (list): List containing the mean redshift of each bin.
        z_std (float): Standard deviation of the Gaussian n(z) used for every bin.
        mask_path (str): Path to mask fits file, or None for full sky.
        slice_path (str): Template path to save each slice locally, including placeholders for {realisation_id}
                          and {slice_id}.
        lmax (int): Maximum l to measure.
        lmin (int): Minimum l to measure.
        cl_save_path (str): Path to save measured power spectra, including placeholder for {realisation_id}.
        delete_after_use (bool, optional): If True, delete each downloaded slice file once it has been used
                                           (default True).
    """
    # Set timeout function as alarm signal handler, so a stalled download raises DLTimeOutError
    signal.signal(signal.SIGALRM, timeout_download)

    # For each redshift bin, form a Gaussian distribution having mean of the relevant bin and a fixed std
    z_dists = [scipy.stats.norm(loc=mean, scale=z_std) for mean in z_mean]

    # Calculate weights for each of the 38 slices for each bin by evaluating the pdf at each slice redshift
    # Weights array is indexed [bin_idx, slice_idx]
    slice_z = np.array([z_slice.z for z_slice in Z_SLICES])
    slice_weights = np.array([z_dist.pdf(slice_z) for z_dist in z_dists])

    # Reweight so they sum to 1 per bin
    slice_weights /= np.sum(slice_weights, axis=1)[:, np.newaxis]
    assert np.allclose(np.sum(slice_weights, axis=1), 1)

    # Start with a zero spin-2 map of the relevant resolution, per bin
    # Maps are indexed [bin_idx, {0: gamma1, 1: gamma2}, healpix_pixel_idx]
    n_zbin = len(z_mean)
    npix = hp.pixelfunc.nside2npix(NSIDE)
    tomo_maps = np.zeros((n_zbin, 2, npix))

    # Loop over slices
    n_slice = len(Z_SLICES)
    for slice_idx, z_slice in enumerate(Z_SLICES):
        print(f'Processing slice {slice_idx + 1} / {n_slice} at {time.strftime("%c")}')

        # Work out remote and local paths.
        # Bug fix: format into per-slice locals instead of rebinding the slice_path parameter,
        # which destroyed the template's placeholders after the first slice; also the URL must
        # come from the SLICE_URL template (slice_url was previously referenced before assignment).
        this_slice_path = slice_path.format(realisation_id=realisation_id, slice_id=z_slice.id)
        realisation_no = int(realisation_id[1:])
        assert realisation_id == f'r{realisation_no:03d}'
        subdir = 'sub1' if realisation_no < 54 else 'sub2'
        this_slice_url = SLICE_URL.format(subdir=subdir, realisation_id=realisation_id, slice_id=z_slice.id)

        # Loop indefinitely to attempt to download file
        complete = False
        attempt_count = 1
        while not complete:
            try:
                print(f'Attempt {attempt_count} to download {this_slice_url} at {time.strftime("%c")}')
                signal.alarm(30 * 60)  # allow 30 mins for the download
                urllib.request.urlretrieve(this_slice_url, this_slice_path)
                signal.alarm(0)  # cancel the pending alarm once the download completes
                assert os.path.isfile(this_slice_path)
                print(f'Successfully downloaded to {this_slice_path} at {time.strftime("%c")}')
                complete = True
            except (urllib.error.ContentTooShortError, DLTimeOutError):
                # Truncated or timed-out download: warn and retry indefinitely
                warnings.warn(f'Exception while attempting download:\n{traceback.format_exc()}')
                print('Trying again...')
                attempt_count += 1

        # Read gamma1 and gamma2 from slice file
        print(f'Reading from {this_slice_path} at {time.strftime("%c")}')
        gamma1, gamma2 = read_slice(this_slice_path)

        # Add slice to the cumulative maps, weighted appropriately per bin
        this_slice_weights = slice_weights[:, slice_idx][:, np.newaxis]
        print(f'Adding to maps... 1/2 at {time.strftime("%c")}')
        tomo_maps[:, 0, :] += this_slice_weights * gamma1[np.newaxis, :]
        print(f'Adding to maps... 2/2 at {time.strftime("%c")}')
        tomo_maps[:, 1, :] += this_slice_weights * gamma2[np.newaxis, :]

        # Delete from disk
        if delete_after_use:
            print(f'Deleting {this_slice_path} at {time.strftime("%c")}')
            os.remove(this_slice_path)
            print(f'Deleted {this_slice_path} at {time.strftime("%c")}')
        else:
            print('Not deleting local copy')

        print(f'Finished slice {slice_idx + 1} / {n_slice} at {time.strftime("%c")}')

    # Load and apply mask
    if mask_path is not None:
        print(f'Loading mask at {time.strftime("%c")}')
        mask = hp.pixelfunc.ud_grade(hp.read_map(mask_path, dtype=float, verbose=False), NSIDE)
        print(f'Applying mask at {time.strftime("%c")}')
        tomo_maps *= mask[np.newaxis, np.newaxis, :]
    else:
        print('Not applying mask; using full sky')

    # Measure power spectra -
    # First convert to E-mode alms, indexed [zbin_idx, healpix_alm_idx]
    # For spin-2 SHTs in healpix a dummy 'T' field is required
    t_map = np.zeros(npix)
    alms = np.full((n_zbin, hp.sphtfunc.Alm.getsize(lmax)), np.nan, dtype=complex)
    for zbin_idx, (gamma1_map, gamma2_map) in enumerate(tomo_maps):
        print(f'Converting to alms {zbin_idx + 1} / {n_zbin} at {time.strftime("%c")}')
        _, e_alm, _ = hp.sphtfunc.map2alm([t_map, gamma1_map, gamma2_map], lmax=lmax, pol=True)
        alms[zbin_idx, :] = e_alm
    assert np.all(np.isfinite(alms))
    del tomo_maps  # free memory before the O(n_zbin^2) Cl computation
    print(f'Finished converting to alms at {time.strftime("%c")}')

    # Calculate all EE auto- and cross-spectra
    print(f'Calculating power spectra at {time.strftime("%c")}')
    cl = hp.sphtfunc.alm2cl(alms)[:, lmin:]
    del alms

    # Save Cls to disk (format into a local so the template parameter is left intact)
    print(f'Saving to disk at {time.strftime("%c")}')
    cl_file = cl_save_path.format(realisation_id=realisation_id)
    header = (f'Output from {__file__}.process_realisation for realisation_id {realisation_id}, nside {NSIDE}, '
              f'lmax {lmax}, lmin {lmin} at {time.strftime("%c")}')
    np.savez_compressed(cl_file, cl=cl, realisation_id=realisation_id, header=header)
    print(f'Saved {cl_file} at {time.strftime("%c")}')

    print(f'Done for realisation {realisation_id} at {time.strftime("%c")}')
def loop_realisations(first_real, last_real, z_mean, z_std, mask_path, slice_path, lmax, lmin, cl_save_path,
                      delete_after_use=True):
    """
    Process realisations first_real..last_real (inclusive), one after another in serial.

    Args:
        first_real (int): First realisation to process (between 0 and 107 inclusive).
        last_real (int): Last realisation to process (in same range).
        z_mean (list): List containing the mean redshift of each bin.
        z_std (float): Standard deviation of the Gaussian n(z) for each bin.
        mask_path (str): Path to mask fits file, or None for full sky.
        slice_path (str): Path to save each slice locally, including placeholders for {realisation_id}
                          and {slice_id}.
        lmax (int): Maximum l to measure.
        lmin (int): Minimum l to measure.
        cl_save_path (str): Path to save measured power spectra, including placeholder for {realisation_id}.
        delete_after_use (bool, optional): If True, delete each downloaded slice file once it has been
                                           used (default True).
    """
    assert 0 <= first_real <= last_real <= 107

    # Realisation IDs follow the 'rNNN' convention used by the remote archive
    for real_no in range(first_real, last_real + 1):
        realisation_id = f'r{real_no:03d}'
        print(f'Starting realisation {realisation_id} at {time.strftime("%c")}')
        process_realisation(realisation_id, z_mean, z_std, mask_path, slice_path, lmax, lmin, cl_save_path,
                            delete_after_use)
        print()

    print(f'All done at {time.strftime("%c")}')
def combine_sim_cl(input_filemask, n_real, n_zbin, lmax, lmin, output_path):
    """
    Stack the per-realisation Cl files into one combined .npz.

    Args:
        input_filemask (str): Path to input files, with {realisation} placeholder.
        n_real (int): Number of realisations.
        n_zbin (int): Number of redshift bins.
        lmax (int): Maximum l.
        lmin (int): Minimum l.
        output_path (str): Path to save combined .npz file.
    """
    # Combined array indexed [spec_idx, ell_idx, realisation_idx]
    n_spec = n_zbin * (n_zbin + 1) // 2
    n_ell = lmax - lmin + 1
    stacked = np.full((n_spec, n_ell, n_real), np.nan)

    # Pull the Cls out of every realisation file in turn
    for real in range(n_real):
        with np.load(input_filemask.format(realisation=real)) as data:
            # Sanity check: the file's own ID must match its slot
            assert data['realisation_id'] == f'r{real:03d}'
            stacked[:, :, real] = data['cl']
    assert np.all(np.isfinite(stacked))

    # Save to disk
    header = (f'Output from {__file__}.combine_sim_cl for input parameters input_filemask = {input_filemask}, '
              f'n_real = {n_real}, n_zbin = {n_zbin}, lmax = {lmax}, lmin = {lmin}, output_path = {output_path} '
              f'at {time.strftime("%c")}')
    np.savez_compressed(output_path, cls=stacked, header=header)
    print('Saved ' + output_path)
def gaussian_sim(cl_in_path, lmax_in, lmin_in, nside, mask_path, lmax_out, lmin_out, n_real, save_path):
    """
    Generate repeated realisations of a single Gaussian shear field and save all measured EE spectra.

    Args:
        cl_in_path (str): Path to input power spectrum.
        lmax_in (int): Maximum l to read in.
        lmin_in (int): If > 0, input power spectrum will be padded with zeros to start at l = 0.
        nside (int): Healpix map resolution to use.
        mask_path (str): Path to mask fits file, or None for full sky.
        lmax_out (int): Maximum l to measure.
        lmin_out (int): Minimum l to measure.
        n_real (int): Number of realisations to produce.
        save_path (str): Path to save all power spectra to.
    """
    # Input EE spectrum, zero-padded below lmin_in; healpy wants the full
    # (TT, EE, BB, TE) set, so the other three are zero placeholders
    print('Loading input Cls')
    cl_ee_in = np.concatenate((np.zeros(lmin_in), np.loadtxt(cl_in_path, max_rows=(lmax_in - lmin_in + 1))))
    input_cls = [np.zeros_like(cl_ee_in), cl_ee_in, np.zeros_like(cl_ee_in), np.zeros_like(cl_ee_in)]

    # Mask: either read and regrade to this nside, or unity everywhere
    if mask_path is None:
        print('Full sky')
        mask = np.ones(hp.pixelfunc.nside2npix(nside))
    else:
        print('Loading mask')
        mask = hp.pixelfunc.ud_grade(hp.fitsfunc.read_map(mask_path, dtype=float, verbose=False), nside)

    # Generate realisations, measuring the EE spectrum of each masked map
    n_ell_out = lmax_out - lmin_out + 1
    obs_cls = np.full((n_real, n_ell_out), np.nan)
    for i_real in range(n_real):
        print(f'Generating realisation {i_real + 1} / {n_real} at {time.strftime("%c")}')

        # Draw a spin-2 map (with a dummy T field) and apply the mask
        t_map, shear1_map, shear2_map = hp.sphtfunc.synfast(input_cls, nside, pol=True, new=True, verbose=False)
        shear1_map *= mask
        shear2_map *= mask

        # anafast returns (TT, EE, BB, ...); keep EE from lmin_out upwards
        obs_cls[i_real] = hp.sphtfunc.anafast([t_map, shear1_map, shear2_map], lmax=lmax_out, pol=True)[1, lmin_out:]
    assert np.all(np.isfinite(obs_cls))

    # Save all Cls to disk
    header = (f'Output from {__file__}.gaussian_sim for nside = {nside}, lmax_out = {lmax_out}, lmin_out = {lmin_out}, '
              f'cl_in_path = {cl_in_path}, lmax_in = {lmax_in}, lmin_in = {lmin_in}, mask_path = {mask_path}, '
              f'n_real = {n_real} at {time.strftime("%c")}')
    np.savez_compressed(save_path, obs_cls=obs_cls, header=header)
    print('Saved ' + save_path)
def get_obs(theory_cl_dir, nl_path, mixmat_path, binmat_path, cov_tot_path, n_zbin, lmax_in, lmin_in, lmax_obs,
            lmin_obs, n_bandpower, save_path):
    """
    Generate a mock observation by sampling from a Gaussian likelihood with the total covariance.

    Loads theory Cls, adds noise to the auto-spectra, applies the mixing and binning matrices,
    then draws one multivariate-Gaussian sample around the resulting bandpowers and saves it.

    Args:
        theory_cl_dir (str): Path to directory containing theory shear power spectra.
        nl_path (str): Path to noise power spectrum.
        mixmat_path (str): Path to mixing matrix.
        binmat_path (str): Path to binning matrix.
        cov_tot_path (str): Path to total covariance matrix.
        n_zbin (int): Number of redshift bins.
        lmax_in (int): Maximum l to use as input pre-mixing.
        lmin_in (int): Minimum l to use as input pre-mixing.
        lmax_obs (int): Maximum l to use in the observation.
        lmin_obs (int): Minimum l to use in the observation.
        n_bandpower (int): Number of bandpowers.
        save_path (str): Path to save observed bandpowers to.
    """
    n_spec = n_zbin * (n_zbin + 1) // 2
    n_ell_in = lmax_in - lmin_in + 1
    n_ell_obs = lmax_obs - lmin_obs + 1
    n_data = n_spec * n_bandpower

    # Theory spectra, indexed [spec_idx, ell_idx]
    cl_in = likelihood.load_shear_cls(n_zbin, theory_cl_dir, lmax_in)
    assert cl_in.shape == (n_spec, n_ell_in)

    # Noise goes onto the auto-spectra only (the first n_zbin rows)
    cl_in[:n_zbin, :] += np.loadtxt(nl_path, max_rows=n_ell_in)

    # Mixing matrix maps input ells to observed ells (stored starting from l = 0, so crop)
    with np.load(mixmat_path) as data:
        mixmat = data['mixmat_ee_to_ee'][lmin_obs:, lmin_in:]
    assert mixmat.shape == (n_ell_obs, n_ell_in)
    cl_mixed = cl_in @ mixmat.T
    assert cl_mixed.shape == (n_spec, n_ell_obs)

    # Binning matrix compresses observed ells into bandpowers
    with np.load(binmat_path) as data:
        binmat = data['pbl']
    assert binmat.shape == (n_bandpower, n_ell_obs)
    bp_exp = cl_mixed @ binmat.T
    assert bp_exp.shape == (n_spec, n_bandpower)

    # Flatten into the data vector that the covariance is defined over
    bp_exp = bp_exp.reshape(n_data)

    # Total covariance of the data vector
    with np.load(cov_tot_path) as data:
        cov_tot = data['cov']
    assert cov_tot.shape == (n_data, n_data)

    # One draw from N(bp_exp, cov_tot) is the mock observation; reshape back per-spectrum
    obs_bp = scipy.stats.multivariate_normal.rvs(mean=bp_exp, cov=cov_tot)
    obs_bp = obs_bp.reshape(n_spec, n_bandpower)

    # Save to disk
    header = ('Mock observation drawn from a multivariate Gaussian with the total covariance. '
              f'Output from {__file__}.get_obs for params theory_cl_dir = {theory_cl_dir}, nl_path = {nl_path}, '
              f'mixmat_path = {mixmat_path}, binmat_path = {binmat_path}, cov_tot_path = {cov_tot_path}, '
              f'n_zbin = {n_zbin}, lmax_in = {lmax_in}, lmin_in = {lmin_in}, lmax_obs = {lmax_obs}, '
              f'lmin_obs = {lmin_obs}, n_bandpower = {n_bandpower}, at {time.strftime("%c")}')
    np.savez_compressed(save_path, obs_bp=obs_bp, header=header)
    print('Saved ' + save_path)
|
{"hexsha": "aba46373aaf8067bf995f0236ee33b534372082c", "size": 18555, "ext": "py", "lang": "Python", "max_stars_repo_path": "shear_pcl_cov/simulation.py", "max_stars_repo_name": "robinupham/shear_pcl_cov", "max_stars_repo_head_hexsha": "6afc8bb48f714b87d4b7143575033b9723ef9df4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "shear_pcl_cov/simulation.py", "max_issues_repo_name": "robinupham/shear_pcl_cov", "max_issues_repo_head_hexsha": "6afc8bb48f714b87d4b7143575033b9723ef9df4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "shear_pcl_cov/simulation.py", "max_forks_repo_name": "robinupham/shear_pcl_cov", "max_forks_repo_head_hexsha": "6afc8bb48f714b87d4b7143575033b9723ef9df4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5131004367, "max_line_length": 120, "alphanum_fraction": 0.6458097548, "include": true, "reason": "import numpy,import scipy", "num_tokens": 5241}
|
[STATEMENT]
lemma w_addrs_vs_type_all_in_vs_type_all:
"(\<Union>ad \<in> w_addrs (vs_type_all P). {(ad, al)|al. \<exists>T. P \<turnstile> ad@al : T}) \<subseteq> {adal. vs_type_all P adal \<noteq> {}}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Union>ad\<in>w_addrs (vs_type_all P). {(ad, al) |al. \<exists>T. P \<turnstile> ad@al : T}) \<subseteq> {adal. vs_type_all P adal \<noteq> {}}
[PROOF STEP]
by(auto simp add: w_addrs_def vs_type_all_def intro: defval_conf)
|
{"llama_tokens": 218, "file": "JinjaThreads_MM_JMM_Typesafe", "length": 1}
|
# %load data_mafe_EI.py
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import pickle
import glob
from obspy import UTCDateTime, read, Trace, Stream
from scipy import signal
import pandas as pd
from haversine import haversine
sta_data = pd.read_excel("/sdd1/sta_list.xlsx", sheet_name="Data")
sta_num = len(sta_data['latitude'])
def extract_seed(st_ex, time_DATE, save_dir, i_sta, i_cha, i_date_e, i_time_e, extract_time, time_DATE_E, event_i, ENZ):
    """Cut a waveform window around an event time and look up the station coordinates.

    Scans the traces in ``st_ex`` for any whose time span covers ``time_DATE``
    and slices out a window starting 5 s before that time and spanning
    ``extract_time`` + 60 seconds (later matches overwrite earlier ones).
    The station coordinates come from the module-level ``sta_data`` table,
    matched on either the current or the old station code.

    Note: save_dir, i_cha, i_date_e, i_time_e, time_DATE_E, event_i and ENZ
    are accepted for call-site compatibility but are not used here.

    Returns:
        (data, latitude, longitude, depth, sample_rate) -- data is [0] when no
        trace covers the time, and the coordinates are 0.0 when the station
        code is not found (the code is printed in that case).
    """
    extract_data = [0]
    sample_rate = 0
    for trace in st_ex:
        if trace.stats.starttime < time_DATE < trace.stats.endtime:
            tr = trace.copy()
            sample_rate = tr.stats.sampling_rate
            # Window starts 5 s before the event time, in sample units
            first_sample = int((time_DATE - UTCDateTime(tr.stats.starttime) - 5) * sample_rate)
            last_sample = int(first_sample + sample_rate * (extract_time + 60))
            extract_data = tr.data[first_sample:last_sample]

    latitude = 0.0
    longitude = 0.0
    depth = 0.0
    for row in range(sta_num):
        if sta_data['station_code'][row] == i_sta or sta_data['code_old'][row] == i_sta:
            latitude = sta_data['latitude'][row]
            longitude = sta_data['longitude'][row]
            depth = sta_data['height'][row]
            break
        if row == sta_num - 1:
            # Station code not found anywhere in the table
            print(i_sta)

    return extract_data, latitude, longitude, depth, sample_rate
def mafe_EI(fname, event_index, upper_dir, odata_dir, idx):
    """Parse one arrival-list text file and archive per-station waveform snippets as .npz.

    The file is walked line by line with a running counter ``t``: iteration 3
    is assumed to carry the event date/time, iteration 5 reads four lines
    (latitude, longitude, depth, magnitude), and iterations >= 15 each read
    one per-station arrival line until a blank line or a line starting with
    'First' -- format assumed from the parsing below; TODO confirm against a
    sample file.  For each qualifying arrival the matching day-volume
    miniSEED files are globbed under ``odata_dir``, a window around the event
    time is cut via extract_seed(), and the three-component traces plus
    metadata are saved under ``upper_dir``/<event_index>/event/<idx>/.

    Args:
        fname (str): Path to the arrival listing file.
        event_index (int): Class label (0-3); also selects the output subdirectory.
        upper_dir (str): Root of the output directory tree.
        odata_dir (str): Root of the raw day-volume data archive.
        idx (int): Running event number used to name the per-event folder.
    """
    # NOTE(review): the file handle leaks if parsing raises (e.g. IndexError on a
    # malformed line); consider 'with open(...)' in a future behavioral change.
    t = 0
    extract_time = 10  # seconds of extra window beyond the fixed 60 s (see extract_seed)
    f = open(fname, 'r')
    # Output subdirectory is the class label (0-3)
    if event_index == 0:
        save_dir = upper_dir + '/0'
    elif event_index == 1:
        save_dir = upper_dir + '/1'
    elif event_index == 2:
        save_dir = upper_dir + '/2'
    else:
        save_dir = upper_dir + '/3'
    while True:
        # t counts loop iterations, not physical lines: the t == 5 branch below
        # consumes four lines in a single iteration
        t = t + 1
        if t == 3:
            # Event origin date/time; info_TIME_T is the time reformatted with underscores
            lines = f.readline().split()
            info_DATE_E = lines[2]
            info_TIME_E = lines[3]
            info_TIME_T = info_TIME_E[0:2] + '_' + info_TIME_E[3:5] + '_' + info_TIME_E[7:]
        elif t == 5:
            # Hypocentre block: four consecutive lines with the value in column 2
            lines = f.readline().split()
            info_LAT = float(lines[2])
            lines = f.readline().split()
            info_LONG = float(lines[2])
            lines = f.readline().split()
            info_DEPTH = float(lines[2])
            lines = f.readline().split()
            info_MAG = float(lines[2])
        elif t >= 15:
            # Per-station arrival lines; stop at an empty line or a 'First ...' footer
            lines = f.readline().split()
            if len(lines) == 0 or lines[0] == 'First':
                break
            else:
                # Only process events with a positive magnitude
                if info_MAG > 0.0:
                    info_STA = lines[0]
                    info_CHA = lines[1]
                    info_DATE = lines[3]
                    info_TIME = lines[4]
                    if len(lines) > 5:
                        # Only arrivals flagged '/ ml' (local-magnitude entry) are extracted
                        if lines[5] == '/' and lines[6] == 'ml':
                            ### Event Time
                            UTC_DATE_E = info_DATE_E + 'T' + info_TIME_E
                            time_DATE_E = UTCDateTime(UTC_DATE_E)
                            DATA_julday_E = time_DATE_E.julday
                            ### Station Time
                            UTC_DATE = info_DATE + 'T' + info_TIME
                            time_DATE = UTCDateTime(UTC_DATE)
                            DATA_julday = time_DATE.julday
                            ### mseed extract -- day volumes live in per-julian-day folders
                            myjulday_name = '%03d' % (DATA_julday)
                            mydata_path = os.path.join(odata_dir, myjulday_name)
                            # For HH/EL channels, first try the station's HG* channel
                            # triplet as well (presumably a co-located sensor -- confirm)
                            if info_CHA[0:2] == 'HH' or info_CHA[0:2] == 'EL':
                                info_CHA_E_HG = 'HGE'
                                info_CHA_N_HG = 'HGN'
                                info_CHA_Z_HG = 'HGZ'
                                myfile_name_E = '*.' + '%s.' % (info_STA) + '%s.' % (info_CHA_E_HG) + '*.*'
                                myfile_name_N = '*.' + '%s.' % (info_STA) + '%s.' % (info_CHA_N_HG) + '*.*'
                                myfile_name_Z = '*.' + '%s.' % (info_STA) + '%s.' % (info_CHA_Z_HG) + '*.*'
                                ext_file_E = glob.glob(os.path.join(mydata_path, myfile_name_E))
                                ext_file_N = glob.glob(os.path.join(mydata_path, myfile_name_N))
                                ext_file_Z = glob.glob(os.path.join(mydata_path, myfile_name_Z))
                                info_MAG_D = float(lines[7])
                                info_LABEL = event_index
                                event_info = {'event_DATE': info_DATE_E,
                                              'event_TIME': info_TIME_E,
                                              'event_LAT': info_LAT,
                                              'event_LONG': info_LONG,
                                              'event_DEPTH': info_DEPTH,
                                              'STA': info_STA,
                                              'CHA': info_CHA_E_HG,
                                              'station_DATE': info_DATE,
                                              'station_TIME': info_TIME,
                                              'MAG': info_MAG,
                                              'MAG_D': info_MAG_D,
                                              'LABEL_E': info_LABEL,
                                              'LABEL_W': 'none',
                                              'LABEL_D': 'none'
                                              }
                                if ext_file_E != [] and ext_file_N != [] and ext_file_Z != []:
                                    st_E = read(ext_file_E[0])  ## file reading
                                    st_N = read(ext_file_N[0])  ## file reading
                                    st_Z = read(ext_file_Z[0])  ## file reading
                                    tr_E, sta_LAT, sta_LONG, sta_DEPTH, sample_rate = extract_seed(st_E, time_DATE_E,
                                                                                                   save_dir, info_STA,
                                                                                                   info_CHA_E_HG,
                                                                                                   info_DATE_E,
                                                                                                   info_TIME_T,
                                                                                                   extract_time,
                                                                                                   time_DATE_E,
                                                                                                   event_info, 'E')
                                    tr_N, _, _, _, _ = extract_seed(st_N, time_DATE_E, save_dir, info_STA,
                                                                    info_CHA_N_HG, info_DATE_E, info_TIME_T,
                                                                    extract_time, time_DATE_E, event_info, 'N')
                                    tr_Z, _, _, _, _ = extract_seed(st_Z, time_DATE_E, save_dir, info_STA,
                                                                    info_CHA_Z_HG, info_DATE_E, info_TIME_T,
                                                                    extract_time, time_DATE_E, event_info, 'Z')
                                    # NOTE(review): these 'continue's skip the native-channel
                                    # extraction below for this arrival line as well
                                    if sta_LAT == 0.0 or sta_LONG == 0.0:
                                        continue
                                    if sample_rate <= 20:
                                        continue
                                    # Keep only full-length windows: (extract_time + 60) s
                                    # at 100 samples/s = 7000 samples
                                    if len(tr_E) == 7000 and len(tr_N) == 7000 and len(tr_Z) == 7000:
                                        event_info['tr_E'] = tr_E
                                        event_info['tr_N'] = tr_N
                                        event_info['tr_Z'] = tr_Z
                                        event_info['sta_LAT'] = sta_LAT
                                        event_info['sta_LONG'] = sta_LONG
                                        event_info['sta_DEPTH'] = sta_DEPTH
                                        name_seed = event_info['STA'] + '_' + event_info['CHA']  ### mseed filename
                                        event_save_dir = save_dir + '/event/' + '{0:03}'.format(idx) + '/'
                                        if not os.path.isdir(event_save_dir):
                                            os.makedirs(event_save_dir)
                                        gen_name1 = event_save_dir + name_seed + '.npz'
                                        np.savez(gen_name1, **event_info)
                            # Build the full E/N/Z triplet for the arrival's own channel code
                            if info_CHA[2] == 'E':
                                info_CHA_E = info_CHA
                                info_CHA_N = info_CHA[0:2] + 'N'
                                info_CHA_Z = info_CHA[0:2] + 'Z'
                            elif info_CHA[2] == 'N':
                                info_CHA_N = info_CHA
                                info_CHA_E = info_CHA[0:2] + 'E'
                                info_CHA_Z = info_CHA[0:2] + 'Z'
                            else:
                                info_CHA_Z = info_CHA
                                info_CHA_E = info_CHA[0:2] + 'E'
                                info_CHA_N = info_CHA[0:2] + 'N'
                            myfile_name_E = '*.' + '%s.' % (info_STA) + '%s.' % (info_CHA_E) + '*.*'
                            myfile_name_N = '*.' + '%s.' % (info_STA) + '%s.' % (info_CHA_N) + '*.*'
                            myfile_name_Z = '*.' + '%s.' % (info_STA) + '%s.' % (info_CHA_Z) + '*.*'
                            ext_file_E = glob.glob(os.path.join(mydata_path, myfile_name_E))
                            ext_file_N = glob.glob(os.path.join(mydata_path, myfile_name_N))
                            ext_file_Z = glob.glob(os.path.join(mydata_path, myfile_name_Z))
                            info_MAG_D = float(lines[7])
                            info_LABEL = event_index
                            event_info = {'event_DATE': info_DATE_E,
                                          'event_TIME': info_TIME_E,
                                          'event_LAT': info_LAT,
                                          'event_LONG': info_LONG,
                                          'event_DEPTH': info_DEPTH,
                                          'STA': info_STA,
                                          'CHA': info_CHA_E,
                                          'station_DATE': info_DATE,
                                          'station_TIME': info_TIME,
                                          'MAG': info_MAG,
                                          'MAG_D': info_MAG_D,
                                          'LABEL_E': info_LABEL,
                                          'LABEL_W': 'none',
                                          'LABEL_D': 'none'
                                          }
                            if ext_file_E != [] and ext_file_N != [] and ext_file_Z != []:
                                st_E = read(ext_file_E[0])  ## file reading
                                st_N = read(ext_file_N[0])  ## file reading
                                st_Z = read(ext_file_Z[0])  ## file reading
                                tr_E, sta_LAT, sta_LONG, sta_DEPTH, sample_rate = extract_seed(st_E, time_DATE_E,
                                                                                               save_dir, info_STA,
                                                                                               info_CHA_E, info_DATE_E,
                                                                                               info_TIME_T,
                                                                                               extract_time,
                                                                                               time_DATE_E, event_info,
                                                                                               'E')
                                tr_N, _, _, _, _ = extract_seed(st_N, time_DATE_E, save_dir, info_STA, info_CHA_N,
                                                                info_DATE_E, info_TIME_T,
                                                                extract_time, time_DATE_E, event_info, 'N')
                                tr_Z, _, _, _, _ = extract_seed(st_Z, time_DATE_E, save_dir, info_STA, info_CHA_Z,
                                                                info_DATE_E, info_TIME_T,
                                                                extract_time, time_DATE_E, event_info, 'Z')
                                # Skip arrivals with unknown station coords or low sample rate
                                if sta_LAT == 0.0 or sta_LONG == 0.0:
                                    continue
                                if sample_rate <= 20:
                                    continue
                                # Keep only full-length 7000-sample windows
                                if len(tr_E) == 7000 and len(tr_N) == 7000 and len(tr_Z) == 7000:
                                    event_info['tr_E'] = tr_E
                                    event_info['tr_N'] = tr_N
                                    event_info['tr_Z'] = tr_Z
                                    event_info['sta_LAT'] = sta_LAT
                                    event_info['sta_LONG'] = sta_LONG
                                    event_info['sta_DEPTH'] = sta_DEPTH
                                    name_seed = event_info['STA'] + '_' + event_info['CHA']  ### mseed filename
                                    event_save_dir = save_dir + '/event/' + '{0:03}'.format(idx) + '/'
                                    if not os.path.isdir(event_save_dir):
                                        os.makedirs(event_save_dir)
                                    gen_name1 = event_save_dir + name_seed + '.npz'
                                    np.savez(gen_name1, **event_info)
        else:
            # All other header iterations just consume and discard one line
            temp = f.readline()
    f.close()
    return
# Input listing root, the three source class directories, and the output/raw-data roots
current_path = u"/sde1/2018_list"
each_dir = [u"1_국내지진", u"3_미소지진", u"4_인공지진"]
upper_dir = '/sdd1/Eq2020_multisite_0925/2018Data2/'
odata_dir = "/media/super/4af6ecf9-b4dc-4ffd-9da0-0f1667a69e01/2018/"

# Make sure the four class output directories (labels 0-3) exist
dir1_name = upper_dir + '/0'
dir2_name = upper_dir + '/1'
dir3_name = upper_dir + '/2'
dir4_name = upper_dir + '/3'
for class_dir in (dir1_name, dir2_name, dir3_name, dir4_name):
    if not os.path.isdir(class_dir):
        os.makedirs(os.path.join(class_dir))

# Reference julian day for the year being processed
# NOTE: this assignment shadows any 'time' module import at module level
UTC_REF = '2018-01-01T00:00:01'
time = UTCDateTime(UTC_REF)
Ref_julday = time.julday

# Walk each class directory; every file whose basename contains 'arrival'
# is parsed as one event listing by mafe_EI
event_index = -1
for dir_i in each_dir:
    data_path = os.path.join(current_path, dir_i)
    print(data_path)
    event_index += 1
    idx = 0
    for path, dirnames, files in os.walk(data_path):
        for filename in files:
            if 'arrival' in os.path.splitext(filename)[0]:
                print("%s/%s" % (path, filename))
                mafe_EI(os.path.join(path, filename), event_index, upper_dir, odata_dir, idx)
                idx += 1
|
{"hexsha": "0a0d0add54b2d543e05a7844a3152ab279e38674", "size": 15324, "ext": "py", "lang": "Python", "max_stars_repo_path": "generate_data/24h_Data_2018.py", "max_stars_repo_name": "GT-KIM/GCN_seismic_event_classification", "max_stars_repo_head_hexsha": "84b9116a00c08687e3ec17f9b616e61527efcf00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generate_data/24h_Data_2018.py", "max_issues_repo_name": "GT-KIM/GCN_seismic_event_classification", "max_issues_repo_head_hexsha": "84b9116a00c08687e3ec17f9b616e61527efcf00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-29T20:33:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-17T00:28:10.000Z", "max_forks_repo_path": "generate_data/24h_Data_2018.py", "max_forks_repo_name": "GT-KIM/GCN_seismic_event_classification", "max_forks_repo_head_hexsha": "84b9116a00c08687e3ec17f9b616e61527efcf00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.4322580645, "max_line_length": 120, "alphanum_fraction": 0.3882798225, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3026}
|
import pandas as pd
import datetime
import numpy as np
from tpau_gtfsutilities.gtfs.gtfssingleton import gtfs as gtfs_singleton
from tpau_gtfsutilities.helpers.datetimehelpers import seconds_since_zero
from tpau_gtfsutilities.helpers.datetimehelpers import seconds_to_military
def get_trip_duration_seconds(gtfs_override=None, trip_bounds=None):
    """Return a 'duration_seconds' series, indexed by trip_id, holding each
    trip's duration as end_time - start_time in seconds since midnight.

    trip_bounds may be supplied to avoid recomputation; otherwise it is
    derived from the active feed (or from gtfs_override, if given).
    """
    gtfs = gtfs_override if gtfs_override else gtfs_singleton
    if trip_bounds is None:
        trip_bounds = get_trip_bounds(gtfs_override=gtfs)
    start_seconds = trip_bounds['start_time'].transform(seconds_since_zero)
    end_seconds = trip_bounds['end_time'].transform(seconds_since_zero)
    return (end_seconds - start_seconds).rename('duration_seconds')
def get_trip_bounds(gtfs_override=None, original=False):
    """Return a dataframe of trip time bounds.

    index: trip_id
    columns: start_time, end_time -- the military-time strings of the
        earliest / latest non-blank arrival_time in stop_times for that trip.
    """
    gtfs = gtfs_override if gtfs_override else gtfs_singleton
    stop_times = gtfs.get_table('stop_times', original=original)

    # Filter unusable arrival times and compute seconds-since-midnight ONCE.
    # The previous implementation re-built a frame and re-derived seconds
    # inside a Python aggregator for every group (and used the deprecated
    # `idxmin(axis=0)` form); a single vectorized pass is equivalent and
    # far cheaper.
    arrivals = stop_times[stop_times['arrival_time'].notnull()]
    arrivals = arrivals[arrivals['arrival_time'] != '']
    arrivals = arrivals.assign(
        seconds_since_zero=arrivals['arrival_time'].transform(seconds_since_zero)
    )

    grouped_seconds = arrivals.groupby('trip_id')['seconds_since_zero']

    # idxmin/idxmax give the stop_times row label of the earliest/latest
    # arrival per trip; map back to the original military-time string so the
    # returned values match stop_times exactly (first occurrence wins on
    # ties, as before).
    min_rows = grouped_seconds.idxmin()
    max_rows = grouped_seconds.idxmax()

    min_arrival_times = pd.Series(
        arrivals.loc[min_rows, 'arrival_time'].values,
        index=min_rows.index, name='start_time')
    max_arrival_times = pd.Series(
        arrivals.loc[max_rows, 'arrival_time'].values,
        index=max_rows.index, name='end_time')

    return pd.concat([min_arrival_times, max_arrival_times], axis=1)
def get_trips_extended(gtfs_override=None, original=False):
    """Return the trips table augmented with calendar, time, frequency,
    route and agency information.

    Columns added (when the source tables are present):
        start_date, end_date and day-of-week flags (from calendar)
        start_time, end_time (trip bounds) and duration_seconds
        is_repeating (True if the trip appears in frequencies.txt)
        route_type, agency_id, agency_name
    """
    gtfs = gtfs_override if gtfs_override else gtfs_singleton
    trips_extended = gtfs.get_table('trips', original=original)

    if gtfs.has_table('calendar', check_empty=False):
        calendar = gtfs.get_table('calendar', original=original)
        calendar_info = calendar[
            [
                'start_date', 'end_date',
                'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday',
            ]
        ]
        trips_extended = trips_extended.reset_index() \
            .merge(calendar_info, how='left', on='service_id').set_index('trip_id')

    trip_bounds = get_trip_bounds(gtfs_override=gtfs, original=original)
    trips_extended = trips_extended.merge(trip_bounds, left_index=True, right_index=True)
    trips_extended = trips_extended.merge(
        get_trip_duration_seconds(gtfs_override=gtfs, trip_bounds=trip_bounds),
        left_index=True, right_index=True)

    # Bug fix: only fetch the frequencies table after confirming it exists.
    # It was previously fetched unconditionally (with the has_table() guard
    # applied only afterwards), which can fail on feeds without
    # frequencies.txt.
    if gtfs.has_table('frequencies'):
        frequencies = gtfs.get_table('frequencies', original=original)
        trips_extended['is_repeating'] = \
            trips_extended.index.to_series().isin(frequencies['trip_id'])
    else:
        trips_extended['is_repeating'] = False

    # agency information
    agency = gtfs.get_table('agency', original=original)
    if not gtfs.is_multiagency():
        # Single-agency feed: broadcast the lone agency row to every trip.
        agency_row = agency.iloc[0]
        trips_extended['agency_name'] = agency_row['agency_name']
        if 'agency_id' in agency.columns:
            trips_extended['agency_id'] = agency_row['agency_id']
        else:
            trips_extended['agency_id'] = ''
    else:
        # Multi-agency feed: resolve each trip's agency through its route.
        route_agencies = gtfs.get_table('routes', original=original)['agency_id']
        trips_extended = trips_extended.reset_index()
        trips_extended = trips_extended.merge(
            route_agencies,
            how='left',
            left_on='route_id',
            right_index=True
        )
        trips_extended = trips_extended.merge(
            agency[['agency_id', 'agency_name']],
            how='left',
            left_on='agency_id',
            right_on='agency_id'
        )
        trips_extended = trips_extended.set_index('trip_id')

    route_types = gtfs.get_table('routes', original=original)['route_type']
    trips_extended = trips_extended \
        .reset_index() \
        .merge(
            route_types,
            how='left',
            left_on='route_id',
            right_index=True
        ).set_index('trip_id')

    return trips_extended
def get_unwrapped_repeating_trips(gtfs_override=None):
    """Return a dataframe with a row for every occurring trip represented
    by frequencies.txt (empty dataframe if the feed has no frequencies).

    The result has the frequencies columns with these changes/additions:
        trip_order: sequence of trip in frequency (starting at 0)
        start_time -> frequency_start
        end_time -> frequency_end
        trip_start, trip_end: individual trip bounds (military time)
    """
    gtfs = gtfs_override if gtfs_override else gtfs_singleton

    if not gtfs.has_table('frequencies'):
        return pd.DataFrame()

    frequencies = gtfs.get_table('frequencies')
    # Bug fix: rename on a copy instead of `inplace=True`. The inplace rename
    # mutated the dataframe returned by gtfs.get_table, silently renaming the
    # shared 'frequencies' table for every later reader (as did the
    # trip_order column assignment below).
    frequencies = frequencies.rename(
        columns={'start_time': 'frequency_start', 'end_time': 'frequency_end'})

    # Expand into a row per occurring trip: number of departures in the
    # window is ceil((window length) / headway), and trip_order enumerates
    # them starting at 0.
    frequencies['trip_order'] = np.ceil( \
        (frequencies['frequency_end'].transform(seconds_since_zero) - frequencies['frequency_start'].transform(seconds_since_zero)) \
        / frequencies['headway_secs'].transform(int) \
    ).transform(lambda x: list(range(int(x))))
    unwrapped_frequencies = frequencies.explode('trip_order')

    # Each trip starts headway_secs after the previous one.
    trip_start_kwargs = { \
        'start_time' : \
            lambda x: \
                x['frequency_start'].transform(seconds_since_zero) + \
                x['trip_order'] * x['headway_secs']
    }
    unwrapped_frequencies = unwrapped_frequencies.assign(**trip_start_kwargs)

    # End time = start time + the trip's scheduled duration.
    unwrapped_frequencies = unwrapped_frequencies.merge(get_trip_duration_seconds(gtfs_override=gtfs), left_on='trip_id', right_index=True)
    unwrapped_frequencies = unwrapped_frequencies.assign( \
        end_time=unwrapped_frequencies['start_time'] + unwrapped_frequencies['duration_seconds'] \
    )

    # Convert seconds back to military-time strings for output.
    unwrapped_frequencies['start_time'] = unwrapped_frequencies['start_time'].transform(seconds_to_military)
    unwrapped_frequencies['end_time'] = unwrapped_frequencies['end_time'].transform(seconds_to_military)

    unwrapped_frequencies = unwrapped_frequencies \
        .rename(columns={ 'start_time': 'trip_start', 'end_time': 'trip_end' }) \
        .drop('duration_seconds', axis='columns')

    return unwrapped_frequencies
|
{"hexsha": "74840f874b7cc8375e22417015acadc33919c579", "size": 7351, "ext": "py", "lang": "Python", "max_stars_repo_path": "tpau_gtfsutilities/gtfs/methods/helpers/triphelpers.py", "max_stars_repo_name": "anniekfifer/tpau-gtfsutils", "max_stars_repo_head_hexsha": "a022d4c8465b7f736023ecc294ff0d7d0201b0e9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-25T23:33:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-25T23:33:01.000Z", "max_issues_repo_path": "tpau_gtfsutilities/gtfs/methods/helpers/triphelpers.py", "max_issues_repo_name": "anniekfifer/tpau-gtfsutils", "max_issues_repo_head_hexsha": "a022d4c8465b7f736023ecc294ff0d7d0201b0e9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tpau_gtfsutilities/gtfs/methods/helpers/triphelpers.py", "max_forks_repo_name": "anniekfifer/tpau-gtfsutils", "max_forks_repo_head_hexsha": "a022d4c8465b7f736023ecc294ff0d7d0201b0e9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4869109948, "max_line_length": 148, "alphanum_fraction": 0.687117399, "include": true, "reason": "import numpy", "num_tokens": 1654}
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
# This code contains snippets of code from
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
# published under the following license and copyright:
# BSD 3-Clause License
#
# Copyright (c) 2007-2020 The scikit-learn developers.
# All rights reserved.
import numbers
from warnings import catch_warnings, simplefilter, warn
from abc import ABCMeta, abstractmethod
import numpy as np
import threading
from .._ensemble import (BaseEnsemble, _partition_estimators, _get_n_samples_subsample,
_accumulate_prediction, _accumulate_prediction_var, _accumulate_prediction_and_var,
_accumulate_oob_preds)
from ..utilities import check_inputs, cross_product
from ..tree._tree import DTYPE, DOUBLE
from ._base_grftree import GRFTree
from joblib import Parallel, delayed
from scipy.sparse import hstack as sparse_hstack
from sklearn.utils import check_random_state, compute_sample_weight
from sklearn.utils.validation import _check_sample_weight, check_is_fitted
from sklearn.utils import check_X_y
import scipy.stats
from scipy.special import erfc
__all__ = ["BaseGRF"]
MAX_INT = np.iinfo(np.int32).max
# =============================================================================
# Base Generalized Random Forest
# =============================================================================
class BaseGRF(BaseEnsemble, metaclass=ABCMeta):
"""
Base class for Genearlized Random Forests for solving linear moment equations of
the form::
E[J * theta(x) - A | X = x] = 0
where J is an (d, d) random matrix, A is an (d, 1) random vector and theta(x)
is a local parameter to be estimated, which might contain both relevant and
nuisance parameters.
Warning: This class should not be used directly. Use derived classes
instead.
"""
    def __init__(self,
                 n_estimators=100, *,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=10,
                 min_samples_leaf=5,
                 min_weight_fraction_leaf=0.,
                 min_var_fraction_leaf=None,
                 min_var_leaf_on_val=False,
                 max_features="auto",
                 min_impurity_decrease=0.,
                 max_samples=.45,
                 min_balancedness_tol=.45,
                 honest=True,
                 inference=True,
                 fit_intercept=True,
                 subforest_size=4,
                 n_jobs=-1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        """Store all constructor parameters verbatim as attributes.

        Following the sklearn estimator contract, no validation or
        computation happens here; parameters are checked in ``fit``.

        NOTE: ``estimator_params`` lists the attribute names forwarded to
        each GRFTree. It includes ``min_var_leaf``, which is NOT a
        constructor argument -- it is derived from ``min_var_fraction_leaf``
        inside ``fit`` before trees are built.
        """
        super().__init__(
            base_estimator=GRFTree(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "min_var_leaf", "min_var_leaf_on_val",
                              "max_features", "min_impurity_decrease", "honest",
                              "min_balancedness_tol",
                              "random_state"))

        # Tree-construction parameters (forwarded to each GRFTree).
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.min_var_fraction_leaf = min_var_fraction_leaf
        self.min_var_leaf_on_val = min_var_leaf_on_val
        self.max_features = max_features
        self.min_impurity_decrease = min_impurity_decrease
        self.min_balancedness_tol = min_balancedness_tol
        self.honest = honest
        # Forest-level behavior.
        self.inference = inference
        self.fit_intercept = fit_intercept
        self.subforest_size = subforest_size
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
        self.warm_start = warm_start
        self.max_samples = max_samples
    def _get_alpha_and_pointJ(self, X, T, y, **kwargs):
        """ Abstract hook: must be implemented by the child class. Given input variables
        X, T, y and any auxiliary variables passed as keyword only, should be calculating
        the point-wise random vector A and the point-wise jacobian random variable J of
        the linear moment equation for every sample in the input samples.

        Returns
        -------
        A : array of shape (n_samples, n_outputs)
            The A part of the moment equation for each sample
        J : array of shape (n_samples, n_outputs * n_outputs)
            The J matrix part of the moment equation, flattened in Fortran-contiguous format.
        """
        # Intentionally a no-op in the base class; `fit` calls this and will
        # fail on the subsequent unpacking if a subclass does not override it.
        pass
    def _get_n_outputs_decomposition(self, X, T, y, **kwargs):
        """ Abstract hook: must be implemented by the child class. Given input variables
        X, T, y and any auxiliary variables passed as keyword only, should return a tuple
        (n_outputs, n_relevant_outputs), which determines how many parameters is the moment
        estimating and what prefix of these parameters are the relevant ones that we care about.

        Returns
        -------
        n_outputs : int
            The number of parameters we are estimating
        n_relevant_outputs : int
            The length of the prefix of parameters that we care about (remainder are nuisance)
        """
        # Intentionally a no-op in the base class; `fit` relies on the
        # subclass override to populate n_outputs_ / n_relevant_outputs_.
        pass
def apply(self, X):
"""
Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
Returns
-------
X_leaves : ndarray of shape (n_samples, n_estimators)
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")(
delayed(tree.apply)(X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""
Return the decision path in the forest.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
Returns
-------
indicator : sparse matrix of shape (n_samples, n_nodes)
Return a node indicator matrix where non zero elements indicates
that the samples goes through the nodes. The matrix is of CSR
format.
n_nodes_ptr : ndarray of shape (n_estimators + 1,)
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend='threading')(
delayed(tree.decision_path)(X, check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
    def fit(self, X, T, y, *, sample_weight=None, **kwargs):
        """
        Build a forest of trees from the training set (X, T, y) and any other auxiliary variables.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training input samples. Internally, its dtype will be converted
            to ``dtype=np.float64``.
        T : array-like of shape (n_samples, n_treatments)
            The treatment vector for each sample
        y : array-like of shape (n_samples,) or (n_samples, n_outcomes)
            The outcome values for each sample.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node.
        **kwargs : dictionary of array-like items of shape (n_samples, d_var)
            Auxiliary random variables that go into the moment function (e.g. instrument, censoring etc)
            Any of these variables will be passed on as is to the `get_pointJ` and
            `get_alpha` method of the children classes.

        Returns
        -------
        self : object
        """
        # TODO: support freq_weight and sample_var
        y, T, X, _ = check_inputs(y, T, X, W=None, multi_output_T=True, multi_output_Y=True)

        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X, DOUBLE)

        # Remap output
        n_samples, self.n_features_ = X.shape

        y = np.atleast_1d(y)
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_y_ = y.shape[1]

        T = np.atleast_1d(T)
        if T.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            T = np.reshape(T, (-1, 1))

        # Build the augmented target: [y | alpha | flattened pointJ], so each
        # tree receives every component of the moment equation in one matrix.
        alpha, pointJ = self._get_alpha_and_pointJ(X, T, y, **kwargs)
        self.n_outputs_, self.n_relevant_outputs_ = self._get_n_outputs_decomposition(X, T, y, **kwargs)
        yaug = np.hstack([y, alpha, pointJ])

        if getattr(yaug, "dtype", None) != DOUBLE or not yaug.flags.contiguous:
            yaug = np.ascontiguousarray(yaug, dtype=DOUBLE)

        if getattr(X, "dtype", None) != DTYPE:
            X = X.astype(DTYPE)

        # Get subsample sample size
        n_samples_subsample = _get_n_samples_subsample(
            n_samples=n_samples,
            max_samples=self.max_samples
        )

        # Converting `min_var_fraction_leaf` to an absolute `min_var_leaf` that the GRFTree can handle
        if self.min_var_fraction_leaf is None:
            self.min_var_leaf = None
        elif (not isinstance(self.min_var_fraction_leaf, numbers.Real)) or (not (0 < self.min_var_fraction_leaf <= 1)):
            msg = "`min_var_fraction_leaf` must be in range (0, 1) but got value {}"
            raise ValueError(msg.format(self.min_var_fraction_leaf))
        else:
            # We calculate the min eigenvalue proxy that each criterion is considering
            # on the overall mean jacobian, to determine the absolute level of `min_var_leaf`
            jac = np.mean(pointJ, axis=0).reshape((self.n_outputs_, self.n_outputs_))
            min_var = np.min(np.abs(np.diag(jac)))
            if self.criterion == 'mse':
                # For 'mse', also consider every 2x2 sub-determinant proxy.
                for i in range(self.n_outputs_):
                    for j in range(self.n_outputs_):
                        if j != i:
                            det = np.sqrt(np.abs(jac[i, i] * jac[j, j] - jac[i, j] * jac[j, i]))
                            if det < min_var:
                                min_var = det
            self.min_var_leaf = min_var * self.min_var_fraction_leaf

        # Check parameters
        self._validate_estimator()

        random_state = check_random_state(self.random_state)
        # We re-initialize the subsample_random_seed_ only if we are not in warm_start mode or
        # if this is the first `fit` call of the warm start mode.
        if (not self.warm_start) or (not hasattr(self, 'subsample_random_seed_')):
            self.subsample_random_seed_ = random_state.randint(MAX_INT)
        else:
            random_state.randint(MAX_INT)  # just advance random_state
        subsample_random_state = check_random_state(self.subsample_random_seed_)

        if (self.warm_start and hasattr(self, 'inference_') and (self.inference != self.inference_)):
            raise ValueError("Parameter inference cannot be altered in between `fit` "
                             "calls when `warm_start=True`.")
        self.inference_ = self.inference
        self.warm_start_ = self.warm_start

        if not self.warm_start or not hasattr(self, "estimators_"):
            # Free allocated memory, if any
            self.estimators_ = []
            self.slices_ = []
            # the below are needed to replicate randomness of subsampling when warm_start=True
            self.slices_n_samples_ = []
            self.slices_n_samples_subsample_ = []
            self.n_samples_ = []
            self.n_samples_subsample_ = []

        n_more_estimators = self.n_estimators - len(self.estimators_)

        if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'len(estimators_)=%d when warm_start==True'
                             % (self.n_estimators, len(self.estimators_)))
        elif n_more_estimators == 0:
            warn("Warm-start fitting without increasing n_estimators does not "
                 "fit new trees.")
        else:
            if self.inference:
                # Inference requires complete subforests of size `subforest_size`
                # built on half-samples, so validate the relevant parameters here.
                if not isinstance(self.subforest_size, numbers.Integral):
                    raise ValueError("Parameter `subforest_size` must be "
                                     "an integer but got value {}.".format(self.subforest_size))
                if self.subforest_size < 2:
                    raise ValueError("Parameter `subforest_size` must be at least 2 if `inference=True`, "
                                     "but got value {}".format(self.subforest_size))
                if not (n_more_estimators % self.subforest_size == 0):
                    raise ValueError("The number of estimators to be constructed must be divisible "
                                     "the `subforest_size` parameter. Asked to build `n_estimators={}` "
                                     "with `subforest_size={}`.".format(n_more_estimators, self.subforest_size))
                if n_samples_subsample > n_samples // 2:
                    if isinstance(self.max_samples, numbers.Integral):
                        raise ValueError("Parameter `max_samples` must be in [1, n_samples // 2], "
                                         "if `inference=True`. "
                                         "Got values n_samples={}, max_samples={}".format(n_samples, self.max_samples))
                    else:
                        raise ValueError("Parameter `max_samples` must be in (0, .5], if `inference=True`. "
                                         "Got value {}".format(self.max_samples))

            if self.warm_start and len(self.estimators_) > 0:
                # We draw from the random state to get the random state we
                # would have got if we hadn't used a warm_start.
                random_state.randint(MAX_INT, size=len(self.estimators_))

            trees = [self._make_estimator(append=False,
                                          random_state=random_state).init()
                     for i in range(n_more_estimators)]

            if self.inference:
                if self.warm_start:
                    # Advancing subsample_random_state. Assumes each prior fit call has the same number of
                    # samples at fit time. If not then this would not exactly replicate a single batch execution,
                    # but would still advance randomness enough so that tree subsamples will be different.
                    for sl, n_, ns_ in zip(self.slices_, self.slices_n_samples_, self.slices_n_samples_subsample_):
                        subsample_random_state.choice(n_, n_ // 2, replace=False)
                        for _ in range(len(sl)):
                            subsample_random_state.choice(n_ // 2, ns_, replace=False)

                # Generating indices a priori before parallelism ended up being orders of magnitude
                # faster than how sklearn does it. The reason is that random samplers do not release the
                # gil it seems.
                n_groups = n_more_estimators // self.subforest_size
                new_slices = np.array_split(np.arange(len(self.estimators_),
                                                      len(self.estimators_) + n_more_estimators),
                                            n_groups)
                s_inds = []
                # NOTE: trees in the same slice (subforest) share a half-sample;
                # each tree then subsamples within that half-sample.
                for sl in new_slices:
                    half_sample_inds = subsample_random_state.choice(n_samples, n_samples // 2, replace=False)
                    s_inds.extend([half_sample_inds[subsample_random_state.choice(n_samples // 2,
                                                                                  n_samples_subsample,
                                                                                  replace=False)]
                                   for _ in range(len(sl))])
            else:
                if self.warm_start:
                    # Advancing subsample_random_state. Assumes each prior fit call has the same number of
                    # samples at fit time. If not then this would not exactly replicate a single batch execution,
                    # but would still advance randomness enough so that tree subsamples will be different.
                    for _, n_, ns_ in zip(range(len(self.estimators_)), self.n_samples_, self.n_samples_subsample_):
                        subsample_random_state.choice(n_, ns_, replace=False)
                new_slices = []
                s_inds = [subsample_random_state.choice(n_samples, n_samples_subsample, replace=False)
                          for _ in range(n_more_estimators)]

            # Parallel loop: we prefer the threading backend as the Cython code
            # for fitting the trees is internally releasing the Python GIL
            # making threading more efficient than multiprocessing in
            # that case. However, for joblib 0.12+ we respect any
            # parallel_backend contexts set at a higher level,
            # since correctness does not rely on using threads.
            trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend='threading')(
                delayed(t.fit)(X[s], yaug[s], self.n_y_, self.n_outputs_, self.n_relevant_outputs_,
                               sample_weight=sample_weight[s] if sample_weight is not None else None,
                               check_input=False)
                for t, s in zip(trees, s_inds))

            # Collect newly grown trees
            self.estimators_.extend(trees)
            self.n_samples_.extend([n_samples] * len(trees))
            self.n_samples_subsample_.extend([n_samples_subsample] * len(trees))
            self.slices_.extend(list(new_slices))
            self.slices_n_samples_.extend([n_samples] * len(new_slices))
            self.slices_n_samples_subsample_.extend([n_samples_subsample] * len(new_slices))

        return self
def get_subsample_inds(self,):
""" Re-generate the example same sample indices as those at fit time using same pseudo-randomness.
"""
check_is_fitted(self)
subsample_random_state = check_random_state(self.subsample_random_seed_)
if self.inference_:
s_inds = []
for sl, n_, ns_ in zip(self.slices_, self.slices_n_samples_, self.slices_n_samples_subsample_):
half_sample_inds = subsample_random_state.choice(n_, n_ // 2, replace=False)
s_inds.extend([half_sample_inds[subsample_random_state.choice(n_ // 2, ns_, replace=False)]
for _ in range(len(sl))])
return s_inds
else:
return [subsample_random_state.choice(n_, ns_, replace=False)
for n_, ns_ in zip(self.n_samples_, self.n_samples_subsample_)]
def feature_importances(self, max_depth=4, depth_decay_exponent=2.0):
"""
The feature importances based on the amount of parameter heterogeneity they create.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized) total heterogeneity that the feature
creates. For each tree and for each split that the feature was chosen adds::
parent_weight * (left_weight * right_weight)
* mean((value_left[k] - value_right[k])**2) / parent_weight**2
to the importance of the feature. Each such quantity is also weighted by the depth of the split.
These importances are normalized at the tree level and then averaged across trees.
Parameters
----------
max_depth : int, default=4
Splits of depth larger than `max_depth` are not used in this calculation
depth_decay_exponent: double, default=2.0
The contribution of each split to the total score is re-weighted by 1 / (1 + `depth`)**2.0.
Returns
-------
feature_importances_ : ndarray of shape (n_features,)
Normalized total parameter heterogeneity inducing importance of each feature
"""
check_is_fitted(self)
all_importances = Parallel(n_jobs=self.n_jobs, backend='threading')(
delayed(tree.feature_importances)(
max_depth=max_depth, depth_decay_exponent=depth_decay_exponent)
for tree in self.estimators_ if tree.tree_.node_count > 1)
if not all_importances:
return np.zeros(self.n_features_, dtype=np.float64)
all_importances = np.mean(all_importances,
axis=0, dtype=np.float64)
return all_importances / np.sum(all_importances)
    @property
    def feature_importances_(self):
        # Convenience alias mirroring sklearn's attribute-style API; equivalent
        # to calling `feature_importances()` with its default arguments.
        return self.feature_importances()
    def _validate_X_predict(self, X):
        """
        Validate X whenever one tries to predict, apply, and other predict methods."""
        check_is_fitted(self)
        # Delegate to the first fitted tree so dtype/shape validation is
        # identical to what each tree expects at prediction time.
        return self.estimators_[0]._validate_X_predict(X, check_input=True)
def predict_tree_average_full(self, X):
""" Return the fitted local parameters for each X, i.e. theta(X). This
method simply returns the average of the parameters estimated by each tree. `predict_full`
should be preferred over `pred_tree_average_full`, as it performs a more stable averaging across
trees.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
Returns
-------
theta(X) : array-like of shape (n_samples, n_outputs)
The estimated relevant parameters for each row of X
"""
check_is_fitted(self)
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
# Parallel loop
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose, backend='threading', require="sharedmem")(
delayed(_accumulate_prediction)(e.predict_full, X, [y_hat], lock)
for e in self.estimators_)
y_hat /= len(self.estimators_)
return y_hat
def predict_tree_average(self, X):
""" Return the prefix of relevant fitted local parameters for each X, i.e. theta(X)[1..n_relevant_outputs].
This method simply returns the average of the parameters estimated by each tree. `predict`
should be preferred over `pred_tree_average`, as it performs a more stable averaging across
trees.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
Returns
-------
theta(X)[1, .., n_relevant_outputs] : array-like of shape (n_samples, n_relevant_outputs)
The estimated relevant parameters for each row of X
"""
y_hat = self.predict_tree_average_full(X)
if self.n_relevant_outputs_ == self.n_outputs_:
return y_hat
return y_hat[:, :self.n_relevant_outputs_]
    def predict_moment_and_var(self, X, parameter, slice=None, parallel=True):
        """ Return the value of the conditional expected moment vector at each sample and
        for the given parameter estimate for each sample::

            M(x; theta(x)) := E[J | X=x] theta(x) - E[A | X=x]

        where conditional expectations are estimated based on the forest weights, i.e.::

            M_tree(x; theta(x)) := (1/ |leaf(x)|) sum_{val sample i in leaf(x)} w[i] (J[i] theta(x) - A[i])
            M(x; theta(x) = (1/n_trees) sum_{trees} M_tree(x; theta(x))

        where w[i] is the sample weight (1.0 if sample_weight is None), as well as the variance of the local
        moment vector across trees::

            Var(M_tree(x; theta(x))) = (1/n_trees) sum_{trees} M_tree(x; theta(x)) @ M_tree(x; theta(x)).T

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float64``.
        parameter : array-like of shape (n_samples, n_outputs)
            An estimate of the parameter theta(x) for each sample x in X
        slice : list of int or None, default=None
            If not None, then only the trees with index in slice, will be used to calculate the mean
            and the variance.
        parallel : bool , default=True
            Whether the averaging should happen using parallelism or not. Parallelism adds some overhead
            but makes it faster with many trees.

        Returns
        -------
        moment : array-like of shape (n_samples, n_outputs)
            The estimated conditional moment M(x; theta(x)) for each sample x in X
        moment_var : array-like of shape (n_samples, n_outputs)
            The variance of the conditional moment Var(M_tree(x; theta(x))) across trees for each sample x
        """
        check_is_fitted(self)
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        if slice is None:
            slice = np.arange(len(self.estimators_))
        # Both accumulators are updated in place by the helper under `lock`
        # (shared-memory threading backend), then averaged over the slice.
        moment_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
        moment_var_hat = np.zeros((X.shape[0], self.n_outputs_, self.n_outputs_), dtype=np.float64)
        lock = threading.Lock()
        if parallel:
            n_jobs, _, _ = _partition_estimators(len(slice), self.n_jobs)
            verbose = self.verbose
            # Parallel loop
            Parallel(n_jobs=n_jobs, verbose=verbose, backend='threading', require="sharedmem")(
                delayed(_accumulate_prediction_and_var)(self.estimators_[t].predict_moment, X,
                                                        [moment_hat], [moment_var_hat], lock,
                                                        parameter)
                for t in slice)
        else:
            # Sequential fallback: same accumulation, no thread pool overhead.
            [_accumulate_prediction_and_var(self.estimators_[t].predict_moment, X,
                                            [moment_hat], [moment_var_hat], lock,
                                            parameter)
             for t in slice]
        moment_hat /= len(slice)
        moment_var_hat /= len(slice)
        return moment_hat, moment_var_hat
def predict_alpha_and_jac(self, X, slice=None, parallel=True):
""" Return the value of the conditional jacobian E[J | X=x] and the conditional alpha E[A | X=x]
using the forest as kernel weights, i.e.::
alpha(x) = (1/n_trees) sum_{trees} (1/ |leaf(x)|) sum_{val sample i in leaf(x)} w[i] A[i]
jac(x) = (1/n_trees) sum_{trees} (1/ |leaf(x)|) sum_{val sample i in leaf(x)} w[i] J[i]
where w[i] is the sample weight (1.0 if sample_weight is None).
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
slice : list of int or None, default=None
If not None, then only the trees with index in slice, will be used to calculate the mean
and the variance.
parallel : bool , default=True
Whether the averaging should happen using parallelism or not. Parallelism adds some overhead
but makes it faster with many trees.
Returns
-------
alpha : array-like of shape (n_samples, n_outputs)
The estimated conditional A, alpha(x) for each sample x in X
jac : array-like of shape (n_samples, n_outputs, n_outputs)
The estimated conditional J, jac(x) for each sample x in X
"""
check_is_fitted(self)
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
if slice is None:
slice = np.arange(len(self.estimators_))
n_jobs = 1
verbose = 0
if parallel:
n_jobs, _, _ = _partition_estimators(len(slice), self.n_jobs)
verbose = self.verbose
alpha_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
jac_hat = np.zeros((X.shape[0], self.n_outputs_**2), dtype=np.float64)
# Parallel loop
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=verbose, backend='threading', require="sharedmem")(
delayed(_accumulate_prediction)(self.estimators_[t].predict_alpha_and_jac, X, [alpha_hat, jac_hat], lock)
for t in slice)
alpha_hat /= len(slice)
jac_hat /= len(slice)
return alpha_hat, jac_hat.reshape((-1, self.n_outputs_, self.n_outputs_))
    def _predict_point_and_var(self, X, full=False, point=True, var=False, project=False, projector=None):
        """ An internal private method that coordinates all prediction functionality and tries to share
        as much computation between different predict methods to avoid re-computation and re-spawning of
        parallel executions.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float64``.
        full : bool, default=False
            Whether to return the full estimated parameter or only the relevant part
        point : bool, default=True
            Whether to return the point estimate theta(x)
        var : bool, default=False
            Whether to return the co-variance of the point estimate V(theta(x))
        project : bool, default=False
            Whether to project the point estimate using an inner product with a projector, and also
            return the variance of the projection
        projector : array-like of shape (n_samples, n_outputs)
            The projection vector for each sample. The point estimate theta(x) for each sample will
            be projected and return the inner produce <theta(x), projector(x)> for each sample x.
            Also the variance information will be about the inner product as opposed to the parameter
            theta(x).
        Returns
        -------
        point : array-like of shape (n_samples, x)
            The point estimate of the parameter theta(x) or its inner product with projector(x) for each
            sample x in X.
            If `point=False`, this return value is omitted. If `project=True`, then `x=1`. If `project=False`
            and `full=True`, then `x=n_outputs`. If `project=False` and `full=False`, then `x=n_relevant_outputs`.
        var : array-like of shape (n_samples, x, x) or (n_samples, 1)
            The covariance of the parameter theta(x) or its inner product with projector(x) for each sample x in X.
            If `var=False`, this return value is omitted. If `project=True`, then return is of shape (n_samples, 1).
            If `project=False` and `full=True`, then `x=n_outputs`. If `project=False` and `full=False`,
            then `x=n_relevant_outputs`.
        """
        # Solve the local moment equations theta(x) = J(x)^{-1} alpha(x) per sample,
        # using a pseudo-inverse for numerical robustness.
        alpha, jac = self.predict_alpha_and_jac(X)
        invjac = np.linalg.pinv(jac)
        parameter = np.einsum('ijk,ik->ij', invjac, alpha)
        if var:
            if not self.inference:
                raise AttributeError("Inference not available. Forest was initiated with `inference=False`.")
            # Variance estimation: recompute the moment per bag of trees (slice) and
            # measure the spread of the per-bag moments across bags.
            slices = self.slices_
            n_jobs, _, _ = _partition_estimators(len(slices), self.n_jobs)
            moment_bags, moment_var_bags = zip(*Parallel(n_jobs=n_jobs, verbose=self.verbose, backend='threading')(
                delayed(self.predict_moment_and_var)(X, parameter, slice=sl, parallel=False) for sl in slices))
            moment = np.mean(moment_bags, axis=0)
            trans_moment_bags = np.moveaxis(moment_bags, 0, -1)
            # E_bags[mu_bag mu_bag'] : per-sample second moment of the bag moments.
            sq_between = np.einsum('tij,tjk->tik', trans_moment_bags,
                                   np.transpose(trans_moment_bags, (0, 2, 1))) / len(slices)
            # (E_bags[mu_bag])(E_bags[mu_bag])' : outer product of the mean bag moment.
            moment_sq = np.einsum('tij,tjk->tik',
                                  moment.reshape(moment.shape + (1,)),
                                  moment.reshape(moment.shape[:-1] + (1, moment.shape[-1])))
            # Between-bag covariance of the moment vector.
            var_between = sq_between - moment_sq
            # Propagate moment covariance to parameter space: J^{-1} V (J^{-1})'.
            pred_cov = np.einsum('ijk,ikm->ijm', invjac,
                                 np.einsum('ijk,ikm->ijm', var_between, np.transpose(invjac, (0, 2, 1))))
            if project:
                # Quadratic form p' Cov p for each sample's projector p.
                pred_var = np.einsum('ijk,ikm->ijm', projector.reshape((-1, 1, projector.shape[1])),
                                     np.einsum('ijk,ikm->ijm', pred_cov,
                                               projector.reshape((-1, projector.shape[1], 1))))[:, 0, 0]
            else:
                pred_var = np.diagonal(pred_cov, axis1=1, axis2=2)
            #####################
            # Variance correction
            #####################
            # Subtract the average within bag variance. This ends up being equal to the
            # overall (E_{all trees}[moment^2] - E_bags[ E[mean_bag_moment]^2 ]) / sizeof(bag).
            # The negative part is just sq_between.
            var_total = np.mean(moment_var_bags, axis=0)
            correction = (var_total - sq_between) / (len(slices[0]) - 1)
            # Map the correction to parameter space, same sandwich as above.
            pred_cov_correction = np.einsum('ijk,ikm->ijm', invjac,
                                            np.einsum('ijk,ikm->ijm', correction, np.transpose(invjac, (0, 2, 1))))
            if project:
                pred_var_correction = np.einsum('ijk,ikm->ijm', projector.reshape((-1, 1, projector.shape[1])),
                                                np.einsum('ijk,ikm->ijm', pred_cov_correction,
                                                          projector.reshape((-1, projector.shape[1], 1))))[:, 0, 0]
            else:
                pred_var_correction = np.diagonal(pred_cov_correction, axis1=1, axis2=2)
            # Objective bayes debiasing for the diagonals where we know a-prior they are positive
            # The off diagonals we have no objective prior, so no correction is applied.
            naive_estimate = pred_var - pred_var_correction
            se = np.maximum(pred_var, pred_var_correction) * np.sqrt(2.0 / len(slices))
            # Clip the standard error away from zero to avoid division by zero.
            zstat = naive_estimate / np.clip(se, 1e-10, np.inf)
            numerator = np.exp(- (zstat**2) / 2) / np.sqrt(2.0 * np.pi)
            denominator = 0.5 * erfc(-zstat / np.sqrt(2.0))
            pred_var_corrected = naive_estimate + se * numerator / denominator
            # Finally correcting the pred_cov or pred_var
            if project:
                pred_var = pred_var_corrected
            else:
                pred_cov = pred_cov - pred_cov_correction
                # Replace only the diagonal with the debiased variances.
                for t in range(self.n_outputs_):
                    pred_cov[:, t, t] = pred_var_corrected[:, t]
        # Assemble the requested combination of point estimate and/or variance.
        if project:
            if point:
                pred = np.sum(parameter * projector, axis=1)
                if var:
                    return pred, pred_var
                else:
                    return pred
            else:
                return pred_var
        else:
            n_outputs = self.n_outputs_ if full else self.n_relevant_outputs_
            if point and var:
                return (parameter[:, :n_outputs],
                        pred_cov[:, :n_outputs, :n_outputs],)
            elif point:
                return parameter[:, :n_outputs]
            else:
                return pred_cov[:, :n_outputs, :n_outputs]
def predict_full(self, X, interval=False, alpha=0.05):
""" Return the fitted local parameters for each x in X, i.e. theta(x).
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
interval : bool, default=False
Whether to return a confidence interval too
alpha : float in (0, 1), default=0.05
The confidence level of the confidence interval. Returns a symmetric (alpha/2, 1-alpha/2)
confidence interval.
Returns
-------
theta(x) : array-like of shape (n_samples, n_outputs)
The estimated relevant parameters for each row x of X
lb(x), ub(x) : array-like of shape (n_samples, n_outputs)
The lower and upper end of the confidence interval for each parameter. Return value is omitted if
`interval=False`.
"""
if interval:
point, pred_var = self._predict_point_and_var(X, full=True, point=True, var=True)
lb, ub = np.zeros(point.shape), np.zeros(point.shape)
for t in range(self.n_outputs_):
lb[:, t] = scipy.stats.norm.ppf(alpha / 2, loc=point[:, t], scale=np.sqrt(pred_var[:, t, t]))
ub[:, t] = scipy.stats.norm.ppf(1 - alpha / 2, loc=point[:, t], scale=np.sqrt(pred_var[:, t, t]))
return point, lb, ub
return self._predict_point_and_var(X, full=True, point=True, var=False)
def predict(self, X, interval=False, alpha=0.05):
""" Return the prefix of relevant fitted local parameters for each x in X,
i.e. theta(x)[1..n_relevant_outputs].
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
interval : bool, default=False
Whether to return a confidence interval too
alpha : float in (0, 1), default=0.05
The confidence level of the confidence interval. Returns a symmetric (alpha/2, 1-alpha/2)
confidence interval.
Returns
-------
theta(X)[1, .., n_relevant_outputs] : array-like of shape (n_samples, n_relevant_outputs)
The estimated relevant parameters for each row of X
lb(x), ub(x) : array-like of shape (n_samples, n_relevant_outputs)
The lower and upper end of the confidence interval for each parameter. Return value is omitted if
`interval=False`.
"""
if interval:
y_hat, lb, ub = self.predict_full(X, interval=interval, alpha=alpha)
if self.n_relevant_outputs_ == self.n_outputs_:
return y_hat, lb, ub
return (y_hat[:, :self.n_relevant_outputs_],
lb[:, :self.n_relevant_outputs_], ub[:, :self.n_relevant_outputs_])
else:
y_hat = self.predict_full(X, interval=False)
if self.n_relevant_outputs_ == self.n_outputs_:
return y_hat
return y_hat[:, :self.n_relevant_outputs_]
def predict_interval(self, X, alpha=0.05):
""" Return the confidence interval for the relevant fitted local parameters for each x in X,
i.e. theta(x)[1..n_relevant_outputs].
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
alpha : float in (0, 1), default=0.05
The confidence level of the confidence interval. Returns a symmetric (alpha/2, 1-alpha/2)
confidence interval.
Returns
-------
lb(x), ub(x) : array-like of shape (n_samples, n_relevant_outputs)
The lower and upper end of the confidence interval for each parameter. Return value is omitted if
`interval=False`.
"""
_, lb, ub = self.predict(X, interval=True, alpha=alpha)
return lb, ub
def predict_and_var(self, X):
""" Return the prefix of relevant fitted local parameters for each x in X,
i.e. theta(x)[1..n_relevant_outputs] and their covariance matrix.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
Returns
-------
theta(x)[1, .., n_relevant_outputs] : array-like of shape (n_samples, n_relevant_outputs)
The estimated relevant parameters for each row of X
var(theta(x)) : array-like of shape (n_samples, n_relevant_outputs, n_relevant_outputs)
The covariance of theta(x)[1, .., n_relevant_outputs]
"""
return self._predict_point_and_var(X, full=False, point=True, var=True)
def predict_var(self, X):
""" Return the covariance matrix of the prefix of relevant fitted local parameters
for each x in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
Returns
-------
var(theta(x)) : array-like of shape (n_samples, n_relevant_outputs, n_relevant_outputs)
The covariance of theta(x)[1, .., n_relevant_outputs]
"""
return self._predict_point_and_var(X, full=False, point=False, var=True)
def prediction_stderr(self, X):
""" Return the standard deviation of each coordinate of the prefix of relevant fitted local parameters
for each x in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
Returns
-------
std(theta(x)) : array-like of shape (n_samples, n_relevant_outputs)
The standard deviation of each theta(x)[i] for i in {1, .., n_relevant_outputs}
"""
return np.sqrt(np.diagonal(self.predict_var(X), axis1=1, axis2=2))
def _check_projector(self, X, projector):
""" validate the projector parameter
"""
X, projector = check_X_y(X, projector, multi_output=True, y_numeric=True)
if projector.ndim == 1:
projector = projector.reshape((-1, 1))
if self.n_outputs_ > self.n_relevant_outputs_:
projector = np.hstack([projector,
np.zeros((projector.shape[0], self.n_outputs_ - self.n_relevant_outputs_))])
return X, projector
def predict_projection_and_var(self, X, projector):
""" Return the inner product of the prefix of relevant fitted local parameters for each x in X,
i.e. theta(x)[1..n_relevant_outputs], with a projector vector projector(x), i.e.::
mu(x) := <theta(x)[1..n_relevant_outputs], projector(x)>
as well as the variance of mu(x).
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
projector : array-like of shape (n_samples, n_relevant_outputs)
The projector vector for each sample x in X
Returns
-------
mu(x) : array-like of shape (n_samples, 1)
The estimated inner product of the relevant parameters with the projector for each row x of X
var(mu(x)) : array-like of shape (n_samples, 1)
The variance of the estimated inner product
"""
X, projector = self._check_projector(X, projector)
return self._predict_point_and_var(X, full=False, point=True, var=True,
project=True, projector=projector)
def predict_projection(self, X, projector):
""" Return the inner product of the prefix of relevant fitted local parameters for each x in X,
i.e. theta(x)[1..n_relevant_outputs], with a projector vector projector(x), i.e.::
mu(x) := <theta(x)[1..n_relevant_outputs], projector(x)>
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
projector : array-like of shape (n_samples, n_relevant_outputs)
The projector vector for each sample x in X
Returns
-------
mu(x) : array-like of shape (n_samples, 1)
The estimated inner product of the relevant parameters with the projector for each row x of X
"""
X, projector = self._check_projector(X, projector)
return self._predict_point_and_var(X, full=False, point=True, var=False,
project=True, projector=projector)
def predict_projection_var(self, X, projector):
""" Return the variance of the inner product of the prefix of relevant fitted local parameters
for each x in X, i.e. theta(x)[1..n_relevant_outputs], with a projector vector projector(x), i.e.::
Var(mu(x)) for mu(x) := <theta(x)[1..n_relevant_outputs], projector(x)>
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float64``.
projector : array-like of shape (n_samples, n_relevant_outputs)
The projector vector for each sample x in X
Returns
-------
var(mu(x)) : array-like of shape (n_samples, 1)
The variance of the estimated inner product
"""
X, projector = self._check_projector(X, projector)
return self._predict_point_and_var(X, full=False, point=False, var=True,
project=True, projector=projector)
    def oob_predict(self, Xtrain):
        """ Returns the relevant output predictions for each of the training data points, when
        only trees where that data point was not used are incorporated. This method is not
        available if the estimator was trained with `warm_start=True`.
        Parameters
        ----------
        Xtrain : (n_training_samples, n_features) matrix
            Must be the same exact X matrix that was passed to the forest at fit time.
        Returns
        -------
        oob_preds : (n_training_samples, n_relevant_outputs) matrix
            The out-of-bag predictions of the relevant output parameters for each of the training points
        """
        if self.warm_start_:
            raise AttributeError("`oob_predict` is not available when "
                                 "the estimator was fitted with `warm_start=True`")
        # avoid storing the output of every estimator by summing them here
        alpha_hat = np.zeros((Xtrain.shape[0], self.n_outputs_), dtype=np.float64)
        jac_hat = np.zeros((Xtrain.shape[0], self.n_outputs_**2), dtype=np.float64)
        # counts[i] tracks how many trees left sample i out-of-bag.
        counts = np.zeros((Xtrain.shape[0],), dtype=np.intp)
        subsample_inds = self.get_subsample_inds()
        # Parallel loop: workers add each tree's alpha/jacobian contributions into the
        # shared accumulators; the lock guards the shared-memory updates.
        lock = threading.Lock()
        Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend='threading', require="sharedmem")(
            delayed(_accumulate_oob_preds)(tree, Xtrain, sinds, alpha_hat, jac_hat, counts, lock)
            for tree, sinds in zip(self.estimators_, subsample_inds))
        # Average only over samples that were out-of-bag for at least one tree.
        pos_count = (counts > 0)
        alpha_hat[pos_count] /= counts[pos_count].reshape((-1, 1))
        jac_hat[pos_count] /= counts[pos_count].reshape((-1, 1))
        # Solve theta = J^{-1} alpha per sample and keep only the relevant outputs.
        invjac = np.linalg.pinv(jac_hat.reshape((-1, self.n_outputs_, self.n_outputs_)))
        oob_preds = np.einsum('ijk,ik->ij', invjac, alpha_hat)[:, :self.n_relevant_outputs_]
        # Samples that were never out-of-bag get NaN predictions.
        oob_preds[~pos_count] = np.nan
        return oob_preds
|
{"hexsha": "bc53298ba88b34313b7d2b847b584af073ea3d75", "size": 49979, "ext": "py", "lang": "Python", "max_stars_repo_path": "econml/grf/_base_grf.py", "max_stars_repo_name": "gregorybchris/EconML", "max_stars_repo_head_hexsha": "420dfceba9f8b689450379442edffd214e9e8d6a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-26T16:05:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T16:05:27.000Z", "max_issues_repo_path": "econml/grf/_base_grf.py", "max_issues_repo_name": "QPC-database/EconML", "max_issues_repo_head_hexsha": "0cec5ab3d4cb5d681f7d8600fb261d93ca528020", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "econml/grf/_base_grf.py", "max_forks_repo_name": "QPC-database/EconML", "max_forks_repo_head_hexsha": "0cec5ab3d4cb5d681f7d8600fb261d93ca528020", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.1957569913, "max_line_length": 120, "alphanum_fraction": 0.5862062066, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 10678}
|
[STATEMENT]
lemma extended_predist_nonneg [simp, mono_intros]:
"extended_predist x y \<ge> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> extended_predist x y
[PROOF STEP]
unfolding extended_predist_def min_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> real_of_ereal (if esqrt (extended_Gromov_distance x y) \<le> eexp (ereal (- epsilonG TYPE('a)) * extended_Gromov_product_at basepoint x y) then esqrt (extended_Gromov_distance x y) else eexp (ereal (- epsilonG TYPE('a)) * extended_Gromov_product_at basepoint x y))
[PROOF STEP]
by (auto intro: real_of_ereal_pos)
|
{"llama_tokens": 248, "file": "Gromov_Hyperbolicity_Gromov_Boundary", "length": 2}
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import unittest
from federatedml.ftl.plain_ftl import PlainFTLGuestModel, PlainFTLHostModel
from federatedml.ftl.encrypted_ftl import EncryptedFTLGuestModel, EncryptedFTLHostModel
from federatedml.ftl.test.fake_models import FakeAutoencoder, FakeFTLModelParam
def run_one_party_msg_exchange(autoencoderA, autoencoderB, U_A, U_B, y, overlap_indexes, non_overlap_indexes,
                               public_key=None, private_key=None, is_encrypted=False):
    """Build guest (A) and host (B) models, perform one full round of
    component exchange between them, and return both parties.
    """
    model_param = FakeFTLModelParam(alpha=1)
    # Pick encrypted or plain model classes depending on the flag.
    if is_encrypted:
        partyA = EncryptedFTLGuestModel(autoencoderA, model_param, public_key=public_key, private_key=private_key)
        partyA.set_batch(U_A, y, non_overlap_indexes, overlap_indexes)
        partyB = EncryptedFTLHostModel(autoencoderB, model_param, public_key=public_key, private_key=private_key)
        partyB.set_batch(U_B, overlap_indexes)
    else:
        partyA = PlainFTLGuestModel(autoencoderA, model_param)
        partyA.set_batch(U_A, y, non_overlap_indexes, overlap_indexes)
        partyB = PlainFTLHostModel(autoencoderB, model_param)
        partyB.set_batch(U_B, overlap_indexes)
    # One round-trip: each party sends its components and receives the other's.
    guest_components = partyA.send_components()
    host_components = partyB.send_components()
    partyA.receive_components(list(host_components))
    partyB.receive_components(list(guest_components))
    return partyA, partyB
class TestPlainGradients(unittest.TestCase):
    """Finite-difference gradient checks for the plain (non-encrypted) FTL parties."""

    def test_party_b_gradient_checking_test(self):
        # Toy guest/host features; samples 1 and 2 overlap between parties.
        U_A = np.array([[1, 2, 3, 4, 5], [4, 5, 6, 7, 8], [7, 8, 9, 10, 11], [4, 5, 6, 7, 8]])
        U_B = np.array([[4, 2, 3, 1, 2], [6, 5, 1, 4, 5], [7, 4, 1, 9, 10], [6, 5, 1, 4, 5]])
        y = np.array([[1], [-1], [1], [-1]])
        overlap_indexes = [1, 2]
        non_overlap_indexes = [0, 3]

        # Fake autoencoders sharing identical all-ones weights and zero biases.
        Wh = np.ones((5, U_A.shape[1]))
        bh = np.zeros(U_A.shape[1])
        autoencoderA = FakeAutoencoder(0)
        autoencoderA.build(U_A.shape[1], Wh, bh)
        autoencoderB = FakeAutoencoder(1)
        autoencoderB.build(U_B.shape[1], Wh, bh)

        # Analytic gradient and loss at the base point.
        partyA, partyB = run_one_party_msg_exchange(autoencoderA, autoencoderB, U_A, U_B, y,
                                                    overlap_indexes, non_overlap_indexes)
        analytic_grads = partyB.get_loss_grads()
        base_loss = partyA.send_loss()

        # Perturb U_B[1, 2] by 1e-3 and recompute the loss.
        U_B_prime = np.array([[4, 2, 3, 1, 2], [6, 5, 1.001, 4, 5], [7, 4, 1, 9, 10], [6, 5, 1, 4, 5]])
        partyA, partyB = run_one_party_msg_exchange(autoencoderA, autoencoderB, U_A, U_B_prime, y,
                                                    overlap_indexes, non_overlap_indexes)
        _ = partyB.get_loss_grads()
        perturbed_loss = partyA.send_loss()

        # Finite-difference approximation should match the analytic gradient
        # at the perturbed coordinate (overlap row 0 == sample 1, column 2).
        grad_approx = (perturbed_loss - base_loss) / 0.001
        grad_real = analytic_grads[0, 2]
        assert np.abs(grad_approx - grad_real) < 0.001

    def test_party_a_gradient_checking_test(self):
        # Same toy setup as the party-B test above.
        U_A = np.array([[1, 2, 3, 4, 5], [4, 5, 6, 7, 8], [7, 8, 9, 10, 11], [4, 5, 6, 7, 8]])
        U_B = np.array([[4, 2, 3, 1, 2], [6, 5, 1, 4, 5], [7, 4, 1, 9, 10], [6, 5, 1, 4, 5]])
        y = np.array([[1], [-1], [1], [-1]])
        overlap_indexes = [1, 2]
        non_overlap_indexes = [0, 3]

        Wh = np.ones((5, U_A.shape[1]))
        bh = np.zeros(U_A.shape[1])
        autoencoderA = FakeAutoencoder(0)
        autoencoderA.build(U_A.shape[1], Wh, bh)
        autoencoderB = FakeAutoencoder(1)
        autoencoderB.build(U_B.shape[1], Wh, bh)

        # Analytic gradient and loss at the base point.
        partyA, _ = run_one_party_msg_exchange(autoencoderA, autoencoderB, U_A, U_B, y,
                                               overlap_indexes, non_overlap_indexes)
        analytic_grads = partyA.get_loss_grads()
        base_loss = partyA.send_loss()

        # Perturb U_A[1, 1] by 1e-3 and recompute the loss.
        U_A_prime = np.array([[1, 2, 3, 4, 5], [4, 5.001, 6, 7, 8], [7, 8, 9, 10, 11], [4, 5, 6, 7, 8]])
        partyA, _ = run_one_party_msg_exchange(autoencoderA, autoencoderB, U_A_prime, U_B, y,
                                               overlap_indexes, non_overlap_indexes)
        _ = partyA.get_loss_grads()
        perturbed_loss = partyA.send_loss()

        grad_approx = (perturbed_loss - base_loss) / 0.001
        grad_real = analytic_grads[1, 1]
        assert np.abs(grad_approx - grad_real) < 0.001
# Run the gradient-checking tests when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "cc7a8ca1bce9189a627c9f70a1cc8bbdb59be0e5", "size": 5340, "ext": "py", "lang": "Python", "max_stars_repo_path": "federatedml/ftl/test/whitebox_plain_gradients_test.py", "max_stars_repo_name": "chenlongzhen/FATE-0.1", "max_stars_repo_head_hexsha": "5a1f316676e77dca8311bb74a26a7623c4a97b86", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-02-25T13:43:24.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-25T13:43:24.000Z", "max_issues_repo_path": "federatedml/ftl/test/whitebox_plain_gradients_test.py", "max_issues_repo_name": "crownpku/FATE", "max_issues_repo_head_hexsha": "38fe6cea0dca3841b59c3d04cb04f556803e2e29", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "federatedml/ftl/test/whitebox_plain_gradients_test.py", "max_forks_repo_name": "crownpku/FATE", "max_forks_repo_head_hexsha": "38fe6cea0dca3841b59c3d04cb04f556803e2e29", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2647058824, "max_line_length": 136, "alphanum_fraction": 0.6058052434, "include": true, "reason": "import numpy", "num_tokens": 1563}
|
import sys
import os
import argparse
import requests
import json
from PIL import Image
import numpy as np
sys.path.append(os.path.abspath(os.path.join("..", "CameraTraps/")))
# sys.path.append(os.path.abspath(os.path.join("..", "CameraTraps/visualization")))
from visualization.visualization_utils import render_detection_bounding_boxes
# Command-line interface: a single positional argument holding the image path.
parser = argparse.ArgumentParser()
parser.add_argument("img_uri")
args = parser.parse_args()
# Local TF-Serving (SageMaker-style) inference endpoint.
URL = "http://localhost:8080/invocations"
# Detections with confidence below this threshold are discarded.
RENDER_THRESHOLD = 0.8
MODEL_NAME = "megadetector" # TODO: implement local MIRA testing as well
if __name__ == "__main__":
    print("Detecting objects in: ", args.img_uri)

    request_headers = {
        "content-type": "application/x-image",
        "X-Amzn-SageMaker-Custom-Attribute": "tfs-model-name={}".format(MODEL_NAME)
    }

    # Stream the raw image bytes to the local model-serving endpoint.
    # TODO: consider posting via the "files" param for multipart encoding;
    # https://requests.readthedocs.io/en/master/user/quickstart/#passing-parameters-in-urls
    with open(args.img_uri, "rb") as image_file:
        response = requests.post(URL, data=image_file, headers=request_headers)

    # Decode and report the raw model output.
    print("response: ", response.status_code)
    payload = json.loads(response.text)
    print(payload)
    predictions = payload["predictions"]

    # Collect detections above the confidence threshold for each image.
    image_paths = [args.img_uri]
    results = []
    for idx, path in enumerate(image_paths):
        triples = zip(predictions[idx]["detection_boxes"],
                      predictions[idx]["detection_classes"],
                      predictions[idx]["detection_scores"])
        detections = [
            {
                "category": str(int(clss)),
                "conf": score,
                # bbox stays in the model's [ymin, xmin, ymax, xmax] order;
                # convert to [xmin, ymin, w, h] downstream if needed.
                "bbox": box
            }
            for box, clss, score in triples
            if score >= RENDER_THRESHOLD
        ]
        results.append({
            "file": path,
            "detections": detections
        })

    # Display results
    for res in results:
        print("result: ", res)
|
{"hexsha": "73c6d71944b9254aabb539fb39841749cf86a3b8", "size": 2961, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/make-request.py", "max_stars_repo_name": "tnc-ca-geo/animl-ml", "max_stars_repo_head_hexsha": "95aeb1e99fddf7199692144ef3425340d6b8dc3c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-28T02:10:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-28T02:10:25.000Z", "max_issues_repo_path": "scripts/make-request.py", "max_issues_repo_name": "tnc-ca-geo/animl-ml", "max_issues_repo_head_hexsha": "95aeb1e99fddf7199692144ef3425340d6b8dc3c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 46, "max_issues_repo_issues_event_min_datetime": "2020-03-18T22:44:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:51:44.000Z", "max_forks_repo_path": "scripts/make-request.py", "max_forks_repo_name": "tnc-ca-geo/animl-ml", "max_forks_repo_head_hexsha": "95aeb1e99fddf7199692144ef3425340d6b8dc3c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.48, "max_line_length": 95, "alphanum_fraction": 0.5663627153, "include": true, "reason": "import numpy", "num_tokens": 667}
|
'''
Created on 2 juin 2015
@author: Jose Pedro Matos
'''
import numpy as np
import matplotlib.pyplot as plt
def paretoSorting(x0, x1):
    '''
    Partition points into Pareto fronts (both objectives minimized).

    Returns a tuple (fronts, idx): `fronts` is a list of lists of point
    indices, front 0 first; `idx` is the lexicographic order (primary key
    x0, secondary x1) used for the sweep.
    '''
    order = np.lexsort((x1, x0))
    # Seed the first front with the point that has the smallest x0.
    fronts = [[order[0]]]
    for point in order[1:]:
        if x1[point] >= x1[fronts[-1][-1]]:
            # Not better than the tail of any existing front: open a new one.
            fronts.append([point])
        else:
            # Attach to the first front whose current tail it improves on.
            for front in fronts:
                if x1[point] < x1[front[-1]]:
                    front.append(point)
                    break
    return (fronts, order)
def doubleParetoSorting(x0, x1):
    '''
    Two-sided Pareto-like sorting: fronts grow outwards from a seed point and
    track the x0 extent they currently cover via `left`/`right` bounds.
    Returns (fronts, idx) where idx is the lexicographic sort by (x0, x1).

    NOTE(review): `left`/`right` start as one-element lists and are later
    overwritten with scalars (`left[i1] = x0[i0]`); the comparisons below rely
    on numpy broadcasting a scalar against a one-element list — fragile but
    apparently intentional. Confirm before refactoring.
    '''
    fronts = [[]]
    left = [[]]
    right = [[]]
    idx = np.lexsort((x1, x0))
    # Sweep order: by increasing x1, ties broken by distance of x0 from 0.5.
    idxEdge = np.lexsort((-np.square(x0-0.5), x1))
    # Seed the first front with the best point under the sweep order.
    fronts[-1].append(idxEdge[0])
    left[-1].append(x0[idxEdge[0]])
    right[-1].append(x0[idxEdge[0]])
    for i0 in idxEdge[1:]:
        if x0[i0]>=left[-1] and x0[i0]<=right[-1]:
            #add a new front
            fronts.append([])
            left.append([])
            right.append([])
            fronts[-1].append(i0)
            left[-1].append(x0[i0])
            right[-1].append(x0[i0])
        else:
            #check existing fronts: extend the first one whose x0 span the
            #point falls outside of, updating that side's bound.
            for i1 in range(len(fronts)):
                if x0[i0]<left[i1] or x0[i0]>right[i1]:
                    if x0[i0]<left[i1]:
                        left[i1] = x0[i0]
                        fronts[i1].insert(0, i0)
                    else:
                        right[i1] = x0[i0]
                        fronts[i1].append(i0)
                    break
    return (fronts, idx)
def plotFronts(fronts, x0, x1, **kwargs):
    '''
    Plot every point in black plus one connected line per front; returns the
    matplotlib figure. Optional kwargs: 'size' (scatter marker size) and
    'annotate' (label each point with its index).
    '''
    figure = plt.figure()
    axes = plt.gca()
    # Background layer: all points, as a scatter if a marker size was given.
    if 'size' in kwargs:
        axes.scatter(x0, x1, c='k', s=kwargs['size'])
    else:
        axes.plot(x0, x1, 'ok')
    # One poly-line per front.
    for members in fronts:
        axes.plot(x0[members], x1[members], '-')
    # Optionally annotate every point with its index.
    if kwargs.get('annotate'):
        for index, (px, py) in enumerate(zip(x0, x1)):
            plt.annotate(
                index,
                xy=(px, py), xytext=(-10, 10),
                textcoords='offset points', ha='right', va='bottom',
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3, rad=-0.2'))
    return figure
def convexSortingApprox(x0, x1):
    ''' Approximate convex sorting by merging a left-to-right and a
    right-to-left Pareto sorting at the x0 of the minimal-x1 point.
    Note: does not work well. '''
    # Pareto fronts sweeping from the left and (via negated x0) from the right.
    leftFronts = paretoSorting(x0, x1)[0]
    rightFronts = paretoSorting(-x0, x1)[0]
    # Split coordinate: x0 of the sample with the smallest x1.
    splitX0 = x0[np.argmin(x1)]
    merged = []
    for level in range(max(len(leftFronts), len(rightFronts))):
        combined = []
        if level < len(leftFronts):
            keep = x0[leftFronts[level]] <= splitX0
            combined.extend(np.array(leftFronts[level])[keep])
        if level < len(rightFronts):
            keep = x0[rightFronts[level]] > splitX0
            combined.extend(np.array(rightFronts[level])[keep])
        merged.append(combined)
    return merged
def convexSorting(x0, x1):
    '''
    Convex sorting: start from a double-Pareto sorting, then iteratively move
    points between adjacent fronts until each front is consistent, finally
    dropping any fronts left empty. Returns (fronts, idx).
    '''
    #===========================================================================
    # fronts, idx=paretoSorting(x0, x1)
    #===========================================================================
    fronts, idx=doubleParetoSorting(x0, x1)
    lastChanged=0
    for i0 in range(len(fronts)):
        if len(fronts[i0])>0:
            # Backward pass: pull points from front i1+1 down into front i1
            # when they extend it (larger x0, smaller x1 than its tail).
            # NOTE: `or`/`and` precedence makes this read as
            # (empty) or ((x0 smaller) and (x1 larger)).
            for i1 in range(lastChanged-1,i0-1,-1):
                tmp=list()
                for l0 in reversed(fronts[i1+1]):
                    if len(fronts[i1])==0 or x0[fronts[i1][-1]]<x0[l0] and x1[fronts[i1][-1]]>x1[l0]:
                        tmp.insert(0,fronts[i1+1].pop())
                if len(tmp)>0:
                    fronts[i1].extend(tmp)
            # Forward pass: steal the tail of any later front whose last point
            # lies to the right of this front's tail.
            for i1 in range(i0+1, len(fronts)):
                if len(fronts[i1])>0 and x0[fronts[i0][-1]]<x0[fronts[i1][-1]]:
                    fronts[i0].append(fronts[i1].pop())
                    lastChanged=i1
            #=======================================================================
            # if i0 in range(len(fronts)-23,len(fronts)-20):
            #     plotFronts(fronts, x0, x1)
            #     plt.show(block=False)
            #=======================================================================
    # Remove fronts emptied by the reshuffling above.
    for i0 in range(len(fronts)-1,-1,-1):
        if len(fronts[i0])==0:
            fronts.pop(i0)
    return (fronts, idx)
|
{"hexsha": "32025adb7d250e51db657bbafb3ff9fefd4708e5", "size": 4297, "ext": "py", "lang": "Python", "max_stars_repo_path": "gpu/domination.py", "max_stars_repo_name": "JosePedroMatos/ADAPT-DB", "max_stars_repo_head_hexsha": "98df645d1bcd3f11f5cdb52a3fb0236592e869cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gpu/domination.py", "max_issues_repo_name": "JosePedroMatos/ADAPT-DB", "max_issues_repo_head_hexsha": "98df645d1bcd3f11f5cdb52a3fb0236592e869cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gpu/domination.py", "max_forks_repo_name": "JosePedroMatos/ADAPT-DB", "max_forks_repo_head_hexsha": "98df645d1bcd3f11f5cdb52a3fb0236592e869cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3649635036, "max_line_length": 101, "alphanum_fraction": 0.4505468932, "include": true, "reason": "import numpy", "num_tokens": 1192}
|
# import scholar.scholar as sch
from scipy import spatial
import numpy as np
import pickle as pkl
### Usage from other files ###
# import utils
# v = utils.vecMaster()
# word_list = v.expand(source_words, expansion_method, epsilon)
# def create_vector_object(sourcefile="data/fasttext.wiki.en.vec", destfile="data/fasttext.en", truncate=None):
### NOTE: run this function using python3
# otherwise the dict file is unusable
def create_fasttext_pkl(sourcefile="fasttexttrunc", destfile="data/fasttext.en", truncate=None):
    """Convert a fastText ``.vec`` text file into a pickle of parallel lists.

    The output pickle, written to ``destfile + '.pkl'``, contains a dict with
    keys ``'tokens'`` (list of str) and ``'vectors'`` (2-D float ndarray, one
    row per token). The first line of the source file (the "<count> <dim>"
    header) is skipped.

    Fixes vs. the original: file handles are managed with ``with`` (the source
    handle used to be leaked), the previously-ignored ``truncate`` parameter is
    honored, and the deprecated ``np.fromstring`` is no longer used.

    Parameters
    ----------
    sourcefile : str
        Path to the fastText text-format vector file.
    destfile : str
        Output path prefix; '.pkl' is appended.
    truncate : int or None, default=None
        If given, keep only the first `truncate` token/vector pairs.
    """
    tokens = []
    vectors = []
    with open(sourcefile, "r") as src:
        src.readline()  # skip the "<count> <dim>" header line
        for line in src:
            sep = line.index(' ')
            tokens.append(line[:sep])
            vectors.append(np.array(line[sep + 1:].split(), dtype=np.float64))
            if truncate is not None and len(tokens) >= truncate:
                break
    data = {'tokens': tokens, 'vectors': np.vstack(vectors)}
    # Protocol 4 kept for compatibility with existing readers of these pickles.
    with open(destfile + '.pkl', 'wb') as dst:
        pkl.dump(data, dst, protocol=4)
class vecMaster():
def __init__(self, sourcefile='data/fasttext.en.pkl'):
with open(sourcefile, 'rb') as myfile:
data = pkl.load(myfile)
self.token_list = data['tokens']
self.tokens = np.atleast_1d(self.token_list[:50000])
self.vectors = data['vectors'][:50000]
def validate(self, word_list):
valid_words = word_list.copy()
for w in word_list:
if w not in self.tokens:
if ' ' in w:
for sub_w in w.split(' '):
if sub_w not in self.tokens:
print("Word " + w + " not found in vector model. Omitting...")
valid_words.remove(w)
return valid_words
    def neighbor_expansion(self, source_words, epsilon=0.35, distance_metric='cosine', k=None):
        """Expand `source_words` with vocabulary tokens near them in vector space.

        If `k` is given, return the k nearest tokens; otherwise return all
        tokens within distance `epsilon`.

        NOTE(review): `[:,0]` below keeps only distances to the FIRST source
        word/phrase, so extra source words do not affect the result — confirm
        whether that is intended.
        """
        source_words = self.validate(source_words)
        #source_vectors = np.array([self.vectors[np.squeeze(np.argwhere(self.tokens == w))] for w in source_words])
        sv = []
        for w in source_words:
            #if multiword, then average them
            if ' ' in w:
                words = w.split(' ')
                # NOTE(review): the comprehension variable `w` shadows the outer
                # loop variable; harmless here but easy to misread.
                phrase_vectors = np.array([self.vectors[np.squeeze(np.argwhere(self.tokens == w))] for w in words])
                sv.append(np.mean(phrase_vectors,axis=0))
            else:
                #otherwise, take the vector
                sv.append(self.vectors[np.squeeze(np.argwhere(self.tokens==w))])
        source_vectors = np.vstack(sv)
        # Distances from every vocabulary vector to the first source vector.
        distances = spatial.distance.cdist(self.vectors, source_vectors, distance_metric)[:,0]
        if k is not None:
            # find the k nearest
            inds = np.argsort( distances )
            return np.array( self.tokens[ inds[0:k] ] )
        else:
            return np.squeeze(self.tokens[np.argwhere(distances < epsilon)])
def mahalanobis_expansion(self, source_words, epsilon=0.25, k=None, sigma=0.00001):
source_words = self.validate(source_words)
source_vectors = np.array([self.vectors[np.squeeze(np.argwhere(self.tokens == w))] for w in source_words])
c = np.cov(source_vectors.T)
c += sigma * np.identity(c.shape[0])
c = np.linalg.inv(c)
#c = np.linalg.pinv(c)
def mahalanobis_squared(u, v, VI=c):
delta = u - v
return np.dot(np.dot(delta, VI), delta)
centroid = np.atleast_2d(np.mean(source_vectors, axis=0))
distances = spatial.distance.cdist(self.vectors, centroid, metric=mahalanobis_squared)[:, 0]
if k is not None:
# find the k nearest
inds = np.argsort( distances )
return np.array( self.tokens[ inds[0:k] ] )
else:
# find anything within radius epsilon (scaled by mean distance)
epsilon = epsilon * np.mean(distances)
return np.squeeze(self.tokens[np.argwhere(distances <= epsilon)])
def naive_centroid_expansion(self, source_words, epsilon=0.25, distance_metric='cosine', k=None):
source_words = self.validate(source_words)
source_vectors = np.array([self.vectors[np.squeeze(np.argwhere(self.tokens == w))] for w in source_words])
centroid = np.atleast_2d(np.mean(source_vectors, axis=0))
distances = spatial.distance.cdist(self.vectors, centroid, distance_metric)[:, 0]
if k is not None:
# find the k nearest
inds = np.argsort( distances )
return np.array( self.tokens[ inds[0:k] ] )
else:
# find anything within radius epsilon (scaled by mean distance)
epsilon = epsilon * np.mean(distances)
return np.squeeze(self.tokens[np.argwhere(distances <= epsilon)])
def bounding_box(self, source_words):
source_words = self.validate(source_words)
source_vectors = np.array([self.vectors[np.squeeze(np.argwhere(self.tokens == w))] for w in source_words])
min_vector = np.min(source_vectors,axis=0)
max_vector = np.max(source_vectors,axis=0)
print(min_vector)
print(max_vector)
print(min_vector.shape)
print(max_vector.shape)
indexes = []
for i in range(len(self.tokens)):
if (np.all(self.vectors[i] >= min_vector) and np.all(self.vectors[i] <= max_vector)):
indexes.append(i)
return np.squeeze(self.tokens)[indexes]
if __name__ == '__main__':
    # Manual smoke test: load the pickled vector store and expand a word
    # set with the bounding-box method.  Other strategies can be tried
    # interactively, e.g.:
    #   vec.neighbor_expansion(['beautiful', 'gorgeous', 'handsome'], k=30)
    #   vec.mahalanobis_expansion(['france', 'germany', 'guatemala'], k=30)
    v = vecMaster()
    print(v.bounding_box(['clever','smart','intelligent','red','belligerent','the']))
|
{"hexsha": "8406fc0fa4580bde34b429e0f3a99d297c0a27be", "size": 7015, "ext": "py", "lang": "Python", "max_stars_repo_path": "regexv/utils.py", "max_stars_repo_name": "BYU-PCCL/regexv", "max_stars_repo_head_hexsha": "0d27d47a9441cf8f2a114bdd2a87c54ac47520bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-03-03T17:54:57.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-14T15:55:29.000Z", "max_issues_repo_path": "regexv/utils.py", "max_issues_repo_name": "NancyFulda/regexv", "max_issues_repo_head_hexsha": "0d27d47a9441cf8f2a114bdd2a87c54ac47520bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "regexv/utils.py", "max_forks_repo_name": "NancyFulda/regexv", "max_forks_repo_head_hexsha": "0d27d47a9441cf8f2a114bdd2a87c54ac47520bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5151515152, "max_line_length": 116, "alphanum_fraction": 0.6159657876, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1739}
|
import tensorflow as tf
import numpy as np
from src.data_loader.data_generator import DataGenerator
from src.models.simple_model import SimpleModel
from src.trainers.simple_trainer import SimpleTrainer
from src.utils.config import processing_config
from src.utils.utils import get_args
from src.utils.dirs import create_dirs
from src.utils.logger import Logger
from src.testers.simple_tester import SimpleTester
from src.trainers.tiny_vgg_trainer import TinyVGGTrainer
from src.models.tiny_vgg_model import TinyVGG
from src.testers.tiny_vgg_tester import TinyVGGTester
def main():
    """Entry point: build a TinyVGG model and run the mode requested by
    the config file (``train`` / ``test`` / ``prediction``).

    The config path comes from the command line (see ``get_args``);
    prediction mode additionally uses ``args.img_path``.
    """
    try:
        args = get_args()
        print(args.config)
        config = processing_config(args.config)
    except Exception:
        # The original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt, and exit(0) wrongly signalled success.
        print("Missing or invalid arguments")
        exit(1)

    sess = tf.Session()
    model = TinyVGG(config)
    model.load(sess)
    data = DataGenerator(config)

    if config.mode == "prediction":
        # Prediction needs no summaries, checkpoints directory, or logger.
        tester = TinyVGGTester(sess, model, None, config, None)
        prediction = tester.predict_image(args.img_path)
        print("the input image is of class: ", data.get_label_name(prediction))
        return

    create_dirs([config.summary_dir, config.checkpoint_dir])
    logger = Logger(sess, config)

    if config.mode == "train":
        print(" train data size: ", data.x_train.shape[0],
              " val data size: ", data.x_val.shape[0])
        trainer = TinyVGGTrainer(sess, model, data, config, logger)
        trainer.train()
    elif config.mode == "test":
        print(" test data size: ", data.x_test.shape[0])
        tester = TinyVGGTester(sess, model, data, config, logger)
        tester.test()
    else:
        print(" Mode: ", config.mode, " is not supported")


if __name__ == '__main__':
    main()
|
{"hexsha": "aef4b10f85d844be3f24e62da21d6351d4ef00d1", "size": 1748, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/mains/main.py", "max_stars_repo_name": "MohamedAli1995/Cifar-100-Classifier", "max_stars_repo_head_hexsha": "924704a81ce13062825a88b90b80e8ac2ba45d63", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-05-12T16:11:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-10T22:39:57.000Z", "max_issues_repo_path": "src/mains/main.py", "max_issues_repo_name": "MohamedAli1995/Cifar-100-Classifier", "max_issues_repo_head_hexsha": "924704a81ce13062825a88b90b80e8ac2ba45d63", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mains/main.py", "max_forks_repo_name": "MohamedAli1995/Cifar-100-Classifier", "max_forks_repo_head_hexsha": "924704a81ce13062825a88b90b80e8ac2ba45d63", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7818181818, "max_line_length": 79, "alphanum_fraction": 0.6916475973, "include": true, "reason": "import numpy", "num_tokens": 404}
|
import csv
import os
from collections import defaultdict
import h5py
import numpy as np
import ray
from misc.shared import BASE_DIR, CONFIG, DATASET_DIR
from misc.utils import get_gender
from psbody.mesh import Mesh
from ray.util import ActorPool
from tqdm import tqdm
# Silence TensorFlow's C++ logging (3 = errors only); must be set before
# tensorflow is imported to take effect.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import warnings
# TF 1.x emits FutureWarnings at import time; suppress them only for the
# duration of these imports.
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=FutureWarning)
    import tensorflow as tf
    from TF_FLAME.utils.landmarks import load_embedding, tf_project_points
# Quiet TF's Python-side logger as well.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from tensorflow.contrib.opt import ScipyOptimizerInterface as scipy_pt
from TF_FLAME.tf_smpl.batch_smpl import SMPL
@ray.remote(num_gpus=0.0625, num_cpus=0.5)
class FrameOptimizer(object):
    """Ray actor that fits FLAME model parameters to 2D facial landmarks.

    The TF1 graph is built once in __init__; fit_lmk2d_v2() then loads
    per-frame values into the graph's variables and runs a two-stage
    L-BFGS-B optimisation.  num_gpus=0.0625 lets 16 actors share a GPU.
    """
    def __init__(self, neutral_mesh_faces, template_path):
        tf.compat.v1.reset_default_graph()
        tf.keras.backend.clear_session()
        # Each actor owns a private graph so sessions don't interfere.
        self.graph1 = tf.Graph()
        with self.graph1.as_default():
            # Relative weights of the loss terms used in stage 2.
            weights = {
                "lmk": 1.0,
                "shape": 1e-3,
                "expr": 1e-3,
                "neck_pose": 100.0,
                "jaw_pose": 1e-3,
                "eyeballs_pose": 10.0,
            }
            self.template_mesh = tf.constant(neutral_mesh_faces)
            # 51 2-D target landmarks, loaded per frame in fit_lmk2d_v2().
            self.target_2d_lmks_x = tf.Variable(np.zeros((51, 1)))
            self.target_2d_lmks_y = tf.Variable(np.zeros((51, 1)))
            # y is flipped as 1024 - y — assumes a 1024-px-tall image; TODO confirm.
            self.target_2d_lmks = tf.concat(
                [self.target_2d_lmks_x, 1024 - self.target_2d_lmks_y], axis=1
            )
            # FLAME parameters being optimised: global translation/rotation,
            # 12 pose dofs (neck/jaw/eyeballs), 300 shape and 100 expression
            # coefficients.
            self.tf_trans = tf.Variable(
                np.zeros((1, 3)), name="trans", dtype=tf.float64, trainable=True
            )
            self.tf_rot = tf.Variable(
                np.zeros((1, 3)), name="rot", dtype=tf.float64, trainable=True
            )
            self.tf_pose = tf.Variable(
                np.zeros((1, 12)), name="pose", dtype=tf.float64, trainable=True
            )
            self.tf_shape = tf.Variable(
                np.zeros((1, 300)), name="shape", dtype=tf.float64, trainable=True
            )
            self.tf_exp = tf.Variable(
                np.zeros((1, 100)), name="expression", dtype=tf.float64, trainable=True
            )
            # tf_scale = tf.Variable(0, dtype=tf.float64)
            # Differentiable FLAME decoder producing the posed mesh.
            smpl = SMPL(template_path)
            self.tf_model = tf.squeeze(
                smpl(
                    self.tf_trans,
                    tf.concat((self.tf_shape, self.tf_exp), axis=-1),
                    tf.concat((self.tf_rot, self.tf_pose), axis=-1),
                )
            )
            lmks_3d = self.tf_get_model_lmks(self.tf_model)
            # s2d / s3d: mean spread of target 2-D landmarks vs. projected
            # model landmarks, used to initialise the projection scale.
            self.s2d = tf.reduce_mean(
                tf.linalg.norm(
                    self.target_2d_lmks - tf.reduce_mean(self.target_2d_lmks, axis=0),
                    axis=1,
                )
            )
            self.s3d = tf.reduce_mean(
                tf.sqrt(
                    tf.reduce_sum(
                        tf.square(lmks_3d - tf.reduce_mean(lmks_3d, axis=0))[:, :2],
                        axis=1,
                    )
                )
            )
            self.tf_scale = tf.Variable(self.s2d / self.s3d, dtype=lmks_3d.dtype)
            self.lmks_proj_2d = tf_project_points(lmks_3d, self.tf_scale, np.zeros(2))
            # Normalise the landmark loss by the target bounding-box extent
            # so it is resolution independent.
            factor = tf.math.maximum(
                tf.math.reduce_max(self.target_2d_lmks[:, 0])
                - tf.math.reduce_min(self.target_2d_lmks[:, 0]),
                tf.math.reduce_max(self.target_2d_lmks[:, 1])
                - tf.math.reduce_min(self.target_2d_lmks[:, 1]),
            )
            self.lmk_dist = (
                weights["lmk"]
                * tf.reduce_sum(
                    tf.square(tf.subtract(self.lmks_proj_2d, self.target_2d_lmks))
                )
                / (factor ** 2)
            )
            # L2 regularisers on the individual pose/shape/expression groups.
            self.neck_pose_reg = weights["neck_pose"] * tf.reduce_sum(
                tf.square(self.tf_pose[:3])
            )
            self.jaw_pose_reg = weights["jaw_pose"] * tf.reduce_sum(
                tf.square(self.tf_pose[3:6])
            )
            self.eyeballs_pose_reg = weights["eyeballs_pose"] * tf.reduce_sum(
                tf.square(self.tf_pose[6:])
            )
            self.shape_reg = weights["shape"] * tf.reduce_sum(tf.square(self.tf_shape))
            self.exp_reg = weights["expr"] * tf.reduce_sum(tf.square(self.tf_exp))
            # Stage 1: rigid alignment only (scale, translation, rotation).
            self.optimizer1 = scipy_pt(
                loss=self.lmk_dist,
                var_list=[self.tf_scale, self.tf_trans, self.tf_rot],
                method="L-BFGS-B",
                options={"disp": 0, "ftol": 5e-6},
            )
            loss = (
                self.lmk_dist
                + self.shape_reg
                + self.exp_reg
                + self.neck_pose_reg
                + self.jaw_pose_reg
                + self.eyeballs_pose_reg
            )
            # Stage 2: full fit over all parameters with regularisation.
            self.optimizer2 = scipy_pt(
                loss=loss,
                var_list=[
                    self.tf_scale,
                    self.tf_trans[:2],
                    self.tf_rot,
                    self.tf_pose,
                    self.tf_shape,
                    self.tf_exp,
                ],
                method="L-BFGS-B",
                options={"disp": 0, "ftol": 1e-7},
            )
    def tf_get_model_lmks(self, tf_model):
        """Get a differentiable landmark embedding in the FLAME surface"""
        lmk_face_idx, lmk_b_coords = load_embedding(
            BASE_DIR / CONFIG["flame"]["static_landmark_embedding_path"]
        )
        faces = tf.cast(
            tf.gather_nd(self.template_mesh, [[x] for x in lmk_face_idx.tolist()]),
            tf.int32,
        )
        # Barycentric interpolation of the vertices of each landmark face.
        return tf.einsum(
            "ijk,ij->ik", tf.gather(tf_model, faces), tf.convert_to_tensor(lmk_b_coords)
        )
    def fit_lmk2d_v2(self, pose, shape, expression, target_2d_lmks, file_name):
        """Fit FLAME parameters for one frame.

        `pose`/`shape`/`expression` are RiNgNet initial estimates (zero
        padded below to the graph's full dimensions); `target_2d_lmks` is
        a (51, 2) array of OpenFace landmarks.  Returns (file_name, dict
        of fitted parameter arrays).
        """
        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True
        config.graph_options.optimizer_options.global_jit_level = (
            tf.compat.v1.OptimizerOptions.OFF
        )
        with tf.compat.v1.Session(config=config, graph=self.graph1) as session:
            session.run(tf.compat.v1.global_variables_initializer())
            # Pad initial estimates with zeros up to 300 shape / 100
            # expression / 12 pose coefficients.
            self.tf_shape.load(
                np.expand_dims(np.hstack((shape, np.zeros(200))), 0), session,
            )
            self.tf_exp.load(
                np.expand_dims(np.hstack((expression, np.zeros(50))), 0), session,
            )
            self.tf_pose.load(
                np.expand_dims(np.hstack((pose[3:], np.zeros(9))), 0), session,
            )
            self.tf_rot.load(np.expand_dims(pose[:3], 0), session)
            self.target_2d_lmks_x.load(target_2d_lmks[:, :1], session)
            self.target_2d_lmks_y.load(target_2d_lmks[:, 1:], session)
            # Re-run the scale initialiser now that the targets are loaded.
            self.tf_scale.initializer.run()
            self.optimizer1.minimize(
                session,
                fetches=[
                    self.tf_model,
                    self.tf_scale,
                    self.template_mesh,
                    self.target_2d_lmks,
                    self.lmks_proj_2d,
                ],
            )
            self.optimizer2.minimize(
                session,
                fetches=[
                    self.tf_model,
                    self.tf_scale,
                    self.template_mesh,
                    self.target_2d_lmks,
                    self.lmks_proj_2d,
                    self.lmk_dist,
                    self.shape_reg,
                    self.exp_reg,
                    self.neck_pose_reg,
                    self.jaw_pose_reg,
                    self.eyeballs_pose_reg,
                ],
            )
            return (
                file_name,
                {
                    "tf_trans": self.tf_trans.eval(),
                    "tf_rot": self.tf_rot.eval(),
                    "tf_pose": self.tf_pose.eval(),
                    "tf_shape": self.tf_shape.eval(),
                    "tf_exp": self.tf_exp.eval(),
                },
            )
def extract_flame(fps):
    """Fit FLAME parameters for every video at the given frame rate.

    For each ``video_{fps}fps.mp4`` under DATASET_DIR, distributes per-frame
    fits over a pool of 8 FrameOptimizer ray actors, caches each frame as a
    ``.npy`` file, then aggregates all frames into ``flame_{fps}fps.h5``.
    Already-finished videos (existing .h5) and frames (existing .npy) are
    skipped, so the function is resumable.
    """
    files = list(DATASET_DIR.glob(f"*/*/video_{fps}fps.mp4"))
    for i, video_file in enumerate(
        tqdm(files, desc="Extracting flame parameters", leave=False)
    ):
        flame_h5_file = video_file.parent / f"flame_{fps}fps.h5"
        if flame_h5_file.exists():
            continue
        flame_dir = video_file.parent / f"flame_{fps}fps"
        # Choose the gendered FLAME template for this session/speaker.
        gender = get_gender(video_file.parent.parent.name, video_file.parent.name)
        template_path = BASE_DIR / CONFIG["flame"][f"model_path_{gender}"]
        # with open(template_model_fname, "rb") as f:
        #     template = pickle.load(f, encoding="latin1")
        ringnet_file = video_file.parent / f"ringnet_{fps}fps.h5"
        openface_file = video_file.parent / f"openface_{fps}fps.csv"
        neutral_mesh_faces = Mesh(
            filename=str(video_file.parent / "neutral_mesh.ply")
        ).f
        # RingNet initial estimates (pose/shape/expression per frame).
        f = h5py.File(ringnet_file, "r")["flame_params"]
        pool = ActorPool(
            [
                FrameOptimizer.remote(neutral_mesh_faces, template_path)
                for _ in range(8)
            ]
        )
        # Drop the CSV header row; rows are per-frame OpenFace outputs.
        openface_data = list(csv.reader(openface_file.open()))[1:]
        data = f["pose"], f["shape"], f["expression"], openface_data
        flame_dir.mkdir(parents=True, exist_ok=True)
        runners = []
        # NOTE(review): this inner `i` shadows the outer enumerate index
        # (harmless — the outer one is unused).
        for i, (pose, shape, expression, openface) in enumerate(zip(*data), 1):
            flame_file = flame_dir / f"{i:06}.npy"
            if flame_file.exists():
                continue
            # Get 68 facial landmarks
            landmarks = [float(x) for x in openface[299:435]]
            # reshape the landmarks so that they are 2x51 (cut of the jaw (17 landmarks))
            target_2d_lmks = np.array(landmarks).reshape(2, -1).T[17:]
            runners.append((pose, shape, expression, target_2d_lmks, flame_file))
        # Fan the frames out over the actor pool and save each result.
        for file_name, flame_params in tqdm(
            pool.map(lambda a, v: a.fit_lmk2d_v2.remote(*v), runners),
            total=len(runners),
            leave=False,
        ):
            np.save(file_name, flame_params)
        np_files = list(flame_dir.glob("*.npy"))
        assert len(np_files) == len(openface_data)
        # Aggregate all per-frame parameter dicts into one array per key.
        results = defaultdict(list)
        for file in flame_dir.glob("*.npy"):
            for key, value in np.load(file, allow_pickle=True).item().items():
                results[key].append(value)
        with h5py.File(flame_h5_file, "w") as f:
            for key, value in results.items():
                f.create_dataset(key, data=np.vstack(value))
|
{"hexsha": "abd437329c5985915f35fccf3aa76d86cb5148e0", "size": 10775, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/feature_extraction/flame.py", "max_stars_repo_name": "jonepatr/lets_face_it", "max_stars_repo_head_hexsha": "fefba5e82d236f89703449bd517cfa5867fda09f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-10-21T09:58:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-22T08:31:57.000Z", "max_issues_repo_path": "code/feature_extraction/flame.py", "max_issues_repo_name": "jonepatr/lets_face_it", "max_issues_repo_head_hexsha": "fefba5e82d236f89703449bd517cfa5867fda09f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-05-05T07:15:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-14T14:43:42.000Z", "max_forks_repo_path": "code/feature_extraction/flame.py", "max_forks_repo_name": "jonepatr/lets_face_it", "max_forks_repo_head_hexsha": "fefba5e82d236f89703449bd517cfa5867fda09f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-10-21T09:46:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-16T11:41:03.000Z", "avg_line_length": 35.4440789474, "max_line_length": 89, "alphanum_fraction": 0.5283526682, "include": true, "reason": "import numpy", "num_tokens": 2468}
|
[STATEMENT]
lemma Gcd_image_normalize [simp]: "Gcd (normalize ` A) = Gcd A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Gcd (normalize ` A) = Gcd A
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Gcd (normalize ` A) = Gcd A
[PROOF STEP]
have "Gcd (normalize ` A) dvd a" if "a \<in> A" for a
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Gcd (normalize ` A) dvd a
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Gcd (normalize ` A) dvd a
[PROOF STEP]
from that
[PROOF STATE]
proof (chain)
picking this:
a \<in> A
[PROOF STEP]
obtain B where "A = insert a B"
[PROOF STATE]
proof (prove)
using this:
a \<in> A
goal (1 subgoal):
1. (\<And>B. A = insert a B \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
A = insert a B
goal (1 subgoal):
1. Gcd (normalize ` A) dvd a
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
A = insert a B
goal (1 subgoal):
1. Gcd (normalize ` A) dvd a
[PROOF STEP]
have "gcd (normalize a) (Gcd (normalize ` B)) dvd normalize a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. gcd (normalize a) (Gcd (normalize ` B)) dvd normalize a
[PROOF STEP]
by (rule gcd_dvd1)
[PROOF STATE]
proof (state)
this:
gcd (normalize a) (Gcd (normalize ` B)) dvd normalize a
goal (1 subgoal):
1. Gcd (normalize ` A) dvd a
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
A = insert a B
gcd (normalize a) (Gcd (normalize ` B)) dvd normalize a
[PROOF STEP]
show "Gcd (normalize ` A) dvd a"
[PROOF STATE]
proof (prove)
using this:
A = insert a B
gcd (normalize a) (Gcd (normalize ` B)) dvd normalize a
goal (1 subgoal):
1. Gcd (normalize ` A) dvd a
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Gcd (normalize ` A) dvd a
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
?a1 \<in> A \<Longrightarrow> Gcd (normalize ` A) dvd ?a1
goal (1 subgoal):
1. Gcd (normalize ` A) = Gcd A
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?a1 \<in> A \<Longrightarrow> Gcd (normalize ` A) dvd ?a1
[PROOF STEP]
have "Gcd (normalize ` A) dvd Gcd A" and "Gcd A dvd Gcd (normalize ` A)"
[PROOF STATE]
proof (prove)
using this:
?a1 \<in> A \<Longrightarrow> Gcd (normalize ` A) dvd ?a1
goal (1 subgoal):
1. Gcd (normalize ` A) dvd Gcd A &&& Gcd A dvd Gcd (normalize ` A)
[PROOF STEP]
by (auto intro!: Gcd_greatest intro: Gcd_dvd)
[PROOF STATE]
proof (state)
this:
Gcd (normalize ` A) dvd Gcd A
Gcd A dvd Gcd (normalize ` A)
goal (1 subgoal):
1. Gcd (normalize ` A) = Gcd A
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Gcd (normalize ` A) dvd Gcd A
Gcd A dvd Gcd (normalize ` A)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Gcd (normalize ` A) dvd Gcd A
Gcd A dvd Gcd (normalize ` A)
goal (1 subgoal):
1. Gcd (normalize ` A) = Gcd A
[PROOF STEP]
by (auto intro: associated_eqI)
[PROOF STATE]
proof (state)
this:
Gcd (normalize ` A) = Gcd A
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1344, "file": null, "length": 20}
|
r"""
Continued Fractions
Sage implements the field ``ContinuedFractionField`` (or ``CFF``
for short) of finite simple continued fractions. This is really
isomorphic to the field `\QQ` of rational numbers, but with different
printing and semantics. It should be possible to use this field in
most cases where one could use `\QQ`, except arithmetic is slower.
The ``continued_fraction(x)`` command returns an element of
``CFF`` that defines a continued fraction expansion to `x`. The
command ``continued_fraction(x,bits)`` computes the continued
fraction expansion of an approximation to `x` with given bits of
precision. Use ``show(c)`` to see a continued fraction nicely
typeset, and ``latex(c)`` to obtain the typeset version, e.g., for
inclusion in a paper.
EXAMPLES:
We create some example elements of the continued fraction field::
sage: c = continued_fraction([1,2]); c
[1, 2]
sage: c = continued_fraction([3,7,15,1,292]); c
[3, 7, 15, 1, 292]
sage: c = continued_fraction(pi); c
[3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14]
sage: c.value()
80143857/25510582
sage: QQ(c)
80143857/25510582
sage: RealField(200)(QQ(c) - pi)
-5.7908701643756732744264903067012149647564522968979302505514e-16
We can also create matrices, polynomials, vectors, etc., over the continued
fraction field.
::
sage: a = random_matrix(CFF, 4)
sage: a
[ [-1, 2] [-1, 1, 94] [0, 2] [-12]]
[ [-1] [0, 2] [-1, 1, 3] [0, 1, 2]]
[ [-3, 2] [0] [0, 1, 2] [-1]]
[ [1] [-1] [0, 3] [1]]
sage: f = a.charpoly()
sage: f
[1]*x^4 + ([-2, 3])*x^3 + [14, 1, 1, 1, 9, 1, 8]*x^2 + ([-13, 4, 1, 2, 1, 1, 1, 1, 1, 2, 2])*x + [-6, 1, 5, 9, 1, 5]
sage: f(a)
[[0] [0] [0] [0]]
[[0] [0] [0] [0]]
[[0] [0] [0] [0]]
[[0] [0] [0] [0]]
sage: vector(CFF, [1/2, 2/3, 3/4, 4/5])
([0, 2], [0, 1, 2], [0, 1, 3], [0, 1, 4])
AUTHORS:
- Niles Johnson (2010-08): Trac #3893: ``random_element()`` should pass on ``*args`` and ``**kwds``.
"""
from sage.structure.element import FieldElement
from sage.structure.parent_gens import ParentWithGens
from sage.libs.pari.all import pari
from field import Field
from rational_field import QQ
from integer_ring import ZZ
from infinity import infinity
from real_mpfr import is_RealNumber, RealField
from real_double import RDF
from arith import (continued_fraction_list,
convergent, convergents)
class ContinuedFractionField_class(Field):
    """
    The field of all finite continued fractions of real numbers.
    EXAMPLES::
        sage: CFF
        Field of all continued fractions
    The continued fraction field inherits from the base class
    :class:`sage.rings.ring.Field`. However it was initialised
    as such only since trac ticket #11900::
        sage: CFF.category()
        Category of fields
    """
    def __init__(self):
        """
        EXAMPLES::
            sage: ContinuedFractionField()
            Field of all continued fractions
        TESTS::
            sage: CFF._repr_option('element_is_atomic')
            False
        """
        Field.__init__(self, self)
        self._assign_names(('x'),normalize=False)
    def __cmp__(self, right):
        """
        EXAMPLES::
            sage: CFF == ContinuedFractionField()
            True
            sage: CFF == CDF
            False
            sage: loads(dumps(CFF)) == CFF
            True
        """
        # All instances of this class compare equal: comparison is by type.
        return cmp(type(self), type(right))
    def __iter__(self):
        """
        EXAMPLES::
            sage: i = 0
            sage: for a in CFF:
            ...     print a
            ...     i += 1
            ...     if i > 5: break
            ...
            [0]
            [1]
            [-1]
            [0, 2]
            [-1, 2]
            [2]
        """
        # Enumerate by converting each rational in QQ's enumeration.
        for n in QQ:
            yield self(n)
    def _latex_(self):
        r"""
        EXAMPLES::
            sage: latex(CFF)
            \Bold{CFF}
        """
        return "\\Bold{CFF}"
    def _is_valid_homomorphism_(self, codomain, im_gens):
        """
        Return whether or not the map to codomain by sending the
        continued fraction [1] of self to im_gens[0] is a
        homomorphism.
        EXAMPLES::
            sage: CFF._is_valid_homomorphism_(ZZ,[ZZ(1)])
            False
            sage: CFF._is_valid_homomorphism_(CFF,[CFF(1)])
            True
        """
        try:
            # Valid iff the image of the generator 1 is the codomain's 1.
            return im_gens[0] == codomain._coerce_(self(1))
        except TypeError:
            return False
    def _repr_(self):
        """
        EXAMPLES::
            sage: CFF
            Field of all continued fractions
        """
        return "Field of all continued fractions"
    def _coerce_impl(self, x):
        """
        Anything that implicitly coerces to the rationals or a real
        field, implicitly coerces to the continued fraction field.
        EXAMPLES:
        The additions below call _coerce_impl implicitly::
            sage: a = CFF(3/5); a
            [0, 1, 1, 2]
            sage: a + 2/5
            [1]
            sage: 2/5 + a
            [1]
            sage: 1.5 + a
            [2, 10]
            sage: a + 1.5
            [2, 10]
        """
        if is_RealNumber(x):
            return self(x)
        # Otherwise try coercion through the rationals or the real doubles.
        return self._coerce_try(x, [QQ, RDF])
    def __call__(self, x, bits=None, nterms=None):
        """
        INPUT:
        - `x` -- a number
        - ``bits`` -- integer (optional) the number of bits of the
          input number to use when computing the continued fraction.
        - ``nterms`` -- integer (optional) maximum number of terms kept.
        EXAMPLES::
            sage: CFF(1.5)
            [1, 2]
            sage: CFF(e)
            [2, 1, 2, 1, 1, 4, 1, 1, 6, 1, 1, 8, 1, 1, 10, 1, 1, 12, 1, 1]
            sage: CFF(pi)
            [3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14]
            sage: CFF([1,2,3])
            [1, 2, 3]
            sage: CFF(15/17)
            [0, 1, 7, 2]
            sage: c2 = loads(dumps(CFF))
            sage: c2(CFF(15/17)).parent() is c2
            True
        We illustrate varying the bits parameter::
            sage: CFF(pi)
            [3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14]
            sage: CFF(pi, bits=20)
            [3, 7]
            sage: CFF(pi, bits=80)
            [3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14, 2, 1, 1, 2, 2, 2, 2, 1]
            sage: CFF(pi, bits=100)
            [3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14, 2, 1, 1, 2, 2, 2, 2, 1, 84, 2, 1, 1, 15, 3]
        And varying the nterms parameter::
            sage: CFF(pi, nterms=3)
            [3, 7, 15]
            sage: CFF(pi, nterms=10)
            [3, 7, 15, 1, 292, 1, 1, 1, 2, 1]
            sage: CFF(pi, bits=10, nterms=10)
            [3, 7, 15, 1, 292, 1, 1, 1, 2, 1]
        """
        return ContinuedFraction(self, x, bits, nterms)
    def __len__(self):
        """
        EXAMPLES::
            sage: len(CFF)
            Traceback (most recent call last):
            ...
            TypeError: len() of unsized object
        """
        # The field is infinite, so len() is undefined.
        raise TypeError('len() of unsized object')
    def gens(self):
        """
        EXAMPLES::
            sage: CFF.gens()
            ([1],)
        """
        return (self(1), )
    def gen(self, n=0):
        """
        EXAMPLES::
            sage: CFF.gen()
            [1]
            sage: CFF.0
            [1]
        """
        if n == 0:
            return self(1)
        else:
            raise IndexError("n must be 0")
    def degree(self):
        """
        EXAMPLES::
            sage: CFF.degree()
            1
        """
        return 1
    def ngens(self):
        """
        EXAMPLES::
            sage: CFF.ngens()
            1
        """
        return 1
    def is_field(self, proof = True):
        """
        Return True, since the continued fraction field is a field.
        EXAMPLES::
            sage: CFF.is_field()
            True
        """
        return True
    def is_finite(self):
        """
        Return False, since the continued fraction field is not finite.
        EXAMPLES::
            sage: CFF.is_finite()
            False
        """
        return False
    def characteristic(self):
        """
        Return 0, since the continued fraction field has characteristic 0.
        EXAMPLES::
            sage: c = CFF.characteristic(); c
            0
            sage: parent(c)
            Integer Ring
        """
        return ZZ(0)
    def order(self):
        """
        EXAMPLES::
            sage: CFF.order()
            +Infinity
        """
        return infinity
    def random_element(self, *args, **kwds):
        """
        EXAMPLES::
            sage: CFF.random_element(10,10)
            [0, 4]
        Passes extra positional or keyword arguments through::
            sage: [CFF.random_element(den_bound=10, num_bound=2) for x in range(4)]
            [[-1, 1, 3], [0, 7], [0, 3], [0, 4]]
        """
        # Delegate to QQ's random element and convert the result.
        return self(QQ.random_element(*args, **kwds))
class ContinuedFraction(FieldElement):
"""
A continued fraction object.
EXAMPLES::
sage: continued_fraction(pi)
[3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14]
sage: CFF(pi)
[3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14]
"""
    def __init__(self, parent, x, bits=None, nterms=None):
        """
        Create a continued fraction from a number, a list/tuple of terms,
        or another ContinuedFraction.
        EXAMPLES::
            sage: sage.rings.contfrac.ContinuedFraction(CFF,[1,2,3,4,1,2])
            [1, 2, 3, 4, 1, 2]
            sage: sage.rings.contfrac.ContinuedFraction(CFF,[1,2,3,4,-1,2])
            Traceback (most recent call last):
            ...
            ValueError: each entry except the first must be positive
        """
        FieldElement.__init__(self, parent)
        if isinstance(x, ContinuedFraction):
            # Copy constructor: duplicate the term list.
            self._x = list(x._x)
        elif isinstance(x, (list, tuple)):
            # Explicit term list: every term after the first must be
            # positive for the expansion to be well defined.
            x = [ZZ(a) for a in x]
            for i in range(1,len(x)):
                if x[i] <= 0:
                    raise ValueError("each entry except the first must be positive")
            self._x = list(x)
        else:
            # Any other number: compute its expansion, optionally limited
            # by `bits` of precision and/or `nterms` terms.
            self._x = [ZZ(a) for a in continued_fraction_list(x, bits=bits, nterms=nterms)]
    def __getitem__(self, n):
        """
        Returns `n`-th term of the continued fraction.
        OUTPUT:
        - an integer or a continued fraction
        EXAMPLES::
            sage: a = continued_fraction(pi); a
            [3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14]
            sage: a[4]
            292
            sage: a[-1]
            14
            sage: a[2:5]
            [15, 1, 292]
            sage: a[:3]
            [3, 7, 15]
            sage: a[4:]
            [292, 1, 1, 1, 2, 1, 3, 1, 14]
            sage: a[4::2]
            [292, 1, 2, 3, 14]
        """
        if isinstance(n, slice):
            # Slicing returns a new continued fraction over the sub-list.
            start, stop, step = n.indices(len(self))
            return ContinuedFraction(self.parent(), self._x[start:stop:step])
        else:
            # A single index returns the term itself (an integer).
            return self._x[n]
def _repr_(self):
"""
EXAMPLES::
sage: a = continued_fraction(pi); a
[3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14]
sage: a.rename('continued fraction of pi')
sage: a
continued fraction of pi
"""
return str(self._x)
    def convergents(self):
        """
        Return a list of rational numbers, which are the partial
        convergents of this continued fraction.
        OUTPUT:
        - list of rational numbers
        EXAMPLES::
            sage: a = CFF(pi, bits=34); a
            [3, 7, 15, 1, 292]
            sage: a.convergents()
            [3, 22/7, 333/106, 355/113, 103993/33102]
            sage: a.value()
            103993/33102
            sage: a[:-1].value()
            355/113
        """
        # Delegates to the module-level convergents() from sage.rings.arith.
        return convergents(self._x)
    def convergent(self, n):
        """
        Return the `n`-th partial convergent to self.
        OUTPUT:
        rational number
        EXAMPLES::
            sage: a = CFF(pi, bits=34); a
            [3, 7, 15, 1, 292]
            sage: a.convergents()
            [3, 22/7, 333/106, 355/113, 103993/33102]
            sage: a.convergent(0)
            3
            sage: a.convergent(1)
            22/7
            sage: a.convergent(4)
            103993/33102
        """
        # Delegates to the module-level convergent() from sage.rings.arith.
        return convergent(self._x, n)
    def __len__(self):
        """
        Return the number of terms in this continued fraction.
        EXAMPLES::
            sage: len(continued_fraction([1,2,3,4,5]) )
            5
        """
        # The length is just the number of stored partial quotients.
        return len(self._x)
def pn(self, n):
"""
Return the numerator of the `n`-th partial convergent, computed
using the recurrence.
EXAMPLES::
sage: c = continued_fraction(pi); c
[3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14]
sage: c.pn(0), c.qn(0)
(3, 1)
sage: len(c)
13
sage: c.pn(12), c.qn(12)
(80143857, 25510582)
"""
if n < -2:
raise ValueError("n must be at least -2")
if n > len(self._x):
raise ValueError("n must be at most %s"%len(self._x))
try:
return self.__pn[n+2]
except AttributeError:
self.__pn = [0, 1, self._x[0]]
self.__qn = [1, 0, 1]
except IndexError:
pass
for k in range(len(self.__pn), n+3):
self.__pn.append(self._x[k-2]*self.__pn[k-1] + self.__pn[k-2])
self.__qn.append(self._x[k-2]*self.__qn[k-1] + self.__qn[k-2])
return self.__pn[n+2]
def qn(self, n):
"""
Return the denominator of the `n`-th partial convergent, computed
using the recurrence.
EXAMPLES::
sage: c = continued_fraction(pi); c
[3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14]
sage: c.qn(0), c.pn(0)
(1, 3)
sage: len(c)
13
sage: c.pn(12), c.qn(12)
(80143857, 25510582)
"""
if n < -2:
raise ValueError("n must be at least -2")
if n > len(self._x):
raise ValueError("n must be at most %s"%len(self._x))
try:
return self.__qn[n+2]
except (AttributeError, IndexError):
pass
self.pn(n)
return self.__qn[n+2]
def _rational_(self):
"""
EXAMPLES::
sage: a = CFF(-17/389); a
[-1, 1, 21, 1, 7, 2]
sage: a._rational_()
-17/389
sage: QQ(a)
-17/389
"""
try:
return self.__rational
except AttributeError:
r = convergents(self._x)[-1]
self.__rational =r
return r
def value(self):
"""
EXAMPLES::
sage: a = CFF(-17/389); a
[-1, 1, 21, 1, 7, 2]
sage: a.value()
-17/389
sage: QQ(a)
-17/389
"""
return self._rational_()
def numerator(self):
"""
EXAMPLES::
sage: a = CFF(-17/389); a
[-1, 1, 21, 1, 7, 2]
sage: a.numerator()
-17
"""
return self._rational_().numerator()
def denominator(self):
"""
EXAMPLES::
sage: a = CFF(-17/389); a
[-1, 1, 21, 1, 7, 2]
sage: a.denominator()
389
"""
return self._rational_().denominator()
def __int__(self):
"""
EXAMPLES::
sage: a = CFF(-17/389); a
[-1, 1, 21, 1, 7, 2]
sage: int(a)
-1
"""
return int(self._rational_())
def __long__(self):
"""
EXAMPLES::
sage: a = CFF(-17/389); a
[-1, 1, 21, 1, 7, 2]
sage: long(a)
-1L
"""
return long(self._rational_())
def __float__(self):
"""
EXAMPLES::
sage: a = CFF(-17/389); a
[-1, 1, 21, 1, 7, 2]
sage: float(a)
-0.043701799485861184
"""
return float(self._rational_())
def _add_(self, right):
"""
EXAMPLES::
sage: a = CFF(-17/389)
sage: b = CFF(1/389)
sage: c = a+b; c
[-1, 1, 23, 3, 5]
sage: c.value()
-16/389
"""
return ContinuedFraction(self.parent(),
self._rational_() + right._rational_())
def _sub_(self, right):
    """
    Subtract two continued fractions by subtracting their rational
    values and re-expanding the difference.

    EXAMPLES::

        sage: a = CFF(-17/389)
        sage: b = CFF(1/389)
        sage: c = a - b; c
        [-1, 1, 20, 1, 1, 1, 1, 3]
        sage: c.value()
        -18/389
    """
    difference = self._rational_() - right._rational_()
    return ContinuedFraction(self.parent(), difference)
def _mul_(self, right):
    """
    Multiply two continued fractions by multiplying their rational
    values and re-expanding the product.

    EXAMPLES::

        sage: a = CFF(-17/389)
        sage: b = CFF(1/389)
        sage: c = a * b; c
        [-1, 1, 8900, 4, 4]
        sage: c.value(), (-1/389)*(17/389)
        (-17/151321, -17/151321)
    """
    product = self._rational_() * right._rational_()
    return ContinuedFraction(self.parent(), product)
def _div_(self, right):
    """
    Divide two continued fractions by dividing their rational values
    and re-expanding the quotient.

    EXAMPLES::

        sage: a = CFF(-17/389)
        sage: b = CFF(1/389)
        sage: c = a / b; c
        [-17]
        sage: c.value(), (17/389) / (-1/389)
        (-17, -17)
    """
    quotient = self._rational_() / right._rational_()
    return ContinuedFraction(self.parent(), quotient)
def __cmp__(self, right):
    """
    Three-way comparison (Python 2 protocol): compare the rational
    values of the two continued fractions.

    EXAMPLES::

        sage: a = CFF(-17/389)
        sage: b = CFF(1/389)
        sage: a < b
        True
        sage: QQ(a) < QQ(b)
        True
        sage: QQ(a)
        -17/389
        sage: QQ(b)
        1/389
    """
    # Python 2 only: ``cmp`` and ``__cmp__`` were removed in Python 3.
    return cmp(self._rational_(), right._rational_())
def _latex_(self):
"""
EXAMPLES::
sage: a = CFF(-17/389)
sage: latex(a)
-1+ \frac{\displaystyle 1}{\displaystyle 1+ \frac{\displaystyle 1}{\displaystyle 21+ \frac{\displaystyle 1}{\displaystyle 1+ \frac{\displaystyle 1}{\displaystyle 7+ \frac{\displaystyle 1}{\displaystyle 2}}}}}
"""
v = self._x
if len(v) == 0:
return '0'
s = str(v[0])
for i in range(1,len(v)):
s += '+ \\frac{\\displaystyle 1}{\\displaystyle %s'%v[i]
s += '}'*(len(v)-1)
return s
def sqrt(self, prec=53, all=False):
    """
    Return a continued fraction approximation to the square root of the
    value of this continued fraction.

    INPUT:

    - ``prec`` -- integer (default: 53) precision of the approximated
      square root

    - ``all`` -- bool (default: False); if True, return all square
      roots of self, instead of just one.

    EXAMPLES::

        sage: a = CFF(4/19); a
        [0, 4, 1, 3]
        sage: b = a.sqrt(); b
        [0, 2, 5, 1, 1, 2, 1, 16, 1, 2, 1, 1, 5, 4, 5, 1, 1, 2, 1]
        sage: a = CFF(4/25).sqrt(); a
        [0, 2, 2]
        sage: a.value()
        2/5
    """
    value = self._rational_()
    if value < 0:
        raise ValueError("self must be positive")
    # Delegate to the rational square root, then re-expand the result(s).
    roots = value.sqrt(all=all, prec=prec)
    if all:
        return [ContinuedFraction(self.parent(), root) for root in roots]
    return ContinuedFraction(self.parent(), roots)
def list(self):
    """
    Return copy of the underlying list of this continued fraction.

    EXAMPLES::

        sage: a = CFF(e); v = a.list(); v
        [2, 1, 2, 1, 1, 4, 1, 1, 6, 1, 1, 8, 1, 1, 10, 1, 1, 12, 1, 1]
        sage: v[0] = 5
        sage: a
        [2, 1, 2, 1, 1, 4, 1, 1, 6, 1, 1, 8, 1, 1, 10, 1, 1, 12, 1, 1]
    """
    # list() copies, so mutating the result cannot change self._x
    # (demonstrated by the doctest above).
    return list(self._x)
def __hash__(self):
    """
    Return hash of self, which is the same as the hash of the value
    of self, as a rational number.

    EXAMPLES::

        sage: a = CFF(e)
        sage: hash(a)
        19952398
        sage: hash(QQ(a))
        19952398
    """
    # Hashing through the rational value keeps equal continued
    # fractions and equal rationals in the same hash bucket.
    return hash(self._rational_())
def __invert__(self):
    """
    Return the multiplicative inverse of self.

    EXAMPLES::

        sage: a = CFF(e)
        sage: b = ~a; b
        [0, 2, 1, 2, 1, 1, 4, 1, 1, 6, 1, 1, 8, 1, 1, 10, 1, 1, 12, 2]
        sage: b*a
        [1]
    """
    inverse = ~self._rational_()
    return ContinuedFraction(self.parent(), inverse)
def __pow__(self, n):
    """
    Return self to the power of `n`.

    EXAMPLES::

        sage: a = CFF([1,2,3]); a
        [1, 2, 3]
        sage: a^3
        [2, 1, 10, 1, 4, 1, 4]
        sage: QQ(a)^3 == QQ(a^3)
        True
        sage: a^(-3)
        [0, 2, 1, 10, 1, 4, 1, 4]
        sage: QQ(a)^(-3) == QQ(a^(-3))
        True
    """
    power = self._rational_() ** n
    return ContinuedFraction(self.parent(), power)
def __neg__(self):
    """
    Return additive inverse of self.

    EXAMPLES::

        sage: a = CFF(-17/389); a
        [-1, 1, 21, 1, 7, 2]
        sage: -a
        [0, 22, 1, 7, 2]
        sage: QQ(-a)
        17/389
    """
    negated = -self._rational_()
    return ContinuedFraction(self.parent(), negated)
def __abs__(self):
    """
    Return absolute value of self.

    EXAMPLES::

        sage: a = CFF(-17/389); a
        [-1, 1, 21, 1, 7, 2]
        sage: abs(a)
        [0, 22, 1, 7, 2]
        sage: QQ(abs(a))
        17/389
    """
    magnitude = abs(self._rational_())
    return ContinuedFraction(self.parent(), magnitude)
def is_one(self):
    """
    Return True if self is one.

    EXAMPLES::

        sage: continued_fraction(1).is_one()
        True
        sage: continued_fraction(2).is_one()
        False
    """
    return self._rational_().is_one()
def __nonzero__(self):
    """
    Return False if self is zero.

    EXAMPLES::

        sage: continued_fraction(0).is_zero()
        True
        sage: continued_fraction(1).is_zero()
        False
    """
    # Python 2 truth-value protocol: truthy iff the value is nonzero.
    return not self._rational_().is_zero()
def _pari_(self):
    """
    Return PARI list corresponding to this continued fraction.

    EXAMPLES::

        sage: c = continued_fraction(0.12345); c
        [0, 8, 9, 1, 21, 1, 1]
        sage: pari(c)
        [0, 8, 9, 1, 21, 1, 1]
    """
    # Hand the raw list of partial quotients to the PARI interface.
    return pari(self._x)
def _interface_init_(self, I=None):
"""
Return list representation for other systems corresponding to
this continued fraction.
EXAMPLES::
sage: c = continued_fraction(0.12345); c
[0, 8, 9, 1, 21, 1, 1]
sage: gp(c)
[0, 8, 9, 1, 21, 1, 1]
sage: gap(c)
[ 0, 8, 9, 1, 21, 1, 1 ]
sage: maxima(c)
[0,8,9,1,21,1,1]
"""
return str(self._x)
def additive_order(self):
    """
    Return the additive order of this continued fraction,
    which we defined to be the additive order of its value.

    EXAMPLES::

        sage: CFF(-1).additive_order()
        +Infinity
        sage: CFF(0).additive_order()
        1
    """
    return self.value().additive_order()
def multiplicative_order(self):
    """
    Return the multiplicative order of this continued fraction,
    which we defined to be the multiplicative order of its value.

    EXAMPLES::

        sage: CFF(-1).multiplicative_order()
        2
        sage: CFF(1).multiplicative_order()
        1
        sage: CFF(pi).multiplicative_order()
        +Infinity
    """
    return self.value().multiplicative_order()
# The unique (module-level singleton) field of all continued fractions.
CFF = ContinuedFractionField_class()
def ContinuedFractionField():
    """
    Return the (unique) field of all continued fractions.

    EXAMPLES::

        sage: ContinuedFractionField()
        Field of all continued fractions
    """
    # Always the same singleton instance created at import time.
    return CFF
def continued_fraction(x, bits=None, nterms=None):
    """
    Return the truncated continued fraction expansion of the real number
    `x`, computed with an interval floating point approximation of `x`
    to the given number of bits of precision. The returned continued
    fraction is a list-like object, with a value method and partial
    convergents method.

    If bits is not given, then use the number of valid bits of
    precision of `x`, if `x` is a floating point number, or 53 bits
    otherwise. If nterms is given, the precision is increased until
    the specified number of terms can be computed, if possible.

    INPUT:

    - `x` -- number

    - ``bits`` -- None (default) or a positive integer

    - ``nterms`` -- None (default) or a positive integer

    OUTPUT:

    - a continued fraction

    EXAMPLES::

        sage: v = continued_fraction(sqrt(2)); v
        [1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
        sage: v = continued_fraction(sqrt(2), nterms=22); v
        [1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
        sage: type(v)
        <class 'sage.rings.contfrac.ContinuedFraction'>
        sage: parent(v)
        Field of all continued fractions
        sage: v.value()
        131836323/93222358
        sage: RR(v.value()) == RR(sqrt(2))
        True
        sage: v.convergents()
        [1, 3/2, 7/5, 17/12, 41/29, 99/70, 239/169,...131836323/93222358]
        sage: [RR(x) for x in v.convergents()]
        [1.00000000000000, 1.50000000000000, 1.40000000000000, 1.41666666666667, ...1.41421356237310]
        sage: continued_fraction(sqrt(2), 10)
        [1, 2, 2]
        sage: v.numerator()
        131836323
        sage: v.denominator()
        93222358
        sage: [v.pn(i) for i in range(10)]
        [1, 3, 7, 17, 41, 99, 239, 577, 1393, 3363]
        sage: [v.qn(i) for i in range(10)]
        [1, 2, 5, 12, 29, 70, 169, 408, 985, 2378]

    Here are some more examples::

        sage: continued_fraction(e, bits=20)
        [2, 1, 2, 1, 1, 4, 1, 1]
        sage: continued_fraction(e, bits=30)
        [2, 1, 2, 1, 1, 4, 1, 1, 6, 1, 1, 8]
        sage: continued_fraction(RealField(200)(e))
        [2, 1, 2, 1, 1, 4, 1, 1, 6, ...36, 1, 1, 38, 1, 1]

    Initial rounding can result in incorrect trailing digits::

        sage: continued_fraction(RealField(39)(e))
        [2, 1, 2, 1, 1, 4, 1, 1, 6, 1, 1, 8, 1, 1, 10, 2]
        sage: continued_fraction(RealIntervalField(39)(e))
        [2, 1, 2, 1, 1, 4, 1, 1, 6, 1, 1, 8, 1, 1, 10]
    """
    # Delegate construction to the CFF singleton's element constructor.
    return CFF(x, bits=bits, nterms=nterms)
|
{"hexsha": "cfd8a42a3431b89e6913158b7836b066c7d2b941", "size": 28019, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sage/rings/contfrac.py", "max_stars_repo_name": "bopopescu/classic_diff_geom", "max_stars_repo_head_hexsha": "2b1d88becbc8cb30962e0995cc78e429e0f5589f", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2015-08-11T05:05:47.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-15T17:27:25.000Z", "max_issues_repo_path": "src/sage/rings/contfrac.py", "max_issues_repo_name": "bopopescu/classic_diff_geom", "max_issues_repo_head_hexsha": "2b1d88becbc8cb30962e0995cc78e429e0f5589f", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sage/rings/contfrac.py", "max_forks_repo_name": "bopopescu/classic_diff_geom", "max_forks_repo_head_hexsha": "2b1d88becbc8cb30962e0995cc78e429e0f5589f", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-24T11:56:55.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-24T11:56:55.000Z", "avg_line_length": 26.7868068834, "max_line_length": 220, "alphanum_fraction": 0.4673614333, "include": true, "reason": "from sage", "num_tokens": 8650}
|
import numpy as np
import pyworld as pw
import soundfile as sf
import librosa, glob
from resemblyzer import VoiceEncoder
# All audio in this module is loaded/written at this sample rate (Hz).
SAMPLE_RATE = 16000
# Bounds used by logsp_norm/logsp_unnorm to scale the log spectral
# envelope into [0, 1]; presumably empirical — TODO confirm provenance.
SP_MIN = -38.6925
SP_MAX = 4.3340
def load_wav(filename):
    """Load an audio file resampled to SAMPLE_RATE; returns the 1-D signal."""
    # librosa.load returns (signal, sample_rate); only the signal is kept.
    x = librosa.load(filename, sr=SAMPLE_RATE)[0]
    return x
def save_wav(y, filename) :
    """Write waveform ``y`` to ``filename`` at SAMPLE_RATE."""
    sf.write(filename, y, SAMPLE_RATE)
def logsp_norm(sp):
    """Scale a log spectral envelope into [0, 1] using SP_MIN/SP_MAX, clipping overshoot."""
    scaled = (sp - SP_MIN) / (SP_MAX - SP_MIN)
    return np.clip(scaled, 0, 1)
def logsp_unnorm(nsp):
    """Invert logsp_norm: map [0, 1] values back to the SP_MIN..SP_MAX range."""
    span = SP_MAX - SP_MIN
    return nsp * span + SP_MIN
def world_split(wav, use_ap=True):
    """Decompose a waveform with the WORLD vocoder.

    Returns (f0, t, sp) when use_ap is False, otherwise (f0, t, sp, ap):
    fundamental frequency, time axis, spectral envelope and aperiodicity.
    """
    samples = wav.astype(np.float64)  # pyworld requires float64 input
    f0, t = pw.harvest(samples, SAMPLE_RATE)
    sp = pw.cheaptrick(samples, f0, t, SAMPLE_RATE)
    if not use_ap:
        return f0, t, sp
    ap = pw.d4c(samples, f0, t, SAMPLE_RATE)
    return f0, t, sp, ap
def world_join(f0, sp, ap) :
    """Resynthesize a waveform from WORLD parameters (f0, envelope, aperiodicity)."""
    return pw.synthesize(f0, sp, ap, SAMPLE_RATE)
def f0_conversion(f0, src_f0_logmean, src_f0_logstd, tgt_f0_logmean, tgt_f0_logstd):
    """Map log-f0 statistics from a source speaker to a target speaker.

    Voiced frames are z-normalized in log space with the source statistics
    and rescaled with the target statistics; unvoiced frames (f0 == 0)
    stay at zero.
    """
    # np.ma.log masks the zero (unvoiced) entries; .data exposes the raw
    # values, which are overwritten below for unvoiced frames anyway.
    log_f0 = np.ma.log(f0).data
    normalized = (log_f0 - src_f0_logmean) / src_f0_logstd
    converted = np.exp(normalized * tgt_f0_logstd + tgt_f0_logmean)
    converted[f0 == 0] = 0
    return converted
def getConvertInfo(wav_gpath):
    """Compute conversion statistics for all wav files matching ``wav_gpath``.

    Returns ``(embed, f0_logmean, f0_logstd)``: the mean speaker embedding
    over all matched utterances, and the mean/std of log-f0 over the voiced
    frames of the concatenated audio.
    """
    encoder = VoiceEncoder()
    waves = []
    embeds = []
    for path in glob.glob(wav_gpath):
        wav = load_wav(path)
        waves.append(wav)
        embeds.append(encoder.embed_utterance(wav))
    # BUG FIX: the original used ``np.array(wav).flatten()`` — i.e. only the
    # LAST loaded file — so the f0 statistics ignored every other utterance.
    # Concatenate all loaded waveforms instead.
    wave = np.concatenate(waves)
    f0, _, _ = world_split(wave, use_ap=False)
    # Statistics are taken over voiced frames only (f0 > 0).
    logf0 = np.log(f0[np.nonzero(f0)])
    f0_logmean = logf0.mean()
    f0_logstd = logf0.std()
    embed = np.array(embeds).mean(axis=0)
    return embed, f0_logmean, f0_logstd
|
{"hexsha": "5c02438391977dc1c8fe3e8c05f3f990eef08e89", "size": 1572, "ext": "py", "lang": "Python", "max_stars_repo_path": "util.py", "max_stars_repo_name": "SuzukiDaishi/WorldAutoVCApp", "max_stars_repo_head_hexsha": "cb6f81ed738e7dbbb68d9ac0dfef028668b3ff33", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "util.py", "max_issues_repo_name": "SuzukiDaishi/WorldAutoVCApp", "max_issues_repo_head_hexsha": "cb6f81ed738e7dbbb68d9ac0dfef028668b3ff33", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util.py", "max_forks_repo_name": "SuzukiDaishi/WorldAutoVCApp", "max_forks_repo_head_hexsha": "cb6f81ed738e7dbbb68d9ac0dfef028668b3ff33", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-26T07:42:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T07:42:45.000Z", "avg_line_length": 28.0714285714, "max_line_length": 110, "alphanum_fraction": 0.6628498728, "include": true, "reason": "import numpy", "num_tokens": 518}
|
import sys
import copy
import numpy as np
#Using RMC profile and Keen's definition of G(r) and gij(r) in order to generate S(Q)
#Defines Qi = rho* integral 4*pi*r*G(r)*sin(Qr) dr or integral D(r)sin(Qr) dr from Martin's total scattering formalism
def QiQ(Q, G, rlist, rho):
    """Return Q*i(Q) = 4*pi*rho * sum_r dr * r * G(r) * sin(Q*r).

    ``G`` holds the total correlation function sampled on the uniform
    grid ``rlist`` (dr is taken from the first two grid points), ``Q`` is
    the scattering-vector magnitude and ``rho`` the number density.
    """
    # BUG FIX: the original body summed the module-level global ``Gr``
    # instead of the ``G`` parameter, which was silently ignored.
    r = np.asarray(rlist, dtype=float)
    g = np.asarray(G, dtype=float)
    dr = r[1] - r[0]
    integral = np.sum(dr * r * g * np.sin(Q * r))
    return 4 * np.pi * rho * integral
# Reminder: rho further down is hard-coded and must match the system.
print("Did you remain to change rho (number density N/V)")
# Per-element scattering lengths b used in the c_i*c_j*b_i*b_j weights;
# units/source not stated here — TODO confirm against tabulated values.
listb=[['B', 5.30], ['Na', 3.64], ['Al', 3.449], ['Ca', 4.7], ['Si', 4.1491], ['O', 5.803],['Zr',7.160],['U', 10.47]]
class Read_field:
    """Parse a DL_POLY FIELD file.

    Populates ``numatomtypes`` (the number after "molecular types") and
    ``atomlist`` (a list of [atom label, nummols count] pairs).
    """

    def __init__(self, input_file):
        # Open FIELD file
        # NOTE(review): the file handle is kept open and never closed;
        # consider a context manager.
        self.field_file = open(input_file, 'r')
        # extract lines of FIELD file
        self.lines = self.field_file.readlines()
        # strip linebreak from strings in lines
        self.lines = [line.rstrip('\n') for line in self.lines]
        self.lines = [line.split() for line in self.lines]
        # identify number of atom types
        # NOTE(review): line[i+1]/line[i+2] raise IndexError if "molecular"
        # is one of the last two tokens — assumes well-formed FIELD input.
        for line in self.lines:
            for i in range(len(line)):
                if (line[i] == "molecular") and (line[i+1] == "types"):
                    self.numatomtypes = int(line[i+2])
        # identify element types and number of each element type
        # The atom label sits two lines below each "nummols" line.
        self.atomlist=[]
        for i in range(len(self.lines)):
            if len(self.lines[i]) == 0:
                pass
            else:
                if (self.lines[i][0] == "nummols"):
                    atomprops=[]
                    atomprops.append(self.lines[i+2][0]) #append atom type
                    atomprops.append(int(self.lines[i][1])) #append nummols
                    self.atomlist.append(atomprops)
field=Read_field("FIELD")

#Open rdf_all.dat produced by bash script from RDFDAT
rdf_file = open("rdf_all.dat",'r')
lines=rdf_file.readlines()
lines = [line.split() for line in lines]
rdf_file.close()

# Parse the header line into element pairs: after the leading "#", tokens
# come in twos, one pair per partial g_ij(r) column of the data block.
pair_atoms=[]
curpair=['','']
cnt=0
for atom in lines[0]:
    if cnt==0 and atom!="#":
        curpair[0]=atom
        cnt=1
    elif cnt==1:
        curpair[1]=atom
        pair_atoms.append(curpair.copy())
        cnt=0

Natom = copy.deepcopy(field.atomlist)

#Define total number of atoms
Ntot=0
for i in range(len(Natom)):
    Ntot=Ntot+Natom[i][1]

#Define c1c2b1b2 for each pair
# Weight per column: A * c_i * c_j * b_i * b_j, with A = 2 for unlike
# pairs (each unordered pair appears once) and A = 1 for like pairs.
coln=[]
for pair in pair_atoms:
    for element in Natom:
        if pair[0]==element[0]:
            N1=element[1]
        if pair[1]==element[0]:
            N2=element[1]
    if pair[0]==pair[1]:
        A=1.0
    if pair[0]!=pair[1]:
        A=2.0
    c1c2=A*float(N1*N2)/(float(Ntot)*float(Ntot))
    for element in listb:
        if pair[0]==element[0]:
            b1=element[1]
        if pair[1]==element[0]:
            b2=element[1]
    b1b2= b1*b2
    coln.append(c1c2*b1b2)
#Define Gr as Sum_ij (c_i*c_j*b_i*b_j*(g_ij(r)-1))
# Column 0 of each data row is r; the remaining columns are the partial
# g_ij(r) values, weighted by the coln factors computed above.
TotGr=[]
for i in range(1,len(lines)):
    Gr=0.0
    for j in range(1,len(lines[i])):
        Gr=Gr+(float(lines[i][j])-1.0)*coln[(j-1)]
    TotGr.append([float(lines[i][0]),Gr])

# Write the unnormalised total G(r).
outfile=open('Grtot.dat','w')
for rpos in TotGr:
    info="%16.8f %16.12f\n" % (rpos[0],rpos[1])
    outfile.write(info)
outfile.close()

#Define normalisation term for Gdash where Gdash-1=Gr/(Sum_ijc_i*b_i)^2 from Keen JAC (2000)
sumbici = 0.0
for atomspec in Natom:
    for element in listb:
        if atomspec[0]==element[0]:
            bici=(float(atomspec[1])/float(Ntot))*element[1]
    sumbici = sumbici + bici
normfactor=sumbici**(-2)
print("(Sum_i bi*ci)^-2 = %16.8f" % (normfactor))

# G'(r) = normfactor * G(r) + 1 (Keen's normalised form).
Grdash = copy.deepcopy(TotGr)
for i in range(len(TotGr)):
    Grdash[i][1]=(TotGr[i][1]*normfactor)+1.0

outfile2=open('Grdash.dat','w')
for rpos in Grdash:
    info="%16.8f %16.12f\n" % (float(rpos[0]),rpos[1])
    outfile2.write(info)
outfile2.close()
#grdata=pd.read_csv("Grtot.dat",header=None,delim_whitespace=True)
# Transpose TotGr so row 0 is the r grid and row 1 is G(r).
gr=np.array(TotGr)
gr=np.transpose(gr)
print(gr)
rlist=gr[0]
Gr=gr[1]

# Q grid on which Qi(Q), F(Q) and S(Q) are evaluated.
Qlist=np.arange(0.1,45.1,0.05)
QiQlist=[]
FQlist=[]
SQlist=[]
# NOTE(review): rho is hard-coded for one particular configuration
# (N / box_length**3) — change it for a different cell, as the banner
# printed at the top of this script warns.
rho=4997.0/40.1294647120**3
for Q in Qlist:
    # NOTE(review): QiQ is evaluated three times per Q with identical
    # arguments; one call reused for all three lists would suffice.
    QiQlist.append(QiQ(Q,Gr,rlist,rho))
    FQlist.append(QiQ(Q,Gr,rlist,rho)/Q)
    SQlist.append((normfactor*QiQ(Q,Gr,rlist,rho)/Q)+1.0)

# Write the three curves to separate two-column files.
Qoutfile=open('Qi_tot.dat','w')
FQoutfile=open('FQ_tot.dat','w')
SQoutfile=open('SQ_tot.dat','w')
for i in range(len(Qlist)):
    info="%16.8f %16.12f\n" % (Qlist[i],QiQlist[i])
    Qoutfile.write(info)
Qoutfile.close()
for i in range(len(Qlist)):
    info="%16.8f %16.12f\n" % (Qlist[i],FQlist[i])
    FQoutfile.write(info)
FQoutfile.close()
for i in range(len(Qlist)):
    info="%16.8f %16.12f\n" % (Qlist[i],SQlist[i])
    SQoutfile.write(info)
SQoutfile.close()
|
{"hexsha": "f4108c773d64f8d6cbe337316b8851839a163ba1", "size": 4708, "ext": "py", "lang": "Python", "max_stars_repo_path": "SFQiG_maker/make_SFQiG.py", "max_stars_repo_name": "ccp5UK/dlpoly-py", "max_stars_repo_head_hexsha": "a7f2f83dd97b963248d706894dc1d12f7fec16d8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SFQiG_maker/make_SFQiG.py", "max_issues_repo_name": "ccp5UK/dlpoly-py", "max_issues_repo_head_hexsha": "a7f2f83dd97b963248d706894dc1d12f7fec16d8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SFQiG_maker/make_SFQiG.py", "max_forks_repo_name": "ccp5UK/dlpoly-py", "max_forks_repo_head_hexsha": "a7f2f83dd97b963248d706894dc1d12f7fec16d8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1555555556, "max_line_length": 118, "alphanum_fraction": 0.6008920986, "include": true, "reason": "import numpy", "num_tokens": 1513}
|
[STATEMENT]
lemma project_constrains_mono:
"[| D \<subseteq> C; project h C F \<in> A co B |] ==> project h D F \<in> A co B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>D \<subseteq> C; project h C F \<in> A co B\<rbrakk> \<Longrightarrow> project h D F \<in> A co B
[PROOF STEP]
apply (auto simp add: constrains_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>act x xa. \<lbrakk>D \<subseteq> C; act \<in> Acts F; (xa, x) \<in> project_act h (Restrict D act); xa \<in> A; A \<subseteq> B; \<forall>act\<in>Acts F. project_act h (Restrict C act) `` A \<subseteq> B\<rbrakk> \<Longrightarrow> x \<in> B
[PROOF STEP]
apply (drule project_act_mono, blast)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 310, "file": null, "length": 3}
|
r"""
Ordination methods (:mod:`skbio.stats.ordination`)
==================================================
.. currentmodule:: skbio.stats.ordination
This module contains several ordination methods, including Principal
Coordinate Analysis, Correspondence Analysis, Redundancy Analysis and
Canonical Correspondence Analysis.
Functions
---------
.. autosummary::
:toctree: generated/
ca
pcoa
cca
rda
mean_and_std
corr
scale
svd_rank
Examples
--------
This is an artificial dataset (table 11.3 in [1]_) that represents fish
abundance in different sites (`Y`, the response variables) and
environmental variables (`X`, the explanatory variables).
>>> import numpy as np
>>> import pandas as pd
First we need to construct our explanatory variable dataset `X`.
>>> X = np.array([[1.0, 0.0, 1.0, 0.0],
... [2.0, 0.0, 1.0, 0.0],
... [3.0, 0.0, 1.0, 0.0],
... [4.0, 0.0, 0.0, 1.0],
... [5.0, 1.0, 0.0, 0.0],
... [6.0, 0.0, 0.0, 1.0],
... [7.0, 1.0, 0.0, 0.0],
... [8.0, 0.0, 0.0, 1.0],
... [9.0, 1.0, 0.0, 0.0],
... [10.0, 0.0, 0.0, 1.0]])
>>> transects = ['depth', 'substrate_coral', 'substrate_sand',
... 'substrate_other']
>>> sites = ['site1', 'site2', 'site3', 'site4', 'site5', 'site6', 'site7',
... 'site8', 'site9', 'site10']
>>> X = pd.DataFrame(X, sites, transects)
Then we need to create a dataframe with the information about the species
observed at different sites.
>>> species = ['specie1', 'specie2', 'specie3', 'specie4', 'specie5',
... 'specie6', 'specie7', 'specie8', 'specie9']
>>> Y = np.array([[1, 0, 0, 0, 0, 0, 2, 4, 4],
... [0, 0, 0, 0, 0, 0, 5, 6, 1],
... [0, 1, 0, 0, 0, 0, 0, 2, 3],
... [11, 4, 0, 0, 8, 1, 6, 2, 0],
... [11, 5, 17, 7, 0, 0, 6, 6, 2],
... [9, 6, 0, 0, 6, 2, 10, 1, 4],
... [9, 7, 13, 10, 0, 0, 4, 5, 4],
... [7, 8, 0, 0, 4, 3, 6, 6, 4],
... [7, 9, 10, 13, 0, 0, 6, 2, 0],
... [5, 10, 0, 0, 2, 4, 0, 1, 3]])
>>> Y = pd.DataFrame(Y, sites, species)
We can now perform canonical correspondence analysis. Matrix `X` contains a
continuous variable (depth) and a categorical one (substrate type) encoded
using a one-hot encoding.
>>> from skbio.stats.ordination import cca
We explicitly need to avoid perfect collinearity, so we'll drop one of the
substrate types (the last column of `X`).
>>> del X['substrate_other']
>>> ordination_result = cca(Y, X, scaling=2)
Exploring the results we see that the first three axes explain about
80% of all the variance.
>>> ordination_result.proportion_explained
CCA1 0.466911
CCA2 0.238327
CCA3 0.100548
CCA4 0.104937
CCA5 0.044805
CCA6 0.029747
CCA7 0.012631
CCA8 0.001562
CCA9 0.000532
dtype: float64
References
----------
.. [1] Legendre P. and Legendre L. 1998. Numerical Ecology. Elsevier,
Amsterdam.
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from skbio.util import TestRunner
from ._redundancy_analysis import rda
from ._correspondence_analysis import ca
from ._canonical_correspondence_analysis import cca
from ._principal_coordinate_analysis import pcoa
from ._utils import (mean_and_std, scale, svd_rank, corr, e_matrix, f_matrix)
# Public API of the ordination subpackage.
__all__ = ['ca', 'rda', 'cca', 'pcoa',
           'mean_and_std', 'scale', 'svd_rank', 'corr',
           'e_matrix', 'f_matrix']

# Entry point for running this subpackage's unit tests.
test = TestRunner(__file__).test
|
{"hexsha": "b2704c6a11141ccde5344a3b53af29dec4695633", "size": 3853, "ext": "py", "lang": "Python", "max_stars_repo_path": "skbio/stats/ordination/__init__.py", "max_stars_repo_name": "squirrelo/scikit-bio", "max_stars_repo_head_hexsha": "f9016283638ef49ffccb3bb5f79e5a421462cfd1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skbio/stats/ordination/__init__.py", "max_issues_repo_name": "squirrelo/scikit-bio", "max_issues_repo_head_hexsha": "f9016283638ef49ffccb3bb5f79e5a421462cfd1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skbio/stats/ordination/__init__.py", "max_forks_repo_name": "squirrelo/scikit-bio", "max_forks_repo_head_hexsha": "f9016283638ef49ffccb3bb5f79e5a421462cfd1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1015625, "max_line_length": 78, "alphanum_fraction": 0.5593044381, "include": true, "reason": "import numpy", "num_tokens": 1280}
|
#pragma once
#include "BMP180.hpp"
#include <boost/endian/arithmetic.hpp>
#include <limits>
namespace icarus
{
namespace registers::bmp180
{
    // Factory calibration coefficients stored in the BMP180 EEPROM,
    // read starting at register 0xAA; all fields are big-endian on the wire.
    // (Name keeps the original "Callibration" spelling — part of the API.)
    struct Callibration
    {
        enum { address = 0xAA };

        boost::endian::big_int16_t ac1, ac2, ac3;
        boost::endian::big_uint16_t ac4, ac5, ac6;
        boost::endian::big_int16_t b1, b2, mb, mc, md;
    };

    // Measurement-select values written to the control register.
    enum class MeasurementControl : uint8_t
    {
        pressure = 0b10100,
        temperature = 0b01110,
    };

    // Pressure oversampling setting (corresponds to oss = 0..3).
    enum class Oversampling : uint8_t
    {
        times1,
        times2,
        times4,
        times8,
    };

    // Control register (0xF4): selects the measurement, starts conversion
    // and sets the oversampling, packed as bit fields.
    struct Control
    {
        enum { address = 0xF4 };

        MeasurementControl measurementControl : 5;
        bool startOfConversion : 1;
        Oversampling oversampling : 2;
    };

    // Raw 16-bit uncompensated temperature reading (register 0xF6).
    struct TemperatureReading
    {
        enum { address = 0xF6 };

        boost::endian::big_uint16_t temperature;
    };

    // Raw 24-bit uncompensated pressure reading (register 0xF6),
    // to be right-shifted by (8 - oss) before compensation.
    struct PressureReading
    {
        enum { address = 0xF6 };

        boost::endian::big_uint24_t pressure;
    };

    // Chip identification register; initialize() expects it to read 0x55.
    struct ChipId
    {
        enum { address = 0xD0 };

        uint8_t id;
    };
}
// Construct the driver around a register-bank transport. Temperature and
// pressure read as NaN until initialize() and the first read() complete;
// the zero countdown forces the first read() to be a temperature cycle.
template<typename RegisterBank>
BMP180<RegisterBank>::BMP180(RegisterBank * device) :
    mDevice(device),
    mPeriodsToTemperatureUpdate(0),
    mTemperature(std::numeric_limits<float>::quiet_NaN()),
    mPressure(std::numeric_limits<float>::quiet_NaN())
{}
// Verify the chip identity (0x55), copy the factory calibration
// coefficients into native-endian members, and start the first
// temperature conversion. Throws std::runtime_error on a wrong chip ID.
template<typename RegisterBank>
void BMP180<RegisterBank>::initialize()
{
    using namespace registers::bmp180;

    mDevice->template read<ChipId>([](auto & reg){
        if (reg.id != 0x55) {
            throw std::runtime_error("Unrecognized bmp180 ID");
        }
    });

    // Each assignment converts a big-endian wire value to host order.
    mDevice->template read<Callibration>([this](auto const & cal) {
        mCallibration.ac1 = cal.ac1;
        mCallibration.ac2 = cal.ac2;
        mCallibration.ac3 = cal.ac3;
        mCallibration.ac4 = cal.ac4;
        mCallibration.ac5 = cal.ac5;
        mCallibration.ac6 = cal.ac6;
        mCallibration.b1 = cal.b1;
        mCallibration.b2 = cal.b2;
        mCallibration.mb = cal.mb;
        mCallibration.mc = cal.mc;
        mCallibration.md = cal.md;
    });

    startTemperatureRead();
}
// One polling step: collect the result of the conversion started on the
// previous call, then start the next one. Temperature is refreshed once
// every 20 periods (countdown reset to 20); all other periods read pressure.
template<typename RegisterBank>
void BMP180<RegisterBank>::read()
{
    if (mPeriodsToTemperatureUpdate == 0) {
        readTemperature();
        mPeriodsToTemperatureUpdate = 20;
    } else {
        readPressure();
    }

    --mPeriodsToTemperatureUpdate;

    // Schedule the conversion that the NEXT call to read() will collect.
    if (mPeriodsToTemperatureUpdate == 0) {
        startTemperatureRead();
    } else {
        startPressureRead();
    }
}
// Write the control register to begin a temperature conversion.
template<typename RegisterBank>
void BMP180<RegisterBank>::startTemperatureRead()
{
    using namespace registers::bmp180;
    mDevice->template write<Control>([this](auto & reg) {
        reg.measurementControl = MeasurementControl::temperature;
        reg.oversampling = Oversampling::times1;
        reg.startOfConversion = true;
    });
}
// Fetch the raw temperature (UT) and apply the datasheet compensation:
//   X1 = (UT - AC6) * AC5 / 2^15,  X2 = MC * 2^11 / (X1 + MD),  B5 = X1 + X2.
// (B5 + 8) >> 4 is the temperature in 0.1 degC; here it is converted to
// kelvin via /(10 << 4) + 273.15. B5 is kept for pressure compensation.
template<typename RegisterBank>
void BMP180<RegisterBank>::readTemperature()
{
    mDevice->template read<registers::bmp180::TemperatureReading>([this](auto const & reg) {
        int32_t t = reg.temperature;
        int32_t x1 = ((t - int32_t(mCallibration.ac6)) * int32_t(mCallibration.ac5)) >> 15;
        int32_t x2 = (int32_t(mCallibration.mc) << 11) / (x1 + int32_t(mCallibration.md));
        mB5 = x1 + x2;
        mTemperature = float(mB5 + 8) / (10 << 4) + 273.15;
    });
}
// Write the control register to begin a pressure conversion.
// times2 oversampling here corresponds to oss = 1 in readPressure().
template<typename RegisterBank>
void BMP180<RegisterBank>::startPressureRead()
{
    using namespace registers::bmp180;
    mDevice->template write<Control>([this](auto & reg) {
        reg.measurementControl = MeasurementControl::pressure;
        reg.oversampling = Oversampling::times2;
        reg.startOfConversion = true;
    });
}
// Fetch the raw pressure reading (UP) and apply the fixed-point
// compensation algorithm from the Bosch BMP180 datasheet, storing the
// result in mPressure. Requires mB5 from a preceding temperature read.
template<typename RegisterBank>
void BMP180<RegisterBank>::readPressure()
{
    using namespace registers::bmp180;
    mDevice->template read<PressureReading>([this](auto const & reg) {
        // BEWARE!! Here be dragons
        // The following piece of code is based on BMP180 datasheet
        // oss must match the Oversampling written in startPressureRead()
        // (Oversampling::times2 -> oss = 1).
        constexpr uint32_t oss = 1;
        uint32_t up = uint32_t(reg.pressure) >> (8 - oss);
        int32_t b6 = mB5 - 4000;
        int32_t x1 = (int32_t(mCallibration.b2) * ((b6 * b6) >> 12)) >> 11;
        int32_t x2 = (int32_t(mCallibration.ac2) * b6) >> 11;
        // BUG FIX: the datasheet defines X3 = X1 + X2; the original code
        // computed x1 + x1, skewing the compensated pressure.
        int32_t x3 = x1 + x2;
        int32_t b3 = (((int32_t(mCallibration.ac1) * 4 + x3) << oss) + 2) / 4;
        x1 = (int32_t(mCallibration.ac3) * b6) >> 13;
        x2 = (int32_t(mCallibration.b1) * ((b6 * b6) >> 12)) >> 16;
        x3 = ((x1 + x2) + 2) >> 2;
        uint32_t b4 = (uint32_t(mCallibration.ac4) * uint32_t(x3 + 32768)) >> 15;
        uint32_t b7 = (up - uint32_t(b3)) * (50000 >> oss);
        // NOTE(review): the datasheet uses (B7 * 2) / B4 when
        // B7 < 0x80000000; this simplification loses one bit of precision
        // but cannot overflow — confirm against reference readings.
        uint32_t p = (b7 / b4) * 2;
        x1 = (p >> 8) * (p >> 8);
        x1 = (x1 * 3038) >> 16;
        x2 = (-7357 * int32_t(p)) >> 16;
        p = p + ((x1 + x2 + 3791) >> 4);
        mPressure = float(p);
    });
}
}
|
{"hexsha": "ecb5aa56f927bb3f97c4407802334e3449d229c0", "size": 5520, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "icarus/include/icarus/sensor/BMP180_impl.hpp", "max_stars_repo_name": "Icarus-Quadro/Icarus", "max_stars_repo_head_hexsha": "10c4f1e804432d8cd11541f3e7342a12acec79f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "icarus/include/icarus/sensor/BMP180_impl.hpp", "max_issues_repo_name": "Icarus-Quadro/Icarus", "max_issues_repo_head_hexsha": "10c4f1e804432d8cd11541f3e7342a12acec79f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "icarus/include/icarus/sensor/BMP180_impl.hpp", "max_forks_repo_name": "Icarus-Quadro/Icarus", "max_forks_repo_head_hexsha": "10c4f1e804432d8cd11541f3e7342a12acec79f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3296703297, "max_line_length": 96, "alphanum_fraction": 0.5375, "num_tokens": 1450}
|
"""
This zone parsing codebook will be used along the notebooks/utils.py ZoningInfo data class and
src/pcts_parser.py ZoningInfo data class.
After the zoning string is parsed, there were still observations that failed to be parsed
Those were manually coded.
Save the failed to be parsed codebook and general codebook for zoning string into S3.
"""
import numpy as np
import pandas as pd
bucket_name = 'city-planning-entitlements'

# Import the codebook we want to use for parse fails
df = pd.read_excel(f's3://{bucket_name}/references/Zoning_Parser_Codebook.xlsx',
                   sheet_name = 'use_for_parse_fails')

# Make sure column types are the same as the other observations
# Q/T/D are flag columns: coerce to bool.
col_names = ['Q', 'T', 'D']
df[col_names] = df[col_names].astype(bool)

# Text columns: replace NaN with an empty string and force str dtype.
for col in ['zone_class', 'specific_plan', 'height_district']:
    df[col] = df[col].fillna('')
    df[col] = df[col].astype(str)

df.to_parquet(f's3://{bucket_name}/data/crosswalk_zone_parse_fails.parquet')

# Import other sheets in the codebook and upload to S3
for name in ['zone_class', 'supplemental_use_overlay', 'specific_plan']:
    df = pd.read_excel(f's3://{bucket_name}/references/Zoning_Parser_Codebook.xlsx',
                       sheet_name = f'{name}')
    df.to_parquet(f's3://{bucket_name}/data/crosswalk_{name}.parquet')
|
{"hexsha": "aec0ecb45b9f0fe0bf969bcd6764a6ea2e6d650c", "size": 1328, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/B1_zone_parsing_codebook.py", "max_stars_repo_name": "CityOfLosAngeles/planning-entitlements", "max_stars_repo_head_hexsha": "cf83b57063b4e55722cc640172b529611b263b3a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/B1_zone_parsing_codebook.py", "max_issues_repo_name": "CityOfLosAngeles/planning-entitlements", "max_issues_repo_head_hexsha": "cf83b57063b4e55722cc640172b529611b263b3a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 55, "max_issues_repo_issues_event_min_datetime": "2020-01-08T17:50:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-13T21:45:31.000Z", "max_forks_repo_path": "src/B1_zone_parsing_codebook.py", "max_forks_repo_name": "CityOfLosAngeles/planning-entitlements", "max_forks_repo_head_hexsha": "cf83b57063b4e55722cc640172b529611b263b3a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-16T02:10:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-25T21:14:49.000Z", "avg_line_length": 36.8888888889, "max_line_length": 96, "alphanum_fraction": 0.7138554217, "include": true, "reason": "import numpy", "num_tokens": 328}
|
import types
-- Formulas over ground types `ι` with ground interpretation `gri`:
-- decidable prime (atomic) propositions, the propositional connectives,
-- and quantifiers ranging over interpreted types `∥σ∥`.
inductive formula (ι : Type) (gri : ground_interpretation ι)
| prime (p : Prop) [decidable p] : formula
| conjunction : formula → formula → formula
| disjunction : formula → formula → formula
| implication : formula → formula → formula
| universal {σ : type ι gri} : (∥σ∥ → formula) → formula
| existential {σ : type ι gri} : (∥σ∥ → formula) → formula
-- NOTE(review): declared with no constructors, i.e. an empty type —
-- presumably a placeholder to be fleshed out; confirm intent.
inductive restricted_formula (ι : Type) (gri : ground_interpretation ι)
section basics
namespace formula
infixr `⟹` : 45 := implication
infix `⋀` : 50 := conjunction
infix `⋁` : 50 := disjunction
notation `universal'` := @universal _ _
notation `existential'` := @existential _ _
-- how does this actually work?
notation `∀∀` binders `,` r:(scoped A, universal A) := r
notation `∃∃` binders `,` r:(scoped A, existential A) := r
-- The false formula: the embedding of the `Prop` `false` via `prime`.
@[reducible, simp] def falsum (ι : Type) (gri : ground_interpretation ι) : formula ι gri := @prime ι gri false _
-- Variant with `ι` and `gri` implicit, inferred from the ambient context.
@[reducible, simp] def falsum' {ι : Type} {gri : ground_interpretation ι} : formula ι gri := falsum _ _
variables {ι : Type} {gri : ground_interpretation ι}
-- Local abbreviations for the ambient universes of types and formulas.
local notation `𝕋` := type ι gri
local notation `𝔽` := formula ι gri
-- `⊥` denotes the false formula.
instance : has_bot 𝔽 := ⟨falsum ι gri⟩
-- Negation is defined, intuitionistically, as implication into falsum.
def negation (A : 𝔽) := A ⟹ falsum ι gri
prefix `∼` : 90 := negation
-- Logical equivalence of formulas: `(A ⟹ B) ⋀ (B ⟹ A)`.
-- NOTE: the parentheses are essential.  `⋀` (precedence 50) binds
-- tighter than `⟹` (right-associative, precedence 45), so the
-- unparenthesized `A ⟹ B ⋀ B ⟹ A` parses as
-- `A ⟹ ((B ⋀ B) ⟹ A)` — a formula whose interpretation is trivially
-- true for every `A` and `B`, not an equivalence.
def equivalence (A B : 𝔽) := (A ⟹ B) ⋀ (B ⟹ A)
infixl `⇔` : 15 := equivalence
-- Extensional equality of semantic values, expressed as a formula.
-- `greq` supplies the equality formula at each ground type `𝕏 i`;
-- at `𝕆` equality is the (prime-embedded) propositional equality,
-- at function types `↣` it is pointwise equality under `∀∀`, and
-- at products `⊗` the conjunction of componentwise equalities.
@[simp]
def eqext (greq : Π {i : ι}, ∥𝕏 i∥ → ∥𝕏 i∥ → 𝔽) : Π {σ : 𝕋}, ∥σ∥ → ∥σ∥ → 𝔽
| 𝕆 x y := prime $ x = y
| (𝕏 i) x y := greq x y
| (σ ↣ τ) x y := ∀∀ z : ∥σ∥ , (eqext (x z) (y z))
| (σ ⊗ τ) x y := eqext x.1 y.1 ⋀ eqext x.2 y.2
-- Tarski-style interpretation of a formula as a Lean `Prop`: each
-- object-level connective/quantifier is mapped to its meta-level
-- counterpart.  The HOAS quantifier bodies are applied directly to
-- meta-level witnesses `x : ∥σ∥`.
@[simp, pp_nodot]
def interpret : 𝔽 → Prop
| (@prime _ _ p _) := p
| (A ⋀ B) := A.interpret ∧ B.interpret
| (A ⋁ B) := A.interpret ∨ B.interpret
| (A ⟹ B) := A.interpret → B.interpret
| (universal' σ A) := ∀ x : ∥σ∥, (A x).interpret
| (existential' σ A) := ∃ x : ∥σ∥, (A x).interpret
-- Commented-out round-trip lemma for `prime`; kept for reference.
-- @[simp]
-- lemma prime_interpret (p : Prop) [decidable p] : ∥(prime p : 𝔽)∥ ↔ p :=
-- by split; intros; simpa
-- `∥A∥` overloads the semantic-bracket notation for formulas
-- (elsewhere it denotes the interpretation of a type).
notation `∥` A `∥` := interpret A
end formula
end basics
-- Syntactic classes of formulas: quantifier-free, disjunction-free,
-- purely universal, and their combinations.
section kinds_of_formulas
variables {ι : Type} {gri : ground_interpretation ι}
local notation `𝕋` := type ι gri
local notation `𝔽` := formula ι gri
-- Ground-equality parameter for this section.
-- NOTE(review): `greq` is not referenced by any definition visible
-- below — confirm it is used later in the file.
variables {greq : Π {i : ι}, ∥𝕏 i // gri∥ → ∥𝕏 i // gri∥ → 𝔽}
namespace formula
-- Holds exactly when the formula contains no quantifier: atoms are
-- quantifier-free, connectives are quantifier-free when both sides
-- are, and any quantified formula is excluded outright.
@[simp]
def is_qf : 𝔽 → Prop
| (@prime _ _ _ _) := true
| (P ⋀ Q) := P.is_qf ∧ Q.is_qf
| (P ⋁ Q) := P.is_qf ∧ Q.is_qf
| (P ⟹ Q) := P.is_qf ∧ Q.is_qf
| (universal' σ P) := false
| (existential' σ P) := false
-- Holds exactly when the formula is quantifier-free AND contains no
-- disjunction `⋁`: like `is_qf`, but the disjunction case is refused
-- outright instead of recursing.
@[simp]
def is_qf_disj_free : 𝔽 → Prop
| (@prime _ _ _ _) := true
| (P ⋀ Q) := P.is_qf_disj_free ∧ Q.is_qf_disj_free
| (P ⋁ Q) := false
| (P ⟹ Q) := P.is_qf_disj_free ∧ Q.is_qf_disj_free
| (universal' σ P) := false
| (existential' σ P) := false
-- Witness that a formula is "purely universal": a (possibly empty)
-- prefix of `∀∀`-quantifiers over a quantifier-free matrix.
-- `Type`-valued (not `Prop`), so the witness carries data.
inductive purely_univ : 𝔽 → Type
| of_qf (A : 𝔽) : A.is_qf → purely_univ A
| of_univ {σ : 𝕋} (A : ∥σ∥ → 𝔽) : (∀ x : ∥σ∥, purely_univ (A x)) → purely_univ (universal' σ A)
-- Propositional analogue of `purely_univ`.  Universal quantifiers may
-- appear only as an outer prefix, so at any connective both sides must
-- already be fully quantifier-free (recursion continues only through
-- `universal`).
-- NOTE(review): unlike its siblings, not tagged `@[simp]` — confirm
-- whether that is intentional.
def is_purely_univ : 𝔽 → Prop
| (@prime _ _ _ _) := true
| (A ⋀ B) := A.is_qf ∧ B.is_qf
| (A ⋁ B) := A.is_qf ∧ B.is_qf
| (A ⟹ B) := A.is_qf ∧ B.is_qf
| (universal' σ A) := ∀ x, is_purely_univ (A x)
| (existential' σ A) := false
-- As `purely_univ`, but the quantifier-free matrix must additionally
-- be disjunction-free.
inductive purely_univ_disj_free : 𝔽 → Type
| of_qf_disj_free (A : 𝔽) : A.is_qf_disj_free → purely_univ_disj_free A
| of_univ {σ : 𝕋} (A : ∥σ∥ → 𝔽) : (∀ x : ∥σ∥, purely_univ_disj_free (A x)) → purely_univ_disj_free (universal' σ A)
end formula
end kinds_of_formulas
|
{"author": "hcheval", "repo": "formalized-proof-mining", "sha": "216cc73fccd84900a1ba7eaae5f73732496d6afe", "save_path": "github-repos/lean/hcheval-formalized-proof-mining", "path": "github-repos/lean/hcheval-formalized-proof-mining/formalized-proof-mining-216cc73fccd84900a1ba7eaae5f73732496d6afe/src/formula.lean"}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.