Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- .gitignore +24 -0
- README.md +16 -4
- __init__.py +0 -0
- anchor_set_generation.py +80 -0
- anchor_sets/anchor_set_1024.pickle +3 -0
- anchor_sets/anchor_set_512.pickle +3 -0
- configuration.py +68 -0
- custom_typing.py +4 -0
- history/aob/anchor_set_1000_dim.pickle +3 -0
- history/aob/anchor_set_1024_dim.pickle +3 -0
- history/aob/anchor_set_no_diff_1024_dim.pickle +3 -0
- history/aob/customdataset.py +93 -0
- history/aob/data/human/data.csv +95 -0
- history/aob/data/human/labels.csv +1 -0
- history/aob/data/linear/data.csv +201 -0
- history/aob/data/linear/labels.csv +1 -0
- history/aob/data/maritime/data.csv +0 -0
- history/aob/data/maritime/labels.csv +1 -0
- history/aob/data/maritime/times.csv +1 -0
- history/aob/data/robot5/data.csv +71 -0
- history/aob/data/robot5/labels.csv +1 -0
- history/aob/data/train/data.csv +0 -0
- history/aob/data/train/labels.csv +1 -0
- history/aob/data/train/times.csv +1 -0
- history/aob/descriptive/balanced_train_set.pkl +3 -0
- history/aob/descriptive/balanced_validation_set.csv +0 -0
- history/aob/descriptive/easysk_train_set.pkl +3 -0
- history/aob/descriptive/hardsk_train_set.pkl +3 -0
- history/aob/descriptive/old_train_set.pkl +3 -0
- history/aob/handcoded_tokenizer_OLD.py +232 -0
- history/aob/kernel_example.py +65 -0
- history/aob/old_utils.py +571 -0
- history/aob/train-16batch-bis.py +419 -0
- history/aob/train-16batch.py +415 -0
- history/aob/train-modified.py +418 -0
- history/aob/train-oldfile.py +414 -0
- history/aob/train16.sh +36 -0
- history/aob/utils2.py +739 -0
- history/aob/utils3.py +162 -0
- history/aob/validate.py +94 -0
- history/aob/validate_step.py +79 -0
- history/aob/validation-evaluation.py +41 -0
- history/configuration_stldec.py +69 -0
- history/datasets_things/analysis.ipynb +0 -0
- history/datasets_things/compose_datasets.py +23 -0
- history/datasets_things/descriptive_analysis.py +182 -0
- history/datasets_things/download_datasets.py +10 -0
- history/datasets_things/formulae_generation.py +83 -0
- history/datasets_things/generate_sets.py +77 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+history/results/easysk1024/partial_24000.csv filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,24 @@
+dummy_datasets/
+datasets/
+.ipynb_checkpoints/
+__pycache__/
+output_test_16batch/
+output_test_32batch/
+output_test_16batch_grad2/
+marianOriginal/
+tf_output_test_16batch/
+lr_32_training/
+test_training/
+epoch_8/
+step_20000/
+easyskewed/
+errors/
+
+easysk/
+easysk512/
+hardsk/
+hardsk512/
+oldtrain/
+oltrain512/
+balanced/
+balanced512/
README.md
CHANGED
@@ -1,4 +1,16 @@
-
-
-
-
+# Materials for the paper "Bridging Logic and Learning: Decoding Temporal Logic Embeddings via Transformers" (Candussio et al.) @ ECML-PKDD 2025
+
+**TL;DR:**
+- (trained) models are available at: https://huggingface.co/collections/saracandu/stldec-ecml-pkdd-2025-686fe174a16915bc32aa53eb
+- code, results, and other details can be found in this repo.
+
+The goal of STLdecoder is to take a NeSy embedding of a Signal Temporal Logic (STL) formula and recover a semantically equivalent formula.
+
+The `encoder.py` file allows you to obtain the NeSy embeddings of (a list of) formulae with respect to a predefined anchor set, which you can find in the `anchor_sets/` folder. More details on this procedure can be found at https://ebooks.iospress.nl/doi/10.3233/FAIA240638
+This class also relies on the following files: `phis_generator.py`, `traj_measure.py`, `kernel.py`, `stl.py`, `anchor_set_generation.py`, `custom_typing.py`, `trajectories.py`.
+
+The `decoder.py` component translates a vector (i.e., the encoding of a formula, as produced by `encoder.py`) into a string (i.e., an STL formula in a hybrid syntax of numbers, parentheses, and words, whose vocabulary can be found in the `tokenizer_files/` folder).
+This is implemented in the `modeling_stldec.py` file, where the procedure is carried out by a decoder-only Transformer architecture: the tokens of the STL formula are generated autoregressively, embedded, and merged with the initial semantic vector through the cross-attention block. The `configuration.py` file provides the fixed configuration structure expected by the `transformers` classes.
+
+To train this architecture, use the `training.py` file, leveraging the different training settings available in the `training_config/` folder.
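To make the README's flow concrete, here is a minimal sketch of the round trip it describes. The `embed_formulae` and `decode_embedding` calls are hypothetical placeholders for whatever `encoder.py` and `decoder.py` actually expose; only the anchor-set pickle path is taken from the repo, and the file must first be materialized with `git lfs pull`.

```python
# Hypothetical sketch of the STLdecoder round trip; the two commented calls
# stand in for the concrete APIs in encoder.py / decoder.py.
import pickle

# Anchor sets ship as LFS-tracked pickles under anchor_sets/.
with open("anchor_sets/anchor_set_1024.pickle", "rb") as f:
    anchor_set = pickle.load(f)

# 1) encoder.py: kernel embedding of a formula w.r.t. the anchor set.
# embedding = embed_formulae(["always ( x_0 >= 0.5 )"], anchor_set)

# 2) decoder.py / modeling_stldec.py: autoregressive decoding back into a
#    (semantically equivalent) formula string.
# formula = decode_embedding(embedding[0])
```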
__init__.py
ADDED
File without changes
anchor_set_generation.py
ADDED
@@ -0,0 +1,80 @@
+import os
+import torch
+from torch.nn.functional import normalize
+import copy
+import numpy as np
+
+from phis_generator import StlGenerator
+from traj_measure import BaseMeasure
+from utils import from_string_to_formula, load_pickle, dump_pickle
+from kernel import StlKernel
+
+
+def anchorGeneration(diff_init = False,  # to control whether we want formulae to be semantically different by construction
+                     embed_dim: int = 30,  # embedding dimension, aka number of generated formulae in the anchor set
+                     n_vars: int = 3,  # dimension of the input signal (3D in this case)
+                     leaf_prob: float = 0.4,  # complexity of the generated formula
+                     cosine_similarity_threshold: float = 0.8  # if the cosine similarity of two formulae exceeds this threshold, discard one of the two
+                     ) -> str:
+
+    # initialize STL formula generator
+    sampler = StlGenerator(leaf_prob)
+
+    # effective anchor set generation
+    if diff_init:
+
+        # initialize the anchor set with a randomly sampled formula
+        diff_anchor_set = [sampler.sample(nvars=n_vars)]
+
+        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+        mu = BaseMeasure(device=device)
+
+        # generate a set of random signals working as a tester for the formulae
+        signals = mu.sample(samples=10000, varn=n_vars)
+
+        # compute robustness values for the initial set of formulae in the anchor set
+        anchor_rob_vectors = torch.cat([phi.quantitative(signals, normalize=True).unsqueeze(0) for phi in diff_anchor_set], 0)
+
+        while len(diff_anchor_set) < embed_dim:
+            # sample the 'remaining' formulae to reach the desired number of `embed_dim` formulae:
+            candidate_anchors = sampler.bag_sample(embed_dim - len(diff_anchor_set), nvars=n_vars)
+
+            # compute robustness of candidate anchor formulae on the same signals as the previous anchor set
+            candidate_robs = torch.cat([phi.quantitative(signals, normalize=True).unsqueeze(0) for phi in candidate_anchors], 0)
+
+            # compute cosine similarity between current anchor set and candidate new formulae
+            cos_simil = torch.tril(normalize(candidate_robs) @ normalize(anchor_rob_vectors).t(), diagonal=-1)
+
+            # check which formulae are similar (i.e. cosine similarity greater than the threshold) w.r.t. current anchors
+            # NOTE: ask Gaia whether negative cosine similarities should be suppressed with an absolute value or not!
+            similar_idx = [torch.where(cos_simil[r, :] > cosine_similarity_threshold)[0].tolist() for r in range(cos_simil.shape[0])]
+
+            # keep only those which are semantically distant
+            keep_idx = list(set(np.arange(len(candidate_anchors)).tolist()).difference(set([i for sublist in similar_idx for i in sublist])))
+
+            diff_anchor_set += [copy.deepcopy(candidate_anchors[i]) for i in keep_idx]
+
+            # Convert keep_idx to a tensor on the same device as candidate_robs
+            keep_idx_tensor = torch.tensor(keep_idx, device=candidate_robs.device)
+
+            # Use index_select to pick the relevant rows
+            selected_robs = torch.index_select(candidate_robs, 0, keep_idx_tensor)
+
+            # Concatenate on the same device
+            anchor_rob_vectors = torch.cat([anchor_rob_vectors, copy.deepcopy(selected_robs)], dim=0)
+
+        anchor_set = diff_anchor_set[:embed_dim]
+
+    else:
+        anchor_set = sampler.bag_sample(bag_size=embed_dim, nvars=n_vars)
+
+    filename = f'anchor_set_no_diff_{embed_dim}_dim'
+    dump_pickle(filename, anchor_set)
+    return filename
+
+
+# EXAMPLE OF USAGE
+# anchorGeneration(diff_init = False, embed_dim = 1024)
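The heart of the `diff_init` branch is a rejection filter: candidates whose robustness vectors are too cosine-similar to any current anchor are discarded, and the survivors are appended until `embed_dim` formulae are collected. A self-contained sketch of just that step, on random stand-in vectors instead of real robustness values (the repo version additionally applies `torch.tril(..., diagonal=-1)` to the similarity matrix; this sketch uses the plain, unmasked comparison):

```python
import torch
from torch.nn.functional import normalize

threshold = 0.8

# Stand-ins for robustness vectors (the real ones come from phi.quantitative(signals)).
anchors = normalize(torch.randn(5, 100))     # current anchor set, one row per formula
candidates = normalize(torch.randn(8, 100))  # freshly sampled candidate formulae

# Entry (r, c) is the cosine similarity between candidate r and anchor c.
cos_sim = candidates @ anchors.t()

# Keep only candidates that stay below the threshold against *every* anchor.
keep_mask = (cos_sim <= threshold).all(dim=1)
kept = candidates[keep_mask]
print(f"kept {kept.shape[0]} of {candidates.shape[0]} candidates")
```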
anchor_sets/anchor_set_1024.pickle
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0914a369bb6e37993b34a8b04651995392f363f5474a72f9c0bbb41b49ae173
+size 235912
anchor_sets/anchor_set_512.pickle
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc648369dfdf56982aeaa909fb43fd5c6c612d6782bd458c8fc534abc808cde5
+size 124388
configuration.py
ADDED
@@ -0,0 +1,68 @@
+from transformers.configuration_utils import PretrainedConfig
+
+class STLConfig(PretrainedConfig):
+
+    model_type = "stldec"
+    keys_to_ignore_at_inference = ["past_key_values"]
+    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+    def __init__(
+        self,
+        vocab_size=35,
+        decoder_vocab_size=None,  # unused
+        max_position_embeddings=1024,
+        encoder_layers=12,
+        encoder_ffn_dim=4096,
+        encoder_attention_heads=16,
+        decoder_layers=12,
+        decoder_ffn_dim=4096,
+        decoder_attention_heads=16,
+        encoder_layerdrop=0.0,
+        decoder_layerdrop=0.0,
+        use_cache=True,
+        is_encoder_decoder=True,
+        activation_function="gelu",
+        d_model=1024,
+        dropout=0.1,
+        attention_dropout=0.0,
+        activation_dropout=0.0,
+        init_std=0.02,
+        decoder_start_token_id=3,
+        scale_embedding=False,
+        pad_token_id=1,
+        eos_token_id=3,
+        bos_token_id=2,
+        forced_eos_token_id=3,
+        share_encoder_decoder_embeddings=True,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.decoder_vocab_size = decoder_vocab_size or vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.d_model = d_model
+        self.encoder_ffn_dim = encoder_ffn_dim
+        self.encoder_layers = encoder_layers
+        self.encoder_attention_heads = encoder_attention_heads
+        self.decoder_ffn_dim = decoder_ffn_dim
+        self.decoder_layers = decoder_layers
+        self.decoder_attention_heads = decoder_attention_heads
+        self.dropout = dropout
+        self.attention_dropout = attention_dropout
+        self.activation_dropout = activation_dropout
+        self.activation_function = activation_function
+        self.init_std = init_std
+        self.encoder_layerdrop = encoder_layerdrop
+        self.decoder_layerdrop = decoder_layerdrop
+        self.use_cache = use_cache
+        self.num_hidden_layers = encoder_layers
+        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
+        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
+        super().__init__(
+            bos_token_id=bos_token_id,
+            pad_token_id=pad_token_id,
+            eos_token_id=eos_token_id,
+            is_encoder_decoder=is_encoder_decoder,
+            decoder_start_token_id=decoder_start_token_id,
+            forced_eos_token_id=forced_eos_token_id,
+            **kwargs,
+        )
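Since `STLConfig` subclasses `PretrainedConfig`, it inherits the standard `transformers` (de)serialization machinery. A brief sketch (the override values and the output directory name are illustrative, not the settings used in the paper):

```python
from configuration import STLConfig

# Defaults mirror the constructor above; any field can be overridden.
config = STLConfig(vocab_size=35, d_model=1024, decoder_layers=12)

# Standard PretrainedConfig round trip: writes/reads config.json.
config.save_pretrained("stldec_config")
reloaded = STLConfig.from_pretrained("stldec_config")
assert reloaded.d_model == 1024
```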
custom_typing.py
ADDED
@@ -0,0 +1,4 @@
+from typing import Union
+
+# Custom types
+realnum = Union[float, int]
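The `realnum` alias simply marks values that may arrive as Python floats or ints. An illustrative use in a type annotation (`clamp` is a made-up example, not a function from the repo):

```python
from typing import Union

realnum = Union[float, int]  # as defined in custom_typing.py

def clamp(value: realnum, low: realnum, high: realnum) -> realnum:
    # Works uniformly for ints and floats.
    return max(low, min(high, value))
```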
history/aob/anchor_set_1000_dim.pickle
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea225a6adcaa6030d270729c6ca937999df4ba79eb660a03ba185b622c0cb7ab
+size 217242
history/aob/anchor_set_1024_dim.pickle
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0914a369bb6e37993b34a8b04651995392f363f5474a72f9c0bbb41b49ae173
+size 235912
history/aob/anchor_set_no_diff_1024_dim.pickle
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe3b76f14c5c199be01d5b9d31760cdec845e089e661b0a276bb7c87ae90a0ef
+size 231933
history/aob/customdataset.py
ADDED
@@ -0,0 +1,93 @@
+import pandas as pd
+import ast
+import torch
+from torch.utils.data import Dataset
+
+class CustomDataset(Dataset):
+    def __init__(self, df, device='cpu'):
+        """
+        Initializes the dataset by storing the DataFrame and setting the device.
+
+        Args:
+        - df: A pandas DataFrame containing the data (e.g., `Encoded_Formula`, `Embedding`).
+        - device: The device ('cpu' or 'cuda') where the tensors will be moved for processing.
+        """
+        self.df = df
+        self.device = device
+        transformed_data = []
+
+        for idx in range(len(self.df)):
+
+            # Extract the encoded formula (tokenized input sequence) from the DataFrame
+            encoded_formula = self.df['Encoded_Formula'][idx]
+            # Convert the string representation of a list back to a Python list using ast.literal_eval
+            encoded_formula = ast.literal_eval(encoded_formula.strip())
+
+            # Extract the precomputed formula embedding (hidden states) from the DataFrame
+            formula_embedding = self.df['Embedding'][idx]
+
+            # Clean the string and convert it back to a tensor
+            formula_embedding = formula_embedding.replace("tensor(", "").rstrip(")")
+            formula_embedding = eval(formula_embedding)
+
+            # Define the input_ids by excluding the last token (shifted tokens for prediction)
+            input_ids = encoded_formula[:-1]  # All tokens except the last
+            # Define the labels by excluding the first token (shifted tokens for teacher forcing)
+            labels = encoded_formula[1:]  # All tokens except the first
+
+            # Create the attention mask to indicate which tokens should be attended to.
+            # Tokens equal to '1' (typically padding tokens) will be masked (set to 0),
+            # and the rest will be visible (set to 1).
+            attention_mask = [0 if token == 1 else 1 for token in input_ids]
+
+            # Convert `input_ids`, `labels`, and `attention_mask` to tensors and move them to the desired device (e.g., GPU or CPU)
+            input_ids = torch.tensor(input_ids, dtype=torch.long).to(self.device)
+            labels = torch.tensor(labels, dtype=torch.long).to(self.device)
+            attention_mask = torch.tensor(attention_mask, dtype=torch.long).to(self.device)
+
+            # Convert the formula embedding (list of hidden states) to a tensor and move it to the device
+            encoder_hidden_states = torch.tensor(formula_embedding, dtype=torch.float32).to(self.device)
+
+            # Store the transformed data in a dictionary
+            transformed_data.append({
+                'input_ids': input_ids,
+                'labels': labels,
+                'attention_mask': attention_mask,
+                'encoder_hidden_states': encoder_hidden_states
+            })
+
+        # Convert the transformed data into a DataFrame (now with tensors)
+        self.df = pd.DataFrame(transformed_data)
+
+    def __len__(self):
+        """
+        Returns the length of the dataset, i.e., the number of examples in the DataFrame.
+
+        Returns:
+        - Length of the DataFrame (number of samples).
+        """
+        return len(self.df)
+
+    def __getitem__(self, idx):
+        """
+        Retrieves a specific example from the dataset, processes it, and formats it
+        into the required structure for the model (e.g., `input_ids`, `labels`, `attention_mask`).
+
+        Args:
+        - idx: Index of the example to retrieve.
+
+        Returns:
+        - A dictionary containing the formatted input data, including:
+          - `input_ids`: The tokenized input sequence (excluding the last token).
+          - `labels`: The tokenized target sequence (excluding the first token).
+          - `attention_mask`: A mask indicating which tokens should be attended to.
+          - `encoder_hidden_states`: Embedding for each formula (precomputed, used as hidden states).
+        """
+
+        # Return the formatted data as a dictionary, which the model can use directly for training or evaluation
+        return {
+            'input_ids': self.df['input_ids'][idx],
+            'labels': self.df['labels'][idx],
+            'attention_mask': self.df['attention_mask'][idx],
+            'encoder_hidden_states': self.df['encoder_hidden_states'][idx]
+        }
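A usage sketch for `CustomDataset`, assuming a DataFrame with the two stringified columns the class parses (the toy values below are invented for illustration; the real import path is `history/aob/customdataset.py`). Note that the default `DataLoader` collation stacks per-sample tensors, so batching only works if every `Encoded_Formula` has the same padded length, consistent with the pad-token handling above:

```python
import pandas as pd
from torch.utils.data import DataLoader
from customdataset import CustomDataset

# Two toy rows in the stringified format the class parses with
# ast.literal_eval / eval (sequences padded with token id 1).
df = pd.DataFrame({
    "Encoded_Formula": ["[2, 5, 7, 3, 1, 1]", "[2, 6, 8, 9, 3, 1]"],
    "Embedding": ["tensor([0.1, 0.2, 0.3])", "tensor([0.4, 0.5, 0.6])"],
})

dataset = CustomDataset(df, device="cpu")
loader = DataLoader(dataset, batch_size=2)

batch = next(iter(loader))
print(batch["input_ids"].shape)    # (2, 5): all tokens but the last
print(batch["attention_mask"][0])  # pad token id 1 masked to 0
```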
history/aob/data/human/data.csv
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# x1
|
| 2 |
+
-0.328280,-0.229207,-0.275158,-0.229924,-0.226577,-0.224537,-0.233956,-0.247087,-0.353099,-0.262942,-0.260705,-0.239030,-0.200974,-0.168656,-0.208425,-0.220544,-0.205516,-0.248645,-0.303125,-0.363238,-0.183444,-0.111717,-0.139370,-0.171239,-0.237225,-0.251462,-0.230044,-0.198107,-0.198700,-0.246964,-0.273985,-0.293967,-0.312237,-0.178454,-0.221793,-0.252537,-0.245917,-0.220770,-0.215309,-0.262798,-0.392944,-0.370784,-0.368800,-0.439000,-0.367220,-0.319937,-0.376543,-0.444181,-0.394085,-0.352498
|
| 3 |
+
-0.367516,-0.359032,-0.362666,-0.368157,-0.335254,-0.406022,-0.406744,-0.341699,-0.234499,-0.316791,-0.357120,-0.361185,-0.427528,-0.421474,-0.396448,-0.433845,-0.457558,-0.350926,-0.352228,-0.397071,-0.442670,-0.446155,-0.470911,-0.388356,-0.361438,-0.343683,-0.346843,-0.364893,-0.333353,-0.318333,-0.271541,-0.309743,-0.370057,-0.375189,-0.351269,-0.230762,-0.290751,-0.400050,-0.283562,-0.279905,-0.358326,-0.291934,-0.268213,-0.321063,-0.321501,-0.373299,-0.347980,-0.324009,-0.334255,-0.411774
|
| 4 |
+
-0.995279,-0.998245,-0.995380,-0.996091,-0.998139,-0.997335,-0.996921,-0.996559,-0.997328,-0.994803,-0.994815,-0.998246,-0.999135,-0.999188,-0.988641,-0.996839,-0.996941,-0.997695,-0.997491,-0.997811,-0.997904,-0.997763,-0.997862,-0.998389,-0.998534,-0.998060,-0.999255,-0.973723,-0.994609,-0.994447,-0.994169,-0.990291,-0.992678,-0.994157,-0.996983,-0.995779,-0.995531,-0.998285,-0.998360,-0.996902,-0.995829,-0.969600,-0.993930,-0.998432,-0.997576,-0.997089,-0.998455,-0.998038,-0.997897,-0.997865
|
| 5 |
+
-0.914811,-0.984883,-0.994195,-0.996135,-0.996775,-0.994928,-0.994425,-0.996475,-0.996287,-0.994788,-0.995511,-0.996143,-0.995237,-0.661294,-0.958596,-0.633135,-0.637325,-0.803903,-0.755994,-0.599392,-0.596410,-0.919666,-0.977395,-0.985693,-0.995495,-0.994580,-0.993125,-0.962290,-0.967033,-0.995137,-0.994446,-0.995020,-0.995855,-0.994931,-0.996429,-0.995667,-0.994971,-0.994663,-0.995237,-0.683779,-0.738200,-0.957068,-0.960021,-0.982403,-0.981943,-0.986413,-0.991016,-0.967101,-0.956753,-0.953846
|
| 6 |
+
-0.548849,-0.495607,-0.424519,-0.398586,-0.429508,-0.416898,-0.377151,-0.378402,-0.381995,-0.314334,-0.313526,-0.397388,-0.423816,-0.371831,-0.359799,-0.466703,-0.639804,-0.558919,-0.435674,-0.321010,-0.239592,-0.252034,-0.298798,-0.342525,-0.308007,-0.252658,-0.297589,-0.344479,-0.371366,-0.408076,-0.457170,-0.274163,-0.250653,-0.280954,-0.309882,-0.291149,-0.239439,-0.270902,-0.317470,-0.317952,-0.325649,-0.295634,-0.317274,-0.398102,-0.453246,-0.365372,-0.314881,-0.303172,-0.275163,-0.306706
|
| 7 |
+
-0.467002,-0.433471,-0.438665,-0.464909,-0.414957,-0.409452,-0.389807,-0.385950,-0.405515,-0.313553,-0.357281,-0.400089,-0.313528,-0.162720,-0.165876,-0.236691,-0.231752,-0.326449,-0.287961,-0.357186,-0.305027,-0.232510,-0.259095,-0.238438,-0.218841,-0.234709,-0.134166,-0.103585,-0.207528,-0.238264,-0.272255,-0.279023,-0.185147,-0.104817,-0.302313,-0.319188,-0.316590,-0.280111,-0.242900,-0.321112,-0.302663,-0.298204,-0.331980,-0.317705,-0.283591,-0.308290,-0.387271,-0.378362,-0.334454,-0.311426
|
| 8 |
+
-0.920649,-0.988191,-0.992382,-0.994510,-0.994587,-0.991583,-0.873003,-0.867258,-0.826141,-0.936335,-0.974542,-0.980127,-0.928724,-0.978630,-0.990701,-0.996035,-0.997018,-0.997169,-0.995794,-0.993342,-0.990860,-0.991744,-0.997423,-0.996976,-0.996896,-0.996605,-0.935585,-0.964717,-0.988475,-0.995218,-0.996539,-0.997152,-0.996879,-0.994442,-0.991653,-0.991727,-0.985470,-0.982646,-0.738075,-0.902998,-0.966037,-0.979105,-0.985159,-0.997379,-0.998256,-0.999300,-0.997953,-0.996788,-0.996137,-0.978281
|
| 9 |
+
-0.854112,-0.963742,-0.993056,-0.974128,-0.961174,-0.963892,-0.956451,-0.961057,-0.950400,-0.915584,-0.916682,-0.949404,-0.962757,-0.957768,-0.930288,-0.927586,-0.921666,-0.975542,-0.992670,-0.993790,-0.990154,-0.980508,-0.977942,-0.987753,-0.995107,-0.997435,-0.997986,-0.998770,-0.997564,-0.997786,-0.991619,-0.934992,-0.975687,-0.962223,-0.953150,-0.961298,-0.981092,-0.992043,-0.997514,-0.997818,-0.996498,-0.996016,-0.998085,-0.997599,-0.997832,-0.909463,-0.848423,-0.840524,-0.929865,-0.966685
|
| 10 |
+
-0.919353,-0.977899,-0.990926,-0.995660,-0.992669,-0.991548,-0.987131,-0.986186,-0.980346,-0.976834,-0.978686,-0.979655,-0.988386,-0.969547,-0.940046,-0.977652,-0.990252,-0.994067,-0.995149,-0.997297,-0.995504,-0.994463,-0.994815,-0.988697,-0.987126,-0.988096,-0.992917,-0.995742,-0.996337,-0.994767,-0.929874,-0.959013,-0.986705,-0.991719,-0.994511,-0.994942,-0.994495,-0.998059,-0.991251,-0.984471,-0.981058,-0.964635,-0.956278,-0.969548,-0.978345,-0.979745,-0.932473,-0.961579,-0.966784,-0.991619
|
| 11 |
+
-0.429430,-0.416617,-0.430360,-0.477886,-0.482722,-0.524088,-0.567069,-0.553660,-0.512119,-0.442591,-0.419906,-0.385756,-0.372015,-0.393250,-0.397297,-0.424589,-0.360249,-0.249202,-0.254373,-0.330359,-0.342633,-0.372103,-0.438069,-0.392662,-0.355803,-0.376183,-0.397110,-0.382623,-0.334456,-0.367495,-0.125851,-0.101585,-0.117948,-0.189892,-0.201154,-0.223856,-0.234741,-0.209568,-0.166480,-0.149271,-0.183212,-0.203244,-0.245733,-0.015107,0.048226,-0.057317,-0.162784,-0.159780,-0.126189,-0.154298
|
| 12 |
+
-0.815371,-0.932295,-0.897297,-0.880509,-0.920953,-0.972564,-0.988789,-0.988123,-0.986281,-0.989864,-0.972766,-0.973468,-0.991712,-0.992423,-0.995499,-0.843029,-0.976873,-0.975745,-0.976346,-0.989912,-0.987356,-0.988534,-0.997680,-0.997437,-0.995744,-0.995762,-0.996864,-0.997480,-0.864615,-0.944654,-0.967748,-0.963789,-0.974623,-0.981171,-0.984213,-0.990704,-0.992093,-0.995618,-0.995893,-0.995117,-0.994366,-0.994825,-0.875737,-0.959699,-0.967857,-0.983310,-0.980381,-0.977853,-0.984525,-0.996022
|
| 13 |
+
-0.742664,-0.925353,-0.949227,-0.934361,-0.940206,-0.967568,-0.988985,-0.996113,-0.994056,-0.992457,-0.993513,-0.994208,-0.993768,-0.988397,-0.666897,-0.926855,-0.977756,-0.983522,-0.987190,-0.987043,-0.987386,-0.990268,-0.989063,-0.986518,-0.988980,-0.990542,-0.988589,-0.982775,-0.784316,-0.916591,-0.984731,-0.992034,-0.994682,-0.994984,-0.995027,-0.994277,-0.993472,-0.992944,-0.990022,-0.987741,-0.907936,-0.960992,-0.985357,-0.993138,-0.995346,-0.996110,-0.994561,-0.993572,-0.991576,-0.992254
|
| 14 |
+
-0.426931,-0.257331,-0.237674,-0.375048,-0.379958,-0.408362,-0.461658,-0.407212,-0.433983,-0.442459,-0.316894,-0.217791,-0.182425,-0.217216,-0.305865,-0.383107,-0.324685,-0.272394,-0.253347,-0.217030,-0.181742,-0.193518,-0.298017,-0.307359,-0.262137,-0.259491,-0.283213,-0.287190,-0.344854,-0.255453,-0.291699,-0.284967,-0.295424,-0.210913,-0.193606,-0.251925,-0.262024,-0.275551,-0.274943,-0.277709,-0.329710,-0.336640,-0.302236,-0.315915,-0.255013,-0.253257,-0.285261,-0.238753,-0.268608,-0.309003
|
| 15 |
+
-0.076126,-0.018839,-0.041373,-0.053525,0.002228,0.011658,-0.071289,-0.088599,-0.032272,-0.090301,0.009746,-0.005158,-0.087499,-0.152977,-0.176171,-0.127894,-0.137898,-0.029892,0.004931,-0.090252,-0.125553,-0.077319,-0.078214,-0.070452,-0.031426,-0.058064,-0.035236,-0.068646,-0.067471,-0.043337,-0.029313,0.030735,-0.064342,-0.100474,-0.057627,-0.036589,-0.013337,0.009813,-0.017612,-0.021754,-0.000671,-0.039149,-0.084002,0.017475,0.026049,0.068628,-0.043120,-0.045372,-0.030903,-0.112703
|
| 16 |
+
-0.962326,-0.988226,-0.994190,-0.991453,-0.991037,-0.994242,-0.994843,-0.993793,-0.989689,-0.988957,-0.992444,-0.993727,-0.995888,-0.993928,-0.983022,-0.921222,-0.973255,-0.982855,-0.990333,-0.950396,-0.935593,-0.957274,-0.978206,-0.985178,-0.990259,-0.992811,-0.993500,-0.990604,-0.988972,-0.919912,-0.978335,-0.957450,-0.956017,-0.980211,-0.986020,-0.988437,-0.992916,-0.994135,-0.993251,-0.992411,-0.959355,-0.951170,-0.977204,-0.973701,-0.979815,-0.984300,-0.979626,-0.980437,-0.985808,-0.986658
|
| 17 |
+
-0.953571,-0.987560,-0.995988,-0.995814,-0.996173,-0.994897,-0.993512,-0.993281,-0.994215,-0.996706,-0.996958,-0.998296,-0.995064,-0.942067,-0.975192,-0.988139,-0.990925,-0.995941,-0.978206,-0.972892,-0.982442,-0.986829,-0.989232,-0.994635,-0.995893,-0.990960,-0.991269,-0.994094,-0.992487,-0.990214,-0.916687,-0.987963,-0.982104,-0.981741,-0.967842,-0.959006,-0.947707,-0.953604,-0.984661,-0.988959,-0.994371,-0.992965,-0.993639,-0.994258,-0.881914,-0.956681,-0.963871,-0.976719,-0.975666,-0.983639
|
| 18 |
+
-0.833400,-0.823163,-0.944412,-0.940096,-0.967200,-0.980151,-0.981395,-0.989341,-0.987475,-0.984257,-0.985447,-0.991131,-0.986949,-0.323467,-0.921598,-0.963922,-0.973841,-0.989187,-0.989508,-0.990519,-0.994131,-0.983142,-0.979887,-0.984219,-0.987485,-0.986157,-0.970175,-0.963274,-0.057142,-0.824315,-0.900874,-0.980894,-0.980403,-0.963226,-0.966594,-0.954295,-0.938002,-0.966477,-0.944269,-0.959073,-0.974659,-0.963232,-0.801869,-0.887593,-0.971598,-0.971137,-0.979895,-0.981386,-0.984774,-0.987469
|
| 19 |
+
-0.654376,-0.548338,-0.471152,-0.440420,-0.435979,-0.422096,-0.373723,-0.402016,-0.449953,-0.395805,-0.348047,-0.346682,-0.337416,-0.337611,-0.393288,-0.428666,-0.380124,-0.355878,-0.379726,-0.369203,-0.383228,-0.406681,-0.349276,-0.282699,-0.258358,-0.242989,-0.291156,-0.366257,-0.442243,-0.381347,-0.345192,-0.242268,-0.194398,-0.226010,-0.249182,-0.310171,-0.245381,-0.205253,-0.220882,-0.222913,-0.253919,-0.248732,-0.326785,-0.424032,-0.337640,-0.331896,-0.311192,-0.264112,-0.233791,-0.235448
|
| 20 |
+
-0.350424,-0.359032,-0.373497,-0.401986,-0.402796,-0.383315,-0.392907,-0.340510,-0.312085,-0.383088,-0.220770,-0.286725,-0.351641,-0.355215,-0.402083,-0.400697,-0.291261,-0.248907,-0.170677,-0.248394,-0.318453,-0.338530,-0.338614,-0.318156,-0.321495,-0.359709,-0.127196,-0.240327,-0.299713,-0.279086,-0.254700,-0.275411,-0.273517,-0.297216,-0.320525,-0.153852,-0.172548,-0.229385,-0.301755,-0.342265,-0.311166,-0.318150,-0.352543,-0.048639,-0.071607,-0.268533,-0.289607,-0.283959,-0.267319,-0.260079
|
| 21 |
+
-0.964092,-0.981316,-0.985699,-0.992781,-0.995176,-0.989678,-0.982325,-0.982057,-0.988278,-0.993290,-0.990371,-0.988039,-0.991030,-0.995092,-0.994336,-0.914073,-0.966236,-0.982834,-0.985577,-0.988424,-0.984546,-0.982759,-0.980608,-0.981926,-0.974276,-0.974173,-0.984720,-0.987995,-0.924663,-0.929842,-0.973058,-0.974470,-0.974946,-0.962752,-0.966594,-0.956474,-0.955562,-0.985379,-0.983611,-0.973575,-0.947261,-0.982415,-0.993005,-0.989440,-0.987150,-0.991603,-0.993103,-0.993578,-0.996618,-0.998509
|
| 22 |
+
-0.343682,-0.955632,-0.968469,-0.966775,-0.969338,-0.966040,-0.961468,-0.972355,-0.970006,-0.971562,-0.974430,-0.973735,-0.979110,-0.941810,-0.871940,-0.969834,-0.953969,-0.957083,-0.959166,-0.962033,-0.962548,-0.965706,-0.963304,-0.964611,-0.966705,-0.972559,-0.969909,-0.496530,-0.955620,-0.943030,-0.946955,-0.951792,-0.972917,-0.972154,-0.950912,-0.950806,-0.955501,-0.964245,-0.960676,-0.948549,-0.884655,-0.935190,-0.950862,-0.957257,-0.950894,-0.951223,-0.963265,-0.958332,-0.959733,-0.961165
|
| 23 |
+
-0.950604,-0.980373,-0.987098,-0.994011,-0.997259,-0.997468,-0.997338,-0.997790,-0.997704,-0.998699,-0.998925,-0.998295,-0.998379,-0.952083,-0.996766,-0.998817,-0.998510,-0.996397,-0.996058,-0.997450,-0.998376,-0.997875,-0.914093,-0.975746,-0.994919,-0.996433,-0.997550,-0.998370,-0.998894,-0.998669,-0.999125,-0.998978,-0.998804,-0.999533,-0.998656,-0.997682,-0.998497,-0.998450,-0.834736,-0.968472,-0.994335,-0.992774,-0.990574,-0.994175,-0.995452,-0.997048,-0.992084,-0.989539,-0.980072,-0.985776
|
| 24 |
+
-0.939398,-0.977721,-0.989307,-0.995290,-0.994476,-0.993824,-0.996508,-0.996210,-0.996710,-0.996729,-0.997048,-0.993825,-0.992908,-0.924114,-0.989748,-0.992787,-0.995059,-0.995068,-0.994660,-0.995180,-0.994255,-0.995351,-0.996913,-0.995124,-0.996143,-0.431167,-0.924139,-0.949362,-0.948191,-0.946291,-0.937074,-0.900433,-0.933070,-0.956639,-0.957529,-0.958669,-0.965045,-0.955709,-0.460601,-0.879740,-0.875032,-0.922579,-0.943933,-0.934303,-0.948454,-0.947718,-0.945830,-0.945338,-0.937103,-0.941794
|
| 25 |
+
-0.423246,-0.367523,-0.361682,-0.388893,-0.416532,-0.423055,-0.444258,-0.479023,-0.482187,-0.537223,-0.602252,-0.564141,-0.518253,-0.496554,-0.479384,-0.500860,-0.487049,-0.480790,-0.440916,-0.416470,-0.464898,-0.510347,-0.486097,-0.476549,-0.470036,-0.418007,-0.485787,-0.482982,-0.490526,-0.413921,-0.365051,-0.321231,-0.348490,-0.383739,-0.384131,-0.403272,-0.374506,-0.385669,-0.398091,-0.360803,-0.363580,-0.397700,-0.419742,-0.437349,-0.425018,-0.322239,-0.298638,-0.328947,-0.379813,-0.409038
|
| 26 |
+
-0.196613,-0.192415,-0.312880,-0.307894,-0.224162,-0.277626,-0.250300,-0.169809,-0.224707,-0.204938,-0.274146,-0.320808,-0.283230,-0.348241,-0.307961,-0.256101,-0.310136,-0.186489,-0.206456,-0.255995,-0.239108,-0.200181,-0.269964,-0.250346,-0.205978,-0.214011,-0.198920,-0.115108,-0.189938,-0.232452,-0.246549,-0.239779,-0.226976,-0.225720,-0.216457,-0.194870,-0.079276,-0.153925,-0.237611,-0.253833,-0.267440,-0.246321,-0.238100,-0.195953,-0.245144,-0.183998,-0.202936,-0.294125,-0.310399,-0.293703
|
| 27 |
+
-0.963227,-0.993842,-0.996529,-0.993552,-0.988543,-0.990957,-0.995914,-0.997077,-0.999004,-0.998555,-0.998526,-0.992646,-0.992302,-0.993779,-0.923986,-0.992838,-0.995360,-0.996616,-0.996429,-0.996093,-0.995365,-0.995434,-0.996747,-0.997464,-0.997455,-0.993234,-0.959351,-0.990517,-0.995058,-0.995631,-0.997528,-0.973430,-0.971546,-0.987627,-0.989737,-0.997822,-0.982241,-0.978691,-0.972604,-0.965235,-0.973195,-0.960071,-0.972263,-0.976083,-0.993947,-0.997006,-0.964780,-0.951245,-0.958541,-0.964470
|
| 28 |
+
-0.882031,-0.952247,-0.990652,-0.996188,-0.993865,-0.988984,-0.988568,-0.996688,-0.997197,-0.996391,-0.997606,-0.997336,-0.996014,-0.996067,-0.944035,-0.985765,-0.994339,-0.999163,-0.998896,-0.997713,-0.997211,-0.997878,-0.998054,-0.998121,-0.996805,-0.995344,-0.996349,-0.997767,-0.997900,-0.964786,-0.893765,-0.952804,-0.979477,-0.994805,-0.994821,-0.995599,-0.991515,-0.990494,-0.995165,-0.995210,-0.989133,-0.990555,-0.993584,-0.982521,-0.960154,-0.898724,-0.960492,-0.982255,-0.991800,-0.994363
|
| 29 |
+
-0.559489,-0.506682,-0.426637,-0.398808,-0.429564,-0.442366,-0.449426,-0.449776,-0.458498,-0.508146,-0.519163,-0.468039,-0.452938,-0.468762,-0.490911,-0.525010,-0.341431,-0.345706,-0.343061,-0.375428,-0.417161,-0.414237,-0.444397,-0.478400,-0.443059,-0.434237,-0.450283,-0.455903,-0.431831,-0.440078,-0.347623,-0.338657,-0.353499,-0.334541,-0.358689,-0.380943,-0.371413,-0.357846,-0.385801,-0.388493,-0.356873,-0.381819,-0.384536,-0.406683,-0.462585,-0.340386,-0.295349,-0.324469,-0.366669,-0.335613
|
| 30 |
+
-0.266166,-0.244058,-0.269980,-0.351671,-0.287167,-0.241313,-0.276071,-0.330722,-0.377743,-0.327485,-0.306524,-0.294486,-0.266634,-0.292223,-0.274925,-0.327995,-0.383109,-0.362775,-0.344689,-0.269575,-0.152683,-0.249049,-0.377876,-0.298123,-0.267344,-0.300699,-0.383990,-0.306743,-0.162510,-0.137352,-0.237696,-0.285228,-0.262954,-0.328231,-0.407600,-0.379510,-0.448257,-0.292380,-0.375656,-0.381849,-0.325248,-0.294493,-0.231952,-0.320417,-0.364337,-0.303031,-0.249808,-0.259686,-0.299689,-0.360720
|
| 31 |
+
-0.960901,-0.995189,-0.988746,-0.987929,-0.992640,-0.986253,-0.985530,-0.989371,-0.991662,-0.996465,-0.997517,-0.994623,-0.969729,-0.981294,-0.989635,-0.991253,-0.993063,-0.992142,-0.982574,-0.982279,-0.991639,-0.993349,-0.993625,-0.995618,-0.998104,-0.992345,-0.728226,-0.947637,-0.989938,-0.993893,-0.994299,-0.995887,-0.998064,-0.998813,-0.998053,-0.997002,-0.988691,-0.981750,-0.975254,-0.979140,-0.893966,-0.925304,-0.937803,-0.978352,-0.981760,-0.978131,-0.981022,-0.936222,-0.913942,-0.944237
|
| 32 |
+
-0.911675,-0.967181,-0.944801,-0.943846,-0.982553,-0.984849,-0.990325,-0.986292,-0.987953,-0.994681,-0.995214,-0.996585,-0.997561,-0.998558,-0.998173,-0.996948,-0.997321,-0.904417,-0.971373,-0.985796,-0.990202,-0.991881,-0.992748,-0.993192,-0.993980,-0.989508,-0.989029,-0.995598,-0.993703,-0.992503,-0.916834,-0.902754,-0.949913,-0.979380,-0.990517,-0.992864,-0.992264,-0.992711,-0.972960,-0.969252,-0.976244,-0.977873,-0.986107,-0.993519,-0.992835,-0.854914,-0.968557,-0.957310,-0.950581,-0.962059
|
| 33 |
+
-0.470436,-0.905218,-0.950264,-0.986029,-0.989868,-0.982031,-0.983786,-0.992609,-0.993772,-0.993366,-0.992242,-0.991204,-0.851988,-0.976177,-0.977877,-0.983682,-0.987804,-0.987427,-0.980413,-0.980921,-0.981517,-0.979873,-0.973622,-0.939817,-0.908534,-0.060639,-0.858760,-0.924939,-0.966649,-0.975314,-0.987538,-0.990190,-0.976892,-0.955295,-0.973299,-0.983980,-0.100786,-0.835378,-0.939359,-0.963819,-0.960624,-0.953638,-0.936184,-0.948693,-0.968543,-0.969739,-0.964658,-0.962358,-0.965095,-0.958303
|
| 34 |
+
-0.487265,-0.385096,-0.306499,-0.345548,-0.344995,-0.333072,-0.345152,-0.358372,-0.430536,-0.439045,-0.367631,-0.300489,-0.359404,-0.477112,-0.431755,-0.378416,-0.357670,-0.397898,-0.383181,-0.391875,-0.459498,-0.458447,-0.409898,-0.376633,-0.363925,-0.417579,-0.498894,-0.517075,-0.272377,-0.246565,-0.193736,-0.196453,-0.186235,-0.164511,-0.178766,-0.174051,-0.208584,-0.237878,-0.252635,-0.278698,-0.293879,-0.316067,-0.265670,-0.246007,-0.277457,-0.318902,-0.330190,-0.300347,-0.264311,-0.277348
|
| 35 |
+
-0.868186,-0.979544,-0.988142,-0.996513,-0.996050,-0.994542,-0.994654,-0.997214,-0.997301,-0.996032,-0.990301,-0.987625,-0.942659,-0.974127,-0.994703,-0.992713,-0.986306,-0.985996,-0.991162,-0.996058,-0.996899,-0.997872,-0.998362,-0.997751,-0.997516,-0.994664,-0.992714,-0.950110,-0.989322,-0.995555,-0.991968,-0.989025,-0.981104,-0.983547,-0.990582,-0.993663,-0.997038,-0.997365,-0.998232,-0.998139,-0.997717,-0.996959,-0.917648,-0.977321,-0.970059,-0.964723,-0.984621,-0.985907,-0.990546,-0.996098
|
| 36 |
+
-0.960858,-0.989618,-0.996760,-0.995907,-0.995879,-0.995890,-0.996620,-0.998036,-0.997216,-0.995801,-0.995614,-0.996846,-0.996908,-0.941668,-0.990778,-0.993646,-0.995480,-0.978485,-0.973720,-0.981534,-0.990754,-0.992280,-0.994782,-0.996372,-0.998130,-0.998739,-0.996097,-0.954136,-0.975079,-0.990477,-0.993567,-0.994637,-0.995880,-0.997325,-0.997139,-0.996951,-0.997697,-0.998256,-0.997883,-0.996821,-0.997520,-0.997079,-0.948577,-0.944419,-0.951630,-0.991403,-0.996580,-0.997199,-0.993324,-0.991364
|
| 37 |
+
-0.744180,-0.886149,-0.987889,-0.993820,-0.997068,-0.996976,-0.984479,-0.983580,-0.992994,-0.997176,-0.995842,-0.995336,-0.995911,-0.907731,-0.917314,-0.865384,-0.874158,-0.956177,-0.986790,-0.992651,-0.994335,-0.944870,-0.943325,-0.978924,-0.994412,-0.990188,-0.984656,-0.990167,-0.984519,-0.989551,-0.984632,-0.984950,-0.992882,-0.995530,-0.874968,-0.915338,-0.978036,-0.997049,-0.986365,-0.986196,-0.991933,-0.993087,-0.994853,-0.994104,-0.991706,-0.992672,-0.996423,-0.995699,-0.995426,-0.996401
|
| 38 |
+
-0.469601,-0.409429,-0.396753,-0.402102,-0.429262,-0.373694,-0.361335,-0.366809,-0.389022,-0.401748,-0.407526,-0.471038,-0.588018,-0.504010,-0.462696,-0.448509,-0.418815,-0.399544,-0.389553,-0.385214,-0.394159,-0.365015,-0.374283,-0.441875,-0.492007,-0.521563,-0.473832,-0.386347,-0.361240,-0.350068,-0.331512,-0.327626,-0.367955,-0.394232,-0.375404,-0.377790,-0.385060,-0.398210,-0.455139,-0.399907,-0.380277,-0.346135,-0.331211,-0.363850,-0.377822,-0.379600,-0.394675,-0.393759,-0.372487,-0.404893
|
| 39 |
+
-0.341425,-0.297045,-0.329719,-0.396253,-0.393155,-0.360166,-0.357067,-0.374689,-0.403724,-0.383623,-0.354901,-0.361391,-0.419980,-0.408444,-0.433613,-0.471624,-0.465460,-0.460095,-0.450909,-0.407809,-0.441001,-0.473478,-0.455993,-0.476247,-0.466200,-0.446681,-0.504466,-0.378426,-0.347860,-0.399147,-0.442663,-0.414661,-0.388062,-0.421678,-0.376266,-0.361368,-0.314654,-0.388278,-0.438901,-0.408581,-0.371949,-0.385249,-0.424764,-0.337461,-0.332666,-0.369767,-0.399435,-0.383411,-0.355805,-0.327911
|
| 40 |
+
-0.673730,-0.961714,-0.984960,-0.985643,-0.961258,-0.945083,-0.957441,-0.994267,-0.997325,-0.998324,-0.997789,-0.997831,-0.998008,-0.998137,-0.998467,-0.972426,-0.992120,-0.995019,-0.995585,-0.995984,-0.997264,-0.998320,-0.997457,-0.996497,-0.996365,-0.997815,-0.999206,-0.998579,-0.997418,-0.997592,-0.935685,-0.981432,-0.996315,-0.997172,-0.998629,-0.997831,-0.996442,-0.996166,-0.996929,-0.996070,-0.995587,-0.995540,-0.996835,-0.997813,-0.997756,-0.997229,-0.997539,-0.997882,-0.996135,-0.996087
|
| 41 |
+
-0.991549,-0.995410,-0.995682,-0.996002,-0.996948,-0.996866,-0.997711,-0.998614,-0.998131,-0.998350,-0.998688,-0.998207,-0.997801,-0.998759,-0.999579,-0.998753,-0.997461,-0.997276,-0.997884,-0.998050,-0.793113,-0.931892,-0.977701,-0.995445,-0.997387,-0.996941,-0.997304,-0.997255,-0.995834,-0.996580,-0.996614,-0.993851,-0.996568,-0.996011,-0.997655,-0.997497,-0.988980,-0.996044,-0.997241,-0.995918,-0.993588,-0.995507,-0.995920,-0.997890,-0.998410,-0.998030,-0.988408,-0.981783,-0.986457,-0.996508
|
| 42 |
+
-0.932176,-0.984428,-0.987043,-0.993209,-0.993737,-0.994088,-0.993227,-0.993691,-0.995446,-0.994807,-0.990098,-0.988476,-0.990456,-0.992851,-0.993506,-0.992224,-0.991630,-0.758748,-0.958408,-0.984338,-0.989347,-0.989787,-0.991407,-0.993804,-0.993241,-0.987990,-0.988882,-0.987577,-0.989971,-0.991339,-0.991670,-0.991453,-0.991894,-0.993656,-0.987971,-0.935462,-0.968613,-0.989591,-0.987626,-0.979955,-0.971816,-0.983335,-0.969466,-0.955950,-0.958373,-0.971909,-0.969468,-0.963460,-0.955781,-0.967852
|
| 43 |
+
-0.488392,-0.433205,-0.340920,-0.333693,-0.356182,-0.365945,-0.411474,-0.384203,-0.372018,-0.377440,-0.367889,-0.389804,-0.357826,-0.326957,-0.365882,-0.382689,-0.409972,-0.443636,-0.406872,-0.374477,-0.325015,-0.335084,-0.326436,-0.309422,-0.312939,-0.301242,-0.293552,-0.289828,-0.308425,-0.328737,-0.324247,-0.338685,-0.359477,-0.307339,-0.295343,-0.289490,-0.298214,-0.280304,-0.275683,-0.317810,-0.328020,-0.325683,-0.312744,-0.304710,-0.323669,-0.344068,-0.348696,-0.290104,-0.218987,-0.199116
|
| 44 |
+
-0.942472,-0.995850,-0.998002,-0.998352,-0.997673,-0.998282,-0.998263,-0.997858,-0.998339,-0.998337,-0.998564,-0.997838,-0.997420,-0.997437,-0.996998,-0.997420,-0.973892,-0.991349,-0.992757,-0.989290,-0.990973,-0.993406,-0.995773,-0.998040,-0.997413,-0.997230,-0.997026,-0.998566,-0.997522,-0.997995,-0.997482,-0.997467,-0.998698,-0.975859,-0.995385,-0.998183,-0.994225,-0.994069,-0.991496,-0.994445,-0.997978,-0.998481,-0.997150,-0.997622,-0.997593,-0.998493,-0.999079,-0.998922,-0.997171,-0.939467
|
| 45 |
+
-0.976717,-0.989516,-0.989822,-0.990786,-0.991873,-0.992857,-0.992318,-0.995341,-0.993457,-0.998051,-0.996181,-0.995002,-0.995603,-0.996375,-0.996593,-0.998387,-0.999048,-0.996526,-0.995632,-0.996984,-0.995952,-0.997070,-0.997925,-0.997678,-0.998479,-0.997511,-0.998784,-0.999012,-0.996716,-0.922167,-0.983414,-0.992836,-0.995856,-0.993452,-0.996612,-0.993520,-0.992253,-0.995724,-0.995805,-0.996690,-0.995667,-0.995041,-0.996210,-0.952195,-0.973200,-0.981532,-0.996016,-0.996748,-0.996937,-0.997903
|
| 46 |
+
-0.807875,-0.928589,-0.976480,-0.985611,-0.990987,-0.994344,-0.991291,-0.988617,-0.994308,-0.994736,-0.994346,-0.992890,-0.995697,-0.956208,-0.822866,-0.873345,-0.962068,-0.988513,-0.994969,-0.995318,-0.989357,-0.988664,-0.988150,-0.992489,-0.995925,-0.993692,-0.992500,-0.989070,-0.992760,-0.993866,-0.993938,-0.993316,-0.992182,-0.992773,-0.981306,-0.667647,-0.903981,-0.913981,-0.970614,-0.986086,-0.989819,-0.990812,-0.994763,-0.994034,-0.996438,-0.992049,-0.990856,-0.990700,-0.993292,-0.989299
|
| 47 |
+
-0.171779,-0.083044,-0.115577,-0.164425,-0.153174,-0.109061,-0.101064,-0.140090,-0.148370,-0.062304,-0.129745,-0.299814,-0.299957,-0.222085,-0.242130,-0.034398,-0.026571,-0.075290,0.015000,0.053294,-0.013647,-0.099316,-0.046267,0.011647,-0.059076,-0.139420,-0.220611,-0.148897,-0.002522,0.021592,0.025531,0.076389,0.085361,-0.008811,0.000503,0.021890,-0.041232,-0.105303,-0.037011,0.061021,-0.054216,0.045224,0.148337,0.134034,0.094034,0.126211,0.180293,0.106763,-0.026593,-0.071352
|
| 48 |
+
-0.865362,-0.967515,-0.980441,-0.980548,-0.983852,-0.981952,-0.980908,-0.983439,-0.986981,-0.986236,-0.986640,-0.988016,-0.988153,-0.989072,-0.990750,-0.990681,-0.987720,-0.988662,-0.989124,-0.885393,-0.975188,-0.962229,-0.956153,-0.943138,-0.978716,-0.970962,-0.976868,-0.974325,-0.983746,-0.983774,-0.984079,-0.984236,-0.983431,-0.982366,-0.980820,-0.980939,-0.983994,-0.929808,-0.972435,-0.982853,-0.992125,-0.973987,-0.974038,-0.984447,-0.985566,-0.985566,-0.983823,-0.983513,-0.982692,-0.987521
|
| 49 |
+
-0.967419,-0.991223,-0.986774,-0.986764,-0.992924,-0.996367,-0.995339,-0.995910,-0.998098,-0.997894,-0.997851,-0.997122,-0.996883,-0.996679,-0.997027,-0.997949,-0.997007,-0.991558,-0.992469,-0.996871,-0.938970,-0.954205,-0.979070,-0.995864,-0.987953,-0.986658,-0.993890,-0.996499,-0.997767,-0.998619,-0.998943,-0.998237,-0.997823,-0.996013,-0.994446,-0.996329,-0.996191,-0.995645,-0.996858,-0.996574,-0.995992,-0.969131,-0.974518,-0.991335,-0.995257,-0.995322,-0.996408,-0.996035,-0.995564,-0.995935
|
| 50 |
+
-0.991749,-0.993686,-0.993069,-0.991494,-0.991861,-0.994285,-0.990648,-0.988184,-0.988792,-0.993438,-0.994363,-0.991723,-0.992108,-0.993754,-0.994341,-0.995439,-0.995014,-0.993617,-0.994868,-0.995001,-0.995500,-0.947145,-0.981721,-0.986818,-0.988293,-0.992004,-0.991759,-0.991303,-0.994238,-0.993301,-0.993435,-0.991540,-0.989962,-0.994543,-0.993884,-0.992536,-0.992302,-0.990531,-0.991326,-0.990029,-0.984129,-0.876802,-0.972785,-0.921769,-0.904819,-0.930516,-0.957843,-0.957674,-0.965702,-0.981890
|
| 51 |
+
-0.344831,-0.305639,-0.364589,-0.352558,-0.312678,-0.319975,-0.364150,-0.378057,-0.329084,-0.323811,-0.289488,-0.323439,-0.404397,-0.450232,-0.463477,-0.396266,-0.397066,-0.440270,-0.379130,-0.378818,-0.365800,-0.322196,-0.346184,-0.348287,-0.347171,-0.415204,-0.443193,-0.318311,-0.234282,-0.100246,-0.072827,-0.206869,-0.236561,-0.197061,-0.192912,-0.253405,-0.236861,-0.174731,-0.229945,-0.227209,-0.134093,-0.183728,-0.217424,-0.206434,-0.191420,-0.192462,-0.220957,-0.206302,-0.265290,-0.368158
|
| 52 |
+
-0.955805,-0.983243,-0.993622,-0.994001,-0.994228,-0.996721,-0.997689,-0.996411,-0.996113,-0.997477,-0.997125,-0.997551,-0.997415,-0.995907,-0.995202,-0.995412,-0.996091,-0.997465,-0.997210,-0.995081,-0.995568,-0.997418,-0.997583,-0.966206,-0.994191,-0.994991,-0.995518,-0.991055,-0.990873,-0.994735,-0.996381,-0.996688,-0.995006,-0.995333,-0.996388,-0.996429,-0.994276,-0.994326,-0.995657,-0.995104,-0.995898,-0.997571,-0.995867,-0.995276,-0.996488,-0.994924,-0.992709,-0.932115,-0.984942,-0.992196
|
| 53 |
+
-0.847169,-0.967056,-0.954289,-0.952402,-0.978880,-0.978447,-0.979039,-0.994936,-0.995684,-0.996388,-0.997234,-0.995894,-0.996835,-0.995386,-0.993001,-0.993612,-0.995134,-0.996976,-0.997426,-0.997461,-0.997208,-0.994767,-0.994667,-0.981281,-0.994098,-0.989513,-0.987763,-0.973645,-0.973144,-0.976067,-0.987015,-0.992329,-0.991274,-0.993187,-0.991406,-0.992581,-0.994466,-0.996071,-0.995677,-0.995739,-0.995820,-0.996045,-0.996628,-0.996157,-0.993956,-0.889275,-0.921437,-0.963908,-0.943280,-0.933034
|
| 54 |
+
-0.775746,-0.958088,-0.971702,-0.975640,-0.976676,-0.973588,-0.974428,-0.977492,-0.976056,-0.975148,-0.980413,-0.967306,-0.963944,-0.965978,-0.980394,-0.979766,-0.970605,-0.972916,-0.978159,-0.535081,-0.909217,-0.967066,-0.974473,-0.974641,-0.976330,-0.976080,-0.976217,-0.971541,-0.970649,-0.974293,-0.974499,-0.977813,-0.984162,-0.992838,-0.974825,-0.970378,-0.978330,-0.986944,-0.992052,-0.973966,-0.967012,-0.971557,-0.972743,-0.974111,-0.851548,-0.854991,-0.927778,-0.942217,-0.965022,-0.990374
|
| 55 |
+
-0.971225,-0.986354,-0.986855,-0.991942,-0.988069,-0.989211,-0.993185,-0.985751,-0.979022,-0.987644,-0.992463,-0.994307,-0.994332,-0.993317,-0.991015,-0.891191,-0.982269,-0.994799,-0.992490,-0.994445,-0.995588,-0.995314,-0.990583,-0.984837,-0.980077,-0.981165,-0.983674,-0.988425,-0.993440,-0.994812,-0.987669,-0.984594,-0.923475,-0.943126,-0.980609,-0.992394,-0.971965,-0.951619,-0.955240,-0.984321,-0.995065,-0.996264,-0.991095,-0.990051,-0.991154,-0.977947,-0.830825,-0.951409,-0.964678,-0.963351
|
| 56 |
+
-0.907377,-0.982458,-0.993657,-0.997574,-0.995709,-0.995186,-0.996270,-0.996727,-0.995325,-0.994521,-0.998129,-0.997560,-0.995131,-0.995733,-0.997525,-0.980153,-0.991253,-0.994696,-0.993639,-0.987043,-0.987884,-0.995166,-0.993396,-0.994306,-0.994858,-0.993529,-0.995119,-0.995986,-0.994398,-0.994639,-0.943462,-0.953851,-0.983065,-0.944056,-0.880996,-0.896726,-0.993013,-0.989064,-0.985947,-0.967804,-0.969116,-0.994337,-0.994712,-0.993716,-0.996295,-0.997031,-0.998242,-0.997094,-0.949586,-0.988599
|
| 57 |
+
-0.752006,-0.952306,-0.977613,-0.991431,-0.993579,-0.995597,-0.994549,-0.993845,-0.995518,-0.994705,-0.993751,-0.994028,-0.993789,-0.993845,-0.994498,-0.829529,-0.921448,-0.970080,-0.991963,-0.989075,-0.989706,-0.989951,-0.989221,-0.995563,-0.996997,-0.994851,-0.990665,-0.991906,-0.995964,-0.993451,-0.991884,-0.984171,-0.985758,-0.993063,-0.995683,-0.993864,-0.993389,-0.664601,-0.905344,-0.892708,-0.860712,-0.877520,-0.960008,-0.942526,-0.930615,-0.949076,-0.986389,-0.992684,-0.992436,-0.987393
|
| 58 |
+
-0.265482,-0.290533,-0.320314,-0.302458,-0.324942,-0.367742,-0.413544,-0.418175,-0.386766,-0.388821,-0.411681,-0.382816,-0.326788,-0.302819,-0.317987,-0.314770,-0.285203,-0.277056,-0.251151,-0.261119,-0.286493,-0.383698,-0.410551,-0.300354,-0.222183,-0.248354,-0.255943,-0.223519,-0.293261,-0.351955,-0.363554,-0.340797,-0.343674,-0.344510,-0.318255,-0.284236,-0.247966,-0.270158,-0.331375,-0.328332,-0.289347,-0.281024,-0.255159,-0.260896,-0.283107,-0.165372,-0.125759,-0.169484,-0.245086,-0.271324
|
| 59 |
+
-0.384420,-0.482899,-0.374555,-0.323033,-0.253212,-0.246085,-0.316803,-0.348721,-0.343985,-0.223721,-0.230241,-0.299226,-0.398009,-0.385797,-0.335082,-0.286009,-0.291743,0.026954,0.001011,-0.224351,-0.277175,-0.215760,-0.167306,-0.243784,-0.295611,-0.186062,-0.156549,-0.208711,-0.282385,-0.186882,-0.127893,-0.215560,-0.214715,-0.300505,-0.270012,-0.204111,-0.283910,-0.368368,-0.288768,-0.199870,-0.143472,-0.179767,-0.079088,-0.162107,-0.331674,-0.294325,-0.154513,-0.162891,-0.169058,-0.122188
|
| 60 |
+
0.214658,0.086254,-0.084243,0.005563,-0.003712,-0.311163,-0.246857,-0.007142,0.111909,0.176318,-0.063161,-0.216465,-0.081003,0.055525,0.004572,0.122671,0.173854,0.053620,0.144792,-0.088371,-0.013861,0.056575,-0.064953,-0.078457,0.214398,0.271509,0.046068,0.079084,0.056294,0.094543,0.247483,0.178767,-0.126797,-0.016042,0.187699,0.110563,0.317016,0.255458,-0.090053,0.050146,0.063765,-0.164792,0.081514,0.228304,0.124667,0.312010,0.034914,-0.101561,0.124998,0.124373
|
| 61 |
+
-0.955268,-0.987376,-0.976972,-0.979017,-0.996434,-0.996972,-0.996042,-0.996469,-0.998139,-0.998925,-0.998772,-0.997177,-0.997703,-0.998285,-0.996927,-0.996878,-0.997634,-0.978224,-0.945274,-0.978666,-0.992116,-0.994860,-0.995712,-0.997043,-0.996406,-0.995897,-0.995501,-0.994321,-0.995510,-0.996205,-0.994691,-0.994701,-0.996290,-0.996356,-0.995328,-0.995299,-0.994315,-0.968494,-0.927167,-0.975962,-0.991107,-0.994041,-0.995391,-0.996801,-0.997235,-0.997126,-0.997049,-0.996702,-0.995783,-0.994748
|
| 62 |
+
-0.965429,-0.952426,-0.958275,-0.987356,-0.994391,-0.988809,-0.987452,-0.993596,-0.994390,-0.994795,-0.995658,-0.995906,-0.997023,-0.997315,-0.998076,-0.997905,-0.997744,-0.992071,-0.952623,-0.973297,-0.984174,-0.978736,-0.991468,-0.990424,-0.992974,-0.994508,-0.994653,-0.994998,-0.993203,-0.994452,-0.995309,-0.993509,-0.979330,-0.967277,-0.983001,-0.985493,-0.991141,-0.994329,-0.995904,-0.997256,-0.994810,-0.983151,-0.981788,-0.993389,-0.995673,-0.994126,-0.993266,-0.988612,-0.987723,-0.993492
|
| 63 |
+
-0.730141,-0.906150,-0.982233,-0.991808,-0.992998,-0.990958,-0.983312,-0.979118,-0.982634,-0.994079,-0.991173,-0.985727,-0.982800,-0.980296,-0.968297,-0.877010,-0.964655,-0.986748,-0.976070,-0.968275,-0.972158,-0.972818,-0.963028,-0.961219,-0.967559,-0.968638,-0.971143,-0.963642,-0.960069,-0.963670,-0.967555,-0.968829,-0.956085,-0.807613,-0.971142,-0.985933,-0.985436,-0.970995,-0.965177,-0.972446,-0.984323,-0.978681,-0.976411,-0.968799,-0.964850,-0.978221,-0.963882,-0.950665,-0.949239,-0.951140
|
| 64 |
+
-0.547995,-0.557558,-0.551946,-0.556380,-0.557972,-0.547246,-0.567365,-0.607137,-0.620794,-0.639184,-0.652526,-0.623103,-0.603368,-0.618574,-0.626226,-0.607933,-0.610562,-0.647512,-0.686139,-0.524086,-0.550827,-0.555352,-0.559171,-0.581242,-0.565338,-0.541894,-0.550283,-0.603193,-0.613821,-0.573374,-0.584860,-0.602929,-0.589970,-0.593753,-0.600805,-0.653085,-0.645235,-0.590639,-0.597485,-0.615788,-0.636231,-0.656423,-0.709704,-0.701626,-0.644293,-0.638193,-0.634037,-0.627198,-0.610066,-0.580623
|
| 65 |
+
-0.389400,-0.390670,-0.445152,-0.484021,-0.460053,-0.438640,-0.509089,-0.566228,-0.493069,-0.455975,-0.511204,-0.506898,-0.405855,-0.409344,-0.481501,-0.630739,-0.519019,-0.428689,-0.532962,-0.520748,-0.453301,-0.481653,-0.468871,-0.381234,-0.432461,-0.537462,-0.542527,-0.496110,-0.498454,-0.589981,-0.548244,-0.474733,-0.499408,-0.583967,-0.560536,-0.488280,-0.403714,-0.361950,-0.398684,-0.433250,-0.417313,-0.431271,-0.432460,-0.481371,-0.468826,-0.364008,-0.338823,-0.374638,-0.473340,-0.466375
|
| 66 |
+
-0.208033,-0.179041,-0.229048,-0.164928,-0.087985,-0.166441,-0.205634,-0.161374,-0.138189,-0.321420,-0.260177,-0.366546,-0.419074,-0.324596,-0.385641,-0.351361,-0.329414,-0.339352,-0.368683,-0.308693,-0.290043,-0.346680,-0.455267,-0.386935,-0.298745,-0.350402,-0.379527,-0.371774,-0.385005,-0.373215,-0.116550,-0.057610,-0.112016,-0.156676,-0.062718,-0.163219,-0.203794,-0.094651,-0.131446,-0.272700,-0.228289,-0.221862,-0.242550,-0.239063,-0.254231,-0.252231,-0.200173,-0.181639,-0.250229,-0.270149
|
| 67 |
+
-0.925469,-0.988529,-0.994318,-0.995845,-0.996965,-0.994163,-0.993439,-0.996112,-0.997267,-0.996099,-0.995846,-0.995129,-0.995382,-0.995783,-0.996253,-0.996364,-0.895077,-0.983214,-0.992325,-0.996336,-0.996538,-0.995653,-0.997053,-0.995067,-0.994923,-0.996119,-0.997168,-0.996003,-0.996070,-0.997064,-0.995769,-0.995078,-0.995376,-0.992319,-0.968357,-0.986274,-0.991192,-0.996280,-0.997050,-0.995326,-0.996402,-0.996632,-0.992437,-0.991496,-0.994686,-0.995201,-0.995233,-0.997367,-0.997020,-0.987124
|
| 68 |
+
-0.963456,-0.988980,-0.994482,-0.995081,-0.996522,-0.975964,-0.975794,-0.987991,-0.980345,-0.983882,-0.996657,-0.996899,-0.995001,-0.996034,-0.995167,-0.995568,-0.996676,-0.996402,-0.996651,-0.993125,-0.984727,-0.995027,-0.997019,-0.991678,-0.990214,-0.988626,-0.987719,-0.992661,-0.995646,-0.996174,-0.996011,-0.994894,-0.995491,-0.997323,-0.996720,-0.996990,-0.997506,-0.920200,-0.967695,-0.975772,-0.993149,-0.994762,-0.996434,-0.992164,-0.991782,-0.996424,-0.997715,-0.997560,-0.996024,-0.995742
|
| 69 |
+
-0.703402,-0.554773,-0.846446,-0.819078,-0.845072,-0.747608,-0.934471,-0.774065,-0.899308,-0.894043,-0.704687,-0.716145,-0.789764,-0.812687,-0.975714,-0.975008,-0.882756,-0.912035,-0.699179,-0.926128,-0.980846,-0.976944,-0.980520,-0.981644,-0.988943,-0.996096,-0.996542,-0.997444,-0.996424,-0.993925,-0.991128,-0.991861,-0.987205,-0.747614,-0.925094,-0.975794,-0.947142,-0.945106,-0.979204,-0.930434,-0.783216,-0.849361,-0.809970,-0.894664,-0.937650,-0.974110,-0.970236,-0.945921,-0.926757,-0.944841
|
| 70 |
+
-0.547625,-0.461614,-0.428962,-0.417300,-0.379726,-0.358462,-0.334931,-0.279438,-0.298288,-0.319141,-0.305555,-0.296822,-0.258272,-0.253753,-0.340254,-0.436924,-0.333301,-0.243340,-0.196942,-0.176257,-0.156777,-0.085100,-0.088733,-0.137394,-0.118518,-0.177834,-0.252330,-0.250410,-0.453460,-0.434397,-0.460954,-0.462223,-0.462110,-0.440608,-0.409183,-0.403424,-0.411419,-0.439868,-0.469322,-0.409703,-0.328628,-0.317125,-0.358906,-0.419426,-0.412214,-0.340691,-0.302320,-0.301964,-0.332095,-0.340296
|
| 71 |
+
-0.266946,-0.194854,-0.270365,-0.283271,-0.244040,-0.236381,-0.214211,-0.232794,-0.227089,-0.175511,-0.064572,-0.076449,-0.147358,-0.184639,-0.148514,-0.156551,-0.170439,-0.112258,-0.116552,-0.152670,-0.124942,-0.199812,-0.240661,-0.191899,-0.233345,-0.206679,-0.151008,-0.111466,-0.158608,-0.111226,-0.131234,-0.152424,-0.138939,-0.195909,-0.158122,-0.086607,-0.140049,-0.086010,-0.095081,-0.178950,-0.286743,-0.277691,-0.212903,-0.182999,-0.167115,-0.185527,-0.108086,-0.172251,-0.222181,-0.160932
|
| 72 |
+
0.011203,0.125388,0.183640,0.179628,0.182759,0.156543,0.149620,0.321838,0.183512,0.274800,0.259781,0.210616,0.253967,0.176582,0.215962,0.336427,0.127036,0.149925,0.078226,0.061638,0.047664,0.089282,0.164414,0.182380,0.197557,0.050700,0.085905,0.055306,0.055448,0.039000,-0.104725,-0.117529,0.012223,0.128050,0.346371,0.403575,0.225822,0.176234,0.255371,0.216651,0.336797,0.352290,0.152771,0.202300,0.165756,0.188582,0.319201,0.314301,0.271934,0.244971
-0.969901,-0.749612,-0.778809,-0.708892,-0.953262,-0.986811,-0.989511,-0.989513,-0.991749,-0.992764,-0.993439,-0.996208,-0.991773,-0.991039,-0.991807,-0.993524,-0.992336,-0.987994,-0.988484,-0.992741,-0.992582,-0.966983,-0.989372,-0.994603,-0.995407,-0.995271,-0.995234,-0.996147,-0.995322,-0.995645,-0.996822,-0.997699,-0.998616,-0.998112,-0.995689,-0.993010,-0.993601,-0.997560,-0.997368,-0.997718,-0.996947,-0.996473,-0.997565,-0.997451,-0.877094,-0.977308,-0.993200,-0.991261,-0.988773,-0.992074
-0.950890,-0.995688,-0.996411,-0.996928,-0.997313,-0.998312,-0.983820,-0.979845,-0.988249,-0.995668,-0.999218,-0.998719,-0.997273,-0.994900,-0.995448,-0.998481,-0.998242,-0.989813,-0.989073,-0.995014,-0.997057,-0.996627,-0.996992,-0.997388,-0.978924,-0.985215,-0.997087,-0.998469,-0.998174,-0.998404,-0.998556,-0.998087,-0.998700,-0.999303,-0.998478,-0.997872,-0.997458,-0.998192,-0.961087,-0.992780,-0.996045,-0.996713,-0.996564,-0.997764,-0.998230,-0.997613,-0.996978,-0.997026,-0.997901,-0.997745
-0.644629,-0.955188,-0.984927,-0.984100,-0.983924,-0.981700,-0.981965,-0.987789,-0.994759,-0.991124,-0.973648,-0.984628,-0.983634,-0.992366,-0.993605,-0.989691,-0.984178,-0.607342,-0.941638,-0.985878,-0.985755,-0.990889,-0.994482,-0.995771,-0.994236,-0.994459,-0.982257,-0.974007,-0.979891,-0.982521,-0.979357,-0.978661,-0.980767,-0.972148,-0.672401,-0.944678,-0.988177,-0.987271,-0.985586,-0.991113,-0.992934,-0.993636,-0.994060,-0.995988,-0.991600,-0.990128,-0.992898,-0.987069,-0.989939,-0.995205
-0.381469,-0.267677,-0.276367,-0.329480,-0.330567,-0.277933,-0.281723,-0.360760,-0.348318,-0.364880,-0.369339,-0.295171,-0.296975,-0.347830,-0.358549,-0.297934,-0.272924,-0.306092,-0.347568,-0.336609,-0.360488,-0.328945,-0.284612,-0.323899,-0.392338,-0.437708,-0.443819,-0.390587,-0.374181,-0.350667,-0.273491,-0.235363,-0.293078,-0.342662,-0.340323,-0.360228,-0.371526,-0.367132,-0.336858,-0.352186,-0.383375,-0.389365,-0.357139,-0.373686,-0.400215,-0.336684,-0.315933,-0.387087,-0.395033,-0.398557
-0.204197,-0.259667,-0.279620,-0.291246,-0.301075,-0.297034,-0.267388,-0.236188,-0.236785,-0.249074,-0.274197,-0.262158,-0.252647,-0.296999,-0.259760,-0.234662,-0.251892,-0.228226,-0.245491,-0.233878,-0.227047,-0.268381,-0.244385,-0.193461,-0.266562,-0.249868,-0.278258,-0.319574,-0.363146,-0.340421,-0.310263,-0.305719,-0.316577,-0.308289,-0.355371,-0.371061,-0.376644,-0.372374,-0.394695,-0.392939,-0.365594,-0.320433,-0.332218,-0.315757,-0.302194,-0.287220,-0.320199,-0.361170,-0.404407,-0.349515
-0.815437,-0.974526,-0.988292,-0.991494,-0.997183,-0.996541,-0.997706,-0.996982,-0.995460,-0.995420,-0.996244,-0.996282,-0.995352,-0.986658,-0.984196,-0.988182,-0.993073,-0.990138,-0.976455,-0.981942,-0.994613,-0.995830,-0.996701,-0.996239,-0.996022,-0.994752,-0.994328,-0.996278,-0.996846,-0.997144,-0.997532,-0.998190,-0.997327,-0.994856,-0.994033,-0.994823,-0.995812,-0.890163,-0.991233,-0.992687,-0.996241,-0.995834,-0.996624,-0.995747,-0.990064,-0.992709,-0.996432,-0.999295,-0.997363,-0.995004
-0.993050,-0.995569,-0.995089,-0.994266,-0.995235,-0.994821,-0.996938,-0.996356,-0.995720,-0.997337,-0.996625,-0.996189,-0.987088,-0.986676,-0.993666,-0.993155,-0.995811,-0.997694,-0.935541,-0.992709,-0.996383,-0.997216,-0.998408,-0.998402,-0.998298,-0.997939,-0.997759,-0.998500,-0.997784,-0.997621,-0.997079,-0.997259,-0.997850,-0.996853,-0.997707,-0.998643,-0.976955,-0.986899,-0.992332,-0.998156,-0.997260,-0.997314,-0.995670,-0.995190,-0.996138,-0.996593,-0.996769,-0.996176,-0.996596,-0.996457
-0.944434,-0.952784,-0.992603,-0.994216,-0.995416,-0.993453,-0.993365,-0.993882,-0.993517,-0.994626,-0.995227,-0.995508,-0.995623,-0.995507,-0.993460,-0.991437,-0.992520,-0.764059,-0.929440,-0.984492,-0.985230,-0.985726,-0.992048,-0.995232,-0.993831,-0.993089,-0.993443,-0.994534,-0.993034,-0.991492,-0.992971,-0.993331,-0.993586,-0.993920,-0.994519,-0.983734,-0.884718,-0.957004,-0.976345,-0.987387,-0.990242,-0.988045,-0.984544,-0.982159,-0.986998,-0.986808,-0.987267,-0.987648,-0.988554,-0.988101
-0.487861,-0.414603,-0.354786,-0.365833,-0.413439,-0.426839,-0.405722,-0.428879,-0.477162,-0.466639,-0.383783,-0.293438,-0.337781,-0.403894,-0.475696,-0.365212,-0.296895,-0.297886,-0.294424,-0.286195,-0.266108,-0.253721,-0.285009,-0.307546,-0.300226,-0.285595,-0.303768,-0.331880,-0.268842,-0.175638,-0.210034,-0.255119,-0.255976,-0.220873,-0.201152,-0.248425,-0.176833,-0.127170,-0.216388,-0.258099,-0.329580,-0.236744,-0.147654,-0.158629,-0.195170,-0.239908,-0.201261,-0.188434,-0.196325,-0.212732
-0.289142,-0.279392,-0.270767,-0.299962,-0.336589,-0.292066,-0.288936,-0.358942,-0.343746,-0.303403,-0.321272,-0.303905,-0.252613,-0.227055,-0.245659,-0.286888,-0.305743,-0.298735,-0.333203,-0.163862,-0.214876,-0.212189,-0.210175,-0.269328,-0.282947,-0.268475,-0.277582,-0.327602,-0.214994,-0.142635,-0.168382,-0.223974,-0.263970,-0.167223,-0.139323,-0.268935,-0.226344,-0.099624,-0.115361,-0.238649,-0.270827,-0.228370,-0.223590,-0.203275,-0.158668,-0.201446,-0.159701,-0.109564,-0.170152,-0.243490
-0.937115,-0.980407,-0.989976,-0.992486,-0.990345,-0.980247,-0.974960,-0.990240,-0.988670,-0.991354,-0.994719,-0.987619,-0.987438,-0.991248,-0.982999,-0.971087,-0.963460,-0.940007,-0.928488,-0.951123,-0.964617,-0.976590,-0.984578,-0.987764,-0.987312,-0.990392,-0.994359,-0.996415,-0.994355,-0.988073,-0.987848,-0.991997,-0.992594,-0.992718,-0.994182,-0.994363,-0.995032,-0.994101,-0.991439,-0.983195,-0.980167,-0.978023,-0.975260,-0.970401,-0.983708,-0.968356,-0.961121,-0.968759,-0.957804,-0.961760
-0.951032,-0.963129,-0.976924,-0.971956,-0.972722,-0.985254,-0.992753,-0.992841,-0.979788,-0.974871,-0.983932,-0.991965,-0.985929,-0.985594,-0.994481,-0.983397,-0.971810,-0.975200,-0.989538,-0.997593,-0.997676,-0.954767,-0.989858,-0.992509,-0.993912,-0.993301,-0.991383,-0.991345,-0.988376,-0.986368,-0.931186,-0.924157,-0.946738,-0.949382,-0.965486,-0.968759,-0.961089,-0.961918,-0.969507,-0.956914,-0.925121,-0.888546,-0.898204,-0.946254,-0.970096,-0.984085,-0.985438,-0.987756,-0.980777,-0.966424
-0.972089,-0.987401,-0.967398,-0.971010,-0.982939,-0.994551,-0.995946,-0.997534,-0.995202,-0.996491,-0.994840,-0.978226,-0.968085,-0.959310,-0.955893,-0.964516,-0.958266,-0.965738,-0.856213,-0.909738,-0.964486,-0.959725,-0.938408,-0.945307,-0.969049,-0.969447,-0.986039,-0.987145,-0.981254,-0.983040,-0.987905,-0.977354,-0.969398,-0.975286,-0.983316,-0.980971,-0.985414,-0.987237,-0.984132,-0.987514,-0.979447,-0.949491,-0.931431,-0.964555,-0.986755,-0.997942,-0.987306,-0.981447,-0.989147,-0.988025
-0.207676,-0.189596,-0.132828,-0.108118,-0.196187,-0.226529,-0.207141,-0.219620,-0.226811,-0.212686,-0.194400,-0.224667,-0.217832,-0.172445,-0.159796,-0.138343,-0.097972,-0.122792,-0.188362,-0.231666,-0.266684,-0.227935,-0.218459,-0.220878,-0.223905,-0.237566,-0.145159,-0.106671,-0.174953,-0.209685,-0.194143,-0.161648,-0.184908,-0.202879,-0.192479,-0.152902,-0.164827,-0.228632,-0.245324,-0.240545,-0.152198,-0.024613,-0.061021,-0.104363,-0.110290,-0.138345,-0.098039,-0.102075,-0.040557,-0.062734
-0.942960,-0.979993,-0.990505,-0.994215,-0.994963,-0.997265,-0.998033,-0.997803,-0.997655,-0.996482,-0.994765,-0.991466,-0.992029,-0.996150,-0.995992,-0.879079,-0.980148,-0.997689,-0.994487,-0.994267,-0.997650,-0.997173,-0.996842,-0.997547,-0.995534,-0.994575,-0.993543,-0.992275,-0.992662,-0.995201,-0.998067,-0.997317,-0.996784,-0.997967,-0.996551,-0.995642,-0.997268,-0.996945,-0.997111,-0.997771,-0.997396,-0.996687,-0.996750,-0.997415,-0.930875,-0.970493,-0.991216,-0.994591,-0.995717,-0.995599
-0.981076,-0.984419,-0.996317,-0.997089,-0.998605,-0.998599,-0.995078,-0.995084,-0.998413,-0.998237,-0.997763,-0.998123,-0.994789,-0.990291,-0.992835,-0.996091,-0.997025,-0.980440,-0.996911,-0.997307,-0.997522,-0.998976,-0.998528,-0.997891,-0.998420,-0.998349,-0.998309,-0.998500,-0.997432,-0.996203,-0.995768,-0.995771,-0.997712,-0.997709,-0.997371,-0.997428,-0.998360,-0.998850,-0.998169,-0.998016,-0.997446,-0.997779,-0.997973,-0.997867,-0.997709,-0.997834,-0.967184,-0.995061,-0.994261,-0.994649
-0.897240,-0.938953,-0.986402,-0.995950,-0.996086,-0.996901,-0.997005,-0.995478,-0.995476,-0.997134,-0.995446,-0.994340,-0.993787,-0.992729,-0.993138,-0.994306,-0.994317,-0.994544,-0.850296,-0.979435,-0.990707,-0.992440,-0.991711,-0.990942,-0.991173,-0.992868,-0.992937,-0.994601,-0.996113,-0.992656,-0.990116,-0.991574,-0.992278,-0.991776,-0.992904,-0.994847,-0.995499,-0.994465,-0.992867,-0.992854,-0.838042,-0.977475,-0.986395,-0.993805,-0.993648,-0.994486,-0.995594,-0.994517,-0.994229,-0.995400
-0.378867,-0.331595,-0.411028,-0.442934,-0.442249,-0.408495,-0.344836,-0.353894,-0.390178,-0.388247,-0.386125,-0.389253,-0.389347,-0.382626,-0.419080,-0.438064,-0.477771,-0.455124,-0.427774,-0.432565,-0.442604,-0.416594,-0.410642,-0.407608,-0.424739,-0.370686,-0.352013,-0.393014,-0.397689,-0.409974,-0.370854,-0.321210,-0.355567,-0.239825,-0.218953,-0.264854,-0.302479,-0.362774,-0.304567,-0.293976,-0.347131,-0.368655,-0.344247,-0.308579,-0.311608,-0.306167,-0.322233,-0.368162,-0.360827,-0.416369
-0.397298,-0.384518,-0.421467,-0.507363,-0.477225,-0.410625,-0.467228,-0.482831,-0.417616,-0.399232,-0.433755,-0.411493,-0.353076,-0.373687,-0.414037,-0.416686,-0.400391,-0.409514,-0.443779,-0.486872,-0.446032,-0.348344,-0.362629,-0.478425,-0.423900,-0.426066,-0.457049,-0.542771,-0.506070,-0.474856,-0.409992,-0.373123,-0.392303,-0.406045,-0.427111,-0.217817,-0.178207,-0.190855,-0.226858,-0.226657,-0.248229,-0.285951,-0.284294,-0.297800,-0.286574,-0.346270,-0.385755,-0.321271,-0.330964,-0.280320
-0.025575,-0.104160,-0.184955,-0.176453,-0.200168,-0.195778,-0.207576,-0.201105,-0.253276,-0.201304,-0.042121,-0.197354,-0.104555,-0.208903,-0.216310,-0.226034,-0.200410,-0.184683,-0.196499,-0.213603,-0.159043,-0.054184,-0.121423,-0.193129,-0.147044,-0.147286,-0.318215,-0.313362,-0.240844,-0.254509,-0.049503,-0.042278,-0.135815,-0.063637,-0.123365,-0.018569,-0.044273,-0.056394,-0.041148,-0.049035,-0.043659,-0.052704,-0.047966,0.145784,0.245688,0.212698,0.079138,0.061823,0.140363,0.158584
-0.960863,-0.994269,-0.995815,-0.919927,-0.840510,-0.797451,-0.942441,-0.977625,-0.994793,-0.996679,-0.997629,-0.998933,-0.998349,-0.997973,-0.967362,-0.978081,-0.978311,-0.979788,-0.989959,-0.994749,-0.994507,-0.995813,-0.997734,-0.997665,-0.995538,-0.995519,-0.996729,-0.997013,-0.998647,-0.999717,-0.998946,-0.997856,-0.967250,-0.988564,-0.993054,-0.995466,-0.992063,-0.992972,-0.996231,-0.996210,-0.998300,-0.997293,-0.990341,-0.983353,-0.963767,-0.992269,-0.993018,-0.994688,-0.994808,-0.995466
-0.969927,-0.987131,-0.995720,-0.995126,-0.995958,-0.997408,-0.951640,-0.945947,-0.977393,-0.969269,-0.923524,-0.975379,-0.995293,-0.996803,-0.997940,-0.998395,-0.990546,-0.987265,-0.977346,-0.977195,-0.988512,-0.994147,-0.997252,-0.997725,-0.997952,-0.997683,-0.997806,-0.970968,-0.893919,-0.893152,-0.977155,-0.995036,-0.994746,-0.988056,-0.980220,-0.978408,-0.987341,-0.996775,-0.992623,-0.991513,-0.991922,-0.992135,-0.978502,-0.984052,-0.942030,-0.944565,-0.994488,-0.957469,-0.915616,-0.919021
-0.942737,-0.964610,-0.965509,-0.983395,-0.995764,-0.990096,-0.987385,-0.991026,-0.994634,-0.993730,-0.991653,-0.993099,-0.995811,-0.963329,-0.956359,-0.952531,-0.982350,-0.921825,-0.938766,-0.958518,-0.971296,-0.979310,-0.981333,-0.983457,-0.983453,-0.984812,-0.993753,-0.997203,-0.992815,-0.969993,-0.964144,-0.988949,-0.993741,-0.997090,-0.996902,-0.996051,-0.995788,-0.992985,-0.854042,-0.898559,-0.991172,-0.992581,-0.993132,-0.996054,-0.995813,-0.996790,-0.995802,-0.994278,-0.988134,-0.984411
history/aob/data/human/labels.csv
ADDED
@@ -0,0 +1 @@
1,1,-1,-1,1,1,-1,-1,-1,1,-1,-1,1,1,-1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1,-1,-1,-1,1,-1,-1,-1,1,1,-1,-1,-1,1,-1,-1,-1,1,-1,-1,-1,1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,1,1,1,-1,-1,-1,1,1,1,-1,-1,-1,1,1,-1,-1,-1,1,1,-1,-1,-1,1,-1,-1,-1,1,1,1,-1,-1,-1
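Each dataset folder added under history/aob/data/ pairs a data.csv of comma-separated sample rows with a single-row labels.csv of +1/-1 class labels (some folders also ship a times.csv time axis). A minimal loading sketch under that assumption; the helper name load_dataset_folder is illustrative and not part of this repository:

import numpy as np
from pathlib import Path

def load_dataset_folder(folder):
    # Hypothetical helper, not part of this repo: reads the
    # data.csv / labels.csv layout used by the folders in this commit.
    folder = Path(folder)
    # "#"-prefixed header lines (e.g. "# x1") are skipped as comments.
    data = np.loadtxt(folder / "data.csv", delimiter=",", comments="#")
    # labels.csv holds one row of comma-separated +1/-1 labels.
    labels = np.loadtxt(folder / "labels.csv", delimiter=",").astype(int)
    return data, labels

data, labels = load_dataset_folder("history/aob/data/human")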
history/aob/data/linear/data.csv
ADDED
@@ -0,0 +1,201 @@
# x1
1.034418,1.051587,1.080255,1.085033,1.110629,1.072920,1.214597,1.089725,1.293437,1.183409,1.369291,1.323198,1.262347,1.349542,1.264756,1.574156,1.379359,1.437999,1.387382,1.397654
0.972716,0.947640,0.826014,0.952086,0.942324,0.864636,0.832463,0.898894,1.008209,0.810135,0.812011,0.746839,0.845031,0.604951,0.774728,0.583960,0.674733,0.674670,0.709309,0.446486
1.027653,1.018439,1.027888,1.093056,1.140154,1.078610,1.164092,1.137246,1.286842,1.099795,1.181430,1.063666,1.315552,1.401359,1.411300,1.202263,1.251724,1.454191,1.475299,1.401818
1.012817,0.988063,0.906642,0.910249,0.858589,0.881488,0.852838,0.841301,0.687728,0.748521,0.875319,0.850167,0.628398,0.712752,0.620572,0.633353,0.648631,0.531825,0.727386,0.491273
1.009878,1.073061,1.010030,1.059689,1.101083,1.125162,1.162942,1.239264,1.108087,1.288984,1.253384,1.203286,1.308839,1.163758,1.222064,1.204558,1.465141,1.285444,1.426164,1.478458
0.962425,0.978132,0.940748,0.933114,0.719135,0.891110,0.785976,0.992843,0.892334,0.639286,0.844355,0.749160,0.708497,0.667321,0.528842,0.612447,0.580410,0.317874,0.498804,0.845392
0.965291,1.012077,1.017408,1.026531,1.092049,1.050591,1.145272,0.954202,1.101186,1.113710,1.171299,1.255769,1.274301,1.252957,1.397339,1.438209,1.352193,1.515092,1.456924,1.890615
0.999951,0.895820,0.999941,0.961272,0.962266,0.838839,1.014653,0.792755,1.023239,1.008423,0.640878,0.864622,0.792542,0.793433,0.580313,0.561338,0.398556,0.579859,0.472038,0.256786
1.000787,1.066269,1.010700,1.107605,1.059054,1.129285,1.097222,1.097470,1.276270,1.278839,1.174804,1.144834,1.395965,1.214929,1.225833,1.155416,1.292349,1.077411,1.199754,1.286505
0.957675,0.904564,0.925856,0.965894,1.044527,0.899698,0.940990,0.863763,0.776876,0.808954,0.738762,0.742453,0.756770,0.753649,0.613156,0.441697,0.541025,0.377954,0.354237,0.273345
1.090108,0.986783,1.024074,1.063291,1.135330,1.126011,1.148271,1.090038,1.047030,1.185842,1.335610,1.271770,1.212249,1.263001,1.437276,1.401106,1.347092,1.519733,1.786250,1.574989
0.957453,1.001547,0.903169,0.964777,0.874547,0.888121,1.020902,0.833215,0.866922,0.974897,0.567544,0.735275,0.565079,0.848555,0.743078,0.680886,0.868715,0.534944,0.400708,0.494488
1.009708,1.041803,1.055137,1.074351,1.090122,1.172643,1.148062,1.091848,1.085196,1.225025,1.188415,1.116525,1.298953,1.360910,1.259647,1.141250,1.378232,1.402421,1.422430,1.515357
0.978457,1.001960,0.998660,0.972815,1.059723,0.776736,0.894591,0.940412,0.608178,1.019941,0.607620,0.929602,0.610673,0.703167,0.582753,0.827422,0.797692,0.616238,0.547443,0.655721
1.014638,1.088802,1.027443,0.955729,1.117455,1.083792,1.142450,1.260821,1.220664,1.156358,1.208495,1.103586,1.192784,1.409979,1.501922,1.280030,1.364567,1.490505,1.303341,1.243707
0.940871,0.952064,0.882061,0.975597,0.851585,0.925852,0.907563,0.801688,0.954808,1.022766,0.835479,0.751089,0.602375,0.926138,0.424283,0.708199,0.470432,0.487495,0.433679,0.819119
0.978919,1.007685,1.158663,1.029274,1.021437,1.077123,1.105171,1.186524,1.194253,1.102384,1.072104,1.180698,1.149075,1.189099,1.334665,1.147745,1.240429,1.141971,1.700831,1.691519
0.997020,0.929814,1.046718,0.984962,0.944330,0.906934,0.817181,0.940043,0.836965,0.805916,0.910424,0.817466,0.729560,0.826762,0.626741,0.676617,0.812856,0.683242,0.279313,0.597512
0.979265,1.113258,0.998777,0.984875,1.030406,0.991268,1.184065,1.204953,1.184243,1.082967,1.070973,1.250678,1.187274,1.099269,1.237465,1.122819,1.333070,1.359405,1.307479,1.606702
0.996858,1.025170,0.868868,0.872436,0.971107,0.874799,0.943063,0.753649,0.835375,0.835492,0.923910,0.585019,0.830066,0.641435,0.683339,0.713943,0.599204,0.460284,0.504971,0.641645
1.029240,1.002987,1.069093,1.103506,1.141958,0.935520,1.117032,1.221020,1.266241,1.061523,1.108640,1.226905,1.084095,1.351930,1.290158,1.444024,1.659558,1.321895,1.204054,1.505575
0.991489,0.947845,0.961831,0.926785,0.939090,0.954911,0.841503,0.769999,0.896310,0.763499,0.834400,0.673020,0.900809,0.636744,0.668810,0.723978,0.647513,0.420337,0.402782,0.604564
1.014167,1.010160,1.052370,1.133786,1.135304,1.104363,1.050108,1.308751,1.229044,1.188810,1.220948,1.226002,1.181114,1.028792,1.380993,1.319572,1.329446,1.450737,1.536237,1.285764
0.963198,1.024266,1.027899,0.864104,0.965019,0.903986,0.839967,0.985240,0.963036,0.827608,0.739682,0.792483,0.720691,0.630885,0.707989,0.618574,0.562392,0.610726,0.574650,0.231221
1.077409,1.074772,1.024291,1.022857,1.094468,1.106590,1.086634,1.067310,1.139032,1.116886,1.163324,1.203500,1.253752,1.184018,1.309228,1.178794,1.224712,1.612828,1.170469,1.488919
0.949765,0.961662,0.885797,0.914160,0.932185,0.984331,0.891980,0.829764,1.018016,0.832235,0.951494,0.786543,0.734913,0.583805,0.645020,0.853838,0.799304,0.582821,0.520966,0.427260
0.984116,1.003636,1.137679,1.067974,1.085510,1.018313,1.036200,1.063171,1.197347,1.198794,1.395158,1.317011,1.215009,1.500886,1.545495,1.281415,1.481728,1.252549,1.463972,1.477631
0.973445,1.028535,0.953476,0.921194,0.749067,0.765917,0.979982,0.716093,1.075840,0.896232,0.807776,0.698316,0.683615,0.723998,0.835240,0.672311,0.511270,0.870636,0.713485,0.751174
1.062891,1.069400,0.991324,1.067936,1.161312,0.965901,1.104457,1.204798,1.068179,1.221502,1.180495,1.335893,1.268588,1.282259,1.105012,1.174447,1.409286,1.362122,1.293948,1.576547
0.948203,0.998438,0.944431,0.944385,0.817501,1.054653,0.784476,0.773346,0.754524,0.885004,0.779890,0.658014,0.936478,0.731775,0.549770,0.389478,0.686365,0.525417,0.493292,0.622577
0.944288,1.029788,1.040815,1.072184,1.077238,0.994879,1.148229,1.125618,1.002547,1.110668,1.193687,1.208584,1.186326,1.170374,1.118603,1.484705,1.349616,1.516261,1.242085,1.258650
0.988779,0.976621,1.001157,0.967497,1.037105,0.935714,0.808266,0.903310,0.851135,0.870327,0.756884,0.911328,0.868031,0.718093,0.691478,0.583802,0.650587,0.479745,0.308155,0.491910
1.085153,1.073642,1.100511,1.001138,1.041096,1.115651,1.011480,1.169700,1.260790,1.222341,1.363464,1.032211,1.296384,1.123877,1.002011,1.138875,1.480179,1.369597,1.206010,1.558681
1.041137,0.956849,1.025986,0.956049,1.018057,0.912034,0.784769,1.045138,0.816067,0.930751,0.836238,0.688574,0.780570,0.946144,0.717750,0.364033,0.818563,0.393612,0.508692,0.579205
1.035680,0.908900,0.989913,1.148627,1.073118,1.012889,1.122288,1.030911,1.155033,1.156216,1.201513,1.238905,1.426344,1.339354,1.405539,1.242835,1.318396,1.531090,1.655689,1.479239
0.991503,0.964899,1.041145,0.883826,0.889279,0.844067,0.895712,1.014234,0.868476,0.963194,0.925438,0.851390,0.612368,0.865382,0.642670,0.802900,0.627450,0.474322,0.617943,0.453832
1.067848,1.082504,0.957823,1.158977,1.072193,1.179633,1.111925,1.157719,1.111405,1.062204,1.010826,1.319675,1.182159,1.466594,1.255886,1.265586,1.349349,1.262021,1.498645,1.690462
0.925754,0.900228,0.997102,0.979351,0.940209,1.014913,0.822733,0.812503,0.762314,0.777372,0.868580,0.778370,0.786929,0.784100,0.910539,0.373155,0.623249,0.538895,0.706919,0.549032
1.011807,1.033991,1.022671,1.151902,1.076088,1.216656,0.965971,1.200501,1.223439,1.141765,1.015486,1.298602,1.211191,1.381944,1.634408,1.197560,1.333997,1.328986,1.684017,1.189899
1.005660,0.912239,1.046115,0.882979,0.932000,0.899041,0.941133,0.958090,0.784573,0.813265,0.688956,0.754649,0.879242,0.612387,0.817420,0.459663,0.644505,0.694861,0.408531,0.529505
1.051552,1.063584,1.053163,0.938047,1.018040,1.212773,1.108764,1.080060,1.233271,1.159751,1.113872,1.181315,1.390123,1.393911,1.278204,1.412794,1.181531,1.320732,1.656563,0.948985
0.961673,0.920192,1.039165,0.905378,0.860192,0.915669,0.854120,0.919817,0.765306,0.997835,0.922328,0.833382,0.719851,0.955998,0.871904,0.667368,0.629322,0.433511,0.475453,0.564071
1.076328,1.016108,1.113456,0.996764,1.054725,1.070204,1.114170,1.075696,1.064004,1.212671,1.327001,1.096502,1.283882,1.316149,1.254849,1.492065,1.308760,1.375968,1.643715,1.411749
0.975916,0.977752,0.981508,0.893231,0.883689,0.791370,0.979457,0.960772,1.025088,0.804286,0.841360,0.960020,0.729231,0.744131,0.845434,0.737534,0.448898,0.371476,0.775746,0.777546
1.012654,0.973687,1.135495,1.039336,1.090393,1.039366,1.117302,1.192247,1.270120,1.194386,1.218584,1.412239,1.191956,1.380692,1.115363,1.361141,1.302324,1.696366,1.500102,1.490493
0.889817,0.954631,0.906590,0.896520,0.914328,0.967185,0.910163,0.910655,0.852416,0.736092,0.982758,0.726124,0.632361,0.743896,0.855775,0.575297,0.562354,0.570874,0.232847,0.792853
1.027786,1.010781,1.070923,0.993861,1.088586,1.093273,1.104431,1.085483,1.237495,1.141167,1.174491,1.108317,1.188365,1.253136,1.393975,1.293486,1.401355,1.509072,1.463830,1.513914
0.941287,0.957056,0.960534,0.929434,0.986690,0.977488,0.886847,0.847711,0.664024,0.791452,0.736196,0.768103,0.797248,0.795494,0.850068,0.626411,0.827118,0.530732,0.542608,0.452364
0.935128,0.983483,1.115080,1.095862,1.016695,0.977355,1.139713,1.153132,1.149925,1.176110,1.183708,1.177570,1.273156,1.243014,1.322537,1.278641,1.261792,1.613351,1.567381,1.493342
0.941376,1.007489,0.961361,0.871278,0.946781,0.888474,0.964096,0.856474,0.843352,0.785377,0.843291,0.737855,0.965979,0.979706,0.570151,0.838959,0.763724,0.427541,0.632906,0.367758
0.999543,1.025125,1.095776,1.035790,1.038296,1.317716,1.035667,1.184386,1.179888,1.083622,1.289880,0.959554,1.367570,1.108511,1.388083,1.153992,1.474777,1.487934,1.479162,1.816651
0.986796,0.986175,0.920635,0.914432,0.877552,0.800863,0.850605,0.878333,0.750784,0.870121,1.036635,0.644608,0.754772,0.732770,0.808435,0.669915,0.633769,0.574753,0.777016,0.816992
1.025268,1.022220,0.994546,1.044341,1.033098,0.976755,1.201861,1.015132,1.077502,1.044094,1.287626,1.214725,1.250283,1.300201,1.164126,1.480211,1.447348,1.649678,1.461714,1.346895
0.937282,0.970839,0.974719,0.853589,0.885484,0.920752,0.860328,1.012669,0.895713,0.733986,0.847348,0.838717,0.587773,0.867207,0.958539,0.645450,0.216084,0.714512,0.893155,0.632577
0.986866,1.103690,1.068082,1.059409,1.005631,1.204137,1.004974,1.097739,1.205867,1.137326,1.114900,1.068963,1.303824,1.218405,1.376787,1.430495,1.427971,1.499148,1.296586,1.490881
0.970458,0.966387,0.839606,0.991488,0.909652,0.969739,0.899339,0.698706,0.941126,0.864308,0.752269,0.760549,0.635235,0.590588,0.629587,0.571265,0.676218,0.474322,0.411200,0.524285
1.062579,1.011991,1.099254,1.151252,1.105863,1.217466,1.213226,1.229837,1.125116,1.224907,1.197160,1.277179,1.200215,1.086420,1.272358,1.325914,1.399897,0.999024,1.348336,1.200460
0.944534,0.960233,0.990328,0.909319,1.026986,0.832306,0.850887,0.743243,0.715228,0.653034,0.809618,0.816748,0.592551,0.528670,0.681741,0.678905,0.528311,0.518472,0.183396,0.597345
1.079338,1.070228,1.058140,1.042389,0.993477,1.033612,1.161517,1.137047,1.184636,1.159240,1.162543,1.302503,1.139561,1.442646,1.358176,1.227224,1.393526,1.200695,1.454344,1.704220
0.901115,0.963926,0.991739,0.949780,0.955443,1.058352,0.795928,0.900376,0.867628,0.832824,0.863610,0.633851,1.204462,0.813680,0.730729,0.571071,0.638973,0.550401,0.576666,0.585943
1.135960,1.002580,1.037030,1.066795,1.105337,1.105883,1.132142,1.163607,1.069390,1.219524,1.078955,1.145036,1.177073,1.440544,1.455150,1.269670,1.053989,1.169847,1.130027,1.197130
0.939974,0.904867,0.895365,0.858773,0.931951,0.959812,0.901583,0.928361,0.608563,0.883447,0.627578,0.589010,0.706817,0.771447,0.680065,0.629130,0.673123,0.626282,0.797821,0.370785
1.035649,1.105597,0.975055,1.000484,1.021366,1.064115,0.995687,1.208874,1.215230,1.398676,1.251875,1.394241,1.017253,1.102314,1.195603,1.503837,1.524015,1.161732,1.232718,1.828243
1.035604,0.971536,1.010678,0.991341,0.881640,0.870225,0.882816,0.878475,0.716131,0.977031,0.678453,0.633443,0.726941,0.779190,0.597494,0.513524,0.556938,0.850769,0.520465,0.394698
1.001235,1.125518,0.981956,1.200383,1.142732,1.041340,1.183585,1.204515,0.901401,1.171410,1.043837,1.194326,1.257152,1.275186,1.562068,1.365639,1.324202,1.664670,1.337325,1.236092
0.937672,0.927708,1.012597,0.962772,0.884723,1.022059,0.867659,0.908296,0.905920,0.692472,0.695538,0.756625,0.733729,0.961149,0.537656,0.585075,0.448632,0.327271,0.282541,0.221304
1.014258,1.084098,0.901894,1.193147,1.068565,1.117556,1.138243,1.161179,1.233206,1.223189,1.183343,1.180669,1.220905,1.417033,1.194294,1.371941,1.485073,1.314956,1.525960,1.543658
0.976979,0.965988,0.917834,1.028135,0.971798,0.911736,0.855655,0.998011,0.873581,0.784312,0.832486,0.826180,0.787303,0.561963,0.781489,0.740716,0.715670,0.623060,0.555926,0.614297
1.053208,1.034809,1.120977,1.069921,1.028325,1.091610,1.190917,1.188660,1.247142,1.275781,1.209665,1.067914,1.101619,1.174977,1.265688,1.358562,1.814643,1.310567,1.386191,1.328214
0.971720,0.992342,0.966474,1.006320,0.943567,0.811513,0.904149,0.739354,0.775983,0.891766,0.841075,0.831390,0.587624,0.798951,0.500796,0.647120,0.847921,0.673418,0.566666,0.272934
1.075188,1.035733,1.138204,1.047670,1.117677,1.067247,1.124162,1.186730,1.283458,0.973873,1.124307,1.170698,0.939812,1.155744,1.304692,1.248114,1.223460,1.421474,1.256073,1.663344
0.935139,0.798304,0.922791,0.933654,0.868136,1.000943,0.893159,0.734645,0.972483,0.799816,0.771427,0.896391,0.721613,0.804699,0.773385,0.687068,0.621543,0.438643,0.279048,0.809668
1.065634,1.011393,1.023715,1.112275,1.035321,1.050590,1.020418,1.214957,1.170525,1.337184,1.196819,1.252003,1.132218,1.355930,1.375333,1.397719,1.281548,1.444873,1.382766,1.684946
1.021681,0.989550,1.004724,1.026766,0.958593,0.941534,1.092427,0.777126,0.866840,0.838299,0.737853,0.782809,0.732282,0.662891,0.660336,0.695362,0.449567,0.567299,0.372251,0.620417
1.064975,1.035122,1.072400,1.151884,1.056152,1.121479,1.185600,1.088435,1.199254,1.173967,1.237271,1.063879,1.271305,1.135491,1.491738,1.398451,1.275089,1.342045,1.296004,1.177510
0.921960,0.950890,0.950734,0.896379,1.080222,0.921674,0.897494,0.782739,0.885581,0.860819,0.842945,0.801084,0.911069,0.667489,0.550398,0.643059,0.710287,0.680310,0.710138,0.534464
1.027679,0.952812,1.054979,1.109267,1.083728,1.257720,1.158814,1.058530,1.184356,1.255674,1.121898,1.380923,1.341995,1.168755,1.362424,1.026091,1.488618,1.430321,1.371794,1.282334
1.018811,0.951178,0.908935,1.040575,0.959963,0.770005,0.860848,0.766210,0.995317,0.925491,0.873519,0.765663,0.684032,0.698932,0.557936,0.601353,0.827814,0.667519,0.435479,0.624615
1.050098,1.094829,1.067765,1.072105,1.055178,1.104932,0.969893,1.276913,1.101668,1.091793,1.252990,1.333127,1.088469,1.297841,1.317050,1.277394,1.523935,1.365666,1.527809,1.598452
1.044379,0.890860,0.989308,0.924699,1.007793,0.962199,0.846968,0.826722,0.812875,0.729137,0.921610,0.790469,0.557507,0.641214,0.592339,0.769275,0.657221,0.844680,0.679832,0.296976
1.069377,1.049183,1.057975,1.084853,1.102366,1.074587,1.213638,1.147513,1.278959,1.198844,1.224505,1.173487,1.253081,1.282462,1.429717,1.503688,1.522773,1.482563,1.451945,1.698433
1.020560,0.935117,0.953674,0.951274,1.016635,0.854800,1.016874,0.901479,0.816801,0.936252,0.747007,0.755104,0.560043,0.801224,0.720198,0.575065,0.556097,0.717593,0.765092,0.801588
1.063106,1.076850,0.971333,1.111349,1.200362,1.145365,1.129047,1.190600,1.086430,1.357359,1.134712,1.404862,1.357398,1.343021,1.309624,1.478391,1.290670,1.220905,1.415704,1.487975
0.917661,0.945337,0.879929,0.922619,0.890323,0.981996,0.922020,0.837456,0.789080,0.861901,0.939547,0.664130,0.849945,0.816639,0.683313,0.892001,0.622286,0.505705,0.583666,0.647469
0.985867,1.020343,1.118163,1.101334,1.158053,1.128863,1.221955,1.040217,1.135571,1.192205,1.322752,1.126492,1.379830,1.138493,1.216283,1.424034,1.327588,1.400070,1.620920,1.034334
0.919502,0.987476,0.981928,0.872362,0.783819,0.908402,0.869845,0.986483,0.703207,0.751847,0.725783,0.921074,0.817763,0.619163,0.604326,0.462842,0.920156,0.580336,0.462515,0.802188
1.029275,1.110957,1.138208,1.062840,1.026498,1.129460,0.939884,1.110779,1.169702,1.155980,1.119809,1.319948,1.100133,1.256672,1.273427,1.372033,1.479000,1.366407,1.410760,1.428592
0.983296,1.009474,0.970233,0.910827,0.938051,0.921261,0.904726,0.721322,0.868554,0.793656,0.643124,0.634796,0.559212,0.773193,0.632000,0.767726,0.682562,0.418067,0.531364,0.356088
1.036300,1.013929,1.056540,1.153918,1.125904,1.223970,1.191897,1.169474,1.120181,1.093950,1.097311,1.301669,1.103697,1.578001,1.253871,1.218269,1.520957,1.176694,1.190316,1.328063
0.991892,1.001782,0.957308,0.902183,0.927129,0.887597,0.834670,0.889436,0.828782,0.593512,0.653324,1.064683,0.818306,0.832637,0.572962,0.661143,0.447432,0.535270,0.549262,0.468978
0.988603,1.072887,1.039483,1.057579,1.040590,0.968953,1.228864,0.941455,1.129915,1.371932,1.152037,1.340091,1.413665,1.276220,1.216471,1.418221,1.222851,1.274208,1.401235,1.273693
0.993929,0.947321,0.961922,1.033345,0.941971,0.944085,0.730735,0.998337,0.897045,0.899772,0.881842,1.059300,0.403860,0.706573,0.733978,0.645201,0.546499,0.548530,0.679007,0.401953
1.062622,1.117948,1.078810,1.091271,1.147552,1.070159,1.155172,0.994943,1.065744,1.204224,1.128001,1.190231,1.324556,1.324375,1.316194,1.202906,1.179542,1.393640,1.278472,1.565378
1.051135,1.002400,0.987962,0.973696,0.940204,0.923171,0.767497,0.844897,0.822165,0.792718,0.656828,0.849659,0.703411,0.653780,0.682831,0.895498,0.737915,0.707393,0.391383,0.576433
1.056513,1.079643,0.981773,1.094782,1.167706,1.040959,0.935949,1.189405,1.159851,1.359403,1.316033,1.018584,1.193189,1.503676,1.267676,1.468674,1.476997,1.132405,1.619055,1.496418
0.929718,0.970292,0.977301,0.857603,0.954582,1.091906,0.827445,0.905927,0.767119,0.779275,0.796709,0.671430,0.693001,0.809421,0.821294,0.377165,0.741257,0.758607,0.875157,0.328680
0.981880,0.967029,1.051196,1.041118,1.120122,1.277660,1.258103,1.135502,1.082234,1.167598,1.328663,1.200359,1.060915,1.350785,1.353769,1.450039,1.599801,1.516228,1.498605,1.648117
1.021056,0.976484,0.967595,0.989191,0.919998,0.906241,0.935226,0.921449,0.904255,0.701150,0.888542,0.706508,0.707333,0.522957,0.564038,0.970116,0.356569,0.567311,0.479569,0.429897
1.022831,0.972105,1.056550,1.008321,1.081786,1.177595,0.964289,1.261487,1.092795,1.168640,1.295994,1.194044,1.340732,1.211527,1.544675,1.346150,1.599474,1.167111,1.555957,1.603451
0.967494,0.892849,0.978587,1.089203,0.815468,0.824053,0.762720,0.918684,0.797414,0.724810,0.596020,0.781326,0.530486,0.893696,0.699160,0.872398,0.561373,0.578808,0.277587,0.423575
1.047146,0.985923,1.153755,1.061816,1.199611,1.224905,0.956167,1.189395,1.137300,1.237478,1.216921,1.066223,1.379859,1.257040,1.378889,1.341852,1.355834,1.298298,0.986881,1.409935
0.932622,0.932029,0.907684,0.978301,0.895075,0.838073,0.727004,0.842329,0.799869,1.006765,0.729792,0.857372,0.723125,0.756404,0.720193,0.735726,0.946997,0.660135,0.566865,0.616736
0.999640,1.045524,1.002868,1.193808,1.165135,1.092248,1.054938,1.051181,1.175775,1.275020,1.062680,1.257835,1.210863,1.434792,1.288890,1.302840,1.602331,1.324084,1.093142,1.589501
0.989242,0.939381,0.989020,0.891267,0.942021,0.930301,0.875091,0.904080,0.667775,0.763287,0.661320,0.672827,0.801965,0.787492,0.477813,0.602077,0.673965,0.702271,0.667953,0.851983
1.099381,1.115992,1.057031,1.038198,1.044167,1.096709,1.066844,1.203906,1.102586,1.088580,1.226187,1.136119,1.273810,1.291774,1.171524,1.432245,1.419430,1.349198,1.219395,1.384495
0.971314,0.971526,0.922753,0.956230,0.960129,0.978475,0.962133,0.958052,0.736880,0.927886,0.895450,0.897537,0.759667,0.624664,0.870450,0.645081,0.619023,0.487822,0.562613,0.430029
1.066112,1.097100,1.084278,1.064688,1.044373,1.097457,1.116787,1.112650,1.229252,1.157432,1.230900,1.316507,1.374775,1.233955,1.444207,1.208816,1.160450,1.367444,1.429856,1.548211
0.993380,0.940726,0.931739,0.987252,0.854518,0.790794,0.730790,0.750332,0.808269,0.919230,0.846557,0.805130,0.670818,0.787709,0.662512,0.655255,0.704019,0.626658,0.613994,0.823261
1.044487,0.981893,1.019932,1.110546,1.071564,1.071551,1.159186,1.098081,1.212231,0.943778,1.273825,1.231871,1.144664,1.217075,0.892662,1.375429,1.137403,1.599634,1.613985,1.592242
0.921065,1.014789,0.985760,0.892711,0.978744,0.945408,0.921726,0.781596,0.724434,0.672639,0.993047,0.619456,0.651515,0.710254,0.837662,0.578201,0.430287,0.456336,0.321979,0.573045
1.036636,1.067368,1.041755,0.985725,1.029615,1.050375,1.088877,1.248937,1.222728,1.221684,1.102147,1.315307,1.353488,1.408541,1.514738,1.543708,1.544292,1.359793,1.504438,1.484388
0.911854,0.949198,0.968587,0.993299,0.945147,0.822174,0.763928,0.699091,0.927883,0.996102,0.838969,0.937566,0.722220,0.618959,0.603254,0.968327,0.587262,0.736915,0.821628,0.466425
1.004757,0.958911,1.009025,0.894507,0.982699,1.127189,1.208211,1.204824,1.052578,1.355854,1.087628,1.492821,1.304203,1.077319,1.306672,1.533282,1.270246,1.336089,1.514820,1.429523
0.964901,0.955090,1.032844,0.962468,0.891895,0.819174,0.706257,0.813203,0.857207,0.861329,0.867614,0.877228,0.798072,0.629539,1.015456,0.599898,0.576286,0.583819,0.787541,0.516628
1.049475,1.069004,1.065739,1.008650,1.115611,1.125177,1.186815,1.258113,1.194634,1.225006,1.205791,1.044928,1.194103,1.343260,1.273351,1.262851,1.588963,1.132793,1.652409,1.231865
1.050081,0.923084,0.979452,0.908920,0.954381,0.964726,0.908374,0.910432,0.892122,0.789614,0.883893,0.693746,0.875754,0.720093,0.616454,0.567282,0.561537,0.825147,0.589377,0.440752
1.007481,1.041164,1.005552,1.076564,1.072555,1.007572,1.205947,1.272163,1.309207,1.119050,1.136543,1.320221,1.334768,1.294886,1.609654,1.281560,1.488219,1.278576,1.691832,1.233439
0.987966,0.968531,0.921508,0.874435,1.021637,0.829832,1.001798,0.847255,0.774957,0.884160,0.894735,1.002464,0.642613,0.812721,0.734504,0.885283,0.617618,0.568765,0.585252,0.418413
1.057558,1.069858,1.027996,1.131311,1.052310,1.088632,0.943323,1.196024,1.095669,1.188561,1.218110,1.179419,1.138751,1.202808,1.437981,1.321844,1.308796,1.361836,1.623838,1.311886
0.989674,0.970189,0.966136,0.886830,0.892063,0.841832,0.902699,0.904164,0.883210,0.994004,0.603850,0.645047,0.705120,0.882078,0.514273,0.677302,0.785714,0.657481,0.743939,0.170033
0.994792,1.087587,0.997596,1.192450,1.092436,1.137184,1.105699,1.140206,1.119562,1.076235,1.283033,1.172559,1.313619,1.372454,1.324841,0.989553,1.242169,1.175095,1.478817,1.343862
0.999686,0.928487,0.935614,0.914017,0.928620,0.967611,0.945514,0.795554,0.928820,0.840219,0.897748,0.784287,0.726864,0.653331,0.667698,0.678036,0.890915,0.459950,0.776517,0.606893
1.081835,1.044748,1.005376,0.912798,1.048906,1.130999,0.984779,1.038885,1.032649,1.185163,1.271480,1.169168,1.266758,1.231440,1.339665,1.385660,1.469557,1.543467,1.748966,1.441931
1.034628,0.910256,0.906655,1.046916,0.922222,0.850598,0.773876,0.906966,0.922867,0.825306,0.832859,0.828419,0.756996,0.728455,0.368823,0.506926,0.408411,0.482449,0.315293,0.785101
1.076272,1.073279,1.026881,1.053927,1.096091,0.952723,1.138197,0.987358,1.131007,1.209089,1.187451,1.232580,1.183671,1.245303,1.086397,1.318606,1.267470,1.342112,1.515926,1.265974
0.972416,1.015814,0.941382,0.959174,1.088947,0.816307,0.960489,0.689671,0.823291,0.705068,0.578219,0.574420,0.974777,0.604375,0.786643,0.517502,0.725541,0.447532,0.019859,0.573730
0.996072,1.073661,1.061996,1.049731,1.177515,1.010696,1.032634,1.242288,1.214204,1.120258,1.110644,1.217661,1.221465,1.023679,1.341941,1.408548,1.205931,1.298765,1.345876,1.443106
0.966132,0.982495,0.970125,1.052351,0.985885,0.929960,0.840847,0.965356,0.927424,0.744083,0.844991,0.808677,0.743740,0.820732,0.648460,0.454455,0.473961,0.575200,0.370911,0.630380
1.063478,1.109811,1.031280,1.017260,1.060232,1.216372,1.124865,1.105971,1.137488,1.147574,1.117937,1.137531,1.220987,1.251609,1.189880,1.295186,1.245896,1.683680,1.353992,1.257972
1.011387,0.961808,0.836152,1.016002,0.846620,0.865624,0.969454,0.958259,0.782404,0.769725,0.859935,0.908101,0.644194,0.746271,0.662335,0.660903,0.565772,0.512943,0.782608,0.425778
1.005486,1.112775,1.095329,1.087498,0.996823,1.122833,1.092704,1.160098,1.099593,1.102238,1.230300,1.211314,1.189029,1.207207,1.388789,1.099036,1.489555,1.184536,1.497633,1.684507
0.942972,0.993380,0.948219,0.985185,0.864847,0.901264,0.963230,1.002397,0.909336,0.690408,0.710743,0.758587,0.566984,0.610940,0.477124,0.408115,0.633156,0.716590,0.445114,0.497243
0.996517,0.952313,1.124125,1.179285,1.050028,1.078722,1.273089,1.031756,1.255811,1.337545,1.035880,1.379597,1.164832,1.096843,1.408533,1.376451,1.349727,1.473328,1.423185,1.546156
1.013326,1.058770,0.908025,1.013500,0.950972,0.949981,0.874679,0.906906,0.823749,0.878008,1.025979,0.857357,0.727641,0.741337,0.701081,0.535715,0.936009,0.766717,0.514244,0.423058
1.088231,1.070271,0.984505,1.177060,1.113207,1.144456,1.016121,1.130233,1.167625,1.258305,1.100592,1.233516,1.213442,1.239522,1.311277,1.344777,1.635004,1.508876,1.351856,1.475050
0.928069,0.931363,1.004386,0.863919,0.885647,0.885962,0.804693,0.845691,0.838351,0.968021,0.878018,0.817947,0.779178,0.563392,0.883244,0.841124,0.762715,0.191161,0.388245,0.216757
1.000646,1.024225,1.037286,1.030792,1.009170,1.051143,1.117377,1.156674,0.989368,1.090377,1.151534,1.025482,1.306213,1.421131,1.281060,1.681736,1.242709,1.433624,1.389911,1.623864
0.929404,0.956683,0.958865,0.859989,0.949466,0.755381,0.841334,0.942206,0.815490,0.698588,0.792718,0.782956,0.734735,0.746220,0.723661,0.916519,0.581523,0.436206,0.361770,0.582177
0.950209,1.051612,1.051496,1.012090,1.108321,1.227914,1.173852,1.187951,1.197934,1.083047,1.180918,1.174511,1.268544,1.140809,1.295512,1.395731,1.382004,1.421907,1.628776,1.308892
0.966659,0.933865,0.948234,0.926660,1.064025,1.026021,0.909292,0.848709,0.760962,0.829496,0.820858,0.844338,0.785964,0.491772,0.753625,0.643393,0.675786,0.558795,0.581220,0.107489
1.042153,1.100052,1.117124,0.986118,1.158849,0.888294,1.120910,1.125606,1.126723,1.196060,1.383979,1.139480,0.987388,1.170380,1.453117,1.271484,1.147203,1.598811,1.487440,1.522728
1.008256,1.017743,0.903214,0.805085,0.753392,0.930933,0.835962,0.808523,0.656253,0.960848,0.926709,0.668860,0.700602,0.642139,0.951906,0.582217,0.427933,0.134914,0.731641,0.547826
0.936708,1.064849,0.943488,1.114215,1.080612,1.096309,1.078854,1.141435,1.150475,1.307001,1.325106,1.264478,1.084034,1.326755,1.390407,1.074761,1.382161,1.357116,1.394622,1.448596
1.028255,0.939904,0.953549,0.860379,0.920746,0.916401,0.898861,0.764807,0.827760,0.808765,0.722375,0.757620,0.862052,0.742545,0.816957,0.599282,0.682440,0.660146,0.532812,0.615034
1.020720,1.052023,1.074793,0.979221,1.046681,1.090550,1.035029,1.152206,1.354793,1.114164,1.250040,1.181653,1.250262,1.330907,1.151586,1.367478,1.158428,1.407965,1.222860,1.822140
0.991494,0.936361,0.900448,0.931972,0.849468,0.934053,0.898633,0.912362,0.865067,0.819913,0.734120,0.520462,0.874042,0.768077,0.686720,0.620709,0.752774,0.631034,0.728690,0.450551
1.005127,1.033600,0.995538,1.063501,1.166779,0.973622,1.208566,1.107321,1.165987,1.118880,1.315727,1.439740,1.372632,1.313997,1.202223,1.342514,1.276392,1.325587,1.353565,1.725232
0.959785,0.954423,0.961876,1.006393,0.923312,1.014915,0.807217,0.790650,0.824888,0.969494,0.847540,0.690619,0.542417,0.838559,0.573913,0.689070,0.580307,0.912830,0.324114,0.388940
1.011725,0.977007,1.117294,1.099837,0.957830,1.097050,1.200804,1.181231,1.371125,1.145649,1.108846,1.203368,1.347475,1.322595,1.389274,1.537299,1.373291,1.456963,1.515872,1.241381
0.969339,0.965831,0.904512,0.901730,0.888900,0.870032,0.676839,0.858710,0.866979,0.778563,0.681271,0.681524,0.895673,0.909469,0.771705,0.845931,0.573678,0.688042,0.297908,0.485840
1.031076,1.035573,1.011024,1.072207,1.034815,1.165412,1.146043,1.160266,1.054588,1.164189,1.269557,1.259340,1.194145,1.151973,1.358816,1.085973,1.331982,1.685508,1.430994,1.401771
0.967367,1.040650,0.985812,0.908979,0.927120,0.827866,1.000465,0.871942,1.080391,0.876753,0.926304,0.636748,0.595262,0.980179,0.676699,0.712785,0.598264,0.558617,0.280290,0.764173
1.053184,1.018973,1.099705,1.104165,1.025674,1.023305,0.982215,1.169747,1.103773,1.078712,1.198805,1.174410,1.294052,1.050914,1.409866,1.273946,1.601610,1.435240,1.264030,1.388109
0.990225,0.905447,0.946935,0.898572,0.879922,0.899338,0.911277,0.924841,0.934259,0.840160,0.939958,0.832656,0.641587,0.747798,0.547538,0.731000,0.685785,0.556828,0.687869,0.645413
1.095122,1.074673,1.028497,0.999256,1.097312,1.120868,1.168275,1.162832,1.140937,1.220175,1.299807,1.347916,1.258558,1.229069,1.406430,1.376880,1.309907,1.355731,1.530229,1.523827
0.981750,0.948247,1.016004,0.957699,0.844262,0.840831,0.783948,0.857934,0.932733,0.849365,0.564459,0.648717,0.703354,0.793503,0.754312,0.627839,0.713982,0.709220,0.634928,0.531399
0.995013,1.126281,1.001619,1.092438,1.000159,1.144753,1.160474,0.986717,1.151219,1.089321,1.346445,1.269599,1.209709,1.425329,1.310018,1.309160,1.385695,1.619276,1.385851,1.593324
1.034009,1.005540,0.965916,1.006467,1.013765,0.921158,0.908658,0.903928,0.933353,0.893495,0.907737,0.814201,0.744699,0.648096,0.520029,0.793683,0.439810,0.566102,0.702203,0.621123
1.100164,1.084161,1.083991,1.096099,1.150079,1.122584,1.153191,0.912642,1.238136,0.874572,1.156265,1.272416,1.088900,1.503671,1.351804,1.379513,1.276193,1.392000,1.416778,1.298501
0.928352,0.930104,0.946879,0.869445,0.968619,0.806030,0.812043,0.888435,1.013501,0.849966,0.826413,0.828029,0.777399,0.685529,0.771003,0.496120,0.417473,0.662825,0.676655,0.658625
1.083877,1.010858,1.030151,1.172301,1.027916,1.070532,1.096473,1.066737,1.315608,1.211984,1.315774,1.342838,1.334140,1.254493,1.428281,1.536868,1.383420,1.105149,1.483759,1.608288
0.960966,0.906899,1.069247,1.025986,0.879972,0.811229,0.895482,0.930382,1.006055,0.767611,0.765888,0.822465,0.704065,0.634628,0.667398,0.856689,0.559317,0.631989,0.265862,0.630611
1.111366,1.106573,1.077440,1.082024,1.060702,1.047309,1.065809,1.056263,1.211463,0.974277,1.018098,1.108056,1.344719,1.165889,1.360527,1.253188,1.398409,1.377836,1.415466,1.779141
0.974028,0.945350,0.916021,0.885083,0.804685,0.939498,1.038886,1.009988,0.933212,0.867395,0.704026,0.716897,0.881126,0.862781,0.787726,0.609708,0.434893,0.697333,0.469927,0.705905
1.027821,1.048067,1.070130,1.034877,1.062278,1.050880,1.147247,1.187915,1.071783,1.274015,0.958037,1.332214,1.327594,1.057612,1.252733,1.222185,1.277943,1.526722,1.492245,1.589045
0.986590,0.964403,0.944957,0.980027,0.971083,0.957783,0.954301,0.887686,1.015016,0.749041,0.733909,0.832650,0.712276,0.898730,0.739117,0.690380,0.414369,0.597226,0.504977,0.362914
1.014892,0.981764,0.956783,0.987647,1.046062,1.032702,1.044645,1.148996,1.242806,1.098044,1.297661,1.304702,1.183560,1.226499,1.447147,1.306987,1.489788,1.263329,1.351812,1.418576
0.905382,0.981200,0.977039,0.952616,1.018557,0.825385,0.916097,0.962157,0.650039,0.835140,0.858776,0.949726,0.895302,0.603770,0.622705,0.601907,0.363747,0.507042,0.775376,0.580436
1.015651,1.035026,1.056023,1.143392,1.105358,1.051541,1.091774,1.275567,0.933614,1.093448,1.323604,1.251204,1.364099,1.197002,1.308624,1.422399,1.544316,1.261868,1.405333,1.407676
0.993319,0.905322,0.954411,0.963851,0.925855,0.973752,1.026823,0.854877,0.878355,0.784064,0.737434,0.744217,0.771488,0.789581,0.592405,0.633671,0.859271,0.617813,0.740832,0.501911
1.020015,0.911411,1.149984,1.066790,1.147054,1.021364,1.147798,1.204171,1.197961,1.222054,1.218674,1.331698,1.258547,1.081024,1.368594,1.395007,1.258529,1.301836,1.489059,1.612718
0.944144,0.962914,0.915387,1.000202,0.969247,0.867676,0.925267,0.912981,0.864026,0.837350,0.960616,0.696001,0.781098,0.516423,0.714605,0.784579,0.758184,0.740673,0.385132,0.482482
1.067142,1.026009,0.958805,1.028581,0.977361,0.949055,1.087794,1.130438,1.231635,1.261341,1.310974,1.149392,1.205442,1.220034,1.389533,1.383859,1.134514,1.420160,1.170387,1.324111
1.011490,0.943966,0.953707,1.018834,0.930841,0.941132,0.794265,0.822492,0.806987,0.694556,0.691877,0.931303,0.709403,0.656929,0.467408,0.702957,0.526910,0.732917,0.553778,0.457842
0.996450,1.083025,1.044426,1.067663,1.177114,1.058881,1.104785,1.165710,1.078765,1.246745,0.944462,1.225357,1.318683,1.402825,1.756144,1.459144,1.601212,1.343664,1.473104,1.497323
0.980781,1.025252,0.933384,0.865257,0.843319,0.848499,0.837678,0.846248,0.753909,0.777639,0.712838,0.824152,0.699192,0.748942,0.702109,0.509536,0.691847,0.633425,0.623476,0.563389
1.052070,1.023710,1.208697,1.070135,0.970910,1.073562,1.138622,1.187193,1.080429,1.102257,1.029748,1.163198,1.281926,1.381767,1.128317,1.411389,1.394441,1.798010,1.734634,1.529469
0.976587,0.935981,0.825781,0.873706,0.853151,0.893777,0.727868,1.009493,1.025915,0.881917,0.769841,0.488935,0.918527,0.844087,0.729214,0.514297,0.655727,0.685264,0.556445,0.258297
1.004987,0.994294,1.060504,1.079231,0.996226,1.070181,1.181554,1.196258,1.192900,1.152190,1.506078,1.250261,1.146061,1.473322,1.229911,1.311957,1.087097,1.510040,1.211063,1.762783
0.914452,0.954823,1.001122,0.869588,0.895898,0.876761,0.975532,0.891934,0.797972,0.842022,0.889106,0.768071,0.805485,0.646864,0.665293,0.548906,0.733534,0.747535,0.622040,0.388828
0.999425,1.012210,0.969944,1.101410,1.162434,1.097110,1.088617,1.068908,1.362686,1.077951,1.179655,1.272288,1.257695,1.372675,1.235276,1.309915,1.203113,1.396140,1.498377,1.541538
0.981755,0.902539,1.043476,0.856252,0.857159,0.970999,0.743331,0.923208,0.787678,0.735573,0.769249,0.686659,0.548227,0.786952,0.758740,0.670115,0.636188,0.497494,0.445613,0.408635
1.070327,1.006837,1.117583,1.031980,1.096081,0.991770,1.156564,1.041866,1.083106,1.146328,1.156998,1.111472,1.379246,1.099779,1.506463,1.505078,1.486031,1.395148,1.522914,1.460499
0.955006,1.016612,0.935278,0.864127,0.887351,0.965576,0.821791,1.037734,0.855734,0.826484,0.882182,0.536269,0.422503,0.503469,0.660418,0.917688,0.721992,0.661795,0.607480,0.426767
1.039969,1.035764,1.044362,1.148313,1.014103,1.118714,1.113592,1.033784,1.109434,1.214896,1.348377,1.176343,1.210872,1.367276,1.091080,0.914447,1.189447,1.597747,1.549064,1.276082
0.885423,0.977203,0.876072,0.870147,0.946049,0.949273,0.830830,1.014336,0.761907,0.735761,0.631085,0.723421,0.602726,0.860948,1.044761,0.770929,0.422082,0.730817,0.312166,0.412042
0.957398,1.074878,1.054224,0.999281,1.142424,1.235380,1.119400,1.222359,1.236700,1.310463,1.234726,1.376464,1.305584,1.329290,1.118503,1.504955,1.513739,1.387610,1.243150,1.262666
1.009469,0.937873,0.942518,0.929386,0.974572,1.032077,0.807679,0.790258,0.770499,0.780018,0.765565,0.772342,0.637553,0.595182,1.052088,0.497413,0.586134,0.871762,0.833331,0.208469
0.982286,1.072582,1.095697,1.084081,1.080082,1.197969,1.030671,1.046394,1.119402,1.227159,1.055750,1.104041,1.259687,1.293373,1.229984,1.366351,1.272603,1.403818,1.532387,1.553620
1.022053,0.947463,0.902480,0.863808,0.883680,0.879241,0.884396,0.864390,0.923931,0.956350,0.729015,0.763841,0.655682,0.886631,0.693527,0.669333,0.657759,0.839289,0.641313,0.503056
1.081311,1.001209,1.017331,1.071441,1.010585,1.009639,0.961194,1.205944,1.178347,1.093946,1.364133,1.219159,1.283487,1.221739,1.367454,1.459605,1.612673,1.370269,1.546203,1.325862
0.975179,0.926038,0.993748,0.983321,0.930036,0.845737,0.817692,0.989782,0.594201,0.727335,0.772751,0.816381,0.736613,0.847629,0.642170,0.736852,0.588699,0.479210,0.537558,0.188448
1.058201,1.025094,1.100601,1.028473,0.972293,1.174076,1.148862,1.191703,1.235207,1.083456,1.189128,1.193887,1.073798,1.111346,1.120231,1.264262,1.220559,1.396407,1.411141,1.764244
0.934290,0.962329,0.921649,0.887550,0.835333,0.929460,0.925635,0.828180,0.824856,0.790566,0.914807,0.840639,0.677547,0.654169,0.848950,0.609682,0.590500,0.604063,0.593721,0.455275
1.069485,0.943667,1.022164,0.990101,1.125914,1.081294,1.054399,1.122754,1.183348,0.957460,1.226456,1.279474,1.396340,1.067242,1.342908,1.517229,1.368640,1.501382,1.257323,1.476539
1.002633,0.995478,0.938370,0.967998,0.979241,0.903411,0.895203,0.653826,0.827003,0.874705,0.894379,0.612152,0.707320,0.695647,0.844404,0.705542,0.432181,0.574251,0.536218,0.691718
1.019486,1.130734,1.104560,1.097014,1.172566,1.083572,1.219407,1.192988,1.013073,1.234414,1.269856,1.010786,1.169714,1.501978,1.541903,1.058817,1.394252,1.270918,1.354589,1.471451
0.941996,0.908271,0.968599,0.997371,0.987177,0.828853,0.869563,0.857169,0.810686,0.864588,0.799841,0.797261,0.604492,0.560132,0.738160,0.524123,0.551125,0.465301,0.517422,0.390388
1.040326,0.982605,1.075976,1.115849,1.153120,1.181923,1.096099,1.185226,1.020487,1.096514,1.271505,1.124307,1.209771,1.269118,1.353386,1.511456,1.317431,1.301691,1.368685,1.478709
0.912455,0.891074,1.024862,0.892437,0.907131,0.848762,1.046258,0.841038,0.872214,0.928166,0.951910,0.712572,0.671894,0.874758,0.518583,0.447917,0.807196,0.448978,0.663571,0.526080
history/aob/data/linear/labels.csv
ADDED
@@ -0,0 +1 @@
1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1
history/aob/data/maritime/data.csv
ADDED
The diff for this file is too large to render.
See raw diff
history/aob/data/maritime/labels.csv
ADDED
@@ -0,0 +1 @@
-1,1,1,-1,-1,-1,-1,-1,1,1,-1,1,-1,-1,-1,-1,1,-1,-1,1,1,1,1,-1,-1,1,-1,-1,-1,1,-1,1,-1,-1,1,-1,1,-1,-1,-1,1,-1,-1,1,1,-1,-1,1,1,1,-1,1,1,-1,-1,1,1,-1,1,1,-1,1,1,1,-1,-1,-1,1,1,-1,1,-1,-1,1,-1,1,-1,1,-1,-1,1,-1,-1,1,-1,1,-1,1,-1,-1,1,1,1,-1,1,1,-1,-1,1,-1,-1,1,-1,-1,1,1,1,1,-1,1,-1,-1,-1,1,1,1,1,1,-1,-1,-1,1,-1,1,-1,-1,-1,1,1,-1,1,-1,1,-1,1,1,-1,1,-1,1,1,1,1,-1,-1,1,1,1,-1,-1,1,1,1,1,-1,-1,-1,1,-1,1,1,-1,-1,1,-1,1,1,-1,-1,1,-1,1,1,1,-1,-1,-1,-1,1,1,1,1,-1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,-1,1,-1,-1,1,1,-1,1,-1,1,-1,1,-1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,-1,1,1,1,1,1,-1,-1,-1,-1,1,-1,1,1,-1,-1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,1,1,1,-1,-1,-1,1,1,1,-1,1,1,1,-1,-1,-1,-1,1,-1,-1,1,-1,1,1,1,-1,-1,-1,-1,-1,1,1,-1,1,1,-1,-1,-1,1,1,1,1,-1,-1,1,-1,1,1,1,-1,1,1,-1,-1,1,-1,-1,1,1,1,-1,1,1,1,1,-1,1,1,1,-1,1,1,1,1,1,-1,-1,1,1,-1,1,1,-1,1,-1,-1,1,1,-1,1,-1,1,-1,1,1,1,1,-1,-1,-1,-1,-1,1,1,-1,-1,-1,1,1,-1,1,-1,1,1,1,-1,-1,1,1,-1,-1,1,-1,1,1,-1,-1,1,1,-1,1,-1,1,1,1,-1,-1,-1,-1,1,-1,-1,-1,1,1,1,1,1,1,-1,-1,1,-1,1,-1,1,1,1,-1,-1,-1,1,-1,1,-1,1,1,1,-1,1,1,1,1,1,-1,-1,-1,-1,1,-1,-1,-1,-1,-1,1,-1,1,1,-1,-1,1,1,-1,1,-1,1,-1,1,-1,1,1,-1,-1,1,-1,1,-1,-1,-1,-1,1,-1,1,-1,1,-1,-1,1,1,-1,1,1,1,1,1,-1,-1,-1,1,1,1,1,-1,1,-1,-1,-1,-1,1,-1,1,1,1,1,-1,1,1,-1,-1,1,1,-1,-1,-1,1,1,1,-1,1,-1,1,-1,1,1,1,1,1,-1,1,1,-1,1,-1,1,-1,1,1,-1,-1,-1,1,-1,1,-1,1,1,-1,1,-1,-1,-1,-1,1,1,1,-1,-1,-1,1,-1,-1,1,-1,-1,-1,-1,-1,1,-1,-1,-1,1,1,1,1,1,1,-1,1,1,-1,1,1,1,-1,1,1,1,1,-1,1,1,1,-1,-1,1,1,1,-1,-1,-1,1,1,-1,-1,-1,-1,1,-1,1,-1,-1,1,1,1,-1,1,1,1,1,1,-1,-1,1,-1,1,1,1,-1,-1,-1,1,-1,-1,-1,-1,1,-1,-1,1,-1,-1,1,-1,-1,-1,1,-1,-1,1,1,-1,1,-1,1,1,-1,1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,1,-1,-1,-1,-1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,-1,-1,1,1,1,1,1,1,-1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,-1,1,-1,-1,1,-1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,-1,1,-1,-1,-1,-1,-1,-1,1,-1,1,-1,-1,1,1,1,1,-1,-1,-1,-1,1,-1,-1,1,-1,1,-1,1,-1,-1,-1,-1,-1,1,-1,1,1,1,-1,-1,-1,1,-1,-1,1,1,-1,1,-1,1,1,1,-1,-1,-1,1,1,-1,1,1,-1,-1,-1,1,1,1,-1,-1,-1,-1,-1,-1,1,-1,1,1,-1,-1,-1,1,-1,-1,-1,1,-1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,-1,1,1,-1,1,-1,1,1,1,1,1,1,1,-1,1,1,1,-1,1,1,-1,-1,1,-1,1,-1,1,1,-1,1,-1,-1,1,-1,1,-1,-1,-1,1,1,1,-1,-1,-1,-1,1,1,-1,-1,-1,-1,-1,-1,1,1,1,-1,1,1,-1,-1,1,1,-1,1,-1,1,-1,1,1,1,1,-1,1,-1,1,-1,1,-1,-1,1,1,-1,-1,-1,1,-1,-1,1,1,-1,-1,-1,-1,-1,1,-1,-1,-1,1,1,1,1,-1,1,1,-1,1,1,-1,1,-1,1,1,1,-1,-1,1,1,1,-1,1,1,1,-1,-1,-1,-1,1,-1,1,1,1,-1,1,1,-1,1,1,1,-1,1,1,1,1,-1,1,1,-1,1,-1,1,1,1,1,-1,-1,1,1,-1,1,-1,-1,-1,-1,1,-1,1,1,1,-1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,1,1,-1,1,-1,-1,1,1,1,-1,-1,1,-1,-1,-1,-1,1,1,1,-1,1,-1,1,1,1,-1,-1,-1,1,1,1,1,1,1,-1,1,-1,-1,-1,1,1,1,1,1,-1,1,1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,1,-1,1,1,-1,1,-1,1,-1,-1,1,-1,1,-1,-1,-1,-1,-1,1,1,-1,-1,1,-1,1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,1,-1,-1,-1,1,-1,-1,-1,-1,-1,1,-1,1,-1,-1,-1,1,-1,1,-1,-1,1,-1,-1,1,-1,1,1,-1,-1,1,1,1,-1,-1,-1,-1,-1,-1,1,1,-1,1,-1,-1,-1,1,1,-1,1,-1,1,-1,1,1,-1,1,-1,1,-1,-1,-1,-1,1,1,1,-1,-1,1,-1,-1,1,1,-1,1,1,-1,-1,1,1,-1,1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,-1,-1,1,1,-1,1,1,-1,-1,-1,-1,-1,1,1,-1,1,1,1,-1,-1,1,1,-1,1,-1,-1,-1,1,1,-1,1,1,-1,1,-1,-1,1,-1,1,-1,1,-1,-1,1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,-1,-1,-1,1,-1,-1,-1,1,-1,1,-1,1,1,-1,1,1,-1,-1,1,1,-1,1,1,1,1,1,-1,1,-1,-1,1,1,1,1,-1,-1,1,1,1,-1,-1,1,-1,1,-1,-1,1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,-1,1,1,1,1,1,1,1,-1,1,-1,1,-1,-1,1,-1,-1,1,1,-1,-1,1,-1,1,1,1,1,-1,-1,1,1,-1,-1,-1,1,-1,1,1,1,-1,-1,1,-1,-1,-1,1,-1,-1,1,1,1,-1,-1,1,1,-1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,1,-1,1,1,-1,1,-1,-1,1,1,1,-1,1,-1,-1,-1,-1,-1,1,-1,-1,1,1,1,-1,1,1,1,1,1,1,1,-1,-1,1,1,-1,1,1,1,-1,-1,-1,-1,1,-1,1,1
,1,1,1,-1,-1,1,-1,-1,1,-1,-1,1,-1,1,-1,-1,-1,-1,1,-1,-1,-1,-1,-1,1,1,-1,1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,-1,-1,1,1,1,1,-1,-1,-1,-1,1,-1,-1,1,-1,1,-1,1,1,1,1,1,-1,1,-1,1,-1,1,1,1,-1,1,1,1,1,1,1,-1,-1,-1,1,-1,-1,-1,1,-1,1,1,-1,1,-1,-1,-1,1,1,-1,1,-1,1,1,1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,1,1,-1,-1,-1,-1,-1,-1,1,-1,1,1,1,1,-1,-1,-1,1,1,1,1,1,-1,1,1,-1,1,1,-1,-1,1,1,-1,1,1,1,1,1,1,1,1,-1,-1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,-1,-1,1,1,-1,-1,1,1,1,1,1,-1,1,1,1,-1,1,1,1,1,1,1,-1,-1,-1,-1,1,1,-1,-1,-1,1,-1,-1,-1,1,-1,1,-1,1,-1,-1,-1,-1,1,-1,-1,-1,1,1,-1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,-1,1,1,-1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,1,-1,1,-1,-1,-1,-1,1,1,-1,-1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,-1,-1,1,1,-1,1,1,-1,1,1,-1,-1,-1,1,-1,1,1,1,1,-1,1,-1,-1,1,-1,-1,-1,-1,-1,-1,-1,1,-1,1,1,1,1,1,-1,-1,-1,1,1,-1,1,1,1,1,1,-1,-1,1,1,1,-1,-1,-1,1,1,-1,1,-1,-1,-1,-1,-1,-1,1,-1,1,-1,-1,-1,1,-1,-1,1,1,1,1,1,-1,-1,1,-1,1,-1,1,-1,-1,-1,-1,1,-1,-1,1,-1,-1,-1,-1,1,1,-1,-1,-1,1,1,-1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,-1,-1,1,1,1,1,-1,1,-1,1,1,-1,1,-1,1,-1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,1,-1,-1,1,-1,1,1,-1,-1,-1,1,1,1,1,-1,-1,-1,1,-1,-1,-1,1,1,-1,-1,-1,-1,1,-1,1,1,1,1,-1,1,1,-1,-1,-1,-1,1,-1,1,1,-1,1,-1,1,1,-1,-1,-1,1,-1,-1,1,1,-1,-1,1,-1,1,1,1,1,-1,1,-1,-1,-1,1,-1,-1,-1,1,1,-1,1,-1,1,1,-1,-1,-1,-1,-1,-1,1,-1,1,-1,-1,-1,1,-1,-1,1,-1,-1,-1,-1,1,1,1,1,1,1,-1,1,-1,-1,-1,1,-1,1,-1,-1,1,-1,1,-1,1,1,-1,-1,1,-1,-1,1,-1,1,-1,-1,-1,-1,1,-1,1
history/aob/data/maritime/times.csv
ADDED
@@ -0,0 +1 @@
0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195,200,205,210,215,220,225,230,235,240,245,250,255,260,265,270,275,280,285,290,295,300
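The single row added to `history/aob/data/maritime/times.csv` above is a uniform time grid: 61 instants running from 0 to 300 in steps of 5. A minimal sketch of loading and checking that grid, assuming plain one-row CSV content as shown (this is not taken from the repository's own loaders):

```python
import numpy as np

# times.csv holds a single comma-separated row of sampling instants.
times = np.loadtxt("history/aob/data/maritime/times.csv", delimiter=",")

assert times.shape == (61,)           # 0, 5, 10, ..., 300
assert np.all(np.diff(times) == 5.0)  # uniform 5-unit sampling step
```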
history/aob/data/robot5/data.csv
ADDED
@@ -0,0 +1,71 @@
# Fx, Fy, Fz, Tx, Ty, Tz
-2.000000,-1.000000,81.000000,0.000000,-5.000000,0.000000,-2.000000,-1.000000,79.000000,0.000000,-4.000000,0.000000,-2.000000,-1.000000,79.000000,0.000000,-4.000000,0.000000,-2.000000,-1.000000,80.000000,0.000000,-4.000000,0.000000,-3.000000,-1.000000,79.000000,1.000000,-5.000000,1.000000,-2.000000,-1.000000,81.000000,0.000000,-5.000000,0.000000,-2.000000,-1.000000,80.000000,0.000000,-4.000000,0.000000,-2.000000,-1.000000,79.000000,0.000000,-4.000000,0.000000,-2.000000,-1.000000,79.000000,0.000000,-4.000000,0.000000,-2.000000,-1.000000,79.000000,0.000000,-4.000000,0.000000,-2.000000,-1.000000,80.000000,1.000000,-4.000000,0.000000,-1.000000,-1.000000,80.000000,0.000000,-4.000000,1.000000,-2.000000,-1.000000,78.000000,0.000000,-5.000000,0.000000,-3.000000,-1.000000,80.000000,1.000000,-4.000000,1.000000,-2.000000,-1.000000,79.000000,0.000000,-4.000000,0.000000
6.000000,-1.000000,79.000000,-2.000000,4.000000,-3.000000,42.000000,-3.000000,80.000000,5.000000,53.000000,3.000000,-5.000000,4.000000,74.000000,-15.000000,-10.000000,-1.000000,-4.000000,-17.000000,93.000000,26.000000,-6.000000,-3.000000,2.000000,1.000000,74.000000,-6.000000,-3.000000,-4.000000,0.000000,-5.000000,78.000000,3.000000,-6.000000,0.000000,-3.000000,-7.000000,83.000000,8.000000,-5.000000,-3.000000,-6.000000,2.000000,71.000000,-12.000000,-11.000000,1.000000,-3.000000,3.000000,76.000000,-6.000000,-9.000000,-4.000000,-2.000000,-9.000000,86.000000,16.000000,-8.000000,-2.000000,-4.000000,2.000000,77.000000,-5.000000,-7.000000,2.000000,2.000000,-3.000000,81.000000,7.000000,1.000000,9.000000,-1.000000,-5.000000,80.000000,6.000000,-6.000000,0.000000,-4.000000,5.000000,78.000000,-14.000000,-9.000000,-4.000000,-4.000000,1.000000,80.000000,-3.000000,-12.000000,5.000000
-2.000000,-6.000000,85.000000,14.000000,-5.000000,2.000000,0.000000,2.000000,74.000000,-7.000000,1.000000,0.000000,-4.000000,-5.000000,76.000000,7.000000,-11.000000,4.000000,-2.000000,-2.000000,78.000000,1.000000,-6.000000,1.000000,-4.000000,4.000000,76.000000,-18.000000,-13.000000,-10.000000,-2.000000,4.000000,74.000000,-10.000000,-9.000000,-4.000000,1.000000,-9.000000,87.000000,13.000000,-2.000000,-10.000000,-8.000000,3.000000,69.000000,-13.000000,-20.000000,-5.000000,-1.000000,0.000000,83.000000,-2.000000,-6.000000,-8.000000,-3.000000,-6.000000,83.000000,6.000000,-14.000000,-5.000000,-3.000000,8.000000,73.000000,-19.000000,-11.000000,-3.000000,-6.000000,-1.000000,80.000000,-1.000000,-20.000000,6.000000,0.000000,-9.000000,87.000000,13.000000,-5.000000,2.000000,-5.000000,5.000000,67.000000,-17.000000,-16.000000,7.000000,-6.000000,-10.000000,86.000000,16.000000,-14.000000,-1.000000
-2.000000,-10.000000,86.000000,13.000000,-7.000000,-5.000000,-5.000000,4.000000,73.000000,-16.000000,-18.000000,-2.000000,0.000000,1.000000,79.000000,-5.000000,-10.000000,-4.000000,-2.000000,-6.000000,87.000000,5.000000,-12.000000,-1.000000,-6.000000,3.000000,73.000000,-16.000000,-21.000000,-5.000000,2.000000,-9.000000,84.000000,7.000000,-6.000000,-9.000000,-1.000000,-8.000000,82.000000,6.000000,-8.000000,0.000000,1.000000,3.000000,78.000000,-11.000000,-2.000000,-5.000000,-3.000000,-8.000000,88.000000,5.000000,-13.000000,-3.000000,-1.000000,-1.000000,81.000000,-4.000000,-10.000000,-13.000000,-5.000000,4.000000,74.000000,-17.000000,-21.000000,-15.000000,-4.000000,-14.000000,84.000000,17.000000,-17.000000,-4.000000,1.000000,-7.000000,78.000000,3.000000,-7.000000,-2.000000,-9.000000,6.000000,75.000000,-18.000000,-28.000000,-9.000000,-1.000000,-9.000000,92.000000,11.000000,-15.000000,-11.000000
0.000000,2.000000,74.000000,-12.000000,-9.000000,-5.000000,-2.000000,4.000000,74.000000,-16.000000,-15.000000,-3.000000,2.000000,-9.000000,85.000000,11.000000,-4.000000,-6.000000,1.000000,-3.000000,77.000000,-5.000000,-11.000000,-10.000000,1.000000,-1.000000,71.000000,-7.000000,-2.000000,-3.000000,-5.000000,-6.000000,83.000000,2.000000,-21.000000,-5.000000,-1.000000,-6.000000,77.000000,-2.000000,-17.000000,-11.000000,-6.000000,-2.000000,77.000000,-10.000000,-25.000000,-10.000000,-5.000000,-10.000000,90.000000,7.000000,-21.000000,-9.000000,-1.000000,5.000000,75.000000,-23.000000,-16.000000,-6.000000,-3.000000,5.000000,73.000000,-19.000000,-19.000000,-6.000000,-4.000000,-11.000000,89.000000,11.000000,-14.000000,-4.000000,-2.000000,5.000000,72.000000,-18.000000,-11.000000,-9.000000,3.000000,-3.000000,74.000000,-1.000000,-7.000000,-1.000000,-7.000000,-13.000000,88.000000,12.000000,-30.000000,-3.000000
-2.000000,3.000000,70.000000,-15.000000,-18.000000,-5.000000,-1.000000,3.000000,78.000000,-16.000000,-17.000000,-4.000000,-1.000000,-7.000000,83.000000,4.000000,-14.000000,-10.000000,0.000000,-2.000000,70.000000,-12.000000,-12.000000,-7.000000,-4.000000,-2.000000,75.000000,-6.000000,-15.000000,-5.000000,-7.000000,-13.000000,85.000000,9.000000,-26.000000,-4.000000,0.000000,1.000000,75.000000,-14.000000,-9.000000,-5.000000,3.000000,-4.000000,80.000000,-2.000000,-10.000000,-10.000000,-3.000000,-2.000000,83.000000,-10.000000,-25.000000,-5.000000,1.000000,-5.000000,81.000000,0.000000,-12.000000,-8.000000,-6.000000,0.000000,77.000000,-13.000000,-31.000000,-7.000000,-6.000000,-9.000000,83.000000,3.000000,-26.000000,-5.000000,-6.000000,0.000000,77.000000,-21.000000,-27.000000,-10.000000,-5.000000,-9.000000,84.000000,5.000000,-22.000000,-10.000000,1.000000,-3.000000,79.000000,-5.000000,-16.000000,-6.000000
-5.000000,4.000000,77.000000,-20.000000,-22.000000,-6.000000,-4.000000,-2.000000,80.000000,-8.000000,-20.000000,-3.000000,-4.000000,-7.000000,82.000000,-1.000000,-22.000000,-9.000000,-1.000000,-2.000000,74.000000,-7.000000,-15.000000,-7.000000,-4.000000,-8.000000,84.000000,0.000000,-24.000000,-4.000000,-3.000000,3.000000,81.000000,-24.000000,-26.000000,-13.000000,-2.000000,2.000000,80.000000,-20.000000,-22.000000,-9.000000,-1.000000,-7.000000,82.000000,-1.000000,-18.000000,-3.000000,-1.000000,-2.000000,78.000000,-16.000000,-21.000000,-9.000000,1.000000,-5.000000,76.000000,0.000000,-10.000000,-8.000000,-6.000000,-3.000000,82.000000,-13.000000,-28.000000,-9.000000,-3.000000,-5.000000,77.000000,-5.000000,-20.000000,-9.000000,-2.000000,0.000000,76.000000,-19.000000,-20.000000,-14.000000,-3.000000,-8.000000,79.000000,-4.000000,-23.000000,-11.000000,-5.000000,0.000000,78.000000,-18.000000,-25.000000,-9.000000
-5.000000,4.000000,78.000000,-28.000000,-28.000000,-9.000000,-4.000000,-8.000000,79.000000,-5.000000,-24.000000,-14.000000,-1.000000,-9.000000,78.000000,0.000000,-17.000000,-14.000000,-1.000000,-1.000000,78.000000,-17.000000,-21.000000,-9.000000,1.000000,-5.000000,80.000000,-12.000000,-18.000000,-7.000000,-3.000000,3.000000,78.000000,-31.000000,-23.000000,-13.000000,0.000000,-8.000000,78.000000,0.000000,-16.000000,-16.000000,-7.000000,3.000000,81.000000,-29.000000,-32.000000,-12.000000,-7.000000,1.000000,79.000000,-21.000000,-29.000000,-11.000000,-6.000000,-1.000000,79.000000,-21.000000,-29.000000,-16.000000,-3.000000,-11.000000,77.000000,0.000000,-21.000000,-1.000000,-1.000000,-1.000000,77.000000,-19.000000,-20.000000,-8.000000,-1.000000,4.000000,77.000000,-24.000000,-20.000000,-7.000000,0.000000,-5.000000,77.000000,-6.000000,-20.000000,-10.000000,-2.000000,-8.000000,79.000000,4.000000,-21.000000,-10.000000
-4.000000,-2.000000,81.000000,-11.000000,-24.000000,-10.000000,-5.000000,3.000000,77.000000,-29.000000,-26.000000,-6.000000,-5.000000,2.000000,81.000000,-27.000000,-25.000000,-13.000000,-3.000000,0.000000,81.000000,-28.000000,-25.000000,-12.000000,-1.000000,-11.000000,76.000000,-2.000000,-20.000000,-2.000000,-2.000000,-3.000000,76.000000,-20.000000,-20.000000,-7.000000,-2.000000,-2.000000,82.000000,-20.000000,-22.000000,-14.000000,-4.000000,3.000000,76.000000,-26.000000,-23.000000,-9.000000,-6.000000,-7.000000,83.000000,-4.000000,-29.000000,-20.000000,-6.000000,-2.000000,79.000000,-24.000000,-29.000000,-17.000000,-2.000000,-9.000000,79.000000,-8.000000,-24.000000,-13.000000,-4.000000,-5.000000,78.000000,-17.000000,-23.000000,-9.000000,-2.000000,-4.000000,81.000000,-16.000000,-22.000000,2.000000,1.000000,-9.000000,74.000000,-1.000000,-19.000000,-4.000000,-3.000000,1.000000,84.000000,-17.000000,-24.000000,-11.000000
-3.000000,1.000000,84.000000,-26.000000,-22.000000,-11.000000,-1.000000,-2.000000,76.000000,-21.000000,-20.000000,-17.000000,-4.000000,-2.000000,78.000000,-20.000000,-27.000000,-12.000000,-5.000000,-5.000000,83.000000,-11.000000,-28.000000,-11.000000,0.000000,-1.000000,77.000000,-22.000000,-21.000000,-18.000000,-1.000000,2.000000,82.000000,-31.000000,-18.000000,-11.000000,-5.000000,-6.000000,80.000000,-9.000000,-29.000000,-14.000000,-4.000000,-13.000000,76.000000,6.000000,-26.000000,-11.000000,-9.000000,-2.000000,85.000000,-17.000000,-33.000000,-13.000000,-5.000000,2.000000,83.000000,-27.000000,-25.000000,-10.000000,-6.000000,-5.000000,78.000000,-15.000000,-27.000000,-11.000000,-8.000000,-2.000000,81.000000,-20.000000,-33.000000,-10.000000,-1.000000,-1.000000,84.000000,-20.000000,-18.000000,-5.000000,-3.000000,-10.000000,73.000000,-7.000000,-19.000000,-8.000000,-3.000000,1.000000,82.000000,-24.000000,-24.000000,-1.000000
0.000000,-4.000000,80.000000,-22.000000,-22.000000,-18.000000,0.000000,-9.000000,82.000000,-9.000000,-22.000000,-8.000000,-2.000000,0.000000,82.000000,-25.000000,-22.000000,-6.000000,-3.000000,-2.000000,80.000000,-26.000000,-23.000000,-8.000000,-4.000000,-6.000000,74.000000,-14.000000,-25.000000,-11.000000,-9.000000,-1.000000,87.000000,-23.000000,-31.000000,-6.000000,-8.000000,-1.000000,84.000000,-15.000000,-28.000000,-6.000000,0.000000,-4.000000,69.000000,-17.000000,-18.000000,-1.000000,18.000000,47.000000,80.000000,-85.000000,8.000000,-10.000000,-5.000000,-2.000000,82.000000,-17.000000,-32.000000,-11.000000,-4.000000,-16.000000,76.000000,-1.000000,-22.000000,-8.000000,2.000000,1.000000,81.000000,-25.000000,-13.000000,-8.000000,-4.000000,-2.000000,83.000000,-22.000000,-23.000000,-8.000000,-1.000000,-6.000000,76.000000,-14.000000,-22.000000,-9.000000,-3.000000,-1.000000,83.000000,-26.000000,-24.000000,-9.000000
2.000000,-1.000000,82.000000,-1.000000,-2.000000,6.000000,4.000000,0.000000,84.000000,3.000000,4.000000,-7.000000,-1.000000,-6.000000,84.000000,6.000000,-4.000000,1.000000,-6.000000,2.000000,72.000000,-6.000000,-11.000000,0.000000,-2.000000,-4.000000,80.000000,6.000000,-5.000000,4.000000,2.000000,-4.000000,87.000000,9.000000,3.000000,5.000000,-8.000000,6.000000,71.000000,-12.000000,-18.000000,2.000000,0.000000,-5.000000,87.000000,8.000000,0.000000,1.000000,0.000000,-4.000000,84.000000,6.000000,-2.000000,4.000000,-4.000000,7.000000,69.000000,-17.000000,-7.000000,0.000000,2.000000,-9.000000,84.000000,13.000000,-5.000000,-1.000000,-2.000000,-8.000000,84.000000,12.000000,-3.000000,2.000000,-5.000000,0.000000,68.000000,-2.000000,-16.000000,0.000000,-2.000000,-8.000000,86.000000,10.000000,-6.000000,2.000000,-4.000000,-5.000000,82.000000,6.000000,-8.000000,-2.000000
-5.000000,6.000000,67.000000,-15.000000,-16.000000,-7.000000,-3.000000,-3.000000,87.000000,7.000000,-11.000000,-6.000000,-1.000000,-2.000000,83.000000,2.000000,-7.000000,-5.000000,-4.000000,6.000000,71.000000,-18.000000,-14.000000,4.000000,0.000000,-5.000000,85.000000,6.000000,-4.000000,4.000000,-7.000000,-3.000000,81.000000,2.000000,-16.000000,-3.000000,-4.000000,4.000000,73.000000,-12.000000,-13.000000,-10.000000,-1.000000,-7.000000,86.000000,10.000000,-8.000000,1.000000,-3.000000,1.000000,73.000000,-6.000000,-11.000000,-3.000000,-2.000000,-5.000000,81.000000,3.000000,-6.000000,0.000000,-1.000000,-7.000000,85.000000,8.000000,-6.000000,-6.000000,-8.000000,8.000000,70.000000,-23.000000,-19.000000,-6.000000,-3.000000,0.000000,78.000000,-6.000000,-11.000000,-3.000000,-1.000000,-7.000000,80.000000,8.000000,-11.000000,-4.000000,-6.000000,3.000000,73.000000,-11.000000,-18.000000,2.000000
-4.000000,3.000000,76.000000,-11.000000,-16.000000,-5.000000,-2.000000,-13.000000,91.000000,18.000000,-12.000000,-5.000000,0.000000,0.000000,71.000000,-9.000000,-6.000000,-4.000000,-4.000000,0.000000,76.000000,-4.000000,-21.000000,-5.000000,-3.000000,-7.000000,89.000000,3.000000,-16.000000,-16.000000,-2.000000,-1.000000,82.000000,-9.000000,-11.000000,-7.000000,-6.000000,4.000000,73.000000,-11.000000,-20.000000,-4.000000,-5.000000,-9.000000,86.000000,9.000000,-22.000000,-2.000000,1.000000,-3.000000,78.000000,1.000000,-3.000000,-4.000000,-3.000000,-3.000000,79.000000,-4.000000,-25.000000,0.000000,0.000000,-12.000000,90.000000,19.000000,-12.000000,-7.000000,0.000000,11.000000,66.000000,-29.000000,-11.000000,-6.000000,-4.000000,-1.000000,83.000000,-7.000000,-18.000000,-13.000000,2.000000,-9.000000,83.000000,7.000000,-5.000000,-6.000000,1.000000,0.000000,74.000000,-6.000000,-8.000000,3.000000
-3.000000,0.000000,75.000000,-7.000000,-19.000000,-4.000000,-5.000000,-11.000000,84.000000,11.000000,-21.000000,-12.000000,-3.000000,9.000000,67.000000,-27.000000,-18.000000,-8.000000,-5.000000,-1.000000,79.000000,-5.000000,-20.000000,-4.000000,0.000000,-8.000000,83.000000,5.000000,-7.000000,-7.000000,-4.000000,4.000000,74.000000,-18.000000,-21.000000,-11.000000,-4.000000,-11.000000,89.000000,14.000000,-20.000000,-5.000000,-4.000000,-4.000000,79.000000,-1.000000,-20.000000,-5.000000,-2.000000,2.000000,75.000000,-15.000000,-14.000000,-2.000000,-2.000000,-10.000000,86.000000,11.000000,-13.000000,1.000000,-1.000000,-8.000000,76.000000,2.000000,-14.000000,-6.000000,0.000000,7.000000,68.000000,-20.000000,-12.000000,-5.000000,-1.000000,-11.000000,88.000000,11.000000,-15.000000,-10.000000,-6.000000,-4.000000,76.000000,-5.000000,-23.000000,-4.000000,-6.000000,8.000000,76.000000,-25.000000,-21.000000,-6.000000
0.000000,-9.000000,83.000000,7.000000,-9.000000,-8.000000,0.000000,-5.000000,76.000000,3.000000,-7.000000,-2.000000,-2.000000,2.000000,74.000000,-15.000000,-15.000000,-13.000000,-5.000000,-6.000000,85.000000,-2.000000,-25.000000,-6.000000,2.000000,2.000000,74.000000,-15.000000,-10.000000,-6.000000,-3.000000,-4.000000,82.000000,-4.000000,-15.000000,-6.000000,-8.000000,-1.000000,83.000000,-12.000000,-33.000000,-10.000000,-3.000000,-5.000000,75.000000,-4.000000,-19.000000,-3.000000,-1.000000,-1.000000,73.000000,-8.000000,-19.000000,-6.000000,-4.000000,-6.000000,81.000000,-6.000000,-21.000000,-5.000000,-1.000000,-2.000000,76.000000,-13.000000,-20.000000,-6.000000,-4.000000,0.000000,79.000000,-12.000000,-21.000000,-2.000000,-6.000000,-2.000000,81.000000,-11.000000,-24.000000,-6.000000,-2.000000,-4.000000,80.000000,-3.000000,-16.000000,-7.000000,-1.000000,-6.000000,75.000000,-1.000000,-10.000000,-8.000000
-8.000000,-6.000000,85.000000,-6.000000,-31.000000,-13.000000,-1.000000,1.000000,75.000000,-18.000000,-15.000000,-9.000000,2.000000,-11.000000,80.000000,9.000000,-12.000000,-17.000000,-2.000000,-5.000000,80.000000,-12.000000,-21.000000,-19.000000,0.000000,-1.000000,77.000000,-12.000000,-16.000000,-8.000000,-3.000000,-2.000000,78.000000,-9.000000,-20.000000,-10.000000,-2.000000,-14.000000,78.000000,8.000000,-20.000000,-5.000000,-6.000000,4.000000,75.000000,-30.000000,-29.000000,-10.000000,-6.000000,3.000000,80.000000,-18.000000,-25.000000,-11.000000,-4.000000,-5.000000,81.000000,-6.000000,-23.000000,-11.000000,-2.000000,-2.000000,77.000000,-15.000000,-20.000000,-5.000000,-2.000000,-2.000000,76.000000,-10.000000,-20.000000,-11.000000,1.000000,-13.000000,81.000000,12.000000,-13.000000,-13.000000,-2.000000,-1.000000,80.000000,-14.000000,-21.000000,-7.000000,-3.000000,2.000000,80.000000,-22.000000,-23.000000,2.000000
-2.000000,-3.000000,82.000000,-15.000000,-21.000000,-6.000000,-4.000000,4.000000,77.000000,-28.000000,-24.000000,-14.000000,-4.000000,-9.000000,78.000000,6.000000,-20.000000,-8.000000,-5.000000,-2.000000,79.000000,-12.000000,-26.000000,-11.000000,-6.000000,-5.000000,78.000000,-12.000000,-27.000000,-12.000000,-6.000000,0.000000,81.000000,-24.000000,-29.000000,-5.000000,-2.000000,2.000000,78.000000,-33.000000,-22.000000,-9.000000,2.000000,-4.000000,82.000000,-11.000000,-14.000000,-11.000000,-1.000000,-1.000000,81.000000,-22.000000,-21.000000,-12.000000,-1.000000,-7.000000,76.000000,-9.000000,-20.000000,-12.000000,-5.000000,3.000000,78.000000,-32.000000,-25.000000,-16.000000,-7.000000,0.000000,77.000000,-19.000000,-26.000000,-4.000000,-5.000000,-11.000000,80.000000,1.000000,-27.000000,-16.000000,-3.000000,-2.000000,81.000000,-19.000000,-25.000000,-10.000000,-1.000000,-8.000000,80.000000,-1.000000,-17.000000,2.000000
-1.000000,-11.000000,76.000000,5.000000,-20.000000,-13.000000,-1.000000,1.000000,82.000000,-24.000000,-23.000000,-18.000000,-2.000000,-1.000000,78.000000,-17.000000,-23.000000,-9.000000,-7.000000,0.000000,80.000000,-22.000000,-25.000000,-9.000000,-8.000000,7.000000,78.000000,-41.000000,-27.000000,-9.000000,-5.000000,0.000000,77.000000,-21.000000,-25.000000,-11.000000,-2.000000,-5.000000,78.000000,-12.000000,-22.000000,-10.000000,-1.000000,0.000000,81.000000,-25.000000,-21.000000,-6.000000,0.000000,-10.000000,78.000000,-3.000000,-17.000000,-8.000000,0.000000,9.000000,78.000000,-43.000000,-15.000000,-13.000000,-4.000000,1.000000,82.000000,-21.000000,-26.000000,-14.000000,-6.000000,-8.000000,81.000000,-7.000000,-26.000000,-9.000000,-4.000000,0.000000,77.000000,-21.000000,-22.000000,-10.000000,-5.000000,-1.000000,81.000000,-17.000000,-27.000000,-12.000000,-5.000000,-6.000000,81.000000,-6.000000,-27.000000,-9.000000
-3.000000,-2.000000,79.000000,-16.000000,-20.000000,-5.000000,-5.000000,-1.000000,83.000000,-22.000000,-24.000000,-13.000000,-2.000000,-3.000000,77.000000,-15.000000,-20.000000,-15.000000,-3.000000,2.000000,76.000000,-29.000000,-23.000000,-15.000000,-10.000000,-10.000000,89.000000,0.000000,-40.000000,-8.000000,2.000000,1.000000,75.000000,-32.000000,-12.000000,-5.000000,-3.000000,1.000000,79.000000,-27.000000,-20.000000,-4.000000,-4.000000,-5.000000,78.000000,-14.000000,-26.000000,-10.000000,0.000000,-5.000000,82.000000,-20.000000,-16.000000,-7.000000,0.000000,2.000000,78.000000,-29.000000,-16.000000,-5.000000,-6.000000,-2.000000,86.000000,-18.000000,-33.000000,-8.000000,0.000000,-2.000000,76.000000,-18.000000,-20.000000,-12.000000,-2.000000,-1.000000,80.000000,-20.000000,-21.000000,-10.000000,-5.000000,3.000000,82.000000,-33.000000,-26.000000,-13.000000,-1.000000,-9.000000,68.000000,-2.000000,-22.000000,-4.000000
-2.000000,2.000000,20.000000,5.000000,-6.000000,-1.000000,-2.000000,1.000000,20.000000,5.000000,-6.000000,0.000000,-2.000000,1.000000,23.000000,5.000000,-6.000000,0.000000,-2.000000,2.000000,20.000000,5.000000,-6.000000,-1.000000,-3.000000,1.000000,22.000000,5.000000,-6.000000,-1.000000,-2.000000,2.000000,20.000000,5.000000,-6.000000,-1.000000,-2.000000,2.000000,20.000000,5.000000,-6.000000,-1.000000,-3.000000,3.000000,20.000000,5.000000,-6.000000,-1.000000,-2.000000,1.000000,13.000000,7.000000,-3.000000,-1.000000,-3.000000,1.000000,4.000000,3.000000,-7.000000,0.000000,-1.000000,0.000000,13.000000,4.000000,-5.000000,-1.000000,-4.000000,3.000000,15.000000,2.000000,-11.000000,-1.000000,-1.000000,2.000000,29.000000,7.000000,-6.000000,-1.000000,1.000000,-1.000000,23.000000,9.000000,2.000000,0.000000,-2.000000,2.000000,29.000000,3.000000,-6.000000,0.000000
-3.000000,2.000000,22.000000,5.000000,-8.000000,0.000000,-2.000000,2.000000,19.000000,5.000000,-7.000000,0.000000,-3.000000,2.000000,23.000000,5.000000,-7.000000,0.000000,-2.000000,2.000000,22.000000,5.000000,-6.000000,-1.000000,-2.000000,2.000000,20.000000,5.000000,-6.000000,-1.000000,-2.000000,2.000000,23.000000,5.000000,-7.000000,-1.000000,-2.000000,2.000000,20.000000,5.000000,-7.000000,-1.000000,-2.000000,2.000000,22.000000,4.000000,-7.000000,-1.000000,0.000000,1.000000,23.000000,8.000000,-4.000000,0.000000,3.000000,0.000000,28.000000,6.000000,1.000000,-1.000000,0.000000,2.000000,18.000000,7.000000,-5.000000,-1.000000,-2.000000,3.000000,20.000000,2.000000,-7.000000,0.000000,-4.000000,3.000000,22.000000,4.000000,-11.000000,-1.000000,-2.000000,2.000000,6.000000,5.000000,-8.000000,-1.000000,-3.000000,3.000000,24.000000,4.000000,-8.000000,-1.000000
-2.000000,2.000000,20.000000,5.000000,-6.000000,-1.000000,-2.000000,2.000000,19.000000,5.000000,-7.000000,0.000000,-2.000000,2.000000,20.000000,5.000000,-6.000000,-1.000000,-2.000000,2.000000,20.000000,5.000000,-6.000000,-1.000000,-2.000000,2.000000,22.000000,4.000000,-7.000000,-1.000000,-2.000000,2.000000,22.000000,4.000000,-7.000000,-1.000000,-2.000000,2.000000,22.000000,5.000000,-6.000000,-1.000000,-2.000000,2.000000,22.000000,5.000000,-5.000000,-1.000000,-2.000000,2.000000,24.000000,2.000000,-6.000000,0.000000,0.000000,0.000000,22.000000,4.000000,-3.000000,-1.000000,-2.000000,1.000000,31.000000,2.000000,-6.000000,-1.000000,-3.000000,2.000000,23.000000,4.000000,-8.000000,-2.000000,-2.000000,1.000000,29.000000,7.000000,-5.000000,-2.000000,-2.000000,2.000000,41.000000,4.000000,-5.000000,-2.000000,-3.000000,1.000000,3.000000,5.000000,-6.000000,0.000000
-2.000000,2.000000,20.000000,5.000000,-6.000000,-1.000000,-3.000000,1.000000,18.000000,4.000000,-6.000000,0.000000,-2.000000,2.000000,20.000000,5.000000,-6.000000,-1.000000,-2.000000,2.000000,20.000000,5.000000,-6.000000,-1.000000,-2.000000,1.000000,20.000000,5.000000,-6.000000,0.000000,-3.000000,1.000000,20.000000,5.000000,-6.000000,-1.000000,-2.000000,2.000000,20.000000,3.000000,-6.000000,-1.000000,-2.000000,2.000000,20.000000,5.000000,-6.000000,-1.000000,-3.000000,1.000000,9.000000,7.000000,-8.000000,0.000000,-4.000000,2.000000,11.000000,3.000000,-7.000000,-1.000000,-6.000000,3.000000,23.000000,-2.000000,-11.000000,0.000000,-3.000000,0.000000,18.000000,7.000000,-10.000000,0.000000,-4.000000,0.000000,17.000000,4.000000,-9.000000,-1.000000,-4.000000,0.000000,32.000000,4.000000,-10.000000,-1.000000,-2.000000,1.000000,30.000000,5.000000,-5.000000,-1.000000
-2.000000,2.000000,20.000000,4.000000,-7.000000,-1.000000,-2.000000,1.000000,19.000000,5.000000,-7.000000,0.000000,-2.000000,1.000000,22.000000,5.000000,-6.000000,0.000000,-2.000000,1.000000,20.000000,5.000000,-6.000000,0.000000,-2.000000,2.000000,22.000000,4.000000,-7.000000,-1.000000,-2.000000,2.000000,20.000000,5.000000,-6.000000,-1.000000,-2.000000,2.000000,20.000000,5.000000,-7.000000,-1.000000,-2.000000,1.000000,19.000000,5.000000,-7.000000,0.000000,0.000000,0.000000,16.000000,4.000000,-1.000000,0.000000,-3.000000,2.000000,20.000000,3.000000,-6.000000,0.000000,-1.000000,0.000000,25.000000,4.000000,-6.000000,-1.000000,-2.000000,1.000000,28.000000,5.000000,-4.000000,0.000000,-2.000000,4.000000,12.000000,-1.000000,-8.000000,-1.000000,-3.000000,0.000000,4.000000,6.000000,-8.000000,0.000000,-5.000000,4.000000,38.000000,-1.000000,-16.000000,-1.000000
-2.000000,1.000000,19.000000,4.000000,-6.000000,0.000000,-2.000000,1.000000,19.000000,4.000000,-6.000000,0.000000,-2.000000,2.000000,20.000000,4.000000,-5.000000,-1.000000,-2.000000,2.000000,20.000000,4.000000,-5.000000,-1.000000,-2.000000,1.000000,20.000000,3.000000,-6.000000,0.000000,-2.000000,1.000000,21.000000,4.000000,-5.000000,0.000000,-2.000000,1.000000,19.000000,4.000000,-6.000000,0.000000,-2.000000,2.000000,19.000000,4.000000,-6.000000,-1.000000,-4.000000,3.000000,19.000000,0.000000,-10.000000,0.000000,-2.000000,1.000000,6.000000,3.000000,-6.000000,0.000000,-2.000000,1.000000,18.000000,5.000000,-4.000000,-1.000000,-2.000000,0.000000,20.000000,4.000000,-3.000000,-2.000000,-3.000000,3.000000,32.000000,2.000000,-6.000000,-1.000000,-2.000000,1.000000,14.000000,3.000000,-4.000000,0.000000,0.000000,0.000000,23.000000,6.000000,-3.000000,-1.000000
-1.000000,1.000000,19.000000,4.000000,-6.000000,0.000000,-2.000000,2.000000,18.000000,4.000000,-6.000000,0.000000,-2.000000,1.000000,19.000000,4.000000,-6.000000,0.000000,-2.000000,1.000000,19.000000,4.000000,-6.000000,0.000000,-2.000000,1.000000,19.000000,4.000000,-6.000000,0.000000,-2.000000,1.000000,19.000000,4.000000,-6.000000,0.000000,-2.000000,1.000000,21.000000,5.000000,-6.000000,0.000000,-1.000000,2.000000,21.000000,3.000000,-6.000000,1.000000,0.000000,1.000000,14.000000,1.000000,-3.000000,0.000000,-6.000000,5.000000,11.000000,-4.000000,-12.000000,0.000000,-4.000000,1.000000,4.000000,5.000000,-6.000000,0.000000,0.000000,0.000000,2.000000,8.000000,-5.000000,0.000000,0.000000,1.000000,36.000000,3.000000,-1.000000,-1.000000,0.000000,1.000000,14.000000,5.000000,-3.000000,1.000000,1.000000,0.000000,24.000000,11.000000,-2.000000,0.000000
-2.000000,1.000000,19.000000,4.000000,-6.000000,0.000000,-2.000000,1.000000,18.000000,3.000000,-5.000000,0.000000,-2.000000,1.000000,19.000000,3.000000,-5.000000,0.000000,-2.000000,1.000000,19.000000,3.000000,-5.000000,0.000000,-2.000000,1.000000,23.000000,3.000000,-5.000000,0.000000,-2.000000,1.000000,21.000000,4.000000,-5.000000,0.000000,-2.000000,1.000000,19.000000,4.000000,-6.000000,0.000000,-1.000000,2.000000,21.000000,4.000000,-5.000000,1.000000,-3.000000,2.000000,3.000000,-2.000000,-8.000000,2.000000,0.000000,0.000000,24.000000,6.000000,-4.000000,0.000000,-2.000000,1.000000,16.000000,4.000000,-3.000000,0.000000,-5.000000,4.000000,26.000000,-1.000000,-6.000000,1.000000,-3.000000,1.000000,19.000000,4.000000,-6.000000,0.000000,-2.000000,0.000000,33.000000,6.000000,-6.000000,0.000000,-2.000000,1.000000,16.000000,5.000000,-9.000000,0.000000
-1.000000,2.000000,21.000000,3.000000,-6.000000,1.000000,-1.000000,2.000000,17.000000,3.000000,-6.000000,0.000000,-1.000000,2.000000,21.000000,3.000000,-6.000000,1.000000,-1.000000,2.000000,19.000000,4.000000,-6.000000,1.000000,-1.000000,2.000000,19.000000,4.000000,-6.000000,1.000000,-1.000000,2.000000,21.000000,3.000000,-6.000000,1.000000,-1.000000,2.000000,19.000000,4.000000,-6.000000,1.000000,-1.000000,2.000000,19.000000,4.000000,-6.000000,1.000000,-4.000000,4.000000,12.000000,1.000000,-11.000000,0.000000,-2.000000,2.000000,21.000000,2.000000,-7.000000,0.000000,1.000000,0.000000,29.000000,7.000000,-1.000000,0.000000,-1.000000,2.000000,17.000000,3.000000,-5.000000,0.000000,2.000000,0.000000,32.000000,6.000000,-1.000000,0.000000,0.000000,3.000000,17.000000,2.000000,-10.000000,1.000000,2.000000,0.000000,17.000000,8.000000,-1.000000,0.000000
0.000000,1.000000,15.000000,3.000000,-3.000000,0.000000,-2.000000,1.000000,32.000000,3.000000,-7.000000,0.000000,-1.000000,2.000000,4.000000,-1.000000,-7.000000,0.000000,-1.000000,1.000000,6.000000,5.000000,-6.000000,0.000000,2.000000,0.000000,17.000000,3.000000,-4.000000,0.000000,2.000000,0.000000,28.000000,3.000000,-3.000000,-1.000000,2.000000,1.000000,24.000000,1.000000,-4.000000,0.000000,0.000000,1.000000,17.000000,3.000000,-4.000000,1.000000,-1.000000,2.000000,18.000000,-2.000000,-6.000000,0.000000,0.000000,0.000000,16.000000,1.000000,-6.000000,-1.000000,-1.000000,2.000000,11.000000,-1.000000,-8.000000,0.000000,0.000000,0.000000,18.000000,1.000000,-5.000000,-1.000000,0.000000,0.000000,11.000000,2.000000,-8.000000,0.000000,-1.000000,0.000000,-1.000000,3.000000,-4.000000,0.000000,1.000000,0.000000,25.000000,0.000000,-3.000000,-2.000000
-1.000000,2.000000,4.000000,-1.000000,-7.000000,0.000000,-1.000000,1.000000,6.000000,5.000000,-6.000000,0.000000,2.000000,0.000000,17.000000,3.000000,-4.000000,0.000000,2.000000,0.000000,28.000000,3.000000,-3.000000,-1.000000,2.000000,1.000000,24.000000,1.000000,-4.000000,0.000000,0.000000,1.000000,17.000000,3.000000,-4.000000,1.000000,-1.000000,2.000000,18.000000,-2.000000,-6.000000,0.000000,0.000000,0.000000,16.000000,1.000000,-6.000000,-1.000000,-1.000000,2.000000,11.000000,-1.000000,-8.000000,0.000000,0.000000,0.000000,18.000000,1.000000,-5.000000,-1.000000,0.000000,0.000000,11.000000,2.000000,-8.000000,0.000000,-1.000000,0.000000,-1.000000,3.000000,-4.000000,0.000000,1.000000,0.000000,25.000000,0.000000,-3.000000,-2.000000,0.000000,0.000000,22.000000,2.000000,-6.000000,0.000000,1.000000,0.000000,17.000000,2.000000,-4.000000,0.000000
2.000000,0.000000,17.000000,3.000000,-4.000000,0.000000,2.000000,0.000000,28.000000,3.000000,-3.000000,-1.000000,2.000000,1.000000,24.000000,1.000000,-4.000000,0.000000,0.000000,1.000000,17.000000,3.000000,-4.000000,1.000000,-1.000000,2.000000,18.000000,-2.000000,-6.000000,0.000000,0.000000,0.000000,16.000000,1.000000,-6.000000,-1.000000,-1.000000,2.000000,11.000000,-1.000000,-8.000000,0.000000,0.000000,0.000000,18.000000,1.000000,-5.000000,-1.000000,0.000000,0.000000,11.000000,2.000000,-8.000000,0.000000,-1.000000,0.000000,-1.000000,3.000000,-4.000000,0.000000,1.000000,0.000000,25.000000,0.000000,-3.000000,-2.000000,0.000000,0.000000,22.000000,2.000000,-6.000000,0.000000,1.000000,0.000000,17.000000,2.000000,-4.000000,0.000000,2.000000,0.000000,27.000000,3.000000,-2.000000,-1.000000,2.000000,0.000000,15.000000,0.000000,-4.000000,-1.000000
0.000000,1.000000,17.000000,3.000000,-4.000000,1.000000,-1.000000,2.000000,18.000000,-2.000000,-6.000000,0.000000,0.000000,0.000000,16.000000,1.000000,-6.000000,-1.000000,-1.000000,2.000000,11.000000,-1.000000,-8.000000,0.000000,0.000000,0.000000,18.000000,1.000000,-5.000000,-1.000000,0.000000,0.000000,11.000000,2.000000,-8.000000,0.000000,-1.000000,0.000000,-1.000000,3.000000,-4.000000,0.000000,1.000000,0.000000,25.000000,0.000000,-3.000000,-2.000000,0.000000,0.000000,22.000000,2.000000,-6.000000,0.000000,1.000000,0.000000,17.000000,2.000000,-4.000000,0.000000,2.000000,0.000000,27.000000,3.000000,-2.000000,-1.000000,2.000000,0.000000,15.000000,0.000000,-4.000000,-1.000000,0.000000,0.000000,19.000000,-1.000000,-3.000000,-1.000000,0.000000,0.000000,11.000000,2.000000,-4.000000,0.000000,0.000000,0.000000,6.000000,0.000000,-7.000000,0.000000
-1.000000,2.000000,18.000000,-2.000000,-6.000000,0.000000,0.000000,0.000000,16.000000,1.000000,-6.000000,-1.000000,-1.000000,2.000000,11.000000,-1.000000,-8.000000,0.000000,0.000000,0.000000,18.000000,1.000000,-5.000000,-1.000000,0.000000,0.000000,11.000000,2.000000,-8.000000,0.000000,-1.000000,0.000000,-1.000000,3.000000,-4.000000,0.000000,1.000000,0.000000,25.000000,0.000000,-3.000000,-2.000000,0.000000,0.000000,22.000000,2.000000,-6.000000,0.000000,1.000000,0.000000,17.000000,2.000000,-4.000000,0.000000,2.000000,0.000000,27.000000,3.000000,-2.000000,-1.000000,2.000000,0.000000,15.000000,0.000000,-4.000000,-1.000000,0.000000,0.000000,19.000000,-1.000000,-3.000000,-1.000000,0.000000,0.000000,11.000000,2.000000,-4.000000,0.000000,0.000000,0.000000,6.000000,0.000000,-7.000000,0.000000,-1.000000,1.000000,13.000000,-4.000000,-9.000000,0.000000
0.000000,0.000000,16.000000,1.000000,-6.000000,-1.000000,-1.000000,2.000000,11.000000,-1.000000,-8.000000,0.000000,0.000000,0.000000,18.000000,1.000000,-5.000000,-1.000000,0.000000,0.000000,11.000000,2.000000,-8.000000,0.000000,-1.000000,0.000000,-1.000000,3.000000,-4.000000,0.000000,1.000000,0.000000,25.000000,0.000000,-3.000000,-2.000000,0.000000,0.000000,22.000000,2.000000,-6.000000,0.000000,1.000000,0.000000,17.000000,2.000000,-4.000000,0.000000,2.000000,0.000000,27.000000,3.000000,-2.000000,-1.000000,2.000000,0.000000,15.000000,0.000000,-4.000000,-1.000000,0.000000,0.000000,19.000000,-1.000000,-3.000000,-1.000000,0.000000,0.000000,11.000000,2.000000,-4.000000,0.000000,0.000000,0.000000,6.000000,0.000000,-7.000000,0.000000,-1.000000,1.000000,13.000000,-4.000000,-9.000000,0.000000,-2.000000,0.000000,10.000000,-4.000000,-7.000000,-2.000000
-1.000000,2.000000,18.000000,4.000000,-7.000000,0.000000,-1.000000,2.000000,18.000000,4.000000,-7.000000,1.000000,-1.000000,2.000000,19.000000,4.000000,-6.000000,1.000000,-1.000000,2.000000,19.000000,4.000000,-6.000000,1.000000,-1.000000,2.000000,19.000000,4.000000,-6.000000,1.000000,-1.000000,2.000000,19.000000,4.000000,-6.000000,1.000000,-2.000000,1.000000,19.000000,4.000000,-6.000000,0.000000,-1.000000,2.000000,18.000000,5.000000,-6.000000,0.000000,-1.000000,2.000000,24.000000,2.000000,-9.000000,0.000000,0.000000,-1.000000,16.000000,9.000000,-3.000000,-1.000000,-2.000000,3.000000,29.000000,4.000000,-8.000000,-1.000000,-1.000000,0.000000,-4.000000,4.000000,-6.000000,0.000000,-2.000000,1.000000,23.000000,2.000000,-12.000000,0.000000,0.000000,1.000000,8.000000,4.000000,-2.000000,0.000000,0.000000,1.000000,17.000000,1.000000,-3.000000,0.000000
-2.000000,1.000000,19.000000,3.000000,-6.000000,0.000000,-2.000000,1.000000,18.000000,3.000000,-5.000000,1.000000,-2.000000,1.000000,21.000000,3.000000,-5.000000,0.000000,-2.000000,0.000000,19.000000,3.000000,-5.000000,1.000000,-2.000000,1.000000,21.000000,3.000000,-5.000000,0.000000,-2.000000,1.000000,18.000000,3.000000,-5.000000,0.000000,-2.000000,1.000000,19.000000,3.000000,-5.000000,0.000000,-2.000000,0.000000,19.000000,2.000000,-6.000000,1.000000,-2.000000,0.000000,19.000000,1.000000,-4.000000,1.000000,-5.000000,4.000000,18.000000,-3.000000,-12.000000,1.000000,-2.000000,0.000000,14.000000,3.000000,-4.000000,0.000000,-2.000000,1.000000,26.000000,2.000000,-6.000000,0.000000,-4.000000,2.000000,11.000000,0.000000,-9.000000,0.000000,-2.000000,0.000000,22.000000,4.000000,-4.000000,-1.000000,-1.000000,1.000000,17.000000,0.000000,-6.000000,1.000000
-2.000000,1.000000,19.000000,4.000000,-6.000000,0.000000,-2.000000,1.000000,18.000000,4.000000,-7.000000,0.000000,-2.000000,1.000000,20.000000,5.000000,-6.000000,0.000000,-2.000000,1.000000,21.000000,5.000000,-6.000000,0.000000,-2.000000,1.000000,21.000000,3.000000,-6.000000,0.000000,-2.000000,1.000000,21.000000,5.000000,-6.000000,0.000000,-2.000000,1.000000,21.000000,5.000000,-6.000000,0.000000,-2.000000,1.000000,21.000000,3.000000,-6.000000,0.000000,-2.000000,2.000000,15.000000,2.000000,-9.000000,0.000000,-1.000000,1.000000,29.000000,3.000000,-3.000000,-1.000000,0.000000,1.000000,23.000000,4.000000,-4.000000,0.000000,0.000000,-1.000000,3.000000,5.000000,-5.000000,0.000000,0.000000,0.000000,26.000000,5.000000,-4.000000,0.000000,-2.000000,4.000000,19.000000,1.000000,-11.000000,-1.000000,0.000000,0.000000,24.000000,2.000000,-3.000000,-1.000000
-4.000000,0.000000,8.000000,5.000000,-1.000000,2.000000,-4.000000,0.000000,7.000000,5.000000,-2.000000,3.000000,-4.000000,1.000000,7.000000,5.000000,-2.000000,2.000000,-4.000000,0.000000,9.000000,6.000000,-2.000000,2.000000,-5.000000,1.000000,5.000000,5.000000,-1.000000,2.000000,-4.000000,0.000000,7.000000,5.000000,-2.000000,2.000000,-5.000000,1.000000,8.000000,6.000000,-2.000000,2.000000,-4.000000,1.000000,8.000000,5.000000,-1.000000,2.000000,-4.000000,0.000000,9.000000,5.000000,0.000000,0.000000,-3.000000,-1.000000,10.000000,7.000000,-3.000000,1.000000,-1.000000,-1.000000,13.000000,8.000000,7.000000,1.000000,-6.000000,2.000000,15.000000,4.000000,-4.000000,1.000000,-5.000000,0.000000,8.000000,8.000000,-1.000000,1.000000,-3.000000,1.000000,22.000000,7.000000,1.000000,0.000000,-4.000000,1.000000,-6.000000,2.000000,-2.000000,1.000000
-4.000000,1.000000,7.000000,5.000000,-2.000000,2.000000,-4.000000,0.000000,7.000000,5.000000,-2.000000,3.000000,-4.000000,1.000000,8.000000,6.000000,-3.000000,2.000000,-5.000000,1.000000,8.000000,6.000000,-2.000000,2.000000,-5.000000,1.000000,7.000000,5.000000,-2.000000,2.000000,-4.000000,1.000000,7.000000,5.000000,-2.000000,2.000000,-4.000000,1.000000,7.000000,5.000000,-2.000000,2.000000,-5.000000,1.000000,7.000000,5.000000,-2.000000,2.000000,-4.000000,1.000000,8.000000,5.000000,1.000000,1.000000,-4.000000,2.000000,7.000000,3.000000,-1.000000,2.000000,-4.000000,2.000000,3.000000,4.000000,-1.000000,2.000000,-4.000000,1.000000,0.000000,4.000000,0.000000,2.000000,-5.000000,1.000000,0.000000,4.000000,1.000000,1.000000,-3.000000,1.000000,2.000000,4.000000,-1.000000,2.000000,-3.000000,3.000000,18.000000,2.000000,1.000000,0.000000
-4.000000,0.000000,7.000000,5.000000,-2.000000,2.000000,-4.000000,0.000000,5.000000,5.000000,-3.000000,2.000000,-4.000000,0.000000,7.000000,5.000000,-2.000000,2.000000,-4.000000,0.000000,9.000000,5.000000,-3.000000,2.000000,-4.000000,1.000000,8.000000,5.000000,-3.000000,2.000000,-4.000000,0.000000,8.000000,4.000000,-3.000000,2.000000,-4.000000,0.000000,8.000000,6.000000,-3.000000,2.000000,-4.000000,0.000000,8.000000,4.000000,-3.000000,2.000000,-1.000000,-1.000000,17.000000,6.000000,4.000000,1.000000,-4.000000,-1.000000,12.000000,5.000000,1.000000,2.000000,-4.000000,-1.000000,7.000000,6.000000,-2.000000,0.000000,-3.000000,-1.000000,2.000000,8.000000,2.000000,1.000000,-4.000000,1.000000,2.000000,-1.000000,-5.000000,1.000000,-3.000000,-2.000000,7.000000,7.000000,-1.000000,1.000000,-3.000000,1.000000,-2.000000,3.000000,-4.000000,1.000000
-4.000000,0.000000,8.000000,6.000000,-2.000000,2.000000,-4.000000,0.000000,5.000000,5.000000,-3.000000,2.000000,-4.000000,0.000000,7.000000,5.000000,-2.000000,2.000000,-4.000000,0.000000,8.000000,4.000000,-3.000000,2.000000,-3.000000,0.000000,8.000000,4.000000,-2.000000,3.000000,-4.000000,0.000000,7.000000,5.000000,-2.000000,2.000000,-4.000000,0.000000,8.000000,4.000000,-2.000000,2.000000,-4.000000,0.000000,8.000000,6.000000,-2.000000,2.000000,-6.000000,0.000000,14.000000,4.000000,-4.000000,1.000000,-3.000000,0.000000,2.000000,4.000000,-1.000000,1.000000,-2.000000,-1.000000,1.000000,7.000000,2.000000,1.000000,-4.000000,1.000000,2.000000,3.000000,-5.000000,1.000000,-6.000000,0.000000,8.000000,3.000000,-7.000000,1.000000,-3.000000,-1.000000,2.000000,6.000000,2.000000,1.000000,-5.000000,2.000000,2.000000,0.000000,-5.000000,1.000000
-4.000000,0.000000,8.000000,6.000000,-2.000000,2.000000,-4.000000,1.000000,7.000000,6.000000,-3.000000,2.000000,-4.000000,0.000000,8.000000,6.000000,-2.000000,2.000000,-4.000000,1.000000,8.000000,6.000000,-3.000000,2.000000,-4.000000,1.000000,7.000000,5.000000,-2.000000,2.000000,-5.000000,1.000000,8.000000,6.000000,-2.000000,2.000000,-4.000000,0.000000,8.000000,6.000000,-2.000000,2.000000,-4.000000,1.000000,7.000000,5.000000,-2.000000,2.000000,-4.000000,1.000000,5.000000,6.000000,-2.000000,2.000000,-6.000000,1.000000,5.000000,5.000000,-6.000000,1.000000,-4.000000,0.000000,1.000000,5.000000,-5.000000,2.000000,-4.000000,1.000000,16.000000,7.000000,-1.000000,2.000000,-5.000000,1.000000,-5.000000,4.000000,-3.000000,1.000000,-5.000000,1.000000,7.000000,3.000000,-3.000000,1.000000,-2.000000,1.000000,2.000000,6.000000,3.000000,0.000000
-4.000000,0.000000,7.000000,5.000000,0.000000,2.000000,-5.000000,0.000000,5.000000,4.000000,-1.000000,2.000000,-4.000000,0.000000,9.000000,4.000000,-2.000000,2.000000,-5.000000,-1.000000,8.000000,6.000000,-2.000000,2.000000,-4.000000,-1.000000,9.000000,5.000000,-1.000000,2.000000,-4.000000,0.000000,8.000000,4.000000,-1.000000,1.000000,-4.000000,0.000000,8.000000,4.000000,-2.000000,2.000000,-4.000000,0.000000,8.000000,4.000000,0.000000,2.000000,-4.000000,1.000000,5.000000,1.000000,-2.000000,1.000000,-4.000000,-1.000000,5.000000,7.000000,-2.000000,2.000000,-6.000000,0.000000,10.000000,2.000000,-4.000000,0.000000,-4.000000,0.000000,17.000000,4.000000,0.000000,1.000000,-3.000000,-1.000000,9.000000,6.000000,3.000000,1.000000,-4.000000,0.000000,-3.000000,5.000000,-4.000000,1.000000,-2.000000,1.000000,4.000000,2.000000,2.000000,1.000000
-17.000000,-30.000000,-163.000000,-115.000000,33.000000,3.000000,-21.000000,-40.000000,-431.000000,-90.000000,96.000000,7.000000,-18.000000,-93.000000,-1175.000000,-231.000000,258.000000,10.000000,35.000000,-33.000000,13.000000,49.000000,51.000000,-3.000000,18.000000,-13.000000,24.000000,17.000000,22.000000,-2.000000,7.000000,-2.000000,16.000000,0.000000,6.000000,-2.000000,2.000000,0.000000,5.000000,-4.000000,-1.000000,-2.000000,7.000000,-2.000000,16.000000,0.000000,6.000000,-2.000000,-14.000000,6.000000,7.000000,-12.000000,-25.000000,-2.000000,7.000000,-2.000000,16.000000,0.000000,6.000000,-2.000000,2.000000,0.000000,5.000000,-4.000000,-1.000000,-2.000000,-14.000000,6.000000,7.000000,-12.000000,-25.000000,-2.000000,-14.000000,6.000000,7.000000,-12.000000,-25.000000,-2.000000,2.000000,0.000000,5.000000,-4.000000,-1.000000,-2.000000,-14.000000,6.000000,7.000000,-12.000000,-25.000000,-2.000000
16.000000,-39.000000,-170.000000,-20.000000,-116.000000,17.000000,17.000000,-32.000000,-165.000000,-32.000000,-101.000000,15.000000,23.000000,-29.000000,-156.000000,-45.000000,-91.000000,11.000000,24.000000,-34.000000,-169.000000,-38.000000,-93.000000,10.000000,24.000000,-36.000000,-164.000000,-36.000000,-96.000000,12.000000,29.000000,-37.000000,-177.000000,-39.000000,-86.000000,10.000000,28.000000,-42.000000,-224.000000,-41.000000,-74.000000,8.000000,87.000000,-117.000000,-2112.000000,154.000000,81.000000,-150.000000,-28.000000,12.000000,151.000000,-49.000000,18.000000,27.000000,18.000000,2.000000,6.000000,26.000000,45.000000,-6.000000,36.000000,16.000000,-61.000000,-33.000000,52.000000,-5.000000,-35.000000,24.000000,-54.000000,-22.000000,-12.000000,5.000000,-6.000000,15.000000,-130.000000,95.000000,12.000000,-7.000000,-4.000000,12.000000,-135.000000,115.000000,31.000000,-10.000000,-10.000000,18.000000,-163.000000,108.000000,22.000000,-11.000000
29.000000,-19.000000,-178.000000,-71.000000,-70.000000,-8.000000,37.000000,-33.000000,-216.000000,-64.000000,-74.000000,-1.000000,36.000000,-39.000000,-208.000000,-32.000000,-72.000000,7.000000,31.000000,-38.000000,-196.000000,-28.000000,-71.000000,7.000000,33.000000,-37.000000,-216.000000,-33.000000,-46.000000,1.000000,36.000000,-36.000000,-195.000000,-36.000000,-37.000000,2.000000,35.000000,-37.000000,-201.000000,-36.000000,-45.000000,2.000000,-29.000000,6.000000,-155.000000,72.000000,116.000000,-15.000000,-29.000000,5.000000,-142.000000,77.000000,115.000000,-20.000000,-23.000000,-4.000000,-125.000000,89.000000,106.000000,-21.000000,-1.000000,-25.000000,-629.000000,156.000000,247.000000,-21.000000,4.000000,-45.000000,-1387.000000,229.000000,402.000000,-43.000000,2.000000,9.000000,-81.000000,71.000000,39.000000,-7.000000,2.000000,7.000000,-85.000000,75.000000,34.000000,-1.000000,1.000000,11.000000,-106.000000,87.000000,42.000000,-4.000000
59.000000,-59.000000,-332.000000,27.000000,-50.000000,20.000000,45.000000,-71.000000,-316.000000,62.000000,-79.000000,25.000000,38.000000,-64.000000,-284.000000,63.000000,-78.000000,32.000000,10.000000,-75.000000,-244.000000,108.000000,-136.000000,48.000000,44.000000,-38.000000,-293.000000,-17.000000,-75.000000,7.000000,86.000000,-45.000000,-469.000000,-52.000000,-92.000000,0.000000,87.000000,-86.000000,-655.000000,-9.000000,-193.000000,21.000000,216.000000,-44.000000,0.000000,48.000000,299.000000,1.000000,99.000000,-39.000000,56.000000,45.000000,135.000000,-1.000000,84.000000,-48.000000,22.000000,68.000000,116.000000,-1.000000,85.000000,-29.000000,5.000000,36.000000,122.000000,-7.000000,31.000000,3.000000,13.000000,-10.000000,50.000000,-7.000000,10.000000,3.000000,6.000000,-4.000000,27.000000,-3.000000,8.000000,-12.000000,0.000000,15.000000,21.000000,-2.000000,-9.000000,4.000000,-12.000000,-9.000000,-2.000000,-3.000000
52.000000,-4.000000,-222.000000,52.000000,-39.000000,9.000000,48.000000,-5.000000,-203.000000,52.000000,-37.000000,16.000000,19.000000,13.000000,-109.000000,-11.000000,-30.000000,-11.000000,24.000000,3.000000,-87.000000,-5.000000,-11.000000,-1.000000,23.000000,7.000000,-75.000000,-6.000000,-13.000000,-1.000000,19.000000,7.000000,-85.000000,-9.000000,-16.000000,-1.000000,41.000000,10.000000,-273.000000,-19.000000,33.000000,-9.000000,-5.000000,-4.000000,-4.000000,-2.000000,-5.000000,1.000000,-6.000000,-4.000000,-2.000000,0.000000,-9.000000,1.000000,-39.000000,-13.000000,-193.000000,-25.000000,-26.000000,11.000000,-33.000000,-6.000000,-193.000000,-50.000000,-12.000000,13.000000,-29.000000,-3.000000,-195.000000,-56.000000,-7.000000,10.000000,-14.000000,-4.000000,-74.000000,19.000000,-41.000000,2.000000,3.000000,-12.000000,-207.000000,7.000000,37.000000,5.000000,4.000000,-20.000000,-406.000000,63.000000,68.000000,1.000000
2.000000,2.000000,26.000000,2.000000,1.000000,-1.000000,-1.000000,3.000000,1.000000,-7.000000,9.000000,2.000000,-1.000000,2.000000,-3.000000,1.000000,9.000000,0.000000,1.000000,4.000000,-5.000000,-2.000000,10.000000,1.000000,1.000000,4.000000,-3.000000,-2.000000,10.000000,-3.000000,-6.000000,16.000000,-29.000000,-16.000000,20.000000,12.000000,-7.000000,12.000000,-17.000000,-13.000000,21.000000,8.000000,-20.000000,16.000000,-51.000000,-2.000000,24.000000,5.000000,-13.000000,16.000000,-45.000000,-1.000000,42.000000,12.000000,-9.000000,17.000000,-53.000000,-4.000000,49.000000,12.000000,-9.000000,19.000000,-54.000000,-11.000000,51.000000,12.000000,-8.000000,22.000000,-52.000000,-16.000000,55.000000,26.000000,0.000000,0.000000,-9.000000,-14.000000,-11.000000,-4.000000,0.000000,-1.000000,-3.000000,-9.000000,-11.000000,-1.000000,1.000000,4.000000,3.000000,-18.000000,-15.000000,-2.000000
7.000000,-10.000000,-179.000000,-11.000000,18.000000,4.000000,7.000000,-8.000000,-176.000000,-13.000000,18.000000,1.000000,8.000000,-8.000000,-180.000000,-14.000000,21.000000,1.000000,8.000000,-10.000000,-179.000000,-13.000000,21.000000,3.000000,7.000000,-9.000000,-169.000000,-12.000000,19.000000,4.000000,6.000000,-9.000000,-165.000000,-13.000000,18.000000,4.000000,7.000000,-10.000000,-170.000000,-13.000000,18.000000,6.000000,-2.000000,-6.000000,-133.000000,-40.000000,-8.000000,-18.000000,-5.000000,-6.000000,-134.000000,-54.000000,-10.000000,-23.000000,-9.000000,0.000000,-201.000000,-70.000000,-6.000000,-25.000000,6.000000,-9.000000,-165.000000,-13.000000,18.000000,4.000000,7.000000,-10.000000,-170.000000,-13.000000,18.000000,6.000000,-2.000000,-6.000000,-133.000000,-40.000000,-8.000000,-18.000000,-5.000000,-6.000000,-134.000000,-54.000000,-10.000000,-23.000000,-9.000000,0.000000,-201.000000,-70.000000,-6.000000,-25.000000
-2.000000,0.000000,15.000000,0.000000,-2.000000,2.000000,0.000000,-1.000000,-4.000000,1.000000,-1.000000,2.000000,-1.000000,-1.000000,2.000000,1.000000,1.000000,2.000000,-1.000000,1.000000,-5.000000,-3.000000,-4.000000,2.000000,1.000000,-2.000000,-4.000000,3.000000,-2.000000,2.000000,-1.000000,-1.000000,-3.000000,1.000000,-1.000000,2.000000,1.000000,0.000000,-2.000000,-1.000000,2.000000,1.000000,0.000000,1.000000,-4.000000,-4.000000,-1.000000,1.000000,1.000000,0.000000,0.000000,-1.000000,0.000000,1.000000,18.000000,2.000000,-112.000000,14.000000,-20.000000,4.000000,25.000000,10.000000,-93.000000,-6.000000,-7.000000,0.000000,36.000000,2.000000,-122.000000,10.000000,12.000000,5.000000,33.000000,0.000000,-108.000000,21.000000,7.000000,8.000000,3.000000,-2.000000,-13.000000,2.000000,11.000000,4.000000,4.000000,-3.000000,-17.000000,1.000000,9.000000,3.000000
-3.000000,3.000000,10.000000,-2.000000,-5.000000,2.000000,-1.000000,-1.000000,-2.000000,4.000000,0.000000,3.000000,-2.000000,1.000000,0.000000,0.000000,-5.000000,2.000000,1.000000,-1.000000,2.000000,5.000000,1.000000,2.000000,-2.000000,1.000000,0.000000,1.000000,-4.000000,2.000000,-1.000000,0.000000,7.000000,2.000000,-5.000000,2.000000,-2.000000,1.000000,8.000000,1.000000,-1.000000,2.000000,-1.000000,1.000000,3.000000,1.000000,-2.000000,2.000000,1.000000,-1.000000,-6.000000,3.000000,4.000000,1.000000,-3.000000,2.000000,-3.000000,-5.000000,-9.000000,3.000000,3.000000,-1.000000,-5.000000,5.000000,2.000000,3.000000,3.000000,0.000000,-10.000000,5.000000,4.000000,3.000000,5.000000,0.000000,-19.000000,2.000000,6.000000,3.000000,3.000000,1.000000,-12.000000,0.000000,6.000000,2.000000,4.000000,-6.000000,-17.000000,17.000000,2.000000,6.000000
-2.000000,0.000000,-7.000000,0.000000,-6.000000,2.000000,-1.000000,0.000000,-3.000000,0.000000,-3.000000,3.000000,-1.000000,2.000000,15.000000,1.000000,-6.000000,2.000000,-1.000000,1.000000,11.000000,0.000000,-3.000000,2.000000,-1.000000,-1.000000,-7.000000,5.000000,7.000000,1.000000,-1.000000,-1.000000,-12.000000,-2.000000,6.000000,2.000000,-1.000000,0.000000,-6.000000,-5.000000,6.000000,2.000000,-2.000000,1.000000,-3.000000,-5.000000,0.000000,3.000000,-2.000000,0.000000,0.000000,-1.000000,1.000000,1.000000,0.000000,1.000000,1.000000,-4.000000,4.000000,2.000000,0.000000,-1.000000,-2.000000,-3.000000,5.000000,2.000000,-2.000000,-11.000000,-73.000000,-4.000000,54.000000,-9.000000,-1.000000,-4.000000,-13.000000,8.000000,-4.000000,1.000000,-1.000000,-2.000000,-11.000000,4.000000,-4.000000,1.000000,-4.000000,-6.000000,-11.000000,14.000000,-8.000000,1.000000
-2.000000,-1.000000,-9.000000,-1.000000,3.000000,1.000000,-1.000000,0.000000,-11.000000,-3.000000,4.000000,3.000000,4.000000,-1.000000,-3.000000,0.000000,18.000000,1.000000,2.000000,1.000000,-6.000000,-8.000000,10.000000,2.000000,-2.000000,4.000000,-9.000000,-5.000000,-1.000000,1.000000,-18.000000,-7.000000,-55.000000,-5.000000,6.000000,2.000000,-18.000000,-6.000000,-55.000000,-6.000000,4.000000,4.000000,-19.000000,-4.000000,-61.000000,-14.000000,2.000000,4.000000,-20.000000,-18.000000,-313.000000,-20.000000,11.000000,-1.000000,-36.000000,-35.000000,-701.000000,-53.000000,133.000000,-9.000000,-86.000000,-45.000000,-931.000000,-94.000000,100.000000,-15.000000,10.000000,-9.000000,36.000000,19.000000,9.000000,-5.000000,3.000000,0.000000,-9.000000,-2.000000,2.000000,1.000000,4.000000,1.000000,-24.000000,-4.000000,4.000000,0.000000,3.000000,2.000000,-3.000000,-3.000000,1.000000,1.000000
1.000000,0.000000,0.000000,4.000000,-1.000000,2.000000,-1.000000,1.000000,4.000000,0.000000,-3.000000,2.000000,0.000000,0.000000,4.000000,0.000000,1.000000,3.000000,-9.000000,-2.000000,-11.000000,-2.000000,-3.000000,3.000000,-5.000000,-3.000000,-12.000000,-1.000000,2.000000,3.000000,-2.000000,-1.000000,-9.000000,-1.000000,3.000000,1.000000,-1.000000,0.000000,-11.000000,-3.000000,4.000000,3.000000,4.000000,-1.000000,-3.000000,0.000000,18.000000,1.000000,2.000000,1.000000,-6.000000,-8.000000,10.000000,2.000000,-2.000000,4.000000,-9.000000,-5.000000,-1.000000,1.000000,-18.000000,-7.000000,-55.000000,-5.000000,6.000000,2.000000,-18.000000,-6.000000,-55.000000,-6.000000,4.000000,4.000000,-19.000000,-4.000000,-61.000000,-14.000000,2.000000,4.000000,-20.000000,-18.000000,-313.000000,-20.000000,11.000000,-1.000000,-36.000000,-35.000000,-701.000000,-53.000000,133.000000,-9.000000
3.000000,0.000000,-9.000000,-2.000000,2.000000,1.000000,4.000000,1.000000,-24.000000,-4.000000,4.000000,0.000000,3.000000,2.000000,-3.000000,-3.000000,1.000000,1.000000,3.000000,1.000000,-6.000000,-1.000000,1.000000,1.000000,1.000000,-1.000000,-5.000000,-2.000000,-4.000000,1.000000,5.000000,1.000000,-19.000000,-7.000000,2.000000,0.000000,3.000000,-1.000000,-17.000000,1.000000,-4.000000,1.000000,4.000000,0.000000,-10.000000,0.000000,-1.000000,1.000000,7.000000,-8.000000,-171.000000,6.000000,9.000000,2.000000,8.000000,-11.000000,-178.000000,17.000000,18.000000,3.000000,7.000000,-11.000000,-172.000000,6.000000,7.000000,1.000000,9.000000,-12.000000,-191.000000,18.000000,16.000000,2.000000,6.000000,-11.000000,-136.000000,15.000000,10.000000,2.000000,5.000000,-11.000000,-135.000000,16.000000,10.000000,3.000000,6.000000,-11.000000,-136.000000,17.000000,10.000000,3.000000
0.000000,1.000000,-16.000000,3.000000,1.000000,4.000000,-2.000000,2.000000,18.000000,1.000000,-3.000000,2.000000,-1.000000,-1.000000,-2.000000,3.000000,-1.000000,2.000000,-2.000000,2.000000,12.000000,0.000000,-3.000000,3.000000,-1.000000,0.000000,3.000000,1.000000,-3.000000,1.000000,-1.000000,0.000000,-4.000000,3.000000,-4.000000,2.000000,-2.000000,2.000000,10.000000,1.000000,-1.000000,1.000000,2.000000,-2.000000,8.000000,5.000000,6.000000,2.000000,0.000000,0.000000,5.000000,-2.000000,1.000000,2.000000,1.000000,0.000000,5.000000,3.000000,0.000000,2.000000,1.000000,0.000000,3.000000,2.000000,0.000000,3.000000,17.000000,11.000000,-125.000000,-6.000000,-25.000000,-1.000000,33.000000,5.000000,-141.000000,2.000000,2.000000,1.000000,16.000000,-6.000000,-58.000000,17.000000,-5.000000,4.000000,18.000000,1.000000,-65.000000,3.000000,-9.000000,4.000000
-1.000000,2.000000,-2.000000,4.000000,-6.000000,3.000000,-2.000000,2.000000,-6.000000,1.000000,-8.000000,2.000000,0.000000,0.000000,-9.000000,4.000000,-4.000000,1.000000,-1.000000,2.000000,-13.000000,4.000000,-4.000000,4.000000,1.000000,0.000000,5.000000,7.000000,0.000000,2.000000,-1.000000,1.000000,-10.000000,2.000000,-3.000000,4.000000,1.000000,1.000000,4.000000,4.000000,0.000000,2.000000,1.000000,1.000000,-9.000000,3.000000,0.000000,2.000000,7.000000,0.000000,-51.000000,27.000000,-17.000000,13.000000,7.000000,0.000000,-58.000000,24.000000,-19.000000,6.000000,7.000000,3.000000,-48.000000,20.000000,-14.000000,7.000000,8.000000,3.000000,-40.000000,15.000000,-15.000000,4.000000,7.000000,3.000000,-35.000000,20.000000,-13.000000,7.000000,9.000000,5.000000,-45.000000,14.000000,-15.000000,6.000000,7.000000,1.000000,-33.000000,19.000000,-15.000000,4.000000
1.000000,0.000000,11.000000,-1.000000,9.000000,1.000000,0.000000,0.000000,-4.000000,-4.000000,4.000000,2.000000,-1.000000,-1.000000,7.000000,-1.000000,2.000000,2.000000,-2.000000,1.000000,-5.000000,-5.000000,-3.000000,2.000000,0.000000,0.000000,2.000000,-3.000000,3.000000,2.000000,0.000000,1.000000,2.000000,-4.000000,3.000000,1.000000,0.000000,0.000000,1.000000,-4.000000,1.000000,2.000000,-62.000000,30.000000,-48.000000,-40.000000,-87.000000,0.000000,-36.000000,24.000000,-25.000000,-35.000000,-40.000000,3.000000,-43.000000,25.000000,-6.000000,-38.000000,-48.000000,1.000000,-14.000000,13.000000,-18.000000,-21.000000,-9.000000,0.000000,-4.000000,10.000000,-20.000000,-16.000000,9.000000,2.000000,-22.000000,19.000000,10.000000,-46.000000,-28.000000,4.000000,-14.000000,13.000000,-20.000000,-27.000000,-10.000000,1.000000,0.000000,-1.000000,-5.000000,-10.000000,13.000000,1.000000
-4.000000,10.000000,-20.000000,-16.000000,9.000000,2.000000,-22.000000,19.000000,10.000000,-46.000000,-28.000000,4.000000,-14.000000,13.000000,-20.000000,-27.000000,-10.000000,1.000000,0.000000,-1.000000,-5.000000,-10.000000,13.000000,1.000000,-2.000000,-1.000000,-6.000000,-5.000000,4.000000,2.000000,-1.000000,-2.000000,-16.000000,-4.000000,1.000000,2.000000,-2.000000,-3.000000,-6.000000,-4.000000,1.000000,2.000000,-1.000000,-3.000000,-11.000000,-4.000000,3.000000,1.000000,5.000000,-18.000000,-138.000000,26.000000,15.000000,-7.000000,5.000000,-19.000000,-140.000000,27.000000,16.000000,-7.000000,5.000000,-18.000000,-140.000000,27.000000,16.000000,-7.000000,4.000000,-18.000000,-138.000000,26.000000,15.000000,-7.000000,4.000000,-18.000000,-138.000000,26.000000,15.000000,-7.000000,4.000000,-18.000000,-139.000000,26.000000,15.000000,-7.000000,5.000000,-18.000000,-138.000000,26.000000,16.000000,-7.000000
-1.000000,0.000000,-6.000000,2.000000,2.000000,3.000000,-2.000000,3.000000,-3.000000,2.000000,-8.000000,4.000000,-2.000000,-1.000000,-4.000000,8.000000,-10.000000,4.000000,1.000000,0.000000,-7.000000,3.000000,-4.000000,9.000000,3.000000,1.000000,-10.000000,5.000000,2.000000,3.000000,2.000000,4.000000,-45.000000,-5.000000,-25.000000,-3.000000,5.000000,4.000000,-31.000000,2.000000,-34.000000,-2.000000,7.000000,4.000000,-32.000000,6.000000,-28.000000,1.000000,4.000000,1.000000,-41.000000,7.000000,-33.000000,0.000000,2.000000,0.000000,-30.000000,11.000000,-31.000000,6.000000,6.000000,0.000000,-35.000000,11.000000,-27.000000,0.000000,6.000000,2.000000,-29.000000,10.000000,-22.000000,6.000000,0.000000,0.000000,1.000000,-2.000000,4.000000,2.000000,-2.000000,1.000000,17.000000,-2.000000,-1.000000,2.000000,1.000000,-1.000000,-10.000000,-1.000000,1.000000,1.000000
-2.000000,0.000000,-7.000000,-5.000000,-1.000000,1.000000,0.000000,0.000000,2.000000,-3.000000,3.000000,1.000000,-2.000000,2.000000,10.000000,-6.000000,1.000000,2.000000,11.000000,0.000000,-45.000000,12.000000,-7.000000,4.000000,0.000000,-1.000000,-37.000000,-9.000000,-4.000000,3.000000,-3.000000,-1.000000,-2.000000,-6.000000,-1.000000,1.000000,5.000000,-6.000000,-18.000000,1.000000,9.000000,2.000000,5.000000,-5.000000,-14.000000,1.000000,10.000000,1.000000,-4.000000,-1.000000,0.000000,-3.000000,0.000000,2.000000,-5.000000,-1.000000,-7.000000,-5.000000,-5.000000,2.000000,0.000000,-2.000000,-13.000000,-5.000000,5.000000,2.000000,3.000000,-16.000000,-185.000000,9.000000,22.000000,-5.000000,3.000000,-16.000000,-178.000000,9.000000,11.000000,-5.000000,3.000000,-14.000000,-144.000000,17.000000,19.000000,-3.000000,3.000000,-15.000000,-150.000000,10.000000,15.000000,-6.000000
1.000000,1.000000,-14.000000,4.000000,-3.000000,3.000000,1.000000,1.000000,-4.000000,6.000000,-1.000000,3.000000,0.000000,1.000000,-1.000000,7.000000,0.000000,8.000000,0.000000,1.000000,-11.000000,6.000000,-2.000000,4.000000,1.000000,2.000000,-2.000000,1.000000,-2.000000,2.000000,1.000000,-1.000000,-4.000000,8.000000,2.000000,4.000000,-1.000000,-1.000000,-6.000000,6.000000,-3.000000,4.000000,9.000000,3.000000,-28.000000,-1.000000,-16.000000,-2.000000,10.000000,5.000000,-45.000000,0.000000,-13.000000,1.000000,10.000000,4.000000,-31.000000,-1.000000,-12.000000,1.000000,10.000000,3.000000,-38.000000,3.000000,-11.000000,1.000000,8.000000,5.000000,-27.000000,-1.000000,-16.000000,2.000000,10.000000,1.000000,-31.000000,8.000000,-7.000000,6.000000,9.000000,3.000000,-16.000000,0.000000,-6.000000,0.000000,-1.000000,1.000000,-3.000000,-1.000000,0.000000,2.000000
8.000000,5.000000,-24.000000,1.000000,-1.000000,3.000000,9.000000,1.000000,-12.000000,5.000000,4.000000,3.000000,6.000000,-9.000000,-17.000000,13.000000,19.000000,-2.000000,4.000000,-5.000000,-6.000000,2.000000,17.000000,1.000000,-5.000000,-2.000000,2.000000,-5.000000,-3.000000,1.000000,4.000000,-5.000000,-6.000000,2.000000,17.000000,1.000000,0.000000,-3.000000,0.000000,-2.000000,4.000000,2.000000,-1.000000,3.000000,-10.000000,-14.000000,6.000000,1.000000,7.000000,5.000000,-21.000000,-18.000000,14.000000,2.000000,7.000000,1.000000,-17.000000,-8.000000,16.000000,1.000000,7.000000,-16.000000,-138.000000,29.000000,23.000000,-3.000000,6.000000,-15.000000,-131.000000,28.000000,21.000000,-4.000000,6.000000,-15.000000,-128.000000,29.000000,18.000000,-2.000000,6.000000,-16.000000,-133.000000,32.000000,19.000000,0.000000,6.000000,-16.000000,-133.000000,32.000000,19.000000,0.000000
-1.000000,-1.000000,-1.000000,-1.000000,5.000000,3.000000,-4.000000,-1.000000,0.000000,1.000000,0.000000,3.000000,-1.000000,0.000000,1.000000,-1.000000,2.000000,1.000000,1.000000,0.000000,-3.000000,2.000000,5.000000,4.000000,-2.000000,1.000000,-4.000000,1.000000,-3.000000,4.000000,-1.000000,-2.000000,-9.000000,3.000000,-1.000000,3.000000,4.000000,1.000000,-45.000000,10.000000,-21.000000,7.000000,4.000000,-2.000000,-38.000000,14.000000,-26.000000,4.000000,0.000000,-5.000000,-49.000000,21.000000,-34.000000,8.000000,-1.000000,-7.000000,-42.000000,23.000000,-32.000000,7.000000,1.000000,-6.000000,-21.000000,21.000000,-31.000000,7.000000,10.000000,-2.000000,-27.000000,11.000000,-4.000000,5.000000,7.000000,1.000000,-40.000000,5.000000,-14.000000,5.000000,9.000000,-1.000000,-35.000000,10.000000,-7.000000,7.000000,12.000000,0.000000,-34.000000,6.000000,0.000000,6.000000
3.000000,-1.000000,-7.000000,7.000000,6.000000,5.000000,3.000000,4.000000,-45.000000,-1.000000,-21.000000,2.000000,8.000000,7.000000,-58.000000,-3.000000,-28.000000,0.000000,11.000000,6.000000,-35.000000,-3.000000,-14.000000,-2.000000,12.000000,4.000000,-56.000000,-2.000000,-12.000000,0.000000,11.000000,3.000000,-46.000000,4.000000,-7.000000,4.000000,24.000000,6.000000,-40.000000,-4.000000,20.000000,4.000000,20.000000,5.000000,-39.000000,-5.000000,16.000000,2.000000,19.000000,3.000000,-40.000000,0.000000,18.000000,5.000000,18.000000,1.000000,-34.000000,2.000000,18.000000,5.000000,-12.000000,4.000000,-23.000000,15.000000,43.000000,31.000000,35.000000,-32.000000,-1.000000,115.000000,-23.000000,22.000000,25.000000,-29.000000,1.000000,49.000000,8.000000,7.000000,-8.000000,2.000000,-12.000000,-8.000000,-11.000000,2.000000,-4.000000,0.000000,-6.000000,-2.000000,-2.000000,2.000000
1.000000,-1.000000,-2.000000,5.000000,1.000000,3.000000,-2.000000,2.000000,14.000000,0.000000,-3.000000,3.000000,-3.000000,-1.000000,-10.000000,3.000000,2.000000,2.000000,-3.000000,-2.000000,-6.000000,4.000000,7.000000,1.000000,-4.000000,0.000000,-16.000000,0.000000,0.000000,2.000000,-4.000000,-1.000000,-18.000000,1.000000,3.000000,1.000000,-4.000000,1.000000,4.000000,-3.000000,2.000000,4.000000,-2.000000,0.000000,-12.000000,-2.000000,4.000000,4.000000,-3.000000,2.000000,2.000000,-4.000000,2.000000,3.000000,-1.000000,0.000000,-11.000000,-1.000000,8.000000,3.000000,-2.000000,-1.000000,-7.000000,-1.000000,6.000000,3.000000,-10.000000,-4.000000,-37.000000,-1.000000,13.000000,1.000000,-11.000000,-4.000000,-31.000000,0.000000,10.000000,0.000000,-11.000000,-2.000000,-28.000000,-5.000000,12.000000,0.000000,-11.000000,-6.000000,-38.000000,0.000000,14.000000,1.000000
-5.000000,-1.000000,-16.000000,-3.000000,-7.000000,3.000000,-2.000000,1.000000,7.000000,0.000000,2.000000,1.000000,-132.000000,-48.000000,-496.000000,63.000000,-110.000000,-1.000000,-101.000000,-47.000000,-445.000000,80.000000,-72.000000,-7.000000,-79.000000,-43.000000,-417.000000,79.000000,-20.000000,-11.000000,-107.000000,-45.000000,-413.000000,70.000000,-101.000000,-12.000000,-68.000000,-35.000000,-355.000000,66.000000,-23.000000,-9.000000,-81.000000,-37.000000,-345.000000,44.000000,-52.000000,-9.000000,-68.000000,-39.000000,-358.000000,58.000000,-34.000000,-11.000000,-52.000000,-30.000000,-332.000000,49.000000,-16.000000,-7.000000,-9.000000,1.000000,-11.000000,-10.000000,-12.000000,2.000000,-14.000000,7.000000,-6.000000,-16.000000,-18.000000,1.000000,-9.000000,6.000000,-9.000000,-14.000000,-12.000000,1.000000,-6.000000,3.000000,-11.000000,-9.000000,-8.000000,1.000000,-8.000000,3.000000,-8.000000,-9.000000,-11.000000,2.000000
6.000000,-6.000000,-66.000000,-40.000000,12.000000,5.000000,15.000000,-5.000000,-112.000000,-48.000000,46.000000,-2.000000,14.000000,-7.000000,-179.000000,-44.000000,71.000000,-18.000000,13.000000,-8.000000,-182.000000,-37.000000,59.000000,-21.000000,7.000000,-1.000000,-72.000000,62.000000,39.000000,18.000000,106.000000,-149.000000,-8.000000,230.000000,59.000000,33.000000,89.000000,-64.000000,-4.000000,128.000000,49.000000,-24.000000,115.000000,-22.000000,30.000000,12.000000,146.000000,3.000000,102.000000,-37.000000,-8.000000,34.000000,131.000000,4.000000,73.000000,-37.000000,10.000000,49.000000,89.000000,2.000000,59.000000,-32.000000,3.000000,34.000000,76.000000,2.000000,-9.000000,4.000000,-9.000000,-11.000000,-11.000000,2.000000,-5.000000,4.000000,-5.000000,-11.000000,-6.000000,1.000000,-1.000000,2.000000,-5.000000,-8.000000,-1.000000,1.000000,1.000000,-1.000000,-5.000000,-4.000000,2.000000,2.000000
history/aob/data/robot5/labels.csv
ADDED
@@ -0,0 +1 @@
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
history/aob/data/train/data.csv
ADDED
The diff for this file is too large to render. See raw diff.
history/aob/data/train/labels.csv
ADDED
@@ -0,0 +1 @@
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
history/aob/data/train/times.csv
ADDED
@@ -0,0 +1 @@
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100
history/aob/descriptive/balanced_train_set.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7c078d45cde85010c31fec38395b8d6248411cc1e9910d4ded735a9480bfaffa
size 16088486
history/aob/descriptive/balanced_validation_set.csv
ADDED
The diff for this file is too large to render. See raw diff.
history/aob/descriptive/easysk_train_set.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:555e5517da13cab5fb39585471dbdb32cfd6295c083617fc6f7bba58062707e4
size 12915558
history/aob/descriptive/hardsk_train_set.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5c7d407b000435057d95c5185a81e6bbf3852d6f10bdf6507c099418c0b29040
size 18431533
history/aob/descriptive/old_train_set.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94f7ff7ed60574f2571d4bd0dd23a87fd3c3ce129b4dc1d446a1c069110a99f0
size 12494150
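
The four `.pkl` entries above are Git LFS pointer files: the repository stores only a three-line stand-in (spec version, the sha256 oid of the real blob, and its size in bytes), while the pickled datasets themselves live in LFS storage. A minimal sketch, not part of this repository, of how such a pointer can be parsed:

# Hypothetical helper, not part of this repo: parse a Git LFS pointer file
# (three "key value" lines) into a dictionary.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:7c078d45cde85010c31fec38395b8d6248411cc1e9910d4ded735a9480bfaffa
size 16088486"""

print(parse_lfs_pointer(pointer))
# {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:7c07...', 'size': '16088486'}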
history/aob/handcoded_tokenizer_OLD.py
ADDED
@@ -0,0 +1,232 @@
import re
import json
from typing import Any, Dict, List, Optional, Tuple, Union
from transformers import PreTrainedTokenizer
from transformers.utils import logging

logger = logging.get_logger(__name__)


def load_json(path: str) -> Union[Dict, List]:
    """
    Load a JSON file from the given path.

    Args:
        path (str): The path to the JSON file to be loaded.

    Returns:
        Union[Dict, List]: The parsed content of the JSON file, which could be a dictionary or a list.
    """
    with open(path, "r") as f:
        return json.load(f)


class STLTokenizer(PreTrainedTokenizer):
    """
    A custom tokenizer class that extends `PreTrainedTokenizer` to handle a specific vocabulary and tokenization process.

    This tokenizer can load a vocabulary from a JSON file, tokenize text, convert tokens to IDs,
    and handle padding and special tokens.
    """

    def __init__(self, vocab_path: str, unk_token: str = "unk", pad_token: str = "pad",
                 bos_token: str = "/s", eos_token: str = "s", model_max_length=512):
        """
        Initializes the STLTokenizer with a given vocabulary and special tokens.

        Args:
            vocab_path (str): The path to the JSON file containing the vocabulary.
            unk_token (str, optional): The token used for unknown words. Defaults to "unk".
            pad_token (str, optional): The token used for padding. Defaults to "pad".
            bos_token (str, optional): The token used for the beginning of a sequence. Defaults to "/s".
            eos_token (str, optional): The token used for the end of a sequence. Defaults to "s".
        """
        self.vocab = load_json(vocab_path)
        self.unk_token = unk_token
        self.pad_token = pad_token
        self.bos_token = bos_token
        self.eos_token = eos_token
        self.model_max_length = model_max_length
        self.id_to_token = {v: k for k, v in self.vocab.items()}  # Reverse mapping

    @property
    def vocab_size(self) -> int:
        """
        Returns the size of the vocabulary.

        Returns:
            int: The number of tokens in the vocabulary.
        """
        return len(self.vocab)

    def prepad_sequence(self, sequence, undo=False):
        """
        Replaces spaces in the input sequence with the padding token.

        Args:
            sequence (str): The input sequence.
            undo (bool): If True, replace the padding token with spaces. Defaults to False, which pads the spaces.

        Returns:
            str: The preprocessed sequence with spaces or padding tokens replaced.
        """
        if undo:
            return sequence.replace(f'{self.pad_token}', ' ')
        else:
            return sequence.replace(' ', f'{self.pad_token}')

    def add_bos_eos(self, sequence: str) -> str:
        """
        Adds the BOS token at the beginning and the EOS token at the end of the sequence.

        Args:
            sequence (str): The input sequence.

        Returns:
            str: The sequence with the BOS and EOS tokens added.
        """
        return f'{self.bos_token} {sequence} {self.eos_token}'

    def tokenize(self, text: str) -> List[str]:
        """
        Tokenizes the input text into a list of tokens.

        The method preprocesses the input text by replacing spaces with padding tokens and then tries to
        find the longest possible match for each substring in the vocabulary.

        Args:
            text (str): The input text to be tokenized.

        Returns:
            List[str]: A list of tokens representing the tokenized text.
        """
        text = self.add_bos_eos(text)
        text = self.prepad_sequence(text)

        tokens = []
        i = 0
        while i < len(text):
            best_match = None
            for j in range(len(text), i, -1):  # Try matching substrings of decreasing length
                subtoken = text[i:j]
                if subtoken in self.vocab:
                    best_match = subtoken
                    break
            if best_match:
                tokens.append(best_match)
                i += len(best_match)
            else:
                tokens.append(self.unk_token)
                i += 1
        return tokens

    def convert_tokens_to_ids(self, tokens: List[str]) -> List[int]:
        """
        Converts a list of tokens into a list of token IDs.

        Args:
            tokens (List[str]): A list of tokens to be converted into IDs.

        Returns:
            List[int]: A list of corresponding token IDs.
        """
        return [self.vocab.get(token, self.vocab[self.unk_token]) for token in tokens]

    def convert_ids_to_tokens(self, ids: List[int]) -> List[str]:
        """
        Converts a list of token IDs into a list of tokens.

        Args:
            ids (List[int]): A list of token IDs to be converted into tokens.

        Returns:
            List[str]: A list of corresponding tokens.
        """
        return [self.id_to_token.get(i, self.unk_token) for i in ids]

    def encode(self, sequence: str) -> List[int]:
        """
        Encodes a string sequence into a list of token IDs.

        This method tokenizes the input sequence using the `tokenize` method,
        and then converts the resulting tokens into their corresponding token IDs
        using the `convert_tokens_to_ids` method.

        Args:
            sequence (str): The input sequence (text) to be encoded.

        Returns:
            List[int]: A list of token IDs corresponding to the input sequence.
        """
        splitted_sequence = self.tokenize(sequence)
        return self.convert_tokens_to_ids(splitted_sequence)

    def postpad_sequence(self, sequence, pad_token_id):
        """
        Pads the sequence with `pad_token_id` up to `model_max_length` elements.
        """
        num_extra_elements = self.model_max_length - len(sequence) - 1
        if num_extra_elements > 0:
            sequence.extend([pad_token_id] * num_extra_elements)
        return sequence

    def decode(self, token_ids: List[int]) -> str:
        """
        Decodes a list of token IDs into a string of text.

        The method converts the IDs to tokens, joins them to form a string,
        and restores the original spaces in place of the padding tokens.

        Args:
            token_ids (List[int]): A list of token IDs to be decoded.

        Returns:
            str: The decoded string.
        """
        tokens = self.convert_ids_to_tokens(token_ids)
        decoded = "".join(tokens)
        return self.prepad_sequence(decoded, undo=True)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Saves the tokenizer's vocabulary to a file.
        Useful only when the vocabulary has to be retrieved and is not given
        (thus not the case here; kept for future improvements with sentencepiece).

        This method saves the vocabulary to a JSON file in the specified directory.

        Args:
            save_directory (str): The directory where the vocabulary file will be saved.
            filename_prefix (Optional[str]): An optional prefix for the filename.

        Returns:
            Tuple[str]: A tuple containing the path to the saved vocabulary file.
        """
        vocab_file = f"{save_directory}/{filename_prefix + '-' if filename_prefix else ''}vocab.json"
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self.vocab, f, indent=2, ensure_ascii=False)
        return (vocab_file,)

    def get_vocab(self) -> dict:
        """
        Retrieves the vocabulary used by the tokenizer.

        Returns:
            dict: The vocabulary as a dictionary.
        """
        return self.vocab


# EXAMPLE OF USAGE

# sequence = "( not ( x_1 <= 0.2988 ) until[11,21] x_0 <= -0.7941 )"
# tokenizer = STLTokenizer('tokenizer_files/tokenizer.json')
# token_ids = tokenizer.encode(sequence)
# decoded_sequence = tokenizer.decode(token_ids)

# print("Original sequence: ", sequence)
# print("Encoded sequence: ", token_ids)
# print("Decoded sequence: ", decoded_sequence)
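
The core of `STLTokenizer.tokenize` above is a greedy longest-match scan: at each position it tries ever-shorter substrings until one is found in the vocabulary, and falls back to the unknown token on a miss. A self-contained sketch of that loop; the toy vocabulary below is illustrative, not the repository's actual `tokenizer_files/tokenizer.json`:

# Greedy longest-match tokenization, as in STLTokenizer.tokenize.
# The vocabulary here is a toy stand-in for the real tokenizer.json.
toy_vocab = {"not": 0, "x_0": 1, "<=": 2, "0.5": 3, "pad": 4, "unk": 5}

def greedy_tokenize(text: str, vocab: dict, unk_token: str = "unk") -> list:
    tokens, i = [], 0
    while i < len(text):
        best_match = None
        for j in range(len(text), i, -1):  # longest candidate first
            if text[i:j] in vocab:
                best_match = text[i:j]
                break
        if best_match:
            tokens.append(best_match)
            i += len(best_match)
        else:
            tokens.append(unk_token)  # no vocabulary entry covers this character
            i += 1
    return tokens

# Spaces are pre-replaced with the pad token, mirroring prepad_sequence.
print(greedy_tokenize("x_0pad<=pad0.5", toy_vocab))  # ['x_0', 'pad', '<=', 'pad', '0.5']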
history/aob/kernel_example.py
ADDED
@@ -0,0 +1,65 @@
import os
import torch
from torch.nn.functional import normalize
import copy
import numpy as np

from phis_generator import StlGenerator
from traj_measure import BaseMeasure
from utils import from_string_to_formula, load_pickle, dump_pickle
from kernel import StlKernel

# compute kernel embeddings
# 1. Fix an anchor set of STL formulae, against which you compute all embeddings
# We randomly sample it using the following:
# leaf_prob determines the syntactic complexity of formulae
sampler = StlGenerator(leaf_prob=0.4)
# the number of generated formulae determines the dimension of your embeddings
n_phis = 3000
# the number of variables determines the maximum allowed dimension of signals
n_vars = 3
anchor_set = sampler.bag_sample(bag_size=n_phis, nvars=n_vars)  # list of formulae
# IMPORTANT: the anchor set should never change within the same application, better to store it in memory!
# dump_pickle('anchor_set_{}_vars'.format(n_vars), anchor_set)
# to recover it
# anchor_set = load_pickle(os.getcwd() + os.path.sep + 'anchor_set.pkl')
# if it is too heavy you can keep the string in memory
anchor_set_string = list(map(str, anchor_set))
anchor_set_from_string = list(map(from_string_to_formula, anchor_set_string))
# 2. Instantiate the kernel
# need the measure on the space of trajectories over which to integrate
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
mu = BaseMeasure(device=device)
# instantiate the kernel given the measure and the number of variables (should be the same as the anchor set)
kernel = StlKernel(mu, varn=n_vars)
# 3. Embed random STL formulae
phis = sampler.bag_sample(1000, nvars=n_vars)  # in principle, you can use a sampler with different parameters
# the rows of the following are the embeddings of the corresponding formulae
gram_phis = kernel.compute_bag_bag(phis, anchor_set)


# CAVEAT: you can sample a semantically diverse set of formulae as follows
# just copy-paste the following in place of the plain anchor set sampling above
cos_threshold = 0.9  # lower it if it struggles to find enough formulae
diverse_anchor_set = [sampler.sample(nvars=n_vars)]
signals = mu.sample(samples=10000, varn=n_vars)
# always use normalized robustness with mu0
anchor_rob_vectors = torch.cat(
    [phi.quantitative(signals, normalize=True).unsqueeze(0) for phi in diverse_anchor_set], 0)
while len(diverse_anchor_set) < n_phis:
    # sample candidate anchor formulae
    candidate_anchors = sampler.bag_sample(n_phis - len(diverse_anchor_set), nvars=n_vars)
    # compute robustness of candidate anchor formulae on the same signals as the previous anchor set
    candidate_robs = torch.cat(
        [phi.quantitative(signals, normalize=True).unsqueeze(0) for phi in candidate_anchors], 0)
    # compute cosine similarity between the current anchor set and the candidate new formulae
    cos_simil = torch.tril(normalize(candidate_robs) @ normalize(anchor_rob_vectors).t(), diagonal=-1)
    # check which formulae are similar (i.e. cosine similarity greater than the threshold) w.r.t. current anchors
    similar_idx = [np.where(cos_simil[r, :] > cos_threshold)[0].tolist() for r in range(cos_simil.shape[0])]
    # keep only those which are semantically distant
    keep_idx = list(set(np.arange(len(candidate_anchors)).tolist()).difference(set(
        [i for sublist in similar_idx for i in sublist])))
    diverse_anchor_set += [copy.deepcopy(candidate_anchors[i]) for i in keep_idx]
    anchor_rob_vectors = torch.cat([anchor_rob_vectors, copy.deepcopy(
        torch.index_select(candidate_robs, 0, torch.tensor(keep_idx)))], 0)
anchor_set = diverse_anchor_set[:n_phis]
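
The loop above grows the anchor set only with candidates whose robustness vectors are not too cosine-similar to the anchors already accepted. The same filtering step, sketched in isolation on synthetic robustness vectors (random tensors standing in for the `phi.quantitative(...)` outputs); for clarity this sketch compares candidates only against accepted anchors, not against each other:

import torch
from torch.nn.functional import normalize

torch.manual_seed(0)
cos_threshold = 0.9

# Synthetic stand-ins for robustness vectors over a batch of signals.
anchor_robs = normalize(torch.randn(5, 100))      # already-accepted anchors
candidate_robs = normalize(torch.randn(20, 100))  # new candidates

# Cosine similarity of every candidate against every current anchor.
cos_simil = candidate_robs @ anchor_robs.t()      # shape: (candidates, anchors)

# Keep a candidate only if it stays below the threshold against all anchors.
keep_mask = (cos_simil <= cos_threshold).all(dim=1)
keep_idx = torch.nonzero(keep_mask).squeeze(1).tolist()
print(f"kept {len(keep_idx)} of {candidate_robs.shape[0]} candidates")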
history/aob/old_utils.py
ADDED
@@ -0,0 +1,571 @@
import ast
import copy
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import pandas as pd
import torch
import torch.utils.checkpoint
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset

from transformers.modeling_utils import PreTrainedModel
from configuration import STLConfig
from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask

import copy
import pickle
import os
from collections import deque

from stl import *

from nltk.translate.bleu_score import sentence_bleu
from handcoded_tokenizer import STLTokenizer

import networkx as nx
import phis_generator_depth

############################################################################################################################

def load_pickle(path):
    with open(path, 'rb') as f:
        x = pickle.load(f)
    return x


def dump_pickle(name, thing):
    with open(name + '.pickle', 'wb') as f:
        pickle.dump(thing, f)


def set_time_thresholds(st):
    unbound, right_unbound = [True, False]
    left_time_bound, right_time_bound = [0, 0]
    if st[-1] == ']':
        unbound = False
        time_thresholds = st[st.index('[')+1:-1].split(",")
        left_time_bound = int(time_thresholds[0])
        if time_thresholds[1] == 'inf':
            right_unbound = True
        else:
            right_time_bound = int(time_thresholds[1])-1
    return unbound, right_unbound, left_time_bound, right_time_bound


def from_string_to_formula(st):
    root_arity = 2 if st.startswith('(') else 1
    st_split = st.split()
    if root_arity <= 1:
        root_op_str = copy.deepcopy(st_split[0])
        if root_op_str.startswith('x'):
            atom_sign = True if st_split[1] == '<=' else False
            root_phi = Atom(var_index=int(st_split[0][2]), lte=atom_sign, threshold=float(st_split[2]))
            return root_phi
        else:
            assert (root_op_str.startswith('not') or root_op_str.startswith('eventually')
                    or root_op_str.startswith('always'))
            current_st = copy.deepcopy(st_split[2:-1])
            if root_op_str == 'not':
                root_phi = Not(child=from_string_to_formula(' '.join(current_st)))
            elif root_op_str.startswith('eventually'):
                unbound, right_unbound, left_time_bound, right_time_bound = set_time_thresholds(root_op_str)
                root_phi = Eventually(child=from_string_to_formula(' '.join(current_st)), unbound=unbound,
                                      right_unbound=right_unbound, left_time_bound=left_time_bound,
                                      right_time_bound=right_time_bound)
            else:
                unbound, right_unbound, left_time_bound, right_time_bound = set_time_thresholds(root_op_str)
                root_phi = Globally(child=from_string_to_formula(' '.join(current_st)), unbound=unbound,
                                    right_unbound=right_unbound, left_time_bound=left_time_bound,
                                    right_time_bound=right_time_bound)
    else:
        # 1 - delete everything which is contained in other sets of parentheses (if any)
        current_st = copy.deepcopy(st_split[1:-1])
        if '(' in current_st:
            par_queue = deque()
            par_idx_list = []
            for i, sub in enumerate(current_st):
                if sub == '(':
                    par_queue.append(i)
                elif sub == ')':
                    par_idx_list.append(tuple([par_queue.pop(), i]))
            # open_par_idx, close_par_idx = [current_st.index(p) for p in ['(', ')']]
            # union of parentheses ranges --> from these we may extract the substrings to be the children!!!
            children_range = []
            for begin, end in sorted(par_idx_list):
                if children_range and children_range[-1][1] >= begin - 1:
                    children_range[-1][1] = max(children_range[-1][1], end)
                else:
                    children_range.append([begin, end])
            n_children = len(children_range)
            assert (n_children in [1, 2])
            if n_children == 1:
                # one of the children is a variable --> need to identify it
                var_child_idx = 1 if children_range[0][0] <= 1 else 0  # 0 is left child, 1 is right child
                if children_range[0][0] != 0 and current_st[children_range[0][0] - 1][0:2] in ['no', 'ev', 'al']:
                    children_range[0][0] -= 1
                left_child_str = current_st[:3] if var_child_idx == 0 else \
                    current_st[children_range[0][0]:children_range[0][1] + 1]
                right_child_str = current_st[-3:] if var_child_idx == 1 else \
                    current_st[children_range[0][0]:children_range[0][1] + 1]
                root_op_str = current_st[children_range[0][1] + 1] if var_child_idx == 1 else \
                    current_st[children_range[0][0] - 1]
                assert (root_op_str[:2] in ['an', 'or', 'un'])
            else:
                if children_range[0][0] != 0 and current_st[children_range[0][0] - 1][0:2] in ['no', 'ev', 'al']:
                    children_range[0][0] -= 1
                if current_st[children_range[1][0] - 1][0:2] in ['no', 'ev', 'al']:
                    children_range[1][0] -= 1
                # if there are two children, with parentheses, the element in the middle is the root
                root_op_str = current_st[children_range[0][1] + 1]
                assert (root_op_str[:2] in ['an', 'or', 'un'])
                left_child_str = current_st[children_range[0][0]:children_range[0][1] + 1]
                right_child_str = current_st[children_range[1][0]:children_range[1][1] + 1]
        else:
            # no parentheses means that both children are variables
            left_child_str = current_st[:3]
            right_child_str = current_st[-3:]
            root_op_str = current_st[3]
        left_child_str = ' '.join(left_child_str)
        right_child_str = ' '.join(right_child_str)
        if root_op_str == 'and':
            root_phi = And(left_child=from_string_to_formula(left_child_str),
                           right_child=from_string_to_formula(right_child_str))
        elif root_op_str == 'or':
            root_phi = Or(left_child=from_string_to_formula(left_child_str),
                          right_child=from_string_to_formula(right_child_str))
        else:
            unbound, right_unbound, left_time_bound, right_time_bound = set_time_thresholds(root_op_str)
            root_phi = Until(left_child=from_string_to_formula(left_child_str),
                             right_child=from_string_to_formula(right_child_str),
                             unbound=unbound, right_unbound=right_unbound, left_time_bound=left_time_bound,
                             right_time_bound=right_time_bound)
    return root_phi


def scale_trajectories(traj):
    traj_min = torch.min(torch.min(traj, dim=0)[0], dim=0)[0]
    traj_max = torch.max(torch.max(traj, dim=0)[0], dim=0)[0]
    scaled_traj = -1 + 2*(traj - traj_min) / (traj_max - traj_min)
    return scaled_traj


def standardize_trajectories(traj_data, n_var):
    means, stds = [[] for _ in range(2)]
    for i in range(n_var):
        means.append(torch.mean(traj_data[:, i, :]))
        stds.append(torch.std(traj_data[:, i, :]))
    for i in range(n_var):
        traj_data[:, i, :] = (traj_data[:, i, :] - means[i]) / stds[i]
    return traj_data

############################################################################################################################

class STLSinusoidalPositionalEmbedding(nn.Embedding):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
        super().__init__(num_positions, embedding_dim)
        self.weight = self._init_weight(self.weight)

    @staticmethod
    def _init_weight(out: nn.Parameter) -> nn.Parameter:
        """
        Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
        the 2nd half of the vector. [dim // 2:]
        """
        n_pos, dim = out.shape
        position_enc = np.array(
            [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
        )
        out.requires_grad = False  # set early to avoid an error in pytorch-1.8+
        sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
        out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
        out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
        out.detach_()
        return out

    @torch.no_grad()
    def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor:
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        bsz, seq_len = input_ids_shape[:2]
        positions = torch.arange(
            past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
        )
        return super().forward(positions)

class STLAttention(nn.Module):
    """ Multi-Head Attention as described in 'Attention Is All You Need' """

    def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0.0,
                 is_decoder: bool = False, bias: bool = False, is_causal: bool = False):

        super().__init__()
        self.embed_dim = embed_dim  # overall embedding dimension -> to be divided between multiple heads
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert (self.head_dim * num_heads) == self.embed_dim
        self.scaling = self.head_dim ** -0.5  # used to normalize values when projected using `W_` matrices
        self.is_decoder = is_decoder
        self.is_causal = is_causal

        # 'roleplaying' matrices
        self.W_k = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.W_q = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.W_v = nn.Linear(embed_dim, embed_dim, bias=bias)

        # to project the heads' outputs into a single vector
        self.W_o = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
        return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(self,
                hidden_states: torch.Tensor,  # previous values, passed to the multi-head attn layer
                key_value_states: Optional[torch.Tensor] = None,  # different key, value items (used in cross-attn)
                past_key_value: Optional[Tuple[torch.Tensor]] = None,  # stores the keys and values of previous steps
                attention_mask: Optional[torch.Tensor] = None,  # masks non-allowed items (padded or future ones)
                layer_head_mask: Optional[torch.Tensor] = None,  # used to de-activate specific attn heads
                output_attentions: bool = False  # flag to control the output of the attn values
                ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:

        is_cross_attention = key_value_states is not None  # cross-attn if key_value_states is not None

        batch_size, tgt_len, embed_dim = hidden_states.size()

        # Project the current input in the `query` role:
        query = self.W_q(hidden_states) * self.scaling

        if (is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1]):
            key = past_key_value[0]
            value = past_key_value[1]
        elif is_cross_attention:
            key = self._shape(self.W_k(key_value_states), -1, batch_size)
            value = self._shape(self.W_v(key_value_states), -1, batch_size)
        elif past_key_value is not None:
            key = self._shape(self.W_k(hidden_states), -1, batch_size)
            value = self._shape(self.W_v(hidden_states), -1, batch_size)
            key = torch.cat([past_key_value[0], key], dim=2)
            value = torch.cat([past_key_value[1], value], dim=2)
        else:
            key = self._shape(self.W_k(hidden_states), -1, batch_size)
            value = self._shape(self.W_v(hidden_states), -1, batch_size)

        if self.is_decoder:
            past_key_value = (key, value)

        proj_shape = (batch_size * self.num_heads, -1, self.head_dim)

        query = self._shape(query, tgt_len, batch_size).view(*proj_shape)
        key = key.reshape(*proj_shape)
        value = value.reshape(*proj_shape)

        src_len = key.size(1)

        ######################################################################################################

        # 'traditional' attention computation
        # i.e. softmax(Q*K^T / sqrt(d_model) + self_attn_mask) * V

        # Batch-wise matrix multiplication between `query` and (TRANSPOSED) `key`
        attn_weights = torch.bmm(query, key.transpose(1, 2))

        if attention_mask is not None:
            attn_weights = attn_weights.view(batch_size, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(batch_size * self.num_heads, tgt_len, src_len)

        # Normalize values on the `key` axis (dim=-1)
        attn_weights = F.softmax(attn_weights, dim=-1)

        # if layer_head_mask is not None:
        #     attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(batch_size, self.num_heads, tgt_len, src_len)
        #     attn_weights = attn_weights.view(batch_size * self.num_heads, tgt_len, src_len)

        attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)

        # Batch-wise matrix multiplication between the resulting probs and the value
        attn_output = torch.bmm(attn_probs, value)

        ######################################################################################################

        attn_output = attn_output.view(batch_size, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        attn_output = attn_output.reshape(batch_size, tgt_len, self.embed_dim)
        attn_output = self.W_o(attn_output)

        return attn_output, None, past_key_value


# Create a `CustomDataset` class to properly format input data with respect to
# the `input_ids`, `labels`, and `attention_mask` attributes for model training.
class CustomDataset(Dataset):
    def __init__(self, df, device='cpu'):
        """
        Initializes the dataset by storing the DataFrame and setting the device.

        Args:
        - df: A pandas DataFrame containing the data (e.g., `Encoded_Formula`, `Embedding`).
        - device: The device ('cpu' or 'cuda') where the tensors will be moved for processing.
        """
        self.df = df
        self.device = device

    def __len__(self):
        """
        Returns the length of the dataset, i.e., the number of examples in the DataFrame.

        Returns:
        - Length of the DataFrame (number of samples).
        """
        return len(self.df)

    def __getitem__(self, idx):
        """
        Retrieves a specific example from the dataset, processes it, and formats it
        into the required structure for the model (e.g., `input_ids`, `labels`, `attention_mask`).

        Args:
        - idx: Index of the example to retrieve.

        Returns:
        - A dictionary containing the formatted input data, including:
          - `input_ids`: The tokenized input sequence (excluding the last token).
          - `labels`: The tokenized target sequence (excluding the first token).
          - `attention_mask`: A mask indicating which tokens should be attended to.
          - `encoder_hidden_states`: Embedding for each formula (precomputed, used as hidden states).
        """
        # Extract the encoded formula (tokenized input sequence) from the DataFrame
        encoded_formula = self.df['Encoded_Formula'][idx]
        # Convert the string representation of a list back to a Python list using ast.literal_eval
        encoded_formula = ast.literal_eval(encoded_formula.strip())

        # Extract the precomputed formula embedding (hidden states) from the DataFrame
        formula_embedding = self.df['Embedding'][idx]
        # Clean the string and convert it back to a tensor
        formula_embedding = formula_embedding.replace("tensor(", "").rstrip(")")
        formula_embedding = eval(formula_embedding)

        # Define the input_ids by excluding the last token (shifted tokens for prediction)
        input_ids = encoded_formula[:-1]  # All tokens except the last
        # Define the labels by excluding the first token (shifted tokens for teacher forcing)
        labels = encoded_formula[1:]  # All tokens except the first

        # Create the attention mask to indicate which tokens should be attended to.
        # Tokens equal to '1' (typically padding tokens) will be masked (set to 0),
        # and the rest will be visible (set to 1).
        attention_mask = [0 if token == '1' else 1 for token in input_ids]

        # Convert `input_ids`, `labels`, and `attention_mask` to tensors and move them to the desired device (e.g., GPU or CPU)
        input_ids = torch.tensor(input_ids, dtype=torch.long).to(self.device)
        labels = torch.tensor(labels, dtype=torch.long).to(self.device)
        attention_mask = torch.tensor(attention_mask, dtype=torch.long).to(self.device)

        # Convert the formula embedding (list of hidden states) to a tensor and move it to the device
        encoder_hidden_states = torch.tensor(formula_embedding, dtype=torch.float32).to(self.device)

        # Return the formatted data as a dictionary, which the model can use directly for training or evaluation
        return {
            'input_ids': input_ids,
            'labels': labels,
            'attention_mask': attention_mask,
            'encoder_hidden_states': encoder_hidden_states
        }

############################################################################################################################

# METRICS

def token_division(input_string):
    tokenizer = STLTokenizer('tokenizer_files/tokenizer.json')
    return [element for element in tokenizer.tokenize(input_string) if element != "pad"]


def bleu_score(dataset):

    bleu_scores = []

    for idx in range(len(dataset)):
        gold = token_division(dataset["Gold Formula"][idx])
        generated = token_division(dataset["Generated Formula"][idx])

        # sentence_bleu expects a list of reference token lists as its first argument
        bleu_scores.append(sentence_bleu([gold], generated))

    return np.min(bleu_scores), np.mean(bleu_scores), np.max(bleu_scores)


def exact_match(dataset):

    percentage = []

    for idx in range(len(dataset)):
        gold = token_division(dataset["Gold Formula"][idx])
        generated = token_division(dataset["Generated Formula"][idx])

        match_count = 0
        for gold_token, gen_token in zip(gold, generated):
            if gold_token == gen_token:
                match_count += 1

        percentage.append(match_count/len(gold))

    return np.min(percentage), np.mean(percentage), np.max(percentage)


def cosine_similarity(dataset):

    similarities = []

    for idx in range(len(dataset)):
        gold = ast.literal_eval(dataset["Embedding Gold Formula"][idx])
        gen = ast.literal_eval(dataset["Embedding Generated Formula"][idx])

        dot_product = np.dot(gold, gen)
        gold_norm = np.linalg.norm(gold)
        gen_norm = np.linalg.norm(gen)

        similarities.append(dot_product / (gold_norm * gen_norm))

    return np.min(similarities), np.mean(similarities), np.max(similarities)


def euclidean_distance(dataset):

    distances = []

    for idx in range(len(dataset)):

        gold = torch.tensor(ast.literal_eval(dataset["Embedding Gold Formula"][idx]))
        generated = torch.tensor(ast.literal_eval(dataset["Embedding Generated Formula"][idx]))

        distances.append(torch.dist(gold, generated))

    return np.min(distances), np.mean(distances), np.max(distances)


#######################################################################################################

def get_name_given_type(formula):
    """
    Returns the type of node (as a string) of the top node of the formula/sub-formula
    """
    name_dict = {And: 'and', Or: 'or', Not: 'not', Eventually: 'F', Globally: 'G', Until: 'U',
                 Atom: 'x'}
    return name_dict[type(formula)]


def get_id(child_name, name, label_dict, idx):
    """
    Get a unique identifier for a node
    """
    while child_name in label_dict.keys():  # if the name is already present
        idx += 1
        child_name = name + "(" + str(idx) + ")"
    return child_name, idx  # returns both the child name and the identifier


def get_temporal_list(temporal_node):
    """
    Returns the feature vector for temporal nodes (the two bounds of the temporal interval).
    Variant and num_arg modify the length of the list to return (3, 4 or 5)
    """
    left = float(temporal_node.left_time_bound) if temporal_node.unbound is False else 0.
    right = float(temporal_node.right_time_bound) if (temporal_node.unbound is False and
                                                      temporal_node.right_unbound is False) else -1.
    vector_l = [left, right, 0.]  # third slot for sign and fourth for threshold  # add another slot for argument number
    return vector_l


def add_internal_child(current_child, current_idx, label_dict):
    child_name = get_name_given_type(current_child) + '(' + str(current_idx) + ')'
    child_name, current_idx = get_id(child_name, get_name_given_type(current_child), label_dict, current_idx)
    return child_name, current_idx


def add_leaf_child(node, name, label_dict, idx):
    """
    Add the edges and update the label dictionary and the identifier count for a leaf node (variable)
    variant = ['original', 'threshold-sign', 'all-in-var']
    shared_var = [True, False] denotes if shared variables for all the DAG or single variables (tree-like)
    num_arg = [True, False] if true argument number is one-hot encoded in the feature vector
    until_right is a flag to detect when the argument number encoding should be 1
    """
    new_e = []
    label_dict[name] = [0., 0., 0.]  # temp_left, temp_right, threshold
    atom_idx = str(node).split()[0] + '(' + str(idx) + ')'
    # different names for the same variables (e.g. x_1(5), x_1(8))
    idx += 1
    if atom_idx not in label_dict.keys():
        label_dict[atom_idx] = [0., 0., 0.]

    if str(node).split()[1] == '<=':
        label_dict[name] = [0., 0., round(node.threshold, 4)]
    else:
        label_dict[name] = [0., 0., round(node.threshold, 4)]
    new_e.append([name, atom_idx])
    return new_e, label_dict, idx+1


def traverse_formula(formula, idx, label_dict):
    current_node = formula
    edges = []
    if type(current_node) is not Atom:
        current_name = get_name_given_type(current_node) + '(' + str(idx) + ')'
        if (type(current_node) is And) or (type(current_node) is Or) or (type(current_node) is Not):
            label_dict[current_name] = [0., 0., 0.]  # temp_left, temp_right, threshold
        else:
            label_dict[current_name] = get_temporal_list(current_node)
        if (type(current_node) is And) or (type(current_node) is Or) or (type(current_node) is Until):
            left_child_name, current_idx = add_internal_child(current_node.left_child, idx + 1, label_dict)
            edges.append([current_name, left_child_name])
            if type(current_node.left_child) is Atom:
                e, d, current_idx = add_leaf_child(current_node.left_child, left_child_name, label_dict, current_idx+1)
                edges += e
                label_dict.update(d)
            e, d = traverse_formula(current_node.left_child, current_idx, label_dict)
            edges += e
            label_dict.update(d)
            right_child_name, current_idx = add_internal_child(current_node.right_child, current_idx + 1, label_dict)
            edges.append([current_name, right_child_name])
            if type(current_node.right_child) is Atom:
                e, d, current_idx = add_leaf_child(current_node.right_child, right_child_name, label_dict,
                                                   current_idx+1)
                edges += e
                label_dict.update(d)
            e, d = traverse_formula(current_node.right_child, current_idx, label_dict)
            edges += e
            label_dict.update(d)
        else:
            # eventually, globally, not
            child_name, current_idx = add_internal_child(current_node.child, idx + 1, label_dict)
            edges.append([current_name, child_name])
            if type(current_node.child) is Atom:
                e, d, current_idx = add_leaf_child(current_node.child, child_name, label_dict, current_idx+1)
                edges += e
                label_dict.update(d)
            e, d = traverse_formula(current_node.child, current_idx, label_dict)
            edges += e
            label_dict.update(d)
    return edges, label_dict


def build_dag(formula):
    edges, label_dict = traverse_formula(formula, 0, {})
    graph = nx.from_edgelist(edges, create_using=nx.DiGraph)
    assert(nx.is_directed_acyclic_graph(graph))
    return graph, label_dict


def get_depth(formula):
    phi_g = build_dag(formula)[0]
    return len(nx.dag_longest_path(phi_g)) - 1
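
The `input_ids`/`labels` split in `CustomDataset.__getitem__` above is the standard next-token (teacher-forcing) shift: position t of the input is paired with position t+1 as its target. A tiny illustration of the shift on a made-up token sequence (the IDs are arbitrary):

# Teacher-forcing shift as done in CustomDataset.__getitem__ (arbitrary token IDs).
encoded_formula = [2, 17, 5, 42, 9, 3]   # e.g. bos, ..., eos

input_ids = encoded_formula[:-1]  # [2, 17, 5, 42, 9] -> what the decoder sees
labels = encoded_formula[1:]      # [17, 5, 42, 9, 3] -> what it must predict

for step, (inp, tgt) in enumerate(zip(input_ids, labels)):
    print(f"step {step}: given token {inp}, predict token {tgt}")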
history/aob/train-16batch-bis.py
ADDED
|
@@ -0,0 +1,419 @@
import os

from handcoded_tokenizer import STLTokenizer
from configuration import STLConfig
from modeling_stldec import STLForCausalLM

from transformers import AutoConfig, AutoModel

import argparse
import json
import logging
import math
import random
from itertools import chain
from pathlib import Path

import datasets
import torch
from torch.utils.data import Dataset
import ast
from accelerate import Accelerator, DistributedType
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from datasets import load_dataset
from huggingface_hub import HfApi
from torch.utils.data import DataLoader
from tqdm.auto import tqdm

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_MAPPING,
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    SchedulerType,
    default_data_collator,
    get_scheduler,
)
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

logger = get_logger(__name__)

# Extend `AutoClasses` to support the custom model
AutoConfig.register("STLdec", STLConfig)
AutoModel.register(STLConfig, STLForCausalLM)

# Initialize the model with random weights and the desired architecture
config = STLConfig()
model = AutoModel.from_config(config)
tokenizer = STLTokenizer('tokenizer_files/tokenizer.json')

args = {
    'dataset_name': None,  # or a custom dataset path
    'train_file': 'datasets/train_set.csv',
    'validation_file': 'datasets/validation_set.csv',
    'output_dir': './tf_output_test_16batch',
    'model_name_or_path': 'STLForCausalLM',
    'tokenizer_name': 'STLTokenizer',
    'block_size': 500,
    'batch_size': 32,
    'gradient_accumulation_steps': 1,
    'num_train_epochs': 10,
    'learning_rate': 5e-5,
    'weight_decay': 0.01,
    'seed': 42,
    'with_tracking': True,
    'hub_model_id': None,
    'push_to_hub': False,
    'trust_remote_code': False,
    'overwrite_cache': False,
    'per_device_train_batch_size': 32,
    'per_device_eval_batch_size': 32,
    'checkpointing_steps': '500',
    'resume_from_checkpoint': 'tf_output_test_16batch/step_16000',
    'lr_scheduler_type': 'linear',
    'num_warmup_steps': 5000,
    'max_train_steps': 50000,
    # 'lr': 0.01,
    # 'report_to': "tensorboard"
}


# Initialize the accelerator. We let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here, and it will by default pick up all
# supported trackers in the environment.
accelerator_log_kwargs = {}

if args["with_tracking"]:
    # accelerator_log_kwargs["log_with"] = args["report_to"]
    accelerator_log_kwargs["project_dir"] = args["output_dir"]

accelerator = Accelerator(gradient_accumulation_steps=args["gradient_accumulation_steps"], **accelerator_log_kwargs)

# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args["seed"] is not None:
    set_seed(args["seed"])

# Handle the repository creation
if accelerator.is_main_process:
    if args["push_to_hub"]:
        # Retrieve or infer repo_name
        repo_name = args["hub_model_id"]
        if repo_name is None:
            repo_name = Path(args["output_dir"]).absolute().name
        # Create repo and retrieve repo_id (`hub_token` is not set in `args`, so `.get` yields None)
        api = HfApi()
        repo_id = api.create_repo(repo_name, exist_ok=True, token=args.get("hub_token")).repo_id
        with open(os.path.join(args["output_dir"], ".gitignore"), "w+") as gitignore:
            if "step_*" not in gitignore:
                gitignore.write("step_*\n")
            if "epoch_*" not in gitignore:
                gitignore.write("epoch_*\n")
    elif args["output_dir"] is not None:
        os.makedirs(args["output_dir"], exist_ok=True)

accelerator.wait_for_everyone()


if args["dataset_name"] is not None:
    # Downloading and loading a dataset from the hub.
    # (`dataset_config_name` is not set in `args`, so `.get` yields None.)
    raw_datasets = load_dataset(
        args["dataset_name"], args.get("dataset_config_name"), trust_remote_code=args["trust_remote_code"]
    )
    if "validation" not in raw_datasets.keys():
        raw_datasets["validation"] = load_dataset(
            args["dataset_name"],
            args.get("dataset_config_name"),
            trust_remote_code=args["trust_remote_code"],
        )
        raw_datasets["train"] = load_dataset(
            args["dataset_name"],
            args.get("dataset_config_name"),
            trust_remote_code=args["trust_remote_code"],
        )

else:
    data_files = {}
    dataset_args = {}
    if args["train_file"] is not None:
        data_files["train"] = args["train_file"]
        extension = args["train_file"].split(".")[-1]
    if args["validation_file"] is not None:
        data_files["validation"] = args["validation_file"]
        extension = args["validation_file"].split(".")[-1]
    if extension == "txt":
        extension = "text"

    raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)


# Create a `CustomDataset` class to properly format the input data w.r.t. the
# `input_ids`, `labels` and `attention_mask` attributes
class CustomDataset(Dataset):
    def __init__(self, df, device='cpu'):
        self.df = df
        self.device = device

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        # Start from `Encoded_Formula`
        encoded_formula = self.df['Encoded_Formula'][idx]
        encoded_formula = ast.literal_eval(encoded_formula.strip())

        formula_embedding = self.df['Embedding'][idx]
        formula_embedding = formula_embedding.replace("tensor(", "").rstrip(")")
        formula_embedding = eval(formula_embedding)

        input_ids = encoded_formula[:-1]  # all tokens but the last
        labels = encoded_formula[1:]      # all tokens but the first

        # if the id is 1 (i.e. the tokenized `pad`), then neglect that token
        attention_mask = [0 if token == 1 else 1 for token in input_ids]

        input_ids = torch.tensor(input_ids, dtype=torch.long).to(self.device)
        labels = torch.tensor(labels, dtype=torch.long).to(self.device)
        attention_mask = torch.tensor(attention_mask, dtype=torch.long).to(self.device)

        encoder_hidden_states = torch.tensor(formula_embedding, dtype=torch.float32).to(self.device)

        return {
            'input_ids': input_ids,
            'labels': labels,
            'attention_mask': attention_mask,
            'encoder_hidden_states': encoder_hidden_states
        }


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_dataset = CustomDataset(raw_datasets['train'], device=device)
eval_dataset = CustomDataset(raw_datasets['validation'], device=device)


# DataLoaders creation:
train_dataloader = DataLoader(
    train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args["batch_size"]
)

eval_dataloader = DataLoader(
    eval_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args["batch_size"]
)

# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "layer_norm.weight"]
optimizer_grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": args["weight_decay"],
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]

optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args["learning_rate"], betas=(0.9, 0.99))


# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args["gradient_accumulation_steps"])
if args["max_train_steps"] is None:
    args["max_train_steps"] = args["num_train_epochs"] * num_update_steps_per_epoch
    overrode_max_train_steps = True
lr_scheduler = get_scheduler(
    name=args["lr_scheduler_type"],
    optimizer=optimizer,
    num_warmup_steps=args["num_warmup_steps"] * accelerator.num_processes,
    num_training_steps=args["max_train_steps"]
    if overrode_max_train_steps
    else args["max_train_steps"] * accelerator.num_processes,
)

# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)

# We need to recalculate our total training steps, as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args["gradient_accumulation_steps"])
if overrode_max_train_steps:
    args["max_train_steps"] = args["num_train_epochs"] * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args["num_train_epochs"] = math.ceil(args["max_train_steps"] / num_update_steps_per_epoch)

# Figure out after how many steps we should save the Accelerator states
checkpointing_steps = args["checkpointing_steps"]
if checkpointing_steps is not None and checkpointing_steps.isdigit():
    checkpointing_steps = int(checkpointing_steps)

# We need to initialize the trackers we use, and also store our configuration.
# The trackers initialize automatically on the main process.
if args["with_tracking"]:
    experiment_config = args
    # TensorBoard cannot log Enums, need the raw value (a no-op here, since it is already a string)
    experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"]
    accelerator.init_trackers("clm_no_trainer", experiment_config)


# Train!
total_batch_size = args["per_device_train_batch_size"] * accelerator.num_processes * args["gradient_accumulation_steps"]

logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")

num_train_epochs = args["num_train_epochs"]
per_device_train_batch_size = args["per_device_train_batch_size"]
gradient_acc_steps = args["gradient_accumulation_steps"]
max_train_steps = args["max_train_steps"]
# num_train_steps = args["num_train_steps"]
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {gradient_acc_steps}")
# logger.info(f" Optimization steps per epoch = {num_train_steps}")
logger.info(f" Max optimization steps = {max_train_steps}")

# Only show the progress bar once on each machine.
progress_bar = tqdm(range(max_train_steps), disable=not accelerator.is_local_main_process)  # this range drives the step count
completed_steps = 0
starting_epoch = 0
# Potentially load in the weights and states from a previous save
if args["resume_from_checkpoint"]:
    # print("this should not happen")
    if args["resume_from_checkpoint"] is not None or args["resume_from_checkpoint"] != "":
        checkpoint_path = args["resume_from_checkpoint"]
        print("made it this far!")
        path = os.path.basename(args["resume_from_checkpoint"])
    else:
        # Get the most recent checkpoint
        dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
        dirs.sort(key=os.path.getctime)
        path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        checkpoint_path = path
        path = os.path.basename(checkpoint_path)

    accelerator.print(f"Resumed from checkpoint: {checkpoint_path}")
    accelerator.load_state(checkpoint_path)
    # Extract `epoch_{i}` or `step_{i}`
    training_difference = os.path.splitext(path)[0]

    starting_epoch = 5
    resume_step = 16000

    logger.info(f"Starting epoch = {starting_epoch}, resume step = {resume_step}")

    if "epoch" in training_difference:
        starting_epoch = int(training_difference.replace("epoch_", "")) + 1
        resume_step = None
        completed_steps = starting_epoch * num_update_steps_per_epoch

    else:
        # need to multiply `gradient_accumulation_steps` to reflect real steps
        # resume_step = int(training_difference.replace("step_", "")) * args["gradient_accumulation_steps"]
        resume_step = 16000
        starting_epoch = resume_step // len(train_dataloader)
        completed_steps = resume_step // args["gradient_accumulation_steps"]
        resume_step -= starting_epoch * len(train_dataloader)  # keep only the offset into the current epoch

for epoch in range(starting_epoch, num_train_epochs):
    model.train()
    if args["with_tracking"]:
        total_loss = 0
    if args["resume_from_checkpoint"] and epoch == starting_epoch and resume_step is not None:
        logger.info("correct scenario")
        active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
    else:
        active_dataloader = train_dataloader

    total_steps = num_train_epochs * len(active_dataloader)
    logger.info(f"Total expected steps: {total_steps}")

    for step, batch in enumerate(active_dataloader):
        with accelerator.accumulate(model):
            outputs = model(**batch)
            loss = outputs.loss
            if args["with_tracking"]:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            logger.info(f" Loss = {loss}, epoch = {epoch}, step = {step + resume_step}")
        # Checks if the accelerator has performed an optimization step behind the scenes
        if accelerator.sync_gradients:
            progress_bar.update(1)
            completed_steps += 1

        if isinstance(checkpointing_steps, int):
            if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
                output_dir = f"step_{completed_steps}"
                if args["output_dir"] is not None:
                    output_dir = os.path.join(args["output_dir"], output_dir)
                accelerator.save_state(output_dir)
        if completed_steps >= args["max_train_steps"]:
            break

    logger.info("***** Running validation *****")
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(**batch)
        loss = outputs.loss
        losses.append(accelerator.gather_for_metrics(loss.repeat(args["per_device_eval_batch_size"])))

    losses = torch.cat(losses)
    try:
        eval_loss = torch.mean(losses)
        perplexity = math.exp(eval_loss)
    except OverflowError:
        perplexity = float("inf")


    logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}")

    if args["with_tracking"]:
        accelerator.log(
            {
                "perplexity": perplexity,
                "eval_loss": eval_loss,
                "train_loss": total_loss.item() / len(train_dataloader),
                "epoch": epoch,
                "step": completed_steps,
            },
            step=completed_steps,
        )

    if args["checkpointing_steps"] == "epoch":
        output_dir = f"epoch_{epoch}"
        if args["output_dir"] is not None:
            output_dir = os.path.join(args["output_dir"], output_dir)
        accelerator.save_state(output_dir)
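
An aside on the dataset code above (not part of the diff): `__getitem__` rebuilds the stored embedding with `eval` after stripping the `tensor(...)` wrapper. A minimal sketch of the same parse using `ast.literal_eval`, which accepts only Python literals, assuming the CSV cells look like `tensor([0.1, -0.2, ...])`:

import ast
import torch

raw = "tensor([0.1, -0.2, 0.3])"  # illustrative cell value, not taken from the real datasets
values = ast.literal_eval(raw.replace("tensor(", "").rstrip(")"))
embedding = torch.tensor(values, dtype=torch.float32)
print(embedding.shape)  # torch.Size([3])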
history/aob/train-16batch.py
ADDED
@@ -0,0 +1,415 @@
import os

from handcoded_tokenizer import STLTokenizer
from configuration import STLConfig
from modeling_stldec import STLForCausalLM

from transformers import AutoConfig, AutoModel

import argparse
import json
import logging
import math
import random
from itertools import chain
from pathlib import Path

import datasets
import torch
from torch.utils.data import Dataset
import ast
from accelerate import Accelerator, DistributedType
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from datasets import load_dataset
from huggingface_hub import HfApi
from torch.utils.data import DataLoader
from tqdm.auto import tqdm

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_MAPPING,
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    SchedulerType,
    default_data_collator,
    get_scheduler,
)
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

logger = get_logger(__name__)

# Extend `AutoClasses` to support the custom model
AutoConfig.register("STLdec", STLConfig)
AutoModel.register(STLConfig, STLForCausalLM)

# Initialize the model with random weights and the desired architecture
config = STLConfig()
model = AutoModel.from_config(config)
tokenizer = STLTokenizer('tokenizer_files/tokenizer.json')

args = {
    'dataset_name': None,  # or a custom dataset path
    'train_file': 'datasets/train_set.csv',
    'validation_file': 'datasets/validation_set.csv',
    'output_dir': './comparison',
    'model_name_or_path': 'STLForCausalLM',
    'tokenizer_name': 'STLTokenizer',
    'block_size': 500,
    'batch_size': 32,
    'gradient_accumulation_steps': 1,
    'num_train_epochs': 15,
    'learning_rate': 5e-5,
    'weight_decay': 0.01,
    'seed': 42,
    'with_tracking': True,
    'hub_model_id': None,
    'push_to_hub': False,
    'trust_remote_code': False,
    'overwrite_cache': False,
    'per_device_train_batch_size': 32,
    'per_device_eval_batch_size': 32,
    'checkpointing_steps': '500',
    'resume_from_checkpoint': False,
    'lr_scheduler_type': 'linear',
    'num_warmup_steps': 5000,
    'max_train_steps': 50000,
    # 'lr': 0.01,
    # 'report_to': "tensorboard"
}


# Initialize the accelerator. We let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here, and it will by default pick up all
# supported trackers in the environment.
accelerator_log_kwargs = {}

if args["with_tracking"]:
    # accelerator_log_kwargs["log_with"] = args["report_to"]
    accelerator_log_kwargs["project_dir"] = args["output_dir"]

accelerator = Accelerator(gradient_accumulation_steps=args["gradient_accumulation_steps"], **accelerator_log_kwargs)

# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args["seed"] is not None:
    set_seed(args["seed"])

# Handle the repository creation
if accelerator.is_main_process:
    if args["push_to_hub"]:
        # Retrieve or infer repo_name
        repo_name = args["hub_model_id"]
        if repo_name is None:
            repo_name = Path(args["output_dir"]).absolute().name
        # Create repo and retrieve repo_id (`hub_token` is not set in `args`, so `.get` yields None)
        api = HfApi()
        repo_id = api.create_repo(repo_name, exist_ok=True, token=args.get("hub_token")).repo_id
        with open(os.path.join(args["output_dir"], ".gitignore"), "w+") as gitignore:
            if "step_*" not in gitignore:
                gitignore.write("step_*\n")
            if "epoch_*" not in gitignore:
                gitignore.write("epoch_*\n")
    elif args["output_dir"] is not None:
        os.makedirs(args["output_dir"], exist_ok=True)

accelerator.wait_for_everyone()


if args["dataset_name"] is not None:
    # Downloading and loading a dataset from the hub.
    # (`dataset_config_name` is not set in `args`, so `.get` yields None.)
    raw_datasets = load_dataset(
        args["dataset_name"], args.get("dataset_config_name"), trust_remote_code=args["trust_remote_code"]
    )
    if "validation" not in raw_datasets.keys():
        raw_datasets["validation"] = load_dataset(
            args["dataset_name"],
            args.get("dataset_config_name"),
            trust_remote_code=args["trust_remote_code"],
        )
        raw_datasets["train"] = load_dataset(
            args["dataset_name"],
            args.get("dataset_config_name"),
            trust_remote_code=args["trust_remote_code"],
        )

else:
    data_files = {}
    dataset_args = {}
    if args["train_file"] is not None:
        data_files["train"] = args["train_file"]
        extension = args["train_file"].split(".")[-1]
    if args["validation_file"] is not None:
        data_files["validation"] = args["validation_file"]
        extension = args["validation_file"].split(".")[-1]
    if extension == "txt":
        extension = "text"

    raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)


# Create a `CustomDataset` class to properly format the input data w.r.t. the
# `input_ids`, `labels` and `attention_mask` attributes
class CustomDataset(Dataset):
    def __init__(self, df, device='cpu'):
        self.df = df
        self.device = device

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        # Start from `Encoded_Formula`
        encoded_formula = self.df['Encoded_Formula'][idx]
        encoded_formula = ast.literal_eval(encoded_formula.strip())

        formula_embedding = self.df['Embedding'][idx]
        formula_embedding = formula_embedding.replace("tensor(", "").rstrip(")")
        formula_embedding = eval(formula_embedding)

        input_ids = encoded_formula[:-1]  # all tokens but the last
        labels = encoded_formula[1:]      # all tokens but the first

        # if the id is 1 (i.e. the tokenized `pad`), then neglect that token
        attention_mask = [0 if token == 1 else 1 for token in input_ids]

        input_ids = torch.tensor(input_ids, dtype=torch.long).to(self.device)
        labels = torch.tensor(labels, dtype=torch.long).to(self.device)
        attention_mask = torch.tensor(attention_mask, dtype=torch.long).to(self.device)

        encoder_hidden_states = torch.tensor(formula_embedding, dtype=torch.float32).to(self.device)

        return {
            'input_ids': input_ids,
            'labels': labels,
            'attention_mask': attention_mask,
            'encoder_hidden_states': encoder_hidden_states
        }


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_dataset = CustomDataset(raw_datasets['train'], device=device)
eval_dataset = CustomDataset(raw_datasets['validation'], device=device)


# DataLoaders creation:
train_dataloader = DataLoader(
    train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args["batch_size"]
)

eval_dataloader = DataLoader(
    eval_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args["batch_size"]
)

# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "layer_norm.weight"]
optimizer_grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": args["weight_decay"],
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]

optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args["learning_rate"], betas=(0.9, 0.99))


# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args["gradient_accumulation_steps"])
if args["max_train_steps"] is None:
    args["max_train_steps"] = args["num_train_epochs"] * num_update_steps_per_epoch
    overrode_max_train_steps = True
lr_scheduler = get_scheduler(
    name=args["lr_scheduler_type"],
    optimizer=optimizer,
    num_warmup_steps=args["num_warmup_steps"] * accelerator.num_processes,
    num_training_steps=args["max_train_steps"]
    if overrode_max_train_steps
    else args["max_train_steps"] * accelerator.num_processes,
)

# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)

# We need to recalculate our total training steps, as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args["gradient_accumulation_steps"])
if overrode_max_train_steps:
    args["max_train_steps"] = args["num_train_epochs"] * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args["num_train_epochs"] = math.ceil(args["max_train_steps"] / num_update_steps_per_epoch)

# Figure out after how many steps we should save the Accelerator states
checkpointing_steps = args["checkpointing_steps"]
if checkpointing_steps is not None and checkpointing_steps.isdigit():
    checkpointing_steps = int(checkpointing_steps)

# We need to initialize the trackers we use, and also store our configuration.
# The trackers initialize automatically on the main process.
if args["with_tracking"]:
    experiment_config = args
    # TensorBoard cannot log Enums, need the raw value (a no-op here, since it is already a string)
    experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"]
    accelerator.init_trackers("clm_no_trainer", experiment_config)


# Train!
total_batch_size = args["per_device_train_batch_size"] * accelerator.num_processes * args["gradient_accumulation_steps"]

logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")

num_train_epochs = args["num_train_epochs"]
per_device_train_batch_size = args["per_device_train_batch_size"]
gradient_acc_steps = args["gradient_accumulation_steps"]
max_train_steps = args["max_train_steps"]
# num_train_steps = args["num_train_steps"]
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {gradient_acc_steps}")
# logger.info(f" Optimization steps per epoch = {num_train_steps}")
logger.info(f" Max optimization steps = {max_train_steps}")

# Only show the progress bar once on each machine.
progress_bar = tqdm(range(max_train_steps), disable=not accelerator.is_local_main_process)  # this range drives the step count
completed_steps = 0
starting_epoch = 0
resume_step = 0  # ensure this is defined even when not resuming (it is used in the logging below)
# Potentially load in the weights and states from a previous save
if args["resume_from_checkpoint"]:
    # print("this should not happen")
    if args["resume_from_checkpoint"] is not None or args["resume_from_checkpoint"] != "":
        checkpoint_path = args["resume_from_checkpoint"]
        print("made it this far!")
        path = os.path.basename(args["resume_from_checkpoint"])
    else:
        # Get the most recent checkpoint
        dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
        dirs.sort(key=os.path.getctime)
        path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        checkpoint_path = path
        path = os.path.basename(checkpoint_path)

    accelerator.print(f"Resumed from checkpoint: {checkpoint_path}")
    accelerator.load_state(checkpoint_path)
    # Extract `epoch_{i}` or `step_{i}`
    training_difference = os.path.splitext(path)[0]

    starting_epoch = 0
    resume_step = 0

    logger.info(f"Starting epoch = {starting_epoch}, resume step = {resume_step}")

    if "epoch" in training_difference:
        starting_epoch = int(training_difference.replace("epoch_", "")) + 1
        resume_step = None
        completed_steps = starting_epoch * num_update_steps_per_epoch

    else:
        # need to multiply `gradient_accumulation_steps` to reflect real steps
        # resume_step = int(training_difference.replace("step_", "")) * args["gradient_accumulation_steps"]
        resume_step = 0
        starting_epoch = resume_step // len(train_dataloader)
        completed_steps = resume_step // args["gradient_accumulation_steps"]
        resume_step -= starting_epoch * len(train_dataloader)

for epoch in range(starting_epoch, num_train_epochs):
    model.train()
    if args["with_tracking"]:
        total_loss = 0
    if args["resume_from_checkpoint"] and epoch == starting_epoch and resume_step is not None:
        logger.info("correct scenario")
        active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
    else:
        active_dataloader = train_dataloader

    total_steps = len(train_dataset) / per_device_train_batch_size * num_train_epochs - resume_step
    logger.info(f"Active DataLoader length: {len(active_dataloader)}")
    logger.info(f"Total expected steps: {total_steps}")

    for step, batch in enumerate(active_dataloader):
        # print("entering the training loop")
        with accelerator.accumulate(model):
            outputs = model(**batch)
            loss = outputs.loss
            if args["with_tracking"]:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            logger.info(f" Loss = {loss}, epoch = {epoch}, step = {step + resume_step}")
        # Checks if the accelerator has performed an optimization step behind the scenes
        if accelerator.sync_gradients:
            progress_bar.update(1)
            completed_steps += 1

        if isinstance(checkpointing_steps, int):
            if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
                output_dir = f"step_{completed_steps}"
                if args["output_dir"] is not None:
                    output_dir = os.path.join(args["output_dir"], output_dir)
                accelerator.save_state(output_dir)
        if completed_steps >= args["max_train_steps"]:
            break
        # writer.flush()

    logger.info("***** Running validation *****")
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(**batch)
        loss = outputs.loss
        losses.append(accelerator.gather_for_metrics(loss.repeat(args["per_device_eval_batch_size"])))

    losses = torch.cat(losses)
    try:
        eval_loss = torch.mean(losses)
        perplexity = math.exp(eval_loss)
    except OverflowError:
        perplexity = float("inf")


    logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}")

    if args["with_tracking"]:
        accelerator.log(
            {
                "perplexity": perplexity,
                "eval_loss": eval_loss,
                "train_loss": total_loss.item() / len(train_dataloader),
                "epoch": epoch,
                "step": completed_steps,
            },
            step=completed_steps,
        )

    if args["checkpointing_steps"] == "epoch":
        output_dir = f"epoch_{epoch}"
        if args["output_dir"] is not None:
            output_dir = os.path.join(args["output_dir"], output_dir)
        accelerator.save_state(output_dir)
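
An aside on the checkpoint-resume arithmetic shared by these scripts (not part of the diff): a worked example with illustrative numbers, not taken from an actual run.

resume_step = 16000       # optimizer steps encoded in a checkpoint name like step_16000
batches_per_epoch = 3183  # stand-in for len(train_dataloader)
grad_acc = 1              # gradient_accumulation_steps

starting_epoch = resume_step // batches_per_epoch    # 5 full epochs already completed
completed_steps = resume_step // grad_acc            # 16000 update steps already counted
resume_step -= starting_epoch * batches_per_epoch    # 85 batches into epoch 5 still to skip
print(starting_epoch, completed_steps, resume_step)  # 5 16000 85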
history/aob/train-modified.py
ADDED
@@ -0,0 +1,418 @@
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
from handcoded_tokenizer import STLTokenizer
|
| 4 |
+
from configuration import STLConfig
|
| 5 |
+
from modeling_stldec import STLForCausalLM
|
| 6 |
+
|
| 7 |
+
from transformers import AutoConfig, AutoModel
|
| 8 |
+
|
| 9 |
+
import argparse
|
| 10 |
+
import json
|
| 11 |
+
import logging
|
| 12 |
+
import math
|
| 13 |
+
import random
|
| 14 |
+
from itertools import chain
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
|
| 17 |
+
import datasets
|
| 18 |
+
import torch
|
| 19 |
+
from torch.utils.data import Dataset
|
| 20 |
+
import ast
|
| 21 |
+
from accelerate import Accelerator, DistributedType
|
| 22 |
+
from accelerate.logging import get_logger
|
| 23 |
+
from accelerate.utils import set_seed
|
| 24 |
+
from datasets import load_dataset
|
| 25 |
+
from huggingface_hub import HfApi
|
| 26 |
+
from torch.utils.data import DataLoader
|
| 27 |
+
from tqdm.auto import tqdm
|
| 28 |
+
|
| 29 |
+
import transformers
|
| 30 |
+
from transformers import (
|
| 31 |
+
CONFIG_MAPPING,
|
| 32 |
+
MODEL_MAPPING,
|
| 33 |
+
AutoConfig,
|
| 34 |
+
AutoModelForCausalLM,
|
| 35 |
+
AutoTokenizer,
|
| 36 |
+
SchedulerType,
|
| 37 |
+
default_data_collator,
|
| 38 |
+
get_scheduler,
|
| 39 |
+
)
|
| 40 |
+
from transformers.utils import check_min_version, send_example_telemetry
|
| 41 |
+
from transformers.utils.versions import require_version
|
| 42 |
+
|
| 43 |
+
logger = get_logger(__name__)
|
| 44 |
+
|
| 45 |
+
# Extend `AutoClasses` to support the custom model
|
| 46 |
+
AutoConfig.register("STLdec", STLConfig)
|
| 47 |
+
AutoModel.register(STLConfig, STLForCausalLM)
|
| 48 |
+
|
| 49 |
+
# Initialize the model with random weights and the desired architecture
|
| 50 |
+
config = STLConfig()
|
| 51 |
+
model = AutoModel.from_config(config)
|
| 52 |
+
tokenizer = STLTokenizer('tokenizer_files/tokenizer.json')
|
| 53 |
+
|
| 54 |
+
args = {
|
| 55 |
+
'dataset_name': None, # or a custom dataset path
|
| 56 |
+
'train_file': 'datasets/train_set.csv',
|
| 57 |
+
'validation_file': 'datasets/validation_set.csv',
|
| 58 |
+
'output_dir': './tf_output_test_16batch',
|
| 59 |
+
'model_name_or_path': 'STLForCausalLM',
|
| 60 |
+
'tokenizer_name': 'STLTokenizer',
|
| 61 |
+
'block_size': 500,
|
| 62 |
+
'batch_size': 32,
|
| 63 |
+
'gradient_accumulation_steps': 1,
|
| 64 |
+
'num_train_epochs': 10,
|
| 65 |
+
'checkpoint_epoch': 5,
|
| 66 |
+
'checkpoint_steps': 16000,
|
| 67 |
+
'learning_rate': 5e-5,
|
| 68 |
+
'weight_decay': 0.01,
|
| 69 |
+
'seed': 42,
|
| 70 |
+
'with_tracking': True,
|
| 71 |
+
'hub_model_id': None,
|
| 72 |
+
'push_to_hub': False,
|
| 73 |
+
'trust_remote_code': False,
|
| 74 |
+
'overwrite_cache': False,
|
| 75 |
+
'per_device_train_batch_size': 32,
|
| 76 |
+
'per_device_eval_batch_size': 32,
|
| 77 |
+
'checkpointing_steps': '500',
|
| 78 |
+
'resume_from_checkpoint': 'tf_output_test_16batch/step_16000',
|
| 79 |
+
'lr_scheduler_type': 'linear',
|
| 80 |
+
'num_warmup_steps': 5000,
|
| 81 |
+
'max_train_steps': 50000,
|
| 82 |
+
# 'lr': 0.01,
|
| 83 |
+
# 'report_to': "tensorboard"
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
|
| 88 |
+
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
|
| 89 |
+
# in the environment
|
| 90 |
+
accelerator_log_kwargs = {}
|
| 91 |
+
|
| 92 |
+
if args["with_tracking"]:
|
| 93 |
+
# accelerator_log_kwargs["log_with"] = args["report_to"]
|
| 94 |
+
accelerator_log_kwargs["project_dir"] = args["output_dir"]
|
| 95 |
+
|
| 96 |
+
accelerator = Accelerator(gradient_accumulation_steps=args["gradient_accumulation_steps"], **accelerator_log_kwargs)
|
| 97 |
+
|
| 98 |
+
# Make one log on every process with the configuration for debugging.
|
| 99 |
+
logging.basicConfig(
|
| 100 |
+
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
| 101 |
+
datefmt="%m/%d/%Y %H:%M:%S",
|
| 102 |
+
level=logging.INFO,
|
| 103 |
+
)
|
| 104 |
+
|
| 105 |
+
if accelerator.is_local_main_process:
|
| 106 |
+
datasets.utils.logging.set_verbosity_warning()
|
| 107 |
+
transformers.utils.logging.set_verbosity_info()
|
| 108 |
+
else:
|
| 109 |
+
datasets.utils.logging.set_verbosity_error()
|
| 110 |
+
transformers.utils.logging.set_verbosity_error()
|
| 111 |
+
|
| 112 |
+
# If passed along, set the training seed now.
|
| 113 |
+
if args["seed"] is not None:
|
| 114 |
+
set_seed(args["seed"])
|
| 115 |
+
|
| 116 |
+
# Handle the repository creation
|
| 117 |
+
if accelerator.is_main_process:
|
| 118 |
+
if args["push_to_hub"]:
|
| 119 |
+
# Retrieve of infer repo_name
|
| 120 |
+
repo_name = args["hub_model_id"]
|
| 121 |
+
if repo_name is None:
|
| 122 |
+
repo_name = Path(args["output_dir"]).absolute().name
|
| 123 |
+
# Create repo and retrieve repo_id
|
| 124 |
+
api = HfApi()
|
| 125 |
+
repo_id = api.create_repo(repo_name, exist_ok=True, token=args["hub_token"]).repo_id
|
| 126 |
+
with open(os.path.join(args["output_dir"], ".gitignore"), "w+") as gitignore:
|
| 127 |
+
if "step_*" not in gitignore:
|
| 128 |
+
gitignore.write("step_*\n")
|
| 129 |
+
if "epoch_*" not in gitignore:
|
| 130 |
+
gitignore.write("epoch_*\n")
|
| 131 |
+
elif args["output_dir"] is not None:
|
| 132 |
+
os.makedirs(args["output_dir"], exist_ok=True)
|
| 133 |
+
|
| 134 |
+
accelerator.wait_for_everyone()
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
if args["dataset_name"] is not None:
|
| 138 |
+
# Downloading and loading a dataset from the hub.
|
| 139 |
+
raw_datasets = load_dataset(
|
| 140 |
+
args.dataset_name, args["dataset_config_name"], trust_remote_code=args["trust_remote_code"]
|
| 141 |
+
)
|
| 142 |
+
if "validation" not in raw_datasets.keys():
|
| 143 |
+
raw_datasets["validation"] = load_dataset(
|
| 144 |
+
args["dataset_name"],
|
| 145 |
+
args["dataset_config_name"],
|
| 146 |
+
trust_remote_code=args["trust_remote_code"],
|
| 147 |
+
)
|
| 148 |
+
raw_datasets["train"] = load_dataset(
|
| 149 |
+
args["dataset_name"],
|
| 150 |
+
args["dataset_config_name"],
|
| 151 |
+
trust_remote_code=args["trust_remote_code"],
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
else:
|
| 155 |
+
data_files = {}
|
| 156 |
+
dataset_args = {}
|
| 157 |
+
if args["train_file"] is not None:
|
| 158 |
+
data_files["train"] = args["train_file"]
|
| 159 |
+
extension = args["train_file"].split(".")[-1]
|
| 160 |
+
if args["validation_file"] is not None:
|
| 161 |
+
data_files["validation"] = args["validation_file"]
|
| 162 |
+
extension = args["validation_file"].split(".")[-1]
|
| 163 |
+
if extension == "txt":
|
| 164 |
+
extension = "text"
|
| 165 |
+
|
| 166 |
+
raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
# Create a `CustomDataset` class to format properly the input data wrt the
|
| 170 |
+
# `input_ids`, `labels` and `attention_mask` attributes
|
| 171 |
+
class CustomDataset(Dataset):
|
| 172 |
+
def __init__(self, df, device='cpu'):
|
| 173 |
+
self.df = df
|
| 174 |
+
self.device = device
|
| 175 |
+
|
| 176 |
+
def __len__(self):
|
| 177 |
+
return len(self.df)
|
| 178 |
+
|
| 179 |
+
def __getitem__(self, idx):
|
| 180 |
+
# Start from `Encoded_Formula`
|
| 181 |
+
encoded_formula = self.df['Encoded_Formula'][idx]
|
| 182 |
+
encoded_formula = ast.literal_eval(encoded_formula.strip())
|
| 183 |
+
|
| 184 |
+
formula_embedding = self.df['Embedding'][idx]
|
| 185 |
+
formula_embedding = formula_embedding.replace("tensor(", "").rstrip(")")
|
| 186 |
+
formula_embedding = eval(formula_embedding)
|
| 187 |
+
|
| 188 |
+
input_ids = encoded_formula[:-1] # Tutti tranne l'ultimo
|
| 189 |
+
labels = encoded_formula[1:] # Tutti tranne il primo
|
| 190 |
+
|
| 191 |
+
attention_mask = [0 if token == '1' else 1 for token in input_ids]
|
| 192 |
+
# if 1 (i.e. tokenized `pad`), then neglect that token
|
| 193 |
+
|
| 194 |
+
input_ids = torch.tensor(input_ids, dtype=torch.long).to(self.device)
|
| 195 |
+
labels = torch.tensor(labels, dtype=torch.long).to(self.device)
|
| 196 |
+
attention_mask = torch.tensor(attention_mask, dtype=torch.long).to(self.device)
|
| 197 |
+
|
| 198 |
+
encoder_hidden_states = torch.tensor(formula_embedding, dtype=torch.float32).to(self.device)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
return {
|
| 202 |
+
'input_ids': input_ids,
|
| 203 |
+
'labels': labels,
|
| 204 |
+
'attention_mask': attention_mask,
|
| 205 |
+
'encoder_hidden_states': encoder_hidden_states
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
| 210 |
+
train_dataset = CustomDataset(raw_datasets['train'], device=device)
|
| 211 |
+
eval_dataset = CustomDataset(raw_datasets['validation'], device=device)
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
# DataLoaders creation:
|
| 215 |
+
train_dataloader = DataLoader(
|
| 216 |
+
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args["batch_size"]
|
| 217 |
+
)
|
| 218 |
+
|
| 219 |
+
eval_dataloader = DataLoader(
|
| 220 |
+
eval_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args["batch_size"]
|
| 221 |
+
)
|
| 222 |
+
|
| 223 |
+
# Optimizer
|
| 224 |
+
# Split weights in two groups, one with weight decay and the other not.
|
| 225 |
+
no_decay = ["bias", "layer_norm.weight"]
|
| 226 |
+
optimizer_grouped_parameters = [
|
| 227 |
+
{
|
| 228 |
+
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
|
| 229 |
+
"weight_decay": args["weight_decay"],
|
| 230 |
+
},
|
| 231 |
+
{
|
| 232 |
+
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
|
| 233 |
+
"weight_decay": 0.0,
|
| 234 |
+
},
|
| 235 |
+
]
|
| 236 |
+
|
| 237 |
+
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args["learning_rate"], betas=(0.9, 0.99))
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
+# Scheduler and math around the number of training steps.
+overrode_max_train_steps = False
+num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args["gradient_accumulation_steps"])
+if args["max_train_steps"] is None:
+    args["max_train_steps"] = args["num_train_epochs"] * num_update_steps_per_epoch
+    overrode_max_train_steps = True
+lr_scheduler = get_scheduler(
+    name=args["lr_scheduler_type"],
+    optimizer=optimizer,
+    num_warmup_steps=args["num_warmup_steps"] * accelerator.num_processes,
+    num_training_steps=args["max_train_steps"]
+    if overrode_max_train_steps
+    else args["max_train_steps"] * accelerator.num_processes,
+)
+
+# Prepare everything with our `accelerator`.
+model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+)
+
+# We need to recalculate our total training steps as the size of the training dataloader may have changed.
+num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args["gradient_accumulation_steps"])
+if overrode_max_train_steps:
+    args["max_train_steps"] = args["num_train_epochs"] * num_update_steps_per_epoch
+# Afterwards we recalculate our number of training epochs
+args["num_train_epochs"] = math.ceil(args["max_train_steps"] / num_update_steps_per_epoch)
+
+# Figure out how many steps we should save the Accelerator states
+checkpointing_steps = args["checkpointing_steps"]
+if checkpointing_steps is not None and checkpointing_steps.isdigit():
+    checkpointing_steps = int(checkpointing_steps)
+
+# We need to initialize the trackers we use, and also store our configuration.
+# The trackers initialize automatically on the main process.
+if args["with_tracking"]:
+    experiment_config = args
+    # TensorBoard cannot log Enums, need the raw value
+    experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"]
+    accelerator.init_trackers("clm_no_trainer", experiment_config)
+
+
+# Train!
+total_batch_size = args["per_device_train_batch_size"] * accelerator.num_processes * args["gradient_accumulation_steps"]
+num_train_epochs = args["num_train_epochs"]
+per_device_train_batch_size = args["per_device_train_batch_size"]
+gradient_acc_steps = args["gradient_accumulation_steps"]
+max_train_steps = args["max_train_steps"]
+checkpoint_epoch = args["checkpoint_epoch"]
+completed_steps = args["checkpoint_steps"]
+
+
+logger.info("***** Running training *****")
+logger.info(f"  Num examples = {len(train_dataset)}")
+logger.info(f"  Max Training Epochs = {num_train_epochs}")
+logger.info(f"  Checkpointed Training Epoch = {checkpoint_epoch}")
+logger.info(f"  Instantaneous batch size per device = {per_device_train_batch_size}")
+logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
+logger.info(f"  Gradient Accumulation steps = {gradient_acc_steps}")
+logger.info(f"  Max optimization steps = {max_train_steps}")
+logger.info(f"  Already completed optimization steps = {completed_steps}")
+
+progress_bar = tqdm(range(max_train_steps), disable=not accelerator.is_local_main_process, initial=completed_steps)
+
+
+# Potentially load in the weights and states from a previous save
+if args["resume_from_checkpoint"]:
+    # print("this should not happen")
+    if args["resume_from_checkpoint"] is not None or args["resume_from_checkpoint"] != "":
+        checkpoint_path = args["resume_from_checkpoint"]
+        path = os.path.basename(args["resume_from_checkpoint"])
+    else:
+        # Get the most recent checkpoint
+        dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
+        dirs.sort(key=os.path.getctime)
+        path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
+        checkpoint_path = path
+        path = os.path.basename(checkpoint_path)
+
+    accelerator.print(f"Resumed from checkpoint: {checkpoint_path}")
+    accelerator.load_state(checkpoint_path)
+
+    # Extract `epoch_{i}` or `step_{i}`
+
+    # starting_epoch = 5
+    # resume_step = 15500
+
+    # logger.info(f"Starting epoch = {starting_epoch}, resume step = {resume_step}")
+
+    # if "epoch" in training_difference:
+    #     starting_epoch = int(training_difference.replace("epoch_", "")) + 1
+    #     resume_step = None
+    #     completed_steps = starting_epoch * num_update_steps_per_epoch
+
+    # else:
+    #     # need to multiply `gradient_accumulation_steps` to reflect real steps
+    #     # resume_step = int(training_difference.replace("step_", "")) * args["gradient_accumulation_steps"]
+    #     resume_step = 15500
+    #     starting_epoch = resume_step // len(train_dataloader)
+    #     completed_steps = resume_step // args["gradient_accumulation_steps"]
+    #     resume_step -= starting_epoch * len(train_dataloader)
+
+for epoch in range(checkpoint_epoch, num_train_epochs):
+    model.train()
+    if args["with_tracking"]:
+        total_loss = 0
+    if args["resume_from_checkpoint"] and epoch == checkpoint_epoch and completed_steps is not None:
+        active_dataloader = accelerator.skip_first_batches(train_dataloader, completed_steps)
+    else:
+        active_dataloader = train_dataloader
+
+    total_steps = num_train_epochs * len(active_dataloader)
+    logger.info(f"Total expected steps: {total_steps}")
+
+    for step, batch in enumerate(active_dataloader):
+        # print("entering the training loop")
+        with accelerator.accumulate(model):
+            outputs = model(**batch)
+            loss = outputs.loss
+            if args["with_tracking"]:
+                total_loss += loss.detach().float()
+            accelerator.backward(loss)
+            optimizer.step()
+            lr_scheduler.step()
+            optimizer.zero_grad()
+            # `epoch` is the loop variable here (the original logged the undefined `starting_epoch`)
+            logger.info(f" Loss = {loss}, epoch = {epoch}, step = {step + completed_steps}")
+        # Checks if the accelerator has performed an optimization step behind the scenes
+        if accelerator.sync_gradients:
+            progress_bar.update(1)
+            completed_steps += 1
+
+        if isinstance(checkpointing_steps, int):
+            if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
+                output_dir = f"step_{completed_steps}"
+                if args["output_dir"] is not None:
+                    output_dir = os.path.join(args["output_dir"], output_dir)
+                accelerator.save_state(output_dir)
+        if completed_steps >= args["max_train_steps"]:
+            break
+
+
+    logger.info("***** Running validation *****")
+    model.eval()
+    losses = []
+    for step, batch in enumerate(eval_dataloader):
+        with torch.no_grad():
+            outputs = model(**batch)
+        loss = outputs.loss
+        losses.append(accelerator.gather_for_metrics(loss.repeat(args["per_device_eval_batch_size"])))
+
+    losses = torch.cat(losses)
+    try:
+        eval_loss = torch.mean(losses)
+        perplexity = math.exp(eval_loss)
+    except OverflowError:
+        perplexity = float("inf")
+
+    logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}")
+
+    if args["with_tracking"]:
+        accelerator.log(
+            {
+                "perplexity": perplexity,
+                "eval_loss": eval_loss,
+                "train_loss": total_loss.item() / len(train_dataloader),
+                "epoch": epoch,
+                "step": completed_steps,
+            },
+            step=completed_steps,
+        )
+
+    if args["checkpointing_steps"] == "epoch":
+        output_dir = f"epoch_{epoch}"
+        if args["output_dir"] is not None:
+            output_dir = os.path.join(args["output_dir"], output_dir)
+        accelerator.save_state(output_dir)
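This script resumes from a `step_{N}` directory via the `checkpoint_epoch` / `checkpoint_steps` entries of `args`. A minimal sketch of that bookkeeping in isolation (illustrative only; it assumes the `accelerator`, `args`, and `train_dataloader` objects defined above, an Accelerate version that provides `accelerator.skip_first_batches`, and a checkpoint previously written by `accelerator.save_state`; the path is one used elsewhere in this repo, shown here as an example value):

# Hypothetical resume values; in the script they come from args["checkpoint_epoch"]
# and args["checkpoint_steps"] rather than from parsing the folder name.
args["resume_from_checkpoint"] = "tf_output_test_16batch/step_16000"
args["checkpoint_epoch"] = 5       # epoch in progress when the state was saved
args["checkpoint_steps"] = 16000   # optimizer steps already completed

accelerator.load_state(args["resume_from_checkpoint"])
# On the first resumed epoch, drop the batches that were already consumed:
active_dataloader = accelerator.skip_first_batches(train_dataloader, args["checkpoint_steps"])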
history/aob/train-oldfile.py
ADDED
@@ -0,0 +1,414 @@
+import os
+
+from handcoded_tokenizer import STLTokenizer
+from configuration import STLConfig
+from modeling_stldec import STLForCausalLM
+
+from transformers import AutoConfig, AutoModel
+
+import argparse
+import json
+import logging
+import math
+import random
+from itertools import chain
+from pathlib import Path
+
+import datasets
+import torch
+from torch.utils.data import Dataset
+import ast
+from accelerate import Accelerator, DistributedType
+from accelerate.logging import get_logger
+from accelerate.utils import set_seed
+from datasets import load_dataset
+from huggingface_hub import HfApi
+from torch.utils.data import DataLoader
+from tqdm.auto import tqdm
+
+import transformers
+from transformers import (
+    CONFIG_MAPPING,
+    MODEL_MAPPING,
+    AutoConfig,
+    AutoModelForCausalLM,
+    AutoTokenizer,
+    SchedulerType,
+    default_data_collator,
+    get_scheduler,
+)
+from transformers.utils import check_min_version, send_example_telemetry
+from transformers.utils.versions import require_version
+
+logger = get_logger(__name__)
+
+# Extend `AutoClasses` to support the custom model
+AutoConfig.register("STLdec", STLConfig)
+AutoModel.register(STLConfig, STLForCausalLM)
+
+# Initialize the model with random weights and the desired architecture
+config = STLConfig()
+model = AutoModel.from_config(config)
+tokenizer = STLTokenizer('tokenizer_files/tokenizer.json')
+
+args = {
+    'dataset_name': None,  # or a custom dataset path
+    'train_file': 'datasets/train_set.csv',
+    'validation_file': 'datasets/validation_set.csv',
+    'output_dir': './tf_output_test_16batch',
+    'model_name_or_path': 'STLForCausalLM',
+    'tokenizer_name': 'STLTokenizer',
+    'block_size': 500,
+    'batch_size': 32,
+    'gradient_accumulation_steps': 1,
+    'num_train_epochs': 10,
+    'learning_rate': 5e-5,
+    'weight_decay': 0.01,
+    'seed': 42,
+    'with_tracking': True,
+    'hub_model_id': None,
+    'push_to_hub': False,
+    'trust_remote_code': False,
+    'overwrite_cache': False,
+    'per_device_train_batch_size': 32,
+    'per_device_eval_batch_size': 32,
+    'checkpointing_steps': '500',
+    'resume_from_checkpoint': 'tf_output_test_16batch/step_16000',
+    'lr_scheduler_type': 'linear',
+    'num_warmup_steps': 5000,
+    'max_train_steps': 50000,
+    # 'lr': 0.01,
+    # 'report_to': "tensorboard"
+}
+
+
+# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
+# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
+# in the environment
+accelerator_log_kwargs = {}
+
+if args["with_tracking"]:
+    # accelerator_log_kwargs["log_with"] = args["report_to"]
+    accelerator_log_kwargs["project_dir"] = args["output_dir"]
+
+accelerator = Accelerator(gradient_accumulation_steps=args["gradient_accumulation_steps"], **accelerator_log_kwargs)
+
+# Make one log on every process with the configuration for debugging.
+logging.basicConfig(
+    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+    datefmt="%m/%d/%Y %H:%M:%S",
+    level=logging.INFO,
+)
+
+if accelerator.is_local_main_process:
+    datasets.utils.logging.set_verbosity_warning()
+    transformers.utils.logging.set_verbosity_info()
+else:
+    datasets.utils.logging.set_verbosity_error()
+    transformers.utils.logging.set_verbosity_error()
+
+# If passed along, set the training seed now.
+if args["seed"] is not None:
+    set_seed(args["seed"])
+
+# Handle the repository creation
+if accelerator.is_main_process:
+    if args["push_to_hub"]:
+        # Retrieve or infer repo_name
+        repo_name = args["hub_model_id"]
+        if repo_name is None:
+            repo_name = Path(args["output_dir"]).absolute().name
+        # Create repo and retrieve repo_id
+        api = HfApi()
+        repo_id = api.create_repo(repo_name, exist_ok=True, token=args["hub_token"]).repo_id
+        with open(os.path.join(args["output_dir"], ".gitignore"), "w+") as gitignore:
+            if "step_*" not in gitignore:
+                gitignore.write("step_*\n")
+            if "epoch_*" not in gitignore:
+                gitignore.write("epoch_*\n")
+    elif args["output_dir"] is not None:
+        os.makedirs(args["output_dir"], exist_ok=True)
+
+accelerator.wait_for_everyone()
+
+
+if args["dataset_name"] is not None:
+    # Downloading and loading a dataset from the hub.
+    raw_datasets = load_dataset(
+        args["dataset_name"], args["dataset_config_name"], trust_remote_code=args["trust_remote_code"]
+    )
+    if "validation" not in raw_datasets.keys():
+        raw_datasets["validation"] = load_dataset(
+            args["dataset_name"],
+            args["dataset_config_name"],
+            trust_remote_code=args["trust_remote_code"],
+        )
+        raw_datasets["train"] = load_dataset(
+            args["dataset_name"],
+            args["dataset_config_name"],
+            trust_remote_code=args["trust_remote_code"],
+        )
+
+else:
+    data_files = {}
+    dataset_args = {}
+    if args["train_file"] is not None:
+        data_files["train"] = args["train_file"]
+        extension = args["train_file"].split(".")[-1]
+    if args["validation_file"] is not None:
+        data_files["validation"] = args["validation_file"]
+        extension = args["validation_file"].split(".")[-1]
+    if extension == "txt":
+        extension = "text"
+
+    raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)
+
+
+# Create a `CustomDataset` class to format properly the input data wrt the
+# `input_ids`, `labels` and `attention_mask` attributes
+class CustomDataset(Dataset):
+    def __init__(self, df, device='cpu'):
+        self.df = df
+        self.device = device
+
+    def __len__(self):
+        return len(self.df)
+
+    def __getitem__(self, idx):
+        # Start from `Encoded_Formula`
+        encoded_formula = self.df['Encoded_Formula'][idx]
+        encoded_formula = ast.literal_eval(encoded_formula.strip())
+
+        formula_embedding = self.df['Embedding'][idx]
+        formula_embedding = formula_embedding.replace("tensor(", "").rstrip(")")
+        formula_embedding = eval(formula_embedding)
+
+        input_ids = encoded_formula[:-1]  # all tokens except the last
+        labels = encoded_formula[1:]  # all tokens except the first
+
+        attention_mask = [0 if token == 1 else 1 for token in input_ids]  # compare against the integer pad id (ast.literal_eval yields ints)
+        # if 1 (i.e. tokenized `pad`), then neglect that token
+
+        input_ids = torch.tensor(input_ids, dtype=torch.long).to(self.device)
+        labels = torch.tensor(labels, dtype=torch.long).to(self.device)
+        attention_mask = torch.tensor(attention_mask, dtype=torch.long).to(self.device)
+
+        encoder_hidden_states = torch.tensor(formula_embedding, dtype=torch.float32).to(self.device)
+
+
+        return {
+            'input_ids': input_ids,
+            'labels': labels,
+            'attention_mask': attention_mask,
+            'encoder_hidden_states': encoder_hidden_states
+        }
+
+
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+train_dataset = CustomDataset(raw_datasets['train'], device=device)
+eval_dataset = CustomDataset(raw_datasets['validation'], device=device)
+
+
+# DataLoaders creation:
+train_dataloader = DataLoader(
+    train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args["batch_size"]
+)
+
+eval_dataloader = DataLoader(
+    eval_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args["batch_size"]
+)
+
+# Optimizer
+# Split weights in two groups, one with weight decay and the other not.
+no_decay = ["bias", "layer_norm.weight"]
+optimizer_grouped_parameters = [
+    {
+        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
+        "weight_decay": args["weight_decay"],
+    },
+    {
+        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
+        "weight_decay": 0.0,
+    },
+]
+
+optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args["learning_rate"], betas=(0.9, 0.99))
+
+
+# Scheduler and math around the number of training steps.
+overrode_max_train_steps = False
+num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args["gradient_accumulation_steps"])
+if args["max_train_steps"] is None:
+    args["max_train_steps"] = args["num_train_epochs"] * num_update_steps_per_epoch
+    overrode_max_train_steps = True
+lr_scheduler = get_scheduler(
+    name=args["lr_scheduler_type"],
+    optimizer=optimizer,
+    num_warmup_steps=args["num_warmup_steps"] * accelerator.num_processes,
+    num_training_steps=args["max_train_steps"]
+    if overrode_max_train_steps
+    else args["max_train_steps"] * accelerator.num_processes,
+)
+
+# Prepare everything with our `accelerator`.
+model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+)
+
+# We need to recalculate our total training steps as the size of the training dataloader may have changed.
+num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args["gradient_accumulation_steps"])
+if overrode_max_train_steps:
+    args["max_train_steps"] = args["num_train_epochs"] * num_update_steps_per_epoch
+# Afterwards we recalculate our number of training epochs
+args["num_train_epochs"] = math.ceil(args["max_train_steps"] / num_update_steps_per_epoch)
+
+# Figure out how many steps we should save the Accelerator states
+checkpointing_steps = args["checkpointing_steps"]
+if checkpointing_steps is not None and checkpointing_steps.isdigit():
+    checkpointing_steps = int(checkpointing_steps)
+
+# We need to initialize the trackers we use, and also store our configuration.
+# The trackers initialize automatically on the main process.
+if args["with_tracking"]:
+    experiment_config = args
+    # TensorBoard cannot log Enums, need the raw value
+    experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"]
+    accelerator.init_trackers("clm_no_trainer", experiment_config)
+
+
+# Train!
+total_batch_size = args["per_device_train_batch_size"] * accelerator.num_processes * args["gradient_accumulation_steps"]
+
+logger.info("***** Running training *****")
+logger.info(f"  Num examples = {len(train_dataset)}")
+
+num_train_epochs = args["num_train_epochs"]
+per_device_train_batch_size = args["per_device_train_batch_size"]
+gradient_acc_steps = args["gradient_accumulation_steps"]
+max_train_steps = args["max_train_steps"]
+# num_train_steps = args["num_train_steps"]
+logger.info(f"  Num Epochs = {num_train_epochs}")
+logger.info(f"  Instantaneous batch size per device = {per_device_train_batch_size}")
+logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
+logger.info(f"  Gradient Accumulation steps = {gradient_acc_steps}")
+# logger.info(f"  Optimization steps per epoch = {num_train_steps}")
+logger.info(f"  Max optimization steps = {max_train_steps}")
+
+# Only show the progress bar once on each machine.
+progress_bar = tqdm(range(max_train_steps), disable=not accelerator.is_local_main_process)  # these are the 1000 steps!
+completed_steps = 0
+starting_epoch = 0
+# Potentially load in the weights and states from a previous save
+if args["resume_from_checkpoint"]:
+    # print("this should not happen")
+    if args["resume_from_checkpoint"] is not None or args["resume_from_checkpoint"] != "":
+        checkpoint_path = args["resume_from_checkpoint"]
+        print("got this far!")
+        path = os.path.basename(args["resume_from_checkpoint"])
+    else:
+        # Get the most recent checkpoint
+        dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
+        dirs.sort(key=os.path.getctime)
+        path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
+        checkpoint_path = path
+        path = os.path.basename(checkpoint_path)
+
+    accelerator.print(f"Resumed from checkpoint: {checkpoint_path}")
+    accelerator.load_state(checkpoint_path)
+    # Extract `epoch_{i}` or `step_{i}`
+    training_difference = os.path.splitext(path)[0]
+
+    starting_epoch = 5
+    resume_step = 16000
+
+    logger.info(f"Starting epoch = {starting_epoch}, resume step = {resume_step}")
+
+    if "epoch" in training_difference:
+        starting_epoch = int(training_difference.replace("epoch_", "")) + 1
+        resume_step = None
+        completed_steps = starting_epoch * num_update_steps_per_epoch
+
+    else:
+        # need to multiply `gradient_accumulation_steps` to reflect real steps
+        resume_step = int(training_difference.replace("step_", "")) * args["gradient_accumulation_steps"]
+        resume_step = 16000
+        starting_epoch = resume_step // len(train_dataloader) - 1
+        completed_steps = resume_step // args["gradient_accumulation_steps"]
+        resume_step -= starting_epoch * len(train_dataloader)
+
+for epoch in range(starting_epoch, num_train_epochs):
+    model.train()
+    if args["with_tracking"]:
+        total_loss = 0
+    if args["resume_from_checkpoint"] and epoch == starting_epoch and resume_step is not None:
+        logger.info("correct scenario")
+        active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
+    else:
+        active_dataloader = train_dataloader
+
+    total_steps = num_train_epochs * len(active_dataloader)
+    logger.info(f"Total expected steps: {total_steps}")
+
+    for step, batch in enumerate(active_dataloader):
+        # print("entering the training loop")
+        with accelerator.accumulate(model):
+            outputs = model(**batch)
+            loss = outputs.loss
+            if args["with_tracking"]:
+                total_loss += loss.detach().float()
+            accelerator.backward(loss)
+            optimizer.step()
+            lr_scheduler.step()
+            optimizer.zero_grad()
+            logger.info(f" Loss = {loss}, epoch = {starting_epoch}, step = {step + resume_step}")
+        # Checks if the accelerator has performed an optimization step behind the scenes
+        if accelerator.sync_gradients:
+            progress_bar.update(1)
+            completed_steps += 1
+
+        if isinstance(checkpointing_steps, int):
+            if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
+                output_dir = f"step_{completed_steps}"
+                if args["output_dir"] is not None:
+                    output_dir = os.path.join(args["output_dir"], output_dir)
+                accelerator.save_state(output_dir)
+        if completed_steps >= args["max_train_steps"]:
+            break
+    # writer.flush()
+
+    logger.info("***** Running validation *****")
+    model.eval()
+    losses = []
+    for step, batch in enumerate(eval_dataloader):
+        with torch.no_grad():
+            outputs = model(**batch)
+        loss = outputs.loss
+        losses.append(accelerator.gather_for_metrics(loss.repeat(args["per_device_eval_batch_size"])))
+
+    losses = torch.cat(losses)
+    try:
+        eval_loss = torch.mean(losses)
+        perplexity = math.exp(eval_loss)
+    except OverflowError:
+        perplexity = float("inf")
+
+
+    logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}")
+
+    if args["with_tracking"]:
+        accelerator.log(
+            {
+                "perplexity": perplexity,
+                "eval_loss": eval_loss,
+                "train_loss": total_loss.item() / len(train_dataloader),
+                "epoch": epoch,
+                "step": completed_steps,
+            },
+            step=completed_steps,
+        )
+
+    if args["checkpointing_steps"] == "epoch":
+        output_dir = f"epoch_{epoch}"
+        if args["output_dir"] is not None:
+            output_dir = os.path.join(args["output_dir"], output_dir)
+        accelerator.save_state(output_dir)
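The resume block above hardcodes `starting_epoch = 5` and `resume_step = 16000` for one specific run. For reference, a sketch of the general bookkeeping this shortcut replaces, reconstructed from the surrounding code of the same file (`path` is the checkpoint folder name, e.g. `epoch_4` or `step_16000`; nothing outside this script is assumed):

# General resume bookkeeping (illustrative; the script above bypasses it with fixed values).
training_difference = os.path.splitext(path)[0]
if "epoch" in training_difference:
    starting_epoch = int(training_difference.replace("epoch_", "")) + 1
    resume_step = None
    completed_steps = starting_epoch * num_update_steps_per_epoch
else:
    # multiply by gradient_accumulation_steps to recover raw batch steps
    resume_step = int(training_difference.replace("step_", "")) * args["gradient_accumulation_steps"]
    starting_epoch = resume_step // len(train_dataloader)
    completed_steps = resume_step // args["gradient_accumulation_steps"]
    resume_step -= starting_epoch * len(train_dataloader)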
history/aob/train16.sh
ADDED
@@ -0,0 +1,36 @@
+#!/bin/bash
+#SBATCH --no-requeue
+#SBATCH --job-name="train16"
+#SBATCH --account IscrC_IRA-LLMs
+#SBATCH --partition=boost_usr_prod
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --gres=gpu:1
+#SBATCH --exclusive
+#SBATCH --time=20:00:00
+#SBATCH --mem=471G
+#SBATCH --output=slurm_outputs/train_comparison.out
+
+module load python/3.11.6--gcc--8.5.0
+
+echo "Running on $SLURM_NNODES nodes"
+
+# Standard preamble for debugging
+echo "---------------------------------------------"
+echo "SLURM job ID: $SLURM_JOB_ID"
+echo "SLURM job node list: $SLURM_JOB_NODELIST"
+echo "DATE: $(date)"
+echo "---------------------------------------------"
+
+
+source /leonardo/home/userexternal/scanduss/.venv/bin/activate
+
+
+# Needed exports
+# export <export_name>=<export_value>
+# variables
+
+srun python3 train-16batch.py
+
+
+echo "DONE!"
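The job is submitted with `sbatch train16.sh`. Note that the `--output` directory (`slurm_outputs/`) should exist before submission, since Slurm does not create it and the job log cannot be written otherwise.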
history/aob/utils2.py
ADDED
@@ -0,0 +1,739 @@
+import ast
+import copy
+import math
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import pandas as pd
+import torch
+import torch.utils.checkpoint
+from torch import nn
+import torch.nn.functional as F
+from torch.utils.data import Dataset
+
+from transformers.modeling_utils import PreTrainedModel
+from configuration import STLConfig
+from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+
+import copy
+import pickle
+import os
+from collections import deque
+
+from stl import *
+
+from nltk.translate.bleu_score import sentence_bleu
+from handcoded_tokenizer import STLTokenizer
+
+import networkx as nx
+import phis_generator_depth
+
+from datasets import load_dataset
+
+############################################################################################################################
+
+def load_pickle(path):
+    with open(path, 'rb') as f:
+        x = pickle.load(f)
+    return x
+
+
+def dump_pickle(name, thing):
+    with open(name + '.pickle', 'wb') as f:
+        pickle.dump(thing, f)
+
+
+def set_time_thresholds(st):
+    unbound, right_unbound = [True, False]
+    left_time_bound, right_time_bound = [0, 0]
+    if st[-1] == ']':
+        unbound = False
+        time_thresholds = st[st.index('[')+1:-1].split(",")
+        left_time_bound = int(time_thresholds[0])
+        if time_thresholds[1] == 'inf':
+            right_unbound = True
+        else:
+            right_time_bound = int(time_thresholds[1])-1
+    return unbound, right_unbound, left_time_bound, right_time_bound
+
+
+def from_string_to_formula(st):
+    root_arity = 2 if st.startswith('(') else 1
+    st_split = st.split()
+    if root_arity <= 1:
+        root_op_str = copy.deepcopy(st_split[0])
+        if root_op_str.startswith('x'):
+            atom_sign = True if st_split[1] == '<=' else False
+            root_phi = Atom(var_index=int(st_split[0][2]), lte=atom_sign, threshold=float(st_split[2]))
+            return root_phi
+        else:
+            assert (root_op_str.startswith('not') or root_op_str.startswith('eventually')
+                    or root_op_str.startswith('always'))
+            current_st = copy.deepcopy(st_split[2:-1])
+            if root_op_str == 'not':
+                root_phi = Not(child=from_string_to_formula(' '.join(current_st)))
+            elif root_op_str.startswith('eventually'):
+                unbound, right_unbound, left_time_bound, right_time_bound = set_time_thresholds(root_op_str)
+                root_phi = Eventually(child=from_string_to_formula(' '.join(current_st)), unbound=unbound,
+                                      right_unbound=right_unbound, left_time_bound=left_time_bound,
+                                      right_time_bound=right_time_bound)
+            else:
+                unbound, right_unbound, left_time_bound, right_time_bound = set_time_thresholds(root_op_str)
+                root_phi = Globally(child=from_string_to_formula(' '.join(current_st)), unbound=unbound,
+                                    right_unbound=right_unbound, left_time_bound=left_time_bound,
+                                    right_time_bound=right_time_bound)
+    else:
+        # 1 - delete everything which is contained in other sets of parenthesis (if any)
+        current_st = copy.deepcopy(st_split[1:-1])
+        if '(' in current_st:
+            par_queue = deque()
+            par_idx_list = []
+            for i, sub in enumerate(current_st):
+                if sub == '(':
+                    par_queue.append(i)
+                elif sub == ')':
+                    par_idx_list.append(tuple([par_queue.pop(), i]))
+            # open_par_idx, close_par_idx = [current_st.index(p) for p in ['(', ')']]
+            # union of parentheses range --> from these we may extract the substrings to be the children!!!
+            children_range = []
+            for begin, end in sorted(par_idx_list):
+                if children_range and children_range[-1][1] >= begin - 1:
+                    children_range[-1][1] = max(children_range[-1][1], end)
+                else:
+                    children_range.append([begin, end])
+            n_children = len(children_range)
+            assert (n_children in [1, 2])
+            if n_children == 1:
+                # one of the children is a variable --> need to individuate it
+                var_child_idx = 1 if children_range[0][0] <= 1 else 0  # 0 is left child, 1 is right child
+                if children_range[0][0] != 0 and current_st[children_range[0][0] - 1][0:2] in ['no', 'ev', 'al']:
+                    children_range[0][0] -= 1
+                left_child_str = current_st[:3] if var_child_idx == 0 else \
+                    current_st[children_range[0][0]:children_range[0][1] + 1]
+                right_child_str = current_st[-3:] if var_child_idx == 1 else \
+                    current_st[children_range[0][0]:children_range[0][1] + 1]
+                root_op_str = current_st[children_range[0][1] + 1] if var_child_idx == 1 else \
+                    current_st[children_range[0][0] - 1]
+                assert (root_op_str[:2] in ['an', 'or', 'un'])
+            else:
+                if children_range[0][0] != 0 and current_st[children_range[0][0] - 1][0:2] in ['no', 'ev', 'al']:
+                    children_range[0][0] -= 1
+                if current_st[children_range[1][0] - 1][0:2] in ['no', 'ev', 'al']:
+                    children_range[1][0] -= 1
+                # if there are two children, with parentheses, the element in the middle is the root
+                root_op_str = current_st[children_range[0][1] + 1]
+                assert (root_op_str[:2] in ['an', 'or', 'un'])
+                left_child_str = current_st[children_range[0][0]:children_range[0][1] + 1]
+                right_child_str = current_st[children_range[1][0]:children_range[1][1] + 1]
+        else:
+            # no parentheses means that both children are variables
+            left_child_str = current_st[:3]
+            right_child_str = current_st[-3:]
+            root_op_str = current_st[3]
+        left_child_str = ' '.join(left_child_str)
+        right_child_str = ' '.join(right_child_str)
+        if root_op_str == 'and':
+            root_phi = And(left_child=from_string_to_formula(left_child_str),
+                           right_child=from_string_to_formula(right_child_str))
+        elif root_op_str == 'or':
+            root_phi = Or(left_child=from_string_to_formula(left_child_str),
+                          right_child=from_string_to_formula(right_child_str))
+        else:
+            unbound, right_unbound, left_time_bound, right_time_bound = set_time_thresholds(root_op_str)
+            root_phi = Until(left_child=from_string_to_formula(left_child_str),
+                             right_child=from_string_to_formula(right_child_str),
+                             unbound=unbound, right_unbound=right_unbound, left_time_bound=left_time_bound,
+                             right_time_bound=right_time_bound)
+    return root_phi
+
+
+def scale_trajectories(traj):
+    traj_min = torch.min(torch.min(traj, dim=0)[0], dim=0)[0]
+    traj_max = torch.max(torch.max(traj, dim=0)[0], dim=0)[0]
+    scaled_traj = -1 + 2*(traj - traj_min) / (traj_max - traj_min)
+    return scaled_traj
+
+
+def standardize_trajectories(traj_data, n_var):
+    means, stds = [[] for _ in range(2)]
+    for i in range(n_var):
+        means.append(torch.mean(traj_data[:, i, :]))
+        stds.append(torch.std(traj_data[:, i, :]))
+    for i in range(n_var):
+        traj_data[:, i, :] = (traj_data[:, i, :] - means[i]) / stds[i]
+    return traj_data
+
+############################################################################################################################
+
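`from_string_to_formula` expects tokens separated by single spaces, variables written as `x_0`, `x_1`, ..., and parenthesized binary operators. A minimal parsing sketch (illustrative only; it assumes the `stl` package imported above is available on the path):

# Both children are atoms, so this exercises the no-parentheses branch:
# left = 'x_0 <= 0.5', root = 'and', right = 'x_1 <= 0.3'.
phi = from_string_to_formula('( x_0 <= 0.5 and x_1 <= 0.3 )')
print(type(phi).__name__)  # And, with two Atom children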
+class STLSinusoidalPositionalEmbedding(nn.Embedding):
+    """This module produces sinusoidal positional embeddings of any length."""
+
+    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
+        super().__init__(num_positions, embedding_dim)
+        self.weight = self._init_weight(self.weight)
+
+    @staticmethod
+    def _init_weight(out: nn.Parameter) -> nn.Parameter:
+        """
+        Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
+        the 2nd half of the vector. [dim // 2:]
+        """
+        n_pos, dim = out.shape
+        position_enc = np.array(
+            [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
+        )
+        out.requires_grad = False  # set early to avoid an error in pytorch-1.8+
+        sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
+        out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
+        out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
+        out.detach_()
+        return out
+
+    @torch.no_grad()
+    def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor:
+        """`input_ids_shape` is expected to be [bsz x seqlen]."""
+        bsz, seq_len = input_ids_shape[:2]
+        positions = torch.arange(
+            past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
+        )
+        return super().forward(positions)
+
+class STLAttention(nn.Module):
+    """Multi-Head Attention as described in 'Attention Is All You Need'."""
+
+    def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0.0,
+                 is_decoder: bool = False, bias: bool = False, is_causal: bool = False):
+
+        super().__init__()
+        self.embed_dim = embed_dim  # overall embedding dimension -> to be divided between multiple heads
+        self.num_heads = num_heads
+        self.dropout = dropout
+        self.head_dim = embed_dim // num_heads
+        assert (self.head_dim * num_heads) == self.embed_dim
+        self.scaling = self.head_dim ** -0.5  # used to normalize values when projected using `W_` matrices
+        self.is_decoder = is_decoder
+        self.is_causal = is_causal
+
+        # 'roleplaying' matrices
+        self.W_k = nn.Linear(embed_dim, embed_dim, bias=bias)
+        self.W_q = nn.Linear(embed_dim, embed_dim, bias=bias)
+        self.W_v = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+        # to project the heads' outputs into a single vector
+        self.W_o = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+
+    def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
+        return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+
+    def forward(self,
+                hidden_states: torch.Tensor,  # previous values, passed to the multi-head attn layer
+                key_value_states: Optional[torch.Tensor] = None,  # different key, value items (used in cross-attn)
+                past_key_value: Optional[Tuple[torch.Tensor]] = None,  # stores the key and values of previous steps
+                attention_mask: Optional[torch.Tensor] = None,  # masks non-allowed items (padded or future ones)
+                layer_head_mask: Optional[torch.Tensor] = None,  # used to de-activate specific attn heads
+                output_attentions: bool = False  # flag to control the output of the attn values
+                ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+
+        is_cross_attention = key_value_states is not None  # cross-attn if key_value_states is not None
+
+        batch_size, tgt_len, embed_dim = hidden_states.size()
+
+        # Project the current input in the `query` role:
+        query = self.W_q(hidden_states) * self.scaling
+
+        if (is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1]):
+            key = past_key_value[0]
+            value = past_key_value[1]
+        elif is_cross_attention:
+            key = self._shape(self.W_k(key_value_states), -1, batch_size)
+            value = self._shape(self.W_v(key_value_states), -1, batch_size)
+        elif past_key_value is not None:
+            key = self._shape(self.W_k(hidden_states), -1, batch_size)
+            value = self._shape(self.W_v(hidden_states), -1, batch_size)
+            key = torch.cat([past_key_value[0], key], dim=2)
+            value = torch.cat([past_key_value[1], value], dim=2)
+        else:
+            key = self._shape(self.W_k(hidden_states), -1, batch_size)
+            value = self._shape(self.W_v(hidden_states), -1, batch_size)
+
+        if self.is_decoder:
+            past_key_value = (key, value)
+
+        proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
+
+        query = self._shape(query, tgt_len, batch_size).view(*proj_shape)
+        key = key.reshape(*proj_shape)
+        value = value.reshape(*proj_shape)
+
+        src_len = key.size(1)
+
+
+        ######################################################################################################
+
+        # 'traditional' attention computation
+        # i.e. softmax(Q*K^T / sqrt(d_model) + self_attn_mask) * V
+
+        # Batch-wise matrix multiplication between `query` and (TRANSPOSED) `key`
+        attn_weights = torch.bmm(query, key.transpose(1, 2))
+
+        if attention_mask is not None:
+            attn_weights = attn_weights.view(batch_size, self.num_heads, tgt_len, src_len) + attention_mask
+            attn_weights = attn_weights.view(batch_size * self.num_heads, tgt_len, src_len)
+
+        # Normalize values on the `key` axis (dim=-1)
+        attn_weights = F.softmax(attn_weights, dim=-1)
+
+        # if layer_head_mask is not None:
+        #     attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(batch_size, self.num_heads, tgt_len, src_len)
+        #     attn_weights = attn_weights.view(batch_size * self.num_heads, tgt_len, src_len)
+
+        attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)
+
+        # Batch-wise matrix multiplication between the resulting probs and the value
+        attn_output = torch.bmm(attn_probs, value)
+
+        ######################################################################################################
+
+        attn_output = attn_output.view(batch_size, self.num_heads, tgt_len, self.head_dim)
+        attn_output = attn_output.transpose(1, 2)
+
+        attn_output = attn_output.reshape(batch_size, tgt_len, self.embed_dim)
+        attn_output = self.W_o(attn_output)
+
+        return attn_output, None, past_key_value
+
+
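A minimal smoke test of the self-attention path of `STLAttention` (illustrative only; no mask, no cache, and `is_decoder` left at its default):

import torch

attn = STLAttention(embed_dim=64, num_heads=4)
x = torch.randn(2, 10, 64)        # (batch, seq_len, embed_dim)
out, _, cache = attn(hidden_states=x)
print(out.shape)                  # torch.Size([2, 10, 64]); cache is None unless is_decoder=True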
class DatasetProcessor:
|
| 309 |
+
def __init__(self, dataset_name, split="train", device="cuda" if torch.cuda.is_available() else "cpu"):
|
| 310 |
+
self.device = device
|
| 311 |
+
self.original_dataset = pd.read_pickle(dataset_name) # Load the dataset from the pickle file
|
| 312 |
+
self.processed_dataset = self._create_processed_dataset()
|
| 313 |
+
|
| 314 |
+
def _create_processed_dataset(self):
|
| 315 |
+
# Transform a single entry
|
| 316 |
+
def transform_entry(entry):
|
| 317 |
+
# Convert 'Embedding' from string to list of floats if necessary
|
| 318 |
+
formula_embedding = entry['Embedding512']
|
| 319 |
+
encoder_hidden_states = torch.tensor(formula_embedding, dtype=torch.float32).to(self.device)
|
| 320 |
+
|
| 321 |
+
# Convert 'Encoded_Formula' from string to list of integers
|
| 322 |
+
encoded_formula = entry['Encoded_Formula']
|
| 323 |
+
input_ids = encoded_formula[:-1] # All tokens except the last
|
| 324 |
+
labels = encoded_formula[1:] # All tokens except the first
|
| 325 |
+
attention_mask = [0 if token == 1 else 1 for token in input_ids]
|
| 326 |
+
|
| 327 |
+
input_ids = torch.tensor(input_ids, dtype=torch.long).to(self.device)
|
| 328 |
+
labels = torch.tensor(labels, dtype=torch.long).to(self.device)
|
| 329 |
+
attention_mask = torch.tensor(attention_mask, dtype=torch.long).to(self.device)
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
# Return only the transformed columns
|
| 333 |
+
return {
|
| 334 |
+
'input_ids': input_ids,
|
| 335 |
+
'labels': labels,
|
| 336 |
+
'attention_mask': attention_mask,
|
| 337 |
+
'encoder_hidden_states': encoder_hidden_states
|
| 338 |
+
}
|
| 339 |
+
|
| 340 |
+
# Apply the transformation to each row in the dataset using pandas .apply()
|
| 341 |
+
transformed_data = self.original_dataset.apply(transform_entry, axis=1)
|
| 342 |
+
|
| 343 |
+
return transformed_data
|
| 344 |
+
|
| 345 |
+
def get_processed_dataset(self):
|
| 346 |
+
return self.processed_dataset
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
class DatasetProcessor2:
|
| 350 |
+
def __init__(self, dataset_name, split="train", device="cuda" if torch.cuda.is_available() else "cpu"):
|
| 351 |
+
self.device = device
|
| 352 |
+
# self.original_dataset = pd.read_pickle(dataset_name)
|
| 353 |
+
self.original_dataset = load_dataset('csv', data_files=dataset_name, split=split)
|
| 354 |
+
self.processed_dataset = self._create_processed_dataset()
|
| 355 |
+
|
| 356 |
+
def _create_processed_dataset(self):
|
| 357 |
+
def transform_entry(entry):
|
| 358 |
+
# Convertire 'Embedding' da stringa a lista di float e poi a tensor
|
| 359 |
+
formula_embedding = eval(entry['Embedding']) # Converti la stringa in lista
|
| 360 |
+
encoder_hidden_states = torch.tensor(formula_embedding, dtype=torch.float32).to(self.device)
|
| 361 |
+
|
| 362 |
+
# Convertire 'Encoded_Formula' da stringa a lista di interi
|
| 363 |
+
encoded_formula = eval(entry['Encoded_Formula'])
|
| 364 |
+
input_ids = encoded_formula[:-1] # Tutti i token tranne l'ultimo
|
| 365 |
+
labels = encoded_formula[1:] # Tutti i token tranne il primo
|
| 366 |
+
attention_mask = [0 if token == 1 else 1 for token in input_ids]
|
| 367 |
+
|
| 368 |
+
# Restituiamo solo le nuove colonne
|
| 369 |
+
return {
|
| 370 |
+
'input_ids': input_ids,
|
| 371 |
+
'labels': labels,
|
| 372 |
+
'attention_mask': attention_mask,
|
| 373 |
+
'encoder_hidden_states': encoder_hidden_states.tolist() # Convertire tensor in lista per compatibilità Dataset
|
| 374 |
+
}
|
| 375 |
+
|
| 376 |
+
# Creiamo un dataset nuovo applicando `.map()` con tqdm
|
| 377 |
+
# removed_columns = list(self.original_dataset.columns)
|
| 378 |
+
new_dataset = self.original_dataset.map(
|
| 379 |
+
transform_entry, num_proc=1, remove_columns=self.original_dataset.column_names
|
| 380 |
+
)
|
| 381 |
+
|
| 382 |
+
# Convertiamo input_ids, labels e attention_mask in tensori PyTorch
|
| 383 |
+
        def convert_to_tensors(batch):
            batch["input_ids"] = [torch.tensor(x, dtype=torch.long).to(self.device) for x in batch["input_ids"]]
            batch["labels"] = [torch.tensor(x, dtype=torch.long).to(self.device) for x in batch["labels"]]
            batch["attention_mask"] = [torch.tensor(x, dtype=torch.long).to(self.device) for x in batch["attention_mask"]]
            batch["encoder_hidden_states"] = [torch.tensor(x, dtype=torch.float32).to(self.device) for x in batch["encoder_hidden_states"]]
            return batch

        # Apply the conversion batched, for efficiency
        new_dataset = new_dataset.map(convert_to_tensors, batched=True)

        return new_dataset

    def get_processed_dataset(self):
        return self.processed_dataset


class DatasetProcessorOLD:
    def __init__(self, dataset_name, split="train", device="cuda" if torch.cuda.is_available() else "cpu"):
        self.device = device
        self.original_dataset = load_dataset('csv', data_files=dataset_name, split=split)
        self.processed_dataset = self._create_processed_dataset()

    def _create_processed_dataset(self):
        def transform_entry(entry):
            # Convert 'Embedding' from its string form to a list of floats, then to a tensor
            formula_embedding = eval(entry['Embedding'])  # convert the string into a list
            encoder_hidden_states = torch.tensor(formula_embedding, dtype=torch.float32).to(self.device)

            # Convert 'Encoded_Formula' from its string form to a list of integers
            encoded_formula = eval(entry['Encoded_Formula'])  # convert the string into a list
            input_ids = encoded_formula[:-1]  # all tokens except the last
            labels = encoded_formula[1:]  # all tokens except the first
            attention_mask = [0 if token == 1 else 1 for token in input_ids]

            # Return only the new columns
            return {
                'input_ids': torch.tensor(input_ids, dtype=torch.long).to(self.device),
                'labels': torch.tensor(labels, dtype=torch.long).to(self.device),
                'attention_mask': torch.tensor(attention_mask, dtype=torch.long).to(self.device),
                'encoder_hidden_states': encoder_hidden_states
            }

        # Apply the transformation with map() and show the progress bar
        processed_dataset = self.original_dataset.map(
            transform_entry, desc="Processing Dataset", num_proc=1
        )

        return processed_dataset

    def get_processed_dataset(self):
        return self.processed_dataset


# A `CustomDataset` class that formats the input data into the
# `input_ids`, `labels`, and `attention_mask` attributes needed for model training.
class CustomDataset(Dataset):
    def __init__(self, df, device='cpu'):
        """
        Initializes the dataset by storing the DataFrame and setting the device.

        Args:
        - df: A pandas DataFrame containing the data (e.g., `Encoded_Formula`, `Embedding`).
        - device: The device ('cpu' or 'cuda') where the tensors will be moved for processing.
        """
        self.df = df['train']
        self.device = device

        encoded_formulae = []
        formulae_embeddings = []
        input_ids = []
        labels = []
        attention_masks = []

        for idx in range(len(self.df)):
            # Extract the encoded formula (tokenized input sequence) from the DataFrame,
            # converting the string representation of a list back to a Python list.
            # encoded_formula = self.df['Encoded_Formula'][idx]
            encoded_formula = ast.literal_eval(self.df['Encoded_Formula'][idx])
            # encoded_formula = [int(x) for x in encoded_formula.split()]
            encoded_formulae.append(encoded_formula)

            # Extract the precomputed formula embedding (hidden states) from the DataFrame
            formula_embedding = self.df['Embedding'][idx]

            # Clean the string and convert it back to a tensor
            # formula_embedding = formula_embedding.replace("tensor(", "").rstrip(")")
            # formula_embedding = eval(formula_embedding)
            formula_embedding = ast.literal_eval(formula_embedding.strip())
            encoder_hidden_states = torch.tensor(formula_embedding, dtype=torch.float32).to(self.device)
            formulae_embeddings.append(encoder_hidden_states)

            # Define the input_ids by excluding the last token (shifted tokens for prediction)
            input_ids.append(torch.tensor(encoded_formula[:-1], dtype=torch.long).to(self.device))  # all tokens except the last
            # Define the labels by excluding the first token (shifted tokens for teacher forcing)
            labels.append(torch.tensor(encoded_formula[1:], dtype=torch.long).to(self.device))  # all tokens except the first

            # Create the attention mask to indicate which tokens should be attended to.
            # Tokens equal to '1' (typically padding tokens) will be masked (set to 0),
            # and the rest will be visible (set to 1).
            attention_mask = [0 if token == 1 else 1 for token in encoded_formula[:-1]]  # use encoded_formula for the mask
            attention_mask = torch.tensor(attention_mask, dtype=torch.long).to(self.device)
            attention_masks.append(attention_mask)

        # Store the processed tensors
        self.df = {
            'input_ids': input_ids,
            'labels': labels,
            'attention_mask': attention_masks,
            'encoder_hidden_states': formulae_embeddings
        }

        # self.df = pd.DataFrame(temp, device=device)

    def __len__(self):
        """
        Returns the length of the dataset, i.e., the number of samples it holds.
        (`self.df` is a dict of per-column lists at this point, so the length of
        one column is used; `len(self.df)` would count the keys instead.)
        """
        return len(self.df['input_ids'])

    def __getitem__(self, idx):
        """
        Retrieves the dataset item at the given index.

        Args:
        - idx: The index of the sample to retrieve.

        Returns:
        - A dictionary containing the input data for the model.
        """
        return {
            'input_ids': self.df['input_ids'][idx],
            'labels': self.df['labels'][idx],
            'attention_mask': self.df['attention_mask'][idx],
            'encoder_hidden_states': self.df['encoder_hidden_states'][idx]
        }
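
A minimal usage sketch for `CustomDataset` (an editor's illustration, not part of the file: it assumes a dataset object with a 'train' split whose rows carry string-encoded `Encoded_Formula` and `Embedding` columns; for batching, samples can be wrapped in a torch DataLoader, provided the sequences were padded to a common length upstream):

# Hypothetical illustration: instantiate the dataset and inspect one sample.
dataset = CustomDataset(df, device='cpu')
sample = dataset[0]
print(sample['input_ids'].shape, sample['labels'].shape, sample['encoder_hidden_states'].shape)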
############################################################################################################################

# METRICS

def token_division(input_string):
    tokenizer = STLTokenizer('tokenizer_files/tokenizer.json')
    return [element for element in tokenizer.tokenize(input_string) if element != "pad"]


def bleu_score(dataset):
    bleu_scores = []

    for idx in range(len(dataset)):
        gold = token_division(dataset["Gold Formula"][idx])
        generated = token_division(dataset["Generated Formula"][idx])

        # NLTK's sentence_bleu expects a *list* of reference token lists
        bleu_scores.append(sentence_bleu([gold], generated))

    return np.min(bleu_scores), np.mean(bleu_scores), np.max(bleu_scores)


def exact_match(dataset, gold_formula_column: str, generated_formula_column: str):
    percentage = []

    for idx in range(len(dataset)):
        gold = token_division(dataset[gold_formula_column][idx])
        generated = token_division(dataset[generated_formula_column][idx])

        match_count = 0
        for gold_token, gen_token in zip(gold, generated):
            if gold_token == gen_token:
                match_count += 1

        percentage.append(match_count / len(gold))

    return np.mean(percentage)


def cosine_similarity(dataset):
    similarities = []

    for idx in range(len(dataset)):
        gold = ast.literal_eval(dataset["Embedding Gold Formula"][idx])
        gen = ast.literal_eval(dataset["Embedding Generated Formula"][idx])

        dot_product = np.dot(gold, gen)
        gold_norm = np.linalg.norm(gold)
        gen_norm = np.linalg.norm(gen)

        similarities.append(dot_product / (gold_norm * gen_norm))

    return np.min(similarities), np.mean(similarities), np.max(similarities)


def euclidean_distance(dataset):
    distances = []

    for idx in range(len(dataset)):
        gold = torch.tensor(ast.literal_eval(dataset["Embedding Gold Formula"][idx]))
        generated = torch.tensor(ast.literal_eval(dataset["Embedding Generated Formula"][idx]))

        distances.append(torch.dist(gold, generated))

    return np.min(distances), np.mean(distances), np.max(distances)
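
For orientation, a hedged example of calling the metric helpers above (the two-row DataFrame is made up; the column names match the validation CSVs used elsewhere in this repo, and `token_division` assumes `tokenizer_files/tokenizer.json` is available):

# Hypothetical illustration of the metrics API.
import pandas as pd

results = pd.DataFrame({
    "Gold Formula": ["always ( x_1 >= -0.5783 )", "eventually[6,20] ( x_1 >= -0.7636 )"],
    "Generated Formula": ["always ( x_1 >= -0.5 )", "eventually[6,20] ( x_1 >= -0.76 )"],
})
print(exact_match(results, "Gold Formula", "Generated Formula"))  # mean per-token match rate
print(bleu_score(results))  # (min, mean, max) BLEU over the rows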
#######################################################################################################

def get_name_given_type(formula):
    """
    Returns the type of node (as a string) of the top node of the formula/sub-formula
    """
    name_dict = {And: 'and', Or: 'or', Not: 'not', Eventually: 'F', Globally: 'G', Until: 'U',
                 Atom: 'x'}
    return name_dict[type(formula)]


def get_id(child_name, name, label_dict, idx):
    """
    Get a unique identifier for a node
    """
    while child_name in label_dict.keys():  # if the name is already present
        idx += 1
        child_name = name + "(" + str(idx) + ")"
    return child_name, idx  # returns both the child name and the identifier


def get_temporal_list(temporal_node):
    """
    Returns the feature vector for temporal nodes (the two bounds of the temporal interval).
    Variant and num_arg modify the length of the list to return (3, 4 or 5)
    """
    left = float(temporal_node.left_time_bound) if temporal_node.unbound is False else 0.
    right = float(temporal_node.right_time_bound) if (temporal_node.unbound is False and
                                                      temporal_node.right_unbound is False) else -1.
    vector_l = [left, right, 0.]  # third slot for sign and fourth for threshold; add another slot for argument number
    return vector_l


def add_internal_child(current_child, current_idx, label_dict):
    child_name = get_name_given_type(current_child) + '(' + str(current_idx) + ')'
    child_name, current_idx = get_id(child_name, get_name_given_type(current_child), label_dict, current_idx)
    return child_name, current_idx


def add_leaf_child(node, name, label_dict, idx):
    """
    Add the edges and update the label dictionary and the identifier count for a leaf node (variable).
    variant = ['original', 'threshold-sign', 'all-in-var']
    shared_var = [True, False] denotes shared variables for the whole DAG or single variables (tree-like)
    num_arg = [True, False]: if true, the argument number is one-hot encoded in the feature vector
    until_right is a flag to detect when the argument number encoding should be 1
    """
    new_e = []
    label_dict[name] = [0., 0., 0.]
    atom_idx = str(node).split()[0] + '(' + str(idx) + ')'
    # different names for the same variables (e.g. x_1(5), x_1(8))
    idx += 1
    if atom_idx not in label_dict.keys():
        label_dict[atom_idx] = [0., 0., 0.]

    # NOTE: both branches currently assign the same feature vector; the atom's
    # sign ('<=' vs '>=') is not encoded here.
    if str(node).split()[1] == '<=':
        label_dict[name] = [0., 0., round(node.threshold, 4)]
    else:
        label_dict[name] = [0., 0., round(node.threshold, 4)]
    new_e.append([name, atom_idx])
    return new_e, label_dict, idx + 1


def traverse_formula(formula, idx, label_dict):
    current_node = formula
    edges = []
    if type(current_node) is not Atom:
        current_name = get_name_given_type(current_node) + '(' + str(idx) + ')'
        if (type(current_node) is And) or (type(current_node) is Or) or (type(current_node) is Not):
            label_dict[current_name] = [0., 0., 0.]  # temp_left, temp_right, threshold
        else:
            label_dict[current_name] = get_temporal_list(current_node)
        if (type(current_node) is And) or (type(current_node) is Or) or (type(current_node) is Until):
            left_child_name, current_idx = add_internal_child(current_node.left_child, idx + 1, label_dict)
            edges.append([current_name, left_child_name])
            if type(current_node.left_child) is Atom:
                e, d, current_idx = add_leaf_child(current_node.left_child, left_child_name, label_dict, current_idx+1)
                edges += e
                label_dict.update(d)
            e, d = traverse_formula(current_node.left_child, current_idx, label_dict)
            edges += e
            label_dict.update(d)
            right_child_name, current_idx = add_internal_child(current_node.right_child, current_idx + 1, label_dict)
            edges.append([current_name, right_child_name])
            if type(current_node.right_child) is Atom:
                e, d, current_idx = add_leaf_child(current_node.right_child, right_child_name, label_dict,
                                                   current_idx+1)
                edges += e
                label_dict.update(d)
            e, d = traverse_formula(current_node.right_child, current_idx, label_dict)
            edges += e
            label_dict.update(d)
        else:
            # eventually, globally, not
            child_name, current_idx = add_internal_child(current_node.child, idx + 1, label_dict)
            edges.append([current_name, child_name])
            if type(current_node.child) is Atom:
                e, d, current_idx = add_leaf_child(current_node.child, child_name, label_dict, current_idx+1)
                edges += e
                label_dict.update(d)
            e, d = traverse_formula(current_node.child, current_idx, label_dict)
            edges += e
            label_dict.update(d)
    return edges, label_dict


def build_dag(formula):
    edges, label_dict = traverse_formula(formula, 0, {})
    graph = nx.from_edgelist(edges, create_using=nx.DiGraph)
    assert (nx.is_directed_acyclic_graph(graph))
    return graph, label_dict


def get_depth(formula):
    phi_g = build_dag(formula)[0]
    return len(nx.dag_longest_path(phi_g)) - 1


def get_n_nodes(str_phi):
    f_split = str_phi.split()
    f_nodes_list = [sub_f for sub_f in f_split if sub_f in ['not', 'and', 'or', 'always', 'eventually', '<=', '>=',
                                                            'until']]
    return len(f_nodes_list)


def get_n_leaves(str_phi):
    phi_split = str_phi.split()
    phi_var = [sub for sub in phi_split if sub.startswith('x_')]
    return len(phi_var)


def get_n_temp(str_phi):
    phi_split = str_phi.split()
    phi_temp = [sub for sub in phi_split if sub[:2] in ['ev', 'al', 'un']]
    return len(phi_temp)


def get_n_tokens(str_phi):
    tokenizer = STLTokenizer('tokenizer_files/tokenizer.json')
    return len(tokenizer.encode(str_phi))


def get_n_depth(str_phi):
    phi = from_string_to_formula(str_phi)
    return get_depth(phi)
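
A brief sketch of the DAG helpers in action (editor's illustration; the formula string is one that already appears in this repo, and `from_string_to_formula` is assumed to be in scope):

# Illustration only: parse a formula, build its DAG, read structural statistics.
phi_str = "always ( eventually[6,20] ( x_1 >= -0.7636 ) )"
phi = from_string_to_formula(phi_str)
graph, labels = build_dag(phi)
print(get_depth(phi))        # longest root-to-leaf path in the DAG
print(get_n_nodes(phi_str))  # operator/atom count in the string form
print(get_n_temp(phi_str))   # number of temporal operators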
history/aob/utils3.py
ADDED
@@ -0,0 +1,162 @@
import copy
import pickle
import os
from collections import deque

import torch  # needed by the trajectory helpers below

from stl import *


def load_pickle(folder, name):
    with open(folder + os.path.sep + name, 'rb') as f:
        x = pickle.load(f)
    return x


def dump_pickle(name, thing):
    with open(name + '.pickle', 'wb') as f:
        pickle.dump(thing, f)


def set_time_thresholds(st):
    unbound, right_unbound = [True, False]
    left_time_bound, right_time_bound = [0, 0]
    if st[-1] == ']':
        unbound = False
        time_thresholds = st[st.index('[')+1:-1].split(",")
        left_time_bound = int(time_thresholds[0])
        if time_thresholds[1] == 'inf':
            right_unbound = True
        else:
            right_time_bound = int(time_thresholds[1])-1
    return unbound, right_unbound, left_time_bound, right_time_bound


def from_string_to_formula(st):
    root_arity = 2 if st.startswith('(') else 1
    st_split = st.split()
    if root_arity <= 1:
        root_op_str = copy.deepcopy(st_split[0])
        if root_op_str.startswith('x'):
            atom_sign = True if st_split[1] == '<=' else False
            root_phi = Atom(var_index=int(st_split[0][2]), lte=atom_sign, threshold=float(st_split[2]))
            return root_phi
        else:
            assert (root_op_str.startswith('not') or root_op_str.startswith('eventually')
                    or root_op_str.startswith('always'))
            current_st = copy.deepcopy(st_split[2:-1])
            if root_op_str == 'not':
                root_phi = Not(child=from_string_to_formula(' '.join(current_st)))
            elif root_op_str.startswith('eventually'):
                unbound, right_unbound, left_time_bound, right_time_bound = set_time_thresholds(root_op_str)
                root_phi = Eventually(child=from_string_to_formula(' '.join(current_st)), unbound=unbound,
                                      right_unbound=right_unbound, left_time_bound=left_time_bound,
                                      right_time_bound=right_time_bound)
            else:
                unbound, right_unbound, left_time_bound, right_time_bound = set_time_thresholds(root_op_str)
                root_phi = Globally(child=from_string_to_formula(' '.join(current_st)), unbound=unbound,
                                    right_unbound=right_unbound, left_time_bound=left_time_bound,
                                    right_time_bound=right_time_bound)
    else:
        # 1 - delete everything contained in other sets of parentheses (if any)
        current_st = copy.deepcopy(st_split[1:-1])
        if '(' in current_st:
            par_queue = deque()
            par_idx_list = []
            for i, sub in enumerate(current_st):
                if sub == '(':
                    par_queue.append(i)
                elif sub == ')':
                    par_idx_list.append(tuple([par_queue.pop(), i]))
            # open_par_idx, close_par_idx = [current_st.index(p) for p in ['(', ')']]
            # union of parentheses ranges --> from these we may extract the substrings that are the children
            children_range = []
            for begin, end in sorted(par_idx_list):
                if children_range and children_range[-1][1] >= begin - 1:
                    children_range[-1][1] = max(children_range[-1][1], end)
                else:
                    children_range.append([begin, end])
            n_children = len(children_range)
            assert (n_children in [1, 2])
            if n_children == 1:
                # one of the children is a variable --> need to locate it
                var_child_idx = 1 if children_range[0][0] <= 1 else 0  # 0 is left child, 1 is right child
                if children_range[0][0] != 0 and current_st[children_range[0][0] - 1][0:2] in ['no', 'ev', 'al']:
                    children_range[0][0] -= 1
                left_child_str = current_st[:3] if var_child_idx == 0 else \
                    current_st[children_range[0][0]:children_range[0][1] + 1]
                right_child_str = current_st[-3:] if var_child_idx == 1 else \
                    current_st[children_range[0][0]:children_range[0][1] + 1]
                root_op_str = current_st[children_range[0][1] + 1] if var_child_idx == 1 else \
                    current_st[children_range[0][0] - 1]
                assert (root_op_str[:2] in ['an', 'or', 'un'])
            else:
                if children_range[0][0] != 0 and current_st[children_range[0][0] - 1][0:2] in ['no', 'ev', 'al']:
                    children_range[0][0] -= 1
                if current_st[children_range[1][0] - 1][0:2] in ['no', 'ev', 'al']:
                    children_range[1][0] -= 1
                # if there are two parenthesised children, the element in the middle is the root
                root_op_str = current_st[children_range[0][1] + 1]
                assert (root_op_str[:2] in ['an', 'or', 'un'])
                left_child_str = current_st[children_range[0][0]:children_range[0][1] + 1]
                right_child_str = current_st[children_range[1][0]:children_range[1][1] + 1]
        else:
            # no parentheses means that both children are variables
            left_child_str = current_st[:3]
            right_child_str = current_st[-3:]
            root_op_str = current_st[3]
        left_child_str = ' '.join(left_child_str)
        right_child_str = ' '.join(right_child_str)
        if root_op_str == 'and':
            root_phi = And(left_child=from_string_to_formula(left_child_str),
                           right_child=from_string_to_formula(right_child_str))
        elif root_op_str == 'or':
            root_phi = Or(left_child=from_string_to_formula(left_child_str),
                          right_child=from_string_to_formula(right_child_str))
        else:
            unbound, right_unbound, left_time_bound, right_time_bound = set_time_thresholds(root_op_str)
            root_phi = Until(left_child=from_string_to_formula(left_child_str),
                             right_child=from_string_to_formula(right_child_str),
                             unbound=unbound, right_unbound=right_unbound, left_time_bound=left_time_bound,
                             right_time_bound=right_time_bound)
    return root_phi


def rescale_trajectories(traj, min_traj=[0, 0, 0], max_traj=[99, 100, 97]):
    rescaled_traj = torch.zeros_like(traj)
    for i in range(traj.shape[1]):
        new_i = min_traj[i] + (traj[:, i, :] + 1)*(max_traj[i] - min_traj[i])/2
        rescaled_traj[:, i, :] = new_i
    return rescaled_traj


def scale_trajectories(traj):
    traj_min = torch.min(torch.min(traj, dim=0)[0], dim=0)[0]
    traj_max = torch.max(torch.max(traj, dim=0)[0], dim=0)[0]
    scaled_traj = -1 + 2*(traj - traj_min) / (traj_max - traj_min)
    return scaled_traj


def execution_time(start, end, p=False):
    hours, rem = divmod(end - start, 3600)
    minutes, seconds = divmod(rem, 60)
    if p:
        print("Execution time = {:0>2}:{:0>2}:{:0>2}".format(int(hours), int(minutes), int(seconds)))
    return int(hours), int(minutes), int(seconds)


def from_str_to_n_nodes(f):
    f_str = str(f)
    f_split = f_str.split()
    f_nodes_list = [sub_f for sub_f in f_split if sub_f in ['not', 'and', 'or', 'always', 'eventually', '<=', '>=',
                                                            'until']]
    return len(f_nodes_list)


def get_leaves_idx(phi):
    # needed when one only wants to set variable thresholds
    phi_str = str(phi)
    phi_split = phi_str.split()
    phi_var = [sub for sub in phi_split if sub.startswith('x_')]
    var_idx = [int(sub[2:]) for sub in phi_var]
    return len(phi_var), var_idx
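
A quick parsing sketch for `from_string_to_formula` (editor's illustration; the grammar follows the formula strings that appear elsewhere in this repo: binary operators are parenthesised and atoms look like `x_0 <= 0.5`):

# Illustration only: parse a conjunction and inspect its leaves.
phi = from_string_to_formula("( x_0 <= 0.5 and always ( x_1 >= -0.5783 ) )")
print(from_str_to_n_nodes(phi))  # number of operator/atom nodes
print(get_leaves_idx(phi))       # (number of leaves, their variable indices)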
history/aob/validate.py
ADDED
@@ -0,0 +1,94 @@
import numpy as np
from accelerate import Accelerator
from safetensors import safe_open
from safetensors.torch import load_file
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import ast
import pandas as pd
from handcoded_tokenizer import STLTokenizer
from configuration import STLConfig
from modeling_stldec import STLForCausalLM
from encoder import STLEncoder

from transformers import AutoConfig, AutoModelForCausalLM

##################################################################

model_path = "../../../../../../../../../leonardo_scratch/fast/IscrC_IRA-LLMs/balanced_@/step_12000"
optimizer_path = "../../../../../../../../../leonardo_scratch/fast/IscrC_IRA-LLMs/balanced_@/step_12000/optimizer.bin"
scheduler_path = "../../../../../../../../../leonardo_scratch/fast/IscrC_IRA-LLMs/balanced_@/step_12000/scheduler.bin"

##################################################################

AutoConfig.register("STLdec", STLConfig)
AutoModelForCausalLM.register(STLConfig, STLForCausalLM)

config = STLConfig()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = AutoModelForCausalLM.from_pretrained(model_path, config=config).to(device)  # move the model to the device
tokenizer = STLTokenizer('tokenizer_files/tokenizer.json')
encoder = STLEncoder(embed_dim=1024, anchor_filename='anchor_set_1024_dim.pickle')

accelerator = Accelerator()

optimizer = torch.load(optimizer_path)
scheduler = torch.load(scheduler_path)
optimizer = accelerator.prepare(optimizer)
scheduler = accelerator.prepare(scheduler)

eval_df = pd.read_pickle("datasets/new_balanced_validation_set.pkl")

##################################################################

eval_df = eval_df.head(200)

formulae_dataset = []

for idx in range(len(eval_df)):
    embedding = eval_df["Embedding"][idx]
    encoder_hidden_states = torch.tensor(embedding, dtype=torch.float32, device=device).unsqueeze(0).unsqueeze(0)

    with torch.no_grad():
        generated_ids = model.generate(
            encoder_hidden_states=encoder_hidden_states,  # condition generation on the formula embedding
            pad_token_id=model.config.pad_token_id,  # padding token ID, if any
            bos_token_id=model.config.bos_token_id,
            forced_eos_token_id=config.forced_eos_token_id,
            max_new_tokens=500
        )
    # print(generated_ids[0])
    generated_text = tokenizer.decode(generated_ids[0].tolist())
    # print(generated_text)
    generated_text = generated_text[3:-2]  # trim the special-token markers at the ends
    # print(generated_text)
    formulae_dataset.append(generated_text)

# (the encoder instantiated above is reused here)

generated_embeddings = encoder.compute_embeddings(formulae_dataset)
gold_embeddings = encoder.compute_embeddings(eval_df["Formula"])


# eval_df.head()

# gold_embeddings = encoder.compute_embeddings(eval_df["Gold Formula"])
# generated_embeddings = encoder.compute_embeddings(eval_df["Generated Formula"])

eval_df['Embedding Gold Formula'] = gold_embeddings.tolist()
eval_df['Embedding Generated Formula'] = generated_embeddings.tolist()

euclidean_distance = []

for idx in range(len(eval_df)):
    gold = torch.tensor(eval_df["Embedding Gold Formula"][idx])
    generated = torch.tensor(eval_df["Embedding Generated Formula"][idx])
    euclidean_distance.append(torch.dist(gold, generated))

print(f"Mean euclidean distance: {np.mean(euclidean_distance)}")

# eval_df.to_csv('balanced/step_7000_formulae.csv')
history/aob/validate_step.py
ADDED
@@ -0,0 +1,79 @@
import numpy as np
from accelerate import Accelerator
from safetensors import safe_open
from safetensors.torch import load_file
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import ast
import pandas as pd
from handcoded_tokenizer import STLTokenizer
from configuration import STLConfig
from modeling_stldec import STLForCausalLM
from encoder import STLEncoder

from transformers import AutoConfig, AutoModelForCausalLM


##################################################################

eval_df = pd.read_pickle("datasets/new_balanced_validation_set.pkl")

gold_formulae = eval_df['Formula']

formulae_dataset = []

model_path = f"../../../../../../../../../leonardo_scratch/fast/IscrC_IRA-LLMs/balanced_@/step_14000"
optimizer_path = f"../../../../../../../../../leonardo_scratch/fast/IscrC_IRA-LLMs/balanced_@/step_14000/optimizer.bin"
scheduler_path = f"../../../../../../../../../leonardo_scratch/fast/IscrC_IRA-LLMs/balanced_@/step_14000/scheduler.bin"

##################################################################

AutoConfig.register("STLdec", STLConfig)
AutoModelForCausalLM.register(STLConfig, STLForCausalLM)

config = STLConfig()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = AutoModelForCausalLM.from_pretrained(model_path, config=config).to(device)  # move the model to the device
tokenizer = STLTokenizer('tokenizer_files/tokenizer.json')
encoder = STLEncoder(embed_dim=1024, anchor_filename='anchor_set_1024_dim.pickle')

accelerator = Accelerator()

optimizer = torch.load(optimizer_path)
scheduler = torch.load(scheduler_path)
optimizer = accelerator.prepare(optimizer)
scheduler = accelerator.prepare(scheduler)

##################################################################

generated_formulae = []

for idx in range(len(eval_df)):
    embedding = eval_df["Embedding"][idx]
    encoder_hidden_states = torch.tensor(embedding, dtype=torch.float32).to(device)
    encoder_hidden_states = encoder_hidden_states.unsqueeze(0).unsqueeze(0)

    with torch.no_grad():
        generated_ids = model.generate(
            encoder_hidden_states=encoder_hidden_states,  # condition generation on the formula embedding
            pad_token_id=model.config.pad_token_id,  # padding token ID, if any
            bos_token_id=model.config.bos_token_id,
            eos_token_id=model.config.forced_eos_token_id,
            max_new_tokens=500
        )

    generated_text = tokenizer.decode(generated_ids[0].tolist())
    generated_text = generated_text[3:-2]  # trim the special-token markers at the ends
    generated_formulae.append(generated_text)

formulae_dataset.append(generated_formulae)
eval_df = pd.DataFrame(formulae_dataset).transpose()

# formulae_dataset.append(eval_df['Formula'])

eval_df['gold formula'] = gold_formulae

# eval_df = pd.concat([pd.DataFrame({'Gold Formula': gold_formulae}), eval_df], axis=1)

eval_df.to_csv('step_14000.csv', index=False)
history/aob/validation-evaluation.py
ADDED
@@ -0,0 +1,41 @@
import numpy as np
from accelerate import Accelerator
from safetensors import safe_open
from safetensors.torch import load_file
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import ast
import pandas as pd
from handcoded_tokenizer import STLTokenizer
from configuration import STLConfig
from modeling_stldec import STLForCausalLM
from encoder import STLEncoder
import torch.nn.functional as F

eval_df = pd.read_csv('predicted_gold_formulae.csv')
encoder = STLEncoder(embed_dim=1024, anchor_filename='anchor_set_1024_dim.pickle')

gold_embeddings = encoder.compute_embeddings(eval_df["Gold Formula"])
generated_embeddings = encoder.compute_embeddings(eval_df["Generated Formula"])

eval_df['Embedding Gold Formula'] = gold_embeddings.tolist()
eval_df['Embedding Generated Formula'] = generated_embeddings.tolist()

euclidean_distance = []
cosine_distance = []

for idx in range(len(eval_df)):
    gold = torch.tensor(eval_df["Embedding Gold Formula"][idx])
    generated = torch.tensor(eval_df["Embedding Generated Formula"][idx])

    euclidean_distance.append(torch.dist(gold, generated))
    cosine_distance.append(1 - F.cosine_similarity(gold, generated, dim=0))  # dim=0: the embeddings are 1-D

print(f"Mean euclidean distance: {np.mean(euclidean_distance)}")
print(f"Mean cosine distance: {np.mean(cosine_distance)}")


eval_df.to_csv('updated_predicted_gold_formulae.csv')
history/configuration_stldec.py
ADDED
@@ -0,0 +1,69 @@
from transformers.configuration_utils import PretrainedConfig

class STLConfig(PretrainedConfig):

    model_type = "stldec"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=35,
        decoder_vocab_size=None,  # unused
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=3,
        scale_embedding=False,
        pad_token_id=1,
        eos_token_id=3,
        bos_token_id=2,
        forced_eos_token_id=3,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings

        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
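
A small sketch of how this configuration is used (editor's illustration; overrides go through constructor kwargs, as with any Hugging Face `PretrainedConfig`):

# Illustration only: defaults can be overridden per instance.
config = STLConfig(d_model=512, decoder_layers=6)
print(config.hidden_size)             # 512, resolved through the attribute_map alias
print(config.decoder_start_token_id)  # 3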
history/datasets_things/analysis.ipynb
ADDED
The diff for this file is too large to render. See raw diff.
history/datasets_things/compose_datasets.py
ADDED
@@ -0,0 +1,23 @@
import numpy as np
import pandas as pd

d2 = pd.read_csv('datasets/fragments/depth_2_formulae.csv')
d3 = pd.read_csv('datasets/fragments/depth_3_formulae.csv')
d4 = pd.read_csv('datasets/fragments/depth_4_formulae.csv')
d5 = pd.read_csv('datasets/fragments/depth_5_formulae.csv')
d6 = pd.read_csv('datasets/fragments/depth_6_formulae.csv')
d7 = pd.read_csv('datasets/fragments/depth_7_formulae.csv')

# take 1000 formulae of each depth to build a depth-balanced validation set
sub2 = d2[13000:14000]
sub3 = d3[13000:14000]
sub4 = d4[13000:14000]
sub5 = d5[13000:14000]
sub6 = d6[13000:14000]
sub7 = d7[13000:14000]

final_data = pd.concat([sub2, sub3, sub4, sub5, sub6, sub7], axis=0, ignore_index=True)
final_data.to_csv('datasets/balanced_validation_set.csv')
history/datasets_things/descriptive_analysis.py
ADDED
@@ -0,0 +1,182 @@
import networkx as nx
import numpy as np
import pandas as pd
import stl
from utils2 import from_string_to_formula
from handcoded_tokenizer import STLTokenizer

tokenizer = STLTokenizer('tokenizer_files/tokenizer.json')

def mean_formulae_depth(dataset):
    formulae_depths = []
    for idx in range(len(dataset)):
        # get_depth below takes the string form and parses it itself
        formulae_depths.append(get_depth(dataset['Formula'][idx]))
    return np.mean(formulae_depths)

def get_depth(formula):
    formula = from_string_to_formula(formula)
    phi_g = build_dag(formula)[0]
    return len(nx.dag_longest_path(phi_g)) - 1

#######################################################################

def get_name_given_type(formula):
    """
    Returns the type of node (as a string) of the top node of the formula/sub-formula
    """
    name_dict = {stl.And: 'and', stl.Or: 'or', stl.Not: 'not', stl.Eventually: 'F', stl.Globally: 'G', stl.Until: 'U',
                 stl.Atom: 'x'}
    return name_dict[type(formula)]


def get_id(child_name, name, label_dict, idx):
    """
    Get a unique identifier for a node
    """
    while child_name in label_dict.keys():  # if the name is already present
        idx += 1
        child_name = name + "(" + str(idx) + ")"
    return child_name, idx  # returns both the child name and the identifier


def get_temporal_list(temporal_node):
    """
    Returns the feature vector for temporal nodes (the two bounds of the temporal interval).
    Variant and num_arg modify the length of the list to return (3, 4 or 5)
    """
    left = float(temporal_node.left_time_bound) if temporal_node.unbound is False else 0.
    right = float(temporal_node.right_time_bound) if (temporal_node.unbound is False and
                                                      temporal_node.right_unbound is False) else -1.
    vector_l = [left, right, 0.]  # third slot for sign and fourth for threshold; add another slot for argument number
    return vector_l


def add_internal_child(current_child, current_idx, label_dict):
    child_name = get_name_given_type(current_child) + '(' + str(current_idx) + ')'
    child_name, current_idx = get_id(child_name, get_name_given_type(current_child), label_dict, current_idx)
    return child_name, current_idx


def add_leaf_child(node, name, label_dict, idx):
    """
    Add the edges and update the label dictionary and the identifier count for a leaf node (variable).
    variant = ['original', 'threshold-sign', 'all-in-var']
    shared_var = [True, False] denotes shared variables for the whole DAG or single variables (tree-like)
    num_arg = [True, False]: if true, the argument number is one-hot encoded in the feature vector
    until_right is a flag to detect when the argument number encoding should be 1
    """
    new_e = []
    label_dict[name] = [0., 0., 0.]
    atom_idx = str(node).split()[0] + '(' + str(idx) + ')'
    # different names for the same variables (e.g. x_1(5), x_1(8))
    idx += 1
    if atom_idx not in label_dict.keys():
        label_dict[atom_idx] = [0., 0., 0.]

    # NOTE: both branches currently assign the same feature vector
    if str(node).split()[1] == '<=':
        label_dict[name] = [0., 0., round(node.threshold, 4)]
    else:
        label_dict[name] = [0., 0., round(node.threshold, 4)]
    new_e.append([name, atom_idx])
    return new_e, label_dict, idx + 1


def traverse_formula(formula, idx, label_dict):
    current_node = formula
    edges = []
    if type(current_node) is not stl.Atom:
        current_name = get_name_given_type(current_node) + '(' + str(idx) + ')'
        if (type(current_node) is stl.And) or (type(current_node) is stl.Or) or (type(current_node) is stl.Not):
            label_dict[current_name] = [0., 0., 0.]  # temp_left, temp_right, threshold
        else:
            label_dict[current_name] = get_temporal_list(current_node)
        if (type(current_node) is stl.And) or (type(current_node) is stl.Or) or (type(current_node) is stl.Until):
            left_child_name, current_idx = add_internal_child(current_node.left_child, idx + 1, label_dict)
            edges.append([current_name, left_child_name])
            if type(current_node.left_child) is stl.Atom:
                e, d, current_idx = add_leaf_child(current_node.left_child, left_child_name, label_dict, current_idx+1)
                edges += e
                label_dict.update(d)
            e, d = traverse_formula(current_node.left_child, current_idx, label_dict)
            edges += e
            label_dict.update(d)
            right_child_name, current_idx = add_internal_child(current_node.right_child, current_idx + 1, label_dict)
            edges.append([current_name, right_child_name])
            if type(current_node.right_child) is stl.Atom:
                e, d, current_idx = add_leaf_child(current_node.right_child, right_child_name, label_dict,
                                                   current_idx+1)
                edges += e
                label_dict.update(d)
            e, d = traverse_formula(current_node.right_child, current_idx, label_dict)
            edges += e
            label_dict.update(d)
        else:
            # eventually, globally, not
            child_name, current_idx = add_internal_child(current_node.child, idx + 1, label_dict)
            edges.append([current_name, child_name])
            if type(current_node.child) is stl.Atom:
                e, d, current_idx = add_leaf_child(current_node.child, child_name, label_dict, current_idx+1)
                edges += e
                label_dict.update(d)
            e, d = traverse_formula(current_node.child, current_idx, label_dict)
            edges += e
            label_dict.update(d)
    return edges, label_dict


def build_dag(formula):
    edges, label_dict = traverse_formula(formula, 0, {})
    graph = nx.from_edgelist(edges, create_using=nx.DiGraph)
    assert (nx.is_directed_acyclic_graph(graph))
    return graph, label_dict

#######################################################################

def get_n_nodes(str_phi):
    f_split = str_phi.split()
    f_nodes_list = [sub_f for sub_f in f_split if sub_f in ['not', 'and', 'or', 'always', 'eventually', '<=', '>=',
                                                            'until']]
    return len(f_nodes_list)


def get_n_leaves(str_phi):
    phi_split = str_phi.split()
    phi_var = [sub for sub in phi_split if sub.startswith('x_')]
    return len(phi_var)


def get_n_temp(str_phi):
    phi_split = str_phi.split()
    phi_temp = [sub for sub in phi_split if sub[:2] in ['ev', 'al', 'un']]
    return len(phi_temp)


def get_n_tokens(str_phi):
    return len(tokenizer.encode(str_phi))


def get_n_depth(str_phi):
    # get_depth already parses the string, so pass it through directly
    return get_depth(str_phi)

#######################################################################

study = pd.read_pickle('datasets/hardsk_train_set.pkl')

depths = study['Formula'].apply(get_depth)
tokens = study['Formula'].apply(get_n_tokens)
nodes = study['Formula'].apply(get_n_nodes)
temps = study['Formula'].apply(get_n_temp)
leaves = study['Formula'].apply(get_n_leaves)

descr = pd.DataFrame({
    'Formula': study['Formula'],
    'Depths': depths,
    'Tokens': tokens,
    'Nodes': nodes,
    'Temps': temps,
    'Leaves': leaves
})

descr.to_pickle('descriptive/hardsk_train_set.pkl')
history/datasets_things/download_datasets.py
ADDED
@@ -0,0 +1,10 @@
import pandas as pd

splits = {'train': 'train.csv', 'validation': 'validation.csv', 'test': 'test.csv'}
test_df = pd.read_csv("hf://datasets/saracandu/stldecoding/" + splits["test"])
validation_df = pd.read_csv("hf://datasets/saracandu/stldecoding/" + splits["validation"])
train_df = pd.read_csv("hf://datasets/saracandu/stldecoding/" + splits["train"])

test_df.to_csv('datasets/test_set.csv')
train_df.to_csv('datasets/train_set.csv')
validation_df.to_csv('datasets/validation_set.csv')
history/datasets_things/formulae_generation.py
ADDED
@@ -0,0 +1,83 @@
import os
import numpy as np
import torch
from accelerate import Accelerator
from safetensors import safe_open
from safetensors.torch import load_file
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import ast
import pandas as pd
from handcoded_tokenizer import STLTokenizer
from configuration import STLConfig
from modeling_stldec import STLForCausalLM
from encoder import STLEncoder
from transformers import AutoConfig, AutoModelForCausalLM

# Base paths
model_dir = "balanced/"
eval_df = pd.read_csv("datasets/test_balanced_validation_set.csv")

# STL model registration
AutoConfig.register("STLdec", STLConfig)
AutoModelForCausalLM.register(STLConfig, STLForCausalLM)

config = STLConfig()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Tokenizer and encoder
tokenizer = STLTokenizer('tokenizer_files/tokenizer.json')
encoder = STLEncoder(embed_dim=1024, anchor_filename='anchor_set_1024_dim.pickle')

accelerator = Accelerator()

# Find all step_* checkpoints
step_folders = [f for f in os.listdir(model_dir) if f.startswith('step_')]

# Prepare the dataset for generation
formulae_dataset = []

for idx in range(len(eval_df)):
    embedding = eval(eval_df["Embedding"][idx])
    # embeddings are float vectors; shape them (batch, seq, dim) as in the validation scripts
    embedding = torch.tensor(embedding, dtype=torch.float32).to(device).unsqueeze(0).unsqueeze(0)
    tok_formula = eval(eval_df["Encoded_Formula"][idx])
    gold_formula = eval_df["Formula"][idx]

    # Generate one decoded formula per step_* checkpoint
    generated_texts = {"Gold Formula": gold_formula}

    for step_folder in step_folders:
        # Load the model, optimizer and scheduler for this step_*
        model_path = os.path.join(model_dir, step_folder)
        optimizer_path = os.path.join(model_path, 'optimizer.bin')
        scheduler_path = os.path.join(model_path, 'scheduler.bin')

        # Load the model
        model = AutoModelForCausalLM.from_pretrained(model_path, config=config).to(device)

        # Load the optimizer and the scheduler
        optimizer = torch.load(optimizer_path)
        scheduler = torch.load(scheduler_path)
        optimizer = accelerator.prepare(optimizer)
        scheduler = accelerator.prepare(scheduler)

        # Generate the text
        with torch.no_grad():
            generated_ids = model.generate(
                encoder_hidden_states=embedding,
                pad_token_id=model.config.pad_token_id,
                bos_token_id=model.config.bos_token_id,
                max_new_tokens=500
            )

        generated_text = tokenizer.decode(generated_ids[0][2:-2].tolist())
        generated_texts[step_folder] = generated_text

    formulae_dataset.append(generated_texts)

# Build the final dataframe
final_df = pd.DataFrame(formulae_dataset)

# Save the dataframe as CSV
final_df.to_csv('balanced_all_steps.csv', index=False)
history/datasets_things/generate_sets.py
ADDED
@@ -0,0 +1,77 @@
import os
import torch
from torch.nn.functional import normalize
import copy
import numpy as np
import pandas as pd
import stl
from encoder import STLEncoder
from handcoded_tokenizer import STLTokenizer

from phis_generator_depth import StlGenerator
from traj_measure import BaseMeasure
from utils import from_string_to_formula, load_pickle, dump_pickle, get_depth
from kernel import StlKernel


encoder = STLEncoder(embed_dim=512, anchor_filename="anchor_set_512.pickle")
tokenizer = STLTokenizer('tokenizer_files/tokenizer.json')


# Helper function to generate and filter formulae
def generate_and_filter(n_phis, n_vars, depth):
    sampler = StlGenerator(max_depth=depth, min_depth=depth)
    sampled_objs = sampler.bag_sample(bag_size=n_phis, nvars=n_vars)

    # convert to string
    sampled_objs = list(map(str, sampled_objs))

    lengths = []
    for obj in sampled_objs:
        lengths.append(len(tokenizer.encode(obj)))

    # filter sampled_objs, keeping those whose token length is less than 500
    filtered_objs = [obj for i, obj in enumerate(sampled_objs) if lengths[i] < 500]
    return filtered_objs
    # return sampled_objs

def generate_formulae_depth(n_phis, n_vars, depth):

    # Generate an initial batch of formulae
    formulae = generate_and_filter(n_phis, n_vars, depth)

    # If we don't have enough formulae, regenerate until we meet the required number
    while len(formulae) < n_phis:
        delta = n_phis - len(formulae)
        additional_formulae = generate_and_filter(delta, n_vars, depth)
        formulae.extend(additional_formulae)

    # Truncate the list to exactly n_phis formulae if needed
    # formulae = formulae[:n_phis]

    return formulae

def embed_generated_formulae(df):
    # sampled_formulae = list(map(str, df['Formula Obj']))
    formulae_embeddings = encoder.compute_embeddings(df)
    return formulae_embeddings.tolist()


# df = pd.read_csv('datasets/train_set.csv')
df = pd.read_pickle('real_phis.pkl')
formulae_to_embed = df['Formula']
# formulae_to_embed = ["always ( eventually[6,20] ( x_1 >= -0.7636 ) )", "always ( eventually[1,25] ( x_1 >= -0.5783 ) )"]

# here we do not pass an anchor set, so the Encoder creates a new one of dimension `embed_dim`
# encoder = STLEncoder(embed_dim=1024, anchor_filename='anchor_set_1024_dim.pickle')
# print('computing embeddings')

formulae_embeddings = encoder.compute_embeddings(formulae_to_embed)

# print(formulae_embeddings.tolist())

df['Embedding512'] = formulae_embeddings.tolist()

# print('produce new file')
df.to_pickle('real_phis.pkl')
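
Finally, a usage sketch for the sampling helpers above (editor's illustration; the argument values are hypothetical):

# Illustration only: sample 100 depth-4 formulae over 3 variables, then embed them.
phis = generate_formulae_depth(n_phis=100, n_vars=3, depth=4)
embeddings = embed_generated_formulae(phis)
print(len(phis), len(embeddings[0]))  # at least 100 formulae, each mapped to a 512-dim embedding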