import unittest
from hashlib import sha256
import numpy as np
import pandas as pd
from rowgenerators.appurl import parse_app_url
from publicdata.census.api.censusapi import CensusApi
from publicdata.census.api.url import CensusApiUrl
def test_data(*paths):
from os.path import dirname, join, abspath
return abspath(join(dirname(abspath(__file__)), 'test_data', *paths))
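# --- Illustrative sketch (not part of the original tests) ---
# A censusapi URL packs a query as '<dataset>/<in-spec>/<for-spec>#<fields>';
# the split below mirrors the decomposition that test_basic asserts on. The
# helper name and the returned labels are descriptive assumptions, not part
# of the CensusApiUrl API.
def describe_census_url(url_str):
    u = parse_app_url(url_str)
    dataset_id, in_spec, for_spec = u.path.split('/')
    if not dataset_id:  # the '://' form puts the dataset id in netloc
        dataset_id = u.netloc
    return {'dataset': dataset_id, 'in': in_spec, 'for': for_spec}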
class BasicTests(unittest.TestCase):
def test_basic(self):
u = CensusApiUrl('censusapi://ACSST1Y2018/state:01/county:*#NAME,S2001_C06_007E')
        self.assertEqual('censusapi', u.proto)
self.assertIsInstance(u, CensusApiUrl)
dataset_id, in_spec, for_spec = u.path.split('/')
if not dataset_id: # The url has a :// in it
dataset_id = u.netloc
self.assertEqual('ACSST1Y2018', dataset_id)
self.assertEqual('state:01', in_spec)
self.assertEqual('county:*', for_spec)
m = sha256()
# Iterate and check the result.
for row in u.generator:
if row:
m.update((' '.join(str(e) for e in row)).encode('utf8'))
self.assertEqual('1647c540edc0b03e5e37bef0b4cef34e5e57384a32996437ac8e1dbcba2ecc4a',
m.hexdigest())
m = sha256()
for e in list(u.dataframe.NAME):
            m.update(e.encode('utf8'))
self.assertEqual('52e616c47998a796921a8987cde8d5b466000557cea6e1fef9fa7f960a504ed7',
m.hexdigest())
def test_url_entrypoint(self):
m = sha256()
u = parse_app_url('censusapi://ACSST1Y2018/state:01/county:*#NAME,S2001_C06_007E')
# Iterate and check the result.
for row in u.generator:
if row:
m.update((' '.join(str(e) for e in row)).encode('utf8'))
self.assertEqual('1647c540edc0b03e5e37bef0b4cef34e5e57384a32996437ac8e1dbcba2ecc4a',
m.hexdigest())
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "9839cf532b57a58bd0086aea9592d039111d52e3", "size": 1989, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/publicdata/census/api/test/test_censusapi.py", "max_stars_repo_name": "Metatab/publicdata_census", "max_stars_repo_head_hexsha": "ea2319bb2bd16718b522924fa690b3154ea3dc32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/publicdata/census/api/test/test_censusapi.py", "max_issues_repo_name": "Metatab/publicdata_census", "max_issues_repo_head_hexsha": "ea2319bb2bd16718b522924fa690b3154ea3dc32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-04T20:23:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-04T20:23:05.000Z", "max_forks_repo_path": "src/publicdata/census/api/test/test_censusapi.py", "max_forks_repo_name": "Metatab/publicdata_census", "max_forks_repo_head_hexsha": "ea2319bb2bd16718b522924fa690b3154ea3dc32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-01T14:59:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-01T14:59:45.000Z", "avg_line_length": 26.8783783784, "max_line_length": 92, "alphanum_fraction": 0.6375062846, "include": true, "reason": "import numpy", "num_tokens": 541}
|
[STATEMENT]
lemma ok_SKIP2 [iff]: "F ok SKIP"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. F ok \<bottom>
[PROOF STEP]
by (simp add: ok_def)
|
{"llama_tokens": 67, "file": null, "length": 1}
|
[STATEMENT]
lemma index_simp:
"(u = v) = (u none = v none \<and> u some = v some)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (u = v) = (u none = v none \<and> u some = v some)
[PROOF STEP]
by (safe, rule ext, case_tac "x", auto)
|
{"llama_tokens": 103, "file": "GraphMarkingIBP_DSWMark", "length": 1}
|
[STATEMENT]
lemma start_of_lessE[elim]: "\<lbrakk>abc_fetch as ap = Some (Dec n e);
start_of (layout_of ap) as < start_of (layout_of ap) e;
start_of (layout_of ap) e \<le> Suc (start_of (layout_of ap) as + 2 * n)\<rbrakk>
\<Longrightarrow> RR"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>abc_fetch as ap = Some (Dec n e); start_of (layout_of ap) as < start_of (layout_of ap) e; start_of (layout_of ap) e \<le> Suc (start_of (layout_of ap) as + 2 * n)\<rbrakk> \<Longrightarrow> RR
[PROOF STEP]
using start_of_less[of e as "layout_of ap"] start_of_ge[of as ap n e "layout_of ap"]
[PROOF STATE]
proof (prove)
using this:
e < as \<Longrightarrow> start_of (layout_of ap) e \<le> start_of (layout_of ap) as
\<lbrakk>abc_fetch as ap = Some (Dec n e); layout_of ap = layout_of ap; as < e\<rbrakk> \<Longrightarrow> start_of (layout_of ap) as + 2 * n + 16 \<le> start_of (layout_of ap) e
goal (1 subgoal):
1. \<lbrakk>abc_fetch as ap = Some (Dec n e); start_of (layout_of ap) as < start_of (layout_of ap) e; start_of (layout_of ap) e \<le> Suc (start_of (layout_of ap) as + 2 * n)\<rbrakk> \<Longrightarrow> RR
[PROOF STEP]
apply(cases "as < e", simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>abc_fetch as ap = Some (Dec n e); start_of (layout_of ap) as < start_of (layout_of ap) e; start_of (layout_of ap) e \<le> Suc (start_of (layout_of ap) as + 2 * n); e < as \<Longrightarrow> start_of (layout_of ap) e \<le> start_of (layout_of ap) as; \<lbrakk>abc_fetch as ap = Some (Dec n e); layout_of ap = layout_of ap; as < e\<rbrakk> \<Longrightarrow> start_of (layout_of ap) as + 2 * n + 16 \<le> start_of (layout_of ap) e; \<not> as < e\<rbrakk> \<Longrightarrow> RR
[PROOF STEP]
apply(cases "as = e", simp, simp)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 747, "file": "Universal_Turing_Machine_Abacus", "length": 4}
|
from PIL import Image
import numpy
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
# Create image data generator
image_generator = ImageDataGenerator(
validation_split=0.15,
horizontal_flip=True,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1,
rotation_range=5,
rescale=1. / 255
)
train_generator = image_generator.flow_from_directory("dataset", subset="training", target_size=(224, 224),
batch_size=8)
validation_generator = image_generator.flow_from_directory("dataset", subset="validation", target_size=(224, 224),
batch_size=8)
# Show an image from train set
Image.fromarray((next(train_generator)[0][0] * 255).astype(numpy.uint8)).show()
# Create model
mobile = MobileNet(
input_shape=(224, 224, 3),
include_top=False,
weights='imagenet',
pooling='avg',
alpha=0.5
)
output = Dropout(0.4)(mobile.output)
output = Dense(8, activation="relu")(output)
output = Dense(3, activation="softmax")(output)  # softmax (not sigmoid) pairs with categorical_crossentropy below
model = Model(inputs=mobile.input, outputs=output)
model.summary()
# Compile model
model.compile(optimizer=Adam(amsgrad=True), loss="categorical_crossentropy", metrics=["accuracy"])
callbacks = [
ReduceLROnPlateau(
patience=3,
factor=0.2,
verbose=1,
min_lr=1e-5
),
ModelCheckpoint(
filepath="croissant.hdf5",
verbose=1,
save_best_only=True
)
]
# Train
model.fit_generator(
generator=train_generator,
steps_per_epoch=256,
epochs=50,
verbose=1,
validation_data=validation_generator,
validation_steps=40,
callbacks=callbacks
)
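# --- Illustrative sketch (not part of the original script) ---
# Loading the checkpoint written by ModelCheckpoint above and classifying a
# single image. The image path is a placeholder; depending on the TF/Keras
# version, load_model may need custom_objects for MobileNet layers (an
# assumption, not verified against this repo).
def predict_one(image_path):
    from tensorflow.python.keras.models import load_model
    model = load_model("croissant.hdf5")
    img = Image.open(image_path).convert("RGB").resize((224, 224))
    x = numpy.asarray(img, dtype="float32")[None] / 255.0  # match rescale=1./255
    return model.predict(x)[0]  # per-class probabilities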
|
{"hexsha": "f76feb57c37edd471173755ae80575b821e01189", "size": 2031, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/02_train.py", "max_stars_repo_name": "PPACI/Devoxx19-TensorflowJS", "max_stars_repo_head_hexsha": "4096c8ea460af8a9f8a36df01e88309568318ab8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-07-20T12:15:06.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-06T16:56:37.000Z", "max_issues_repo_path": "python/02_train.py", "max_issues_repo_name": "PPACI/Devoxx19-TensorflowJS", "max_issues_repo_head_hexsha": "4096c8ea460af8a9f8a36df01e88309568318ab8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/02_train.py", "max_forks_repo_name": "PPACI/Devoxx19-TensorflowJS", "max_forks_repo_head_hexsha": "4096c8ea460af8a9f8a36df01e88309568318ab8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-03-04T20:56:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-07T16:26:44.000Z", "avg_line_length": 29.0142857143, "max_line_length": 114, "alphanum_fraction": 0.694239291, "include": true, "reason": "import numpy", "num_tokens": 459}
|
# -*- coding: utf-8 -*-
"""
workflow.py
Richard Wen (rwenite@gmail.com)
===============================================================
A script for interfacing with input and output files via
a workflow based approach. Handles progress saving by only
incorporating file checks to see if a particular process
has already been run, and skipping the processing step.
===============================================================
"""
"""
===============================================================
Modules
===============================================================
"""
import base64
from io import BytesIO
from math import sqrt
from modules import helpers
from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from statsmodels.robust.scale import mad
from treeinterpreter import treeinterpreter as ti
import geopandas as gpd
import logging
import numpy as np
import os
import pandas as pd
import pickle
"""
===============================================================
Variables
===============================================================
"""
_report_template = """
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Report: {{ title }}</title>
<meta name="description" content="Random forest summary metrics for {{ title }}">
<style>
h1{
font-size: 300%;
text-align: center;
padding: 30px;
border-bottom: 5px double black;
}
h2{
font-size: 200%;
text-align: center;
padding: 10px;
padding-top: 50px;
border-bottom: 1px solid black;
}
p {
text-align: center;
}
table{
border: 0;
text-align: left;
font-size: 120%;
padding: 10px;
margin-left:auto;
margin-right:auto;
}
th{
border-bottom: 1px solid black;
border-collapse: collapse;
padding: 10px;
}
td{
padding: 5px;
padding-left: 10px;
}
img{
height: 100vh;
display: block;
margin-left: auto;
margin-right: auto;
padding: 30px;
}
</style>
</head>
<body>
<h1>Summary Report: {{ exp_title }}</h1>
<h2>Geospatial Semantic Features</h2><br>
{{ cd_plot }}
<h2>Multicollinearity Reduction</h2><br>
{{ ocorr_table }}
{{ ocorr_plot }}
<h2>Parameter Optimization</h2><br>
{{ grid_table }}
<h2>Cross Validation Performance</h2><br>
{{ cv_plot }}
<h2>Class Probabilities</h2><br>
{{ prob_plot }}
<h2>Feature Importance</h2><br>
{{ imp_plot }}
<h2>Outliers</h2><br>
{{ outlier_plot }}
</body>
</html>
"""
"""
===============================================================
Configuration Functions
===============================================================
"""
def _global_config(config, settings=None, avail=None):
"""
_global_config: obj -> obj
---------------------------------------------------------------
    Sets the local [config] to the global [settings] if a global
    setting exists. If every local [config] key is already set, or if
    no [settings] and [avail] are given, the original [config] is
    returned unchanged.
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Optional Parameters
-------------------
* settings: None OR obj
The global settings to use if it exists, otherwise
use the defaults.
* avail: None OR (listof str)
The available [config] keys to set. Only these keys
will be set to global defaults if they exist.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
if settings is not None and avail is not None:
for k in settings.keys():
if k not in config and k in avail:
config[k] = settings[k]
return config
def analysis_config(config, settings=None):
"""
analysis_config: obj -> obj
---------------------------------------------------------------
Sets the defaults for a custom analysis configuration object
from configobj.
The defaults are only set if the setting does not exist (thus,
it is implied that the user needs a default).
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Optional Parameters
-------------------
* settings: None
The global settings to use if it exists, otherwise
use the defaults.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
config = _global_config(config, settings, ['cross_validation_tests', 'high_correlations', 'outlier_value', 'persist'])
config['cross_validation_tests'] = [2, 5, 10] if 'cross_validation_tests' not in config else config['cross_validation_tests']
config['high_correlations'] = [-0.7, 0.7] if 'high_correlations' not in config else config['high_correlations']
config['outlier_value'] = 10 if 'outlier_value' not in config else config['outlier_value']
config['persist'] = True if 'persist' not in config else config['persist']
return config
def forest_config(config, n_jobs=-1, settings=None):
"""
forest_config: obj -> obj
---------------------------------------------------------------
Sets the defaults for a custom forest configuration object
from configobj.
The defaults are only set if the setting does not exist (thus,
it is implied that the user needs a default).
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Optional Parameters
-------------------
* settings: None
The global settings to use if it exists, otherwise
use the defaults.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
config = _global_config(config, settings, RandomForestClassifier._get_param_names())
config['n_estimators'] = [10, 64, 96, 128] if 'n_estimators' not in config else config['n_estimators']
config['criterion'] = ['entropy'] if 'criterion' not in config else config['criterion']
    config['oob_score'] = [True] if 'oob_score' not in config else config['oob_score']
config['class_weight'] = ['balanced'] if 'class_weight' not in config else config['class_weight']
config['n_jobs'] = [n_jobs] if 'n_jobs' not in config else config['n_jobs']
return config
def experiment_config(config):
"""
experiment_config: obj -> obj
---------------------------------------------------------------
Sets the defaults for a custom experiment info configuration object
from configobj.
The defaults are only set if the setting does not exist (thus,
it is implied that the user needs a default).
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
config['title'] = '' if 'title' not in config else config['title']
config['filter'] = [] if 'filter' not in config else config['filter']
config['id'] = [] if 'id' not in config else config['id']
config['keep_columns'] = [] if 'keep_columns' not in config else config['keep_columns']
config['epsg'] = '4326' if 'epsg' not in config else config['epsg']
config['units'] = 'units' if 'units' not in config else config['units']
return config
def plot_config(config, settings=None):
"""
plot_config: obj -> obj
---------------------------------------------------------------
Sets the defaults for a custom experiment plot configuration object
from configobj.
The defaults are only set if the setting does not exist (thus,
it is implied that the user needs a default).
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Optional Parameters
-------------------
* settings: None
The global settings to use if it exists, otherwise
use the defaults.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
config = _global_config(config, settings)
config['plot_style'] = 'whitegrid' if 'plot_style' not in config else config['plot_style']
config['plot_color'] = 'gray' if 'plot_color' not in config else config['plot_color']
config['plot_dpi'] = 300 if 'plot_dpi' not in config else config['plot_dpi']
config['plot_ext'] = '.png' if 'plot_ext' not in config else config['plot_ext']
return config
def settings_config(config):
"""
settings_config: obj -> obj
---------------------------------------------------------------
Sets the defaults for a custom settings configuration object
from configobj.
The defaults are only set if the setting does not exist (thus,
it is implied that the user needs a default).
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
# (Settings) Configure the global settings
settings = config['settings']
config['settings']['cores'] = -1 if 'cores' not in settings else settings['cores']
# (Plots) Configure global plot settings
config['settings']['plot'] = {} if 'plot' not in settings else settings['plot']
config['settings']['plot'] = plot_config(config['settings']['plot'])
# (Analysis) Configure global analysis settings
config['settings']['analysis'] = {} if 'analysis' not in settings else settings['analysis']
config['settings']['analysis'] = analysis_config(config['settings']['analysis'])
# (Forest) Configure global forest settings
config['settings']['forest'] = {} if 'forest' not in settings else settings['forest']
config['settings']['forest'] = forest_config(config['settings']['forest'], n_jobs=config['settings']['cores'])
logging.info('Checked configuration file with defaults set when applicable')
return config
"""
===============================================================
Functions
===============================================================
"""
def gen_contrib(pkl, rf, outliers, variables, suspect_value=10, outlier_col='outlier_measure', cls_col='class', persist=True):
"""
gen_contrib: str obj pd.DataFrame pd.DataFrame float str str bool -> pd.DataFrame
---------------------------------------------------------------
Generates the contributions for each outlier given a [suspect] value.
Required Parameters
-------------------
* pkl: str
The pickle file to store the probabilities
* rf: obj
The sklearn random forest model that has been previously trained.
* outliers: pd.DataFrame
The outlier measures obtained from the [rf] model from sklearn.
It consists of two columns: [outlier_col] and [cls_col].
* variables: pd.DataFrame
The variables used to train the [rf] model from sklearn.
* suspect_value: float
The cutoff range to suspect an outlier. Any outlier measure
greater than this value is considered an outlier.
* outlier_col: str
The outlier measure column name of [outliers].
* cls_col: str
The class column name of [outliers].
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
Returns
-------
* df: pd.DataFrame
The result dataframe with the classes, and the variable
contributions for each outlier.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) and persist:
# (Suspects) Obtain suspecting outliers
suspects = pd.concat([outliers, variables], axis=1)
suspects = suspects[suspects[outlier_col] > suspect_value]
svariables = suspects.drop([outlier_col, cls_col], axis=1) # variables of outliers
# (Feat_Contrib) Obtain variable contributions to assigned class
fc = ti.predict(rf, svariables.values)[2]
contrib = []
for c, cls in zip(fc, outliers[cls_col]):
idx = np.where(rf.classes_ == cls)
fci = [ft[idx][0] for ft in c]
contrib.append(fci)
# (Contrib_DF) Build informative contribution dataframe
df = pd.DataFrame(contrib)
df.columns = svariables.columns
df.index = svariables.index
df = pd.concat([suspects[cls_col], df], axis=1)
with open(pkl, 'wb') as f:
pickle.dump(df, f, protocol=4)
logging.info('Pickled outlier variable contributions ' + pkl)
else:
with open(pkl, 'rb') as f:
df = pickle.load(f)
logging.info('Pickled outlier variable contributions already exists, skipping ' + pkl)
return df
def gen_csv(out, df, persist=True, *args, **kwargs):
"""
gen_csv: str obj bool *args **kwargs -> None
---------------------------------------------------------------
Generates a csv file from a pandas [df] object. Skips
the generation if the csv file already exists.
Required Parameters
-------------------
* out: str
The path to store the csv file with extension
* df: obj
A pandas dataframe to save
* *args: *args
Arguments to be passed to to_csv from pandas
* **kwargs: **kwargs
Keyword arguments to be passed to to_csv from pandas
Optional Parameters
-------------------
    * persist: bool
        Whether to regenerate the csv file or not.
---------------------------------------------------------------
"""
if not os.path.isfile(out):
df.to_csv(out, *args, **kwargs)
logging.info('Table saved at ' + out)
else:
logging.info('Table already exists, skipping ' + out)
def gen_f1_scores(pkl, obj, variables, targets, cv_files, cvs, persist=True, n_jobs=-1):
"""
gen_f1_scores: str obj pd.DataFrame pd.Series (listof str) (listof int) bool int -> pd.DataFrame
---------------------------------------------------------------
Generates the f1 scores for each cross validation test
specified by [cvs].
Required Parameters
-------------------
* pkl: str
The pickle file to store the probabilities
* obj: obj
The sklearn model that has been previously trained.
* variables: pd.DataFrame
The variables used to train the [obj] model from sklearn.
    * targets: pd.Series
The true target classes used to train the [obj] model from sklearn.
* cv_files: (listof str)
The cv files to save each cross_val_score object from
sklearn.
* cvs: (listof int)
The cross validation folds for each test in list form.
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
* n_jobs: int
Number of cores to use for parallel processing.
Returns
-------
* cv_scores: pd.DataFrame
The result dataframe with a column for the folds
used for each cross validation and the respective
mean f1 scores.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) and persist:
scores = []
for cv_pkl, cv in zip(cv_files, cvs):
f1_scores = gen_pkl(cv_pkl, _func=cross_val_score, _persist=persist, estimator=obj, X=variables.values, y=targets.values, cv=cv, scoring='f1_weighted', n_jobs=n_jobs)
scores.append(f1_scores.mean())
cvs = pd.Series(cvs, name='cv_folds')
scores = pd.Series(scores, name='mean_f1_score')
cv_scores = pd.concat([cvs, scores], axis=1)
with open(pkl, 'wb') as f:
pickle.dump(cv_scores, f, protocol=4)
logging.info('Pickled F1 scores of cross validation tests ' + pkl)
else:
with open(pkl, 'rb') as f:
cv_scores = pickle.load(f)
logging.info('Pickled F1 scores of cross validation tests already exists, skipping ' + pkl)
return cv_scores
def gen_gdc(data_files, target, epsg, pkl, cols=[], persist=True):
"""
    gen_gdc: (listof str) str str str (listof str) bool -> pd.DataFrame
---------------------------------------------------------------
Reads the list of files containing geodata and combines
them into one dataframe, before pickling them into a file
at [pkl]. The data will also be projected to [epsg] and
is assumed to all have the same coordinate reference system.
Geometric variables such as geom_type, length, area (units^2), vertices,
repx, and repy will also be included. Only the target variable
will be included from the data files for classification.
Required Parameters
-------------------
* data_files: (listof str)
The geodata files to be read by geopandas via fiona.
See http://www.gdal.org/ogr_formats.html
* target: str
The classification col in [gdc] with class data
* epsg: str
The coordinate reference system number in epsg to project the data to.
http://geopandas.org/user.html#GeoSeries.to_crs
    * pkl: str
        The pickle file path to save the read geodata
    Optional Parameters
    -------------------
    * cols: (listof str)
        The list of column names to keep.
    * persist: bool
        Whether to generate a pickle file or not.
Returns
-------
* gd: pd.DataFrame
The combined data from [data_files] projected
to [epsg]
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) and persist:
gdc = helpers.concat_gdf(data_files, epsg=epsg)
crs = gdc.crs
variables = helpers.get_series(gdc, series_cols=cols + ['geom_type', 'length', 'area'])
variables['area'] = variables['area'].apply(sqrt)
vtx = helpers.get_vtx(gdc)
pts = gdc.representative_point()
pts = pd.DataFrame([[p.x, p.y] for p in pts], columns=['repx', 'repy'])
gdc = pd.concat([gdc[target], pts, variables, vtx, gdc.geometry], axis=1)
gdc = gpd.GeoDataFrame(gdc)
gdc.crs = crs
with open(pkl, 'wb') as f:
pickle.dump(gdc, f, protocol=4)
logging.info('Pickled GeoDataFrame file ' + pkl)
else:
with open(pkl, 'rb') as f:
gdc = pickle.load(f)
logging.info('GeoDataFrame file exists, skipping pickle for ' + pkl)
return gdc
def gen_gdcn(gdc,
gdn,
target,
pkl,
gdn_ipattern='near_',
corr_pkl=None,
corr_range=(-0.8, 0.8),
ignr_corr=None,
scaler=None,
ignr_scale=None,
persist=True):
"""
gen_gdcn: gpd.GeoDataFrame
gpd.GeoDataFrame
str
str
str OR None
str OR None
(tupleof num)
(listof str) OR None
obj
(listof str) OR None
bool
-> pd.DataFrame
---------------------------------------------------------------
    Combines the relationship data [gdn] with [gdc]. Also performs
    preprocessing (multicollinearity reduction, removal of zero-variance
    variables, and scaling) depending on the [corr_pkl] through [scaler]
    arguments.
Required Parameters
-------------------
    * gdc: gpd.GeoDataFrame
The geodataframe with the geometric variables and the original
data used to generate [gdn]
* gdn: gpd.GeoDataFrame
The geodataframe with the nearest distance to each
[target] class of [gdc] for each row of [gdc]
* target: str
The group col in [gdc] representing the classification groups
* pkl: str
        The pickle file path to save the combined variables data
Optional Parameters
-------------------
* gdn_ipattern: str OR None
If not None, set this to the alias for the [gdn] variables
pattern in which each column corresponds to a unique class in the
[target] column with an added alias in front of its name.
E.g. If gdn_ipattern = 'near_' and a class from target is 'bus_stop',
the corresponding target class col from [gdn] would be 'near_bus_stop'
Once set, this will order the [gdn] columns in descending order
of [target] class counts - thus the class with the most counts are
first and the the class with the least counts are last. This is
useful for the ordered reduction of multicollinearity included
with this function.
* corr_pkl: str OR None
If not None, reduces multicollinearity in the data by only
limiting to variables that are not correlated to each
other. This considers variables to keep in order starting
with variables from the [gdf] then variables from the [gdn].
Specify a path to pickle the details of the correlation
removal in order to apply it.
* corr_range: (tupleof num)
        If [corr_pkl] is not None, specify the negative (1st item)
        and positive (2nd item) correlation thresholds for considering
        multicollinearity.
* ignr_corr: (listof str) OR None
If [corr_pkl] is not None, specify the columns to ignore
when checking for high correlation removal
* scaler: obj OR None
If not None, uses a sklearn scaler object to scale the
non-categorical variables.
* ignr_scale: (listof str) OR None
If [scaler] is not None, specify the columns to ignore
when scaling variables.
* persist: bool
Whether to regenerate a pickle file or not.
Returns
-------
    * gdcn: pd.DataFrame
        The [gdc] data with the added relationship variables,
        modified with preprocessing from the [corr_pkl] through [scaler]
        adjustments if applicable.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) or not persist:
gdn = gdn['near_' + gdc[target].value_counts().index] if gdn_ipattern is not None else gdn # order by freq of [target]
gdcn = pd.concat([gdc, gdn], axis=1)
# (Variance) Remove zero variance variables
var = gdcn.var()
gdcn = gdcn.drop(var[var == 0].index, axis=1)
# (Multicollinearity) Remove colinear variables in order
if corr_pkl is not None:
if ignr_corr is None:
ocorr = helpers.ocorr_df(gdcn.drop(target, axis=1), corr_range[0], corr_range[1])
vkeep = ocorr.keep.tolist() + [target]
else:
corr_cols = [c for c in gdcn.columns if c not in ignr_corr and c != target]
corr_chk = gdcn.drop(target, axis=1)[corr_cols]
ocorr = helpers.ocorr_df(corr_chk, corr_range[0], corr_range[1])
vkeep = ocorr.keep.tolist() + [target] + ignr_corr
gdcn = gdcn[vkeep] # keep non-correlated variables
with open(corr_pkl, 'wb') as f:
pickle.dump(ocorr, f, protocol=4)
logging.info('Pickled dictionary of removed correlated variables at ' + corr_pkl)
# (Scale) Use a scaler to transform variables
if scaler is not None:
if ignr_scale is None:
scale_cols = gdcn.columns
else:
scale_cols = [c for c in gdcn.columns if c not in ignr_scale]
gdcn[scale_cols] = scaler.fit_transform(gdcn[scale_cols].values)
# (Save) Pickle the [complete] data
with open(pkl, 'wb') as f:
pickle.dump(gdcn, f, protocol=4)
logging.info('Calculated and pickled combined geodata file ' + pkl)
else:
with open(pkl, 'rb') as f:
gdcn = pickle.load(f)
logging.info('Combined geodata already calculated, skipping pickle for ' + pkl)
return gdcn
def gen_html_plot(_fig, *args, **kwargs):
"""
gen_html_plot: obj -> str
---------------------------------------------------------------
Converts a matplotlib figure [obj] to bytes for use in
data uri of html templates. Original code modified from
[1].
References
----------
* [1] http://stackoverflow.com/questions/31492525/converting-matplotlib-png-to-base64-for-viewing-in-html-template
Required Parameters
-------------------
* _fig: obj
A matplotlib figure obj.
Optional Parameters
-------------------
* *args: *args
Arguments to be passed to [fig].savefig
* **kwargs: **kwargs
Keyword arguments to be passed to [fig].savefig
Returns
-------
* html_plot: str
The string representation of image data from [fig]
to be embedded as a data uri in an html template.
---------------------------------------------------------------
"""
fig_io = BytesIO()
_fig.savefig(fig_io, *args, **kwargs)
fig_io.seek(0)
data_uri = base64.b64encode(fig_io.getvalue()).decode('utf8')
    html_plot = '<img src="data:image/png;base64,' + data_uri + '">'
return html_plot
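# Illustrative usage of gen_html_plot (not part of the original module):
# embed a matplotlib figure into the report template. matplotlib is assumed
# to be available; this module never imports it directly.
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.plot([0, 1], [0, 1])
#   html = gen_html_plot(fig, format='png', bbox_inches='tight')
#   # html is '<img src="data:image/png;base64,...">' ready for the template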
def gen_imp(pkl, obj, variable_cols, persist=True):
"""
gen_imp: str obj (listof str) bool -> pd.DataFrame
---------------------------------------------------------------
Uses a trained model [obj] from sklearn to extract the
variable importances.
Required Parameters
-------------------
* pkl: str
The pickle file to store the variable importances.
* obj: obj
The sklearn model that has been previously trained.
* variable_cols: pd.DataFrame
The names of the variables used to train the [obj] model
from sklearn in order.
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
Returns
-------
* rf_imp: pd.DataFrame
The variable importance dataframe.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) or not persist:
imp = pd.DataFrame(obj.feature_importances_, columns=['importance'], index=variable_cols)
imp['variable'] = imp.index.values
imp = imp.sort_values(by='importance', ascending=False)
with open(pkl, 'wb') as f:
pickle.dump(imp, f, protocol=4)
logging.info('Pickled random forest variable importances ' + pkl)
else:
with open(pkl, 'rb') as f:
imp = pickle.load(f)
logging.info('Pickled random forest variable importances already exists, skipping ' + pkl)
return imp
def gen_mprob(pkl, prob, cls_col='predict', prob_col='max_prob', persist=True):
"""
gen_mprob: str pd.DataFrame str str bool -> pd.DataFrame
---------------------------------------------------------------
Obtains the mean probability for each class given the generated
probabilities.
Required Parameters
-------------------
* pkl: str
The pickle file to store the mean class probabilities.
* prob: pd.DataFrame
The probabilities to calculate the mean class probabilities
from. There must be a class column named [target] and
a probability column named [prob_col].
Optional Parameters
-------------------
* cls_col: str
The class column name from [prob].
* prob_col: str
The probability column name from [prob].
* persist: bool
Whether to generate a pickle file or not.
Returns
-------
* mprob: pd.DataFrame
The dataframe with information on the mean probabilities
for each class sorted from largest to smallest probability.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) or not persist:
mprob = pd.DataFrame(prob.groupby(cls_col)[prob_col].mean())
mprob[cls_col] = mprob.index.values
mprob = mprob.sort_values(by=prob_col, ascending=False)
with open(pkl, 'wb') as f:
pickle.dump(mprob, f, protocol=4)
logging.info('Pickled mean class probabilities ' + pkl)
else:
with open(pkl, 'rb') as f:
mprob = pickle.load(f)
logging.info('Pickled mean class probabilities already exists, skipping ' + pkl)
return mprob
def gen_outliers(pkl, prox_files, target_cls, persist=True):
"""
gen_outliers: str (listof str) pd.Series bool -> pd.DataFrame
---------------------------------------------------------------
Obtains the class outlier measures for each instance of data
using proximities as described by [1].
References
----------
* [1] Breiman, Leo: https://www.stat.berkeley.edu/~breiman/Using_random_forests_v4.0.pdf
Required Parameters
-------------------
* pkl: str
The pickle file to store the mean proximities.
* prox_files: (listof str)
The joblib pickle files with the stored proximities.
Each file represents a proximity matrix for a class
in the same order as [target_cls].
* target_cls: pd.Series
The series of classes to generate the mean proximities on.
Each class must have a corresponding [prox_files] in order.
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
Returns
-------
* outliers: pd.DataFrame
The dataframe of outlier measures and the true classes
for each instance of data.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) or not persist:
iprox = [] # within-class mean prox of each instance
icls = [] # classes of each instance
idx = [] # original instance indices
for prox_pkl, cls in zip(prox_files, target_cls):
prox_df = joblib.load(prox_pkl)
prox = prox_df.values
np.fill_diagonal(prox, np.nan) # set matching instances to nan
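            # Breiman's class-wise outlier measure: raw(n) = N / sum_k prox(n, k)^2,
            # normalized below as (raw - median(raw)) / MAD(raw); values well
            # above ~10 flag likely outliers (see reference [1] above).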
out_n = len(prox) / np.nansum(prox**2, axis=0) # outlier measure of instances n
iout = (out_n - np.median(out_n)) / mad(out_n, center=np.median) # normalized outlier measure
iprox = iprox + list(iout)
icls = icls + [cls] * len(prox)
idx = idx + list(prox_df.index.values)
iprox = pd.Series(iprox, name='outlier_measure')
icls = pd.Series(icls, name='class')
outliers = pd.concat([iprox, icls], axis=1)
outliers.index = idx
with open(pkl, 'wb') as f:
pickle.dump(outliers, f, protocol=4)
logging.info('Pickled outlier measures ' + pkl)
else:
with open(pkl, 'rb') as f:
outliers = pickle.load(f)
logging.info('Pickled outlier measures already exists, skipping ' + pkl)
return outliers
def gen_pkl(_pkl, _func, _lib='pickle', _persist=True, *args, **kwargs):
"""
    gen_pkl: str function str bool *args **kwargs -> any
    ---------------------------------------------------------------
    Generates a pickled file from data returned from [_func] after
    passing [*args] and/or [**kwargs].
Required Parameters
-------------------
* _pkl: str
The path to store the pickled file
* _func: function
A function that returns data to be pickled
Optional Parameters
-------------------
* _lib: str
An object that loads and dumps pickle files
from data returned from [_func]. Currently
supported inputs are 'pickle' and 'joblib'.
    * _persist: bool
Whether to regenerate a pickle file or not.
* *args: *args
Arguments to be passed to [_func]
* **kwargs: **kwargs
Keyword arguments to be passed to [_func]
Returns
-------
* data: any
The return value from [_func] after passing
        [*args] and [**kwargs].
---------------------------------------------------------------
"""
if _lib not in ['pickle', 'joblib']:
raise(Exception('Error: ' + _lib + ' is not a supported object for load and dump.'))
if not os.path.isfile(_pkl) or not _persist:
data = _func(*args, **kwargs)
if _lib == 'pickle' and _persist:
with open(_pkl, 'wb') as f:
pickle.dump(data, f, protocol=4)
elif _lib == 'joblib' and _persist:
joblib.dump(data, _pkl)
logging.info('Pickled data from ' + _func.__name__ + ' for ' + _pkl)
else:
if _lib == 'pickle':
with open(_pkl, 'rb') as f:
data = pickle.load(f)
elif _lib == 'joblib':
data = joblib.load(_pkl)
logging.info('Pickled data from ' + _func.__name__ + ' already exists, skipping ' + _pkl)
return data
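# Illustrative usage of gen_pkl (not part of the original module): memoize an
# expensive call behind a pickle file. The leading underscores on _pkl, _func
# and _persist keep them from colliding with the wrapped function's kwargs.
#
#   rf = RandomForestClassifier(n_estimators=64, oob_score=True)
#   rf = gen_pkl('rf.pkl', _func=rf.fit, _lib='joblib',
#                X=variables.values, y=targets.values)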
def gen_prob(pkl, obj, variables, persist=True):
"""
gen_prob: str obj pd.DataFrame bool -> pd.DataFrame
---------------------------------------------------------------
Uses a trained model [obj] from sklearn to extract the
probabilities for each class, the maximum probability of the
predicted class, and the predicted class information given
the attributes of predict_proba and classes_, and method
of predict.
Required Parameters
-------------------
* pkl: str
The pickle file to store the probabilities
* obj: obj
The sklearn model that has been previously trained.
* variables: pd.DataFrame
The variables used to train the [obj] model from sklearn.
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
Returns
-------
* prob: pd.DataFrame
The probability dataframe with information on the
probabilities for each class, the maximum probabilty
for the predicted class, and the predicted class
itself in the respective order.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) or not persist:
pred = pd.Series(obj.predict(variables.values), name='predict')
cls_prob = pd.DataFrame(obj.predict_proba(variables.values), columns=obj.classes_)
max_prob = pd.Series(cls_prob.apply(max, axis=1).values, name='max_prob')
prob = pd.concat([cls_prob, max_prob, pred], axis=1)
with open(pkl, 'wb') as f:
pickle.dump(prob, f, protocol=4)
logging.info('Pickled random forest probabilities ' + pkl)
else:
with open(pkl, 'rb') as f:
prob = pickle.load(f)
logging.info('Pickled random forest probabilities already exists, skipping ' + pkl)
return prob
def gen_prox(pkl, obj, variables, persist=True):
"""
    gen_prox: str obj pd.DataFrame bool -> None
---------------------------------------------------------------
Uses a trained model [obj] from sklearn to extract the
proximities for each [variables] and saves it to a [pkl].
This function is designed for parallel processing and reduction
of memory for large datasets, and thus does not return data. To
retrieve the results, load the data from the file at [pkl] using
joblib.load.
Required Parameters
-------------------
* pkl: str
The pickle file to store the proximities. This is created
using joblib.
* obj: obj
The sklearn model that has been previously trained.
* variables: pd.DataFrame
The training variables matching the [obj] model from sklearn.
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
---------------------------------------------------------------
"""
if not os.path.isfile(pkl) or not persist:
prox = 1. - helpers.rf_prox(obj, variables.values)
prox = pd.DataFrame(prox, index=variables.index)
with open(pkl, 'wb') as f:
pickle.dump(prox, f, protocol=4)
logging.info('Pickled random forest proximities ' + pkl)
else:
logging.info('Pickled random forest proximities already exists, skipping ' + pkl)
def gen_rfg(rfg_files, grid, variables, targets, persist=True):
"""
gen_rfg: (listof str) obj pd.DataFrame pd.Series bool -> pd.DataFrame
---------------------------------------------------------------
Trains a random forest classifier for each [grid] parameter combination and
returns a dataframe that summarizes its oob score, fit,
and the path to the stored pickle files.
Required Parameters
-------------------
    * rfg_files: (listof str)
The list of pickle files to save each random forest
* grid: obj
The parameter grid to generate random forests combinations on.
* variables: pd.DataFrame
The variables to use for training the random forest classifier
* targets: pd.Series
The prediction targets to use for training the random forest
classifier
Optional Parameters
-------------------
* persist: bool
Whether to generate a pickle file or not.
Returns
-------
    * : pd.DataFrame
        The summary dataframe consisting of the grid parameters
        used for experimentation, the out-of-bag error, the score (fit)
        of the random forest model, and the pickled file
        that the random forest model is stored in.
---------------------------------------------------------------
"""
rfg_oob = []
grid_names = list(list(grid)[0].keys())
for pkl, g in zip(rfg_files, grid):
if not os.path.isfile(pkl) or not persist:
rfg = RandomForestClassifier(**g)
rfg = gen_pkl(pkl, _func=rfg.fit, _lib='joblib', _persist=persist, X=variables.values, y=targets.values)
else:
rfg = joblib.load(pkl)
logging.info('Pickled random forest grid already exists, skipping ' + pkl)
rfg_oob.append(list(g.values()) + [1 - rfg.oob_score_, rfg.score(variables.values, targets.values), pkl])
return pd.DataFrame(rfg_oob, columns=grid_names + ['oob_error', 'score', 'pkl'])
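# Illustrative sketch (not part of the original module): gen_rfg expects an
# iterable of parameter dicts, e.g. sklearn's ParameterGrid (import path
# depends on the sklearn version) built from the forest_config defaults,
# plus one pickle path per combination.
#
#   from sklearn.model_selection import ParameterGrid
#   grid = ParameterGrid({'n_estimators': [10, 64], 'criterion': ['entropy'],
#                         'oob_score': [True], 'class_weight': ['balanced']})
#   rfg_files = ['rf_{}.pkl'.format(i) for i in range(len(grid))]
#   summary = gen_rfg(rfg_files, grid, variables, targets)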
|
{"hexsha": "38ce5ba81844e610f4632dcccc46f63560bf2ac3", "size": 41979, "ext": "py", "lang": "Python", "max_stars_repo_path": "py/modules/workflow.py", "max_stars_repo_name": "rwenite/msa-thesis", "max_stars_repo_head_hexsha": "4b72d5571b91ef1ca5266c8e151fdc5e387d57ac", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-12-13T13:57:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T07:44:54.000Z", "max_issues_repo_path": "py/modules/workflow.py", "max_issues_repo_name": "rwenite/msa-thesis", "max_issues_repo_head_hexsha": "4b72d5571b91ef1ca5266c8e151fdc5e387d57ac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "py/modules/workflow.py", "max_forks_repo_name": "rwenite/msa-thesis", "max_forks_repo_head_hexsha": "4b72d5571b91ef1ca5266c8e151fdc5e387d57ac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-20T14:21:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-20T14:21:52.000Z", "avg_line_length": 36.5034782609, "max_line_length": 178, "alphanum_fraction": 0.5359584554, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 8874}
|
# coding: utf-8
import rospy
import pid_controll
import numpy as np
from time import sleep
import matplotlib.pyplot as plt
from sensor_msgs.msg import LaserScan
from ackermann_msgs.msg import AckermannDriveStamped
class lidar_controll:
def __init__(self):
self.left_data = []
self.right_data = []
self.lidar_angle = 0
self.lidar_speed = 0
def set_lidar_angle(self,angle):
self.lidar_angle = angle
def get_lidar_angle(self):
return self.lidar_angle
def get_left_data(self):
return self.left_data
def get_right_data(self):
return self.right_data
def lidar(self,msg):
        self.left_data = msg.ranges[270:540]
        self.right_data = msg.ranges[540:810]
        left_min = min(self.left_data)
        right_min = min(self.right_data)
        pid_controller = pid_controll.pid()
        left_set_point = 60
        pid_controller.setPoint(left_set_point)
        state = "right"
        # LaserScan ranges are floats, so compare against float('inf'),
        # not the string 'inf'
        if left_min != float('inf') and (left_min < right_min or right_min == float('inf')):
            current_value = left_min * 100
            state = "left"
        elif right_min != float('inf') and (right_min < left_min or left_min == float('inf')):
            current_value = right_min * 100
            state = "right"
else:
current_value = 80
angle=pid_controller.update(current_value)
if (pid_controller.getError()>0.0):
if state == "right":
x = angle*-1
self.lidar_angle = x
elif state == "left":
self.lidar_angle = angle
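# --- Illustrative wiring sketch (not part of the original module) ---
# The imports above suggest this class is fed by a LaserScan subscriber and
# steers via an Ackermann publisher; the node and topic names here are
# assumptions, not taken from the original project.
def run_node():
    controller = lidar_controll()
    rospy.init_node('lidar_controll_node')
    rospy.Subscriber('/scan', LaserScan, controller.lidar)
    pub = rospy.Publisher('/ackermann_cmd', AckermannDriveStamped, queue_size=1)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        msg = AckermannDriveStamped()
        msg.drive.steering_angle = controller.get_lidar_angle()
        pub.publish(msg)
        rate.sleep()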
|
{"hexsha": "ddd2ccbdbb7d33b2703f3c85d68dd77b3a24399a", "size": 1591, "ext": "py", "lang": "Python", "max_stars_repo_path": "lidar_module.py", "max_stars_repo_name": "CanKorkut/PID-Control-with-ROS-Gazebo", "max_stars_repo_head_hexsha": "4380d17fc65b46a82384574917acfd1ad2b80b62", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-05-07T18:24:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-16T12:55:10.000Z", "max_issues_repo_path": "lidar_module.py", "max_issues_repo_name": "CanKorkut/PID-Control-with-ROS-Gazebo", "max_issues_repo_head_hexsha": "4380d17fc65b46a82384574917acfd1ad2b80b62", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lidar_module.py", "max_forks_repo_name": "CanKorkut/PID-Control-with-ROS-Gazebo", "max_forks_repo_head_hexsha": "4380d17fc65b46a82384574917acfd1ad2b80b62", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.462962963, "max_line_length": 80, "alphanum_fraction": 0.6109365179, "include": true, "reason": "import numpy", "num_tokens": 378}
|
# -*- coding:utf-8 -*-
# Created Time: 2018/05/11 11:50:23
# Author: Taihong Xiao <xiaotaihong@126.com>
from dataset import config, ShapeNet
from nets import Generator, Discriminator
import os, argparse
import torch
import numpy as np
import scipy.io as sio
from tensorboardX import SummaryWriter
from itertools import chain
from datetime import datetime
class _3DGAN(object):
def __init__(self, args, config=config):
self.args = args
self.attribute = args.attribute
self.gpu = args.gpu
self.mode = args.mode
self.restore = args.restore
# init dataset and networks
self.config = config
if args.batch_size is not None:
self.config.set_batchsize(args.batch_size)
self.dataset = ShapeNet(self.attribute)
self.G = Generator()
self.D = Discriminator()
self.adv_criterion = torch.nn.BCELoss()
self.set_mode_and_gpu()
self.restore_from_file()
def set_mode_and_gpu(self):
if self.mode == 'train':
self.G.train()
self.D.train()
if self.gpu:
with torch.cuda.device(self.gpu[0]):
self.G.cuda()
self.D.cuda()
self.adv_criterion.cuda()
if len(self.gpu) > 1:
self.G = torch.nn.DataParallel(self.G, device_ids=self.gpu)
self.D = torch.nn.DataParallel(self.D, device_ids=self.gpu)
elif self.mode == 'test':
self.G.eval()
self.D.eval()
if self.gpu:
with torch.cuda.device(self.gpu[0]):
self.G.cuda()
self.D.cuda()
if len(self.gpu) > 1:
self.G = torch.nn.DataParallel(self.G, device_ids=self.gpu)
self.D = torch.nn.DataParallel(self.D, device_ids=self.gpu)
else:
            raise NotImplementedError()
def restore_from_file(self):
if self.restore is not None:
ckpt_file_G = os.path.join(self.config.model_dir, 'G_iter_{:06d}.pth'.format(self.restore))
assert os.path.exists(ckpt_file_G)
self.G.load_state_dict(torch.load(ckpt_file_G))
if self.mode == 'train':
ckpt_file_D = os.path.join(self.config.model_dir, 'D_iter_{:06d}.pth'.format(self.restore))
assert os.path.exists(ckpt_file_D)
self.D.load_state_dict(torch.load(ckpt_file_D))
self.start_step = self.restore + 1
else:
self.start_step = 1
def save_log(self):
scalar_info = {
'loss_D': self.loss_D,
'loss_G': self.loss_G,
'G_lr' : self.G_lr_scheduler.get_lr()[0],
'D_lr' : self.D_lr_scheduler.get_lr()[0],
}
for key, value in self.G_loss.items():
scalar_info['G_loss/' + key] = value
for key, value in self.D_loss.items():
scalar_info['D_loss/' + key] = value
for tag, value in scalar_info.items():
self.writer.add_scalar(tag, value, self.step)
def save_img(self, save_num=5):
for i in range(save_num):
mdict = {
'instance': self.fake_X[i,0].data.cpu().numpy()
}
sio.savemat(os.path.join(self.config.img_dir, '{:06d}_{:02d}.mat'.format(self.step, i)), mdict)
def save_model(self):
torch.save({key: val.cpu() for key, val in self.G.state_dict().items()}, os.path.join(self.config.model_dir, 'G_iter_{:06d}.pth'.format(self.step)))
torch.save({key: val.cpu() for key, val in self.D.state_dict().items()}, os.path.join(self.config.model_dir, 'D_iter_{:06d}.pth'.format(self.step)))
def train(self):
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
self.writer = SummaryWriter(os.path.join(self.config.log_dir, current_time))
self.opt_G = torch.optim.Adam(self.G.parameters(), lr=self.config.G_lr, betas=(0.5, 0.999))
self.opt_D = torch.optim.Adam(self.D.parameters(), lr=self.config.D_lr, betas=(0.5, 0.999))
self.G_lr_scheduler = torch.optim.lr_scheduler.StepLR(self.opt_G, step_size=self.config.step_size, gamma=self.config.gamma)
self.D_lr_scheduler = torch.optim.lr_scheduler.StepLR(self.opt_D, step_size=self.config.step_size, gamma=self.config.gamma)
print('Start training')
# start training
for step in range(self.start_step, 1 + self.config.max_iter):
self.step = step
self.G_lr_scheduler.step()
self.D_lr_scheduler.step()
self.real_X = next(self.dataset.gen(True))
self.noise = torch.randn(self.config.nchw[0], 200)
if len(self.gpu):
with torch.cuda.device(self.gpu[0]):
self.real_X = self.real_X.cuda()
self.noise = self.noise.cuda()
self.fake_X = self.G(self.noise)
# update D
self.D_real = self.D(self.real_X)
self.D_fake = self.D(self.fake_X.detach())
self.D_loss = {
'adv_real': self.adv_criterion(self.D_real, torch.ones_like(self.D_real)),
'adv_fake': self.adv_criterion(self.D_fake, torch.zeros_like(self.D_fake)),
}
self.loss_D = sum(self.D_loss.values())
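            # Only step D while its accuracy is at most 0.8, a common 3D-GAN
            # heuristic to keep the discriminator from overpowering G: count
            # real samples scored >= 0.5 and fake samples scored <= 0.5.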
D_real_acu = torch.ge(self.D_real.squeeze(), 0.5).float()
D_fake_acu = torch.le(self.D_fake.squeeze(), 0.5).float()
D_total_acu = torch.mean(torch.cat((D_real_acu, D_fake_acu),0))
if D_total_acu <= 0.8:
self.opt_D.zero_grad()
self.loss_D.backward()
self.opt_D.step()
# update G
self.D_fake = self.D(self.fake_X)
self.G_loss = {
'adv_fake': self.adv_criterion(self.D_fake, torch.ones_like(self.D_fake))
}
self.loss_G = sum(self.G_loss.values())
self.opt_G.zero_grad()
self.loss_G.backward()
self.opt_G.step()
# print('step: {:06d}, loss_D: {:.6f}, loss_G: {:.6f}'.format(self.step, self.loss_D.data.cpu().numpy(), self.loss_G.data.cpu().numpy()))
if self.step % 100 == 0:
self.save_log()
print('Reached step #{}'.format(self.step))
if self.step % 1000 == 0:
self.save_img()
self.save_model()
print('Finished training!')
self.writer.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--attribute', type=str, required=True, help='Specify category for training.')
parser.add_argument('-g', '--gpu', default=[], nargs='+', type=int, help='Specify GPU ids.')
parser.add_argument('-r', '--restore', default=None, action='store', type=int, help='Specify checkpoint id to restore.')
parser.add_argument('-m', '--mode', default='train', type=str, choices=['train', 'test'])
parser.add_argument('-b', '--batch_size', type=int)
args = parser.parse_args()
print(args)
model = _3DGAN(args)
if args.mode == 'train':
model.train()
|
{"hexsha": "07acf12658c2671045453845a327b5bae8d55d02", "size": 7217, "ext": "py", "lang": "Python", "max_stars_repo_path": "3dgan.py", "max_stars_repo_name": "yodahuang/3D-GAN-pytorch", "max_stars_repo_head_hexsha": "4671a73001b11db718a892c8e3560344ddd50425", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "3dgan.py", "max_issues_repo_name": "yodahuang/3D-GAN-pytorch", "max_issues_repo_head_hexsha": "4671a73001b11db718a892c8e3560344ddd50425", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "3dgan.py", "max_forks_repo_name": "yodahuang/3D-GAN-pytorch", "max_forks_repo_head_hexsha": "4671a73001b11db718a892c8e3560344ddd50425", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-06-28T07:18:57.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-28T07:18:57.000Z", "avg_line_length": 37.9842105263, "max_line_length": 156, "alphanum_fraction": 0.5772481641, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1668}
|
from typing import Tuple, List
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.utils.linear_assignment_ import linear_assignment
from lib.trace import Trace
from utils import box
def iou_distance(first: List[Trace], second: List[Trace]) \
-> np.ndarray:
"""Compute cost based on IoU
Args:
first:
second:
Returns: cost_matrix
"""
unions = np.zeros((len(first), len(second)), dtype=np.float)
if unions.size:
unions = box.iou(
np.ascontiguousarray([track.to_tlbr for track in first], dtype=np.float),
np.ascontiguousarray([track.to_tlbr for track in second], dtype=np.float)
)
return 1 - unions
def nearest_distance(tracks: list, detections: list, metric='cosine') \
-> np.ndarray:
"""Compute cost based on ReID features
Args:
tracks:
detections:
metric:
Returns: cost matrix
"""
cost = np.zeros((len(tracks), len(detections)), dtype=np.float32)
if cost.size:
features = np.asarray(list(map(lambda t: t.feature_current, detections)), dtype=np.float32)
for index, track in enumerate(tracks):
cost[index, :] = np.maximum(0.0, cdist(track.features, features, metric).min(axis=0))
return cost
def gate_cost(motion, cost: np.ndarray,
tracks: list, detections: list, only_position: bool = False) \
-> np.ndarray:
"""Gate cost matrix
Args:
motion:
cost:
tracks:
detections:
only_position:
Returns: cost matrix
"""
if cost.size:
dimension = 2 if only_position else 4
threshold = motion.threshold[dimension]
measurements = np.stack(list(map(lambda d: box.calibrate(d.to_tlwh), detections)))
for index, track in enumerate(tracks):
distance = motion.gating_distance(
measurements,
track.mean,
track.cov,
only_position
)
cost[index, distance > threshold] = np.inf
return cost
def assignment(cost: np.ndarray, threshold: float, epsilon: float = 1e-4) \
-> Tuple[np.ndarray, Tuple[int], Tuple[int]]:
if not cost.size:
return np.empty((0, 2), dtype=int), \
tuple(range(np.size(cost, 0))), \
tuple(range(np.size(cost, 1)))
cost[cost > threshold] = threshold + epsilon
indices = linear_assignment(cost)
matches = indices[cost[tuple(zip(*indices))] <= threshold]
return matches, \
tuple(set(range(np.size(cost, 0))) - set(matches[:, 0])), \
tuple(set(range(np.size(cost, 1))) - set(matches[:, 1]))
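# Illustrative usage (not part of the original module): match two tracks to
# three detections with a toy cost matrix. Note that
# sklearn.utils.linear_assignment_ was removed in later scikit-learn releases;
# scipy.optimize.linear_sum_assignment is the usual replacement.
#
#   cost = np.array([[0.1, 0.9, 0.8],
#                    [0.7, 0.2, 0.9]])
#   matches, unmatched_first, unmatched_second = assignment(cost, 0.5)
#   # matches -> [[0, 0], [1, 1]]; detection 2 stays unmatched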
|
{"hexsha": "934d2cf905d0f6ac2f2fb2110ec04dfdfed8abdb", "size": 2712, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/matching.py", "max_stars_repo_name": "MaybeS/MOT", "max_stars_repo_head_hexsha": "bae66c46c0cd74b29a0e66c5af58422ad050977b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-06-03T17:51:05.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-21T11:16:40.000Z", "max_issues_repo_path": "lib/matching.py", "max_issues_repo_name": "MaybeS/MOT", "max_issues_repo_head_hexsha": "bae66c46c0cd74b29a0e66c5af58422ad050977b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2018-03-12T02:29:06.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-28T08:06:35.000Z", "max_forks_repo_path": "lib/matching.py", "max_forks_repo_name": "MaybeS/MOT", "max_forks_repo_head_hexsha": "bae66c46c0cd74b29a0e66c5af58422ad050977b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6734693878, "max_line_length": 99, "alphanum_fraction": 0.6002949853, "include": true, "reason": "import numpy,from scipy", "num_tokens": 619}
|
# Tests of rankings
using Stats
using Base.Test
a = [1.0, 2.0, 2.0, 3.0, 4.0, 4.0, 4.0, 5.0]
x = [3.0, 1.0, 2.0, 4.0, 4.0, 2.0, 5.0, 4.0] # x is a permuted version of a
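# The four schemes differ only in how ties are numbered (illustrative note):
#   ordinalrank - ties broken by order of appearance, ranks 1..n ("1234")
#   competerank - ties share the lowest rank, with gaps after ("1224")
#   denserank   - ties share a rank, with no gaps ("1223")
#   tiedrank    - ties get the average of their ordinal ranks ("1 2.5 2.5 4")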
@test ordinalrank(a) == [1, 2, 3, 4, 5, 6, 7, 8]
@test ordinalrank(x) == [4, 1, 2, 5, 6, 3, 8, 7]
@test competerank(a) == [1, 2, 2, 4, 5, 5, 5, 8]
@test competerank(x) == [4, 1, 2, 5, 5, 2, 8, 5]
@test denserank(a) == [1, 2, 2, 3, 4, 4, 4, 5]
@test denserank(x) == [3, 1, 2, 4, 4, 2, 5, 4]
@test tiedrank(a) == [1.0, 2.5, 2.5, 4.0, 6.0, 6.0, 6.0, 8.0]
@test tiedrank(x) == [4.0, 1.0, 2.5, 6.0, 6.0, 2.5, 8.0, 6.0]
|
{"hexsha": "dc734a67a8f0780da5aa7140334fa7ae336d85bf", "size": 592, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/ranking.jl", "max_stars_repo_name": "rened/Stats.jl", "max_stars_repo_head_hexsha": "0efba512a2bf8faa21e61c9568222ae1ae96acbb", "max_stars_repo_licenses": ["Xnet", "X11"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-06-20T16:09:54.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-20T16:09:54.000Z", "max_issues_repo_path": "test/ranking.jl", "max_issues_repo_name": "rened/Stats.jl", "max_issues_repo_head_hexsha": "0efba512a2bf8faa21e61c9568222ae1ae96acbb", "max_issues_repo_licenses": ["Xnet", "X11"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/ranking.jl", "max_forks_repo_name": "rened/Stats.jl", "max_forks_repo_head_hexsha": "0efba512a2bf8faa21e61c9568222ae1ae96acbb", "max_forks_repo_licenses": ["Xnet", "X11"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6, "max_line_length": 78, "alphanum_fraction": 0.4966216216, "num_tokens": 384}
|
"""
A program that will accept a singlevariable function and a value of that variable and check
whether the input function is continuous at the point where the variable
assumes the value input
"""
from sympy import Symbol, sympify, Limit
def is_continuous(expr, var, value):
l_limit = Limit(expr, var, value, dir="-").doit()
r_limit = Limit(expr, var, value, dir="+").doit()
func_value = expr.subs({var: value})
if l_limit == r_limit and func_value == l_limit:
return True
return False
if __name__ == "__main__":
try:
expr = sympify(input("Enter a function in one variable: "))
var = Symbol(input("Enter the variable: "))
value = float(input("Enter the point to check the continuity at: "))
except Exception as err:
print("Wrong input", err)
else:
result = is_continuous(expr, var, value)
if result:
print(f"{expr} is continuous at {value}")
else:
print(f"{expr} is not continuous at {value}")
|
{"hexsha": "0540df2a44a465891368c8d5bfdfcffc3cc40392", "size": 1020, "ext": "py", "lang": "Python", "max_stars_repo_path": "Math/Continuity.py", "max_stars_repo_name": "Gerile3/My_Python", "max_stars_repo_head_hexsha": "8623470ddd866b6b0c3eb34a2572a91458a3e1b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Math/Continuity.py", "max_issues_repo_name": "Gerile3/My_Python", "max_issues_repo_head_hexsha": "8623470ddd866b6b0c3eb34a2572a91458a3e1b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Math/Continuity.py", "max_forks_repo_name": "Gerile3/My_Python", "max_forks_repo_head_hexsha": "8623470ddd866b6b0c3eb34a2572a91458a3e1b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9090909091, "max_line_length": 91, "alphanum_fraction": 0.6401960784, "include": true, "reason": "from sympy", "num_tokens": 241}
|
(** ** Variadic Preservation *)
Require Export ARS Program.Equality.
Require Import core fintype.
Import ScopedNotations.
From Chapter6 Require Export variadic_fin.
Set Implicit Arguments.
Unset Strict Implicit.
Ltac inv H := dependent destruction H.
Hint Constructors star.
(** *** Single-step reduction *)
Inductive step {n} : tm n -> tm n -> Prop :=
| step_beta p b (t: fin p -> tm n) : step (app _ (lam p b) t) (b[scons_p p t ids])
| step_abs p b1 b2 : @step (p + n) b1 b2 -> step (lam p b1) (lam p b2)
| step_appL p s1 s2 t : step s1 s2 -> step (app p s1 t) (app _ s2 t).
Hint Constructors step.
Lemma step_beta' n p b (t: fin p -> tm n) t':
t' = b[scons_p p t ids] -> step (app _ (lam p b) t) t'.
Proof. intros ->. now constructor. Qed.
(** *** Substitutivity *)
Require Import Setoid Morphisms.
Require Import core_axioms.
Lemma step_inst {m n} (f : fin m -> tm n) (s t : tm m) :
step s t -> step (subst_tm f s) (subst_tm f t).
Proof.
intros st. revert n f. induction st; intros; cbn.
- apply step_beta'.
asimpl.
(* DONE a.d. cod_map not unfolded because it's part of core_axioms *)
unfold cod_map.
now asimpl.
- apply step_abs. eapply IHst.
- apply step_appL, IHst.
Qed.
Lemma mstep_inst m n (f : fin m -> tm n) (s t : tm m) :
star step s t -> star step (s[f]) (t[f]).
Proof. induction 1; eauto using step_inst. Qed.
(** *** Variadic typing *)
Inductive ty : Type :=
| Base : ty
| arr p : (fin p -> ty) -> ty -> ty .
Definition ctx n := fin n -> ty.
Inductive has_type {n} (Gamma : ctx n) : tm n -> ty -> Prop :=
| ty_var (x : fin n) :
has_type Gamma (var_tm x) (Gamma x)
| ty_abs p (S1 : fin p -> ty) ( S2 : ty) (M : tm (p + n)) :
@has_type (p + n) (scons_p p S1 Gamma) M S2 ->
has_type Gamma (lam p M) (arr S1 S2)
| ty_app p (T : fin p -> ty) ( S : ty) (M : tm n) N :
has_type Gamma M (arr T S) ->
(forall x, has_type Gamma (N x) (T x)) ->
has_type Gamma (app p M N) S.
Notation "Gamma |- M : T" := (has_type Gamma M T) (at level 20, M at level 99).
Lemma ty_var' n (x : fin n) A Gamma:
A = Gamma x -> has_type Gamma (var_tm x) A.
Proof. intros. subst. constructor. Qed.
Definition ltc {k k'} (Gamma: ctx k) (Delta: ctx k') rho := forall x, Delta (rho x) = Gamma x.
Lemma typing_ren n k (Gamma: ctx n) (Delta: ctx k) (rho: fin n -> fin k) (M: tm n) T :
ltc Gamma Delta rho -> Gamma |- M : T -> Delta |- (M⟨rho⟩) : T.
Proof.
intros C H. revert k Delta rho C. induction H; intros; asimpl; eauto using has_type.
- unfold ltc in C. rewrite <- C. constructor.
- constructor. apply IHhas_type. intros x.
destruct (destruct_fin x) as [(?&->)|(?&->)]; eauto; asimpl; unfold upRen_p; asimpl; eauto.
- econstructor; eauto.
Qed.
Lemma typing_inst n k (Gamma: ctx n) (Delta: ctx k) (sigma: fin n -> tm k) (M: tm n) T :
(forall x, Delta |- sigma x : Gamma x) -> Gamma |- M : T -> Delta |- (M[sigma]) : T.
Proof.
intros C H. revert k Delta sigma C. induction H; intros; asimpl; eauto using has_type.
- unfold ltc in C. apply C.
- constructor. apply IHhas_type. intros x.
destruct (destruct_fin x) as [(?&->)|(?&->)]; asimpl.
+ apply ty_var'. now asimpl.
+ eapply typing_ren; eauto. intros x. now asimpl.
- econstructor; eauto.
Qed.
(** *** Preservation *)
Lemma step_typing k (Gamma: ctx k) M T :
Gamma |- M : T -> forall M', step M M' -> Gamma |- M' : T.
Proof.
induction 1; intros; cbn.
- inv H.
- inv H0. econstructor. now apply IHhas_type.
- inv H2.
+ inv H. eapply typing_inst; try eassumption.
intros x. destruct (destruct_fin x) as [(?&->)|(?&->)]; asimpl; eauto.
* apply ty_var'; eauto.
+ eapply ty_app; eauto.
Qed.
|
{"author": "addap", "repo": "autosubst-ocaml", "sha": "f820bde3c51299b5f54ef21af39ac4654854d124", "save_path": "github-repos/coq/addap-autosubst-ocaml", "path": "github-repos/coq/addap-autosubst-ocaml/autosubst-ocaml-f820bde3c51299b5f54ef21af39ac4654854d124/case-studies/kathrin/coq/Chapter9/variadic_preservation.v"}
|
!
! CalculiX - A 3-dimensional finite element program
! Copyright (C) 1998-2020 Guido Dhondt
!
! This program is free software; you can redistribute it and/or
! modify it under the terms of the GNU General Public License as
! published by the Free Software Foundation(version 2);
!
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with this program; if not, write to the Free Software
! Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
!
subroutine liquidchannel(node1,node2,nodem,nelem,lakon,
& nactdog,identity,ielprop,prop,iflag,v,xflow,f,
& nodef,idirf,df,rho,g,co,dvi,numf,mi,ipkon,kon,iplausi)
!
! open channel for incompressible media
!
! SG: sluice gate
! SO: sluice opening
! WE: weir
! WO: weir opening
! DS: discontinuous slope
! DO: discontinuous slope opening
!                 : default channel with linearly varying trapezoid cross
! section
!
implicit none
!
logical identity,bresse,jump
character*8 lakon(*)
!
integer nelem,nactdog(0:3,*),node1,node2,nodem,indexup,i,
& ielprop(*),nodef(*),idirf(*),index,iflag,mi(*),nsol,
& inv,numf,nodesg,nelemdown,nelemup,node0,kon(*),ipkon(*),
& iplausi
!
real*8 prop(*),v(0:mi(2),*),xflow,f,df(*),b,d,c,p,
& h1,h2,rho,dvi,friction,reynolds,dg,b1,b2,
& g(3),dl,xks,z1,z2,co(3,*),xflow2,dyg3dbj,dyg4dbj,
& s0,sqrts0,hk,form_fact,h1ns,h2ns,h0,dyg3deta,dyg4deta,
& dh3dh1,dh4dh2,dh3dm,dh4dm,eta,dA3deta,dA4deta,bj,
& theta,cth,tth,um1,um2,A1,A2,P1,P2,D1,D2,dA1dh1,dA2dh2,
& dP1dh1,dP2dh2,dD1dh1,dD2dh2,h3,h4,dh3deta,xn1,xn2,xt1,xt2,
& dh4deta,yg3,yg4,dyg3dh3,dyg4dh4,A3,A4,dA3dh3,dA4dh4,
& dum1dh1,dum2dh2,c1,c2,dbds,dbjdeta,e0,e1,e2,e3,
& dyg3dm,dyg4dm,dA3dm,dA4dm,dyg3dh1,dyg4dh2,
& dA3dh1,dA4dh2,solreal(3),solimag(3),dist
!
!
!
! iflag=0: check whether all parameters in the element equation
! are known => equation is not needed
! iflag=1: calculation of the initial flux
! iflag=2: evaluate the element equation and all derivatives
! iflag=3: correct the channel depth in order to move a jump
!
if (iflag.eq.0) then
identity=.true.
!
if(nactdog(2,node1).ne.0)then
identity=.false.
elseif(nactdog(2,node2).ne.0)then
identity=.false.
elseif(nactdog(1,nodem).ne.0)then
identity=.false.
endif
!
elseif((iflag.eq.1).or.(iflag.eq.2))then
!
index=ielprop(nelem)
!
h1=v(2,node1)
h2=v(2,node2)
!
z1=-g(1)*co(1,node1)-g(2)*co(2,node1)-g(3)*co(3,node1)
z2=-g(1)*co(1,node2)-g(2)*co(2,node2)-g(3)*co(3,node2)
!
dg=dsqrt(g(1)*g(1)+g(2)*g(2)+g(3)*g(3))
!
if(iflag.eq.1) then
!
! in a first call of liquidchannel the flow is determined,
! in a second call the channel depth is calculated
!
if(lakon(nelem)(6:7).eq.'SG') then
!
! sluice gate
!
b=prop(index+1)
s0=prop(index+2)
if(s0.lt.-1.d0) then
s0=dasin((z1-z2)/dl)
endif
sqrts0=dsqrt(1.d0-s0*s0)
theta=0.d0
h2=prop(index+3)
!
if(dabs(xflow).lt.1.d-30) then
!
! determine initial mass flow
!
if(nactdog(2,node1).ne.0) then
!
! upstream level not known
!
xflow=0.d0
else
xflow=2.d0*dg*(rho*b*h2)**2*(h1-h2*sqrts0)
if(xflow.lt.0.d0) then
write(*,*)'*ERROR in liquidchannel: water level'
write(*,*) ' upstream of sluice gate is '
write(*,*) ' smaller than downstream heigh
&t'
call exit(201)
else
xflow=dsqrt(xflow)
endif
endif
else
!
! determine the downstream depth
! and the upstream depth if not defined as BC
!
call hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
v(3,node2)=hk
if(h2.gt.hk) then
!
! for initial conditions
!
if(nactdog(2,node1).ne.0) v(2,node1)=3.d0*hk/2.d0
v(2,node2)=hk
else
!
! for initial conditions
!
if(nactdog(2,node1).ne.0) v(2,node1)=
& xflow**2/(2.d0*dg*(rho*b*h2)**2)+h2*sqrts0
v(2,node2)=h2
endif
endif
elseif(lakon(nelem)(6:7).eq.'WE') then
!
! weir
!
b=prop(index+1)
p=prop(index+2)
c=prop(index+3)
sqrts0=1.d0
theta=0.d0
!
if(dabs(xflow).lt.1.d-30) then
!
! determine initial mass flow
!
if(nactdog(2,node1).ne.0) then
!
! upstream level unknown
!
xflow=0.d0
else
if(h1.le.p) then
write(*,*) '*ERROR in liquidchannel'
write(*,*) ' weir height exceeds'
write(*,*) ' upstream level'
call exit(201)
endif
xflow=rho*c*b*(h1-p)**(1.5d0)
endif
else
!
! determine the downstream depth
! and the upstream depth if not defined as BC
!
call hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
v(3,node2)=hk
!
! for initial conditions
!
if(nactdog(2,node1).ne.0) v(2,node1)=p+3.d0*hk/2.d0
!
! next value is used for downstream initial values
!
v(2,node2)=hk
endif
!
elseif(lakon(nelem)(6:7).eq.'DS') then
if(dabs(xflow).lt.1.d-30) then
!
! initial mass flow cannot be determined for this
! type of element
!
xflow=0.d0
else
!
! determine the downstream depth
!
b=prop(index+1)
s0=prop(index+2)
if(s0.lt.-1.d0) then
s0=dasin((z1-z2)/dl)
endif
sqrts0=dsqrt(1.d0-s0*s0)
theta=prop(index+4)
!
call hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
v(3,node2)=hk
!
! initial condition for fluid depth
! supercritical value
!
v(2,node2)=hk/2.d0
endif
!
endif
else
!
! calculating f and its derivatives
!
bresse=.false.
jump=.false.
!
xflow2=xflow*xflow
!
! element properties
!
if((lakon(nelem)(6:7).eq.'SG').or.
& (lakon(nelem)(6:7).eq.'SO').or.
& (lakon(nelem)(6:7).eq.'WO').or.
& (lakon(nelem)(6:7).eq.'RE').or.
& (lakon(nelem)(6:7).eq.' ').or.
& (lakon(nelem)(6:7).eq.'DS').or.
& (lakon(nelem)(6:7).eq.'DO')) then
b=prop(index+1)
s0=prop(index+2)
if(s0.lt.-1.d0) then
s0=dasin((z1-z2)/dl)
endif
sqrts0=dsqrt(1.d0-s0*s0)
if(lakon(nelem)(6:7).ne.'SG') then
dl=prop(index+3)
theta=prop(index+4)
xks=prop(index+5)
if(dl.le.0.d0) then
dl=dsqrt((co(1,node2)-co(1,node1))**2+
& (co(2,node2)-co(2,node1))**2+
& (co(3,node2)-co(3,node1))**2)
endif
else
theta=0.d0
endif
elseif(lakon(nelem)(6:7).eq.'WE') then
b=prop(index+1)
p=prop(index+2)
c=prop(index+3)
sqrts0=1.d0
theta=0.d0
elseif((lakon(nelem)(6:7).eq.'CO').or.
& (lakon(nelem)(6:7).eq.'EL')) then
b1=prop(index+1)
!
s0=prop(index+2)
if(s0.lt.-1.d0) then
s0=0.d0
endif
sqrts0=dsqrt(1.d0-s0*s0)
!
dl=prop(index+3)
if(dl.le.0.d0) then
dl=dsqrt((co(1,node2)-co(1,node1))**2+
& (co(2,node2)-co(2,node1))**2+
& (co(3,node2)-co(3,node1))**2)
endif
!
b2=prop(index+4)
b=(b1+b2)/2.d0
theta=0.d0
xks=0.d0
elseif((lakon(nelem)(6:7).eq.'ST').or.
& (lakon(nelem)(6:7).eq.'DR')) then
b=prop(index+1)
!
s0=prop(index+2)
if(s0.lt.-1.d0) then
s0=0.d0
endif
sqrts0=dsqrt(1.d0-s0*s0)
!
dl=prop(index+3)
if(dl.le.0.d0) then
dl=dsqrt((co(1,node2)-co(1,node1))**2+
& (co(2,node2)-co(2,node1))**2+
& (co(3,node2)-co(3,node1))**2)
endif
!
d=prop(index+4)
b1=b
b2=b
theta=0.d0
xks=0.d0
endif
!
if(xflow.ge.0.d0) then
inv=1
else
inv=-1
endif
!
! standard element equation: unknowns are the mass flow
! and the depth upstream and downstream
!
numf=3
nodef(1)=node1
nodef(2)=nodem
nodef(3)=node2
idirf(1)=2
idirf(2)=1
idirf(3)=2
!
if(lakon(nelem)(6:7).eq.'SG') then
!
! sluice gate
! 1-SG-2-SO-3
!
! h2 cannot exceed HKmax
!
h2=prop(index+3)
call hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
v(3,node2)=hk
if(h2.gt.hk) h2=hk
!
nelemdown=nint(prop(index+5))
h3=v(2,kon(ipkon(nelemdown)+3))
call hns(b,theta,rho,dg,sqrts0,xflow,h2,h2ns)
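!
!              h2ns returned by hns is taken to be the conjugate
!              (sequent) depth of a hydraulic jump at depth h2
!              (assumption based on usage); if the downstream level
!              h3 lies below it the gate is not drowned and the
!              sluice gate equation applies
!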
if(h3.lt.h2ns) then
!
! Q=f_SG(h1,h2): sluice gate equation between
! 1 and 2
!
! next line for output only
!
v(2,node2)=h2
c write(30,*) 'SG: sluice gate equation '
c write(30,*)'h1= ',h1,'h2= ',h2,'h3= ',h3,'h2ns= ',h2ns
df(1)=2.d0*dg*(rho*b*h2)**2
df(2)=-2.d0*xflow
f=df(1)*(h1-h2*sqrts0)
df(3)=2.d0*f/h2-df(1)*sqrts0
f=f-xflow2
else
!
! fake equation
!
c write(30,*) 'SG: fake equation '
c write(30,*)'h1= ',h1,'h2= ',h2,'h3= ',h3,'h2ns= ',h2ns
numf=1
nodef(1)=nodem
idirf(1)=3
f=prop(index+4)-0.5d0
df(1)=1.d0
endif
elseif(lakon(nelem)(6:7).eq.'SO') then
!
! sluice opening (element streamdown of sluice gate)
! 0-SG-1-SO-2
!
nelemup=nint(prop(index+6))
node0=kon(ipkon(nelemup)+1)
h0=v(2,node0)
h1=prop(ielprop(nelemup)+3)
!
! h1 cannot exceed HKmax
!
call hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
v(3,node2)=hk
if(h1.gt.hk) h1=hk
!
call hns(b,theta,rho,dg,sqrts0,xflow,h1,h1ns)
if(h2.lt.h1ns) then
!
! bresse (frontwater)
!
c write(30,*) 'SO: Bresse equation '
c write(30,*)'h0= ',h0,'h1= ',h1,'h2= ',h2,'h1ns= ',h1ns
bresse=.true.
else
!
! Q=f_SG(h0,h2): sluice gate equation between 0 and 2
! (backwater)
!
! reset gate height
!
h1=prop(ielprop(nelemup)+3)
!
c write(30,*) 'SO: Sluice gate eqn. between 0 and 2 '
c write(30,*)'h0= ',h0,'h1= ',h1,'h2= ',h2,'h1ns= ',h1ns
numf=4
nodef(4)=node0
idirf(4)=2
!
if(h2.gt.h1) then
!
! gate flow (water touches gate)
! section = b * h1
!
! next line for output only
!
v(2,node1)=h1
df(4)=2.d0*dg*(rho*b*h1)**2
df(3)=-df(4)*sqrts0
df(2)=-2.d0*xflow
f=df(4)*(h0-h2*sqrts0)
df(1)=2.d0*f/h1
else
!
! incomplete inflexion (water does not touch gate)
! section = b * h2
!
! next line for output only
!
v(2,node1)=h2
df(4)=2.d0*dg*(rho*b*h2)**2
df(3)=-df(4)*sqrts0
df(2)=-2.d0*xflow
f=df(4)*(h0-h2*sqrts0)
df(3)=df(3)+2.d0*f/h2
df(1)=0.d0
endif
f=f-xflow2
endif
elseif(lakon(nelem)(6:7).eq.'WE') then
!
! weir
! 1-WE-2-WO-3
!
nelemdown=nint(prop(index+5))
h3=v(2,kon(ipkon(nelemdown)+3))
!
! default depth for weir is hk
!
call hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
v(3,node2)=hk
!
if(h3.lt.p+hk) then
!
! output only
!
v(2,node2)=p+hk
!
! Q=f_WE(h1): weir equation
!
c write(30,*) 'WE: weir equation '
c write(30,*)'h1= ',h1,'h2= ',h2,'h3= ',h3,'hk= ',hk
f=rho*c*b*(h1-p)**(1.5d0)
df(1)=3.d0*f/(2.d0*(h1-p))
f=f-xflow
df(2)=-1.d0
df(3)=0.d0
else
!
! fake equation
!
c write(30,*) 'WE: weir equation '
c write(30,*)'h1= ',h1,'h2= ',h2,'h3= ',h3,'hk= ',hk
numf=1
nodef(1)=nodem
idirf(1)=3
f=prop(index+4)-0.5d0
df(1)=1.d0
endif
elseif(lakon(nelem)(6:7).eq.'WO') then
!
! weir opening (element streamdown of weir)
! 0-WE-1-WO-2
!
nelemup=nint(prop(index+6))
node0=kon(ipkon(nelemup)+1)
h0=v(2,node0)
!
p=prop(ielprop(nelemup)+2)
!
! default depth for weir is hk
!
call hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
v(3,node2)=hk
!
if(h2.lt.p+hk) then
!
! bresse between 1 and 2
!
h1=hk
c write(30,*) 'WO: Bresse equation '
c write(30,*)'h0= ',h0,'h1= ',h1,'h2= ',h2,'hk= ',hk
p=prop(ielprop(nelemup)+2)
s0=dasin(p/dsqrt(dl**2+p**2))
c write(*,*) 's0=',p,dl,s0
sqrts0=dsqrt(1.d0-s0*s0)
bresse=.true.
else
!
! output only
!
v(2,node1)=h2
!
! bresse between 0 and 2
!
c write(30,*) 'WO: Bresse eqn. between 0 and 2 '
c write(30,*)'h0= ',h0,'h1= ',h1,'h2= ',h2,'hk= ',hk
nodef(1)=node0
h1=h0
bresse=.true.
endif
elseif(lakon(nelem)(6:7).eq.'DS') then
!
! discontinuous slope
! 1-DS-2-DO-3
!
call hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
v(3,node2)=hk
!
if(h1.gt.hk) then
nelemdown=nint(prop(index+8))
h3=v(2,kon(ipkon(nelemdown)+3))
if(h3.le.hk) then
!
! upstream: backwater curve
! downstream: frontwater curve
!
h2=hk
bresse=.true.
c write(30,*) 'DS: back/front bresse'
c write(30,*)'h1= ',h1,'h2= ',h2,'h3= ',h3
!
! for output purposes
!
v(2,node2)=h2
else
!
! both curves are backwater curves
! fake equation
!
c write(30,*) 'DS: back/back fake equation '
c write(30,*)'h1= ',h1,'h2= ',h2,'h3= ',h3
numf=1
nodef(1)=nodem
idirf(1)=3
f=prop(index+7)-0.5d0
df(1)=1.d0
endif
else
!
! both curves are frontwater curves
! fake equation
!
c write(30,*) 'DS: front/front fake equation '
c write(30,*)'h1= ',h1,'h2= ',h2
nelemup=nint(prop(index+6))
numf=1
nodef(1)=kon(ipkon(nelemup)+2)
idirf(1)=3
f=prop(index+7)-0.5d0
df(1)=1.d0
endif
elseif(lakon(nelem)(6:7).eq.'DO') then
!
! discontinuous slope opening
! (element streamdown of discontinuous slope)
! 0-DS-1-DO-2
!
call hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
v(3,node2)=hk
!
nelemup=nint(prop(index+6))
node0=kon(ipkon(nelemup)+1)
h0=v(2,node0)
!
if(h0.gt.hk) then
if(h2.le.hk) then
!
! upstream: backwater curve
! downstream: frontwater curve
! bresse between 1 and 2
!
h1=hk
c write(30,*) 'DO: back/front bresse 1-2'
c write(30,*)'h0= ',h0,'h1= ',h1,'h2= ',h2
bresse=.true.
else
!
! both curves are backwater curves
! bresse between 0 and 2
!
c write(30,*) 'DO: back/back bresse 0-2'
c write(30,*)'h0= ',h0,'h1= ',h1,'h2= ',h2
nodef(1)=node0
h1=h0
bresse=.true.
!
! output purposes
!
v(2,node1)=(h0+h2)/2.d0
endif
else
!
! both curves are frontwater curves
! bresse between 0 and 2
!
c write(30,*) 'DO: front/front bresse 0-2'
c write(30,*)'h0= ',h0,'h1= ',h1,'h2= ',h2
nodef(1)=node0
h1=h0
bresse=.true.
!
! output purposes
!
v(2,node1)=(h0+h2)/2.d0
endif
elseif(lakon(nelem)(6:7).eq.'RE') then
!
! element upstream of a reservoir
! calculating the critical depth
!
call hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
v(3,node2)=hk
if(h1.ge.hk) then
!
! backwater curve
!
if(h2.lt.hk) h2=hk
c write(30,*) 'RE: Bresse downstream equation '
c write(30,*) 'h1= ',h1,'h2= ',h2,'hk= ',hk
bresse=.true.
else
!
! frontwater curve
!
call hns(b,theta,rho,dg,sqrts0,xflow,h1,h1ns)
if(h2.le.h1ns) then
c write(30,*) 'RE: fake equation '
c write(30,*) 'h1= ',h1,'h2= ',h2,'h1ns= ',h1ns
!
! fake equation
!
nelemup=nint(prop(index+6))
nodesg=kon(ipkon(nelemup)+2)
numf=1
nodef(1)=nodesg
idirf(1)=3
!
! retrieving previous value of eta
!
index=ielprop(nelemup)
if(lakon(nelemup)(6:7).eq.'SG') then
f=prop(index+4)-0.5d0
elseif(lakon(nelemup)(6:7).eq.'WE') then
f=prop(index+4)-0.5d0
elseif(lakon(nelemup)(6:7).eq.'DS') then
f=prop(index+7)-0.5d0
endif
df(1)=1.d0
else
c write(30,*) 'RE: Bresse downstream equation '
c write(30,*) 'h1= ',h1,'h2= ',h2,'h1ns= ',h1ns
bresse=.true.
endif
endif
elseif(lakon(nelem)(6:7).eq.'CO') then
c write(30,*) 'CO: contraction '
c write(30,*)'h1= ',h1,'h2= ',h2
!
call hcrit(xflow,rho,b2,theta,dg,sqrts0,hk)
v(3,node2)=hk
!
if(inv.eq.-1) then
if((h1.gt.hk).and.(h2.lt.hk)) then
jump=.true.
endif
else
if((h1.lt.hk).and.(h2.gt.hk)) then
jump=.true.
endif
endif
!
c write(*,*) 'CO ',jump
!
if(.not.jump) then
c1=rho*rho*dg
c2=b1*b2*h1*h2
df(1)=b1*(2.d0*xflow2+c1*b1*b2*h2**3)
df(3)=b2*(2.d0*xflow2+c1*b1*b1*h1**3)
f=h1*df(1)-h2*df(3)
df(1)=df(1)-3.d0*c1*c2*b1*h1
df(3)=3.d0*c1*c2*b1*h2-df(3)
df(2)=4.d0*(b1*h1-b2*h2)*xflow
endif
elseif(lakon(nelem)(6:7).eq.'EL') then
c write(30,*) 'EL: enlargement '
c write(30,*)'h1= ',h1,'h2= ',h2
!
call hcrit(xflow,rho,b2,theta,dg,sqrts0,hk)
v(3,node2)=hk
!
if(inv.eq.-1) then
if((h1.gt.hk).and.(h2.lt.hk)) then
jump=.true.
endif
else
if((h1.lt.hk).and.(h2.gt.hk)) then
jump=.true.
endif
endif
!
c write(*,*) 'EL ',jump
!
if(.not.jump) then
c1=rho*rho*dg
c2=b1*b2*h1*h2
df(1)=b1*(2.d0*xflow2+c1*b2*b2*h2**3)
df(3)=b2*(2.d0*xflow2+c1*b1*b2*h1**3)
f=h1*df(1)-h2*df(3)
df(1)=df(1)-3.d0*c1*c2*b2*h1
df(3)=3.d0*c1*c2*b2*h2-df(3)
df(2)=4.d0*(b1*h1-b2*h2)*xflow
endif
elseif(lakon(nelem)(6:7).eq.'DR') then
c write(30,*) 'DR: drop '
c write(30,*)'h1= ',h1,'h2= ',h2
!
call hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
v(3,node2)=hk
!
if(inv.eq.-1) then
if((h1.gt.hk).and.(h2.lt.hk)) then
jump=.true.
endif
else
if((h1.lt.hk).and.(h2.gt.hk)) then
jump=.true.
endif
endif
!
if(.not.jump) then
c1=rho*rho*dg
df(1)=2.d0*xflow2+c1*b*b*h2**3
df(3)=2.d0*xflow2+c1*b*b*h1*(h1+d)**2
f=h1*df(1)-h2*df(3)
df(1)=df(1)-c1*b*b*h2*(3.d0*h1+d)*(h1+d)
df(3)=3.d0*c1*b*b*h1*h2*h2-df(3)
df(2)=4.d0*(h1-h2)*xflow
endif
elseif(lakon(nelem)(6:7).eq.'ST') then
c write(30,*) 'ST: step '
c write(30,*)'h1= ',h1,'h2= ',h2
!
call hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
v(3,node2)=hk
!
if(inv.eq.-1) then
if((h1.gt.hk).and.(h2.lt.hk)) then
jump=.true.
endif
else
if((h1.lt.hk).and.(h2.gt.hk)) then
jump=.true.
endif
endif
!
if(.not.jump) then
c1=rho*rho*dg
df(1)=2.d0*xflow2+c1*b*b*h2*(h2+d)**2
df(3)=2.d0*xflow2+c1*b*b*h1**3
f=h1*df(1)-h2*df(3)
df(1)=df(1)-3.d0*c1*b*b*h1*h1*h2
df(3)=c1*b*b*h1*(3.d0*h2+d)*(h2+d)-df(3)
df(2)=4.d0*(h1-h2)*xflow
endif
elseif(lakon(nelem)(6:7).eq.' ') then
bresse=.true.
c write(30,*) 'straight: Bresse equation '
c write(30,*) 'h1= ',h1,'h2= ',h2
endif
!
! bresse equation
!
if((bresse).or.(jump)) then
!
if(xks.gt.0.d0) then
!
!                 White-Colebrook
!
! hydraulic diameter
!
d=2.d0*(h1+h2)
reynolds=4.d0*xflow/(b*dvi)
form_fact=1.d0
call friction_coefficient(dl,d,xks,reynolds,form_fact,
& friction)
endif
!
if(bresse) then
call hcrit(xflow,rho,b,theta,dg,sqrts0,hk)
v(3,node2)=hk
if(inv.eq.-1) then
if((h1.gt.hk).and.(h2.lt.hk)) then
jump=.true.
endif
else
if((h1.lt.hk).and.(h2.gt.hk)) then
jump=.true.
endif
endif
b1=b
b2=b
endif
!
! geometric data
!
cth=dcos(theta)
tth=dtan(theta)
!
! nonprismatic cross section
!
if(lakon(nelem)(6:7).eq.' ') then
dbds=prop(index+7)
else
dbds=0.d0
endif
!
! width at water surface
!
dD1dh1=2.d0*tth
dD2dh2=dD1dh1
D1=b1+h1*dD1dh1
D2=b2+dl*dbds+h2*dD2dh2
!
! cross section
!
A1=h1*(b1+h1*tth)
A2=h2*(b2+dl*dbds+h2*tth)
dA1dh1=D1
dA2dh2=D2
!
! perimeter
!
P1=b1+2.d0*h1/cth
P2=b2+dl*dbds+2.d0*h2/cth
dP1dh1=2.d0/cth
dP2dh2=dP1dh1
!
! factor for friction
!
if(xks.gt.0.d0) then
!              White-Colebrook
um1=friction/8.d0
um2=um1
dum1dh1=0.d0
dum2dh2=0.d0
else
! Manning
um1=xks*xks*dg*(P1/A1)**(1.d0/3.d0)
um2=xks*xks*dg*(P2/A2)**(1.d0/3.d0)
dum1dh1=xks*xks*dg*
& (P1**(-2.d0/3.d0)*dP1dh1*A1**(1.d0/3.d0)-
& A1**(-2.d0/3.d0)*dA1dh1*P1**(1.d0/3.d0))/
& (3.d0*A1**(2.d0/3d0))
dum2dh2=xks*xks*dg*
& (P2**(-2.d0/3.d0)*dP2dh2*A2**(1.d0/3.d0)-
& A2**(-2.d0/3.d0)*dA2dh2*P2**(1.d0/3.d0))/
& (3.d0*A2**(2.d0/3d0))
endif
!
! constants
!
c1=rho*rho*dg
c2=c1*sqrts0
c1=c1*s0
!
! hydraulic jump
!
if(jump) then
c write(30,*)
c & 'liquidchannel: jump in element,hk ',nelem,hk
nelemup=prop(index+6)
indexup=ielprop(nelemup)
if(lakon(nelemup)(6:7).eq.'SG') then
eta=prop(indexup+4)
prop(indexup+7)=nelem
elseif(lakon(nelemup)(6:7).eq.'WE') then
eta=prop(indexup+4)
prop(indexup+7)=nelem
elseif(lakon(nelemup)(6:7).eq.'DS') then
eta=prop(indexup+7)
prop(indexup+9)=nelem
endif
!
! determining h3, h4 and derivatives
!
! numerator
!
xt1=c1*A1**3+(h1*dbds-um1*P1)*xflow2
xt2=c1*A2**3+(h2*dbds-um2*P2)*xflow2
!
! denominator
!
xn1=c2*A1**3-D1*xflow2
xn2=c2*A2**3-D2*xflow2
!
! h3 and h4
!
h3=h1+dl*xt1/xn1*eta
h4=h2-dl*xt2/xn2*(1.d0-eta)
c write(30,*)
c & 'liquidchannel: h3,h4,eta ',h3,h4,eta
!
if(bresse) then
!
! width at jump
!
bj=b+dbds*eta*dl
!
! cross sections and derivatives
!
A3=h3*(bj+h3*tth)
A4=h4*(bj+h4*tth)
dA3dh3=bj+2.d0*h3*tth
dA4dh4=bj+2.d0*h4*tth
!
! center of gravity and derivatives
!
yg3=h3*(3.d0*bj+2.d0*h3*tth)/(6.d0*(bj+h3*tth))
yg4=h4*(3.d0*bj+2.d0*h4*tth)/(6.d0*(bj+h4*tth))
dyg3dh3=((3.d0*bj+4.d0*h3*tth)*(bj+tth)
& -tth*h3*(3.d0*bj+2.d0*h3*tth))/
& (6.d0*(bj+h3*tth)**2)
dyg4dh4=((3.d0*bj+4.d0*h4*tth)*(bj+tth)
& -tth*h4*(3.d0*bj+2.d0*h4*tth))/
& (6.d0*(bj+h4*tth)**2)
dyg3dbj=h3*h3*tth/(6.d0*(bj+h3*tth)**2)
dyg4dbj=h4*h4*tth/(6.d0*(bj+h4*tth)**2)
endif
!
! derivative of h3 w.r.t. h1 and of h4 w.r.t. h2
!
dh3dh1=1.d0+((3.d0*c1*A1*A1*dA1dh1
& +(dbds-dum1dh1*P1-um1*dP1dh1)*xflow2)*xn1
& -(3.d0*c2*A1*A1*dA1dh1-dD1dh1*xflow2)*xt1)/
& (xn1*xn1)*eta*dl
dh4dh2=1.d0-((3.d0*c1*A2*A2*dA2dh2
& +(dbds-dum2dh2*P2-um2*dP2dh2)*xflow2)*xn2
& -(3.d0*c2*A2*A2*dA2dh2-dD2dh2*xflow2)*xt2)/
& (xn2*xn2)*(1.d0-eta)*dl
!
if(bresse) then
dA3dh1=dA3dh3*dh3dh1
dA4dh2=dA4dh4*dh4dh2
dyg3dh1=dyg3dh3*dh3dh1
dyg4dh2=dyg4dh4*dh4dh2
endif
!
! derivative of h3 and h4 w.r.t. the mass flow
!
dh3dm=((dbds*h1-um1*P1)*xn1+D1*xt1)*2.d0*xflow/
& (xn1*xn1)*eta*dl
dh4dm=-((dbds*h2-um2*P2)*xn2+D2*xt2)*2.d0*xflow/
& (xn2*xn2)*(1.d0-eta)*dl
!
if(bresse) then
dA3dm=dA3dh3*dh3dm
dA4dm=dA4dh4*dh4dm
dyg3dm=dyg3dh3*dh3dm
dyg4dm=dyg4dh4*dh4dm
endif
!
! derivative of h3 and h4 w.r.t. eta
!
dh3deta=dl*xt1/xn1
dh4deta=dl*xt2/xn2
!
if(bresse) then
dbjdeta=dbds*dl
!
! derivative of A3, A4, yg3 and yg4 w.r.t. eta
!
dA3deta=dA3dh3*dh3deta+h3*dbjdeta
dA4deta=dA4dh4*dh4deta+h4*dbjdeta
dyg3deta=dyg3dh3*dh3deta+dyg3dbj*dbjdeta
dyg4deta=dyg4dh4*dh4deta+dyg4dbj*dbjdeta
endif
!
numf=4
nodef(4)=kon(ipkon(nelemup)+2)
idirf(4)=3
!
if(bresse) then
f=A4*xflow2+c2*(A3*A3*A4*yg3-A3*A4*A4*yg4)
& -A3*xflow2
df(1)=c2*(2.d0*A3*dA3dh1*A4*yg3+A3*A3*A4*dyg3dh1
& -dA3dh1*A4*A4*yg4)-dA3dh1*xflow2
df(2)=2.d0*xflow*(A4-A3)+
& (c2*(2.d0*A3*A4*yg3-A4*A4*yg4)-xflow2)*dA3dm+
& (c2*(A3*A3*yg3-2.d0*A3*A4*yg4)+xflow2)*dA4dm+
& c2*A3*A3*A4*dyg3dm-c2*A3*A4*A4*dyg4dm
df(3)=c2*(A3*A3*dA4dh2*yg3-2.d0*A3*A4*dA4dh2*yg4
& -A3*A4*A4*dyg4dh2)+dA4dh2*xflow2
df(4)=dA4deta*xflow2+
& c2*(2.d0*A3*dA3deta*A4*yg3+A3*A3*dA4deta*yg3
& +A3*A3*A4*dyg3deta-dA3deta*A4*A4*yg4
& -A3*2.d0*A4*dA4deta*yg4-A3*A4*A4*dyg4deta)
& -dA3deta*xflow2
elseif(lakon(nelem)(6:7).eq.'CO') then
f=b2*h4*(2.d0*xflow2+c2*b1*b1*h3**3)-
& b1*h3*(2.d0*xflow2+c2*b1*b2*h4**3)
! dfdh3
df(1)=3.d0*b2*h4*c2*b1*b1*h3*h3-
& b1*(2.d0*xflow2+c2*b1*b2*h4**3)
! dfdh4
df(3)=b2*(2.d0*xflow2+c2*b1*b1*h3**3)-
& 3.d0*b1*h3*c2*b1*b2*h4*h4
! dfdm
df(2)=4.d0*xflow*(b2*h4-b1*h3)+
& df(1)*dh3dm+df(3)*dh4dm
! dfdeta
df(4)=df(1)*dh3deta+df(3)*dh4deta
! dfdh1
df(1)=df(1)*dh3dh1
! dfdh2
df(3)=df(3)*dh4dh2
elseif(lakon(nelem)(6:7).eq.'EL') then
f=b2*h4*(2.d0*xflow2+c2*b1*b2*h3**3)-
& b1*h3*(2.d0*xflow2+c2*b2*b2*h4**3)
! dfdh3
df(1)=3.d0*b2*h4*c2*b1*b2*h3*h3-
& b1*(2.d0*xflow2+c2*b2*b2*h4**3)
! dfdh4
df(3)=b2*(2.d0*xflow2+c2*b1*b2*h3**3)-
& 3.d0*b1*h3*c2*b2*b2*h4*h4
! dfdm
df(2)=4.d0*xflow*(b2*h4-b1*h3)+
& df(1)*dh3dm+df(3)*dh4dm
! dfdeta
df(4)=df(1)*dh3deta+df(3)*dh4deta
! dfdh1
df(1)=df(1)*dh3dh1
! dfdh2
df(3)=df(3)*dh4dh2
elseif(lakon(nelem)(6:7).eq.'DR') then
f=h4*(2.d0*xflow2+c2*b*b*h3*(h3+d)**2)-
& h3*(2.d0*xflow2+c2*b*b*h4**3)
! dfdh3
df(1)=h4*c2*b*b*(3.d0*h3+d)*(h3+d)-
& (2.d0*xflow2+c2*b*b*h4**3)
! dfdh4
df(3)=(2.d0*xflow2+c2*b*b*h3*(h3+d)**2)-
& 3.d0*h3*c2*b*b*h4*h4
! dfdm
df(2)=4.d0*xflow*(h4-h3)+
& df(1)*dh3dm+df(3)*dh4dm
! dfdeta
df(4)=df(1)*dh3deta+df(3)*dh4deta
! dfdh1
df(1)=df(1)*dh3dh1
! dfdh2
df(3)=df(3)*dh4dh2
elseif(lakon(nelem)(6:7).eq.'ST') then
f=h4*(2.d0*xflow2+c2*b*b*h3**3)-
& h3*(2.d0*xflow2+c2*b*b*h4*(h4+d)**2)
! dfdh3
df(1)=3.d0*h4*c2*b*b*h3*h3-
& (2.d0*xflow2+c2*b*b*h4*(h4+d)**2)
! dfdh4
df(3)=(2.d0*xflow2+c2*b*b*h3**3)-
& h3*c2*b*b*(3.d0*h4+d)*(h4+d)
! dfdm
df(2)=4.d0*xflow*(h4-h3)+
& df(1)*dh3dm+df(3)*dh4dm
! dfdeta
df(4)=df(1)*dh3deta+df(3)*dh4deta
! dfdh1
df(1)=df(1)*dh3dh1
! dfdh2
df(3)=df(3)*dh4dh2
endif
else
!
! regular Bresse equation
!
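!              discretized over the element length dl as
!              (h2-h1)*(c2*(A1**3+A2**3)-xflow**2*(D1+D2))=
!              dl*(c1*(A1**3+A2**3)
!                  -(um1*P1+um2*P2-(h1+h2)*dbds)*xflow**2)
!              with c1=rho**2*g*s0 and c2=rho**2*g*dsqrt(1-s0**2);
!              f below assembles exactly this residual
!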
f=c2*(A1**3+A2**3)-xflow2*(D1+D2)
df(1)=-f+(h2-h1)*(c2*dA1dh1*3.d0*A1*A1-xflow2*dD1dh1)
& -dl*(c1*3.d0*A1*A1*dA1dh1
& -(dum1dh1*P1+um1*dP1dh1-dbds)*xflow2)
df(2)=(-(h2-h1)*(D1+D2)
& +dl*(um1*P1+um2*P2-(h1+h2)*dbds))*2.d0*xflow
df(3)=f+(h2-h1)*(c2*dA2dh2*3.d0*A2*A2-xflow2*dD2dh2)
& -dl*(c1*3.d0*A2*A2*dA2dh2
& -(dum2dh2*P2+um2*dP2dh2-dbds)*xflow2)
f=(h2-h1)*f-dl*(c1*(A1**3+A2**3)
& -(um1*P1+um2*P2-(h1+h2)*dbds)*xflow2)
endif
endif
endif
elseif(iflag.eq.3) then
!
! only if called from resultnet in case the element contains
! a hydraulic jump and eta<0 or eta>1. This means that the
! jump does not take place in the element itself. By adjusting
! h1 or h2 the jump is forced into a neighboring element
!
index=ielprop(nelem)
c write(30,*) 'iflag=3, nelem',nelem,lakon(nelem)
!
h1=v(2,node1)
h2=v(2,node2)
!
z1=-g(1)*co(1,node1)-g(2)*co(2,node1)-g(3)*co(3,node1)
z2=-g(1)*co(1,node2)-g(2)*co(2,node2)-g(3)*co(3,node2)
!
dg=dsqrt(g(1)*g(1)+g(2)*g(2)+g(3)*g(3))
!
xflow2=xflow*xflow
!
! determine eta (present location of jump)
!
nelemup=prop(index+6)
indexup=ielprop(nelemup)
if(lakon(nelemup)(6:7).eq.'SG') then
eta=prop(indexup+4)
prop(indexup+4)=0.5d0
prop(indexup+7)=0.d0
elseif(lakon(nelemup)(6:7).eq.'WE') then
eta=prop(indexup+4)
prop(indexup+4)=0.5d0
prop(indexup+7)=0.d0
elseif(lakon(nelemup)(6:7).eq.'DS') then
eta=prop(indexup+7)
prop(indexup+7)=0.5d0
prop(indexup+9)=0.d0
endif
!
! element properties
!
if((lakon(nelem)(6:7).eq.'SG').or.
& (lakon(nelem)(6:7).eq.'SO').or.
& (lakon(nelem)(6:7).eq.'RE').or.
& (lakon(nelem)(6:7).eq.' ').or.
& (lakon(nelem)(6:7).eq.'DS').or.
& (lakon(nelem)(6:7).eq.'DO')) then
b=prop(index+1)
s0=prop(index+2)
if(s0.lt.-1.d0) then
s0=dasin((z1-z2)/dl)
endif
sqrts0=dsqrt(1.d0-s0*s0)
if(lakon(nelem)(6:7).ne.'SG') then
dl=prop(index+3)
theta=prop(index+4)
xks=prop(index+5)
if(dl.le.0.d0) then
dl=dsqrt((co(1,node2)-co(1,node1))**2+
& (co(2,node2)-co(2,node1))**2+
& (co(3,node2)-co(3,node1))**2)
endif
else
theta=0.d0
endif
elseif(lakon(nelem)(6:7).eq.'WE') then
b=prop(index+1)
p=prop(index+2)
c=prop(index+3)
elseif((lakon(nelem)(6:7).eq.'CO').or.
& (lakon(nelem)(6:7).eq.'EL')) then
b1=prop(index+1)
s0=prop(index+2)
if(s0.lt.-1.d0) then
s0=dasin((z1-z2)/dl)
endif
sqrts0=dsqrt(1.d0-s0*s0)
b2=prop(index+4)
elseif((lakon(nelem)(6:7).eq.'DR').or.
& (lakon(nelem)(6:7).eq.'ST'))then
b=prop(index+1)
s0=prop(index+2)
if(s0.lt.-1.d0) then
s0=dasin((z1-z2)/dl)
endif
sqrts0=dsqrt(1.d0-s0*s0)
d=prop(index+4)
endif
!
! contraction, enlargement, drop and step:
! adjust h1 or h2 by solving the appropriate
! momentum equation
!
if((lakon(nelem)(6:7).eq.'CO').or.
& (lakon(nelem)(6:7).eq.'EL').or.
& (lakon(nelem)(6:7).eq.'DR').or.
& (lakon(nelem)(6:7).eq.'ST'))then
c2=rho*rho*dg*sqrts0
!
if(eta.gt.1.d0) then
!
! h1 is given, h2 is unknown
!
if(lakon(nelem)(6:7).eq.'CO') then
e3=b1*h1*c2*b1*b2
e0=2.d0*b1*h1*xflow2/e3
e1=-(2.d0*xflow2+c2*b1*b1*h1**3)*b2/e3
e2=0.d0
elseif(lakon(nelem)(6:7).eq.'EL') then
e3=b1*h1*c2*b2*b2
e0=2.d0*b1*h1*xflow2/e3
e1=-(2.d0*xflow2+c2*b1*b2*h1**3)*b2/e3
e2=0.d0
elseif(lakon(nelem)(6:7).eq.'DR') then
e3=h1*c2*b*b
e0=h1*2.d0*xflow2/e3
e1=-(2.d0*xflow2+c2*b*b*h1*(h1+d)**2)/e3
e2=0.d0
elseif(lakon(nelem)(6:7).eq.'ST') then
e3=h1*c2*b*b
e0=h1*2.d0*xflow2/e3
e1=(h1*c2*b*b*d*d-(2.d0*xflow2+c2*b*b*h1**3))/e3
e2=h1*c2*b*b*2.d0*d/e3
endif
!
! solve the cubic equation
!
call cubic(e0,e1,e2,solreal,solimag,nsol)
!
! determine the real solution closest to h1
!
dist=1.d30
do i=1,nsol
if(dabs(solreal(i)-h1).lt.dist) then
dist=dabs(solreal(i)-h1)
h2=solreal(i)
endif
enddo
if(nactdog(2,node2).ne.0) v(2,node2)=h2
elseif(eta.lt.0.d0) then
!
! h2 is given, h1 is unknown
!
if(lakon(nelem)(6:7).eq.'CO') then
e3=c2*b1*b1*b2*h2
e0=2.d0*xflow2*b2*h2/e3
e1=-b1*(2.d0*xflow2+c2*b1*b2*h2**3)/e3
e2=0.d0
elseif(lakon(nelem)(6:7).eq.'EL') then
e3=c2*b1*b2*b2*h2
e0=2.d0*xflow2*b2*h2/e3
e1=-b1*(2.d0*xflow2+c2*b2*b2*h2**3)/e3
e2=0.d0
elseif(lakon(nelem)(6:7).eq.'DR') then
e3=c2*b*b*h2
e0=2.d0*xflow2*h2/e3
e1=(c2*b*b*d*d*h2-(2.d0*xflow2+c2*b*b*h2**3))/e3
e2=c2*b*b*2.d0*d*h2/e3
elseif(lakon(nelem)(6:7).eq.'ST') then
e3=c2*b*b*h2
e0=2.d0*xflow2*h2/e3
e1=-(2.d0*xflow2+c2*b*b*h2*(h2+d)**2)/e3
e2=0.d0
endif
!
! solve the cubic equation
!
call cubic(e0,e1,e2,solreal,solimag,nsol)
c write(30,*) 'check ',solreal(1)**3+e1*solreal(1)+e0
!
c write(30,*) 'nsol',nsol
c write(30,*) 'solreal',(solreal(i),i=1,3)
c write(30,*) 'solimag',(solimag(i),i=1,3)
!
! determine the real solution closest to h2
!
dist=1.d30
do i=1,nsol
if(dabs(solreal(i)-h2).lt.dist) then
dist=dabs(solreal(i)-h2)
h1=solreal(i)
endif
enddo
if(nactdog(2,node1).ne.0) v(2,node1)=h1
endif
return
endif
!
if(xks.gt.0.d0) then
!
!                 White-Colebrook
!
! hydraulic diameter
!
d=2.d0*(h1+h2)
reynolds=4.d0*xflow/(b*dvi)
form_fact=1.d0
call friction_coefficient(dl,d,xks,reynolds,form_fact,
& friction)
endif
!
! geometric data
!
cth=dcos(theta)
tth=dtan(theta)
!
! nonprismatic cross section
!
if(lakon(nelem)(6:7).eq.' ') then
dbds=prop(index+7)
else
dbds=0.d0
endif
!
! width at water surface
!
dD1dh1=2.d0*tth
dD2dh2=dD1dh1
D1=b+h1*dD1dh1
D2=b+dl*dbds+h2*dD2dh2
!
! cross section
!
A1=h1*(b+h1*tth)
A2=h2*(b+dl*dbds+h2*tth)
!
! perimeter
!
P1=b+2.d0*h1/cth
P2=b+dl*dbds+2.d0*h2/cth
!
! factor for friction
!
if(xks.gt.0.d0) then
!              White-Colebrook
um1=friction/8.d0
um2=um1
else
! Manning
um1=xks*xks*dg*(P1/A1)**(1.d0/3.d0)
um2=xks*xks*dg*(P2/A2)**(1.d0/3.d0)
endif
!
! constants
!
c1=rho*rho*dg
c2=c1*sqrts0
c1=c1*s0
!
if(eta.gt.1.d0) then
xt1=c1*A1**3+(h1*dbds-um1*P1)*xflow2
xn1=c2*A2**3-D2*xflow2
if(nactdog(2,node2).ne.0) v(2,node2)=h1+dl*xt1/xn1
c write(30,*) 'move jump: h1 h2,h2new ',h1,h2,v(2,node2)
elseif(eta.lt.0.d0) then
xt2=c1*A2**3+(h2*dbds-um2*P2)*xflow2
xn2=c2*A2**3-D2*xflow2
if(nactdog(2,node1).ne.0)
& v(2,node1)=h2-dl*xt2/xn2
c write(30,*) 'move jump: h1 h1new h2 ',h1,v(2,node1),h2
endif
endif
!
return
end
|
{"hexsha": "f3557f7a5de64bbf3e49d6acb0e62e3dfe81b776", "size": 45574, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ccx_prool/CalculiX/ccx_2.17/src/liquidchannel.f", "max_stars_repo_name": "alleindrach/calculix-desktop", "max_stars_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ccx_prool/CalculiX/ccx_2.17/src/liquidchannel.f", "max_issues_repo_name": "alleindrach/calculix-desktop", "max_issues_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ccx_prool/CalculiX/ccx_2.17/src/liquidchannel.f", "max_forks_repo_name": "alleindrach/calculix-desktop", "max_forks_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1930080117, "max_line_length": 73, "alphanum_fraction": 0.400996182, "num_tokens": 14735}
|
"""
Presents a unified API for the various weights methods
"""
from SparseSC.fit_loo import loo_weights
from SparseSC.fit_ct import ct_weights
from SparseSC.fit_fold import fold_weights
import numpy as np
def weights(X, X_treat=None, grad_splits=None, custom_donor_pool=None, **kwargs):
""" Calculate synthetic control weights
"""
# PARAMETER QC
try:
X = np.float64(X)
except ValueError:
raise TypeError("X is not coercible to float64")
X = np.asmatrix(X) # this needs to be deprecated properly -- bc Array.dot(Array) != matrix(Array).dot(matrix(Array)) -- not even close !!!
if X_treat is not None:
        # weights for the treated units against the controls:
if X_treat.shape[1] == 0:
raise ValueError("X_treat.shape[1] == 0")
# PARAMETER QC
try:
X_treat = np.float64(X_treat)
except ValueError:
raise ValueError("X_treat is not coercible to float64")
# this needs to be deprecated properly -- bc Array.dot(Array) != matrix(Array).dot(matrix(Array)) -- not even close !!!
X_treat = np.asmatrix(X_treat)
# FIT THE V-MATRIX AND POSSIBLY CALCULATE THE w_pen
# note that the weights, score, and loss function value returned here
# are for the in-sample predictions
return ct_weights(
X=np.vstack((X, X_treat)),
control_units=np.arange(X.shape[0]),
treated_units=np.arange(X_treat.shape[0]) + X.shape[0],
custom_donor_pool=custom_donor_pool,
**kwargs
)
# === X_treat is None: ===
if grad_splits is not None:
return fold_weights(X=X, grad_splits=grad_splits, **kwargs)
# === X_treat is None and grad_splits is None: ===
# weight for the control units against the remaining controls
return loo_weights(
X=X,
control_units=np.arange(X.shape[0]),
treated_units=np.arange(X.shape[0]),
custom_donor_pool=custom_donor_pool,
**kwargs
)
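
# ----------------------------------------------------------------------
# Why the np.asmatrix coercion above matters -- a standalone sketch (not
# part of the SparseSC API): for 1-D inputs, ndarray and np.matrix give
# differently shaped products, and `*` is element-wise for ndarrays but
# matrix multiplication for np.matrix.
def _matrix_vs_array_demo():
    a = np.array([1.0, 2.0, 3.0])
    assert a.dot(a) == 14.0                        # scalar dot product
    m = np.asmatrix(a)                             # promoted to shape (1, 3)
    assert (m * m.T).shape == (1, 1)               # matrix product stays 2-D
    assert ((a * a) == np.array([1.0, 4.0, 9.0])).all()  # element-wise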
|
{"hexsha": "b1b194b117534c613b56b647318adbffe9b2768b", "size": 2137, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/SparseSC/weights.py", "max_stars_repo_name": "wofein/SparseSC", "max_stars_repo_head_hexsha": "fd8125015c65829458bfee2ae94c24981112d2d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2019-05-14T11:05:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T19:16:21.000Z", "max_issues_repo_path": "src/SparseSC/weights.py", "max_issues_repo_name": "wofein/SparseSC", "max_issues_repo_head_hexsha": "fd8125015c65829458bfee2ae94c24981112d2d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-01-16T18:57:01.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-17T16:50:23.000Z", "max_forks_repo_path": "src/SparseSC/weights.py", "max_forks_repo_name": "wofein/SparseSC", "max_forks_repo_head_hexsha": "fd8125015c65829458bfee2ae94c24981112d2d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2019-05-14T11:06:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-13T12:30:58.000Z", "avg_line_length": 32.8769230769, "max_line_length": 142, "alphanum_fraction": 0.6321946654, "include": true, "reason": "import numpy", "num_tokens": 518}
|
import numpy as np
from probflow.data import ArrayDataGenerator, make_generator
def test_make_generator():
# Create some data
x = np.random.randn(100, 3)
w = np.random.randn(3, 1)
b = np.random.randn()
y = x @ w + b
# Should return an ArrayDataGenerator
dg = make_generator(x, y)
assert isinstance(dg, ArrayDataGenerator)
# Should just return what passed if passed an ArrayDataGenerator
dg = ArrayDataGenerator(x, y, batch_size=5)
dg_out = make_generator(dg)
assert isinstance(dg_out, ArrayDataGenerator)
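    # Stricter check -- assumption based on the comment above:
    # make_generator should hand back the very same generator object
    assert dg_out is dg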
|
{"hexsha": "44506114e6e87f7b6fa2bd97296b4af6d157abe6", "size": 559, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/tensorflow/data/test_make_generator.py", "max_stars_repo_name": "chiragnagpal/probflow", "max_stars_repo_head_hexsha": "1ba0619cd4f482a015cd25633d2f113d5d0f3476", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 134, "max_stars_repo_stars_event_min_datetime": "2019-02-18T09:45:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T22:17:34.000Z", "max_issues_repo_path": "tests/unit/tensorflow/data/test_make_generator.py", "max_issues_repo_name": "chiragnagpal/probflow", "max_issues_repo_head_hexsha": "1ba0619cd4f482a015cd25633d2f113d5d0f3476", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 44, "max_issues_repo_issues_event_min_datetime": "2019-04-18T17:41:33.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-14T00:40:55.000Z", "max_forks_repo_path": "tests/unit/tensorflow/data/test_make_generator.py", "max_forks_repo_name": "chiragnagpal/probflow", "max_forks_repo_head_hexsha": "1ba0619cd4f482a015cd25633d2f113d5d0f3476", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2019-10-17T05:45:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T11:58:02.000Z", "avg_line_length": 25.4090909091, "max_line_length": 68, "alphanum_fraction": 0.7012522361, "include": true, "reason": "import numpy", "num_tokens": 145}
|
[STATEMENT]
lemma (in Corps) val_1px:"\<lbrakk>valuation K v; x \<in> carrier K; 0 \<le> (v (1\<^sub>r \<plusminus> x))\<rbrakk>
\<Longrightarrow> 0 \<le> (v x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; x \<in> carrier K; 0 \<le> v (1\<^sub>r \<plusminus> x)\<rbrakk> \<Longrightarrow> 0 \<le> v x
[PROOF STEP]
apply (cut_tac field_is_ring, frule Ring.ring_is_ag[of "K"],
frule Ring.ring_one[of "K"])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; x \<in> carrier K; 0 \<le> v (1\<^sub>r \<plusminus> x); Ring K; aGroup K; 1\<^sub>r \<in> carrier K\<rbrakk> \<Longrightarrow> 0 \<le> v x
[PROOF STEP]
apply (rule contrapos_pp, simp+,
case_tac "x = \<zero>\<^bsub>K\<^esub>",
simp add:aGroup.ag_r_zero, simp add:value_of_zero,
simp add: aneg_le[of "0" "v x"],
frule value_less_eq[of v x "1\<^sub>r"], assumption+,
simp add:value_of_one)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; x \<in> carrier K; 0 \<le> v (1\<^sub>r \<plusminus> x); Ring K; aGroup K; 1\<^sub>r \<in> carrier K; v x < 0; x \<noteq> \<zero>; v x = v (x \<plusminus> 1\<^sub>r)\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
apply (drule sym,
simp add:aGroup.ag_pOp_commute[of "K" "x"])
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 592, "file": "Valuation_Valuation1", "length": 4}
|
#ifndef BOOST_SMART_PTR_DETAIL_SP_COUNTED_BASE_GCC_SPARC_HPP_INCLUDED
#define BOOST_SMART_PTR_DETAIL_SP_COUNTED_BASE_GCC_SPARC_HPP_INCLUDED
// MS compatible compilers support #pragma once
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
#pragma once
#endif
// detail/sp_counted_base_gcc_sparc.hpp - g++ on Sparc V8+
//
// Copyright (c) 2006 Piotr Wyderski
// Copyright (c) 2006 Tomas Puverle
// Copyright (c) 2006 Peter Dimov
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// Thanks to Michael van der Westhuizen
#include <boost/config.hpp>
#include <boost/smart_ptr/detail/sp_typeinfo_.hpp>
#include <inttypes.h> // int32_t
namespace boost {
namespace detail {
inline int32_t compare_and_swap(int32_t *dest_, int32_t compare_,
int32_t swap_) {
__asm__ __volatile__("cas [%1], %2, %0"
: "+r"(swap_)
: "r"(dest_), "r"(compare_)
: "memory");
return swap_;
}
inline int32_t atomic_fetch_and_add(int32_t *pw, int32_t dv) {
// long r = *pw;
// *pw += dv;
// return r;
for (;;) {
int32_t r = *pw;
if (__builtin_expect((compare_and_swap(pw, r, r + dv) == r), 1)) {
return r;
}
}
}
inline void atomic_increment(int32_t *pw) { atomic_fetch_and_add(pw, 1); }
inline int32_t atomic_decrement(int32_t *pw) {
return atomic_fetch_and_add(pw, -1);
}
inline int32_t atomic_conditional_increment(int32_t *pw) {
// long r = *pw;
// if( r != 0 ) ++*pw;
// return r;
for (;;) {
int32_t r = *pw;
if (r == 0) {
return r;
}
if (__builtin_expect((compare_and_swap(pw, r, r + 1) == r), 1)) {
return r;
}
}
}
class BOOST_SYMBOL_VISIBLE sp_counted_base {
private:
sp_counted_base(sp_counted_base const &);
sp_counted_base &operator=(sp_counted_base const &);
int32_t use_count_; // #shared
int32_t weak_count_; // #weak + (#shared != 0)
public:
sp_counted_base() : use_count_(1), weak_count_(1) {}
virtual ~sp_counted_base() // nothrow
{}
// dispose() is called when use_count_ drops to zero, to release
// the resources managed by *this.
virtual void dispose() = 0; // nothrow
// destroy() is called when weak_count_ drops to zero.
virtual void destroy() // nothrow
{
delete this;
}
virtual void *get_deleter(sp_typeinfo_ const &ti) = 0;
virtual void *get_local_deleter(sp_typeinfo_ const &ti) = 0;
virtual void *get_untyped_deleter() = 0;
void add_ref_copy() { atomic_increment(&use_count_); }
bool add_ref_lock() // true on success
{
return atomic_conditional_increment(&use_count_) != 0;
}
void release() // nothrow
{
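    // atomic_decrement returns the value held *before* the decrement,
    // so a result of 1 means this call just dropped use_count_ to zero.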
if (atomic_decrement(&use_count_) == 1) {
dispose();
weak_release();
}
}
void weak_add_ref() // nothrow
{
atomic_increment(&weak_count_);
}
void weak_release() // nothrow
{
if (atomic_decrement(&weak_count_) == 1) {
destroy();
}
}
long use_count() const // nothrow
{
return const_cast<int32_t const volatile &>(use_count_);
}
};
} // namespace detail
} // namespace boost
#endif // #ifndef BOOST_SMART_PTR_DETAIL_SP_COUNTED_BASE_GCC_SPARC_HPP_INCLUDED
|
{"hexsha": "eb4157ccee52742d09fa5aee892dfcc20a063474", "size": 3292, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "libs/boost_1_72_0/boost/smart_ptr/detail/sp_counted_base_gcc_sparc.hpp", "max_stars_repo_name": "henrywarhurst/matrix", "max_stars_repo_head_hexsha": "317a2a7c35c1c7e3730986668ad2270dc19809ef", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libs/boost_1_72_0/boost/smart_ptr/detail/sp_counted_base_gcc_sparc.hpp", "max_issues_repo_name": "henrywarhurst/matrix", "max_issues_repo_head_hexsha": "317a2a7c35c1c7e3730986668ad2270dc19809ef", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/boost_1_72_0/boost/smart_ptr/detail/sp_counted_base_gcc_sparc.hpp", "max_forks_repo_name": "henrywarhurst/matrix", "max_forks_repo_head_hexsha": "317a2a7c35c1c7e3730986668ad2270dc19809ef", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.5479452055, "max_line_length": 79, "alphanum_fraction": 0.65036452, "num_tokens": 955}
|
from typing import Optional
import numpy as np
from scipy import stats
from naive_bayes.distributions.abstract import AbstractDistribution
class MultivariateNormal(AbstractDistribution):
"""
Multivariate Normal (gaussian) distribution with parameters mu and sigma.
"""
def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None) -> None:
"""
Method to compute MLE given X (data). If y is provided, computes MLE of X for each class y.
:param np.ndarray X: training data.
:param Optional[np.ndarray] y: target values.
"""
self._check_input_data(X=X, y=y, univariate=False)
self._check_support(X=X)
if y is None:
self.mu = self.compute_mu_mle(X)
self.sigma = self.compute_sigma_mle(X)
else:
n_classes = max(y) + 1
self.mu = np.zeros((n_classes, X.shape[1]))
self.sigma = np.zeros((n_classes, X.shape[1], X.shape[1]))
for cls in range(n_classes):
self.mu[cls] = self.compute_mu_mle(X[y == cls]) # type: ignore
self.sigma[cls] = self.compute_sigma_mle(X[y == cls]) # type: ignore
def predict_log_proba(self, X: np.ndarray) -> np.ndarray:
"""
Method to compute log probabilities given X (data).
:param np.ndarray X: data.
:return: log probabilities for X.
:rtype: np.ndarray
"""
self._check_input_data(X=X, univariate=False)
self._check_support(X=X)
if self.mu.ndim == 1:
log_proba = stats.multivariate_normal.logpdf(
X, mean=self.mu, cov=self.sigma
)
else:
n_samples = X.shape[0]
n_classes = self.mu.shape[0] # type: ignore
log_proba = np.zeros((n_samples, n_classes))
for cls in range(n_classes):
log_proba[:, cls] = stats.multivariate_normal.logpdf(X, mean=self.mu[cls], cov=self.sigma[cls]) # type: ignore
return log_proba
def sample(self, n_samples: int, random_state: Optional[int] = None) -> np.ndarray:
"""
Generate random variables samples from fitted distribution.
:param int n_samples: number of random variables samples.
:param Optional[int] random_state: random number generator seed.
:return: random variables samples.
:rtype: np.ndarray
"""
if self.mu.ndim == 1:
samples = stats.multivariate_normal.rvs(
mean=self.mu, cov=self.sigma, size=n_samples, random_state=random_state
)
else:
n_classes = self.mu.shape[0] # type: ignore
n_dimensions = self.mu.shape[1]
samples = np.zeros((n_samples, n_dimensions, n_classes))
for cls in range(n_classes):
samples[:, :, cls] = stats.multivariate_normal.rvs(mean=self.mu[cls], cov=self.sigma[cls], size=n_samples, random_state=random_state) # type: ignore
return samples
@staticmethod
def compute_mu_mle(X: np.ndarray) -> np.ndarray:
"""
Compute maximum likelihood estimator for parameters vector mu.
:param np.ndarray X: training data.
:return: maximum likelihood estimator for parameters vector mu.
:rtype: np.ndarray
"""
MultivariateNormal._check_input_data(X=X, univariate=False)
MultivariateNormal._check_support(X=X)
mu = X.mean(axis=0)
return mu
@staticmethod
def compute_sigma_mle(X: np.ndarray) -> np.ndarray:
"""
Compute maximum likelihood estimator for parameters matrix sigma.
:param np.ndarray X: training data.
:return: maximum likelihood estimator for parameters matrix sigma.
:rtype: np.ndarray
"""
MultivariateNormal._check_input_data(X=X, univariate=False)
MultivariateNormal._check_support(X=X)
mu = X.mean(axis=0)
sigma = (X - mu).T @ (X - mu) / X.shape[0]
return sigma
@staticmethod
def _check_support(X: np.ndarray, **kwargs) -> None:
"""
Method to check data for being in random variable support.
:param np.ndarray X: data.
:param kwargs: additional distribution parameters.
"""
pass
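
# ----------------------------------------------------------------------
# Minimal usage sketch (illustrative; assumes the surrounding package is
# importable and uses purely synthetic data).
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = np.vstack([
        rng.randn(50, 2),                           # class 0 around (0, 0)
        rng.randn(50, 2) + np.array([3.0, 3.0]),    # class 1 around (3, 3)
    ])
    y = np.array([0] * 50 + [1] * 50)

    dist = MultivariateNormal()
    dist.fit(X, y)                             # per-class MLE of mu, sigma
    log_proba = dist.predict_log_proba(X)      # shape (100, 2)
    assert log_proba.shape == (100, 2)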
|
{"hexsha": "c314a436afbbc18899f8b4a5b9c83a679436d810", "size": 4309, "ext": "py", "lang": "Python", "max_stars_repo_path": "naive_bayes/distributions/multivariate/continuous.py", "max_stars_repo_name": "dayyass/extended_naive_bayes", "max_stars_repo_head_hexsha": "3178b3a79b4094ec7e0a553e9203ac947a83aadd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-07-22T19:48:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-05T15:03:15.000Z", "max_issues_repo_path": "naive_bayes/distributions/multivariate/continuous.py", "max_issues_repo_name": "dayyass/naive_bayes", "max_issues_repo_head_hexsha": "3178b3a79b4094ec7e0a553e9203ac947a83aadd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2021-05-25T20:28:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-30T19:54:59.000Z", "max_forks_repo_path": "naive_bayes/distributions/multivariate/continuous.py", "max_forks_repo_name": "dayyass/extended-naive-bayes", "max_forks_repo_head_hexsha": "3178b3a79b4094ec7e0a553e9203ac947a83aadd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1461538462, "max_line_length": 165, "alphanum_fraction": 0.6064051984, "include": true, "reason": "import numpy,from scipy", "num_tokens": 970}
|
From mathcomp Require Import ssreflect ssrfun ssrbool ssrnat.
Set Implicit Arguments.
Module MyNamespace.
(** Euclidean division: returns quotient and reminder *)
(** Type constructors, Product type *)
Section ProductType.
Inductive prod (A B : Type) : Type :=
| pair of A & B.
About pair.
(** Explicit binding of type constructor's parameters for
data constructors
*)
Check pair 42 true : prod nat bool.
(** Implicit arguments;
local deactivation of implicit arguments (@)
*)
Fail Check pair nat bool 42 true : prod nat bool. (* inconvenient *)
Check @pair nat bool 42 true.
(** Notations for better UX *)
Notation "A * B" := (prod A B) (at level 40, left associativity) : type_scope.
(** Notation scopes *)
Fail Check nat * bool.
Check (nat * nat)%type.
Check (nat * bool) : Type.
Open Scope type_scope.
Check (nat * nat).
Close Scope type_scope.
Fail Check (nat * nat).
(** Left / right associativity *)
Check ((nat * bool) * nat)%type.
Check (nat * (bool * nat))%type.
(** Weak notation *)
Notation "( p ; q )" := (pair p q).
(** Triples, quadruples, ... ? *)
(** Recursive notations *)
Notation "( p , q , .. , r )" := (pair .. (pair p q) .. r)
: core_scope.
Check (1, false) : nat * bool.
Unset Printing Notations.
Check (1, false) : nat * bool.
Set Printing Notations.
Definition fst {A B : Type} : A * B -> A :=
(* fun p => match p with | pair a b => a end. *)
(* fun p => let (a, b) := p in a. *)
fun '(a, _) => a.
Notation "p .1" := (fst p).
Definition snd {A B : Type} : A * B -> B :=
fun '(a, b) => b.
Notation "p .2" := (snd p).
Definition swap {A B : Type} : A * B -> B * A :=
fun '(a,b) => (b,a).
End ProductType.
(**
A /\ B -> B /\ A
*)
Check fst.
Check snd.
Check @pair _ _.
Section Intuitionistic_Propositional_Logic.
(** Implication *)
Definition A_implies_A (A : Prop) :
A -> A
:=
fun proof_A : A => proof_A.
Definition A_implies_B_implies_A (A B : Prop) :
A -> B -> A
:=
fun proof_A => fun proof_B => proof_A.
(* const *)
(** Conjunction *)
Inductive and (A B : Prop) : Prop :=
| conj of A & B.
Notation "A /\ B" := (and A B) : type_scope.
Definition andC (A B : Prop) :
A /\ B -> B /\ A
:=
fun '(conj proof_A proof_B) => conj proof_B proof_A.
Definition andA (A B C : Prop) :
(A /\ B) /\ C -> A /\ (B /\ C)
:=
fun '(conj (conj a b) c) => conj a (conj b c).
(** Biimplication, a.k.a. if and only if *)
Definition iff (A B : Prop) : Prop :=
(A -> B) /\ (B -> A).
Notation "A <-> B" := (iff A B) : type_scope.
Definition andA_iff (A B C : Prop) :
(A /\ B) /\ C <-> A /\ (B /\ C)
:=
conj
(fun '(conj (conj a b) c) => conj a (conj b c))
(fun '(conj a (conj b c)) => (conj (conj a b) c)).
(** Disjunction *)
Inductive or (A B : Prop) : Prop :=
| or_introl of A
| or_intror of B.
Arguments or_introl [A B] a, [A] B a.
Arguments or_intror [A B] b, A [B] b.
Notation "A \/ B" := (or A B) : type_scope.
Definition or1 (A B : Prop) : A -> A \/ B
:=
fun proofA => or_introl proofA.
Definition orC A B :
A \/ B -> B \/ A
:=
fun a_or_b =>
match a_or_b with
| or_introl proofA => or_intror proofA
| or_intror proofB => or_introl proofB
end.
Definition or_and_distr A B C :
(A \/ B) /\ C -> (A /\ C) \/ (B /\ C)
:=
fun '(conj a_or_b c) =>
match a_or_b with
| or_introl a => or_introl (conj a c)
| or_intror b => or_intror (conj b c)
end.
Inductive False : Prop := .
Inductive True : Prop :=
| I.
Definition t : True
:=
I.
Definition t_and_t : True /\ True
:=
conj I I.
Definition not (A : Prop) :=
A -> False.
Notation "~ A" := (not A) : type_scope.
Definition A_implies_not_not_A (A : Prop) :
A -> ~ ~ A
(* A -> (A -> False) -> False *)
:=
fun a => fun not_a => not_a a.
(* Double negation elimination is
not provable in Intuitionistic Logic *)
Fail Definition DNE (A : Prop) :
~ ~ A -> A
:=
fun nna => __. (* can't call [nna] *)
(* Since the Law of Excluded Middle
is equivalent to DNE it's not provable
either
*)
Fail Definition LEM (A : Prop) :
A \/ ~A
:=
(* or_intror (fun a => ???). *)
__. (* or_introl / or_intror ? *)
End Intuitionistic_Propositional_Logic.
Section Propositional_Equality.
Inductive eq (A : Type)
(a : A) : A -> Prop :=
| eq_refl : eq a a.
Check eq_ind.
About eq.
Check eq_refl 1 : eq 1 1.
Fail Check eq_refl 1 : eq 1 21.
Check @eq_refl nat 2 : @eq nat 2 2.
Fail Check eq_refl 1 : @eq nat 1 2.
Fail Check eq_refl 2 : @eq nat 1 2.
Notation "a = b" := (eq a b) : type_scope.
Definition eq_reflexive A (x : A) :
x = x
:=
eq_refl x.
(* dependent pattern matching *)
Definition eq_sym A (x y : A) :
x = y -> y = x
:=
fun proof_x_eq_y =>
match proof_x_eq_y with
| eq_refl => eq_refl x
end.
Definition eq_foo (x y z : nat) :
x + y = y + z -> (x + y) + z = (y + z) + z
:=
fun prf_eq =>
match prf_eq with
| eq_refl => eq_refl ((x + y) + z)
end.
Definition eq_trans A (x y z : A) :
x = y -> (y = z -> x = z)
:=
fun x_eq_y : x = y =>
match x_eq_y with
| eq_refl => id
end.
End Propositional_Equality.
End MyNamespace.
(** The SSReflect proof language *)
Lemma A_implies_A (A : Prop) :
A -> A.
Proof. (* <-- optional *)
Show Proof.
move => a. (* tactical *)
Show Proof.
(* move: a. exact. *)
exact: a.
Show Proof.
(* by []. *)
Qed.
Lemma or_and_distr A B C :
(A \/ B) /\ C -> A /\ C \/ B /\ C.
Proof.
case.
case.
- move=> a. move=> c. left. split.
- exact: a.
by apply: c.
move=> b c. right. by split.
Qed.
About or_and_distr.
(* a terser version *)
Lemma or_and_distr' A B C :
(A \/ B) /\ C -> A /\ C \/ B /\ C.
Proof.
by move=> [[a | b] c]; [left | right].
Qed.
(* An example taken from
   "An Ssreflect Tutorial" by G. Gonthier and S. Le Roux (2009)
*)
Section HilbertSaxiom.
Variables A B C : Prop.
Lemma HilbertS :
(A -> B -> C) -> (A -> B) -> A -> C.
Proof.
move=> hAiBiC hAiB hA.
move: hAiBiC.
apply.
- by [].
by apply: hAiB.
Qed.
End HilbertSaxiom.
Section Rewrite.
Variable A : Type.
Implicit Types x y z : A.
Lemma eq_reflexive x :
x = x.
Proof. by []. Qed.
Lemma eq_sym x y :
x = y -> y = x.
Proof.
move=> x_eq_y. rewrite -x_eq_y. by [].
Show Proof.
Qed.
Eval compute in eq_sym.
Lemma eq_sym_shorter x y :
x = y -> y = x.
Proof.
by move=> ->.
Qed.
Lemma eq_trans x y z :
x = y -> y = z -> x = z.
Proof.
move=> ->->.
apply: eq_reflexive.
Qed.
|
{"author": "anton-trunov", "repo": "coq-lecture-notes", "sha": "e012addae82da6d8d03f6e789e43f35140dcdfea", "save_path": "github-repos/coq/anton-trunov-coq-lecture-notes", "path": "github-repos/coq/anton-trunov-coq-lecture-notes/coq-lecture-notes-e012addae82da6d8d03f6e789e43f35140dcdfea/code/lecture02.v"}
|
# Importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing, tree
from sklearn.model_selection import train_test_split, KFold, cross_val_score, StratifiedKFold, GridSearchCV
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import mean_squared_error
import itertools
from sklearn.preprocessing import MinMaxScaler
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
from pandas.plotting import scatter_matrix
from sklearn.naive_bayes import GaussianNB
import warnings
warnings.filterwarnings('ignore')
dataset = pd.read_csv('diabetes.csv')
# Preview data
print(dataset.head())
dataset.info()
# Statistical summary
print(dataset.describe().T)
#
# Count of null values
print(dataset.isnull().sum())
# Outcome countplot
sns.countplot(x = 'Outcome',data = dataset)
# Histogram of each feature
#
col = dataset.columns[:8]
plt.subplots(figsize = (20, 15))
length = len(col)
#
for i, j in itertools.zip_longest(col, range(length)):
    plt.subplot(length // 2, 3, j + 1)  # subplot rows/cols must be integers
plt.subplots_adjust(wspace = 0.1,hspace = 0.5)
dataset[i].hist(bins = 20)
plt.title(i)
plt.show()
# Scatter plot matrix
scatter_matrix(dataset, figsize = (20, 20));
#
# Pairplot
sns.pairplot(data = dataset, hue = 'Outcome')
plt.show()
# Heatmap
sns.heatmap(dataset.corr(), annot = True)
plt.show()
dataset_new = dataset.copy()  # work on a copy so the original frame stays intact
# Replacing zero values with NaN
dataset_new[["Glucose", "BloodPressure", "SkinThickness", "Insulin", "BMI"]] = dataset_new[["Glucose", "BloodPressure", "SkinThickness", "Insulin", "BMI"]].replace(0, np.NaN)
# Count of NaN
print(dataset_new.isnull().sum())
#Replacing NaN with mean values
dataset_new["Glucose"].fillna(dataset_new["Glucose"].mean(), inplace = True)
dataset_new["BloodPressure"].fillna(dataset_new["BloodPressure"].mean(), inplace = True)
dataset_new["SkinThickness"].fillna(dataset_new["SkinThickness"].mean(), inplace = True)
dataset_new["Insulin"].fillna(dataset_new["Insulin"].mean(), inplace = True)
dataset_new["BMI"].fillna(dataset_new["BMI"].mean(), inplace = True)
# Feature scaling using MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
dataset_scaled = sc.fit_transform(dataset_new)
dataset_scaled = pd.DataFrame(dataset_scaled)
print(dataset_scaled)
# Selecting features - [Glucose, Insulin, BMI, Age]
X = dataset_scaled.iloc[:, [1, 4, 5, 7]].values
Y = dataset_scaled.iloc[:, 8].values
# Splitting X and Y
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.10, random_state = 42, stratify = dataset_new['Outcome'] )
# Checking dimensions
print("X_train shape:", X_train.shape)
print("X_test shape:", X_test.shape)
print("Y_train shape:", Y_train.shape)
print("Y_test shape:", Y_test.shape)
# KNN Plotting a graph for n_neighbors
#
X_axis = list(range(1, 31))
acc_values = []
#
for i in X_axis:
    knn_model = KNeighborsClassifier(n_neighbors = i)
    knn_model.fit(X_train, Y_train)
    prediction = knn_model.predict(X_test)
    acc_values.append(metrics.accuracy_score(Y_test, prediction))
acc = pd.Series(acc_values, index = X_axis)
plt.plot(X_axis, acc)
plt.xticks(X_axis)
plt.title("Finding best value for n_neighbors")
plt.xlabel("n_neighbors")
plt.ylabel("Accuracy")
plt.grid()
plt.show()
print('Highest value: ', acc.values.max(), 'at n_neighbors =', acc.idxmax())
#
# K nearest neighbors Algorithm
knn = KNeighborsClassifier(n_neighbors = 17, metric = 'minkowski', p = 2)
knn.fit(X_train, Y_train)
#
Y_pred_knn = knn.predict(X_test)
accuracy_knn = accuracy_score(Y_test, Y_pred_knn)
print("K Nearest neighbors accuracy without k-fold cv: " + str(accuracy_knn * 100))
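#
# Optional: GridSearchCV (imported above) can tune n_neighbors in a single
# call. This is a hedged sketch, not part of the original pipeline; the grid
# and cv values below are illustrative assumptions.
knn_grid = GridSearchCV(KNeighborsClassifier(metric = 'minkowski', p = 2),
                        param_grid = {'n_neighbors': list(range(1, 31))},
                        scoring = 'accuracy', cv = 5)
knn_grid.fit(X_train, Y_train)
print("GridSearchCV best params:", knn_grid.best_params_,
      "best CV accuracy:", knn_grid.best_score_)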
#
# Kfold Split
folds = KFold(n_splits=10, shuffle=True, random_state=35)
knn_acc_scores = []
knn_prec_scores=[]
knn_recall_scores=[]
knn_rmse_scores=[]
knn_mse_scores=[]
knn_f1_scores=[]
#
for n_fold, (train_index, valid_index) in enumerate(folds.split(X, Y)):
print('\n Fold ' + str(n_fold + 1) +
' \n\n train ids :' + str(train_index) +
' \n\n validation ids :' + str(valid_index))
#
X_train, X_valid = X[train_index], X[valid_index]
Y_train, Y_valid = Y[train_index], Y[valid_index]
#
knn.fit(X_train, Y_train)
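    # NOTE: each fold is scored against the fixed held-out test set, and the
    # fold's training rows are drawn from the full dataset, so test rows can
    # leak into training; fold-level scores are therefore optimistic.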
y_pred = knn.predict(X_test)
#
knn_acc_score = accuracy_score(Y_test, y_pred)
knn_acc_scores.append(knn_acc_score)
print('\n Accuracy score for Fold ' + str(n_fold + 1) + ' --> ' + str(knn_acc_score) + '\n')
#
knn_prec_score=precision_score(Y_test, y_pred)
knn_prec_scores.append(knn_prec_score)
print('\n Precision score for Fold ' + str(n_fold + 1) + ' --> ' + str(knn_prec_score) + '\n')
#
knn_rec_score = recall_score(Y_test, y_pred)
knn_recall_scores.append(knn_rec_score)
print('\n Recall score for Fold ' + str(n_fold + 1) + ' --> ' + str(knn_rec_score) + '\n')
#
knn_f_score = f1_score(Y_test, y_pred)
knn_f1_scores.append(knn_f_score)
print('\n F1 score for Fold ' + str(n_fold + 1) + ' --> ' + str(knn_f_score) + '\n')
#
    knn_rmse_score = mean_squared_error(Y_test, y_pred, squared=False)
knn_rmse_scores.append(knn_rmse_score)
print('\n Rmse score for Fold ' + str(n_fold + 1) + ' --> ' + str(knn_rmse_score) + '\n')
#
    knn_mse_score = mean_squared_error(Y_test, y_pred, squared=True)
knn_mse_scores.append(knn_mse_score)
print('\n Mse score for Fold ' + str(n_fold + 1) + ' --> ' + str(knn_mse_score) + '\n')
print("\n Accuracy array: ")
print(knn_acc_scores)
print("\n Precision array: ")
print(knn_prec_scores)
print("\n Recall array: ")
print(knn_recall_scores)
print("\n F1 score array: ")
print(knn_f1_scores)
print("\n Rmse array: ")
print(knn_rmse_scores)
print("\n Mse array: ")
print(knn_mse_scores)
print('Avg. accuracy score :' + str(np.mean(knn_acc_scores)))
print('Avg. Precision score :' + str(np.mean(knn_prec_scores)))
print('Avg. Recall score :' + str(np.mean(knn_recall_scores)))
print('Avg. F1 score :' + str(np.mean(knn_f1_scores)))
print('Avg. Rmse score :' + str(np.mean(knn_rmse_scores)))
print('Avg. Mse score : \n' + str(np.mean(knn_mse_scores)))
#
# #####DECISION TREE
# function for fitting trees of various depths on the training data using cross-validation
def run_cross_validation_on_trees(X, Y, tree_depths, cv=10, scoring='accuracy'):
cv_scores_list = []
cv_scores_std = []
cv_scores_mean = []
accuracy_scores = []
for depth in tree_depths:
tree_model = DecisionTreeClassifier(max_depth=depth)
cv_scores = cross_val_score(tree_model, X, Y, cv=cv, scoring=scoring)
cv_scores_list.append(cv_scores)
cv_scores_mean.append(cv_scores.mean())
cv_scores_std.append(cv_scores.std())
accuracy_scores.append(tree_model.fit(X, Y).score(X, Y))
cv_scores_mean = np.array(cv_scores_mean)
cv_scores_std = np.array(cv_scores_std)
accuracy_scores = np.array(accuracy_scores)
return cv_scores_mean, cv_scores_std, accuracy_scores
# function for plotting cross-validation results
def plot_cross_validation_on_trees(depths, cv_scores_mean, cv_scores_std, accuracy_scores, title):
fig, ax = plt.subplots(1,1, figsize=(15,5))
ax.plot(depths, cv_scores_mean, '-o', label='mean cross-validation accuracy', alpha=0.9)
ax.fill_between(depths, cv_scores_mean-2*cv_scores_std, cv_scores_mean+2*cv_scores_std, alpha=0.2)
ylim = plt.ylim()
ax.plot(depths, accuracy_scores, '-*', label='train accuracy', alpha=0.9)
ax.set_title(title, fontsize=16)
ax.set_xlabel('Tree depth', fontsize=14)
ax.set_ylabel('Accuracy', fontsize=14)
ax.set_ylim(ylim)
ax.set_xticks(depths)
ax.legend()
# fitting trees of depth 1 to 24
sm_tree_depths = range(1,25)
sm_cv_scores_mean, sm_cv_scores_std, sm_accuracy_scores = run_cross_validation_on_trees(X_train, Y_train, sm_tree_depths)
# plotting accuracy
plot_cross_validation_on_trees(sm_tree_depths, sm_cv_scores_mean, sm_cv_scores_std, sm_accuracy_scores,
'Accuracy per decision tree depth on training data')
plt.show()
#
idx_max = sm_cv_scores_mean.argmax()
sm_best_tree_depth = sm_tree_depths[idx_max]
sm_best_tree_cv_score = sm_cv_scores_mean[idx_max]
sm_best_tree_cv_score_std = sm_cv_scores_std[idx_max]
print('The depth-{} tree achieves the best mean cross-validation accuracy {} +/- {}% on training dataset'.format(
sm_best_tree_depth, round(sm_best_tree_cv_score*100,5), round(sm_best_tree_cv_score_std*100, 5)))
#
#
# function for training and evaluating a tree
#
dtc = DecisionTreeClassifier(max_depth=sm_best_tree_depth).fit(X_train, Y_train)
accuracy_train = dtc.score(X_train, Y_train)
accuracy_test = dtc.score(X_test, Y_test)
print('Single tree depth: ', sm_best_tree_depth)
print('Decision tree accuracy (train / test): ', accuracy_train, '/', accuracy_test)
#
Y_pred_dtc = dtc.predict(X_test)
accuracy_dtc = accuracy_score(Y_test, Y_pred_dtc)
print("Decision tree accuracy without k-fold cv: " + str(accuracy_dtc * 100))
# Kfold Split
folds = KFold(n_splits=10, shuffle=True, random_state=35)
dtc_acc_scores = []
dtc_prec_scores=[]
dtc_recall_scores=[]
dtc_rmse_scores=[]
dtc_mse_scores=[]
dtc_f1_scores=[]
#
for n_fold, (train_index, valid_index) in enumerate(folds.split(X, Y)):
print('\n Fold ' + str(n_fold + 1) +
' \n\n train ids :' + str(train_index) +
' \n\n validation ids :' + str(valid_index))
X_train, X_valid = X[train_index], X[valid_index]
Y_train, Y_valid = Y[train_index], Y[valid_index]
dtc.fit(X_train, Y_train)
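    # Same caveat as in the KNN folds above: scoring is on the fixed test set.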
Y_pred = dtc.predict(X_test)
# #
dtc_acc_score = accuracy_score(Y_test, Y_pred)
dtc_acc_scores.append(dtc_acc_score)
print('\n Accuracy score for Fold ' + str(n_fold + 1) + ' --> ' + str(dtc_acc_score) + '\n')
#
dtc_prec_score = precision_score(Y_test, Y_pred)
dtc_prec_scores.append(dtc_prec_score)
print('\n Precision score for Fold ' + str(n_fold + 1) + ' --> ' + str(dtc_prec_score) + '\n')
#
dtc_rec_score = recall_score(Y_test, Y_pred)
dtc_recall_scores.append(dtc_rec_score)
print('\n Recall score for Fold ' + str(n_fold + 1) + ' --> ' + str(dtc_rec_score) + '\n')
#
dtc_f_score = f1_score(Y_test, Y_pred)
dtc_f1_scores.append(dtc_f_score)
print('\n F1 score for Fold ' + str(n_fold + 1) + ' --> ' + str(dtc_f_score) + '\n')
#
    dtc_rmse_score = mean_squared_error(Y_test, Y_pred, squared=False)
dtc_rmse_scores.append(dtc_rmse_score)
print('\n Rmse score for Fold ' + str(n_fold + 1) + ' --> ' + str(dtc_rmse_score) + '\n')
#
    dtc_mse_score = mean_squared_error(Y_test, Y_pred, squared=True)
dtc_mse_scores.append(dtc_mse_score)
print('\n Mse score for Fold ' + str(n_fold + 1) + ' --> ' + str(dtc_mse_score) + '\n')
print("\n Accuracy array: ")
print(dtc_acc_scores)
print("\n Precision array: ")
print(dtc_prec_scores)
print("\n Recall array: ")
print(dtc_recall_scores)
print("\n F1 score array: ")
print(dtc_f1_scores)
print("\n Rmse array: ")
print(dtc_rmse_scores)
print("\n Mse array: ")
print(dtc_mse_scores)
print('Avg. accuracy score :' + str(np.mean(dtc_acc_scores)))
print('Avg. Precision score :' + str(np.mean(dtc_prec_scores)))
print('Avg. Recall score :' + str(np.mean(dtc_recall_scores)))
print('Avg. F1 score :' + str(np.mean(dtc_f1_scores)))
print('Avg. Rmse score :' + str(np.mean(dtc_rmse_scores)))
print('Avg. Mse score :' + str(np.mean(dtc_mse_scores)))
# #
text_representation = tree.export_text(dtc)
print(text_representation)
# Naive Bayes Algorithm
# We create a object from GaussianNB class
gnb = GaussianNB()
gnb.fit(X_train, Y_train)
result = gnb.predict(X_test)
accuracy_nb = accuracy_score(Y_test, result)
print("Naive Bayes accuracy without k-fold cv: " + str(accuracy_nb * 100))
# Kfold Split
folds = KFold(n_splits=10, shuffle=True, random_state=35)
naive_bayes_acc_scores = []
naive_bayes_prec_scores=[]
naive_bayes_prec_recall_scores=[]
naive_bayes_prec_rmse_scores=[]
naive_bayes_prec_mse_scores=[]
naive_bayes_prec_f1_scores=[]
for n_fold, (train_index, valid_index) in enumerate(folds.split(X, Y)):
print('\n Fold ' + str(n_fold + 1) +
' \n\n train ids :' + str(train_index) +
' \n\n validation ids :' + str(valid_index))
X_train, X_valid = X[train_index], X[valid_index]
Y_train, Y_valid = Y[train_index], Y[valid_index]
gnb.fit(X_train, Y_train)
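    # Same caveat as in the KNN folds above: scoring is on the fixed test set.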
Y_pred = gnb.predict(X_test)
#
naive_bayes_acc_score = accuracy_score(Y_test, Y_pred)
naive_bayes_acc_scores.append(naive_bayes_acc_score)
print('\n Accuracy score for Fold ' + str(n_fold + 1) + ' --> ' + str(naive_bayes_acc_score) + '\n')
naive_bayes_prec_score=precision_score(Y_test, Y_pred)
naive_bayes_prec_scores.append(naive_bayes_prec_score)
print('\n Precision score for Fold ' + str(n_fold + 1) + ' --> ' + str(naive_bayes_prec_score) + '\n')
#
naive_bayes_prec_rec_score = recall_score(Y_test, Y_pred)
naive_bayes_prec_recall_scores.append(naive_bayes_prec_rec_score)
print('\n Recall score for Fold ' + str(n_fold + 1) + ' --> ' + str(naive_bayes_prec_rec_score) + '\n')
#
naive_bayes_prec_f_score = f1_score(Y_test, Y_pred)
naive_bayes_prec_f1_scores.append(naive_bayes_prec_f_score)
print('\n F1 score for Fold ' + str(n_fold + 1) + ' --> ' + str(naive_bayes_prec_f_score) + '\n')
#
    naive_bayes_prec_rmse_score = mean_squared_error(Y_test, Y_pred, squared=False)
naive_bayes_prec_rmse_scores.append(naive_bayes_prec_rmse_score)
print('\n Rmse score for Fold ' + str(n_fold + 1) + ' --> ' + str(naive_bayes_prec_rmse_score) + '\n')
#
    naive_bayes_prec_mse_score = mean_squared_error(Y_test, Y_pred, squared=True)
naive_bayes_prec_mse_scores.append(naive_bayes_prec_mse_score)
print('\n Mse score for Fold ' + str(n_fold + 1) + ' --> ' + str(naive_bayes_prec_mse_score) + '\n')
print("\n Accuracy array: ")
print(naive_bayes_acc_scores)
print("\n Precision array: ")
print(naive_bayes_prec_scores)
print("\n Recall array: ")
print(naive_bayes_prec_recall_scores)
print("\n F1 score array: ")
print(naive_bayes_prec_f1_scores)
print("\n Rmse array: ")
print(naive_bayes_prec_rmse_scores)
print("\n Mse array: ")
print(naive_bayes_prec_mse_scores)
print('Avg. accuracy score :' + str(np.mean(naive_bayes_acc_scores)))
print('Avg. Precision score :' + str(np.mean(naive_bayes_prec_scores)))
print('Avg. Recall score :' + str(np.mean(naive_bayes_prec_recall_scores)))
print('Avg. F1 score :' + str(np.mean(naive_bayes_prec_f1_scores)))
print('Avg. Rmse score :' + str(np.mean(naive_bayes_prec_rmse_scores)))
print('Avg. Mse score :' + str(np.mean(naive_bayes_prec_mse_scores)))
#
|
{"hexsha": "b80a5649fb3473a47dd8b52d56a4aaecae7ce3e9", "size": 14961, "ext": "py", "lang": "Python", "max_stars_repo_path": "Diabetes_Prediction.py", "max_stars_repo_name": "mehtapbaglan/Diabetes-Prediction", "max_stars_repo_head_hexsha": "97edfb18d1fa822a79e3d4bcf3b5c7935a092581", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-10T08:18:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T18:29:12.000Z", "max_issues_repo_path": "Diabetes_Prediction.py", "max_issues_repo_name": "busezengin/Diabetes-Prediction", "max_issues_repo_head_hexsha": "97edfb18d1fa822a79e3d4bcf3b5c7935a092581", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Diabetes_Prediction.py", "max_forks_repo_name": "busezengin/Diabetes-Prediction", "max_forks_repo_head_hexsha": "97edfb18d1fa822a79e3d4bcf3b5c7935a092581", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-23T09:11:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-23T09:11:06.000Z", "avg_line_length": 35.5368171021, "max_line_length": 175, "alphanum_fraction": 0.6991511263, "include": true, "reason": "import numpy", "num_tokens": 4094}
|
# encoding=utf-8
"""
Created on 21:29 2018/11/12
@author: Jindong Wang
"""
import numpy as np
import scipy.io
import scipy.linalg
import sklearn.metrics
from sklearn.neighbors import KNeighborsClassifier
def kernel(ker, X1, X2, gamma):
K = None
if not ker or ker == 'primal':
K = X1
elif ker == 'linear':
if X2 is not None:
K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1).T, np.asarray(X2).T)
else:
K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1).T)
elif ker == 'rbf':
if X2 is not None:
K = sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1).T, np.asarray(X2).T, gamma)
else:
K = sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1).T, None, gamma)
return K
class TCA:
def __init__(self, kernel_type='primal', dim=30, lamb=1, gamma=1):
'''
Init func
:param kernel_type: kernel, values: 'primal' | 'linear' | 'rbf'
:param dim: dimension after transfer
:param lamb: lambda value in equation
:param gamma: kernel bandwidth for rbf kernel
'''
self.kernel_type = kernel_type
self.dim = dim
self.lamb = lamb
self.gamma = gamma
def fit(self, Xs, Xt):
'''
Transform Xs and Xt
:param Xs: ns * n_feature, source feature
:param Xt: nt * n_feature, target feature
:return: Xs_new and Xt_new after TCA
'''
X = np.hstack((Xs.T, Xt.T))
X /= np.linalg.norm(X, axis=0)
m, n = X.shape
ns, nt = len(Xs), len(Xt)
        # MMD coefficient vector: +1/ns entries for source rows, -1/nt for target rows.
        e = np.vstack((1 / ns * np.ones((ns, 1)), -1 / nt * np.ones((nt, 1))))
        M = e * e.T  # MMD matrix M = e e^T (normalized below)
        M = M / np.linalg.norm(M, 'fro')
        H = np.eye(n) - 1 / n * np.ones((n, n))  # centering matrix
K = kernel(self.kernel_type, X, None, gamma=self.gamma)
n_eye = m if self.kernel_type == 'primal' else n
a, b = np.linalg.multi_dot([K, M, K.T]) + self.lamb * np.eye(n_eye), np.linalg.multi_dot([K, H, K.T])
w, V = scipy.linalg.eig(a, b)
ind = np.argsort(w)
A = V[:, ind[:self.dim]]
Z = np.dot(A.T, K)
Z /= np.linalg.norm(Z, axis=0)
Xs_new, Xt_new = Z[:, :ns].T, Z[:, ns:].T
return Xs_new, Xt_new
def fit_predict(self, Xs, Ys, Xt, Yt):
'''
Transform Xs and Xt, then make predictions on target using 1NN
:param Xs: ns * n_feature, source feature
:param Ys: ns * 1, source label
:param Xt: nt * n_feature, target feature
:param Yt: nt * 1, target label
:return: Accuracy and predicted_labels on the target domain
'''
Xs_new, Xt_new = self.fit(Xs, Xt)
clf = KNeighborsClassifier(n_neighbors=1)
clf.fit(Xs_new, Ys.ravel())
y_pred = clf.predict(Xt_new)
acc = sklearn.metrics.accuracy_score(Yt, y_pred)
return acc, y_pred
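# A minimal smoke-test sketch with synthetic data (illustrative only; the
# shapes and labels below are assumptions, not the Office-Caltech setup used
# in __main__):
#
#   rng = np.random.RandomState(0)
#   Xs = rng.randn(50, 20); Ys = rng.randint(0, 2, (50, 1))
#   Xt = rng.randn(40, 20) + 0.5; Yt = rng.randint(0, 2, (40, 1))
#   acc, y_pred = TCA(kernel_type='linear', dim=5).fit_predict(Xs, Ys, Xt, Yt)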
if __name__ == '__main__':
domains = ['caltech.mat', 'amazon.mat', 'webcam.mat', 'dslr.mat']
for i in [2]:
for j in [3]:
if i != j:
src, tar = 'data/' + domains[i], 'data/' + domains[j]
src_domain, tar_domain = scipy.io.loadmat(src), scipy.io.loadmat(tar)
Xs, Ys, Xt, Yt = src_domain['feas'], src_domain['label'], tar_domain['feas'], tar_domain['label']
tca = TCA(kernel_type='linear', dim=30, lamb=1, gamma=1)
acc, ypre = tca.fit_predict(Xs, Ys, Xt, Yt)
print(acc)
# It should print 0.910828025477707
|
{"hexsha": "c47eb5ab9ea0e10d381a3edf140b7de84441f316", "size": 3554, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/traditional/TCA/TCA.py", "max_stars_repo_name": "Flsahkong/transferlearning", "max_stars_repo_head_hexsha": "fdc76a7e03d7771517ea938cb5b90aa5dfb8dfbd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2019-12-26T08:51:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-16T06:44:52.000Z", "max_issues_repo_path": "code/traditional/TCA/TCA.py", "max_issues_repo_name": "Mrzhangxiaohua/transferlearning", "max_issues_repo_head_hexsha": "5775960eb18685b4dd95c9baa0672c24529851cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/traditional/TCA/TCA.py", "max_forks_repo_name": "Mrzhangxiaohua/transferlearning", "max_forks_repo_head_hexsha": "5775960eb18685b4dd95c9baa0672c24529851cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-09-19T08:02:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:20:18.000Z", "avg_line_length": 35.898989899, "max_line_length": 113, "alphanum_fraction": 0.5557118739, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1028}
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantum gates that are commonly used in the literature."""
from typing import (
Union, Tuple, Optional, List, Callable, cast, Iterable, Sequence,
Any)
import numpy as np
from cirq import linalg, protocols
from cirq.ops import (
gate_features,
eigen_gate,
raw_types,
gate_operation,
)
from cirq.type_workarounds import NotImplementedType
# Note: avoiding 'from/as' because it creates a circular dependency in python 2.
import cirq.ops.phased_x_gate
class CZPowGate(eigen_gate.EigenGate,
gate_features.TwoQubitGate,
gate_features.InterchangeableQubitsGate):
"""Phases the |11⟩ state of two adjacent qubits by a fixed amount.
    This is the controlled-Z gate raised to a (possibly fractional) power.
"""
def _eigen_components(self):
return [
(0, np.diag([1, 1, 1, 0])),
(1, np.diag([0, 0, 0, 1])),
]
def _apply_unitary_to_tensor_(self,
target_tensor: np.ndarray,
available_buffer: np.ndarray,
axes: Sequence[int],
) -> Union[np.ndarray, NotImplementedType]:
if protocols.is_parameterized(self):
return NotImplemented
c = 1j**(2 * self._exponent)
one_one = linalg.slice_for_qubits_equal_to(axes, 0b11)
target_tensor[one_one] *= c
p = 1j**(2 * self._exponent * self._global_shift)
if p != 1:
target_tensor *= p
return target_tensor
def _phase_by_(self, phase_turns, qubit_index):
return self
def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs
) -> protocols.CircuitDiagramInfo:
return protocols.CircuitDiagramInfo(
wire_symbols=('@', '@'),
exponent=self._diagram_exponent(args))
def _qasm_(self,
args: protocols.QasmArgs,
qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:
if self._exponent != 1:
return None # Don't have an equivalent gate in QASM
args.validate_version('2.0')
return args.format('cz {0},{1};\n', qubits[0], qubits[1])
def __str__(self) -> str:
if self._exponent == 1:
return 'CZ'
return 'CZ**{!r}'.format(self._exponent)
def __repr__(self) -> str:
if self._exponent == 1:
return 'cirq.CZ'
return '(cirq.CZ**{!r})'.format(self._exponent)
def _rads_func_symbol(func_name: str,
args: protocols.CircuitDiagramInfoArgs,
half_turns: Any) -> str:
unit = 'π' if args.use_unicode_characters else 'pi'
if half_turns == 1:
return '{}({})'.format(func_name, unit)
if half_turns == -1:
return '{}(-{})'.format(func_name, unit)
return '{}({}{})'.format(func_name, half_turns, unit)
class XPowGate(eigen_gate.EigenGate,
gate_features.SingleQubitGate):
"""Fixed rotation around the X axis of the Bloch sphere."""
def _apply_unitary_to_tensor_(self,
target_tensor: np.ndarray,
available_buffer: np.ndarray,
axes: Sequence[int],
) -> Union[np.ndarray, NotImplementedType]:
if self._exponent != 1:
return NotImplemented
zero = linalg.slice_for_qubits_equal_to(axes, 0)
one = linalg.slice_for_qubits_equal_to(axes, 1)
available_buffer[zero] = target_tensor[one]
available_buffer[one] = target_tensor[zero]
p = 1j**(2 * self._exponent * self._global_shift)
if p != 1:
available_buffer *= p
return available_buffer
def _eigen_components(self):
return [
(0, np.array([[0.5, 0.5], [0.5, 0.5]])),
(1, np.array([[0.5, -0.5], [-0.5, 0.5]])),
]
def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs
) -> Union[str, protocols.CircuitDiagramInfo]:
if self._global_shift == -0.5:
return _rads_func_symbol(
'Rx',
args,
self._diagram_exponent(args, ignore_global_phase=False))
return protocols.CircuitDiagramInfo(
wire_symbols=('X',),
exponent=self._diagram_exponent(args))
def _qasm_(self,
args: protocols.QasmArgs,
qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:
args.validate_version('2.0')
if self._exponent == 1:
return args.format('x {0};\n', qubits[0])
else:
return args.format('rx({0:half_turns}) {1};\n',
self._exponent, qubits[0])
def _phase_by_(self, phase_turns, qubit_index):
"""See `cirq.SupportsPhase`."""
return cirq.ops.phased_x_gate.PhasedXPowGate(
exponent=self._exponent,
phase_exponent=phase_turns * 2)
def __str__(self) -> str:
if self._exponent == 1:
return 'X'
return 'X**{!r}'.format(self._exponent)
def __repr__(self) -> str:
if self._global_shift == -0.5:
return 'cirq.Rx(np.pi*{!r})'.format(self._exponent)
if self._global_shift == 0:
if self._exponent == 1:
return 'cirq.X'
return '(cirq.X**{!r})'.format(self._exponent)
return (
'cirq.XPowGate(exponent={!r}, '
'global_shift={!r})'
).format(self._exponent, self._global_shift)
class YPowGate(eigen_gate.EigenGate,
gate_features.SingleQubitGate):
"""Fixed rotation around the Y axis of the Bloch sphere."""
def _eigen_components(self):
return [
(0, np.array([[0.5, -0.5j], [0.5j, 0.5]])),
(1, np.array([[0.5, 0.5j], [-0.5j, 0.5]])),
]
def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs
) -> Union[str, protocols.CircuitDiagramInfo]:
if self._global_shift == -0.5:
return _rads_func_symbol(
'Ry',
args,
self._diagram_exponent(args, ignore_global_phase=False))
return protocols.CircuitDiagramInfo(
wire_symbols=('Y',),
exponent=self._diagram_exponent(args))
def _qasm_(self,
args: protocols.QasmArgs,
qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:
args.validate_version('2.0')
if self._exponent == 1:
return args.format('y {0};\n', qubits[0])
else:
return args.format('ry({0:half_turns}) {1};\n',
self._exponent, qubits[0])
def _phase_by_(self, phase_turns, qubit_index):
"""See `cirq.SupportsPhase`."""
return cirq.ops.phased_x_gate.PhasedXPowGate(
exponent=self._exponent,
phase_exponent=0.5 + phase_turns * 2)
def __str__(self) -> str:
if self._exponent == 1:
return 'Y'
return 'Y**{!r}'.format(self._exponent)
def __repr__(self) -> str:
if self._global_shift == -0.5:
return 'cirq.Ry(np.pi*{!r})'.format(self._exponent)
if self._global_shift == 0:
if self._exponent == 1:
return 'cirq.Y'
return '(cirq.Y**{!r})'.format(self._exponent)
return (
'cirq.YPowGate(exponent={!r}, '
'global_shift={!r})'
).format(self._exponent, self._global_shift)
class ZPowGate(eigen_gate.EigenGate,
gate_features.SingleQubitGate):
"""Fixed rotation around the Z axis of the Bloch sphere."""
def _apply_unitary_to_tensor_(self,
target_tensor: np.ndarray,
available_buffer: np.ndarray,
axes: Sequence[int],
) -> Union[np.ndarray, NotImplementedType]:
if protocols.is_parameterized(self):
return NotImplemented
one = linalg.slice_for_qubits_equal_to(axes, 1)
c = 1j**(self._exponent * 2)
target_tensor[one] *= c
p = 1j**(2 * self._exponent * self._global_shift)
if p != 1:
target_tensor *= p
return target_tensor
def _eigen_components(self):
return [
(0, np.diag([1, 0])),
(1, np.diag([0, 1])),
]
def _phase_by_(self, phase_turns: float, qubit_index: int):
return self
def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs
) -> Union[str, protocols.CircuitDiagramInfo]:
if self._global_shift == -0.5:
return _rads_func_symbol(
'Rz',
args,
self._diagram_exponent(args, ignore_global_phase=False))
e = self._diagram_exponent(args)
if e in [-0.25, 0.25]:
return protocols.CircuitDiagramInfo(
wire_symbols=('T',),
exponent=cast(float, e) * 4)
if e in [-0.5, 0.5]:
return protocols.CircuitDiagramInfo(
wire_symbols=('S',),
exponent=cast(float, e) * 2)
return protocols.CircuitDiagramInfo(
wire_symbols=('Z',),
exponent=e)
def _qasm_(self,
args: protocols.QasmArgs,
qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:
args.validate_version('2.0')
if self._exponent == 1:
return args.format('z {0};\n', qubits[0])
else:
return args.format('rz({0:half_turns}) {1};\n',
self._exponent, qubits[0])
def __str__(self) -> str:
if self._exponent == 0.25:
return 'T'
if self._exponent == -0.25:
return 'T**-1'
if self._exponent == 0.5:
return 'S'
if self._exponent == -0.5:
return 'S**-1'
if self._exponent == 1:
return 'Z'
return 'Z**{}'.format(self._exponent)
def __repr__(self) -> str:
if self._global_shift == -0.5:
return 'cirq.Rz(np.pi*{!r})'.format(self._exponent)
if self._global_shift == 0:
if self._exponent == 0.25:
return 'cirq.T'
if self._exponent == -0.25:
return '(cirq.T**-1)'
if self._exponent == 0.5:
return 'cirq.S'
if self._exponent == -0.5:
return '(cirq.S**-1)'
if self._exponent == 1:
return 'cirq.Z'
return '(cirq.Z**{!r})'.format(self._exponent)
return (
'cirq.ZPowGate(exponent={!r}, '
'global_shift={!r})'
).format(self._exponent, self._global_shift)
class MeasurementGate(raw_types.Gate):
"""Indicates that qubits should be measured plus a key to identify results.
Attributes:
key: The string key of the measurement.
invert_mask: A list of values indicating whether the corresponding
qubits should be flipped. The list's length must not be longer than
            the number of qubits, but it is permitted to be shorter.
Qubits with indices past the end of the mask are not flipped.
"""
def __init__(self,
key: str = '',
invert_mask: Tuple[bool, ...] = ()) -> None:
self.key = key
self.invert_mask = invert_mask or ()
@staticmethod
def is_measurement(op: Union[raw_types.Gate, raw_types.Operation]) -> bool:
if isinstance(op, MeasurementGate):
return True
if (isinstance(op, gate_operation.GateOperation) and
isinstance(op.gate, MeasurementGate)):
return True
return False
def with_bits_flipped(self, *bit_positions: int) -> 'MeasurementGate':
"""Toggles whether or not the measurement inverts various outputs."""
old_mask = self.invert_mask or ()
n = max(len(old_mask) - 1, *bit_positions) + 1
new_mask = [k < len(old_mask) and old_mask[k] for k in range(n)]
for b in bit_positions:
new_mask[b] = not new_mask[b]
return MeasurementGate(key=self.key, invert_mask=tuple(new_mask))
def validate_args(self, qubits):
if (self.invert_mask is not None and
len(self.invert_mask) > len(qubits)):
raise ValueError('len(invert_mask) > len(qubits)')
def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs
) -> protocols.CircuitDiagramInfo:
n = (max(1, len(self.invert_mask))
if args.known_qubit_count is None
else args.known_qubit_count)
symbols = ['M'] * n
# Show which output bits are negated.
if self.invert_mask:
for i, b in enumerate(self.invert_mask):
if b:
symbols[i] = '!M'
# Mention the measurement key.
if (not args.known_qubits or
self.key != _default_measurement_key(args.known_qubits)):
symbols[0] += "('{}')".format(self.key)
return protocols.CircuitDiagramInfo(tuple(symbols))
def _qasm_(self,
args: protocols.QasmArgs,
qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:
args.validate_version('2.0')
invert_mask = self.invert_mask
if len(invert_mask) < len(qubits):
invert_mask = (invert_mask
+ (False,) * (len(qubits) - len(invert_mask)))
lines = []
for i, (qubit, inv) in enumerate(zip(qubits, invert_mask)):
if inv:
lines.append(args.format(
'x {0}; // Invert the following measurement\n', qubit))
lines.append(args.format('measure {0} -> {1:meas}[{2}];\n',
qubit, self.key, i))
return ''.join(lines)
def __repr__(self):
return 'cirq.MeasurementGate({}, {})'.format(repr(self.key),
repr(self.invert_mask))
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.key == other.key and self.invert_mask == other.invert_mask
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((MeasurementGate, self.key, self.invert_mask))
def _default_measurement_key(qubits: Iterable[raw_types.QubitId]) -> str:
return ','.join(str(q) for q in qubits)
def measure(*qubits: raw_types.QubitId,
key: Optional[str] = None,
invert_mask: Tuple[bool, ...] = ()
) -> gate_operation.GateOperation:
"""Returns a single MeasurementGate applied to all the given qubits.
The qubits are measured in the computational basis.
Args:
*qubits: The qubits that the measurement gate should measure.
key: The string key of the measurement. If this is None, it defaults
to a comma-separated list of the target qubits' str values.
invert_mask: A list of Truthy or Falsey values indicating whether
the corresponding qubits should be flipped. None indicates no
inverting should be done.
Returns:
An operation targeting the given qubits with a measurement.
Raises:
ValueError if the qubits are not instances of QubitId.
"""
for qubit in qubits:
if isinstance(qubit, np.ndarray):
raise ValueError(
                'measure() was called with a numpy ndarray. Perhaps you meant '
                'to call measure_state_vector on a numpy array?'
)
elif not isinstance(qubit, raw_types.QubitId):
raise ValueError(
'measure() was called with type different than QubitId.')
if key is None:
key = _default_measurement_key(qubits)
return MeasurementGate(key, invert_mask).on(*qubits)
def measure_each(*qubits: raw_types.QubitId,
key_func: Callable[[raw_types.QubitId], str] = str
) -> List[gate_operation.GateOperation]:
"""Returns a list of operations individually measuring the given qubits.
The qubits are measured in the computational basis.
Args:
*qubits: The qubits to measure.
key_func: Determines the key of the measurements of each qubit. Takes
the qubit and returns the key for that qubit. Defaults to str.
Returns:
A list of operations individually measuring the given qubits.
"""
return [MeasurementGate(key_func(q)).on(q) for q in qubits]
X = XPowGate() # Pauli X gate.
Y = YPowGate() # Pauli Y gate.
Z = ZPowGate() # Pauli Z gate.
CZ = CZPowGate() # Negates the amplitude of the |11⟩ state.
S = Z**0.5
T = Z**0.25
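# A minimal usage sketch for the gate constants above (hedged: `cirq.LineQubit`
# lives in the wider package and is not imported by this module):
#   q0, q1 = cirq.LineQubit.range(2)
#   ops = [X(q0), S(q1), CZ(q0, q1), T(q0)**-1]
# Applying a gate to qubits yields a GateOperation bound to those qubits.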
class HPowGate(eigen_gate.EigenGate, gate_features.SingleQubitGate):
"""Rotation around the X+Z axis of the Bloch sphere."""
def _eigen_components(self):
s = np.sqrt(2)
component0 = np.array([
[3 + 2 * s, 1 + s],
[1 + s, 1]
]) / (4 + 2 * s)
component1 = np.array([
[3 - 2 * s, 1 - s],
[1 - s, 1]
]) / (4 - 2 * s)
return [(0, component0), (1, component1)]
def _apply_unitary_to_tensor_(self,
target_tensor: np.ndarray,
available_buffer: np.ndarray,
axes: Sequence[int],
) -> Union[np.ndarray, NotImplementedType]:
if self._exponent != 1:
return NotImplemented
zero = linalg.slice_for_qubits_equal_to(axes, 0)
one = linalg.slice_for_qubits_equal_to(axes, 1)
target_tensor[one] -= target_tensor[zero]
target_tensor[one] *= -0.5
target_tensor[zero] -= target_tensor[one]
p = 1j**(2 * self._exponent * self._global_shift)
target_tensor *= np.sqrt(2) * p
return target_tensor
def _decompose_(self, qubits):
q = qubits[0]
if self._exponent == 1:
yield Y(q)**0.5, X(q)
return
yield Y(q)**0.25
yield X(q)**self._exponent
yield Y(q)**-0.25
def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs
) -> protocols.CircuitDiagramInfo:
return protocols.CircuitDiagramInfo(('H',))
def _qasm_(self,
args: protocols.QasmArgs,
qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:
args.validate_version('2.0')
if self._exponent == 1:
return args.format('h {0};\n', qubits[0])
else:
return args.format('ry({0:half_turns}) {3};\n'
'rx({1:half_turns}) {3};\n'
'ry({2:half_turns}) {3};\n',
0.25, self._exponent, -0.25, qubits[0])
def __str__(self):
if self._exponent == 1:
return 'H'
return 'H^{}'.format(self._exponent)
def __repr__(self):
if self._exponent == 1:
return 'cirq.H'
return '(cirq.H**{!r})'.format(self._exponent)
H = HPowGate() # Hadamard gate.
class CNotPowGate(eigen_gate.EigenGate, gate_features.TwoQubitGate):
"""The controlled-not gate, possibly raised to a power.
    When applying CNOT (controlled-not) to qubits, you can either use
positional arguments CNOT(q1, q2), where q2 is toggled when q1 is on,
or named arguments CNOT(control=q1, target=q2).
(Mixing the two is not permitted.)
"""
def _decompose_(self, qubits):
c, t = qubits
yield Y(t)**-0.5
yield CZ(c, t)**self._exponent
yield Y(t)**0.5
def _eigen_components(self):
return [
(0, np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0.5, 0.5],
[0, 0, 0.5, 0.5]])),
(1, np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0.5, -0.5],
[0, 0, -0.5, 0.5]])),
]
def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs
) -> protocols.CircuitDiagramInfo:
return protocols.CircuitDiagramInfo(
wire_symbols=('@', 'X'),
exponent=self._diagram_exponent(args))
def _apply_unitary_to_tensor_(self,
target_tensor: np.ndarray,
available_buffer: np.ndarray,
axes: Sequence[int],
) -> Union[np.ndarray, NotImplementedType]:
if self._exponent != 1:
return NotImplemented
oo = linalg.slice_for_qubits_equal_to(axes, 0b11)
zo = linalg.slice_for_qubits_equal_to(axes, 0b01)
available_buffer[oo] = target_tensor[oo]
target_tensor[oo] = target_tensor[zo]
target_tensor[zo] = available_buffer[oo]
p = 1j**(2 * self._exponent * self._global_shift)
if p != 1:
target_tensor *= p
return target_tensor
def _qasm_(self,
args: protocols.QasmArgs,
qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:
if self._exponent != 1:
return None # Don't have an equivalent gate in QASM
args.validate_version('2.0')
return args.format('cx {0},{1};\n', qubits[0], qubits[1])
def __str__(self) -> str:
if self._exponent == 1:
return 'CNOT'
return 'CNOT**{!r}'.format(self._exponent)
def __repr__(self) -> str:
if self._exponent == 1:
return 'cirq.CNOT'
return '(cirq.CNOT**{!r})'.format(self._exponent)
def on(self, *args: raw_types.QubitId,
**kwargs: raw_types.QubitId) -> gate_operation.GateOperation:
if not kwargs:
return super().on(*args)
if not args and set(kwargs.keys()) == {'control', 'target'}:
return super().on(kwargs['control'], kwargs['target'])
        raise ValueError(
            "Expected two positional arguments or else 'control' AND 'target' "
"keyword arguments. But got args={!r}, kwargs={!r}.".format(
args, kwargs))
CNOT = CNotPowGate() # Controlled Not Gate.
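# The two call styles from the class docstring are equivalent (sketch; q1 and
# q2 stand for any two QubitId instances):
#   CNOT(q1, q2)
#   CNOT(control=q1, target=q2)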
class SwapPowGate(eigen_gate.EigenGate,
gate_features.TwoQubitGate,
gate_features.InterchangeableQubitsGate):
"""The SWAP gate, possibly raised to a power. Exchanges qubits."""
def _decompose_(self, qubits):
"""See base class."""
a, b = qubits
yield CNOT(a, b)
yield CNOT(b, a) ** self._exponent
yield CNOT(a, b)
def _eigen_components(self):
return [
(0, np.array([[1, 0, 0, 0],
[0, 0.5, 0.5, 0],
[0, 0.5, 0.5, 0],
[0, 0, 0, 1]])),
(1, np.array([[0, 0, 0, 0],
[0, 0.5, -0.5, 0],
[0, -0.5, 0.5, 0],
[0, 0, 0, 0]])),
]
def _apply_unitary_to_tensor_(self,
target_tensor: np.ndarray,
available_buffer: np.ndarray,
axes: Sequence[int],
) -> Union[np.ndarray, NotImplementedType]:
if self._exponent != 1:
return NotImplemented
zo = linalg.slice_for_qubits_equal_to(axes, 0b01)
oz = linalg.slice_for_qubits_equal_to(axes, 0b10)
available_buffer[zo] = target_tensor[zo]
target_tensor[zo] = target_tensor[oz]
target_tensor[oz] = available_buffer[zo]
p = 1j**(2 * self._exponent * self._global_shift)
if p != 1:
target_tensor *= p
return target_tensor
def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs
) -> protocols.CircuitDiagramInfo:
if not args.use_unicode_characters:
return protocols.CircuitDiagramInfo(
wire_symbols=('swap', 'swap'),
exponent=self._diagram_exponent(args))
return protocols.CircuitDiagramInfo(
wire_symbols=('×', '×'),
exponent=self._diagram_exponent(args))
def _qasm_(self,
args: protocols.QasmArgs,
qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:
if self._exponent != 1:
return None # Don't have an equivalent gate in QASM
args.validate_version('2.0')
return args.format('swap {0},{1};\n', qubits[0], qubits[1])
def __str__(self) -> str:
if self._exponent == 1:
return 'SWAP'
return 'SWAP**{!r}'.format(self._exponent)
def __repr__(self) -> str:
if self._exponent == 1:
return 'cirq.SWAP'
return '(cirq.SWAP**{!r})'.format(self._exponent)
SWAP = SwapPowGate() # Exchanges two qubits' states.
class ISwapPowGate(eigen_gate.EigenGate,
gate_features.InterchangeableQubitsGate,
gate_features.TwoQubitGate):
"""Rotates the |01⟩-vs-|10⟩ subspace of two qubits around its Bloch X-axis.
When exponent=1, swaps the two qubits and phases |01⟩ and |10⟩ by i. More
generally, this gate's matrix is defined as follows:
ISWAP**t ≡ exp(+i π t (X⊗X + Y⊗Y) / 4)
≡ [1 0 0 0]
[0 cos(π·t/2) i·sin(π·t/2) 0]
[0 i·sin(π·t/2) cos(π·t/2) 0]
[0 0 0 1]
"""
def _eigen_components(self):
return [
(0, np.diag([1, 0, 0, 1])),
(+0.5, np.array([[0, 0, 0, 0],
[0, 0.5, 0.5, 0],
[0, 0.5, 0.5, 0],
[0, 0, 0, 0]])),
(-0.5, np.array([[0, 0, 0, 0],
[0, 0.5, -0.5, 0],
[0, -0.5, 0.5, 0],
[0, 0, 0, 0]])),
]
def _decompose_(self, qubits):
a, b = qubits
yield CNOT(a, b)
yield H(a)
yield CNOT(b, a)
yield S(a)**self._exponent
yield CNOT(b, a)
yield S(a)**-self._exponent
yield H(a)
yield CNOT(a, b)
def _apply_unitary_to_tensor_(self,
target_tensor: np.ndarray,
available_buffer: np.ndarray,
axes: Sequence[int],
) -> Union[np.ndarray, NotImplementedType]:
if self._exponent != 1:
return NotImplemented
zo = linalg.slice_for_qubits_equal_to(axes, 0b01)
oz = linalg.slice_for_qubits_equal_to(axes, 0b10)
available_buffer[zo] = target_tensor[zo]
target_tensor[zo] = target_tensor[oz]
target_tensor[oz] = available_buffer[zo]
target_tensor[zo] *= 1j
target_tensor[oz] *= 1j
p = 1j**(2 * self._exponent * self._global_shift)
if p != 1:
target_tensor *= p
return target_tensor
def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs
) -> protocols.CircuitDiagramInfo:
return protocols.CircuitDiagramInfo(
wire_symbols=('iSwap', 'iSwap'),
exponent=self._diagram_exponent(args))
def __str__(self) -> str:
if self._exponent == 1:
return 'ISWAP'
return 'ISWAP**{!r}'.format(self._exponent)
def __repr__(self):
if self._exponent == 1:
return 'cirq.ISWAP'
return '(cirq.ISWAP**{!r})'.format(self._exponent)
# Swaps two qubits while phasing the swapped subspace by i.
ISWAP = ISwapPowGate()
def Rx(rads: float) -> XPowGate:
"""Returns a gate with the matrix e^{-i X rads / 2}."""
return XPowGate(exponent=rads / np.pi, global_shift=-0.5)
def Ry(rads: float) -> YPowGate:
"""Returns a gate with the matrix e^{-i Y rads / 2}."""
return YPowGate(exponent=rads / np.pi, global_shift=-0.5)
def Rz(rads: float) -> ZPowGate:
"""Returns a gate with the matrix e^{-i Z rads / 2}."""
return ZPowGate(exponent=rads / np.pi, global_shift=-0.5)
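# Sanity-check sketch for the wrappers above: Rx(np.pi) is exp(-i pi X / 2),
# i.e. -i times the Pauli X unitary, so (up to float error)
#   protocols.unitary(Rx(np.pi)) == -1j * protocols.unitary(X)
# (`protocols.unitary` is assumed available to callers; this module imports
# `protocols` but does not otherwise call it at module scope.)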
|
{"hexsha": "fc8f623874dec6cf5101e12080d838f6a1be61a9", "size": 29363, "ext": "py", "lang": "Python", "max_stars_repo_path": "cirq/ops/common_gates.py", "max_stars_repo_name": "sleichen/Cirq", "max_stars_repo_head_hexsha": "02f715203406d1f2af2d86e7561af09a2cdd4d45", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cirq/ops/common_gates.py", "max_issues_repo_name": "sleichen/Cirq", "max_issues_repo_head_hexsha": "02f715203406d1f2af2d86e7561af09a2cdd4d45", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cirq/ops/common_gates.py", "max_forks_repo_name": "sleichen/Cirq", "max_forks_repo_head_hexsha": "02f715203406d1f2af2d86e7561af09a2cdd4d45", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9400244798, "max_line_length": 80, "alphanum_fraction": 0.5467424991, "include": true, "reason": "import numpy", "num_tokens": 7458}
|
# Lint as: python3
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
r"""Run a Stan model to get the ground truth.
This will run your target distribution using PyStan and generate a Python source
file with global variables containing the ground truth values.
Usage (run from TensorFlow Probability source directory):
```
venv=$(mktemp -d)
virtualenv -p python3.6 $venv
source $venv/bin/activate
pip install cmdstanpy==0.8 pandas numpy tf-nightly tfds-nightly
install_cmdstan
bazel run //tools/inference_gym_ground_truth:get_ground_truth -- \
--target=<function name from targets.py>
```
NOTE: By default this will run for a *really* long time and use *a lot* of RAM,
be cautious! Reduce the value of the `stan_samples` flag to make things more
reasonable for quick tests.
NOTE: This must be run locally, and requires at least the following packages:
- cmdstanpy (also cmdstan: `pip install cmdstanpy; install_cmdstan`)
- numpy
- pandas
- tf-nightly
- tfds-nightly
"""
import functools
import os
import sys
from absl import app
from absl import flags
import numpy as np
import pandas as pd
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from spinoffs.inference_gym.internal import ground_truth_encoding
from spinoffs.inference_gym.tools.stan import targets
# Direct import for flatten_with_tuple_paths.
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
flags.DEFINE_enum('target', None, targets.__all__, 'Which Stan model to '
'sample from.')
flags.DEFINE_integer('stan_samples', 150000,
'Number of samples to ask from Stan.')
flags.DEFINE_integer('stan_chains', 10, 'Number of chains to ask from Stan.')
flags.DEFINE_boolean('print_summary', True, 'Whether to print the Stan fit'
'summary')
flags.DEFINE_string('output_directory', None,
'Where to save the ground truth values. By default, this '
'places it in the appropriate directory in the '
'Inference Gym source directory.')
FLAGS = flags.FLAGS
def get_ess(samples):
  """Computes the effective sample size (ESS) of `samples`."""
return tf.function(
functools.partial(
tfp.mcmc.effective_sample_size,
filter_beyond_positive_pairs=True,
),
autograph=False)(samples).numpy()
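# Illustrative note: for [num_steps, ...] samples of near-independent draws the
# ESS approaches num_steps, e.g.
#   get_ess(np.random.randn(1000, 3).astype(np.float32))  # roughly 1000 each
# while strong autocorrelation shrinks it.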
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tf.enable_v2_behavior()
stan_model = getattr(targets, FLAGS.target)()
with stan_model.sample_fn(
sampling_iters=FLAGS.stan_samples,
chains=FLAGS.stan_chains,
show_progress=True) as mcmc_output:
summary = mcmc_output.summary()
if FLAGS.print_summary:
pd.set_option('display.max_rows', sys.maxsize)
pd.set_option('display.max_columns', sys.maxsize)
print(mcmc_output.diagnose())
print(summary)
array_strs = []
for name, fn in sorted(stan_model.extract_fns.items()):
    # We handle one chain at a time to reduce memory usage.
chain_means = []
chain_stds = []
chain_esss = []
for chain_id in range(FLAGS.stan_chains):
# TODO(https://github.com/stan-dev/cmdstanpy/issues/218): This step is
# very slow and wastes memory. Consider reading the CSV files ourselves.
# sample shape is [num_samples, num_chains, num_columns]
chain = mcmc_output.sample[:, chain_id, :]
dataframe = pd.DataFrame(chain, columns=mcmc_output.column_names)
transformed_samples = fn(dataframe)
# We reduce over the samples dimension. Transformations can return
# nested outputs.
mean = tf.nest.map_structure(lambda s: s.mean(0), transformed_samples)
std = tf.nest.map_structure(lambda s: s.std(0), transformed_samples)
ess = tf.nest.map_structure(get_ess, transformed_samples)
chain_means.append(mean)
chain_stds.append(std)
chain_esss.append(ess)
# Now we reduce across chains.
    ess = tf.nest.map_structure(lambda *s: np.sum(s, 0), *chain_esss)
    mean = tf.nest.map_structure(lambda *s: np.mean(s, 0), *chain_means)
    # Reduce std across chains before it feeds the standard error of the mean.
    std = tf.nest.map_structure(lambda *s: np.mean(s, 0), *chain_stds)
    sem = tf.nest.map_structure(lambda std, ess: std / np.sqrt(ess), std, ess)
for (tuple_path, mean_part), sem_part, std_part in zip(
nest.flatten_with_tuple_paths(mean), tf.nest.flatten(sem),
tf.nest.flatten(std)):
array_strs.extend(
ground_truth_encoding.save_ground_truth_part(
name=name,
tuple_path=tuple_path,
mean=mean_part,
sem=sem_part,
std=std_part,
sestd=None,
))
argv_str = '\n'.join([' {} \\'.format(arg) for arg in sys.argv[1:]])
command_str = (
"""bazel run //spinoffs/inference_gym/tools:get_ground_truth -- \
{argv_str}""".format(argv_str=argv_str))
file_str = ground_truth_encoding.get_ground_truth_module_source(
target_name=FLAGS.target, command_str=command_str, array_strs=array_strs)
if FLAGS.output_directory is None:
file_basedir = os.path.dirname(os.path.realpath(__file__))
output_directory = os.path.join(
file_basedir, '../targets/ground_truth')
else:
output_directory = FLAGS.output_directory
file_path = os.path.join(output_directory, '{}.py'.format(FLAGS.target))
print('Writing ground truth values to: {}'.format(file_path))
with open(file_path, 'w') as f:
f.write(file_str)
if __name__ == '__main__':
flags.mark_flag_as_required('target')
app.run(main)
|
{"hexsha": "91e65092d02da54e47f5493bb7a3a2c881314ba8", "size": 6246, "ext": "py", "lang": "Python", "max_stars_repo_path": "spinoffs/inference_gym/tools/get_ground_truth.py", "max_stars_repo_name": "KonstantinKlepikov/probability", "max_stars_repo_head_hexsha": "0cc6c5febf3b10ece5bb2b9877bd695137a420ea", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-28T21:01:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-28T21:01:19.000Z", "max_issues_repo_path": "spinoffs/inference_gym/tools/get_ground_truth.py", "max_issues_repo_name": "KonstantinKlepikov/probability", "max_issues_repo_head_hexsha": "0cc6c5febf3b10ece5bb2b9877bd695137a420ea", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spinoffs/inference_gym/tools/get_ground_truth.py", "max_forks_repo_name": "KonstantinKlepikov/probability", "max_forks_repo_head_hexsha": "0cc6c5febf3b10ece5bb2b9877bd695137a420ea", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8965517241, "max_line_length": 85, "alphanum_fraction": 0.6876400897, "include": true, "reason": "import numpy", "num_tokens": 1435}
|
import numpy as np
from typing import List, NamedTuple, Callable, Optional, Union
import mindspore.ops.operations as P
from mindspore._c_expression import Tensor as _Tensor
from mindspore.common import dtype as mstype
from ._utils import _tensor_getitem
add = P.Add()
cast = P.Cast()
sum = P.ReduceSum()  # note: shadows Python's builtin sum within this module
sum_keepdims = P.ReduceSum(True)
zeros_like = P.ZerosLike()
ones_like = P.OnesLike()
matmul = P.MatMul()
transpose = P.Transpose()
neg = P.Neg()
squeeze_0 = P.Squeeze(0)
squeeze_1 = P.Squeeze(1)
sub = P.Sub()
mul = P.Mul()
class Dependency(NamedTuple):
tensor: 'Tensor'
grad_fn: Callable[['Tensor'], _Tensor]
Arrayable = Union[float, list, _Tensor, np.ndarray]
Tensorable = Union['Tensor', float, np.ndarray, _Tensor]
def ensure_tensor(tensorable: Tensorable) -> 'Tensor':
if isinstance(tensorable, Tensor):
return tensorable
else:
return Tensor(tensorable)
def ensure_array(arrayable: Arrayable, dtype) -> _Tensor:
if isinstance(arrayable, _Tensor):
return arrayable
else:
if dtype is None:
arrayable = np.array(arrayable)
if arrayable.dtype == np.float64:
arrayable = arrayable.astype(np.float32)
if arrayable.dtype == np.int64:
arrayable = arrayable.astype(np.int32)
return _Tensor(arrayable)
return _Tensor(arrayable, dtype)
def ensure_dtype(*dtypes):
    """Resolves a common dtype: float32 takes priority, then int32, else the first given."""
if mstype.float32 in dtypes:
return mstype.float32
if mstype.int32 in dtypes:
return mstype.int32
return dtypes[0]
def ensure_matmul_candidates(t, dim):
assert dim in [0, 1]
shape = (1,) + tuple(t.shape) if dim == 0 else tuple(t.shape) + (1,)
t_data = t if len(t.shape) == 2 else P.Reshape()(t, shape)
return t_data
def matmul_postprocess(data, t1_ndim, t2_ndim):
if t1_ndim == 1:
data = squeeze_0(data)
if t2_ndim == 1:
data = squeeze_1(data)
return data
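# Shape convention used by the two helpers above (illustrative): a 1-D operand
# is promoted to 2-D for P.MatMul, (m,) -> (1, m) on the left (dim=0) or
# (m,) -> (m, 1) on the right (dim=1), and matmul_postprocess squeezes the
# padded axis back out, mirroring numpy's matmul rules for vectors.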
class Tensor:
def __init__(self,
data: Arrayable,
requires_grad: bool = False,
depends_on: List[Dependency] = None,
dtype = None
):
self.data = ensure_array(data, dtype)
self.requires_grad = requires_grad
self.depends_on = depends_on or []
self.shape = self.data.shape
self.grad : Optional['Tensor'] = None
if self.requires_grad:
self.zero_grad()
def zero_grad(self):
self.grad = Tensor(np.zeros_like(self.data.asnumpy()))
def __repr__(self) -> str:
return f"Tensor({self.data.asnumpy()}, requires_grad={self.requires_grad})"
def __add__(self, other) -> 'Tensor':
"""
        gets called if I do t + other
"""
return _add(self, ensure_tensor(other))
def __radd__(self, other) -> 'Tensor':
""" gets called if I do other + t """
return _add(ensure_tensor(other), self)
def __iadd__(self, other) -> 'Tensor':
"""
when we do t += other
"""
self.data = add(self.data, ensure_tensor(other).data)
# Invalidate the gradient
self.grad = None
return self
def __isub__(self, other) -> 'Tensor':
self.data = sub(self.data, ensure_tensor(other).data)
# Invalidate the gradient
self.grad = None
return self
def __imul__(self, other) -> 'Tensor':
self.data = mul(self.data, ensure_tensor(other).data)
# Invalidate the gradient
self.grad = None
return self
def __mul__(self, other) -> 'Tensor':
return _mul(self, ensure_tensor(other))
def __rmul__(self, other) -> 'Tensor':
return _mul(ensure_tensor(other), self)
def __matmul__(self, other) -> 'Tensor':
return _matmul(self, other)
def __neg__(self) -> 'Tensor':
return _neg(self)
def __sub__(self, other) -> 'Tensor':
return _sub(self,ensure_tensor(other))
def __rsub__(self, other) -> 'Tensor':
return _sub(ensure_tensor(other), self)
def __getitem__(self, idxs) -> 'Tensor':
return _slice(self, idxs)
def sum(self) -> 'Tensor':
return tensor_sum(self)
@property
def ndim(self):
return len(self.shape)
def backward(self, grad: 'Tensor' = None):
assert self.requires_grad, "called backward on non-requires-grad tensor"
if grad is None:
if self.shape == ():
grad = Tensor(1)
else:
raise RuntimeError("grad must specified for non-0-tensor")
self.grad.data = add(self.grad.data, grad.data)
for dependency in self.depends_on:
backward_grad = dependency.grad_fn(grad.data)
dependency.tensor.backward(Tensor(backward_grad))
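# A minimal end-to-end sketch of the reverse-mode machinery above
# (illustrative values):
#   t = Tensor([1.0, 2.0, 3.0], requires_grad=True)
#   s = t.sum()      # records Dependency(t, grad_fn) via tensor_sum below
#   s.backward()     # seeds grad = Tensor(1) for the 0-d result
#   t.grad           # -> Tensor([1., 1., 1.])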
def tensor_sum(t: Tensor) -> Tensor:
data_dtype = ensure_dtype(t.data.dtype)
data = sum(cast(t.data, mstype.float32))
requires_grad = t.requires_grad
if requires_grad:
def grad_fn(grad: _Tensor) -> _Tensor:
return grad * ones_like(t.data)
depends_on = [Dependency(t, grad_fn)]
else:
depends_on = []
return Tensor(cast(data, data_dtype), requires_grad, depends_on)
def _add(t1: Tensor, t2:Tensor) -> Tensor:
data = add(t1.data, t2.data)
requires_grad = t1.requires_grad or t2.requires_grad
depends_on: List[Dependency] = []
if t1.requires_grad:
def grad_fn1(grad: _Tensor) -> _Tensor:
# Idea: [1,2,3] + [4,5,6] => [5,7,9]
# Handle the broadcasting properly
# Sum out added dims
grad_dtype = ensure_dtype(grad.dtype)
grad = cast(grad, mstype.float32)
ndims_added = len(grad.shape) - t1.ndim
for _ in range(ndims_added):
grad = sum(grad, axis=0)
# Sum across broadcasted (but non-added dims)
# (2,3) + (1,3) => (2,3) grad(2,3)
for i, dim in enumerate(t1.shape):
if dim == 1:
grad = sum_keepdims(grad, axis=i)
grad = cast(grad, grad_dtype)
return grad
depends_on.append(Dependency(t1, grad_fn1))
if t2.requires_grad:
def grad_fn2(grad: _Tensor) -> _Tensor:
grad_dtype = ensure_dtype(grad.dtype)
grad = cast(grad, mstype.float32)
ndims_added = len(grad.shape) - t2.ndim
for _ in range(ndims_added):
grad = sum(grad, axis=0)
# Sum across broadcasted (but non-added dims)
# (2,3) + (1,3) => (2,3) grad(2,3)
for i, dim in enumerate(t2.shape):
if dim == 1:
grad = sum_keepdims(grad, axis=i)
grad = cast(grad, grad_dtype)
return grad
depends_on.append(Dependency(t2, grad_fn2))
return Tensor(data,
requires_grad,
depends_on
)
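# --- Hedged numpy illustration of the broadcast-gradient reduction used in ---
# --- the grad_fns above (plain numpy only, independent of this module's backend) ---
_bx = np.ones((2, 3))
_by = np.ones(3)                       # broadcasts against _bx in _bx + _by
_bg = np.ones_like(_bx + _by)          # upstream grad, shape (2, 3)
for _ in range(_bg.ndim - _by.ndim):   # sum out the dims broadcasting added
    _bg = _bg.sum(axis=0)
assert _bg.shape == _by.shape and (_bg == 2).all()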
def _mul(t1: Tensor, t2:Tensor) -> Tensor:
"""
y = (a + eps) * b = a * b + (eps * b * dL/dy)
gradient_y = 5
have dL/dy
dL/da = dL/dy * dy/da(b)
"""
data = mul(t1.data, t2.data)
requires_grad = t1.requires_grad or t2.requires_grad
depends_on: List[Dependency] = []
if t1.requires_grad:
def grad_fn1(grad: _Tensor) -> _Tensor:
grad_dtype = ensure_dtype(grad.dtype, t2.data.dtype)
grad = cast(grad, mstype.float32)
grad = mul(grad, t2.data)
ndims_added = len(grad.shape) - t1.ndim
for _ in range(ndims_added):
grad = sum(grad, axis=0)
for i, dim in enumerate(t1.shape):
if dim == 1:
grad = sum_keepdims(grad, axis=i)
grad = cast(grad, grad_dtype)
return grad
depends_on.append(Dependency(t1, grad_fn1))
if t2.requires_grad:
def grad_fn2(grad: _Tensor) -> _Tensor:
grad_dtype = ensure_dtype(grad.dtype, t1.data.dtype)
grad = cast(grad, mstype.float32)
grad = mul(grad, t1.data)
ndims_added = len(grad.shape) - t2.ndim
for _ in range(ndims_added):
grad = sum(grad, axis=0)
for i, dim in enumerate(t2.shape):
if dim == 1:
grad = sum_keepdims(grad, axis=i)
grad = cast(grad, grad_dtype)
return grad
depends_on.append(Dependency(t2, grad_fn2))
return Tensor(data,
requires_grad,
depends_on
)
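# --- Hedged numpy check of the product rule implemented in _mul's grad_fns ---
# --- (illustrative throwaway values only) ---
_px = np.array([1.0, 2.0])
_py = np.array([3.0, 4.0])
_pg = np.ones(2)                       # upstream grad for z = _px * _py
assert ((_pg * _py) == _py).all() and ((_pg * _px) == _px).all()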
def _neg(t: Tensor) -> Tensor:
data = neg(t.data)
requires_grad = t.requires_grad
if requires_grad:
depends_on = [Dependency(t, lambda x: neg(x))]
else:
depends_on = []
return Tensor(data, requires_grad, depends_on)
def _sub(t1: Tensor, t2: Tensor) -> Tensor:
return t1 + -t2
def _matmul(t1: Tensor, t2:Tensor) -> Tensor:
"""
if t1 is (n1,m1) t2 is (m1,m2) then t1 @ t2 is (n1,m2)
so grad3 is (n1,m2)
if t3 = t1 @ t2 and grad3 is the gradient of some function wrt t3, then
grad1 = grad @ t2.T
grad2 = t1.T @ grad
"""
    assert len(t1.data.shape) <= 2, f"Tensor {t1} should not have more than 2 dims, but got {len(t1.data.shape)}"
    assert len(t2.data.shape) <= 2, f"Tensor {t2} should not have more than 2 dims, but got {len(t2.data.shape)}"
data_dtype = ensure_dtype(t1.data.dtype, t2.data.dtype)
t1_data = ensure_matmul_candidates(t1.data, 0)
t2_data = ensure_matmul_candidates(t2.data, 1)
data = matmul(cast(t1_data, mstype.float32), cast(t2_data, mstype.float32))
data = matmul_postprocess(data, t1.ndim, t2.ndim)
requires_grad = t1.requires_grad or t2.requires_grad
depends_on: List[Dependency] = []
if t1.requires_grad:
def grad_fn1(grad: _Tensor) -> _Tensor:
grad_dtype = ensure_dtype(grad.dtype, t2.data.dtype)
perm = tuple(range(t2.ndim - 1, -1, -1))
t2_T = transpose(cast(t2.data, mstype.float32), perm)
grad_ndim, t2_T_ndim = len(grad.shape), len(t2_T.shape)
grad = ensure_matmul_candidates(grad, 0)
t2_T = ensure_matmul_candidates(t2_T, 1)
grad = matmul(cast(grad, mstype.float32), t2_T)
grad = matmul_postprocess(grad, grad_ndim, t2_T_ndim)
return cast(grad, grad_dtype)
depends_on.append(Dependency(t1, grad_fn1))
if t2.requires_grad:
def grad_fn2(grad: _Tensor) -> _Tensor:
grad_dtype = ensure_dtype(grad.dtype, t1.data.dtype)
perm = tuple(range(t1.ndim - 1, -1, -1))
t1_T = transpose(cast(t1.data, mstype.float32), perm)
grad_ndim, t2_T_ndim = len(grad.shape), len(t1_T.shape)
grad = ensure_matmul_candidates(grad, 1)
t1_T = ensure_matmul_candidates(t1_T, 0)
grad = matmul(t1_T, cast(grad, mstype.float32))
grad = matmul_postprocess(grad, t2_T_ndim, grad_ndim)
return cast(grad, grad_dtype)
depends_on.append(Dependency(t2, grad_fn2))
return Tensor(cast(data, data_dtype),
requires_grad,
depends_on
)
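# --- Hedged numpy sanity check for the matmul gradient rules documented ---
# --- above: grad1 = grad3 @ t2.T and grad2 = t1.T @ grad3 ---
_A = np.ones((4, 3))
_B = np.ones((3, 5))
_G = np.ones((4, 5))                   # upstream grad, same shape as _A @ _B
assert (_G @ _B.T).shape == _A.shape
assert (_A.T @ _G).shape == _B.shape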
def _slice(t: Tensor, idx) -> Tensor:
    """
    t2 = t1[3:4, 4:4]
    """
    data = _tensor_getitem(t.data, idx)
    requires_grad = t.requires_grad
    if requires_grad:
        def grad_fn(grad: _Tensor) -> _Tensor:
            # The gradient of a slice is zero everywhere except the sliced
            # region; the buffer must match the *input* shape, not the
            # sliced output's shape.
            bigger_grad = zeros_like(t.data)
            # TODO: scatter `grad` into `bigger_grad` at `idx` (needs a
            # setitem counterpart of _tensor_getitem).
            return bigger_grad
        depends_on = [Dependency(t, grad_fn)]
    else:
        depends_on = []
    return Tensor(data, requires_grad, depends_on)
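# --- Hedged usage sketch: assumes the MindSpore-backed helpers imported at ---
# --- the top of this module are available; the values are hypothetical ---
if __name__ == "__main__":
    a = Tensor([1.0, 2.0, 3.0], requires_grad=True)
    b = Tensor([4.0, 5.0, 6.0], requires_grad=True)
    c = (a * b).sum()    # 0-dim forward result
    c.backward()         # grad defaults to Tensor(1) for 0-dim tensors
    # product + sum rules give d(sum(a*b))/da = b and d(sum(a*b))/db = a
    print(a.grad.data, b.grad.data)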
|
{"hexsha": "3a0079257686e171f6375c4e4d2de9868b75b27b", "size": 11593, "ext": "py", "lang": "Python", "max_stars_repo_path": "autograd/tensor.py", "max_stars_repo_name": "lvyufeng/autograd_ms", "max_stars_repo_head_hexsha": "f9e6920cc4fcc85a9d514820f2c3932a4926d436", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-13T08:31:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T08:31:13.000Z", "max_issues_repo_path": "autograd/tensor.py", "max_issues_repo_name": "lvyufeng/autograd_ms", "max_issues_repo_head_hexsha": "f9e6920cc4fcc85a9d514820f2c3932a4926d436", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autograd/tensor.py", "max_forks_repo_name": "lvyufeng/autograd_ms", "max_forks_repo_head_hexsha": "f9e6920cc4fcc85a9d514820f2c3932a4926d436", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7485875706, "max_line_length": 109, "alphanum_fraction": 0.5940653843, "include": true, "reason": "import numpy", "num_tokens": 3040}
|
import numpy as np
import pandas as pd
import argparse
from mbi import Domain, Dataset
import json
def discretize(df, schema):
new = df.copy()
domain = { }
for col in new.columns:
if col not in schema:
continue
info = schema[col]
if 'bins' in info:
# Things that should be binned are marked in the schema with
# the number of bins into which to bin them; they will also
# have min and max values.
bin_info = np.r_[np.linspace(info['min'], info['max'], num=info['bins'],
endpoint=False).astype(info['dtype']), info['max']]
new[col] = pd.cut(df[col], bin_info, right=False).cat.codes
domain[col] = len(bin_info) - 1
elif 'values' in info:
new[col] = df[col].astype(pd.CategoricalDtype(info['values'])).cat.codes
domain[col] = len(info['values'])
else:
new[col] = df[col] - info['min']
domain[col] = info['max'] - info['min'] + 1
return new, domain
def undo_discretize(df, schema):
new = df.copy()
for col in schema.keys():
info = schema[col]
if 'bins' in info:
# Things that should be binned are marked in the schema with
# the number of bins into which to bin them; they will also
# have min and max values.
bin_info = np.r_[np.linspace(info['min'], info['max'], num=info['bins'],
endpoint=False).astype(info['dtype']), info['max']]
            low = bin_info[:-1]
            high = bin_info[1:]
            low[0] = low[1] - 2
            high[-1] = high[-2] + 2
mid = (low + high) / 2
new[col] = mid[df[col].values]
elif 'values' in info:
mapping = np.array(info['values'])
new[col] = mapping[df[col].values]
else:
new[col] = df[col] + info['min']
dtypes = { col : schema[col]['dtype'] for col in schema }
return new.astype(dtypes)
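# --- Hedged round-trip illustration (hypothetical schema and data; a real ---
# --- schema would come from schemagen, as in the CLI below) ---
_demo_schema = {
    "age": {"min": 0, "max": 100, "bins": 10, "dtype": "int64"},
    "sex": {"values": ["F", "M"], "dtype": "object"},
}
_demo_df = pd.DataFrame({"age": [23, 67, 45], "sex": ["F", "M", "F"]})
_disc, _domain = discretize(_demo_df, _demo_schema)
assert _domain == {"age": 10, "sex": 2}
# decoding maps codes to bin midpoints / category labels, not the raw inputs:
assert list(undo_discretize(_disc, _demo_schema)["sex"]) == ["F", "M", "F"]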
if __name__ == "__main__":
description = "Pre and post processing functions for the Adagrid mechanism"
formatter = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(description=description, formatter_class=formatter)
parser.add_argument('--output_dir', help='output directory for \
transformed data and domain if using `discretize`',
required=False)
parser.set_defaults(**{'output_dir': '.'})
required = parser.add_argument_group('required arguments')
required.add_argument('--transform', help='either discretize or \
undo_discretize', required=True)
required.add_argument('--df', help='path to dataset', required=True)
required.add_argument('--schema', help='path to schema file from schemagen',
required=True)
args = parser.parse_args()
transform = args.transform
output_dir = args.output_dir
df = pd.read_csv(args.df)
with open(args.schema) as f:
schemagen = json.load(f)
schema = schemagen["schema"]
    assert transform in ['discretize', 'undo_discretize'], \
        "transform name not valid"
if transform == 'discretize':
transformed_df, domain = discretize(df=df, schema=schema)
with open(output_dir + '/domain.json', "w") as f:
json.dump(domain, fp=f)
transformed_df.to_csv(output_dir + '/discretized.csv', index=False)
if transform == 'undo_discretize':
transformed_df = undo_discretize(df=df, schema=schema)
transformed_df.to_csv(output_dir + '/raw_synthetic.csv', index=False)
|
{"hexsha": "cf23de92fca0b0770be35052bb0435a5c0211abd", "size": 3726, "ext": "py", "lang": "Python", "max_stars_repo_path": "extensions/transform.py", "max_stars_repo_name": "meijiu/nist-synthetic-data-2021", "max_stars_repo_head_hexsha": "19f6d31b48902743e2efb4820ad77f8ed42c469d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-11-08T16:39:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-19T08:58:42.000Z", "max_issues_repo_path": "extensions/transform.py", "max_issues_repo_name": "meijiu/nist-synthetic-data-2021", "max_issues_repo_head_hexsha": "19f6d31b48902743e2efb4820ad77f8ed42c469d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-09-06T21:35:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-27T20:14:37.000Z", "max_forks_repo_path": "extensions/transform.py", "max_forks_repo_name": "meijiu/nist-synthetic-data-2021", "max_forks_repo_head_hexsha": "19f6d31b48902743e2efb4820ad77f8ed42c469d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-09-02T18:46:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-10T17:47:02.000Z", "avg_line_length": 36.5294117647, "max_line_length": 88, "alphanum_fraction": 0.5823939882, "include": true, "reason": "import numpy", "num_tokens": 847}
|
# Activate local environment
using Pkg
Pkg.activate(".")
printstyled(" Running "; color = :green, bold = true)
print("model...\n")
# Load module
using Negotiations
# Run model
params = parameter_set_from_config("config.yaml")
db = load_database("db.sqlite")
model = setup_model(params, db)
rule1 = BoundedConfidence(bc = 1.0, inertia = 10.0)
rule2 = ContinuousHomophily(inertia = 20.0)
@time simulate(model, rule1, 3, db, batchname = "bounded_confidence")
@time simulate(model, rule2, 3, db, batchname = "continuous_homophily")
|
{"hexsha": "da6e92d8edc0f634bda36e0bdf8d569c28921afc", "size": 532, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "run_model.jl", "max_stars_repo_name": "social-dynamics/btw21-negotiation", "max_stars_repo_head_hexsha": "bc779a4ad4da0b5dd827707824d083d198f2e833", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run_model.jl", "max_issues_repo_name": "social-dynamics/btw21-negotiation", "max_issues_repo_head_hexsha": "bc779a4ad4da0b5dd827707824d083d198f2e833", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_model.jl", "max_forks_repo_name": "social-dynamics/btw21-negotiation", "max_forks_repo_head_hexsha": "bc779a4ad4da0b5dd827707824d083d198f2e833", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0, "max_line_length": 71, "alphanum_fraction": 0.7312030075, "num_tokens": 151}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Penetration Test Report Template %
% cyber@cfreg %
% https://hack.newhaven.edu/ %
% %
% Contributors: %
% TJ Balon - https://github.com/balon %
% Samuel Zurowski - https://github.com/samuelzurowski %
% Charles Barone - https://github.com/CharlesBarone %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This section should be written after the technical findings. It should be written with the same target audience as an executive summary.
% The goal of this section is to categorize your findings in a high level manner for c-level executives. For each "slice" in the pie chart, there should be a short summary of what the implications for the organization are for said vulnerabilities. An example of what this might look like is included below with one of these summaries. (In this example the category "Vulnerabilities" represents any generic vulnerabilities that were found which do not fit another category.)
\section{Observations}
This section serves as a high level overview of the security posture of \cptc. A detailed list of all discovered vulnerabilities can be found in Section \ref{sec:tech}. It is important to note that this list is by no means exhaustive and that there are most likely vulnerabilities that \teamname\ did not find.
\begin{figure}[h]
\centering
\begin{tikzpicture}
\pie
[
rotate=180,
after number=,
radius=5,
/tikz/nodes={text=black, font=\normalfont},
/tikz/every pin/.style={align=center, text=black, font=\normalfont},
sum=auto
]
{
2/Data Exposures,
2/Network Design,
2/Default Credentials,
3/Improper Authentication,
1/Misconfigurations,
1/Vulnerabilities
}
\end{tikzpicture}
\caption{Summary of Issues within the Network}
\end{figure}
\section*{Default Credentials \& Lack of Authentication}
The most immediate observation about \cptc's security posture is that default, null, and passwordless authentication was discovered on multiple systems. \teamname\ was able to gain access to a SCADA system using the same (default) credentials as in the previous engagement. The system in question is critical to the storage, delivery, and packaging processes within the warehouse facility. Moreover, several databases exhibited the same issue and were found not to require password authentication. It is important to remember that these credentials protect critical services and PII. These weaknesses could have an enormous impact on \cptc's ability to operate if discovered by threat actors, yet they can be remediated at low cost, giving fixes here an outsized impact on the security of \cptc. More details on mitigating vulnerabilities such as these can be found in each vulnerability's remediation suggestions in Section \ref{sec:tech}.
\subsection{Summary of Recommendations}
The following is an overview of recommendations which should be implemented:
\begin{itemize}
\item Resolve any PCI DSS compliance violations in Section \ref{sec:compliance} and ensure that \cptc\ is currently meeting all documentation requirements set by PCI DSS.
\item Implement both ingress \& egress filtering to reduce attack surface on hosts.
\item Ensure all hosts follow least-privilege principles to reduce attack surface.
\item Ensure proper encryption is used for confidential data (e.g. passwords and card holder data).
\item Implement a strong password policy.
\item Implement multi-factor authentication to provide defense in depth beyond passwords alone.
\item Use centralized logging to enable faster response to potential incidents.
\item Ensure null or passwordless authentication is not allowed.
\item Ensure only necessary services are running within the subnet.
\end{itemize}
\subsection{Positive Security Measures}
As the engagement progressed, \teamname\ was impeded by the security safeguards \cptc\ had in place. A number of basic security best practices were observed that limited \teamname's ability to move through the network. Some instances of the aforementioned security practices implemented by \cptc\ include:
\begin{itemize}
\item The usage of TLS on websites to protect information.
\item The usage of Cross-Origin Resource Sharing (CORS) headers prevented specific attacks.
\item The marketplace \& music player required authentication.
\item Some APIs required authentication in order to query sensitive information (although not all).
\end{itemize}
These controls should be continuously monitored and regulated to maintain the company's security posture.
\input{tex/sections/2_3_compliance}
|
{"hexsha": "f50f3b28fc02e2e9c05422f261b55fccd713c94f", "size": 4990, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "tex/sections/2_1_observations.tex", "max_stars_repo_name": "cyber-cfreg/Penetration-Test-Report-Template", "max_stars_repo_head_hexsha": "f4908a4c92c55acdd1b4ab4fb014fac262eba229", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-25T10:32:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T10:32:11.000Z", "max_issues_repo_path": "tex/sections/2_1_observations.tex", "max_issues_repo_name": "cyber-cfreg/Penetration-Test-Report-Template", "max_issues_repo_head_hexsha": "f4908a4c92c55acdd1b4ab4fb014fac262eba229", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tex/sections/2_1_observations.tex", "max_forks_repo_name": "cyber-cfreg/Penetration-Test-Report-Template", "max_forks_repo_head_hexsha": "f4908a4c92c55acdd1b4ab4fb014fac262eba229", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 66.5333333333, "max_line_length": 956, "alphanum_fraction": 0.7110220441, "num_tokens": 1006}
|
[STATEMENT]
lemma EnsuresInfinite:
"\<lbrakk> sigma \<Turnstile> \<box>\<diamond>P; sigma \<Turnstile> \<box>A; \<turnstile> A \<and> $P \<longrightarrow> Q` \<rbrakk> \<Longrightarrow> sigma \<Turnstile> \<box>\<diamond>Q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>sigma \<Turnstile> \<box>\<diamond>P; sigma \<Turnstile> \<box>A; \<turnstile> A \<and> $P \<longrightarrow> Q$\<rbrakk> \<Longrightarrow> sigma \<Turnstile> \<box>\<diamond>Q
[PROOF STEP]
apply (erule leadsto_infinite [temp_use])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>sigma \<Turnstile> \<box>A; \<turnstile> A \<and> $P \<longrightarrow> Q$\<rbrakk> \<Longrightarrow> sigma \<Turnstile> P \<leadsto> Q
[PROOF STEP]
apply (erule EnsuresLeadsto [temp_use])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sigma \<Turnstile> \<box>A \<Longrightarrow> sigma \<Turnstile> \<box>A
[PROOF STEP]
apply assumption
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 397, "file": null, "length": 4}
|
import numpy
from skimage.exposure import rescale_intensity
from aydin.features.groups.random import RandomFeatures
from aydin.io.datasets import camera
def n(image):
return rescale_intensity(
image.astype(numpy.float32), in_range='image', out_range=(0, 1)
)
def test_random_feature_group():
# get image:
image = n(camera().astype(numpy.float32))
    # Instantiate random features:
randomf = RandomFeatures(size=5)
assert randomf.num_features(image.ndim) == 25
assert randomf.receptive_field_radius == 2
# Set image:
randomf.prepare(image)
    # Allocate an output buffer for the features:
    feature = numpy.empty_like(image)
    # Compute each feature and check its validity:
for index in range(randomf.num_features(image.ndim)):
randomf.compute_feature(index=index, feature=feature)
assert (feature != image).any()
|
{"hexsha": "c86ccd7d5a4be3fb2dee96e98cfe468e4ee3b29d", "size": 863, "ext": "py", "lang": "Python", "max_stars_repo_path": "aydin/features/groups/test/test_random_feature_group.py", "max_stars_repo_name": "royerloic/aydin", "max_stars_repo_head_hexsha": "f9c61a24030891d008c318b250da5faec69fcd7d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 78, "max_stars_repo_stars_event_min_datetime": "2021-11-08T16:11:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T17:51:04.000Z", "max_issues_repo_path": "aydin/features/groups/test/test_random_feature_group.py", "max_issues_repo_name": "royerloic/aydin", "max_issues_repo_head_hexsha": "f9c61a24030891d008c318b250da5faec69fcd7d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2021-11-08T17:15:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T17:46:55.000Z", "max_forks_repo_path": "aydin/features/groups/test/test_random_feature_group.py", "max_forks_repo_name": "royerloic/aydin", "max_forks_repo_head_hexsha": "f9c61a24030891d008c318b250da5faec69fcd7d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-11-09T17:42:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T00:37:57.000Z", "avg_line_length": 26.1515151515, "max_line_length": 71, "alphanum_fraction": 0.7126303592, "include": true, "reason": "import numpy", "num_tokens": 195}
|
@info "Running show tests"
const TEMPLATES_DIR = contractuser(PT.default_file())
const LICENSES_DIR = joinpath(TEMPLATES_DIR, "licenses")
function test_show(expected::AbstractString, observed::AbstractString)
if expected == observed
@test true
else
print_diff(expected, observed)
@test :expected == :observed
end
end
@testset "Show methods" begin
@testset "Plugins" begin
expected = """
Readme:
file: "$(joinpath(TEMPLATES_DIR, "README.md"))"
destination: "README.md"
inline_badges: false
badge_order: DataType[Documenter{GitHubActions}, Documenter{GitLabCI}, Documenter{TravisCI}, GitHubActions, GitLabCI, TravisCI, AppVeyor, DroneCI, CirrusCI, Codecov, Coveralls, BlueStyleBadge, ColPracBadge, PkgEvalBadge]
badge_off: DataType[]
"""
test_show(rstrip(expected), sprint(show, MIME("text/plain"), Readme()))
end
@testset "Template" begin
expected = """
Template:
authors: ["$USER"]
dir: "$(contractuser(Pkg.devdir()))"
host: "github.com"
julia: v"1.0.0"
user: "$USER"
plugins:
CompatHelper:
file: "$(joinpath(TEMPLATES_DIR, "github", "workflows", "CompatHelper.yml"))"
destination: "CompatHelper.yml"
cron: "0 0 * * *"
Git:
ignore: String[]
name: nothing
email: nothing
branch: "main"
ssh: false
jl: true
manifest: false
gpgsign: false
License:
path: "$(joinpath(LICENSES_DIR, "MIT"))"
destination: "LICENSE"
ProjectFile:
version: v"0.1.0"
Readme:
file: "$(joinpath(TEMPLATES_DIR, "README.md"))"
destination: "README.md"
inline_badges: false
badge_order: DataType[Documenter{GitHubActions}, Documenter{GitLabCI}, Documenter{TravisCI}, GitHubActions, GitLabCI, TravisCI, AppVeyor, DroneCI, CirrusCI, Codecov, Coveralls, BlueStyleBadge, ColPracBadge, PkgEvalBadge]
badge_off: DataType[]
SrcDir:
file: "$(joinpath(TEMPLATES_DIR, "src", "module.jl"))"
TagBot:
file: "$(joinpath(TEMPLATES_DIR, "github", "workflows", "TagBot.yml"))"
destination: "TagBot.yml"
trigger: "JuliaTagBot"
token: Secret("GITHUB_TOKEN")
ssh: Secret("DOCUMENTER_KEY")
ssh_password: nothing
changelog: nothing
changelog_ignore: nothing
gpg: nothing
gpg_password: nothing
registry: nothing
branches: nothing
dispatch: nothing
dispatch_delay: nothing
Tests:
file: "$(joinpath(TEMPLATES_DIR, "test", "runtests.jl"))"
project: false
"""
# `with_clean_gitconfig` requires Git to be installed, but if Git is not installed,
# then we probably don't need to worry about any conflicting Git config files.
f = () -> test_show(
rstrip(expected),
sprint(show, MIME("text/plain"), tpl(; authors=USER)),
)
if PT.git_is_installed()
with_clean_gitconfig() do
run(`git config user.name Tester`)
run(`git config user.email te@st.er`)
f()
end
else
f()
end
end
@testset "show as serialization" begin
t1 = tpl()
t2 = eval(Meta.parse(sprint(show, t1)))
@test t1 == t2
foreach((NoDeploy, GitHubActions)) do T
p1 = Documenter{T}()
p2 = eval(Meta.parse(sprint(show, p1)))
@test p1 == p2
end
end
end
|
{"hexsha": "7fcb60b6383193b7bc62fc56f5d5c6a416830547", "size": 4150, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/show.jl", "max_stars_repo_name": "charlieIT/PkgTemplates.jl", "max_stars_repo_head_hexsha": "1cb56bf90326d47c402af51537c9c669e9b08a24", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/show.jl", "max_issues_repo_name": "charlieIT/PkgTemplates.jl", "max_issues_repo_head_hexsha": "1cb56bf90326d47c402af51537c9c669e9b08a24", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/show.jl", "max_forks_repo_name": "charlieIT/PkgTemplates.jl", "max_forks_repo_head_hexsha": "1cb56bf90326d47c402af51537c9c669e9b08a24", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3873873874, "max_line_length": 238, "alphanum_fraction": 0.5127710843, "num_tokens": 927}
|
[STATEMENT]
lemma continuous_on_map_topology2:
"continuous_map T X (g \<circ> f) \<longleftrightarrow> continuous_map (map_topology f T) X g"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_map T X (g \<circ> f) = continuous_map (map_topology f T) X g
[PROOF STEP]
unfolding map_topology_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_map T X (g \<circ> f) = continuous_map (final_topology (f ` topspace T) (\<lambda>_. T) (\<lambda>_. f)) X g
[PROOF STEP]
apply safe
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. continuous_map T X (g \<circ> f) \<Longrightarrow> continuous_map (final_topology (f ` topspace T) (\<lambda>_. T) (\<lambda>_. f)) X g
2. continuous_map (final_topology (f ` topspace T) (\<lambda>_. T) (\<lambda>_. f)) X g \<Longrightarrow> continuous_map T X (g \<circ> f)
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_map T X (g \<circ> f) \<Longrightarrow> continuous_map (final_topology (f ` topspace T) (\<lambda>_. T) (\<lambda>_. f)) X g
[PROOF STEP]
apply (rule continuous_on_final_topologyI1)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>i. continuous_map T X (g \<circ> f) \<Longrightarrow> continuous_map T X (g \<circ> f)
2. \<And>i. continuous_map T X (g \<circ> f) \<Longrightarrow> f \<in> topspace T \<rightarrow> f ` topspace T
3. continuous_map T X (g \<circ> f) \<Longrightarrow> g \<in> f ` topspace T \<rightarrow> topspace X
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_map T X (g \<circ> f) \<Longrightarrow> continuous_map T X (g \<circ> f)
[PROOF STEP]
by assumption
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>i. continuous_map T X (g \<circ> f) \<Longrightarrow> f \<in> topspace T \<rightarrow> f ` topspace T
2. continuous_map T X (g \<circ> f) \<Longrightarrow> g \<in> f ` topspace T \<rightarrow> topspace X
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_map T X (g \<circ> f) \<Longrightarrow> f \<in> topspace T \<rightarrow> f ` topspace T
[PROOF STEP]
by force
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_map T X (g \<circ> f) \<Longrightarrow> g \<in> f ` topspace T \<rightarrow> topspace X
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_map T X (g \<circ> f) \<Longrightarrow> g \<in> f ` topspace T \<rightarrow> topspace X
[PROOF STEP]
by (rule continuous_map_composeD)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_map (final_topology (f ` topspace T) (\<lambda>_. T) (\<lambda>_. f)) X g \<Longrightarrow> continuous_map T X (g \<circ> f)
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_map (final_topology (f ` topspace T) (\<lambda>_. T) (\<lambda>_. f)) X g \<Longrightarrow> continuous_map T X (g \<circ> f)
[PROOF STEP]
apply (erule continuous_map_compose[rotated])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_map T (final_topology (f ` topspace T) (\<lambda>_. T) (\<lambda>_. f)) f
[PROOF STEP]
apply (rule continuous_on_final_topologyI2)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>i. f \<in> topspace T \<rightarrow> f ` topspace T
[PROOF STEP]
by force
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 1313, "file": "Smooth_Manifolds_Analysis_More", "length": 16}
|
[STATEMENT]
lemma bounded_linear_Blinfun_apply: "bounded_linear f \<Longrightarrow> blinfun_apply (Blinfun f) = f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bounded_linear f \<Longrightarrow> blinfun_apply (Blinfun f) = f
[PROOF STEP]
by (auto simp: Blinfun_inverse)
|
{"llama_tokens": 103, "file": null, "length": 1}
|
"""Regression with abstention network architecture."""
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras import regularizers
from tensorflow.keras.models import Sequential
__author__ = "Elizabeth A. Barnes and Randal J. Barnes"
__date__ = "March 4, 2021"
np.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
def defineNN(hiddens, input_shape, output_shape, ridge_penalty=0., act_fun='relu', network_seed=99):
inputs = tf.keras.Input( shape = input_shape )
x = inputs
# linear network only
if hiddens[0] == 0:
x = tf.keras.layers.Dense(1,
activation='linear',
use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=ridge_penalty),
bias_initializer=tf.keras.initializers.RandomNormal(seed=network_seed),
kernel_initializer=tf.keras.initializers.RandomNormal(seed=network_seed),
)(x)
else:
# initialize first layer
x = tf.keras.layers.Dense(hiddens[0],
activation=act_fun,
use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=ridge_penalty),
bias_initializer=tf.keras.initializers.RandomNormal(seed=network_seed),
kernel_initializer=tf.keras.initializers.RandomNormal(seed=network_seed),
)(x)
# initialize other layers
for layer in hiddens[1:]:
x = tf.keras.layers.Dense(layer,
activation=act_fun,
use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=0.0),
bias_initializer=tf.keras.initializers.RandomNormal(seed=network_seed),
kernel_initializer=tf.keras.initializers.RandomNormal(seed=network_seed),
)(x)
# set final output units separately
mu_unit = tf.keras.layers.Dense(1,
activation = 'linear',
use_bias=True,
bias_initializer=tf.keras.initializers.RandomNormal(seed=network_seed),
kernel_initializer=tf.keras.initializers.RandomNormal(seed=network_seed),
# bias_initializer=tf.keras.initializers.RandomUniform(minval=0.,maxval=1.,seed=network_seed),
# kernel_initializer=tf.keras.initializers.RandomUniform(minval=0.,maxval=1.,seed=network_seed),
)(x)
sigma_unit = tf.keras.layers.Dense(1,
                                       activation = 'relu', # constrain sigma to be non-negative
use_bias=True,
bias_initializer=tf.keras.initializers.RandomUniform(minval=.1,maxval= .5,seed=network_seed),
kernel_initializer=tf.keras.initializers.RandomUniform(minval=.1,maxval=.5,seed=network_seed),
# kernel_constraint = tf.keras.constraints.NonNeg(),
# bias_constraint = tf.keras.constraints.NonNeg(),
)(x)
# final output layer
output_layer = tf.keras.layers.concatenate([mu_unit,sigma_unit],axis=1)
# finalize the model
model = tf.keras.models.Model(inputs=inputs, outputs=output_layer)
return model
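# --- Hedged smoke test (hypothetical shapes; note the network always emits ---
# --- 2 units, mu and sigma, regardless of the output_shape argument) ---
if __name__ == "__main__":
    model = defineNN(hiddens=[64, 64], input_shape=(10,), output_shape=2)
    preds = model(tf.random.normal((32, 10)))
    print(preds.shape)  # (32, 2): column 0 = mu, column 1 = sigma (>= 0)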
|
{"hexsha": "03300c9011a651f491d2af3feff71bdf8bfdce77", "size": 4054, "ext": "py", "lang": "Python", "max_stars_repo_path": "manuscript_code/regression_JAMES/network.py", "max_stars_repo_name": "eabarnes1010/controlled_abstention_networks", "max_stars_repo_head_hexsha": "4519ff710d2562a25045d0a2bdd26b3b6a98fa32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-04-23T03:06:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T08:36:32.000Z", "max_issues_repo_path": "manuscript_code/regression_JAMES/network.py", "max_issues_repo_name": "eabarnes1010/controlled_abstention_networks", "max_issues_repo_head_hexsha": "4519ff710d2562a25045d0a2bdd26b3b6a98fa32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "manuscript_code/regression_JAMES/network.py", "max_forks_repo_name": "eabarnes1010/controlled_abstention_networks", "max_forks_repo_head_hexsha": "4519ff710d2562a25045d0a2bdd26b3b6a98fa32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-06-23T17:48:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-02T04:10:37.000Z", "avg_line_length": 52.6493506494, "max_line_length": 137, "alphanum_fraction": 0.511100148, "include": true, "reason": "import numpy", "num_tokens": 689}
|
using TestPackage2
TestPackage2.greet()
|
{"hexsha": "1fd891654dd2dce825ba6f7374e2f648d42a15fb", "size": 41, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/TestPackage2/snoop/snoopfile.jl", "max_stars_repo_name": "daviehh/PackageCompiler.jl", "max_stars_repo_head_hexsha": "9a9e7b9f36bbcbf3cca2d53c99a82b59afdda37e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/TestPackage2/snoop/snoopfile.jl", "max_issues_repo_name": "daviehh/PackageCompiler.jl", "max_issues_repo_head_hexsha": "9a9e7b9f36bbcbf3cca2d53c99a82b59afdda37e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/TestPackage2/snoop/snoopfile.jl", "max_forks_repo_name": "daviehh/PackageCompiler.jl", "max_forks_repo_head_hexsha": "9a9e7b9f36bbcbf3cca2d53c99a82b59afdda37e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 10.25, "max_line_length": 20, "alphanum_fraction": 0.8292682927, "num_tokens": 11}
|
"""
function gheader(gt)
Retrieve the header infomation of a packed genotype file.
"""
function gheader(gt)
nlc, nid, dms, gt_majored = nothing, nothing, nothing, true
open(gt, "r") do io
header = mmap(io, Vector{Int64}, 3)
gt_majored = (header[1] == 1)
nlc, nid = gt_majored ? header[2:3] : header[[3, 2]]
dms = Tuple(header[2:3])
end
gt_majored, nlc, nid, dms
end
"""
function picd(gt, twop; step = 1000, tol = 1e-4)
Given the binary file of `AGH` genotype format, and a vector of
2 × allele frequencies, this function does the `p`ivoted `i`ncomplete
`C`holesky `d`ecomposition.
Finally it returns an approximate G-inverse.
"""
function picd(gt::String, twop; step = 1000, tol = 1e-4)
goffset = 24
gt_majored, nlc, nid, dms = gheader(gt)
gt_majored || error("The matrix must be genotype majored for speed reasons")
blk = zeros(dms[1], step)
# 1/√(2pq), as product is faster than divide.
rs2pq = 1 ./ sqrt.(twop .* (1 .- twop .* .5))
open(gt, "r") do io
gt = mmap(io, Matrix{Int8}, dms, goffset)
# tp = mean(gt, dims=2) # if gt_majored, or, 1.
copyto!(blk, view(gt, :, 1:step))
blk .-= twop
blk .*= rs2pq
G = blk'blk
G, piv, rank, info = LAPACK.pstrf!('L', G, tol)
end
end
function tst_picd(gt, twop; step = 1000, tol = 1e-4)
rs2pq = 1 ./ sqrt.(twop .* (1 .- twop .* .5))
blk = zeros(size(gt)[1], step)
copyto!(blk, view(gt, :, 1:step))
blk .-= twop
blk .*= rs2pq
G = blk'blk
G, piv, rank, info = LAPACK.pstrf!('L', G, tol)
end
"""
function cholesky_decomposition!(A)
This function is just for fun, and should not be taken seriously.
The lower triangle of matrix `A` will be replaced by `L`, such that
the original `A = LL'`.
"""
function cholesky_decomposition!(A)
issymmetric(A) || error("Not a symmetric matrix")
m = size(A)[1]
for i in 1:m
for k in 1:i-1
A[i, i] -= A[i, k] * A[i, k]
end
A[i, i] = sqrt(A[i, i]) # diagonals
for j in i+1:m
for k in 1:i-1
A[j, i] -= A[i, k] * A[j, k]
end
A[j, i] /= A[i, i]
end # off diagonals
end
end
|
{"hexsha": "13a39b57540ddf1e85afa9958fa75813193bad15", "size": 2278, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/picd/picd.jl", "max_stars_repo_name": "xijiang/AGH.jl", "max_stars_repo_head_hexsha": "3fd0213f43f27c254fcfb77b1b14d71524c5e322", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/picd/picd.jl", "max_issues_repo_name": "xijiang/AGH.jl", "max_issues_repo_head_hexsha": "3fd0213f43f27c254fcfb77b1b14d71524c5e322", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/picd/picd.jl", "max_forks_repo_name": "xijiang/AGH.jl", "max_forks_repo_head_hexsha": "3fd0213f43f27c254fcfb77b1b14d71524c5e322", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9736842105, "max_line_length": 80, "alphanum_fraction": 0.5579455663, "num_tokens": 781}
|
"""
Distributions of the Cramer-von Mises statistic.
After doi:10.2307/2346175.
Original Work (scikit-gof) Copyright (c) 2015 Wojciech Ruszczewski <scipy@wr.waw.pl>
Modified Work Copyright (c) 2020 h-bryant
"""
from __future__ import division
from numpy import arange, dot, exp, newaxis, pi, tensordot
from scipy.special import gamma, kv
from scipy.stats import rv_continuous
from vect import varange, vectorize
class cvm_unif_gen(rv_continuous):
"""
Approximate Cramer-von Mises statistic distribution for uniform data
(with the hypothesized distribution continuous and fully specified).
"""
def _argcheck(self, samples):
return samples > 0
@vectorize(otypes=(float,))
def _cdf(self, statistic, samples):
low = 1 / (12 * samples)
# Basic bounds.
if statistic <= low:
return 0.
if statistic >= samples / 3:
return 1.
# From the geometric approach of Csorgo and Faraway (equation 2.4).
if statistic <= low + 1 / (4 * samples ** 2):
return (gamma(samples + 1) / gamma(samples / 2 + 1) *
(pi * (statistic - low)) ** (samples / 2))
# Asymptotic distribution with a one-term correction (equation 1.8).
return cvm_unif_inf(statistic) + cvm_unif_fix1(statistic) / samples
cvm_unif = cvm_unif_gen(a=0, name='cvm-unif', shapes='samples')
inf_ks41 = 4 * arange(11) + 1
inf_args = inf_ks41 ** 2 / 16
inf_cs = (inf_ks41 ** .5 * gamma(varange(.5, 11)) /
(pi ** 1.5 * gamma(varange(1, 11))))
def cvm_unif_inf(statistic):
"""
Calculates the limiting distribution of the Cramer-von Mises statistic.
After the second line of equation 1.3 from the Csorgo and Faraway paper.
"""
args = inf_args / statistic
return (inf_cs * exp(-args) * kv(.25, args)).sum() / statistic ** .5
fix1_args = (4 * (varange((.5, 1., 1.5), 21)) - 1) ** 2 / 16
fix1_dens = 72 * pi ** 1.5 * gamma(varange(1, 21))
fix1_csa = fix1_args ** .75 * gamma(varange(1.5, 21)) / fix1_dens
fix1_csb = fix1_args ** 1.25 * gamma(varange((.5, 1.5, 2.5), 21)) / fix1_dens
def cvm_unif_fix1(statistic):
"""
Approximates the first-term of the small sample count Gotze expansion.
After equation 1.10 (with coefficients pulled out as csa / csb).
"""
args = fix1_args / statistic
kvs = kv((.25, .75, 1.25), args[:, :, newaxis])
gs, hs = exp(-args) * tensordot(((1, 1, 0), (2, 3, -1)), kvs, axes=(1, 2))
a = dot((7, 16, 7), fix1_csa * gs).sum() / statistic ** 1.5
b = dot((1, 0, 24), fix1_csb * hs).sum() / statistic ** 2.5
return cvm_unif_inf(statistic) / 12 - a - b
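# --- Hedged usage sketch (assumes the local `vect` helpers import cleanly; ---
# --- the statistic and sample count below are hypothetical) ---
if __name__ == "__main__":
    stat, n = 0.2, 50
    print(1 - cvm_unif.cdf(stat, n))   # p-value; `samples` is the shape parameter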
|
{"hexsha": "b4b9336381aefb3cc82666419b1e7358d0814858", "size": 2699, "ext": "py", "lang": "Python", "max_stars_repo_path": "funcsim/cvmdist.py", "max_stars_repo_name": "h-bryant/funcsim", "max_stars_repo_head_hexsha": "6f0ec2365e3ed6d9478e2f92e755cebafaf6528d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-08T03:40:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T03:40:26.000Z", "max_issues_repo_path": "funcsim/cvmdist.py", "max_issues_repo_name": "h-bryant/funcsim", "max_issues_repo_head_hexsha": "6f0ec2365e3ed6d9478e2f92e755cebafaf6528d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "funcsim/cvmdist.py", "max_forks_repo_name": "h-bryant/funcsim", "max_forks_repo_head_hexsha": "6f0ec2365e3ed6d9478e2f92e755cebafaf6528d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7375, "max_line_length": 84, "alphanum_fraction": 0.6161541312, "include": true, "reason": "from numpy,from scipy", "num_tokens": 863}
|
[STATEMENT]
lemma (in Firstprogram) "STUTINV phi"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. STUTINV phi
[PROOF STEP]
by (auto simp: phi_def init_def m1_def m2_def Live_def stutinvs nstutinvs livestutinv)
|
{"llama_tokens": 95, "file": "TLA_Inc", "length": 1}
|
import numpy as np
import pandas as pd
import scipy.spatial
import triangle
from . import cleanup
from . import points_in_mesh
from . import boundary
def replace_triangles(points, vertices=None, triangles=None, **tri):
if vertices is None:
vertices = pd.DataFrame({"X": [], "Y": []})
if triangles is None:
triangles = pd.DataFrame({0: [], 1: [], 2:[]})
vertices, triangles = cleanup.reindex(vertices, triangles)
# remove duplicated coordinates to not create invalid geometries
# check first within points, then check if points overlap with any vertex in vertices
# TODO: Are there important data in the extra columns of `points` that we lose by dropping them?
# Maybe the most proper approach would be to replace lines of `vertices` with those of `points`...
# or at least users should have the choice of which one should be overwritten.
# 2021-09-02, Duke-of-Lizard
points = points.drop_duplicates(['X','Y']).reset_index(drop=True)
points, vertices = cleanup.remove_overlapping_points_vertices(points, vertices, keep='points')
points_start = len(vertices)
points_and_nodes = vertices.append(points).reset_index(drop=True)
P = points[["X", "Y"]].values
A = vertices.loc[triangles[0].values][["X", "Y"]].values
B = vertices.loc[triangles[1].values][["X", "Y"]].values
C = vertices.loc[triangles[2].values][["X", "Y"]].values
points_and_triangles = points_in_mesh.points_in_triangles(points, vertices, triangles)
    mask = np.ones(triangles.index.shape, dtype="bool")
triangles_with_points = np.unique(points_and_triangles["triangle"])
triangles_with_points = triangles_with_points[triangles_with_points != -1]
mask[triangles_with_points] = 0
leftover = None
all_new_faces = triangles[mask].copy()
for triangle, group in points_and_triangles.groupby("triangle"):
if triangle == -1:
leftover = group["point"] + points_start
continue
triangulation_points = np.append(P[group["point"]],
np.array((A[triangle],
B[triangle],
C[triangle])), axis=0)
# Normalization to get around floating point precision problem in scipy.spatial.Delaunay
triangulation_points[:,0] -= triangulation_points[:,0].mean()
triangulation_points[:,1] -= triangulation_points[:,1].mean()
triangulation = scipy.spatial.Delaunay(triangulation_points, qhull_options="QJ")
triangulation_point_indices = np.append((group["point"] + points_start),
np.array((triangles[0].loc[triangle],
triangles[1].loc[triangle],
triangles[2].loc[triangle])))
new_faces = triangles.iloc[pd.Index([triangle]).repeat(len(triangulation.simplices))].copy()
new_faces[0] = triangulation_point_indices[triangulation.simplices[:,0]]
new_faces[1] = triangulation_point_indices[triangulation.simplices[:,1]]
new_faces[2] = triangulation_point_indices[triangulation.simplices[:,2]]
all_new_faces = all_new_faces.append(new_faces)
res = dict(tri)
res["vertices"] = points_and_nodes
res["triangles"] = all_new_faces
res["leftover"] = leftover
return res
def supplant_triangles(existing_boundary=False, **tri):
if "triangles" not in tri:
tri["triangles"] = pd.DataFrame({0: [], 1: [], 2:[]})
# Remove any duplicate vertices, or triangle.triangulate() is
# going to segfault!
tri["vertices"], tri["triangles"] = cleanup.clean_triangles(
tri["vertices"], tri["triangles"], decimals = None)
tri = boundary.mesh_boundary(**tri)
if not existing_boundary:
tri = boundary.vertices_boundary(**tri)
trivertices = tri["vertices"][["X", "Y"]]
res = dict(tri)
process_tri = {"vertices": trivertices.values}
if "segments" in tri:
process_tri["segments"] = tri["segments"][[0, 1]].values
if "holes" in tri:
process_tri["holes"] = tri["holes"].values
if "triangles" in tri and len(tri["triangles"]):
triangles = tri["triangles"]
holes = (trivertices.loc[triangles[0]].values
+ trivertices.loc[triangles[1]].values
+ trivertices.loc[triangles[2]].values) / 3
if "holes" in process_tri:
holes = np.append(process_tri["holes"], holes, axis=0)
process_tri["holes"] = holes
if existing_boundary:
xmin = tri["vertices"]["X"].min()
ymin = tri["vertices"]["Y"].min()
xmax = tri["vertices"]["X"].max()
ymax = tri["vertices"]["Y"].max()
process_tri["vertices"] = np.append(
process_tri["vertices"],
np.array([[xmin-20,ymin-20], [xmin-20, ymax+20], [xmax+20, ymax+20], [xmax+20, ymin-20]]),
axis=0)
holes = np.array([[xmin-10, ymin-10]])
if "holes" in process_tri:
holes = np.append(process_tri["holes"], holes, axis=0)
process_tri["holes"] = holes
res.update(triangle.triangulate(process_tri, 'p'))
if "triangles" in tri:
triangles = tri["triangles"]
res["triangles"] = triangles.append(triangles.iloc[0:0].append(pd.DataFrame(res["triangles"])))
res["vertices"] = tri["vertices"].append(
pd.DataFrame(res["vertices"][len(tri["vertices"]):,:], columns=["X", "Y"]), ignore_index=True)
new_points = res["vertices"].loc[len(tri["vertices"]):].index
if len(new_points):
res = interpolate_vertices(res, new_points)
if 'segments' in tri:
res['segments'] = pd.DataFrame(res['segments'])
return res
def triangles_to_segments(triangles):
return triangles[[0, 1]].append(
triangles[[1, 2]].rename(columns={1:0, 2:1})).append(
triangles[[2, 0]].rename(columns={2:0, 0:1})).append(
triangles[[0, 1]].rename(columns={0:1, 1:0})).append(
triangles[[1, 2]].rename(columns={2:0, 1:1})).append(
triangles[[2, 0]].rename(columns={0:0, 2:1})).drop_duplicates().set_index(0).sort_index()
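# --- Hedged illustration (hypothetical data; relies on the same pandas ---
# --- DataFrame.append behaviour as the rest of this module): a single ---
# --- triangle (0, 1, 2) yields its 6 directed edges, indexed by source vertex ---
_tris = pd.DataFrame({0: [0], 1: [1], 2: [2]})
assert len(triangles_to_segments(_tris)) == 6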
def interpolate_vertices(tri, to_interpolate_idxs):
new_points = to_interpolate_idxs
new_triangles = (
tri["triangles"][0].isin(new_points)
| tri["triangles"][1].isin(new_points)
| tri["triangles"][2].isin(new_points))
segments = triangles_to_segments(tri["triangles"].loc[new_triangles])
segments2 = segments.join(
tri["vertices"]
).reset_index().rename(columns={"index":0}).set_index(1).join(
tri["vertices"][["X", "Y"]].rename(columns={"X":"X1", "Y":"Y1"}))
segments2["segment_length"] = np.sqrt( (segments2["X1"]-segments2["X"])**2
+ (segments2["Y1"]-segments2["Y"])**2)
segments2["segment_weight"] = 1. / segments2["segment_length"]
segments2 = segments2[~segments2[0].isin(new_points)]
segments3 = segments2.mul(
segments2["segment_weight"], axis=0
).assign(
segment_weight=segments2["segment_weight"]
).groupby(level=0).sum()
interpolated = segments3.div(segments3["segment_weight"], axis=0)
no_interpolation = set(("X", "Y")).union(set(tri.get("no_interpolation", ())))
res = dict(tri)
res["vertices"] = res["vertices"].copy()
cols = set(interpolated.columns).intersection(set(tri["vertices"].columns)) - no_interpolation
res["vertices"].loc[interpolated.index, cols] = interpolated[cols]
return res
|
{"hexsha": "464899d66b5cb62b2aa2c33f3765cc2265d1a68c", "size": 7746, "ext": "py", "lang": "Python", "max_stars_repo_path": "emeraldtriangles/refine_mesh.py", "max_stars_repo_name": "emerald-geomodelling/EmeraldTriangles", "max_stars_repo_head_hexsha": "f65d5ba8cc6206b11e649124e2a1f61d46d80690", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "emeraldtriangles/refine_mesh.py", "max_issues_repo_name": "emerald-geomodelling/EmeraldTriangles", "max_issues_repo_head_hexsha": "f65d5ba8cc6206b11e649124e2a1f61d46d80690", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-03-16T19:23:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-22T08:06:42.000Z", "max_forks_repo_path": "emeraldtriangles/refine_mesh.py", "max_forks_repo_name": "emerald-geomodelling/EmeraldTriangles", "max_forks_repo_head_hexsha": "f65d5ba8cc6206b11e649124e2a1f61d46d80690", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9841269841, "max_line_length": 108, "alphanum_fraction": 0.613607023, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1931}
|
from rllab.policies.base import StochasticPolicy
from sandbox.finetuning.policies.test_hier_snn_mlp_policy import GaussianMLPPolicy_snn_hier
from rllab.envs.normalized_env import normalize
from sandbox.finetuning.envs.mujoco.swimmer_env import SwimmerEnv
from sandbox.finetuning.envs.mujoco.gather.swimmer_gather_env import SwimmerGatherEnv
from sandbox.finetuning.policies.categorical_mlp_policy import CategoricalMLPPolicy
from rllab.core.parameterized import Parameterized
from rllab.core.serializable import Serializable
from rllab.core.lasagne_powered import LasagnePowered
from rllab import spaces
from rllab.envs.env_spec import EnvSpec
import math
import numpy as np
import joblib
import os
from contextlib import contextmanager
from rllab import config
from sandbox.finetuning.policies.concurrent_hier_policy import HierarchicalPolicy
from sandbox.finetuning.envs.period_varying_env import PeriodVaryingEnv
from rllab.envs.mujoco.gather.gather_env import GatherEnv
from rllab.envs.mujoco.maze.maze_env import MazeEnv
from rllab.envs.normalized_env import NormalizedEnv # this is just to check if the env passed is a normalized maze
# todo: check the lasagne powered, look at the output layers in the init method
# class HierarchicalPolicy(StochasticPolicy, LasagnePowered, Serializable):
class HierarchicalPolicyRandomTime(HierarchicalPolicy):
"""
This class is built to contain the entire hierarchical policy,
both the manager and the skills network, so that it can interact with a normal environment
Concern: may need to pull out all the internal workings from GaussianMLPPolicy_snn_hier,
CategoricalMLPPolicy, in order to do the gradients correctly
"""
def __init__(
self,
env_spec,
env, # the inner one, I believe
pkl_path=None, # for the entire hierarchical policy
snn_pkl_path=None,
snn_json_path=None,
manager_pkl_path=None, # default is to initialize a new manager from scratch
max_period=10, # possible periods
latent_dim=6,
bilinear_integration=True,
trainable_snn=True,
trainable_manager=True,
hidden_sizes_snn=(64, 64),
hidden_sizes_selector=(32, 32)):
StochasticPolicy.__init__(self, env_spec)
self.env = env
self.periods = np.arange(1, max_period + 1)
assert len(self.periods) > 0
self.curr_period = self.periods[0]
self.max_period = max(self.periods)
self.latent_dim = latent_dim # unsure
self.bilinear_integration = bilinear_integration # unsure
self.count = 0 # keep track of how long it's been since sampling a latent skill
        self.curr_latent = None  # latent skill vector; set on the first get_action call
self.outer_action_space = spaces.Discrete(latent_dim)
self.trainable_manager = trainable_manager
self.random_period = True
self.fake_env = PeriodVaryingEnv(env)
if pkl_path:
data = joblib.load(os.path.join(config.PROJECT_PATH, pkl_path))
policy = data['policy']
self.manager = policy.manager
self.low_policy = policy.low_policy
# following two lines used for random manager
# outer_env_spec = EnvSpec(observation_space=self.env.observation_space, action_space=self.outer_action_space)
# self.manager = CategoricalMLPPolicy(env_spec=outer_env_spec, latent_dim=latent_dim, )
else:
# env spec that includes the extra parameter for time
self.low_policy = GaussianMLPPolicy_snn_hier(
env_spec=self.fake_env.spec,
env=self.fake_env,
pkl_path=snn_pkl_path,
json_path=snn_json_path,
trainable_snn=trainable_snn,
latent_dim=latent_dim,
bilinear_integration=bilinear_integration,
external_latent=True,
hidden_sizes_snn=hidden_sizes_snn,
hidden_sizes_selector=hidden_sizes_selector
)
# loading manager from pkl file
if manager_pkl_path:
manager_data = joblib.load(os.path.join(config.PROJECT_PATH, manager_pkl_path))
self.manager = manager_data['policy']
print("loaded manager")
else:
# self.outer_env = hierarchize_snn(self.env, time_steps_agg=10, pkl_path=snn_pkl_path)
outer_env_spec = EnvSpec(observation_space=self.fake_env.observation_space,
action_space=self.outer_action_space)
self.manager = CategoricalMLPPolicy(env_spec=outer_env_spec, latent_dim=latent_dim, )
if isinstance(env, MazeEnv) or isinstance(env, GatherEnv):
self.obs_robot_dim = env.robot_observation_space.flat_dim
self.obs_maze_dim = env.maze_observation_space.flat_dim
elif isinstance(env, NormalizedEnv):
if isinstance(env.wrapped_env, MazeEnv) or isinstance(env.wrapped_env, GatherEnv):
self.obs_robot_dim = env.wrapped_env.robot_observation_space.flat_dim
self.obs_maze_dim = env.wrapped_env.maze_observation_space.flat_dim
else:
self.obs_robot_dim = env.wrapped_env.observation_space.flat_dim
self.obs_maze_dim = 0
else:
self.obs_robot_dim = env.observation_space.flat_dim
self.obs_maze_dim = 0
Serializable.quick_init(self, locals()) # todo: ask if this fixes my problem
def get_random_period(self):
return self.periods[np.random.choice(len(self.periods))]
def get_action(self, observation):
resampled = False
time_remaining, extended_obs = None, None
if self.count % self.curr_period == 0: # sample a new latent skill
if self.random_period:
# print("Resampling, old period:", self.curr_period)
self.curr_period = self.get_random_period()
# print("New period:", self.curr_period)
time_remaining = (self.curr_period - self.count) / self.max_period
extended_obs = np.insert(observation, self.obs_robot_dim, time_remaining)
self.curr_latent = self.outer_action_space.flatten(self.manager.get_action(extended_obs)[0])
# print("latent", self.curr_latent)
self.low_policy.set_pre_fix_latent(self.curr_latent)
self.low_policy.reset()
resampled = True
if time_remaining is None or extended_obs is None:
time_remaining = (self.curr_period - self.count) / self.max_period
extended_obs = np.insert(observation, self.obs_robot_dim, time_remaining)
# print("Time remaining,", time_remaining)
action, info_dict = self.low_policy.get_action(extended_obs)
info_dict['resampled_period'] = resampled
info_dict['time_remaining'] = time_remaining
self.count = (self.count + 1) % self.curr_period
return action, info_dict
@contextmanager
def fix_period(self, period):
prev_period, prev_random_period = self.curr_period, self.random_period
self.curr_period = period
self.random_period = False
yield
self.curr_period = prev_period
self.random_period = prev_random_period
def reset(self):
self.count = 0
def log_diagnostics(self, paths):
# timesteps = 0
# manager_entropy = 0.0
# # skill_entropies = [0.0 for _ in range(self.latent_dim)]
# skill_entropy = 0.0
#
# for path in paths:
# timesteps += len(path['observations'])
#
# # calculate the entropy of the categorical distribution at each stage
# manager_dist_info = self.manager.dist_info(path['observations'])
# manager_entropy += self.manager.distribution.entropy(manager_dist_info).sum()
#
# # calculate the entropy of each skill
# latent_dist_infos = self.low_policy.dist_info_sym_all_latents(path['observations'])
# # for i in range(len(latent_dist_infos)):
# # latent_dist_info = {'log_std': latent_dist_infos[i]['log_std'].eval()}
# # skill_entropies[i] += self.low_policy.distribution.entropy(latent_dist_info).sum()
# latent_dist_info = {'log_std': latent_dist_infos[0]['log_std'].eval()}
# skill_entropy += self.low_policy.distribution.entropy(latent_dist_info).sum()
#
# logger.record_tabular("AverageManagerEntropy", manager_entropy/timesteps)
# # for i in range(self.latent_dim):
# # logger.record_tabular("AverageLatent{0}Entropy".format(str(i)), skill_entropies[i]/timesteps)
# logger.record_tabular("AverageLatentEntropy", skill_entropy / timesteps)
pass
def distribution(self):
raise NotImplementedError
def dist_info_sym(self, obs_var, state_info_vars):
if self.latent_dim == 1:
return self.low_policy.dist_info_sym(obs_var, state_info_vars)
else:
raise NotImplementedError
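# --- Hedged illustration of the normalized time feature computed inside ---
# --- get_action (pure python; hypothetical period values, no rllab needed) ---
if __name__ == "__main__":
    max_period, curr_period = 10, 4
    for count in range(curr_period):
        print(count, (curr_period - count) / max_period)  # 0.4, 0.3, 0.2, 0.1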
|
{"hexsha": "e944d93b7197d03763439492b4e059997c43792d", "size": 9202, "ext": "py", "lang": "Python", "max_stars_repo_path": "sandbox/finetuning/policies/concurrent_policy_random_time.py", "max_stars_repo_name": "andrewli77/rllab-finetuning", "max_stars_repo_head_hexsha": "2dae9141d0fdc284d04f18931907131d66b43023", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2020-04-27T23:53:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T03:13:16.000Z", "max_issues_repo_path": "sandbox/finetuning/policies/concurrent_policy_random_time.py", "max_issues_repo_name": "WeiChengTseng/rllab-finetuning", "max_issues_repo_head_hexsha": "2dae9141d0fdc284d04f18931907131d66b43023", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-14T13:30:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-14T13:30:22.000Z", "max_forks_repo_path": "sandbox/finetuning/policies/concurrent_policy_random_time.py", "max_forks_repo_name": "WeiChengTseng/rllab-finetuning", "max_forks_repo_head_hexsha": "2dae9141d0fdc284d04f18931907131d66b43023", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-06-17T03:28:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T03:13:03.000Z", "avg_line_length": 47.1897435897, "max_line_length": 122, "alphanum_fraction": 0.6733318844, "include": true, "reason": "import numpy", "num_tokens": 2028}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 12 13:08:12 2014
@author: Ken
"""
import marisa_trie
import re
import pandas as pd
import numpy as np
import sys
if sys.version_info[0] == 3:
basestring = str
unicode = str
from multiprocessing import Pool, cpu_count
"""
testTrie = marisa_trie.Trie([u'derpx', u'derpy', u'derpz'])
testFRDict = {u'derpx': u'derp', u'derpy': u'derp', u'derpz': u'derp'}
trieInput_df = pd.DataFrame(data=testFRDict, index=["Values"]).T
trieInput_df["Keys"] = trieInput_df.index
trieInput_df = trieInput_df.loc[:, ["Keys", "Values"]]
"""
class BulkFindReplacer:
def __init__(self, trieInput, version="v4"):
if isinstance(trieInput, basestring):
trieInput = pd.read_csv(trieInput)
self.frTrie = marisa_trie.Trie(list(trieInput.iloc[:, 0].apply(unicode)))
self.frDict = dict(zip(trieInput.iloc[:, 0].apply(unicode), trieInput.iloc[:, 1].apply(unicode)))
self.startRegex = re.compile(r'[^\w]')
self.endRegex = re.compile(r'[^\w]')
self.BulkFindReplace_str = self.BulkFindReplace_orig_str
if version == "v3":
self.BulkFindReplace_str = self.BulkFindReplace_v3_str
elif version == "v4":
self.BulkFindReplace_str = self.BulkFindReplace_v4_str
def BulkFindReplace_orig_str(self, inString, startRegex=r'[^\w]', endRegex=r'[^\w]'):
i = 0
outString = inString
strLen = len(outString)
while (i < strLen):
            if i == 0 or re.search(startRegex, outString[i - 1]):
remainingStr = outString[i:]
pref_list = self.frTrie.prefixes(remainingStr)
if len(pref_list) > 0:
# iterate backwards through list
for pref in pref_list[::-1]:
# make sure char after prefix is an endRegex char
                        if (len(remainingStr) == len(pref) or re.search(endRegex, remainingStr[len(pref)])):
# if there is a valid prefix, replace 1st instance
mapStr = self.frDict[pref]
if mapStr != pref:
outString = outString[:i] + remainingStr.replace(pref, mapStr, 1)
strLen = len(outString)
i += len(mapStr) - 1
break
i += 1
return outString
def BulkFindReplace_v3_str(self, inString, startRegex=r'[^\w]', endRegex=r'[^\w]'):
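        # Note: this variant uses the precompiled self.startRegex / self.endRegex;
        # the startRegex/endRegex arguments are kept only for API compatibility.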
i = 0
outString = inString
strLen = len(outString)
while (i < strLen):
            if i == 0 or self.startRegex.search(outString[i - 1]):
remainingStr = outString[i:]
pref_list = self.frTrie.prefixes(remainingStr)
if len(pref_list) > 0:
# iterate backwards through list
for pref in pref_list[::-1]:
# make sure char after prefix is an endRegex char
                        if (len(remainingStr) == len(pref) or self.endRegex.search(remainingStr[len(pref)])):
# if there is a valid prefix, replace 1st instance
mapStr = self.frDict[pref]
if mapStr != pref:
outString = outString[:i] + remainingStr.replace(pref, mapStr, 1)
strLen = len(outString)
i += len(mapStr) - 1
break
i += 1
return outString
def BulkFindReplace_v4_str(self, inString, startRegex=r'[^\w]', endRegex=r'[^\w]'):
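        # Note: like v3, this variant uses the precompiled self.startRegex /
        # self.endRegex; the arguments are kept only for API compatibility.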
i = 0
outString = inString
outString_list = []
iSkipTo = -1
lastCut = 0
for i in [0] + [x.end() for x in self.startRegex.finditer(inString)]:
if i >= iSkipTo:
remainingStr = inString[i:]
pref_list = self.frTrie.prefixes(remainingStr)
if len(pref_list) > 0:
# iterate backwards through list
for pref in pref_list[::-1]:
# make sure char after prefix is an endRegex char
                        if (len(remainingStr) == len(pref) or self.endRegex.search(remainingStr[len(pref)])):
# if there is a valid prefix, replace 1st instance
mapStr = self.frDict[pref]
if mapStr != pref:
addStr = inString[lastCut:i] + mapStr
outString_list.append(addStr)
lastCut = i + len(pref)
iSkipTo = i + len(pref)
break
if len(outString_list) > 0:
if lastCut < len(inString):
outString_list.append(inString[lastCut:len(inString)])
outString = "".join(outString_list)
else:
outString = inString
return outString
def BulkFindReplaceToCompletion_str(self, inString, startRegex=r'[^\w]', endRegex=r'[^\w]', maxCycles=10):
cycle = 0
previousStr = inString
inString = self.BulkFindReplace_str(inString, startRegex, endRegex)
cycle = 1
if inString == previousStr or cycle >= maxCycles:
return inString
# Save secondToLastStr to help prevent endless cycles
secondToLastStr = previousStr
previousStr = inString
inString = self.BulkFindReplace_str(inString, startRegex, endRegex)
cycle = 2
while (inString != previousStr and inString != secondToLastStr and cycle < maxCycles):
secondToLastStr = previousStr
previousStr = inString
inString = self.BulkFindReplace_str(inString, startRegex, endRegex)
cycle += 1
return inString
def BulkFindReplace(self, strSeries, startRegex=r'[^\w]', endRegex=r'[^\w]', maxCycles=10):
isBaseString = isinstance(strSeries, basestring)
strSeries = pd.Series(strSeries).copy()
strSeries = strSeries.apply(unicode)
strSeries = strSeries.apply(self.BulkFindReplaceToCompletion_str, (startRegex, endRegex, maxCycles))
if isBaseString:
return strSeries.iloc[0]
return strSeries
def BulkFindReplaceMPHelper(self, args):
strSeries, startRegex, endRegex, maxCycles = args
strSeries = strSeries.apply(self.BulkFindReplaceToCompletion_str, (startRegex, endRegex, maxCycles))
return strSeries
def BulkFindReplaceMultiProc(self, strSeries, startRegex=r'[^\w]', endRegex=r'[^\w]', maxCycles=10, workers=-1):
isBaseString = isinstance(strSeries, basestring)
strSeries = pd.Series(strSeries).copy()
strSeries = strSeries.fillna("")
strSeries = strSeries.apply(unicode)
if workers == -1:
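            # Heuristic default: assume hyperthreading and use physical cores
            # when the logical core count is even.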
if cpu_count() % 2 == 0:
workers = int(cpu_count()/2)
else:
workers = cpu_count()
if workers > 1:
pool = Pool(processes=workers)
strSeries_list = pool.map(self.BulkFindReplaceMPHelper, [(d, startRegex, endRegex, maxCycles) for d in np.array_split(strSeries, workers)])
pool.close()
strSeries = pd.concat(strSeries_list)
else:
strSeries = strSeries.apply(self.BulkFindReplaceToCompletion_str, (startRegex, endRegex, maxCycles))
if isBaseString:
return strSeries.iloc[0]
return strSeries
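
# A minimal usage sketch (not part of the original module): drive the replacer
# with a tiny find/replace mapping. By the convention assumed above, the first
# DataFrame column holds the "find" strings and the second the "replace" strings.
if __name__ == "__main__":
    demo_map = pd.DataFrame({"Keys": [u"colour", u"favour"],
                             "Values": [u"color", u"favor"]})
    replacer = BulkFindReplacer(demo_map)
    # "favourite" is left alone because a match must end at a word boundary.
    print(replacer.BulkFindReplace(u"My favourite colour is red."))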
|
{"hexsha": "b3704d7b31ba339d35509770d828de11d55b7994", "size": 8149, "ext": "py", "lang": "Python", "max_stars_repo_path": "BulkFindReplace/BulkFindReplace.py", "max_stars_repo_name": "KCzar/BulkFindReplace", "max_stars_repo_head_hexsha": "4783836508bad9428bc55307774546ba00447e42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BulkFindReplace/BulkFindReplace.py", "max_issues_repo_name": "KCzar/BulkFindReplace", "max_issues_repo_head_hexsha": "4783836508bad9428bc55307774546ba00447e42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BulkFindReplace/BulkFindReplace.py", "max_forks_repo_name": "KCzar/BulkFindReplace", "max_forks_repo_head_hexsha": "4783836508bad9428bc55307774546ba00447e42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7897435897, "max_line_length": 152, "alphanum_fraction": 0.5437476991, "include": true, "reason": "import numpy", "num_tokens": 1899}
|
import os
os.environ['DGLBACKEND'] = 'mxnet'
import mxnet as mx
import numpy as np
import scipy as sp
import dgl
from dgl import utils
def generate_rand_graph(n):
arr = (sp.sparse.random(n, n, density=0.1, format='coo') != 0).astype(np.int64)
return dgl.DGLGraph(arr, readonly=True)
def test_1neighbor_sampler_all():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg, seed_ids in dgl.contrib.sampling.NeighborSampler(g, 1, 100, neighbor_type='in',
num_workers=4):
assert len(seed_ids) == 1
src, dst, eid = g.in_edges(seed_ids, form='all')
# Test if there is a self loop
self_loop = mx.nd.sum(src == dst).asnumpy() == 1
if self_loop:
assert subg.number_of_nodes() == len(src)
else:
assert subg.number_of_nodes() == len(src) + 1
assert subg.number_of_edges() >= len(src)
child_ids = subg.map_to_subgraph_nid(seed_ids)
child_src, child_dst, child_eid = subg.in_edges(child_ids, form='all')
child_src1 = subg.map_to_subgraph_nid(src)
assert mx.nd.sum(child_src1 == child_src).asnumpy() == len(src)
def is_sorted(arr):
return np.sum(np.sort(arr) == arr) == len(arr)
def verify_subgraph(g, subg, seed_id):
src, dst, eid = g.in_edges(seed_id, form='all')
child_id = subg.map_to_subgraph_nid(seed_id)
child_src, child_dst, child_eid = subg.in_edges(child_id, form='all')
child_src = child_src.asnumpy()
# We don't allow duplicate elements in the neighbor list.
assert(len(np.unique(child_src)) == len(child_src))
# The neighbor list also needs to be sorted.
assert(is_sorted(child_src))
child_src1 = subg.map_to_subgraph_nid(src).asnumpy()
child_src1 = child_src1[child_src1 >= 0]
for i in child_src:
assert i in child_src1
def test_1neighbor_sampler():
g = generate_rand_graph(100)
    # Sample up to 5 in-neighbors of a single seed vertex per subgraph.
for subg, seed_ids in dgl.contrib.sampling.NeighborSampler(g, 1, 5, neighbor_type='in',
num_workers=4):
assert len(seed_ids) == 1
assert subg.number_of_nodes() <= 6
assert subg.number_of_edges() <= 5
verify_subgraph(g, subg, seed_ids)
def test_10neighbor_sampler_all():
g = generate_rand_graph(100)
    # Sample (effectively full, up to 100 neighbors) in-neighborhoods for batches of 10 seed vertices.
for subg, seed_ids in dgl.contrib.sampling.NeighborSampler(g, 10, 100, neighbor_type='in',
num_workers=4):
src, dst, eid = g.in_edges(seed_ids, form='all')
child_ids = subg.map_to_subgraph_nid(seed_ids)
child_src, child_dst, child_eid = subg.in_edges(child_ids, form='all')
child_src1 = subg.map_to_subgraph_nid(src)
assert mx.nd.sum(child_src1 == child_src).asnumpy() == len(src)
def check_10neighbor_sampler(g, seeds):
    # Sample up to 5 in-neighbors for each vertex in batches of 10 seeds.
for subg, seed_ids in dgl.contrib.sampling.NeighborSampler(g, 10, 5, neighbor_type='in',
num_workers=4, seed_nodes=seeds):
assert subg.number_of_nodes() <= 6 * len(seed_ids)
assert subg.number_of_edges() <= 5 * len(seed_ids)
for seed_id in seed_ids:
verify_subgraph(g, subg, seed_id)
def test_10neighbor_sampler():
g = generate_rand_graph(100)
check_10neighbor_sampler(g, None)
check_10neighbor_sampler(g, seeds=np.unique(np.random.randint(0, g.number_of_nodes(),
size=int(g.number_of_nodes() / 10))))
if __name__ == '__main__':
test_1neighbor_sampler_all()
test_10neighbor_sampler_all()
test_1neighbor_sampler()
test_10neighbor_sampler()
|
{"hexsha": "422a0d8c8bfaafdeae336e4ee7dbdb8c05ffbe23", "size": 4038, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/mxnet/test_sampler.py", "max_stars_repo_name": "sufeidechabei/dgl", "max_stars_repo_head_hexsha": "f9f92803b422c04b6d8e3f95b18f71cf158f3b1f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-06T01:30:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-06T01:30:23.000Z", "max_issues_repo_path": "tests/mxnet/test_sampler.py", "max_issues_repo_name": "sufeidechabei/dgl", "max_issues_repo_head_hexsha": "f9f92803b422c04b6d8e3f95b18f71cf158f3b1f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/mxnet/test_sampler.py", "max_forks_repo_name": "sufeidechabei/dgl", "max_forks_repo_head_hexsha": "f9f92803b422c04b6d8e3f95b18f71cf158f3b1f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5052631579, "max_line_length": 103, "alphanum_fraction": 0.6384348687, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1014}
|
# This proposal samples exogenous variables from the prior and updates them
# using the inverse transform.
# function stdproposal(qω, x::T, ω) where T
# display(ω)
# @show T
# if x in keys(ω.data)
# nothing
# else
# x_ = rand(qω, x.class)
# @show x => x_
# end
# @show T
# @assert false
# end
stdproposal(qω, x, ω) = nothing
# function stdproposal(qω, x::Member{<:Distribution}, ω)
# @show x
# display(ω)
# @show ω.data[x]
# if x in keys(ω)
# @show "here"
# @assert false
# else
# @show "not here"
# @assert false
# end
# end
|
{"hexsha": "b4bb72f669fb04fef142786b4481e8ecd90437a0", "size": 573, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "OmegaCore/src/proposal/stdproposal.jl", "max_stars_repo_name": "zenna/expect", "max_stars_repo_head_hexsha": "48bd661df410777eeb8940876a5cc8817eed2ac5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OmegaCore/src/proposal/stdproposal.jl", "max_issues_repo_name": "zenna/expect", "max_issues_repo_head_hexsha": "48bd661df410777eeb8940876a5cc8817eed2ac5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OmegaCore/src/proposal/stdproposal.jl", "max_forks_repo_name": "zenna/expect", "max_forks_repo_head_hexsha": "48bd661df410777eeb8940876a5cc8817eed2ac5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.1, "max_line_length": 70, "alphanum_fraction": 0.5846422339, "num_tokens": 202}
|
import data.nat.basic
example (u w x y z : ℕ) (h₁ : x = y + z) (h₂ : w = u + x) :
w = z + y + u :=
by simp [*, add_assoc, add_comm, add_left_comm]
variables (p q r : Prop)
example (hp : p) : p ∧ q ↔ q :=
by simp *
example (hp : p) : p ∨ q :=
by simp *
example (hp : p) (hq : q) : p ∧ (q ∨ r) :=
by simp *
|
{"author": "agryman", "repo": "theorem-proving-in-lean", "sha": "cf5a3a19d0d9d9c0a4f178f79e9b0fa67c5cddb9", "save_path": "github-repos/lean/agryman-theorem-proving-in-lean", "path": "github-repos/lean/agryman-theorem-proving-in-lean/theorem-proving-in-lean-cf5a3a19d0d9d9c0a4f178f79e9b0fa67c5cddb9/src/05-Tactics/example-5.7-6.lean"}
|
from os.path import join
import numpy as np
import pytest
from warnings import catch_warnings
from hera_sim.defaults import defaults
from hera_sim.config import CONFIG_PATH
from hera_sim.sigchain import gen_bandpass
from hera_sim.interpolators import Tsky, Beam
def test_config_swap():
defaults.set("h1c")
config1 = defaults().copy()
defaults.set("h2c", refresh=True)
assert config1 != defaults()
def test_direct_config_path():
config = join(CONFIG_PATH, "H2C.yaml")
defaults.set(config, refresh=True)
# check some of the parameters
assert defaults()["integration_time"] == 8.59
assert isinstance(defaults()["Tsky_mdl"], Tsky)
assert isinstance(defaults()["omega_p"], Beam)
def test_null_config():
defaults.set(None, refresh=True)
assert defaults() == {}
defaults.deactivate()
def test_multiple_param_specification():
config = {0: {"Nfreqs": 100}, 1: {"Nfreqs": 200}}
with catch_warnings(record=True) as w:
defaults.set(config, refresh=True)
        # make sure the recorded warning actually carries a message
        assert str(w[0].message) != ""
defaults.deactivate()
def test_bandpass_changes():
defaults.set("h1c", refresh=True)
fqs = np.linspace(0.1, 0.2, 100)
np.random.seed(0)
bp = gen_bandpass(fqs, [0])[0]
defaults.set("h2c", refresh=True)
np.random.seed(0)
assert not np.all(bp == gen_bandpass(fqs, [0])[0])
defaults.deactivate()
def test_activate_and_deactivate():
defaults.activate()
assert defaults._override_defaults
defaults.deactivate()
assert not defaults._override_defaults
def test_dict_unpacking():
config = {
"setup": {
"frequency_array": {"Nfreqs": 100, "start_freq": 100e6},
"time_array": {"Ntimes": 50, "start_time": 2e6},
},
"telescope": {"omega_p": np.ones(100)},
}
defaults.set(config, refresh=True)
for value in defaults().values():
assert not isinstance(value, dict)
defaults.deactivate()
def test_refresh():
# choose some defaults to start with
defaults.set("h1c")
# now use new, simple defaults
config = {"Nfreqs": 100}
defaults.set(config, refresh=True)
# check that refresh is working
assert "Ntimes" not in defaults()
assert "Nfreqs" in defaults()
# default behavior is to not refresh, just update
config = {"Nfreqs": 200, "Ntimes": 50}
defaults.set(config)
assert "Ntimes" in defaults()
assert defaults("Nfreqs") == 200
defaults.deactivate()
def test_call_with_bad_component():
defaults.set("h1c")
with pytest.raises(KeyError) as err:
defaults("not_a_valid_key")
assert "not_a_valid_key not found in configuration." == err.value.args[0]
defaults.deactivate()
def test_bad_config_type():
not_a_string = 1
with pytest.raises(ValueError) as err:
defaults.set(not_a_string)
assert "The configuration must be a" in err.value.args[0]
defaults.deactivate()
def test_bad_config_file():
with pytest.raises(FileNotFoundError):
defaults.set("not_a_file")
defaults.deactivate()
|
{"hexsha": "b0cc661522242711a4653b5e7228b6b4def7a5f5", "size": 3117, "ext": "py", "lang": "Python", "max_stars_repo_path": "hera_sim/tests/test_defaults.py", "max_stars_repo_name": "hughbg/hera_sim", "max_stars_repo_head_hexsha": "b9f4fc39437f586f6ddfa908cf5c5f2e2a6d2231", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2018-08-13T14:29:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T00:31:14.000Z", "max_issues_repo_path": "hera_sim/tests/test_defaults.py", "max_issues_repo_name": "hughbg/hera_sim", "max_issues_repo_head_hexsha": "b9f4fc39437f586f6ddfa908cf5c5f2e2a6d2231", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 195, "max_issues_repo_issues_event_min_datetime": "2018-06-14T16:07:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T22:26:41.000Z", "max_forks_repo_path": "hera_sim/tests/test_defaults.py", "max_forks_repo_name": "hughbg/hera_sim", "max_forks_repo_head_hexsha": "b9f4fc39437f586f6ddfa908cf5c5f2e2a6d2231", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-01-27T06:55:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-01T17:58:56.000Z", "avg_line_length": 28.0810810811, "max_line_length": 77, "alphanum_fraction": 0.6740455566, "include": true, "reason": "import numpy", "num_tokens": 778}
|
import pandas as pd
from pyfolio import timeseries
import pyfolio
from copy import deepcopy
from finrl.marketdata.yahoodownloader import YahooDownloader
from finrl.config import config
def get_daily_return(df, value_col_name="account_value"):
'''
    This function takes the account-value dataframe returned by
    env.get_save_asset_memory and computes daily returns from the column
    named by 'value_col_name'.
'''
df = deepcopy(df)
df["daily_return"] = df[value_col_name].pct_change(1)
df['date'] = pd.to_datetime(df['date'])
df.set_index('date', inplace = True, drop = True)
df.index = df.index.tz_localize("UTC")
return pd.Series(df['daily_return'], index = df.index)
def backtest_stats(account_value, value_col_name="account_value"):
'''
    This function takes an account-value dataframe and computes backtesting
    performance statistics for the specified value column.
'''
dr_test = get_daily_return(account_value, value_col_name=value_col_name)
perf_stats_all = timeseries.perf_stats(
returns=dr_test,
positions=None,
transactions=None,
turnover_denom="AGB",
)
print(perf_stats_all)
return perf_stats_all
def backtest_plot(
account_value,
baseline_start=config.START_TRADE_DATE,
baseline_end=config.END_DATE,
baseline_ticker="^DJI",
value_col_name="account_value",
):
'''
    This function takes the dataframe produced by env.save_asset_memory, along
    with the specified value column, and displays a backtesting plot.
    It also takes a baseline ticker so the account value can be compared
    against a benchmark.
'''
df = deepcopy(account_value)
test_returns = get_daily_return(df, value_col_name=value_col_name)
baseline_df = get_baseline(
ticker=baseline_ticker, start=baseline_start, end=baseline_end
)
baseline_returns = get_daily_return(baseline_df, value_col_name='close')
with pyfolio.plotting.plotting_context(font_scale=1.1):
pyfolio.create_full_tear_sheet(
returns=test_returns, benchmark_rets=baseline_returns, set_context=False
)
def get_baseline(ticker, start, end):
'''
    This function downloads price data for a ticker using YahooDownloader.
'''
dji = YahooDownloader(
start_date=start, end_date=end, ticker_list=[ticker]
).fetch_data()
return dji
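
# A minimal sketch (not part of the original module): exercise get_daily_return
# on a synthetic account-value dataframe; the 'date' and 'account_value' column
# names follow the conventions assumed above.
if __name__ == "__main__":
    demo = pd.DataFrame({
        "date": pd.date_range("2021-01-04", periods=4, freq="D"),
        "account_value": [1000000.0, 1010000.0, 1005000.0, 1020000.0],
    })
    print(get_daily_return(demo))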
|
{"hexsha": "7ed7ab27f08d16ae0e2197d739b42eda484461f6", "size": 2438, "ext": "py", "lang": "Python", "max_stars_repo_path": "finrl/trade/backtest.py", "max_stars_repo_name": "zhaoranwang/FinRL-Library", "max_stars_repo_head_hexsha": "08351591ac104484b6e23ed3a311e02bb23afda2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "finrl/trade/backtest.py", "max_issues_repo_name": "zhaoranwang/FinRL-Library", "max_issues_repo_head_hexsha": "08351591ac104484b6e23ed3a311e02bb23afda2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "finrl/trade/backtest.py", "max_forks_repo_name": "zhaoranwang/FinRL-Library", "max_forks_repo_head_hexsha": "08351591ac104484b6e23ed3a311e02bb23afda2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-25T21:58:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T21:58:04.000Z", "avg_line_length": 29.7317073171, "max_line_length": 126, "alphanum_fraction": 0.7202625103, "include": true, "reason": "import numpy", "num_tokens": 548}
|
[STATEMENT]
lemma evalc_evaln: "<c,s> -c-> t \<Longrightarrow> \<exists>n. <c,s> -n-> t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. <c,s> -c-> t \<Longrightarrow> \<exists>n. <c,s> -n-> t
[PROOF STEP]
apply (erule evalc.induct)
[PROOF STATE]
proof (prove)
goal (10 subgoals):
1. \<And>s. \<exists>n. <SKIP,s> -n-> s
2. \<And>X a s. \<exists>n. <X:==a,s> -n-> s[X::=a s]
3. \<And>c s0 Y a s1. \<lbrakk><c,s0[Loc Y::=a s0]> -c-> s1; \<exists>n. <c,s0[Loc Y::=a s0]> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <LOCAL Y:=a IN c,s0> -n-> s1[Loc Y::=s0<Y>]
4. \<And>c0 s0 s1 c1 s2. \<lbrakk><c0,s0> -c-> s1; \<exists>n. <c0,s0> -n-> s1; <c1,s1> -c-> s2; \<exists>n. <c1,s1> -n-> s2\<rbrakk> \<Longrightarrow> \<exists>n. <c0;; c1,s0> -n-> s2
5. \<And>b s c0 s1 c1. \<lbrakk>b s; <c0,s> -c-> s1; \<exists>n. <c0,s> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <IF b THEN c0 ELSE c1,s> -n-> s1
6. \<And>b s c1 s1 c0. \<lbrakk>\<not> b s; <c1,s> -c-> s1; \<exists>n. <c1,s> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <IF b THEN c0 ELSE c1,s> -n-> s1
7. \<And>b s c. \<not> b s \<Longrightarrow> \<exists>n. <WHILE b DO c,s> -n-> s
8. \<And>b s0 c s1 s2. \<lbrakk>b s0; <c,s0> -c-> s1; \<exists>n. <c,s0> -n-> s1; <WHILE b DO c,s1> -c-> s2; \<exists>n. <WHILE b DO c,s1> -n-> s2\<rbrakk> \<Longrightarrow> \<exists>n. <WHILE b DO c,s0> -n-> s2
9. \<And>pn s0 s1. \<lbrakk><the (body pn),s0> -c-> s1; \<exists>n. <the (body pn),s0> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <BODY pn,s0> -n-> s1
10. \<And>pn s0 a s1 X. \<lbrakk><BODY pn,setlocs s0 newlocs[Loc Arg::=a s0]> -c-> s1; \<exists>n. <BODY pn,setlocs s0 newlocs[Loc Arg::=a s0]> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <X:=CALL pn(a),s0> -n-> setlocs s1 (loc s0)[X::=s1<Res>]
[PROOF STEP]
apply (tactic \<open>ALLGOALS (REPEAT o eresolve_tac \<^context> [exE])\<close>)
[PROOF STATE]
proof (prove)
goal (10 subgoals):
1. \<And>s. \<exists>n. <SKIP,s> -n-> s
2. \<And>X a s. \<exists>n. <X:==a,s> -n-> s[X::=a s]
3. \<And>c s0 Y a s1 n. \<lbrakk><c,s0[Loc Y::=a s0]> -c-> s1; <c,s0[Loc Y::=a s0]> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <LOCAL Y:=a IN c,s0> -n-> s1[Loc Y::=s0<Y>]
4. \<And>c0 s0 s1 c1 s2 n na. \<lbrakk><c0,s0> -c-> s1; <c1,s1> -c-> s2; <c0,s0> -n-> s1; <c1,s1> -na-> s2\<rbrakk> \<Longrightarrow> \<exists>n. <c0;; c1,s0> -n-> s2
5. \<And>b s c0 s1 c1 n. \<lbrakk>b s; <c0,s> -c-> s1; <c0,s> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <IF b THEN c0 ELSE c1,s> -n-> s1
6. \<And>b s c1 s1 c0 n. \<lbrakk>\<not> b s; <c1,s> -c-> s1; <c1,s> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <IF b THEN c0 ELSE c1,s> -n-> s1
7. \<And>b s c. \<not> b s \<Longrightarrow> \<exists>n. <WHILE b DO c,s> -n-> s
8. \<And>b s0 c s1 s2 n na. \<lbrakk>b s0; <c,s0> -c-> s1; <WHILE b DO c,s1> -c-> s2; <c,s0> -n-> s1; <WHILE b DO c,s1> -na-> s2\<rbrakk> \<Longrightarrow> \<exists>n. <WHILE b DO c,s0> -n-> s2
9. \<And>pn s0 s1 n. \<lbrakk><the (body pn),s0> -c-> s1; <the (body pn),s0> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <BODY pn,s0> -n-> s1
10. \<And>pn s0 a s1 X n. \<lbrakk><BODY pn,setlocs s0 newlocs[Loc Arg::=a s0]> -c-> s1; <BODY pn,setlocs s0 newlocs[Loc Arg::=a s0]> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <X:=CALL pn(a),s0> -n-> setlocs s1 (loc s0)[X::=s1<Res>]
[PROOF STEP]
apply (tactic \<open>TRYALL (EVERY' [dresolve_tac \<^context> @{thms evaln_max2}, assume_tac \<^context>,
REPEAT o eresolve_tac \<^context> [exE, conjE]])\<close>)
[PROOF STATE]
proof (prove)
goal (10 subgoals):
1. \<And>s. \<exists>n. <SKIP,s> -n-> s
2. \<And>X a s. \<exists>n. <X:==a,s> -n-> s[X::=a s]
3. \<And>c s0 Y a s1 n. \<lbrakk><c,s0[Loc Y::=a s0]> -c-> s1; <c,s0[Loc Y::=a s0]> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <LOCAL Y:=a IN c,s0> -n-> s1[Loc Y::=s0<Y>]
4. \<And>c0 s0 s1 c1 s2 n na nb. \<lbrakk><c0,s0> -c-> s1; <c1,s1> -c-> s2; <c1,s1> -na-> s2; <c0,s0> -nb-> s1; <c1,s1> -nb-> s2\<rbrakk> \<Longrightarrow> \<exists>n. <c0;; c1,s0> -n-> s2
5. \<And>b s c0 s1 c1 n. \<lbrakk>b s; <c0,s> -c-> s1; <c0,s> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <IF b THEN c0 ELSE c1,s> -n-> s1
6. \<And>b s c1 s1 c0 n. \<lbrakk>\<not> b s; <c1,s> -c-> s1; <c1,s> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <IF b THEN c0 ELSE c1,s> -n-> s1
7. \<And>b s c. \<not> b s \<Longrightarrow> \<exists>n. <WHILE b DO c,s> -n-> s
8. \<And>b s0 c s1 s2 n na nb. \<lbrakk>b s0; <c,s0> -c-> s1; <WHILE b DO c,s1> -c-> s2; <WHILE b DO c,s1> -na-> s2; <c,s0> -nb-> s1; <WHILE b DO c,s1> -nb-> s2\<rbrakk> \<Longrightarrow> \<exists>n. <WHILE b DO c,s0> -n-> s2
9. \<And>pn s0 s1 n. \<lbrakk><the (body pn),s0> -c-> s1; <the (body pn),s0> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <BODY pn,s0> -n-> s1
10. \<And>pn s0 a s1 X n. \<lbrakk><BODY pn,setlocs s0 newlocs[Loc Arg::=a s0]> -c-> s1; <BODY pn,setlocs s0 newlocs[Loc Arg::=a s0]> -n-> s1\<rbrakk> \<Longrightarrow> \<exists>n. <X:=CALL pn(a),s0> -n-> setlocs s1 (loc s0)[X::=s1<Res>]
[PROOF STEP]
apply (tactic
\<open>ALLGOALS (resolve_tac \<^context> [exI] THEN'
resolve_tac \<^context> @{thms evaln.intros} THEN_ALL_NEW assume_tac \<^context>)\<close>)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 2807, "file": null, "length": 5}
|
import re
import pandas as pd
import numpy as np
import plotly.graph_objects as go
from plotly.offline import plot
from plotly.colors import n_colors
class autoViz:
"""This class implements model visualization.
Parameters
----------
preprocess_dict : dict, default = None
1st output result (DICT_PREPROCESS) of autoPipe module.
report : df, default = None
4th output result (dyna_report) of autoPipe module.
Example
-------
.. [Example]:
References
----------
"""
def __init__(self,preprocess_dict = None,report = None ):
self.DICT_PREPROCESSING = preprocess_dict
self.dyna_report = report
def clf_table_report(self):
"""This function implements heatmap style pipeline cluster's model evaluation report(Dynamic Table) for classification output report.
Parameters
----------
Example
-------
.. [Example] https://optimal-flow.readthedocs.io/en/latest/demos.html#pipeline-cluster-model-evaluation-dynamic-table-using-autoviz
References
----------
"""
df = self.dyna_report
colors = n_colors('rgb(49, 130, 189)', 'rgb(239, 243, 255)', 15, colortype='rgb')
bins = [-1, 2, 4, 6, 7, 8, 9, 11]
bins_latency = [0, 5, 10, 15, 20, 50, 80, 100]
labels = [1,2,3,4,5,6,7]
fig = go.Figure(data=[go.Table(
header=dict(values=list(df.columns),
fill_color='paleturquoise',
font=dict(color='black', size=12),
align='center'),
cells=dict(values=[df.Dataset,df.Model_Name,df.Best_Parameters,df.Accuracy,df.Precision,df.Recall,df.Latency],
# fill_color='lavender',
fill_color=['lavender','lavender','lavender',
np.array(colors)[pd.cut(df.Accuracy.apply(lambda x: x*10), bins=bins, labels=labels).astype(int)],
np.array(colors)[pd.cut(df.Precision.apply(lambda x: x*10), bins=bins, labels=labels).astype(int)],
np.array(colors)[pd.cut(df.Recall.apply(lambda x: x*10), bins=bins, labels=labels).astype(int)],
'lavender'],
align='left'))
# np.array(colors)[pd.cut(df.Latency,bins=bins_latency, labels=labels).astype(int)]],
])
fig.update_layout(title = f'Pipeline Cluster Model Classification Evaluation Report - autoViz <a href="https://www.linkedin.com/in/lei-tony-dong/"> ©Tony Dong</a>', font_size=8)
plot(fig,filename='Pipeline Cluster Model Evaluation Report.html',auto_open = False)
# fig.show()
def reg_table_report(self):
"""This function implements heatmap style pipeline cluster's model evaluation report(Dynamic Table) for regression output report.
Parameters
----------
Example
-------
.. [Example] https://optimal-flow.readthedocs.io/en/latest/demos.html#pipeline-cluster-model-evaluation-dynamic-table-using-autoviz
References
----------
"""
df = self.dyna_report
colors = n_colors('rgb(49, 130, 189)', 'rgb(239, 243, 255)', 15, colortype='rgb')
bins = [-1, 2, 4, 6, 7, 8, 9, 11]
labels = [1,2,3,4,5,6,7]
fig = go.Figure(data=[go.Table(
header=dict(values=list(df.columns),
fill_color='paleturquoise',
align='left'),
cells=dict(values=[df.Dataset,df.Model_Name,df.Best_Parameters,df.R2,df.MAE,df.MSE,df.RMSE,df.Latency],
# fill_color='lavender',
fill_color=['lavender','lavender','lavender',
np.array(colors)[pd.cut(df.R2.apply(lambda x: x*10), bins=bins, labels=labels).astype(int)],
'lavender',
'lavender',
'lavender',
'lavender'],
align='left'))
])
fig.update_layout(title = f'Pipeline Cluster Model Regression Evaluation Report - autoViz <a href="https://www.linkedin.com/in/lei-tony-dong/"> ©Tony Dong</a>', font_size=8)
plot(fig,filename='Pipeline Cluster Model Evaluation Report.html',auto_open = False)
# fig.show()
def clf_model_retrieval(self,metrics = None):
"""This function implements classification model retrieval visualization.
Parameters
----------
metrics : str, default = None
Value in ["accuracy","precision","recall"].
Example
-------
.. [Example] https://optimal-flow.readthedocs.io/en/latest/demos.html#pipeline-cluster-traversal-experiments-model-retrieval-diagram-using-autoviz
References
----------
"""
columns = ["Dataset","Encode_low_dimension","Encode_high_dimension","Winsorize","Scale"]
df_pp = pd.DataFrame(columns=columns)
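        # Parse each pipeline's preprocessing log string into one row of
        # (dataset, low-dim encoder, high-dim encoder, winsorize, scale) steps.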
for i in list(self.DICT_PREPROCESSING.keys()):
row_pp = [i]
s = self.DICT_PREPROCESSING[i]
ext = re.search("Encoded Features:(.*)']", s).group(1)
if ("onehot_" in ext) and ("Frequency_" in ext):
row_pp.append('Low Dim_Onehot')
row_pp.append('High Dim_Frequency')
row_pp.append(re.search('winsor_(.*)-Scaler', s).group(1))
row_pp.append(re.search('-Scaler_(.*)-- ', s).group(1))
df_pp.loc[len(df_pp)] = row_pp
elif ("onehot_" in ext) and ("Mean_" in ext):
row_pp.append('Low Dim_Onehot')
row_pp.append('High Dim_Mean')
row_pp.append(re.search('winsor_(.*)-Scaler', s).group(1))
row_pp.append(re.search('-Scaler_(.*)-- ', s).group(1))
df_pp.loc[len(df_pp)] = row_pp
elif ("onehot_" in ext) and ("Mean_" not in ext) and ("Frequency_" not in ext):
row_pp.append('Low Dim_Onehot')
row_pp.append('High Dim_No Encoder')
row_pp.append(re.search('winsor_(.*)-Scaler', s).group(1))
row_pp.append(re.search('-Scaler_(.*)-- ', s).group(1))
df_pp.loc[len(df_pp)] = row_pp
elif ("Label_" in ext) and ("Frequency_" in ext):
row_pp.append('Low Dim_Label')
row_pp.append('High Dim_Frequency')
row_pp.append(re.search('winsor_(.*)-Scaler', s).group(1))
row_pp.append(re.search('-Scaler_(.*)-- ', s).group(1))
df_pp.loc[len(df_pp)] = row_pp
elif ("Label_" in ext) and ("Mean_" in ext):
row_pp.append('Low Dim_Label')
row_pp.append('High Dim_Mean')
row_pp.append(re.search('winsor_(.*)-Scaler', s).group(1))
row_pp.append(re.search('-Scaler_(.*)-- ', s).group(1))
df_pp.loc[len(df_pp)] = row_pp
elif ("Label_" in ext) and ("Mean_" not in ext) and ("Frequency_" not in ext):
row_pp.append('Low Dim_Label')
row_pp.append('High Dim_No Encoder')
row_pp.append(re.search('winsor_(.*)-Scaler', s).group(1))
row_pp.append(re.search('-Scaler_(.*)-- ', s).group(1))
df_pp.loc[len(df_pp)] = row_pp
elif ("Frequency_" in ext) and ("onehot_" not in ext) and ("Label_" not in ext):
row_pp.append('Low Dim_No Encoder')
row_pp.append('High Dim_Frequency')
row_pp.append(re.search('winsor_(.*)-Scaler', s).group(1))
row_pp.append(re.search('-Scaler_(.*)-- ', s).group(1))
df_pp.loc[len(df_pp)] = row_pp
elif ("Mean_" in ext) and ("onehot_" not in ext) and ("Label_" not in ext):
row_pp.append('Low Dim_No Encoder')
row_pp.append('High Dim_Mean')
row_pp.append(re.search('winsor_(.*)-Scaler', s).group(1))
row_pp.append(re.search('-Scaler_(.*)-- ', s).group(1))
df_pp.loc[len(df_pp)] = row_pp
elif ("Frequency_" not in ext) and ("Mean_" not in ext) and ("onehot_" not in ext) and ("Label_" not in ext):
row_pp.append('Low Dim_No Encoder')
row_pp.append('High Dim_No Encoder')
row_pp.append(re.search('winsor_(.*)-Scaler', s).group(1))
row_pp.append(re.search('-Scaler_(.*)-- ', s).group(1))
df_pp.loc[len(df_pp)] = row_pp
if metrics == "accuracy":
df_report_Accuracy = df_pp.merge(self.dyna_report[['Dataset','Accuracy']], how = 'left', on = 'Dataset')
bins = [0, 0.70, 0.90, 1]
labels = ["Low Accuracy","High Accuracy","Top Accuracy"]
df_report_Accuracy['Level'] = pd.cut(df_report_Accuracy['Accuracy'], bins=bins, labels=labels)
df_report_Accuracy['cnt'] = 1
df_report_Accuracy.loc[df_report_Accuracy['Scale'] == 'None','Scale'] = "No Scaler"
df_report_Accuracy['Scale'] = 'Scale_'+df_report_Accuracy['Scale']
df_report_Accuracy['Winsorize'] = 'Winsorize_' + df_report_Accuracy['Winsorize']
step1_df = df_report_Accuracy.groupby(['Encode_low_dimension','Dataset'], as_index=False)['cnt'].count().rename({"cnt":"Total","Dataset":"antecedentIndex","Encode_low_dimension":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']]
step2_df = df_report_Accuracy.groupby(['Encode_low_dimension','Encode_high_dimension'], as_index=False)['cnt'].count().rename({"cnt":"Total","Encode_low_dimension":"antecedentIndex","Encode_high_dimension":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']]
step3_df = df_report_Accuracy.groupby(['Encode_high_dimension','Winsorize'], as_index=False)['cnt'].count().rename({"cnt":"Total","Encode_high_dimension":"antecedentIndex","Winsorize":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']]
step4_df = df_report_Accuracy.groupby(['Winsorize','Scale'], as_index=False)['cnt'].count().rename({"cnt":"Total","Winsorize":"antecedentIndex","Scale":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']]
step5_df = df_report_Accuracy.groupby(['Scale','Level'], as_index=False)['cnt'].count().rename({"cnt":"Total","Scale":"antecedentIndex","Level":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']].dropna()
integrated_df = pd.concat([step1_df,step2_df,step3_df,step4_df,step5_df],axis = 0)
label_df = pd.DataFrame(integrated_df['antecedentIndex'].append(integrated_df['consequentIndex']).drop_duplicates(),columns = {"label"})
label_df['Number'] = label_df.reset_index().index
label_list = list(label_df.label)
source_df = pd.DataFrame(integrated_df['antecedentIndex'])
source_df = source_df.merge(label_df, left_on=['antecedentIndex'], right_on = ['label'],how = 'left')
source_list = list(source_df['Number'])
target_df = pd.DataFrame(integrated_df['consequentIndex'])
target_df = target_df.merge(label_df, left_on=['consequentIndex'], right_on = ['label'],how = 'left')
target_list = list(target_df['Number'])
value_list = [int(i) for i in list(integrated_df.Total)]
fig = go.Figure(data=[go.Sankey(
node = dict(
pad = 15,
thickness = 10,
line = dict(color = 'rgb(25,100,90)', width = 0.5),
label = label_list,
color = 'rgb(71,172,55)'
),
link = dict(
source = source_list,
target = target_list,
value = value_list
))])
fig.update_layout(title = f'Pipeline Cluster Traversal Experiments - autoViz {metrics} Retrieval Diagram <a href="https://www.linkedin.com/in/lei-tony-dong/"> ©Tony Dong</a>', font_size=8)
plot(fig,filename = "Pipeline Cluster Retrieval Diagram.html",auto_open = False)
# fig.show()
elif metrics == "precision":
df_report_Precision = df_pp.merge(self.dyna_report[['Dataset','Precision']], how = 'left', on = 'Dataset')
bins = [0, 0.70, 0.90, 1]
labels = ["Low Precision","High Precision","Top Precision"]
df_report_Precision['Level'] = pd.cut(df_report_Precision['Precision'], bins=bins, labels=labels)
df_report_Precision['cnt'] = 1
df_report_Precision.loc[df_report_Precision['Scale'] == 'None','Scale'] = "No Scaler"
df_report_Precision['Scale'] = 'Scale_'+df_report_Precision['Scale']
df_report_Precision['Winsorize'] = 'Winsorize_' + df_report_Precision['Winsorize']
step1_df = df_report_Precision.groupby(['Encode_low_dimension','Dataset'], as_index=False)['cnt'].count().rename({"cnt":"Total","Dataset":"antecedentIndex","Encode_low_dimension":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']]
step2_df = df_report_Precision.groupby(['Encode_low_dimension','Encode_high_dimension'], as_index=False)['cnt'].count().rename({"cnt":"Total","Encode_low_dimension":"antecedentIndex","Encode_high_dimension":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']]
step3_df = df_report_Precision.groupby(['Encode_high_dimension','Winsorize'], as_index=False)['cnt'].count().rename({"cnt":"Total","Encode_high_dimension":"antecedentIndex","Winsorize":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']]
step4_df = df_report_Precision.groupby(['Winsorize','Scale'], as_index=False)['cnt'].count().rename({"cnt":"Total","Winsorize":"antecedentIndex","Scale":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']]
step5_df = df_report_Precision.groupby(['Scale','Level'], as_index=False)['cnt'].count().rename({"cnt":"Total","Scale":"antecedentIndex","Level":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']].dropna()
integrated_df = pd.concat([step1_df,step2_df,step3_df,step4_df,step5_df],axis = 0)
label_df = pd.DataFrame(integrated_df['antecedentIndex'].append(integrated_df['consequentIndex']).drop_duplicates(),columns = {"label"})
label_df['Number'] = label_df.reset_index().index
label_list = list(label_df.label)
source_df = pd.DataFrame(integrated_df['antecedentIndex'])
source_df = source_df.merge(label_df, left_on=['antecedentIndex'], right_on = ['label'],how = 'left')
source_list = list(source_df['Number'])
target_df = pd.DataFrame(integrated_df['consequentIndex'])
target_df = target_df.merge(label_df, left_on=['consequentIndex'], right_on = ['label'],how = 'left')
target_list = list(target_df['Number'])
value_list = [int(i) for i in list(integrated_df.Total)]
fig = go.Figure(data=[go.Sankey(
node = dict(
pad = 15,
thickness = 10,
line = dict(color = 'rgb(25,100,90)', width = 0.5),
label = label_list,
color = 'rgb(71,172,55)'
),
link = dict(
source = source_list,
target = target_list,
value = value_list
))])
fig.update_layout(title = f'Pipeline Cluster Traversal Experiments - autoViz {metrics} Retrieval Diagram <a href="https://www.linkedin.com/in/lei-tony-dong/"> ©Tony Dong</a>', font_size=8)
plot(fig,filename = "Pipeline Cluster Retrieval Diagram.html",auto_open = False)
# fig.show()
elif metrics == "recall":
            df_report_Recall = df_pp.merge(self.dyna_report[['Dataset','Recall']], how = 'left', on = 'Dataset')
bins = [0, 0.70, 0.90, 1]
labels = ["Low Recall","High Recall","Top Recall"]
df_report_Recall['Level'] = pd.cut(df_report_Recall['Recall'], bins=bins, labels=labels)
df_report_Recall['cnt'] = 1
df_report_Recall.loc[df_report_Recall['Scale'] == 'None','Scale'] = "No Scaler"
df_report_Recall['Scale'] = 'Scale_'+df_report_Recall['Scale']
df_report_Recall['Winsorize'] = 'Winsorize_' + df_report_Recall['Winsorize']
step1_df = df_report_Recall.groupby(['Encode_low_dimension','Dataset'], as_index=False)['cnt'].count().rename({"cnt":"Total","Dataset":"antecedentIndex","Encode_low_dimension":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']]
step2_df = df_report_Recall.groupby(['Encode_low_dimension','Encode_high_dimension'], as_index=False)['cnt'].count().rename({"cnt":"Total","Encode_low_dimension":"antecedentIndex","Encode_high_dimension":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']]
step3_df = df_report_Recall.groupby(['Encode_high_dimension','Winsorize'], as_index=False)['cnt'].count().rename({"cnt":"Total","Encode_high_dimension":"antecedentIndex","Winsorize":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']]
step4_df = df_report_Recall.groupby(['Winsorize','Scale'], as_index=False)['cnt'].count().rename({"cnt":"Total","Winsorize":"antecedentIndex","Scale":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']]
step5_df = df_report_Recall.groupby(['Scale','Level'], as_index=False)['cnt'].count().rename({"cnt":"Total","Scale":"antecedentIndex","Level":"consequentIndex"},axis = 1)[['antecedentIndex','consequentIndex','Total']].dropna()
integrated_df = pd.concat([step1_df,step2_df,step3_df,step4_df,step5_df],axis = 0)
label_df = pd.DataFrame(integrated_df['antecedentIndex'].append(integrated_df['consequentIndex']).drop_duplicates(),columns = {"label"})
label_df['Number'] = label_df.reset_index().index
label_list = list(label_df.label)
source_df = pd.DataFrame(integrated_df['antecedentIndex'])
source_df = source_df.merge(label_df, left_on=['antecedentIndex'], right_on = ['label'],how = 'left')
source_list = list(source_df['Number'])
target_df = pd.DataFrame(integrated_df['consequentIndex'])
target_df = target_df.merge(label_df, left_on=['consequentIndex'], right_on = ['label'],how = 'left')
target_list = list(target_df['Number'])
value_list = [int(i) for i in list(integrated_df.Total)]
fig = go.Figure(data=[go.Sankey(
node = dict(
pad = 15,
thickness = 10,
line = dict(color = 'rgb(25,100,90)', width = 0.5),
label = label_list,
color = 'rgb(71,172,55)'
),
link = dict(
source = source_list,
target = target_list,
value = value_list
))])
fig.update_layout(title = f'Pipeline Cluster Traversal Experiments - autoViz {metrics} Retrieval Diagram <a href="https://www.linkedin.com/in/lei-tony-dong/"> ©Tony Dong</a>', font_size=8)
plot(fig,filename = "Pipeline Cluster Retrieval Diagram.html",auto_open = False)
# fig.show()
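
# A minimal usage sketch (not part of the original module): drive clf_table_report
# with a hand-built report dataframe standing in for autoPipe's dyna_report output.
# It writes 'Pipeline Cluster Model Evaluation Report.html' to the working directory.
if __name__ == "__main__":
    demo_report = pd.DataFrame({
        "Dataset": ["Dataset_0", "Dataset_1"],
        "Model_Name": ["lgr", "rf"],
        "Best_Parameters": ["{'C': 1}", "{'n_estimators': 100}"],
        "Accuracy": [0.87, 0.91],
        "Precision": [0.85, 0.90],
        "Recall": [0.82, 0.88],
        "Latency": [3.2, 12.5],
    })
    autoViz(report=demo_report).clf_table_report()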
|
{"hexsha": "fc37acd8512d04482d8852ee628f6b1c07d7b4e5", "size": 19640, "ext": "py", "lang": "Python", "max_stars_repo_path": "optimalflow/autoViz.py", "max_stars_repo_name": "tonyleidong/OptimalFlow", "max_stars_repo_head_hexsha": "f2aaddaa083673f0343a579899ad0db7ee294707", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2020-08-30T18:40:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T11:36:29.000Z", "max_issues_repo_path": "optimalflow/autoViz.py", "max_issues_repo_name": "tonyleidong/optimalflow", "max_issues_repo_head_hexsha": "8c38b2f6681ba8754f4d3aeb0785d55e8d8310ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "optimalflow/autoViz.py", "max_forks_repo_name": "tonyleidong/optimalflow", "max_forks_repo_head_hexsha": "8c38b2f6681ba8754f4d3aeb0785d55e8d8310ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2020-09-08T17:17:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T03:07:28.000Z", "avg_line_length": 57.4269005848, "max_line_length": 294, "alphanum_fraction": 0.5935845214, "include": true, "reason": "import numpy", "num_tokens": 4726}
|
function test_measures()
m1 = Lebesgue()
@test !isdiscrete(m1)
@test iscontinuous(m1)
@test !isnormalized(m1)
@test domaintype(m1) == Float64
@test DomainIntegrals.unsafe_weightfun(m1, 0.4) == 1
m2 = DomainIntegrals.LebesgueUnit()
@test !isdiscrete(m2)
@test iscontinuous(m2)
@test isnormalized(m2)
@test domaintype(m2) == Float64
@test support(m2) == UnitInterval()
@test weightfun(m2, 0.4) == 1
@test weightfun(m2, 1.4) == 0
m3 = LegendreWeight()
@test !isdiscrete(m3)
@test iscontinuous(m3)
@test !isnormalized(m3)
@test domaintype(m3) == Float64
@test support(m3) == ChebyshevInterval()
@test weightfun(m3, 0.4) == 1
@test weightfun(m3, -0.4) == 1
@test weightfun(m3, 1.4) == 0
@test weightfun(m3, -1.4) == 0
x = 0.5
m4 = DiracWeight(x)
@test !isdiscrete(m4)
@test iscontinuous(m4)
@test isnormalized(m4)
@test domaintype(m4) == Float64
@test support(m4) == Point(x)
@test point(m4) == x
@test weightfun(m4, x) == Inf
@test weightfun(m4, x+1) == 0
@test weightfun(m4, big(x)) == Inf
@test weightfun(m4, big(x+1)) == 0
m5 = GaussianWeight{SVector{2,Float64}}()
@test !isdiscrete(m5)
@test iscontinuous(m5)
@test isnormalized(m5)
@test domaintype(m5) == SVector{2,Float64}
@test support(m5) == FullSpace{SVector{2,Float64}}()
@test weightfun(m5, SVector(0.0,0.0)) ≈ 1/(2pi)
@test weightfun(m5, SVector(0,0)) ≈ 1/(2pi)
@test weightfun(m5, SVector(big(0),big(0))) ≈ 1/(2pi)
@test weightfun(m5, SVector(big(0.0),big(0.0))) ≈ 1/(2pi)
m6 = LaguerreWeight(0.0)
@test !isdiscrete(m6)
@test iscontinuous(m6)
@test isnormalized(m6)
@test !isnormalized(LaguerreWeight(0.1))
@test domaintype(m6) == Float64
@test support(m6) == HalfLine()
@test weightfun(m6, 0.4) == exp(-0.4)
@test weightfun(m6, -0.4) == 0
@test weightfun(m6, big(0.4)) == exp(-big(0.4))
end
|
{"hexsha": "1856cba7c2fe462ef53e74a1a23003c8d9e60e91", "size": 1981, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_measures.jl", "max_stars_repo_name": "JuliaApproximation/DomainIntegrals.jl", "max_stars_repo_head_hexsha": "b2cb2fc9df91aef66fe1318c568da487ca433064", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-05-02T12:51:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-14T17:50:52.000Z", "max_issues_repo_path": "test/test_measures.jl", "max_issues_repo_name": "JuliaApproximation/DomainIntegrals.jl", "max_issues_repo_head_hexsha": "b2cb2fc9df91aef66fe1318c568da487ca433064", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-05-02T12:37:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T08:46:04.000Z", "max_forks_repo_path": "test/test_measures.jl", "max_forks_repo_name": "JuliaApproximation/DomainIntegrals.jl", "max_forks_repo_head_hexsha": "b2cb2fc9df91aef66fe1318c568da487ca433064", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-07T20:50:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-07T20:50:23.000Z", "avg_line_length": 30.4769230769, "max_line_length": 61, "alphanum_fraction": 0.607269056, "num_tokens": 773}
|
# This file is a part of ROOTFramework.jl, licensed under the MIT License (MIT).
cxxinclude("TBufferJSON.h")
export rootjson
rootjson(obj::CppValue, compact::Integer = 3) =
rootjson(pointer_to(obj), compact)
rootjson(obj::CppPtr, compact::Integer = 3) =
string(@cxx TBufferJSON::ConvertToJSON(obj, Int32(compact)))
|
{"hexsha": "ae046dc3b313de757b30c8c1dd3742732c2e94c7", "size": 322, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/json.jl", "max_stars_repo_name": "mppmu/ROOTFramework.jl", "max_stars_repo_head_hexsha": "30e162deb826356a9c7d792ab2c8d2aa61494f63", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-09-27T20:16:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-20T10:04:18.000Z", "max_issues_repo_path": "src/json.jl", "max_issues_repo_name": "mppmu/ROOTFramework.jl", "max_issues_repo_head_hexsha": "30e162deb826356a9c7d792ab2c8d2aa61494f63", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-02-21T09:28:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-21T09:28:29.000Z", "max_forks_repo_path": "src/json.jl", "max_forks_repo_name": "mppmu/ROOTFramework.jl", "max_forks_repo_head_hexsha": "30e162deb826356a9c7d792ab2c8d2aa61494f63", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-07-13T18:44:34.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:38:41.000Z", "avg_line_length": 24.7692307692, "max_line_length": 80, "alphanum_fraction": 0.7422360248, "num_tokens": 86}
|
# Make the model, perform cross validation and export submission file.
from sklearn.model_selection import cross_val_score
from sklearn import metrics
import pandas as pd
import numpy as np
def modelfit(algorithm, dftrain, dftest, predictors, target, IDcol, filename=None):
    #Fit the algorithm on the training data (predictor and target columns)
    algorithm.fit(dftrain[predictors], dftrain[target])
    #Predict on the training set:
    dftrain_predictions = algorithm.predict(dftrain[predictors])
    #Perform cross-validation:
    cv_score = cross_val_score(algorithm, dftrain[predictors], dftrain[target], cv=20, n_jobs=-1, scoring='neg_mean_squared_error')
cv_score = np.sqrt(np.abs(cv_score))
#Print model report:
print ("\n------Model Report----\n")
print ("RMSE : " , np.sqrt(metrics.mean_squared_error(dftrain[target].values, dftrain_predictions)))
print ("CV Score Mean : %.4g" %(np.mean(cv_score)))
print ("CV Score Std : %.4g" %(np.std(cv_score)))
print ("CV Score Min : %.4g" %(np.min(cv_score)))
print ("CV Score Max : %.4g" %(np.max(cv_score)))
    #Predict on testing data:
    dftest[target] = algorithm.predict(dftest[predictors])
    if filename is not None:
#Export submission file:
IDcol.append(target)
submission = pd.DataFrame({ x: dftest[x] for x in IDcol})
submission.to_csv(filename, index=False)
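
# A minimal usage sketch (not part of the original module): exercise modelfit
# with a linear model on synthetic data. Column names here are illustrative.
if __name__ == "__main__":
    from sklearn.linear_model import Ridge
    rng = np.random.RandomState(0)
    train = pd.DataFrame({"f1": rng.rand(100), "f2": rng.rand(100)})
    train["Purchase"] = 3 * train["f1"] - 2 * train["f2"] + rng.normal(0, 0.1, 100)
    test = pd.DataFrame({"ID": range(10), "f1": rng.rand(10), "f2": rng.rand(10)})
    modelfit(Ridge(), train, test, ["f1", "f2"], "Purchase", ["ID"])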
|
{"hexsha": "914106e53bed4a6b677a7a92afa5626aa06e70f1", "size": 1484, "ext": "py", "lang": "Python", "max_stars_repo_path": "support/mfit.py", "max_stars_repo_name": "NageshVani/BlackFridaySales", "max_stars_repo_head_hexsha": "8ca79ea99fbfaa475642e30df71c7c0950039f25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "support/mfit.py", "max_issues_repo_name": "NageshVani/BlackFridaySales", "max_issues_repo_head_hexsha": "8ca79ea99fbfaa475642e30df71c7c0950039f25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "support/mfit.py", "max_forks_repo_name": "NageshVani/BlackFridaySales", "max_forks_repo_head_hexsha": "8ca79ea99fbfaa475642e30df71c7c0950039f25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.4, "max_line_length": 136, "alphanum_fraction": 0.6933962264, "include": true, "reason": "import numpy", "num_tokens": 372}
|
import cv2
import imutils
import numpy as np
from DetectRed import *
from DetectRed import checkRed
def splitIntoCardImages(img):
#Splits the image into an array of images that each have 1 card in them
images = []
blur = cv2.GaussianBlur(img, (9, 9), 0)
## convert to hsv
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    ## mask of green in HSV: (36, 25, 5) ~ (70, 255, 255)
    mask = cv2.inRange(hsv, (36, 25, 5), (70, 255, 255))
## slice the green
imask = mask > 0
green = np.zeros_like(img, np.uint8)
green[imask] = img[imask]
memes, threshImg = cv2.threshold(green, 0, 255, cv2.THRESH_BINARY_INV)
grayScale = cv2.cvtColor(threshImg, cv2.COLOR_BGR2GRAY)
c = cv2.findContours(grayScale.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
c = imutils.grab_contours(c)
for i in range(len(c)):
perimeter = cv2.arcLength(c[i], True)
if perimeter > 500:
extLeft = tuple(c[i][c[i][:, :, 0].argmin()][0])
extRight = tuple(c[i][c[i][:, :, 0].argmax()][0])
extTop = tuple(c[i][c[i][:, :, 1].argmin()][0])
extBot = tuple(c[i][c[i][:, :, 1].argmax()][0])
#TM - Transform matrix for moving the card to the top left corner
TM = np.float32([[1, 0, -extLeft[0]], [0, 1, -extTop[1]]])
# Crop the card image
imgT = cv2.warpAffine(img, TM, ((extRight[0] - extLeft[0]), (extBot[1] - extTop[1])))
if imgT.shape[0] > 200:
images.append(imgT)
#cv2.imshow("Single card pls" + str(i), imgT)
return images
def splitCornerToSuitAndNumber(img, isRed):
# returns 2 thresholded images suit, number of the card
# has to be given colored image of the corner with the suit blob on the left and number blob on right side of image
images = []
## convert to hsv
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if isRed:
minSaturation = 170
maxSaturation = 255
mask1 = cv2.inRange(hsv, (170, minSaturation, 25), (180, maxSaturation, 255))
mask2 = cv2.inRange(hsv, (0, minSaturation, 25), (19, maxSaturation, 255))
mask = mask1 | mask2
## slice the green
imask = mask > 0
red = np.zeros_like(img, np.uint8)
red[imask] = img[imask]
memes, threshImg = cv2.threshold(red, 0, 255, cv2.THRESH_BINARY_INV)
threshImg = cv2.bitwise_not(threshImg)
#cv2.imshow("Red Thresh",threshImg)
else:
memes, threshImg = cv2.threshold(grey, 50, 255, cv2.THRESH_BINARY_INV)
threshImg = cv2.cvtColor(threshImg, cv2.COLOR_GRAY2BGR)
#cv2.imshow("Black threshold", threshImg)
grayScale = cv2.cvtColor(threshImg, cv2.COLOR_BGR2GRAY)
c = cv2.findContours(grayScale.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
c = imutils.grab_contours(c)
for i in range(len(c)):
perimeter = cv2.arcLength(c[i], True)
if perimeter > 30 and perimeter < 500:
x, y, w, h = cv2.boundingRect(c[i])
if h > 10:
# print(i)
extLeft = tuple(c[i][c[i][:, :, 0].argmin()][0])
extRight = tuple(c[i][c[i][:, :, 0].argmax()][0])
extTop = tuple(c[i][c[i][:, :, 1].argmin()][0])
extBot = tuple(c[i][c[i][:, :, 1].argmax()][0])
                if extTop[1] > 2:  # skip objects touching the image top; they cause problems with cards like 10 and Queen
#print("contourTop: " + str(extTop[1]) + " image top: " + str(img.shape[1]))
                    # TM - transform matrix that moves the blob to the top-left corner
TM = np.float32([[1, 0, -extLeft[0]], [0, 1, -extTop[1]]])
imgT = cv2.warpAffine(threshImg, TM, ((extRight[0] - extLeft[0]) + 2, (extBot[1] - extTop[1]) + 2))
#cv2.imshow("Single card pls" + str(i), imgT)
images.append(imgT)
return findTwoBiggestImages(images)
def findTwoBiggestImages(images):
    #Returns the two biggest images in the list, measured by pixel area
#if(len(images) < 2):
#print("Contours failed")
#print(len(images))
#find first biggest image - number
biggestImage1 = images[0]
for i in range(len(images)):
if(images[i].shape[0] * images[i].shape[1] > biggestImage1.shape[0] * biggestImage1.shape[1]):
height = images[i].shape[0]
width = images[i].shape[1]
if(width > height):
if( width / height > 1.3):
biggestImage1 = images[i]
else:
if (height / width > 1.3):
biggestImage1 = images[i]
#cv2.imshow("the thing",biggestImage1)
images.remove(biggestImage1)
#cv2.imshow("Number", biggestImage1)
    # Build a new list that keeps only blobs whose center pixel is foreground,
    # i.e. skip blobs with holes in the middle; this heuristic might need changing.
    imagesToCheck = []
    try:
        for i in range(len(images)):
            width = images[i].shape[1]
            height = images[i].shape[0]
            image = images[i]
            centerH = int(height / 2)
            centerW = int(width / 2)
            image_data = np.asarray(image)
            centerPixel = image_data[centerH, centerW]
            if centerPixel[0] > 0:
                imagesToCheck.append(images[i])
    except Exception:
        # Preserve the original permissive behavior: a degenerate image aborts
        # the scan and leaves imagesToCheck with whatever was collected so far.
        pass
#print(len(imagesToCheck))
#find second biggest image
biggestImage2 = imagesToCheck[0]
for i in range(len(imagesToCheck)):
if (imagesToCheck[i].shape[0] * imagesToCheck[i].shape[1] > biggestImage2.shape[0] * biggestImage2.shape[1]):
biggestImage2 = imagesToCheck[i]
#cv2.imshow("Suit", biggestImage2)
return biggestImage2, biggestImage1
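if __name__ == '__main__':
    # Minimal smoke test for findTwoBiggestImages (assumed inputs: 3-channel
    # binary blob crops like the ones produced by the contour loop above).
    import numpy as np  # the full module already imports numpy/cv2/imutils at the top
    number_blob = np.full((40, 25, 3), 255, np.uint8)  # big tall blob -> treated as the number
    suit_blob = np.full((20, 12, 3), 255, np.uint8)    # smaller solid blob -> treated as the suit
    suit, number = findTwoBiggestImages([number_blob, suit_blob])
    print(suit.shape, number.shape)  # expected: (20, 12, 3) (40, 25, 3)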
|
{"hexsha": "5b14b3d0b1a05800f3f6b34b69e08141513611c2", "size": 6231, "ext": "py", "lang": "Python", "max_stars_repo_path": "openCV_version/venv/Scripts/finalCode/ImageSplit.py", "max_stars_repo_name": "Jokubas126/PokerAssistant_CV", "max_stars_repo_head_hexsha": "930c945c11634dce9702fd9774dd43161da11fee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-22T22:47:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-22T22:47:11.000Z", "max_issues_repo_path": "openCV_version/venv/Scripts/finalCode/ImageSplit.py", "max_issues_repo_name": "Jokubas126/PokerAssistant_CV", "max_issues_repo_head_hexsha": "930c945c11634dce9702fd9774dd43161da11fee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openCV_version/venv/Scripts/finalCode/ImageSplit.py", "max_forks_repo_name": "Jokubas126/PokerAssistant_CV", "max_forks_repo_head_hexsha": "930c945c11634dce9702fd9774dd43161da11fee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2362637363, "max_line_length": 141, "alphanum_fraction": 0.5820895522, "include": true, "reason": "import numpy", "num_tokens": 1780}
|
// Boost.Geometry
// Copyright (c) 2020, Oracle and/or its affiliates.
// Contributed and/or modified by Adam Wulkiewicz, on behalf of Oracle
// Licensed under the Boost Software License version 1.0.
// http://www.boost.org/users/license.html
#ifndef BOOST_GEOMETRY_STRATEGIES_RELATE_CARTESIAN_HPP
#define BOOST_GEOMETRY_STRATEGIES_RELATE_CARTESIAN_HPP
// TEMP - move to strategy
#include <boost/geometry/strategies/agnostic/point_in_box_by_side.hpp>
#include <boost/geometry/strategies/cartesian/intersection.hpp>
#include <boost/geometry/strategies/cartesian/box_in_box.hpp>
#include <boost/geometry/strategies/cartesian/point_in_point.hpp>
#include <boost/geometry/strategies/cartesian/point_in_poly_crossings_multiply.hpp>
#include <boost/geometry/strategies/cartesian/point_in_poly_franklin.hpp>
#include <boost/geometry/strategies/cartesian/point_in_poly_winding.hpp>
#include <boost/geometry/strategies/cartesian/disjoint_box_box.hpp>
#include <boost/geometry/strategies/envelope/cartesian.hpp>
#include <boost/geometry/strategies/relate/services.hpp>
#include <boost/geometry/strategies/detail.hpp>
#include <boost/geometry/strategy/cartesian/area.hpp>
#include <boost/geometry/strategy/cartesian/side_robust.hpp>
#include <boost/geometry/strategy/cartesian/side_by_triangle.hpp>
#include <boost/geometry/strategy/cartesian/area_box.hpp>
#include <boost/geometry/util/type_traits.hpp>
namespace boost { namespace geometry
{
namespace strategies { namespace relate
{
template <typename CalculationType = void>
class cartesian
: public strategies::envelope::cartesian<CalculationType>
{
public:
//area
template <typename Geometry>
static auto area(Geometry const&,
std::enable_if_t<! util::is_box<Geometry>::value> * = nullptr)
{
return strategy::area::cartesian<CalculationType>();
}
template <typename Geometry>
static auto area(Geometry const&,
std::enable_if_t<util::is_box<Geometry>::value> * = nullptr)
{
return strategy::area::cartesian_box<CalculationType>();
}
// covered_by
template <typename Geometry1, typename Geometry2>
static auto covered_by(Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
util::is_pointlike<Geometry1>::value
&& util::is_box<Geometry2>::value
> * = nullptr)
{
return strategy::covered_by::cartesian_point_box();
}
template <typename Geometry1, typename Geometry2>
static auto covered_by(Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
util::is_box<Geometry1>::value
&& util::is_box<Geometry2>::value
> * = nullptr)
{
return strategy::covered_by::cartesian_box_box();
}
// disjoint
template <typename Geometry1, typename Geometry2>
static auto disjoint(Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
util::is_box<Geometry1>::value
&& util::is_box<Geometry2>::value
> * = nullptr)
{
return strategy::disjoint::cartesian_box_box();
}
template <typename Geometry1, typename Geometry2>
static auto disjoint(Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
util::is_segment<Geometry1>::value
&& util::is_box<Geometry2>::value
> * = nullptr)
{
// NOTE: Inconsistent name.
return strategy::disjoint::segment_box();
}
// relate
template <typename Geometry1, typename Geometry2>
static auto relate(Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
util::is_pointlike<Geometry1>::value
&& util::is_pointlike<Geometry2>::value
> * = nullptr)
{
return strategy::within::cartesian_point_point();
}
template <typename Geometry1, typename Geometry2>
static auto relate(Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
util::is_pointlike<Geometry1>::value
&& ( util::is_linear<Geometry2>::value
|| util::is_polygonal<Geometry2>::value )
> * = nullptr)
{
return strategy::within::cartesian_winding<void, void, CalculationType>();
}
// The problem is that this strategy is often used with non-geometry ranges.
// So dispatching only by geometry categories is impossible.
// In the past it was taking two segments, now it takes 3-point sub-ranges.
// So dispatching by segments is impossible.
// It could be dispatched by (linear || polygonal || non-geometry point range).
// For now implement as 0-parameter, special case relate.
//template <typename Geometry1, typename Geometry2>
static auto relate(/*Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
( util::is_linear<Geometry1>::value
|| util::is_polygonal<Geometry1>::value )
&& ( util::is_linear<Geometry2>::value
|| util::is_polygonal<Geometry2>::value )
> * = nullptr*/)
{
return strategy::intersection::cartesian_segments<CalculationType>();
}
// side
static auto side()
{
return strategy::side::side_robust<CalculationType>();
}
// within
template <typename Geometry1, typename Geometry2>
static auto within(Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
util::is_pointlike<Geometry1>::value
&& util::is_box<Geometry2>::value
> * = nullptr)
{
return strategy::within::cartesian_point_box();
}
template <typename Geometry1, typename Geometry2>
static auto within(Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
util::is_box<Geometry1>::value
&& util::is_box<Geometry2>::value
> * = nullptr)
{
return strategy::within::cartesian_box_box();
}
};
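// Usage sketch (illustrative only, not taken from this header): the umbrella
// strategy above is normally selected automatically via services::default_strategy,
// so a call such as boost::geometry::relate(g1, g2, mask) on cartesian geometries
// ends up dispatching into strategies::relate::cartesian<>.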
namespace services
{
template <typename Geometry1, typename Geometry2>
struct default_strategy<Geometry1, Geometry2, cartesian_tag, cartesian_tag>
{
using type = strategies::relate::cartesian<>;
};
template <>
struct strategy_converter<strategy::within::cartesian_point_point>
{
static auto get(strategy::within::cartesian_point_point const& )
{
return strategies::relate::cartesian<>();
}
};
template <>
struct strategy_converter<strategy::within::cartesian_point_box>
{
static auto get(strategy::within::cartesian_point_box const&)
{
return strategies::relate::cartesian<>();
}
};
template <>
struct strategy_converter<strategy::covered_by::cartesian_point_box>
{
static auto get(strategy::covered_by::cartesian_point_box const&)
{
return strategies::relate::cartesian<>();
}
};
template <>
struct strategy_converter<strategy::covered_by::cartesian_box_box>
{
static auto get(strategy::covered_by::cartesian_box_box const&)
{
return strategies::relate::cartesian<>();
}
};
template <>
struct strategy_converter<strategy::disjoint::cartesian_box_box>
{
static auto get(strategy::disjoint::cartesian_box_box const&)
{
return strategies::relate::cartesian<>();
}
};
template <>
struct strategy_converter<strategy::disjoint::segment_box>
{
static auto get(strategy::disjoint::segment_box const&)
{
return strategies::relate::cartesian<>();
}
};
template <>
struct strategy_converter<strategy::within::cartesian_box_box>
{
static auto get(strategy::within::cartesian_box_box const&)
{
return strategies::relate::cartesian<>();
}
};
template <typename P1, typename P2, typename CalculationType>
struct strategy_converter<strategy::within::cartesian_winding<P1, P2, CalculationType>>
{
static auto get(strategy::within::cartesian_winding<P1, P2, CalculationType> const& )
{
return strategies::relate::cartesian<CalculationType>();
}
};
template <typename CalculationType>
struct strategy_converter<strategy::intersection::cartesian_segments<CalculationType>>
{
static auto get(strategy::intersection::cartesian_segments<CalculationType> const& )
{
return strategies::relate::cartesian<CalculationType>();
}
};
template <typename CalculationType>
struct strategy_converter<strategy::within::cartesian_point_box_by_side<CalculationType>>
{
struct altered_strategy
: strategies::relate::cartesian<CalculationType>
{
template <typename Geometry1, typename Geometry2>
static auto covered_by(Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
util::is_pointlike<Geometry1>::value
&& util::is_box<Geometry2>::value
> * = nullptr)
{
return strategy::covered_by::cartesian_point_box_by_side<CalculationType>();
}
template <typename Geometry1, typename Geometry2>
static auto within(Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
util::is_pointlike<Geometry1>::value
&& util::is_box<Geometry2>::value
> * = nullptr)
{
return strategy::within::cartesian_point_box_by_side<CalculationType>();
}
};
static auto get(strategy::covered_by::cartesian_point_box_by_side<CalculationType> const&)
{
return altered_strategy();
}
static auto get(strategy::within::cartesian_point_box_by_side<CalculationType> const&)
{
return altered_strategy();
}
};
template <typename CalculationType>
struct strategy_converter<strategy::covered_by::cartesian_point_box_by_side<CalculationType>>
: strategy_converter<strategy::within::cartesian_point_box_by_side<CalculationType>>
{};
template <typename P1, typename P2, typename CalculationType>
struct strategy_converter<strategy::within::franklin<P1, P2, CalculationType>>
{
struct altered_strategy
: strategies::relate::cartesian<CalculationType>
{
template <typename Geometry1, typename Geometry2>
static auto relate(Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
util::is_pointlike<Geometry1>::value
&& ( util::is_linear<Geometry2>::value
|| util::is_polygonal<Geometry2>::value )
> * = nullptr)
{
return strategy::within::franklin<void, void, CalculationType>();
}
};
static auto get(strategy::within::franklin<P1, P2, CalculationType> const&)
{
return altered_strategy();
}
};
template <typename P1, typename P2, typename CalculationType>
struct strategy_converter<strategy::within::crossings_multiply<P1, P2, CalculationType>>
{
struct altered_strategy
: strategies::relate::cartesian<CalculationType>
{
template <typename Geometry1, typename Geometry2>
static auto relate(Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
util::is_pointlike<Geometry1>::value
&& ( util::is_linear<Geometry2>::value
|| util::is_polygonal<Geometry2>::value )
> * = nullptr)
{
return strategy::within::crossings_multiply<void, void, CalculationType>();
}
};
static auto get(strategy::within::crossings_multiply<P1, P2, CalculationType> const&)
{
return altered_strategy();
}
};
// TEMP used in distance segment/box
template <typename CalculationType>
struct strategy_converter<strategy::side::side_by_triangle<CalculationType>>
{
static auto get(strategy::side::side_by_triangle<CalculationType> const&)
{
return strategies::relate::cartesian<CalculationType>();
}
};
template <typename CalculationType>
struct strategy_converter<strategy::side::side_robust<CalculationType>>
{
static auto get(strategy::side::side_robust<CalculationType> const&)
{
return strategies::relate::cartesian<CalculationType>();
}
};
} // namespace services
}} // namespace strategies::relate
}} // namespace boost::geometry
#endif // BOOST_GEOMETRY_STRATEGIES_RELATE_CARTESIAN_HPP
|
{"hexsha": "3ddc766a21e3706d549189b432527f8aa1045e13", "size": 13571, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/geometry/strategies/relate/cartesian.hpp", "max_stars_repo_name": "jhypolite/geometry", "max_stars_repo_head_hexsha": "f79b3f0c457bc4ae4bb1c1cb5a117efbe97be3c4", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/geometry/strategies/relate/cartesian.hpp", "max_issues_repo_name": "jhypolite/geometry", "max_issues_repo_head_hexsha": "f79b3f0c457bc4ae4bb1c1cb5a117efbe97be3c4", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/geometry/strategies/relate/cartesian.hpp", "max_forks_repo_name": "jhypolite/geometry", "max_forks_repo_head_hexsha": "f79b3f0c457bc4ae4bb1c1cb5a117efbe97be3c4", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6749379653, "max_line_length": 94, "alphanum_fraction": 0.6042296073, "num_tokens": 2792}
|
#' @export
load.tped <- function(prefix) {
tped.file <- paste0(prefix, '.tped')
tfam.file <- paste0(prefix, '.tfam')
stopifnot(file.exists(tped.file), file.exists(tfam.file))
geno.samples <- read.table(tfam.file)
n.samples <- nrow(geno.samples)
geno.data <- scan(tped.file, character())
n.snps <- length(geno.data)/(2*n.samples+4)
dim(geno.data) <- c(2*n.samples+4, n.snps)
  geno.raw <- geno.data[-(1:4), ]
  # The first four rows hold the per-SNP info fields; transpose them into a
  # data frame (one row per SNP) so the column names and $-assignments below work.
  geno.info <- as.data.frame(t(geno.data[1:4, ]), stringsAsFactors=FALSE)
  rm('geno.data')
  gc(FALSE)
  colnames(geno.info) <- c('chromosome', 'id', 'distance', 'position')
alleles <- sort(unique(as.vector(geno.raw)))
# Put '0' (missing) at the end
alleles <- c(alleles[alleles !='0'], '0')
n.alleles <- length(alleles)-1
rev.order <- c(n.alleles:1, n.alleles+1)
# This is the most time consuming step
has.what <- t(apply(geno.raw, 2, function (x) alleles %in% x))
# A allele is first alphabetically
# B allele is last alphabetically
a.allele <- alleles[apply(has.what, 1, which.max)]
b.allele <- (alleles[rev.order])[apply(has.what[, rev.order], 1, which.max)]
b.allele[a.allele==b.allele] <- NA
  geno.both <- apply(geno.raw, 1, function (x) x != a.allele)
  # apply() over rows returns the transpose (SNPs x allele columns), so the
  # missingness mask must be transposed to line up before masking.
  geno.both[t(geno.raw == '0')] <- NA
# Add even and odd columns to get diploid genotype code (# of b alleles)
geno <- geno.both[, 2*(1:n.samples)-1] + geno.both[, 2*(1:n.samples)]
geno.info$a <- a.allele
geno.info$b <- b.allele
colnames(geno) <- make.names(geno.samples$V2)
rownames(geno) <- make.names(geno.info$id)
return (list(geno=geno, geno.info=geno.info))
}
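## Usage sketch (the file prefix is assumed): reads "study.tped"/"study.tfam"
## and returns geno (SNPs x samples, coded as the count of B alleles, 0/1/2)
## plus per-SNP info including the inferred A/B alleles.
# res <- load.tped('study')
# table(res$geno[1, ], useNA='ifany')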
|
{"hexsha": "748cc6aac5f4c4925d325c3d8d7f99fdbd17c183", "size": 1580, "ext": "r", "lang": "R", "max_stars_repo_path": "R/load_tped.r", "max_stars_repo_name": "sushilashenoy/zoom.plot", "max_stars_repo_head_hexsha": "036aa60980fdf7d86b5168f08e63aa13ca1f9e4b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-03-30T22:17:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-14T22:13:21.000Z", "max_issues_repo_path": "R/load_tped.r", "max_issues_repo_name": "sushilashenoy/zoom.plot", "max_issues_repo_head_hexsha": "036aa60980fdf7d86b5168f08e63aa13ca1f9e4b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2015-03-30T22:17:37.000Z", "max_issues_repo_issues_event_max_datetime": "2016-09-23T16:18:28.000Z", "max_forks_repo_path": "R/load_tped.r", "max_forks_repo_name": "sushilashenoy/zoom.plot", "max_forks_repo_head_hexsha": "036aa60980fdf7d86b5168f08e63aa13ca1f9e4b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-20T19:14:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-20T19:14:21.000Z", "avg_line_length": 30.3846153846, "max_line_length": 78, "alphanum_fraction": 0.6291139241, "num_tokens": 532}
|
FROM ghcr.io/lballabio/quantlib-swig-devenv:default
LABEL maintainer="Luigi Ballabio <luigi.ballabio@gmail.com>"
LABEL Description="A development environment for building QuantLib-SWIG on Travis CI"
RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install -y r-base-dev texlive \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
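# Build/run sketch (image tag assumed):
#   docker build -t quantlib-swig-devenv:r -f Dockerfile.r .
#   docker run --rm -it quantlib-swig-devenv:r R --version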
|
{"hexsha": "ea67a63d291b1d7ed5431fffa01213297a998fab", "size": 341, "ext": "r", "lang": "R", "max_stars_repo_path": "quantlib-swig-devenv/Dockerfile.r", "max_stars_repo_name": "yrtf/dockerfiles", "max_stars_repo_head_hexsha": "83aba03c93f8012cbe493f4c5c60034a1e083135", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2015-11-17T16:25:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T19:59:17.000Z", "max_issues_repo_path": "quantlib-swig-devenv/Dockerfile.r", "max_issues_repo_name": "yrtf/dockerfiles", "max_issues_repo_head_hexsha": "83aba03c93f8012cbe493f4c5c60034a1e083135", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2016-10-25T03:25:09.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-27T12:58:57.000Z", "max_forks_repo_path": "quantlib-swig-devenv/Dockerfile.r", "max_forks_repo_name": "yrtf/dockerfiles", "max_forks_repo_head_hexsha": "83aba03c93f8012cbe493f4c5c60034a1e083135", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2015-10-19T13:32:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-08T16:29:20.000Z", "avg_line_length": 34.1, "max_line_length": 85, "alphanum_fraction": 0.7683284457, "num_tokens": 92}
|
"Holds the tableau of an variational partitioned additive Runge-Kutta method."
struct TableauVPARK{T} <: AbstractTableau{T}
name::Symbol
o::Int
s::Int
r::Int
q::CoefficientsARK{T}
p::CoefficientsARK{T}
q̃::CoefficientsPRK{T}
p̃::CoefficientsPRK{T}
λ::CoefficientsMRK{T}
d::Vector{T}
function TableauVPARK{T}(name, o, s, r, q, p, q̃, p̃, λ, d) where {T}
@assert isa(name, Symbol)
@assert isa(s, Integer)
@assert isa(r, Integer)
@assert isa(o, Integer)
@assert s > 0 "Number of stages s must be > 0"
@assert r > 0 "Number of stages r must be > 0"
@assert s==q.s==p.s==q̃.s==p̃.s==length(d)
@assert r==q.r==p.r==q̃.r==p̃.r==λ.r
new(name, o, s, r, q, p, q̃, p̃, λ, d)
end
function TableauVPARK{T}(name, o, s, r, q, p, q̃, p̃, λ) where {T}
@assert isa(name, Symbol)
@assert isa(s, Integer)
@assert isa(r, Integer)
@assert isa(o, Integer)
@assert s > 0 "Number of stages s must be > 0"
@assert r > 0 "Number of stages r must be > 0"
@assert s==q.s==p.s==q̃.s==p̃.s
@assert r==q.r==p.r==q̃.r==p̃.r==λ.r
new(name, o, s, r, q, p, q̃, p̃, λ)
end
end
function TableauVPARK(name::Symbol, order::Int,
a_q::Matrix{T}, a_p::Matrix{T},
α_q::Matrix{T}, α_p::Matrix{T},
a_q̃::Matrix{T}, a_p̃::Matrix{T},
α_q̃::Matrix{T}, α_p̃::Matrix{T},
b_q::Vector{T}, b_p::Vector{T},
β_q::Vector{T}, β_p::Vector{T},
c_q::Vector{T}, c_p::Vector{T},
c_λ::Vector{T}, d_λ::Vector{T},
d::Vector{T}) where {T <: Real}
s = length(c_q)
r = length(c_λ)
@assert s > 0 "Number of stages s must be > 0"
@assert r > 0 "Number of stages r must be > 0"
@assert s==size(a_q,1)==size(a_q,2)==length(b_q)==length(c_q)
@assert s==size(a_p,1)==size(a_p,2)==length(b_p)==length(c_p)
@assert s==size(α_q,1)==size(α_p,1)
@assert r==size(α_q,2)==size(α_p,2)
@assert s==length(d)
@assert r==length(c_λ)==length(d_λ)
@assert r==size(a_q̃,1)==size(a_p̃,1)
@assert s==size(a_q̃,2)==size(a_p̃,2)
@assert r==size(α_q̃,1)==size(α_q̃,2)==length(β_q)
@assert r==size(α_p̃,1)==size(α_p̃,2)==length(β_p)
q = CoefficientsARK{T}(name, order, s, r, a_q, b_q, c_q, α_q, β_q)
p = CoefficientsARK{T}(name, order, s, r, a_p, b_p, c_p, α_p, β_p)
q̃ = CoefficientsPRK{T}(name, order, s, r, a_q̃, c_λ, α_q̃)
p̃ = CoefficientsPRK{T}(name, order, s, r, a_p̃, c_λ, α_p̃)
λ = CoefficientsMRK{T}(name, r, d_λ, c_λ)
TableauVPARK{T}(name, order, s, r, q, p, q̃, p̃, λ, d)
end
function TableauVPARK(name::Symbol, order::Int,
a_q::Matrix{T}, a_p::Matrix{T},
α_q::Matrix{T}, α_p::Matrix{T},
a_q̃::Matrix{T}, a_p̃::Matrix{T},
α_q̃::Matrix{T}, α_p̃::Matrix{T},
b_q::Vector{T}, b_p::Vector{T},
β_q::Vector{T}, β_p::Vector{T},
c_q::Vector{T}, c_p::Vector{T},
c_λ::Vector{T}, d_λ::Vector{T}) where {T <: Real}
s = length(c_q)
r = length(c_λ)
@assert s > 0 "Number of stages s must be > 0"
@assert r > 0 "Number of stages r must be > 0"
@assert s==size(a_q,1)==size(a_q,2)==length(b_q)==length(c_q)
@assert s==size(a_p,1)==size(a_p,2)==length(b_p)==length(c_p)
@assert s==size(α_q,1)==size(α_p,1)
@assert r==size(α_q,2)==size(α_p,2)
@assert r==length(c_λ)==length(d_λ)
@assert r==size(a_q̃,1)==size(a_p̃,1)
@assert s==size(a_q̃,2)==size(a_p̃,2)
@assert r==size(α_q̃,1)==size(α_q̃,2)==length(β_q)
@assert r==size(α_p̃,1)==size(α_p̃,2)==length(β_p)
q = CoefficientsARK{T}(name, order, s, r, a_q, b_q, c_q, α_q, β_q)
p = CoefficientsARK{T}(name, order, s, r, a_p, b_p, c_p, α_p, β_p)
q̃ = CoefficientsPRK{T}(name, order, s, r, a_q̃, c_λ, α_q̃)
p̃ = CoefficientsPRK{T}(name, order, s, r, a_p̃, c_λ, α_p̃)
λ = CoefficientsMRK{T}(name, r, d_λ, c_λ)
TableauVPARK{T}(name, order, s, r, q, p, q̃, p̃, λ)
end
# TODO function readTableauVPARKFromFile(dir::AbstractString, name::AbstractString)
"Parameters for right-hand side function of variational partitioned additive Runge-Kutta methods."
mutable struct ParametersVPARK{DT,TT,D,S,R,ϑT,FT,UT,GT,ϕT} <: Parameters{DT,TT}
f_ϑ::ϑT
f_f::FT
f_u::UT
f_g::GT
f_ϕ::ϕT
Δt::TT
t_q::CoefficientsARK{TT}
t_p::CoefficientsARK{TT}
t_q̃::CoefficientsPRK{TT}
t_p̃::CoefficientsPRK{TT}
t_λ::CoefficientsMRK{TT}
d_v::Vector{TT}
t::TT
q::Vector{DT}
p::Vector{DT}
λ::Vector{DT}
function ParametersVPARK{DT,TT,D,S,R,ϑT,FT,UT,GT,ϕT}(f_ϑ, f_f, f_u, f_g, f_ϕ, Δt, t_q, t_p, t_q̃, t_p̃, t_λ, d_v) where {DT,TT,D,S,R,ϑT,FT,UT,GT,ϕT}
# create solution vectors
q = zeros(DT,D)
p = zeros(DT,D)
λ = zeros(DT,D)
new(f_ϑ, f_f, f_u, f_g, f_ϕ, Δt,
t_q, t_p, t_q̃, t_p̃, t_λ, d_v,
zero(TT), q, p, λ)
end
end
"""
Variational partitioned additive Runge-Kutta integrator cache.
### Fields
* `n`: time step number
* `t`: time of current time step
* `t̅`: time of previous time step
* `q`: current solution of q
* `q̅`: previous solution of q
* `p`: current solution of p
* `p̅`: previous solution of p
* `v`: vector field of q
* `v̅`: vector field of q̅
* `f`: vector field of p
* `f̅`: vector field of p̅
* `q̃`: initial guess of q
* `p̃`: initial guess of p
* `ṽ`: initial guess of v
* `f̃`: initial guess of f
* `s̃`: holds shift due to periodicity of solution
* `Q`: internal stages of q
* `P`: internal stages of p
* `V`: internal stages of v
* `F`: internal stages of f
* `Y`: vector field of internal stages of q
* `Z`: vector field of internal stages of p
"""
mutable struct IntegratorCacheVPARK{ST,TT,D,S,R} <: IDAEIntegratorCache{ST,D}
n::Int
t::TT
t̅::TT
q::Vector{ST}
q̅::Vector{ST}
p::Vector{ST}
p̅::Vector{ST}
λ::Vector{ST}
λ̅::Vector{ST}
μ::Vector{ST}
μ̅::Vector{ST}
qₑᵣᵣ::Vector{ST}
pₑᵣᵣ::Vector{ST}
v::Vector{ST}
v̅::Vector{ST}
f::Vector{ST}
f̅::Vector{ST}
u::Vector{ST}
u̅::Vector{ST}
g::Vector{ST}
g̅::Vector{ST}
q̃::Vector{ST}
p̃::Vector{ST}
ṽ::Vector{ST}
f̃::Vector{ST}
s̃::Vector{ST}
Qi::Vector{Vector{ST}}
Pi::Vector{Vector{ST}}
Vi::Vector{Vector{ST}}
Fi::Vector{Vector{ST}}
Yi::Vector{Vector{ST}}
Zi::Vector{Vector{ST}}
Φi::Vector{Vector{ST}}
Qp::Vector{Vector{ST}}
Pp::Vector{Vector{ST}}
Λp::Vector{Vector{ST}}
Up::Vector{Vector{ST}}
Gp::Vector{Vector{ST}}
Yp::Vector{Vector{ST}}
Zp::Vector{Vector{ST}}
Φp::Vector{Vector{ST}}
function IntegratorCacheVPARK{ST,TT,D,S,R}() where {ST,TT,D,S,R}
q = zeros(ST,D)
q̅ = zeros(ST,D)
p = zeros(ST,D)
p̅ = zeros(ST,D)
λ = zeros(ST,D)
λ̅ = zeros(ST,D)
μ = zeros(ST,D)
μ̅ = zeros(ST,D)
# create error vectors
qₑᵣᵣ = zeros(ST,D)
pₑᵣᵣ = zeros(ST,D)
# create update vectors
v = zeros(ST,D)
v̅ = zeros(ST,D)
f = zeros(ST,D)
f̅ = zeros(ST,D)
u = zeros(ST,D)
u̅ = zeros(ST,D)
g = zeros(ST,D)
g̅ = zeros(ST,D)
# create temporary vectors
q̃ = zeros(ST,D)
p̃ = zeros(ST,D)
ṽ = zeros(ST,D)
f̃ = zeros(ST,D)
s̃ = zeros(ST,D)
# create internal stage vectors
Qi = create_internal_stage_vector(ST, D, S)
Pi = create_internal_stage_vector(ST, D, S)
Vi = create_internal_stage_vector(ST, D, S)
Fi = create_internal_stage_vector(ST, D, S)
Yi = create_internal_stage_vector(ST, D, S)
Zi = create_internal_stage_vector(ST, D, S)
Φi = create_internal_stage_vector(ST, D, S)
Qp = create_internal_stage_vector(ST, D, R)
Pp = create_internal_stage_vector(ST, D, R)
Λp = create_internal_stage_vector(ST, D, R)
Up = create_internal_stage_vector(ST, D, R)
Gp = create_internal_stage_vector(ST, D, R)
Yp = create_internal_stage_vector(ST, D, R)
Zp = create_internal_stage_vector(ST, D, R)
Φp = create_internal_stage_vector(ST, D, R)
new(0, zero(TT), zero(TT), q, q̅, p, p̅, λ, λ̅, μ, μ̅,
qₑᵣᵣ, pₑᵣᵣ,
v, v̅, f, f̅, u, u̅, g, g̅,
q̃, p̃, ṽ, f̃, s̃,
Qi, Pi, Vi, Fi, Yi, Zi, Φi,
Qp, Pp, Λp, Up, Gp, Yp, Zp, Φp)
end
end
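# Layout of the nonlinear solver vector x, inferred from the index arithmetic
# in compute_stages! below (a sketch, not authoritative documentation):
#   x[3*(D*(i-1)+k-1) + 1..3]          -> (Yi, Zi, Vi)[k] for stages i = 1..S
#   x[3*D*S + 3*(D*(i-1)+k-1) + 1..3]  -> (Yp, Zp, Λp)[k] for stages i = 1..R
#   x[3*D*S + 3*D*R + k]               -> μ[k], present only when the tableau has a d vector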
function compute_stages!(x::Vector{ST}, cache::IntegratorCacheVPARK{ST,TT,D,S,R},
params::ParametersVPARK{DT,TT,D,S,R}) where {ST,DT,TT,D,S,R}
local tpᵢ::TT
local tλᵢ::TT
for i in 1:S
for k in 1:D
# copy x to Y, Z
cache.Yi[i][k] = x[3*(D*(i-1)+k-1)+1]
cache.Zi[i][k] = x[3*(D*(i-1)+k-1)+2]
cache.Vi[i][k] = x[3*(D*(i-1)+k-1)+3]
# compute Q and P
cache.Qi[i][k] = params.q[k] + params.Δt * cache.Yi[i][k]
cache.Pi[i][k] = params.p[k] + params.Δt * cache.Zi[i][k]
end
# compute f(X)
tpᵢ = params.t + params.Δt * params.t_p.c[i]
params.f_ϑ(tpᵢ, cache.Qi[i], cache.Vi[i], cache.Φi[i])
params.f_f(tpᵢ, cache.Qi[i], cache.Vi[i], cache.Fi[i])
cache.Φi[i] .-= cache.Pi[i]
end
for i in 1:R
for k in 1:D
            # copy x to Y, Z and Λ
cache.Yp[i][k] = x[3*D*S+3*(D*(i-1)+k-1)+1]
cache.Zp[i][k] = x[3*D*S+3*(D*(i-1)+k-1)+2]
cache.Λp[i][k] = x[3*D*S+3*(D*(i-1)+k-1)+3]
            # compute Q and P
cache.Qp[i][k] = params.q[k] + params.Δt * cache.Yp[i][k]
cache.Pp[i][k] = params.p[k] + params.Δt * cache.Zp[i][k]
end
# compute f(X)
tλᵢ = params.t + params.Δt * params.t_λ.c[i]
params.f_u(tλᵢ, cache.Qp[i], cache.Pp[i], cache.Λp[i], cache.Up[i])
params.f_g(tλᵢ, cache.Qp[i], cache.Pp[i], cache.Λp[i], cache.Gp[i])
params.f_ϕ(tλᵢ, cache.Qp[i], cache.Pp[i], cache.Φp[i])
end
if length(params.d_v) > 0
for k in 1:D
cache.μ[k] = x[3*D*S+3*D*R+k]
end
end
end
"Compute stages of variational partitioned additive Runge-Kutta methods."
@generated function function_stages!(y::Vector{ST}, b::Vector{ST}, params::ParametersVPARK{DT,TT,D,S,R}) where {ST,DT,TT,D,S,R}
cache = IntegratorCacheVPARK{ST,TT,D,S,R}()
quote
compute_stages!(y, $cache, params)
# compute b = - [(Y-AV-AU), (Z-AF-AG), Φ]
for i in 1:S
for k in 1:D
b[3*(D*(i-1)+k-1)+1] = - $cache.Yi[i][k]
b[3*(D*(i-1)+k-1)+2] = - $cache.Zi[i][k]
b[3*(D*(i-1)+k-1)+3] = - $cache.Φi[i][k]
for j in 1:S
b[3*(D*(i-1)+k-1)+1] += params.t_q.a[i,j] * $cache.Vi[j][k]
b[3*(D*(i-1)+k-1)+2] += params.t_p.a[i,j] * $cache.Fi[j][k]
end
for j in 1:R
b[3*(D*(i-1)+k-1)+1] += params.t_q.α[i,j] * $cache.Up[j][k]
b[3*(D*(i-1)+k-1)+2] += params.t_p.α[i,j] * $cache.Gp[j][k]
end
end
end
        # compute b = - [(Y-AV-AU), (Z-AF-AG), Φ] for the projective stages
for i in 1:R
for k in 1:D
b[3*D*S+3*(D*(i-1)+k-1)+1] = - $cache.Yp[i][k]
b[3*D*S+3*(D*(i-1)+k-1)+2] = - $cache.Zp[i][k]
b[3*D*S+3*(D*(i-1)+k-1)+3] = - $cache.Φp[i][k]
for j in 1:S
b[3*D*S+3*(D*(i-1)+k-1)+1] += params.t_q̃.a[i,j] * $cache.Vi[j][k]
b[3*D*S+3*(D*(i-1)+k-1)+2] += params.t_p̃.a[i,j] * $cache.Fi[j][k]
end
for j in 1:R
b[3*D*S+3*(D*(i-1)+k-1)+1] += params.t_q̃.α[i,j] * $cache.Up[j][k]
b[3*D*S+3*(D*(i-1)+k-1)+2] += params.t_p̃.α[i,j] * $cache.Gp[j][k]
end
end
end
# compute b = - [Λ₁-λ]
if params.t_λ.c[1] == 0
for k in 1:D
b[3*D*S+3*(k-1)+3] = - $cache.Λp[1][k] + params.λ[k]
end
end
if length(params.d_v) > 0
for i in 1:S
for k in 1:D
b[3*(D*(i-1)+k-1)+3] -= $cache.μ[k] * params.d_v[i]
end
end
for k in 1:D
b[3*D*S+3*D*R+k] = 0
for i in 1:S
b[3*D*S+3*D*R+k] -= $cache.Vi[i][k] * params.d_v[i]
end
end
end
end
end
@doc raw"""
Variational partitioned additive Runge-Kutta integrator.
This integrator solves the following system of equations for the internal stages,
```math
\begin{align}
Q_{n,i} &= q_{n} + h \sum \limits_{j=1}^{s} a_{ij} V_{n,j} + h \sum \limits_{j=1}^{r} \alpha_{ij} U_{n,j} , & i &= 1, ..., s , \\
P_{n,i} &= p_{n} + h \sum \limits_{j=1}^{s} a_{ij} F_{n,j} + h \sum \limits_{j=1}^{r} \alpha_{ij} G_{n,j} , & i &= 1, ..., s , \\
\tilde{Q}_{n,i} &= q_{n} + h \sum \limits_{j=1}^{s} \tilde{a}_{ij} V_{n,j} + h \sum \limits_{j=1}^{r} \tilde{\alpha}_{ij} U_{n,j} , & i &= 1, ..., r , \\
\tilde{P}_{n,i} &= p_{n} + h \sum \limits_{j=1}^{s} \tilde{a}_{ij} F_{n,j} + h \sum \limits_{j=1}^{r} \tilde{\alpha}_{ij} G_{n,j} , & i &= 1, ..., r , \\
\tilde{\Phi}_{n,i} &= 0 , & i &= 1, ..., r ,
\end{align}
```
with definitions
```math
\begin{align}
P_{n,i} &= \frac{\partial L}{\partial v} (Q_{n,i}, V_{n,i}) , & i &= 1, ..., s , \\
F_{n,i} &= \frac{\partial L}{\partial q} (Q_{n,i}, V_{n,i}) , & i &= 1, ..., s , \\
U_{n,i} &= \hphantom{-} \frac{\partial \phi}{\partial p} (\tilde{Q}_{n,i}, \tilde{P}_{n,i})^{T} \Lambda_{n,i} , & i &= 1, ..., r , \\
G_{n,i} &= - \frac{\partial \phi}{\partial q} (\tilde{Q}_{n,i}, \tilde{P}_{n,i})^{T} \Lambda_{n,i} , & i &= 1, ..., r , \\
\tilde{\Phi}_{n,i} &= \phi(\tilde{Q}_{n,i}, \tilde{P}_{n,i}) , & i &= 1, ..., r ,
\end{align}
```
and update rule
```math
\begin{align}
q_{n+1} &= q_{n} + h \sum \limits_{i=1}^{s} b_{i} V_{n,i} + h \sum \limits_{i=1}^{r} \beta_{i} U_{n,i} , \\
p_{n+1} &= p_{n} + h \sum \limits_{i=1}^{s} b_{i} F_{n,i} + h \sum \limits_{i=1}^{r} \beta_{i} G_{n,i} .
\end{align}
```
"""
struct IntegratorVPARK{DT, TT, ET <: IDAE{DT,TT},
PT <: ParametersVPARK{DT,TT},
ST <: NonlinearSolver{DT},
IT <: InitialGuessPODE{DT,TT}} <: AbstractIntegratorSPARK{DT, TT}
equation::ET
tableau::TableauVPARK{TT}
params::PT
solver::ST
iguess::IT
end
function IntegratorVPARK(equation::IDAE{DT,TT,FT,PT,UT,GT,ϕT,VT},
tableau::TableauVPARK{TT}, Δt::TT) where {DT,TT,FT,PT,UT,GT,ϕT,VT}
D = equation.d
S = tableau.s
R = tableau.r
N = 3*D*S + 3*D*R
if isdefined(tableau, :d)
N += D
d_v = tableau.d
else
d_v = DT[]
end
# create params
params = ParametersVPARK{DT,TT,D,S,R,FT,PT,UT,GT,ϕT}(
equation.ϑ, equation.f, equation.u, equation.g, equation.ϕ, Δt,
tableau.q, tableau.p, tableau.q̃, tableau.p̃, tableau.λ, d_v)
# create solver
solver = create_nonlinear_solver(DT, N, params)
# create initial guess
iguess = InitialGuessPODE(get_config(:ig_interpolation), equation, Δt)
# create integrator
IntegratorVPARK{DT, TT, typeof(equation), typeof(params), typeof(solver), typeof(iguess)}(
equation, tableau, params, solver, iguess)
end
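# Hypothetical usage sketch (the IDAE construction is assumed, not shown here):
#   tab = TableauVPARK(:myVPARK, 2, a_q, a_p, α_q, α_p, a_q̃, a_p̃, α_q̃, α_p̃,
#                      b_q, b_p, β_q, β_p, c_q, c_p, c_λ, d_λ)
#   int = IntegratorVPARK(idae, tab, Δt)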
equation(int::IntegratorVPARK) = int.equation
timestep(int::IntegratorVPARK) = int.params.Δt
tableau(int::IntegratorVPARK) = int.tableau
nstages(int::IntegratorVPARK) = int.tableau.s
pstages(int::IntegratorVPARK) = int.tableau.r
function create_integrator_cache(int::IntegratorVPARK{DT,TT}) where {DT,TT}
IntegratorCacheVPARK{DT, TT, ndims(int), nstages(int), pstages(int)}()
end
function update_params!(params::ParametersVPARK, cache::IntegratorCacheVPARK)
# set time for nonlinear solver and copy previous solution
params.t = cache.t
params.q .= cache.q
params.p .= cache.p
params.λ .= cache.λ
end
function initialize!(int::IntegratorVPARK, cache::IntegratorCacheVPARK)
cache.t̅ = cache.t - timestep(int)
equation(int).v(cache.t, cache.q, cache.p, cache.v)
equation(int).f(cache.t, cache.q, cache.p, cache.f)
initialize!(int.iguess, cache.t, cache.q, cache.p, cache.v, cache.f,
cache.t̅, cache.q̅, cache.p̅, cache.v̅, cache.f̅)
end
function initial_guess!(int::IntegratorVPARK, cache::IntegratorCacheVPARK)
for i in 1:nstages(int)
evaluate!(int.iguess, cache.q, cache.p, cache.v, cache.f,
cache.q̅, cache.p̅, cache.v̅, cache.f̅,
cache.q̃, cache.p̃, cache.ṽ, cache.f̃,
tableau(int).q.c[i], tableau(int).p.c[i])
for k in 1:ndims(int)
int.solver.x[3*(ndims(int)*(i-1)+k-1)+1] = (cache.q̃[k] - cache.q[k])/timestep(int)
int.solver.x[3*(ndims(int)*(i-1)+k-1)+2] = (cache.p̃[k] - cache.p[k])/timestep(int)
int.solver.x[3*(ndims(int)*(i-1)+k-1)+3] = cache.ṽ[k]
end
end
for i in 1:pstages(int)
evaluate!(int.iguess, cache.q, cache.p, cache.v, cache.f,
cache.q̅, cache.p̅, cache.v̅, cache.f̅,
cache.q̃, cache.p̃, cache.ṽ, cache.f̃,
tableau(int).q̃.c[i], tableau(int).p̃.c[i])
for k in 1:ndims(int)
int.solver.x[3*ndims(int)*nstages(int)+3*(ndims(int)*(i-1)+k-1)+1] = (cache.q̃[k] - cache.q[k])/timestep(int)
int.solver.x[3*ndims(int)*nstages(int)+3*(ndims(int)*(i-1)+k-1)+2] = (cache.p̃[k] - cache.p[k])/timestep(int)
int.solver.x[3*ndims(int)*nstages(int)+3*(ndims(int)*(i-1)+k-1)+3] = 0
end
end
if int.params.t_λ.c[1] == 0
for k in 1:ndims(int)
int.solver.x[3*ndims(int)*nstages(int)+3*(k-1)+3] = cache.λ[k]
end
end
if isdefined(tableau(int), :d)
for k in 1:ndims(int)
int.solver.x[3*ndims(int)*nstages(int)+3*ndims(int)*pstages(int)+k] = 0
end
end
end
"Integrate DAE with variational partitioned additive Runge-Kutta integrator."
function integrate_step!(int::IntegratorVPARK{DT,TT}, cache::IntegratorCacheVPARK{DT,TT}) where {DT,TT}
# update nonlinear solver parameters from cache
update_params!(int.params, cache)
# compute initial guess
initial_guess!(int, cache)
# reset cache
reset!(cache, timestep(int))
# call nonlinear solver
solve!(int.solver)
# print solver status
print_solver_status(int.solver.status, int.solver.params, cache.n)
# check if solution contains NaNs or error bounds are violated
check_solver_status(int.solver.status, int.solver.params, cache.n)
# compute vector fields at internal stages
compute_stages!(int.solver.x, cache, int.params)
# compute final update
update_solution!(cache.q, cache.qₑᵣᵣ, cache.Vi, int.params.t_q.b, timestep(int))
update_solution!(cache.p, cache.pₑᵣᵣ, cache.Fi, int.params.t_p.b, timestep(int))
# compute projection
update_solution!(cache.q, cache.qₑᵣᵣ, cache.Up, int.params.t_q.β, timestep(int))
update_solution!(cache.p, cache.pₑᵣᵣ, cache.Gp, int.params.t_p.β, timestep(int))
update_multiplier!(cache.λ, cache.Λp, int.params.t_λ.b)
# copy solution to initial guess
update!(int.iguess, cache.t, cache.q, cache.p, cache.v, cache.f)
# take care of periodic solutions
cut_periodic_solution!(cache, equation(int).periodicity)
end
|
{"hexsha": "277e2c92c1e258e77e72c3698b61ee6718a75d6d", "size": 20178, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/integrators/spark/integrators_vpark.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/GeometricIntegrators.jl-dcce2d33-59f6-5b8d-9047-0defad88ae06", "max_stars_repo_head_hexsha": "5ffdd27e87719a998492287d90794ffa6d69231a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/integrators/spark/integrators_vpark.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/GeometricIntegrators.jl-dcce2d33-59f6-5b8d-9047-0defad88ae06", "max_issues_repo_head_hexsha": "5ffdd27e87719a998492287d90794ffa6d69231a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/integrators/spark/integrators_vpark.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/GeometricIntegrators.jl-dcce2d33-59f6-5b8d-9047-0defad88ae06", "max_forks_repo_head_hexsha": "5ffdd27e87719a998492287d90794ffa6d69231a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.63, "max_line_length": 153, "alphanum_fraction": 0.5269104966, "num_tokens": 7327}
|
from collections import defaultdict
import sys
import os
import argparse
import madmom
import numpy as np
import pandas as pd
import pretty_midi
import librosa
import h5py
import math
from config import load_config
import numpy as np
def readmm(d, args):
ipath = os.path.join(d, 'input.dat')
note_range = 88
n_bins = int(args['bin_multiple']) * note_range
window_size = 7
mmi = np.memmap(ipath, mode='r')
i = np.reshape(mmi, (-1, window_size, n_bins))
opath = os.path.join(d, 'output.dat')
mmo = np.memmap(opath, mode='r')
o = np.reshape(mmo, (-1, note_range))
return i, o
class DataGen:
def __init__(self, dirpath, batch_size, args, num_files=1):
print('initializing gen for '+dirpath)
self.mmdirs = os.listdir(dirpath)
self.spe = 0 # steps per epoch
self.dir = dirpath
for mmdir in self.mmdirs:
print(mmdir)
_, outputs = readmm(os.path.join(self.dir, mmdir), args)
self.spe += len(outputs) // batch_size
# print cnt
        self.num_files = num_files
        self.batch_size = batch_size
        self.args = args  # keep for re-reading memmaps when switching files
        self.current_file_idx = 0
print('starting with ',
self.mmdirs[self.current_file_idx:self.current_file_idx+self.num_files])
for j in range(self.num_files):
mmdir = os.path.join(
self.dir, self.mmdirs[self.current_file_idx+j])
i, o = readmm(mmdir, args)
if j == 0:
self.inputs, self.outputs = i, o
print('set inputs,outputs')
else:
self.inputs = np.concatenate((self.inputs, i))
self.outputs = np.concatenate((self.outputs, o))
print('concatenated')
self.current_file_idx = (
self.current_file_idx + 1) % len(self.mmdirs)
self.i = 0
def steps(self):
return self.spe
def __next__(self):
while True:
if (self.i+1)*self.batch_size > self.inputs.shape[0]:
# return rest and then switch files
x, y = self.inputs[self.i *
self.batch_size:], self.outputs[self.i*self.batch_size:]
self.i = 0
# no need to open any new files if we only deal with one, like for validation
if len(self.mmdirs) > 1:
print(
'switching to ', self.mmdirs[self.current_file_idx:self.current_file_idx+self.num_files])
for j in range(self.num_files):
mmdir = os.path.join(
self.dir, self.mmdirs[self.current_file_idx+j])
                    i, o = readmm(mmdir, self.args)
                    if j == 0:
                        self.inputs, self.outputs = i, o
else:
self.inputs = np.concatenate((self.inputs, i))
self.outputs = np.concatenate((self.outputs, o))
self.current_file_idx = (
self.current_file_idx + 1) % len(self.mmdirs)
else:
x, y = self.inputs[self.i*self.batch_size:(
self.i+1)*self.batch_size], self.outputs[self.i*self.batch_size:(self.i+1)*self.batch_size]
self.i += 1
yield x, y
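# Hypothetical DataGen usage sketch (paths and args assumed). Because __next__
# contains `yield`, calling next(gen) returns a generator that must itself be
# advanced:
#   gen = DataGen('models/m/data/train', 256, {'bin_multiple': 3})
#   batches = next(gen)
#   x, y = next(batches)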
'''def load_data(dirpa):
print('loading data from '+dirpath)
hdf5_file = os.listdir(dirpath)[0]
with h5py.File(os.path.join(dirpath,hdf5_file), 'r') as hf:
inputs = hf['-inputs'][:]
outputs = hf['-outputs'][:]
return inputs,outputs'''
sr = 22050
hop_length = 512
window_size = 7
min_midi = 21
max_midi = 108
def wav2inputnp(audio_fn, spec_type='cqt', bin_multiple=3):
print("wav2inputnp")
bins_per_octave = 12 * bin_multiple # should be a multiple of 12
n_bins = (max_midi - min_midi + 1) * bin_multiple
    # down-sample, mono-channel
    y, _ = librosa.load(audio_fn, sr=sr)
S = librosa.cqt(y, fmin=librosa.midi_to_hz(min_midi), sr=sr, hop_length=hop_length,
bins_per_octave=bins_per_octave, n_bins=n_bins)
S = S.T
# TODO: LogScaleSpectrogram?
'''
if spec_type == 'cqt':
#down-sample,mono-channel
y,_ = librosa.load(audio_fn,sr)
S = librosa.cqt(y,fmin=librosa.midi_to_hz(min_midi), sr=sr, hop_length=hop_length,
bins_per_octave=bins_per_octave, n_bins=n_bins)
S = S.T
else:
#down-sample,mono-channel
y = madmom.audio.signal.Signal(audio_fn, sample_rate=sr, num_channels=1)
S = madmom.audio.spectrogram.LogarithmicFilteredSpectrogram(y,fmin=librosa.midi_to_hz(min_midi),
hop_size=hop_length, num_bands=bins_per_octave, fft_size=4096)'''
#S = librosa.amplitude_to_db(S)
S = np.abs(S)
minDB = np.min(S)
print(np.min(S), np.max(S), np.mean(S))
S = np.pad(S, ((window_size//2, window_size//2), (0, 0)),
'constant', constant_values=minDB)
windows = []
    # IMPORTANT NOTE:
    # Since we pad the spectrogram frames,
    # the onset frames are actually `offset` frames.
    # To obtain a window centred on each true frame index, we take a slice from i to i+window_size,
    # starting at frame 0 of the padded spectrogram.
for i in range(S.shape[0]-window_size+1):
w = S[i:i+window_size, :]
windows.append(w)
# print inputs
x = np.array(windows)
return x
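# Shape sanity sketch (assumptions: mono input resampled to 22050 Hz, bin_multiple=3):
#   x = wav2inputnp('example.wav')   # -> (n_frames, 7, 264),
#                                    #    since n_bins = (108 - 21 + 1) * 3 = 264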
def mid2outputnp(pm_mid, times):
piano_roll = pm_mid.get_piano_roll(fs=sr, times=times)[
min_midi:max_midi+1].T
piano_roll[piano_roll > 0] = 1
return piano_roll
def joinAndCreate(basePath, new):
newPath = os.path.join(basePath, new)
if not os.path.exists(newPath):
os.mkdir(newPath)
return newPath
def isSplitFolder(ddir):
return ddir == 'train' or ddir == 'test' or ddir == 'val'
def organize(args):
valCnt = 1
testPrefix = 'ENS'
path = os.path.join('models', args['model_name'])
dpath = os.path.join(path, 'data')
train_path = joinAndCreate(dpath, 'train')
test_path = joinAndCreate(dpath, 'test')
val_path = joinAndCreate(dpath, 'val')
for ddir in os.listdir(dpath):
if os.path.isdir(os.path.join(dpath, ddir)) and not isSplitFolder(ddir):
# print h5file
if ddir.startswith(testPrefix):
os.rename(os.path.join(dpath, ddir),
os.path.join(test_path, ddir))
elif valCnt > 0:
os.rename(os.path.join(dpath, ddir),
os.path.join(val_path, ddir))
valCnt -= 1
else:
os.rename(os.path.join(dpath, ddir),
os.path.join(train_path, ddir))
def preprocess(args):
# params
path = os.path.join('models', args['model_name'])
config = load_config(os.path.join(path, 'config.json'))
args.update(config)
data_dir = args['data_dir']
bin_multiple = int(args['bin_multiple'])
spec_type = args['spec_type']
framecnt = 0
# hack to deal with high PPQ from MAPS
# https://github.com/craffel/pretty-midi/issues/112
pretty_midi.pretty_midi.MAX_TICK = 1e10
for s in os.listdir(data_dir):
subdir = os.path.join(data_dir, s)
if not os.path.isdir(subdir):
continue
# recursively search in subdir
print(subdir)
inputs, outputs = [], []
addCnt, errCnt = 0, 0
for dp, dn, filenames in os.walk(subdir):
# in each level of the directory, look at filenames ending with .mid
for f in filenames:
# if there exists a .wav file and .midi file with the same name
if f.endswith('.wav'):
audio_fn = f
fprefix = audio_fn.split('.wav')[0]
mid_fn = fprefix + '.mid'
txt_fn = fprefix + '.txt'
if mid_fn in filenames:
# wav2inputnp
audio_fn = os.path.join(dp, audio_fn)
# mid2outputnp
mid_fn = os.path.join(dp, mid_fn)
pm_mid = pretty_midi.PrettyMIDI(mid_fn)
inputnp = wav2inputnp(
audio_fn, spec_type=spec_type, bin_multiple=bin_multiple)
times = librosa.frames_to_time(
np.arange(inputnp.shape[0]), sr=sr, hop_length=hop_length)
outputnp = mid2outputnp(pm_mid, times)
# check that num onsets is equal
if inputnp.shape[0] == outputnp.shape[0]:
print(("adding to dataset fprefix {}".format(fprefix)))
addCnt += 1
framecnt += inputnp.shape[0]
print(("framecnt is {}".format(framecnt)))
inputs.append(inputnp)
outputs.append(outputnp)
else:
print(("error for fprefix {}".format(fprefix)))
errCnt += 1
print((inputnp.shape))
print((outputnp.shape))
print(("{} examples in dataset".format(addCnt)))
print(("{} examples couldnt be processed".format(errCnt)))
# concatenate dynamic list to numpy list of example
if addCnt:
inputs = np.concatenate(inputs)
outputs = np.concatenate(outputs)
fn = subdir.split('/')[-1]
if not fn:
fn = subdir.split('/')[-2]
#fn += '.h5'
# save inputs,outputs to hdf5 file
datapath = joinAndCreate(path, 'data')
fnpath = joinAndCreate(datapath, fn)
mmi = np.memmap(filename=os.path.join(
fnpath, 'input.dat'), mode='w+', shape=inputs.shape)
mmi[:] = inputs[:]
mmo = np.memmap(filename=os.path.join(
fnpath, 'output.dat'), mode='w+', shape=outputs.shape)
mmo[:] = outputs[:]
del mmi
del mmo
'''with h5py.File(os.path.join(datapath,fn), 'w') as hf:
hf.create_dataset("-inputs", data=inputs)
hf.create_dataset("-outputs", data=outputs)
without dB, i'm just going to not worry about feature scaling
if args.zn:
nppath = os.path.join(path,'xn')
if os.path.isfile(nppath+'.npz'):
npzfile = np.load(nppath+'.npz')
x,x2,n = npzfile['x'],npzfile['x2'],npzfile['n']
else:
x,x2,n = 0,0,0
x += np.sum(inputs,axis=0)
x2 += np.sum(inputs**2,axis=0)
n += inputs.shape[0]
print x,x2,n
print 'mean={}'.format(x/n)
print 'var={}'.format(x2/n-(x/n)**2)
np.savez(nppath,x=x,x2=x2,n=n)'''
if __name__ == '__main__':
# Set up command-line argument parsing
parser = argparse.ArgumentParser(
description='Preprocess MIDI/Audio file pairs into ingestible data')
parser.add_argument('model_name',
help='model name. will use config from directory and save preprocessed data to it')
parser.add_argument('data_dir',
help='Path to data dir, searched recursively, used for naming HDF5 file')
parser.add_argument('--no-zn', dest='zn', action='store_false')
parser.set_defaults(zn=True)
args = vars(parser.parse_args())
preprocess(args)
|
{"hexsha": "d605126f25cff8dd183e12f1a9c86d4d761d6e5d", "size": 11905, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocess.py", "max_stars_repo_name": "KimberleyEvans-Parker/wav2mid", "max_stars_repo_head_hexsha": "de37c8e5e61b9f43401ac3885b455231c7e9ecec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-08-18T09:44:26.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-25T12:00:54.000Z", "max_issues_repo_path": "preprocess.py", "max_issues_repo_name": "KimberleyEvans-Parker/wav2mid", "max_issues_repo_head_hexsha": "de37c8e5e61b9f43401ac3885b455231c7e9ecec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocess.py", "max_forks_repo_name": "KimberleyEvans-Parker/wav2mid", "max_forks_repo_head_hexsha": "de37c8e5e61b9f43401ac3885b455231c7e9ecec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7084548105, "max_line_length": 113, "alphanum_fraction": 0.5397732045, "include": true, "reason": "import numpy", "num_tokens": 2836}
|
import numpy as np
import sys
import os
import nrrd
if (len(sys.argv) < 2):
print('Error: missing arguments!')
print('e.g. python copyHeader.py template.nrrd target.nrrd')
else:
print('Loading header from %s...' % (str(sys.argv[1])))
data1, header1 = nrrd.read(str(sys.argv[1]))
size = np.shape(data1)
print('Loading target image: %s...' % (str(sys.argv[2])))
data1, header2 = nrrd.read(str(sys.argv[2]))
if np.shape(data1) == size:
print('Changing: ' + str(header2))
print('Into: ' + str(header1))
print('Saving...')
nrrd.write(str(sys.argv[2]), data1, header1)
print('Updated: ' + str(sys.argv[2]))
else:
print('Images must be the same size!')
print(str(size) + ' not equal to ' + str(np.shape(data1)))
|
{"hexsha": "359b1b0a4bb81e87a54d709be7d016964a2284ca", "size": 802, "ext": "py", "lang": "Python", "max_stars_repo_path": "copyHeader.py", "max_stars_repo_name": "Robbie1977/NRRDtools", "max_stars_repo_head_hexsha": "e16f1e49fccadc5f717f55b7c2c3dc49ec96f89f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-02-23T11:41:45.000Z", "max_stars_repo_stars_event_max_datetime": "2015-02-23T11:41:45.000Z", "max_issues_repo_path": "copyHeader.py", "max_issues_repo_name": "Robbie1977/NRRDtools", "max_issues_repo_head_hexsha": "e16f1e49fccadc5f717f55b7c2c3dc49ec96f89f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-04-07T11:07:01.000Z", "max_issues_repo_issues_event_max_datetime": "2016-06-24T13:23:24.000Z", "max_forks_repo_path": "copyHeader.py", "max_forks_repo_name": "Robbie1977/NRRDtools", "max_forks_repo_head_hexsha": "e16f1e49fccadc5f717f55b7c2c3dc49ec96f89f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4166666667, "max_line_length": 66, "alphanum_fraction": 0.5910224439, "include": true, "reason": "import numpy", "num_tokens": 231}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 10:34:23 2018
@author: liushenghui
"""
#
# comment_classifier.py
#
# Vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
import pdb
import numpy as np
from collections import Counter
import jieba
import pandas as pd
import tensorflow as tf
import pickle
import random
from matplotlib import pyplot as plt
import keras
from keras import Model
from keras.models import Sequential
from keras.layers import Dense,Flatten,Dropout, Input ,LSTM
from keras.layers.convolutional import Conv2D,MaxPooling2D
from keras.utils.np_utils import to_categorical
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
"""
'I'm super man'
tokenize:
['I', "'m", 'super', 'man']
"""
"""
词形还原(lemmatizer),即把一个任何形式的英语单词还原到一般形式,
与词根还原不同(stemmer),后者是抽取一个单词的词根。
"""
# Build the vocabulary (lexicon)
def create_lexicon(train_file):
def process_file(txtfile):
with open(txtfile, 'r',encoding='utf8') as f:
lex = []
lines = f.readlines()
#print(lines)
for i,line in enumerate(lines):
try:
if i%2000 == 0:
print(i)
content = line.split('\t')[1]
words = jieba.lcut(content)
lex += words
except:
pass
print("分词完成")
return lex
lex = process_file(train_file)
#print(len(lex))
# lemmatizer = WordNetLemmatizer()
#    lex = [lemmatizer.lemmatize(word) for word in lex] # lemmatization (cats -> cat)
word_count = Counter(lex)
#print(word_count)
lex = []
for word in word_count:
if word_count[word] < 2000 and word_count[word] > 600:
lex.append(word)
return lex
# lex holds every word that appears in the text
def string_to_vector2(lex, review):
words = jieba.lcut(review[5:])
features = np.zeros(len(lex))
for word in words:
if word in lex:
features[lex.index(word)] += 1
return features
### may exhaust memory on large inputs
def normalize_dataset(lex,file):
dataset = []
    # lex: vocabulary; review: one comment; clf: the comment's class,
    # [0,1] marks a negative comment, [1,0] a positive one
def string_to_vector(lex, review):
lab, content = review.split('\t')
words = jieba.lcut(content)
features = np.zeros(len(lex))
for word in words:
if word in lex:
features[lex.index(word)] = 1
return features, lab
with open(file, 'r', encoding='utf8') as f:
lines = f.readlines()
lab_str = []
for i, line in enumerate(lines):
try:
if i%2000 == 0:
print(i)
                one_sample, lab = string_to_vector(lex, line)  # (features, label)
dataset.append(one_sample)
lab_str.append(lab)
except:
pass
return dataset,lab_str
def data_2_array(dataset):
data_x = []
data_y = []
for i in range(len(dataset)):
data_x.append(dataset[i][0])
data_y.append(dataset[i][1])
return np.array(data_x),np.array(data_y)
train_file = 'data/cnews/cnews.train.txt'
test_file = 'data/cnews/cnews.test.txt'
lex = create_lexicon(train_file)
train_x, train_lab = normalize_dataset(lex, train_file)
# Map the ten label strings to integer ids. dict() is required here: a bare
# zip object cannot be indexed and is exhausted after a single pass.
labs = list(Counter(train_lab).keys())
lab_dict = dict(zip(labs, range(10)))
test_x, test_lab = normalize_dataset(lex, test_file)
x_train = np.array(train_x)
y_train = np.array([lab_dict[i] for i in train_lab])
x_test = np.array(test_x)
y_test = np.array([lab_dict[i] for i in test_lab])
data = {'x_train':x_train,'x_test': x_test,'y_train': y_train,'y_test':y_test}
with open('lex.pkl','wb') as ff:
pickle.dump(lex,ff)
# Save the prepared data to a file for convenient reuse; this completes the data preparation.
with open('data.pkl', 'wb') as f:
pickle.dump(data, f)
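# Reload sketch for a downstream training script (file names as saved above):
#   with open('data.pkl', 'rb') as f:
#       data = pickle.load(f)
#   x_train, y_train = data['x_train'], data['y_train']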
|
{"hexsha": "5be2905e3eac0b7f33aad01b542eb2688ef4042d", "size": 3955, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_preprocess.py", "max_stars_repo_name": "fightingst/text-classification-cnn-rnn", "max_stars_repo_head_hexsha": "dda5b98a6bb5db897dc7db1966d8be74eb555adb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_preprocess.py", "max_issues_repo_name": "fightingst/text-classification-cnn-rnn", "max_issues_repo_head_hexsha": "dda5b98a6bb5db897dc7db1966d8be74eb555adb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_preprocess.py", "max_forks_repo_name": "fightingst/text-classification-cnn-rnn", "max_forks_repo_head_hexsha": "dda5b98a6bb5db897dc7db1966d8be74eb555adb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.275862069, "max_line_length": 78, "alphanum_fraction": 0.6219974716, "include": true, "reason": "import numpy", "num_tokens": 1133}
|
import numpy as np
from scipy.cluster import hierarchy
import sys
import json
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
codebasesPath = str(sys.argv[1])
codebaseName = str(sys.argv[2])
dendrogramName = str(sys.argv[3])
with open(codebasesPath + codebaseName + "/" + dendrogramName + "/similarityMatrix.json") as f:
similarityMatrix = json.load(f)
entities = similarityMatrix["entities"]
matrix = np.array(similarityMatrix["matrix"])
linkageType = similarityMatrix["linkageType"]
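# Expected input schema, inferred from the reads above (values illustrative):
# { "entities": [ ...labels... ], "matrix": [[...], ...], "linkageType": "average" }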
hierarc = hierarchy.linkage(y=matrix, method=linkageType)
fig = plt.figure(figsize=(25, 10))
hierarchy.dendrogram(hierarc, labels=entities, distance_sort='descending')
plt.savefig(codebasesPath + codebaseName + "/" + dendrogramName + "/dendrogramImage.png", format="png", bbox_inches='tight')
|
{"hexsha": "21f4ffa650d22d392f2a0d9eb4d66d582f04b2c0", "size": 814, "ext": "py", "lang": "Python", "max_stars_repo_path": "backend/src/main/resources/createDendrogram.py", "max_stars_repo_name": "ritosilva/mono2micro", "max_stars_repo_head_hexsha": "c45813443cbf4519797c9b8368220667cd3cb0ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-26T11:45:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-03T09:02:01.000Z", "max_issues_repo_path": "backend/src/main/resources/createDendrogram.py", "max_issues_repo_name": "ritosilva/mono2micro", "max_issues_repo_head_hexsha": "c45813443cbf4519797c9b8368220667cd3cb0ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2019-03-18T22:24:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-04T16:47:44.000Z", "max_forks_repo_path": "backend/src/main/resources/createDendrogram.py", "max_forks_repo_name": "ritosilva/mono2micro", "max_forks_repo_head_hexsha": "c45813443cbf4519797c9b8368220667cd3cb0ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-01-09T02:38:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-28T16:58:20.000Z", "avg_line_length": 29.0714285714, "max_line_length": 124, "alphanum_fraction": 0.7628992629, "include": true, "reason": "import numpy,from scipy", "num_tokens": 201}
|
/*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
*
 * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <Elementary.h>
#include <boost/concept_check.hpp>
#include <boost/format.hpp>
#include <vector>
#include <string>
#include <string.h>
#include <AbstractMainWindow.h>
#include "app_i18n.h"
#include "HistoryUI.h"
#include "ServiceManager.h"
#include "BrowserLogger.h"
#include "Tools/EflTools.h"
#include "Tools/GeneralTools.h"
#include "HistoryDaysListManager/HistoryDaysListManagerMob.h"
#include "services/HistoryService/HistoryItem.h"
#include "HistoryDeleteManager.h"
namespace tizen_browser{
namespace base_ui{
// TODO: History needs solid refactoring. A lot of features are not used anywhere.
using namespace services;
EXPORT_SERVICE(HistoryUI, "org.tizen.browser.historyui")
HistoryUI::HistoryUI()
: m_parent(nullptr)
, m_main_layout(nullptr)
, m_buttonClose(nullptr)
, m_buttonClear(nullptr)
, m_daysList(nullptr)
, m_historyDaysListManager(nullptr)
, m_naviframe(nullptr)
, m_modulesToolbar(nullptr)
{
BROWSER_LOGD("[%s:%d] ", __PRETTY_FUNCTION__, __LINE__);
m_edjFilePath = EDJE_DIR;
m_edjFilePath.append("HistoryUI/History.edj");
if (!m_historyDaysListManager)
m_historyDaysListManager = std::make_shared<HistoryDaysListManagerMob>();
m_historyDaysListManager->signalHistoryItemClicked.connect(signalHistoryItemClicked);
m_historyDaysListManager->signalDeleteHistoryItems.connect(signalDeleteHistoryItems);
m_historyDaysListManager->setRightButtonEnabledForHistory.connect(
boost::bind(&HistoryUI::setRightButtonEnabled, this, _1));
m_historyDaysListManager->setSelectedItemsCount.connect([this](auto count){
m_naviframe->setTitle((boost::format(_("IDS_BR_HEADER_PD_SELECTED_ABB")) % count).str());
});
}
HistoryUI::~HistoryUI()
{
}
void HistoryUI::showUI()
{
BROWSER_LOGD("[%s:%d] ", __PRETTY_FUNCTION__, __LINE__);
M_ASSERT(m_main_layout);
m_naviframe->show();
evas_object_show(m_main_layout);
}
void HistoryUI::hideUI()
{
BROWSER_LOGD("[%s:%d] ", __PRETTY_FUNCTION__, __LINE__);
M_ASSERT(m_main_layout);
evas_object_hide(m_main_layout);
clearItems();
m_naviframe->hide();
}
void HistoryUI::setRightButtonEnabled(bool enable)
{
BROWSER_LOGD("[%s:%d] %d", __PRETTY_FUNCTION__, __LINE__, enable);
m_naviframe->setRightButtonEnabled(enable);
}
void HistoryUI::init(Evas_Object* parent)
{
BROWSER_LOGD("[%s:%d] ", __PRETTY_FUNCTION__, __LINE__);
M_ASSERT(parent);
m_parent = parent;
}
Evas_Object* HistoryUI::getContent()
{
M_ASSERT(m_parent);
createHistoryUILayout();
return m_naviframe->getLayout();
}
void HistoryUI::createHistoryUILayout()
{
elm_theme_extension_add(nullptr, m_edjFilePath.c_str());
if (!m_naviframe)
m_naviframe = std::make_shared<NaviframeWrapper>(m_parent);
m_main_layout = elm_layout_add(m_naviframe->getLayout());
m_naviframe->setContent(m_main_layout);
elm_layout_file_set(m_main_layout, m_edjFilePath.c_str(), "history-layout");
evas_object_size_hint_weight_set(m_main_layout, EVAS_HINT_EXPAND, EVAS_HINT_EXPAND);
evas_object_size_hint_align_set(m_main_layout, EVAS_HINT_FILL, EVAS_HINT_FILL);
m_daysList = createDaysList(m_main_layout);
clearItems();
createTopContent();
elm_object_signal_emit(m_naviframe->getLayout(), "show_toolbars", "ui");
}
void HistoryUI::createTopContent()
{
BROWSER_LOGD("[%s:%d] ", __PRETTY_FUNCTION__, __LINE__);
M_ASSERT(m_naviframe->getLayout());
m_naviframe->addPrevButton(_close_clicked_cb, this);
m_naviframe->setPrevButtonVisible(true);
m_naviframe->setTitle(_("IDS_BR_TAB2_HISTORY"));
}
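// Creates the list of history entries grouped by day; isRemoveMode is
// expected to switch the list into delete (selection) mode, handled by
// HistoryDaysListManager.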
Evas_Object* HistoryUI::createDaysList(Evas_Object* parent, bool isRemoveMode)
{
    M_ASSERT(parent);
auto list = m_historyDaysListManager->createDaysList(parent, isRemoveMode);
evas_object_size_hint_weight_set(list, EVAS_HINT_EXPAND, EVAS_HINT_EXPAND);
evas_object_size_hint_align_set(list, EVAS_HINT_FILL, EVAS_HINT_FILL);
return list;
}
void HistoryUI::removeSelectedHistoryItems()
{
if (!m_historyDaysListManager) {
BROWSER_LOGD("[%s:%d] No selected elements to delete");
return;
}
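    // If "select all" is checked, signal a full history clear as well.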
if (m_historyDaysListManager->isSelectAllChecked())
clearHistoryClicked();
m_historyDaysListManager->removeSelectedItems();
}
void HistoryUI::_close_clicked_cb(void * data, Evas_Object*, void*)
{
BROWSER_LOGD("[%s:%d] ", __PRETTY_FUNCTION__, __LINE__);
if (!data) {
BROWSER_LOGW("[%s] data = nullptr", __PRETTY_FUNCTION__);
return;
}
auto self = static_cast<HistoryUI*>(data);
self->closeHistoryUIClicked();
}
void HistoryUI::addHistoryItems(
std::shared_ptr<HistoryItemVector> items,
HistoryPeriod period)
{
BROWSER_LOGD("[%s:%d] ", __PRETTY_FUNCTION__, __LINE__);
if (items->size() == 0)
return;
m_historyDaysListManager->addHistoryItems(items, period);
}
void HistoryUI::clearItems()
{
BROWSER_LOGD("[%s:%d] ", __PRETTY_FUNCTION__, __LINE__);
m_historyDaysListManager->clear();
}
}
}
|
{"hexsha": "9ecda8bdb7d2571f91ee73ec536be25f1419cee1", "size": 5650, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "services/HistoryUI/HistoryUI.cpp", "max_stars_repo_name": "knowac/tizen-browser-30", "max_stars_repo_head_hexsha": "0ea06a4cd6bdca3dc3da674dd8189bf528c166f8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-01-31T21:44:00.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-31T21:44:00.000Z", "max_issues_repo_path": "services/HistoryUI/HistoryUI.cpp", "max_issues_repo_name": "knowac/tizen-browser-1.6.4", "max_issues_repo_head_hexsha": "a37a3ea5b8c01d86bd3dac00d228800e5eed4619", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "services/HistoryUI/HistoryUI.cpp", "max_forks_repo_name": "knowac/tizen-browser-1.6.4", "max_forks_repo_head_hexsha": "a37a3ea5b8c01d86bd3dac00d228800e5eed4619", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-01-31T21:44:04.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-31T21:44:04.000Z", "avg_line_length": 29.4270833333, "max_line_length": 97, "alphanum_fraction": 0.7350442478, "num_tokens": 1387}
|
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
import numpy as np
import pytest
import popart
import pprint
import json
import platform
# 'import test_util' requires adding to sys.path
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent.parent))
import test_util as tu
def getBaseOptions():
opts = popart.SessionOptions()
opts.reportOptions = {"showExecutionSteps": "true"}
# TODO(T14786) investigate why swapping causes some tests to fail
opts.swapLimitScheduler = -1
# TODO(T14786) investigate why GREEDY causes some tests to fail
opts.kahnTieBreaker = "FIFO"
# TODO(T14786) investigate why tighter initialization causes some tests to fail
opts.transitiveClosureOptimizationThreshold = 0
return opts
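# Gradient tensors are identified by popart's reserved gradient name prefix.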
def _is_grad_tensor(x):
return x['name'].startswith(popart.reservedGradientPrefix())
# The backward lhs matmul should take a gradient tensor
# as input 0 and a non-gradient tensor as input 1.
def _get_bwd_lhs_matmul(matmuls):
result = []
for m in matmuls:
i0 = m['inputs'][0]
i1 = m['inputs'][1]
if _is_grad_tensor(i0) and not _is_grad_tensor(i1):
result.append(m)
assert len(result) == 1
return result[0]
# The backward rhs matmul should take a non-gradient tensor
# as input 0 and a gradient tensor as input 1.
def _get_bwd_rhs_matmul(matmuls):
result = []
for m in matmuls:
i0 = m['inputs'][0]
i1 = m['inputs'][1]
if not _is_grad_tensor(i0) and _is_grad_tensor(i1):
result.append(m)
assert len(result) == 1
return result[0]
def gen_shape(shape):
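    # Format a 3-element shape the way the serialized IR JSON prints it,
    # e.g. gen_shape([1, 2, 4]) == '[1 2 4]'.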
return '[{0} {1} {2}]'.format(str(shape[0]), str(shape[1]), str(shape[2]))
def test_matmul_serialization_invalid_mode():
if platform.system() == "Darwin":
        # On macOS this raises a RuntimeError instead of popart_exception
print("T11614 : skipping this test on mac/os")
pytest.skip("T11614 : skipping this test on mac/os")
else:
lhs_shape = [2, 2]
rhs_shape = [2, 4]
lhs_data = np.random.rand(*lhs_shape).astype(np.float32)
rhs_data = np.random.rand(*rhs_shape).astype(np.float32)
builder = popart.Builder()
lhs = builder.addInputTensor(popart.TensorInfo("FLOAT", lhs_shape),
"lhs")
rhs = builder.addInputTensor(popart.TensorInfo("FLOAT", rhs_shape),
"rhs")
o = builder.aiOnnx.matmul([lhs, rhs])
with pytest.raises(popart.popart_exception) as e_info:
try:
builder.setSerializeMatMul({o}, "invalid_mode")
except popart.popart_exception as e:
print("Catch popart_exception ", type(e))
raise
except Exception as e:
print("Unexpected exception from setSerializeMatMul ", type(e))
raise
assert (e_info.value.args[0].startswith(
"Unsupported mat mul serialization mode 'invalid_mode'. Supported modes are 'input_channels', 'reducing_dim', 'output_channels' or 'none'"
))
def test_matmul_serialization_invalid_factor():
lhs_shape = [2, 2]
rhs_shape = [2, 4]
lhs_data = np.random.rand(*lhs_shape).astype(np.float32)
rhs_data = np.random.rand(*rhs_shape).astype(np.float32)
builder = popart.Builder()
lhs = builder.addInputTensor(popart.TensorInfo("FLOAT", lhs_shape), "lhs")
rhs = builder.addInputTensor(popart.TensorInfo("FLOAT", rhs_shape), "rhs")
o = builder.aiOnnx.matmul([lhs, rhs])
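    # A factor of 3 does not divide the output channels dim (4), so creating
    # the session below must fail.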
builder.setSerializeMatMul({o}, "output_channels", 3)
builder.addOutputTensor(o)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(1, {o: popart.AnchorReturnType("All")})
opts = getBaseOptions()
pat = popart.Patterns(['MatMulOp', 'MatMulRhsGradOp', 'MatMulLhsGradOp'])
pat.enableRuntimeAsserts(False)
with pytest.raises(popart.popart_exception) as e_info:
session = popart.InferenceSession(
fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
patterns=pat,
deviceInfo=tu.create_test_device(opts={"compileIPUCode": False}))
assert (e_info.value.args[0].startswith(
"Invalid serialisation factor 3 for output channels dim 4. output_channels dim should be a multple of the serialisation factor"
))
def test_matmul_serialization_inference():
input_channels = 2
reducing_dim = 2
output_channels = 4
lhs_shape = [input_channels, reducing_dim]
rhs_shape = [reducing_dim, output_channels]
lhs_data = np.random.rand(*lhs_shape).astype(np.float32)
rhs_data = np.random.rand(*rhs_shape).astype(np.float32)
def run_test(matmul_serialization_mode, matmul_serialization_factor,
verify):
builder = popart.Builder()
lhs = builder.addInputTensor(popart.TensorInfo("FLOAT", lhs_shape),
"lhs")
rhs = builder.addInputTensor(popart.TensorInfo("FLOAT", rhs_shape),
"rhs")
o = builder.aiOnnx.matmul([lhs, rhs])
builder.setSerializeMatMul({o}, matmul_serialization_mode,
matmul_serialization_factor)
builder.addOutputTensor(o)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(1, {o: popart.AnchorReturnType("All")})
opts = getBaseOptions()
pat = popart.Patterns(
['MatMulOp', 'MatMulRhsGradOp', 'MatMulLhsGradOp'])
pat.enableRuntimeAsserts(False)
session = popart.InferenceSession(
fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
patterns=pat,
deviceInfo=tu.create_test_device(opts={"compileIPUCode": False}))
session.prepareDevice()
anchors = session.initAnchorArrays()
inputs = {lhs: lhs_data, rhs: rhs_data}
stepio = popart.PyStepIO(inputs, anchors)
session.run(stepio)
verify(session, matmul_serialization_factor)
return anchors[o]
def verify_no_serialisation(session, matmul_serialization_factor):
        ''' Verify that the matmul in the main graph is correct'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 1)
# forward
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([1, input_channels, reducing_dim])
and rhs['shape'] == gen_shape(
[1, reducing_dim, output_channels]))
def verify_serialisation_input_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
# forward
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels // matmul_serialization_factor, reducing_dim
]) and rhs['shape'] == gen_shape([1, reducing_dim, output_channels]))
def verify_serialisation_reducing_dim(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
# forward
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels, reducing_dim // matmul_serialization_factor
]) and rhs['shape'] == gen_shape(
[1, reducing_dim // matmul_serialization_factor, output_channels]))
def verify_serialisation_output_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
# forward
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape(
[1, input_channels, reducing_dim]) and rhs['shape'] == gen_shape([
1, reducing_dim, output_channels // matmul_serialization_factor
]))
o1 = run_test("none", 0, verify_no_serialisation)
o2 = run_test("input_channels", 2, verify_serialisation_input_channels)
o3 = run_test("reducing_dim", 2, verify_serialisation_reducing_dim)
o4 = run_test("output_channels", 4, verify_serialisation_output_channels)
assert (np.allclose(o1, o2))
assert (np.allclose(o1, o3))
assert (np.allclose(o1, o4))
def test_matmul_serialization_training_1():
input_channels = 6
reducing_dim = 2
output_channels = 4
lhs_shape = [input_channels, reducing_dim]
rhs_shape = [reducing_dim, output_channels]
lhs_data = np.ones((*lhs_shape, ), dtype=np.float32)
rhs_data = np.ones((*rhs_shape, ), dtype=np.float32)
def run_test(matmul_serialization_mode, matmul_serialization_factor,
verify):
builder = popart.Builder()
lhs = builder.addInitializedInputTensor(lhs_data, "lhs")
rhs = builder.addInitializedInputTensor(rhs_data, "rhs")
o = builder.aiOnnx.matmul([lhs, rhs])
builder.setSerializeMatMul({o}, matmul_serialization_mode,
matmul_serialization_factor)
loss = builder.aiGraphcore.l1loss([o], 0.1)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(
1,
{
o:
popart.AnchorReturnType("All"),
rhs:
popart.AnchorReturnType("Final"),
popart.reservedGradientPrefix() + lhs:
popart.AnchorReturnType("All"),
#popart.reservedGradientPrefix() + rhs: popart.AnchorReturnType("All"), << T11469
})
opts = getBaseOptions()
pat = popart.Patterns(
['MatMulOp', 'MatMulRhsGradOp', 'MatMulLhsGradOp', 'OpToIdentity'])
pat.enableRuntimeAsserts(False)
session = popart.TrainingSession(
fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
loss=loss,
optimizer=popart.ConstSGD(0.01),
patterns=pat,
deviceInfo=tu.create_test_device(opts={"compileIPUCode": False}))
session.prepareDevice()
session.weightsFromHost()
anchors = session.initAnchorArrays()
inputs = {lhs: lhs_data}
stepio = popart.PyStepIO(inputs, anchors)
session.run(stepio)
session.weightsToHost()
verify(session, matmul_serialization_factor)
return anchors[rhs]
def verify_no_serialisation(session, matmul_serialization_factor):
        ''' Verify that the matmul in the main graph is correct'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 3)
# forward
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([1, input_channels, reducing_dim])
and rhs['shape'] == gen_shape(
[1, reducing_dim, output_channels]))
# bwd lhs
bwd_lhs = _get_bwd_lhs_matmul(matmuls)
lhs = bwd_lhs['inputs'][0]
rhs = bwd_lhs['inputs'][1]
assert (lhs['shape'] == gen_shape([1, input_channels, output_channels])
and rhs['shape'] == gen_shape(
[1, output_channels, reducing_dim]))
# bwd rhs
bwd_rhs = _get_bwd_rhs_matmul(matmuls)
lhs = bwd_rhs['inputs'][0]
rhs = bwd_rhs['inputs'][1]
assert (lhs['shape'] == gen_shape([1, reducing_dim, input_channels])
and rhs['shape'] == gen_shape(
[1, input_channels, output_channels]))
def verify_serialisation_input_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels // matmul_serialization_factor, reducing_dim
]) and rhs['shape'] == gen_shape([1, reducing_dim, output_channels]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels // matmul_serialization_factor, output_channels
]) and rhs['shape'] == gen_shape([1, output_channels, reducing_dim]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, reducing_dim, input_channels // matmul_serialization_factor
]) and rhs['shape'] == gen_shape([
1, input_channels // matmul_serialization_factor, output_channels
]))
def verify_serialisation_reducing_dim(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels, reducing_dim // matmul_serialization_factor
]) and rhs['shape'] == gen_shape(
[1, reducing_dim // matmul_serialization_factor, output_channels]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([1, input_channels, output_channels])
and rhs['shape'] == gen_shape([
1, output_channels,
reducing_dim // matmul_serialization_factor
]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, reducing_dim // matmul_serialization_factor, input_channels
]) and rhs['shape'] == gen_shape([1, input_channels, output_channels]))
def verify_serialisation_output_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape(
[1, input_channels, reducing_dim]) and rhs['shape'] == gen_shape([
1, reducing_dim, output_channels // matmul_serialization_factor
]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels, output_channels // matmul_serialization_factor
]) and rhs['shape'] == gen_shape(
[1, output_channels // matmul_serialization_factor, reducing_dim]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([1, reducing_dim, input_channels])
and rhs['shape'] == gen_shape([
1, input_channels,
output_channels // matmul_serialization_factor
]))
w1 = run_test("none", 0, verify_no_serialisation)
w2 = run_test("input_channels", 2, verify_serialisation_input_channels)
w3 = run_test("reducing_dim", 2, verify_serialisation_reducing_dim)
w4 = run_test("output_channels", 4, verify_serialisation_output_channels)
assert (np.allclose(w1, w2))
assert (np.allclose(w1, w3))
assert (np.allclose(w1, w4))
def test_matmul_serialization_training_2():
input_channels = 6
reducing_dim = 16
output_channels = 15
lhs_group_dim = 2
lhs_shape = [lhs_group_dim, input_channels, reducing_dim]
rhs_shape = [reducing_dim, output_channels]
lhs_data = np.ones((*lhs_shape, ), dtype=np.float32)
rhs_data = np.ones((*rhs_shape, ), dtype=np.float32)
def run_test(matmul_serialization_mode, matmul_serialization_factor,
verify):
builder = popart.Builder()
lhs = builder.addInputTensor(popart.TensorInfo("FLOAT", lhs_shape),
"lhs")
lhs_reshape = builder.reshape_const(
builder.aiOnnx, [lhs],
[lhs_group_dim * input_channels, reducing_dim])
rhs = builder.addInitializedInputTensor(rhs_data, "rhs")
o = builder.aiOnnx.matmul([lhs_reshape, rhs])
builder.setSerializeMatMul({o}, matmul_serialization_mode,
matmul_serialization_factor)
o_reshape = builder.reshape_const(
builder.aiOnnx, [o],
[lhs_group_dim, input_channels, output_channels])
loss = builder.aiGraphcore.l1loss([o_reshape], 0.1)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(
1, {
o_reshape:
popart.AnchorReturnType("All"),
rhs:
popart.AnchorReturnType("Final"),
popart.reservedGradientPrefix() + lhs:
popart.AnchorReturnType("All"),
})
opts = getBaseOptions()
pat = popart.Patterns(
['MatMulOp', 'MatMulRhsGradOp', 'MatMulLhsGradOp', 'OpToIdentity'])
pat.enableRuntimeAsserts(False)
session = popart.TrainingSession(
fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
loss=loss,
optimizer=popart.ConstSGD(0.01),
patterns=pat,
deviceInfo=tu.create_test_device(opts={"compileIPUCode": False}))
session.prepareDevice()
session.weightsFromHost()
anchors = session.initAnchorArrays()
inputs = {lhs: lhs_data}
stepio = popart.PyStepIO(inputs, anchors)
session.run(stepio)
session.weightsToHost()
verify(session, matmul_serialization_factor)
return anchors[rhs]
def verify_no_serialisation(session, matmul_serialization_factor):
        ''' Verify that the matmul in the main graph is correct'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 3)
# forward
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, lhs_group_dim * input_channels, reducing_dim
]) and rhs['shape'] == gen_shape([1, reducing_dim, output_channels]))
# bwd lhs
bwd_lhs = _get_bwd_lhs_matmul(matmuls)
lhs = bwd_lhs['inputs'][0]
rhs = bwd_lhs['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, lhs_group_dim * input_channels, output_channels
]) and rhs['shape'] == gen_shape([1, output_channels, reducing_dim]))
# bwd rhs
bwd_rhs = _get_bwd_rhs_matmul(matmuls)
lhs = bwd_rhs['inputs'][0]
rhs = bwd_rhs['inputs'][1]
assert (lhs['shape'] == gen_shape(
[1, reducing_dim, lhs_group_dim * input_channels])
and rhs['shape'] == gen_shape(
[1, lhs_group_dim * input_channels, output_channels]))
def verify_serialisation_input_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, (lhs_group_dim * input_channels) // matmul_serialization_factor,
reducing_dim
]) and rhs['shape'] == gen_shape([1, reducing_dim, output_channels]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, (lhs_group_dim * input_channels) // matmul_serialization_factor,
output_channels
]) and rhs['shape'] == gen_shape([1, output_channels, reducing_dim]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, reducing_dim,
(lhs_group_dim * input_channels) // matmul_serialization_factor
]) and rhs['shape'] == gen_shape([
1, (lhs_group_dim * input_channels) // matmul_serialization_factor,
output_channels
]))
def verify_serialisation_reducing_dim(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, (lhs_group_dim * input_channels),
reducing_dim // matmul_serialization_factor
]) and rhs['shape'] == gen_shape(
[1, reducing_dim // matmul_serialization_factor, output_channels]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, (lhs_group_dim * input_channels), output_channels
]) and rhs['shape'] == gen_shape(
[1, output_channels, reducing_dim // matmul_serialization_factor]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, reducing_dim // matmul_serialization_factor,
(lhs_group_dim * input_channels)
]) and rhs['shape'] == gen_shape(
[1, (lhs_group_dim * input_channels), output_channels]))
def verify_serialisation_output_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, (lhs_group_dim * input_channels), reducing_dim
]) and rhs['shape'] == gen_shape(
[1, reducing_dim, output_channels // matmul_serialization_factor]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, (lhs_group_dim * input_channels),
output_channels // matmul_serialization_factor
]) and rhs['shape'] == gen_shape(
[1, output_channels // matmul_serialization_factor, reducing_dim]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape(
[1, reducing_dim, (lhs_group_dim * input_channels)])
and rhs['shape'] == gen_shape([
1, (lhs_group_dim * input_channels),
output_channels // matmul_serialization_factor
]))
w1 = run_test("none", 0, verify_no_serialisation)
w2 = run_test("input_channels", 2, verify_serialisation_input_channels)
w3 = run_test("reducing_dim", 4, verify_serialisation_reducing_dim)
w4 = run_test("output_channels", 5, verify_serialisation_output_channels)
assert (np.allclose(w1, w2))
assert (np.allclose(w1, w3))
assert (np.allclose(w1, w4))
def test_matmul_serialization_training_3():
input_channels = 6
reducing_dim = 2
output_channels = 4
lhs_shape = [input_channels, reducing_dim]
rhs_shape = [output_channels, reducing_dim]
lhs_data = np.ones((*lhs_shape, ), dtype=np.float32)
rhs_data = np.ones((*rhs_shape, ), dtype=np.float32)
def run_test(matmul_serialization_mode, matmul_serialization_factor,
verify):
builder = popart.Builder()
lhs = builder.addInitializedInputTensor(lhs_data, "lhs")
rhs = builder.addInitializedInputTensor(rhs_data, "rhs")
rhs_transposed = builder.aiOnnx.transpose([rhs])
o = builder.aiOnnx.matmul([lhs, rhs_transposed])
builder.setSerializeMatMul({o}, matmul_serialization_mode,
matmul_serialization_factor)
loss = builder.aiGraphcore.l1loss([o], 0.1)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(
1,
{
o:
popart.AnchorReturnType("All"),
rhs:
popart.AnchorReturnType("Final"),
popart.reservedGradientPrefix() + lhs:
popart.AnchorReturnType("All"),
#popart.reservedGradientPrefix() + rhs: popart.AnchorReturnType("All"), << T11469
})
opts = getBaseOptions()
pat = popart.Patterns(
['MatMulOp', 'MatMulRhsGradOp', 'MatMulLhsGradOp', 'OpToIdentity'])
pat.enableRuntimeAsserts(False)
session = popart.TrainingSession(
fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
loss=loss,
optimizer=popart.ConstSGD(0.01),
patterns=pat,
deviceInfo=tu.create_test_device(opts={"compileIPUCode": False}))
session.prepareDevice()
session.weightsFromHost()
anchors = session.initAnchorArrays()
inputs = {lhs: lhs_data}
stepio = popart.PyStepIO(inputs, anchors)
session.run(stepio)
session.weightsToHost()
verify(session, matmul_serialization_factor)
return anchors[rhs]
def verify_no_serialisation(session, matmul_serialization_factor):
        ''' Verify that the matmul in the main graph is correct'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 3)
# forward
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([1, input_channels, reducing_dim])
and rhs['shape'] == gen_shape(
[1, reducing_dim, output_channels]))
# bwd lhs
bwd_lhs = _get_bwd_lhs_matmul(matmuls)
lhs = bwd_lhs['inputs'][0]
rhs = bwd_lhs['inputs'][1]
assert (lhs['shape'] == gen_shape([1, input_channels, output_channels])
and rhs['shape'] == gen_shape(
[1, output_channels, reducing_dim]))
# bwd rhs
bwd_rhs = _get_bwd_rhs_matmul(matmuls)
lhs = bwd_rhs['inputs'][0]
rhs = bwd_rhs['inputs'][1]
assert (lhs['shape'] == gen_shape([1, reducing_dim, input_channels])
and rhs['shape'] == gen_shape(
[1, input_channels, output_channels]))
def verify_serialisation_input_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels // matmul_serialization_factor, reducing_dim
]) and rhs['shape'] == gen_shape([1, reducing_dim, output_channels]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels // matmul_serialization_factor, output_channels
]) and rhs['shape'] == gen_shape([1, output_channels, reducing_dim]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, reducing_dim, input_channels // matmul_serialization_factor
]) and rhs['shape'] == gen_shape([
1, input_channels // matmul_serialization_factor, output_channels
]))
def verify_serialisation_reducing_dim(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels, reducing_dim // matmul_serialization_factor
]) and rhs['shape'] == gen_shape(
[1, reducing_dim // matmul_serialization_factor, output_channels]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([1, input_channels, output_channels])
and rhs['shape'] == gen_shape([
1, output_channels,
reducing_dim // matmul_serialization_factor
]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, reducing_dim // matmul_serialization_factor, input_channels
]) and rhs['shape'] == gen_shape([1, input_channels, output_channels]))
def verify_serialisation_output_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape(
[1, input_channels, reducing_dim]) and rhs['shape'] == gen_shape([
1, reducing_dim, output_channels // matmul_serialization_factor
]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels, output_channels // matmul_serialization_factor
]) and rhs['shape'] == gen_shape(
[1, output_channels // matmul_serialization_factor, reducing_dim]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([1, reducing_dim, input_channels])
and rhs['shape'] == gen_shape([
1, input_channels,
output_channels // matmul_serialization_factor
]))
w1 = run_test("none", 0, verify_no_serialisation)
w2 = run_test("input_channels", 2, verify_serialisation_input_channels)
w3 = run_test("reducing_dim", 2, verify_serialisation_reducing_dim)
w4 = run_test("output_channels", 4, verify_serialisation_output_channels)
assert (np.allclose(w1, w2))
assert (np.allclose(w1, w3))
assert (np.allclose(w1, w4))
def test_matmul_serialization_precision():
np.random.seed(1984)
input_channels = 20
reducing_dim = 128
output_channels = 64
lhs_shape = [input_channels, reducing_dim]
rhs_shape = [reducing_dim, output_channels]
lhs_data = np.random.normal(0, 0.02, (*lhs_shape, )).astype(np.float16)
rhs_data = np.random.normal(0, 0.02, (*rhs_shape, )).astype(np.float16)
def run_test(matmul_serialization_mode, matmul_serialization_factor,
verify):
builder = popart.Builder()
lhs = builder.addInitializedInputTensor(lhs_data, "lhs")
rhs = builder.addInitializedInputTensor(rhs_data, "rhs")
o = builder.aiOnnx.matmul([lhs, rhs])
builder.setSerializeMatMul({o},
matmul_serialization_mode,
matmul_serialization_factor,
keep_precision=True)
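        # keep_precision=True is expected to accumulate the serialized partial
        # results in higher precision and cast back once, hence the single
        # Cast op the verifiers below look for in the main graph.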
loss = builder.aiGraphcore.l1loss([o], 0.1)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(
1,
{
o:
popart.AnchorReturnType("All"),
rhs:
popart.AnchorReturnType("Final"),
popart.reservedGradientPrefix() + lhs:
popart.AnchorReturnType("All"),
#popart.reservedGradientPrefix() + rhs: popart.AnchorReturnType("All"), << T11469
})
opts = getBaseOptions()
pat = popart.Patterns(
['MatMulOp', 'MatMulRhsGradOp', 'MatMulLhsGradOp', 'OpToIdentity'])
pat.enableRuntimeAsserts(False)
session = popart.TrainingSession(
fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
loss=loss,
optimizer=popart.ConstSGD(0.01),
patterns=pat,
deviceInfo=tu.create_test_device(opts={"compileIPUCode": False}))
session.prepareDevice()
session.weightsFromHost()
anchors = session.initAnchorArrays()
inputs = {lhs: lhs_data}
stepio = popart.PyStepIO(inputs, anchors)
session.run(stepio)
session.weightsToHost()
verify(session, matmul_serialization_factor)
return anchors[rhs]
def verify_no_serialisation(session, matmul_serialization_factor):
        ''' Verify that the matmul in the main graph is correct'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 3)
# forward
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([1, input_channels, reducing_dim])
and rhs['shape'] == gen_shape(
[1, reducing_dim, output_channels]))
# bwd lhs
bwd_lhs = _get_bwd_lhs_matmul(matmuls)
lhs = bwd_lhs['inputs'][0]
rhs = bwd_lhs['inputs'][1]
assert (lhs['shape'] == gen_shape([1, input_channels, output_channels])
and rhs['shape'] == gen_shape(
[1, output_channels, reducing_dim]))
# bwd rhs
bwd_rhs = _get_bwd_rhs_matmul(matmuls)
lhs = bwd_rhs['inputs'][0]
rhs = bwd_rhs['inputs'][1]
assert (lhs['shape'] == gen_shape([1, reducing_dim, input_channels])
and rhs['shape'] == gen_shape(
[1, input_channels, output_channels]))
def verify_serialisation_input_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
casts = [op for op in ir['maingraph'] if op['type'] == 'Cast']
assert (len(casts) == 1)
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels // matmul_serialization_factor, reducing_dim
]) and rhs['shape'] == gen_shape([1, reducing_dim, output_channels]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels // matmul_serialization_factor, output_channels
]) and rhs['shape'] == gen_shape([1, output_channels, reducing_dim]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, reducing_dim, input_channels // matmul_serialization_factor
]) and rhs['shape'] == gen_shape([
1, input_channels // matmul_serialization_factor, output_channels
]))
def verify_serialisation_reducing_dim(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
casts = [op for op in ir['maingraph'] if op['type'] == 'Cast']
assert (len(casts) == 1)
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape(
[1, input_channels, reducing_dim // matmul_serialization_factor]))
assert (rhs['shape'] == gen_shape(
[1, reducing_dim // matmul_serialization_factor, output_channels]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([1, input_channels, output_channels])
and rhs['shape'] == gen_shape([
1, output_channels,
reducing_dim // matmul_serialization_factor
]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, reducing_dim // matmul_serialization_factor, input_channels
]) and rhs['shape'] == gen_shape([1, input_channels, output_channels]))
def verify_serialisation_output_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
casts = [op for op in ir['maingraph'] if op['type'] == 'Cast']
assert (len(casts) == 1)
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape(
[1, input_channels, reducing_dim]) and rhs['shape'] == gen_shape([
1, reducing_dim, output_channels // matmul_serialization_factor
]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels, output_channels // matmul_serialization_factor
]) and rhs['shape'] == gen_shape(
[1, output_channels // matmul_serialization_factor, reducing_dim]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([1, reducing_dim, input_channels])
and rhs['shape'] == gen_shape([
1, input_channels,
output_channels // matmul_serialization_factor
]))
w1 = run_test("none", 0, verify_no_serialisation)
w2 = run_test("input_channels", 10, verify_serialisation_input_channels)
w3 = run_test("reducing_dim", 16, verify_serialisation_reducing_dim)
w4 = run_test("output_channels", 16, verify_serialisation_output_channels)
    # Check that the output (anchors[rhs]) of the test is the same without
# serialization and with different serialization modes.
assert (np.allclose(w1, w2))
assert (np.allclose(w1, w3))
assert (np.allclose(w1, w4))
def test_matmul_serialization_training_with_gradient_accumulation():
input_channels = 6
reducing_dim = 2
output_channels = 4
batches_per_step = 20
lhs_shape = [input_channels, reducing_dim]
rhs_shape = [reducing_dim, output_channels]
lhs_data = np.ones((
batches_per_step,
*lhs_shape,
), dtype=np.float32)
rhs_data = np.ones((
batches_per_step,
*rhs_shape,
), dtype=np.float32)
def run_test(matmul_serialization_mode, matmul_serialization_factor,
verify):
builder = popart.Builder()
lhs = builder.addInitializedInputTensor(lhs_data, "lhs")
rhs = builder.addInitializedInputTensor(rhs_data, "rhs")
o = builder.aiOnnx.matmul([lhs, rhs])
loss = builder.aiGraphcore.l1loss([o], 0.1)
builder.setSerializeMatMul({o}, matmul_serialization_mode,
matmul_serialization_factor)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(
20,
{
o:
popart.AnchorReturnType("All"),
rhs:
popart.AnchorReturnType("Final"),
popart.reservedGradientPrefix() + lhs:
popart.AnchorReturnType("All"),
#popart.reservedGradientPrefix() + rhs: popart.AnchorReturnType("All"), << T11469
})
opts = getBaseOptions()
opts.enableGradientAccumulation = True
opts.accumulationFactor = 5
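        # The weight update is applied only once every 5 accumulated
        # micro-batches.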
pat = popart.Patterns(
['MatMulOp', 'MatMulRhsGradOp', 'MatMulLhsGradOp', 'OpToIdentity'])
pat.enableRuntimeAsserts(False)
session = popart.TrainingSession(
fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
loss=loss,
optimizer=popart.ConstSGD(0.01),
patterns=pat,
deviceInfo=tu.create_test_device(opts={"compileIPUCode": False}))
session.prepareDevice()
session.weightsFromHost()
anchors = session.initAnchorArrays()
inputs = {lhs: lhs_data}
stepio = popart.PyStepIO(inputs, anchors)
        # TODO(T15448) understand why shapes are incorrect
stepio.enableRuntimeAsserts(False)
session.run(stepio)
session.weightsToHost()
verify(session, matmul_serialization_factor)
return anchors[rhs]
def verify_no_serialisation(session, matmul_serialization_factor):
        ''' Verify that the matmul in the main graph is correct'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
        # With no serialization we should have 3 matmuls in the main graph (not outlined):
# FWD, BWD_LHS, BWD_RHS
assert (len(matmuls) == 3)
# forward
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape(
[batches_per_step, input_channels, reducing_dim])
and rhs['shape'] == gen_shape(
[batches_per_step, reducing_dim, output_channels]))
# bwd lhs
lhs = matmuls[2]['inputs'][0]
rhs = matmuls[2]['inputs'][1]
assert (lhs['shape'] == gen_shape(
[batches_per_step, input_channels, output_channels])
and rhs['shape'] == gen_shape(
[batches_per_step, output_channels, reducing_dim]))
# bwd rhs
lhs = matmuls[1]['inputs'][0]
rhs = matmuls[1]['inputs'][1]
assert (lhs['shape'] == gen_shape(
[batches_per_step, reducing_dim, input_channels])
and rhs['shape'] == gen_shape(
[batches_per_step, input_channels, output_channels]))
def verify_serialisation_input_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
batches_per_step, input_channels // matmul_serialization_factor,
reducing_dim
]) and rhs['shape'] == gen_shape(
[batches_per_step, reducing_dim, output_channels]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
batches_per_step, input_channels // matmul_serialization_factor,
output_channels
]) and rhs['shape'] == gen_shape(
[batches_per_step, output_channels, reducing_dim]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
batches_per_step, reducing_dim,
input_channels // matmul_serialization_factor
]) and rhs['shape'] == gen_shape([
batches_per_step, input_channels // matmul_serialization_factor,
output_channels
]))
def verify_serialisation_reducing_dim(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
print(ir)
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
batches_per_step, input_channels,
reducing_dim // matmul_serialization_factor
]) and rhs['shape'] == gen_shape([
batches_per_step, reducing_dim // matmul_serialization_factor,
output_channels
]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape(
[batches_per_step, input_channels, output_channels])
and rhs['shape'] == gen_shape([
batches_per_step, output_channels,
reducing_dim // matmul_serialization_factor
]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
batches_per_step, reducing_dim // matmul_serialization_factor,
input_channels
]) and rhs['shape'] == gen_shape(
[batches_per_step, input_channels, output_channels]))
def verify_serialisation_output_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape(
[batches_per_step, input_channels, reducing_dim])
and rhs['shape'] == gen_shape([
batches_per_step, reducing_dim,
output_channels // matmul_serialization_factor
]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
batches_per_step, input_channels,
output_channels // matmul_serialization_factor
]) and rhs['shape'] == gen_shape([
batches_per_step, output_channels // matmul_serialization_factor,
reducing_dim
]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape(
[batches_per_step, reducing_dim, input_channels])
and rhs['shape'] == gen_shape([
batches_per_step, input_channels,
output_channels // matmul_serialization_factor
]))
w1 = run_test("none", 0, verify_no_serialisation)
w2 = run_test("input_channels", 2, verify_serialisation_input_channels)
w3 = run_test("reducing_dim", 2, verify_serialisation_reducing_dim)
w4 = run_test("output_channels", 4, verify_serialisation_output_channels)
assert (np.allclose(w1, w2))
assert (np.allclose(w1, w3))
assert (np.allclose(w1, w4))
def test_matmul_serialization_training_with_castop():
input_channels = 6
reducing_dim = 2
output_channels = 4
lhs_shape = [input_channels, reducing_dim]
rhs_shape = [reducing_dim, output_channels]
lhs_data = np.ones((*lhs_shape, ), dtype=np.float16)
rhs_data = np.ones((*rhs_shape, ), dtype=np.float32)
def run_test(matmul_serialization_mode, matmul_serialization_factor,
verify):
builder = popart.Builder()
lhs = builder.addInputTensor(popart.TensorInfo("FLOAT16", lhs_shape),
"lhs")
rhs = builder.addInitializedInputTensor(rhs_data, "rhs")
rhs_f16 = builder.aiOnnx.cast([rhs], "FLOAT16")
o = builder.aiOnnx.matmul([lhs, rhs_f16])
builder.setSerializeMatMul({o}, matmul_serialization_mode,
matmul_serialization_factor)
loss = builder.aiGraphcore.l1loss([o], 0.1)
proto = builder.getModelProto()
dataFlow = popart.DataFlow(
1, {
o:
popart.AnchorReturnType("All"),
rhs:
popart.AnchorReturnType("Final"),
popart.reservedGradientPrefix() + lhs:
popart.AnchorReturnType("All"),
})
opts = getBaseOptions()
pat = popart.Patterns(
['MatMulOp', 'MatMulRhsGradOp', 'MatMulLhsGradOp', 'OpToIdentity'])
pat.enableRuntimeAsserts(False)
session = popart.TrainingSession(
fnModel=proto,
dataFlow=dataFlow,
userOptions=opts,
loss=loss,
optimizer=popart.ConstSGD(0.01),
patterns=pat,
deviceInfo=tu.create_test_device(opts={"compileIPUCode": False}))
session.prepareDevice()
session.weightsFromHost()
anchors = session.initAnchorArrays()
inputs = {lhs: lhs_data}
stepio = popart.PyStepIO(inputs, anchors)
session.run(stepio)
session.weightsToHost()
verify(session, matmul_serialization_factor)
return anchors[rhs]
def verify_no_serialisation(session, matmul_serialization_factor):
        ''' Verify that the matmul in the main graph is correct'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 3)
# forward
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([1, input_channels, reducing_dim])
and rhs['shape'] == gen_shape(
[1, reducing_dim, output_channels]))
# bwd lhs
bwd_lhs = _get_bwd_lhs_matmul(matmuls)
lhs = bwd_lhs['inputs'][0]
rhs = bwd_lhs['inputs'][1]
assert (lhs['shape'] == gen_shape([1, input_channels, output_channels])
and rhs['shape'] == gen_shape(
[1, output_channels, reducing_dim]))
# bwd rhs
bwd_rhs = _get_bwd_rhs_matmul(matmuls)
lhs = bwd_rhs['inputs'][0]
rhs = bwd_rhs['inputs'][1]
assert (lhs['shape'] == gen_shape([1, reducing_dim, input_channels])
and rhs['shape'] == gen_shape(
[1, input_channels, output_channels]))
def verify_serialisation_input_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels // matmul_serialization_factor, reducing_dim
]) and rhs['shape'] == gen_shape([1, reducing_dim, output_channels]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels // matmul_serialization_factor, output_channels
]) and rhs['shape'] == gen_shape([1, output_channels, reducing_dim]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, reducing_dim, input_channels // matmul_serialization_factor
]) and rhs['shape'] == gen_shape([
1, input_channels // matmul_serialization_factor, output_channels
]))
def verify_serialisation_reducing_dim(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels, reducing_dim // matmul_serialization_factor
]) and rhs['shape'] == gen_shape(
[1, reducing_dim // matmul_serialization_factor, output_channels]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([1, input_channels, output_channels])
and rhs['shape'] == gen_shape([
1, output_channels,
reducing_dim // matmul_serialization_factor
]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, reducing_dim // matmul_serialization_factor, input_channels
]) and rhs['shape'] == gen_shape([1, input_channels, output_channels]))
def verify_serialisation_output_channels(session,
matmul_serialization_factor):
        ''' Verify that the matmul has the input sliced and is in a subgraph'''
ir = json.loads(session._serializeIr(
popart.IrSerializationFormat.JSON))
matmuls = [op for op in ir['maingraph'] if op['type'] == 'MatMul']
assert (len(matmuls) == 0)
# FWD
matmuls = [
op for op in ir['call_subgraph(0)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape(
[1, input_channels, reducing_dim]) and rhs['shape'] == gen_shape([
1, reducing_dim, output_channels // matmul_serialization_factor
]))
# BWD_LHS
matmuls = [
op for op in ir['call_subgraph(1)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([
1, input_channels, output_channels // matmul_serialization_factor
]) and rhs['shape'] == gen_shape(
[1, output_channels // matmul_serialization_factor, reducing_dim]))
# BWD_RHS
matmuls = [
op for op in ir['call_subgraph(2)'] if op['type'] == 'MatMul'
]
assert (len(matmuls) == 1)
lhs = matmuls[0]['inputs'][0]
rhs = matmuls[0]['inputs'][1]
assert (lhs['shape'] == gen_shape([1, reducing_dim, input_channels])
and rhs['shape'] == gen_shape([
1, input_channels,
output_channels // matmul_serialization_factor
]))
w1 = run_test("none", 0, verify_no_serialisation)
w2 = run_test("input_channels", 2, verify_serialisation_input_channels)
w3 = run_test("reducing_dim", 2, verify_serialisation_reducing_dim)
w4 = run_test("output_channels", 4, verify_serialisation_output_channels)
assert (np.allclose(w1, w2))
assert (np.allclose(w1, w3))
assert (np.allclose(w1, w4))
|
{"hexsha": "9e19c2f17c443b25620608e6ad8e6995c97757d3", "size": 66666, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/integration/transformation_tests/serializematmul.py", "max_stars_repo_name": "gglin001/popart", "max_stars_repo_head_hexsha": "3225214343f6d98550b6620e809a3544e8bcbfc6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2020-07-06T17:11:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T14:42:51.000Z", "max_issues_repo_path": "tests/integration/transformation_tests/serializematmul.py", "max_issues_repo_name": "gglin001/popart", "max_issues_repo_head_hexsha": "3225214343f6d98550b6620e809a3544e8bcbfc6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-25T01:30:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-09T11:13:14.000Z", "max_forks_repo_path": "tests/integration/transformation_tests/serializematmul.py", "max_forks_repo_name": "gglin001/popart", "max_forks_repo_head_hexsha": "3225214343f6d98550b6620e809a3544e8bcbfc6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-07-15T12:33:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-07T06:55:00.000Z", "avg_line_length": 34.6857440166, "max_line_length": 150, "alphanum_fraction": 0.5704107041, "include": true, "reason": "import numpy", "num_tokens": 16674}
|
#!/usr/bin/env python
import os
from setuptools import find_packages
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError, DistutilsError
from numpy.distutils.core import setup, Extension
from numpy.distutils.command.build_ext import build_ext as old_build_ext
from numpy.distutils.fcompiler import CompilerNotFound
class BuildFailed(Exception):
pass
def construct_build_ext(build_ext):
# This class allows extension building to fail.
# https://stackoverflow.com/questions/41778153/
ext_errors = (CCompilerError, DistutilsExecError,
DistutilsPlatformError, DistutilsError, IOError)
class WrappedBuildExt(build_ext):
def run(self):
try:
build_ext.run(self)
except (DistutilsPlatformError, CompilerNotFound) as x:
raise BuildFailed(x)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except ext_errors as x:
raise BuildFailed(x)
return WrappedBuildExt
with open('README.md', 'r') as f:
LONG_DESCRIPTION = f.read()
direct_files = ['direct.pyf', 'DIRect.f', 'DIRserial.f', 'DIRsubrout.f']
direct_paths = [os.path.join('dragonfly', 'utils', 'direct_fortran', x)
for x in direct_files]
ext1 = Extension(name='dragonfly.utils.direct_fortran.direct',
sources=direct_paths)
setup_options = dict(
name='dragonfly-opt',
version="0.1.5",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url='https://github.com/dragonfly/dragonfly/',
license='MIT',
author_email='kandasamy@cs.cmu.edu',
packages=find_packages(exclude=('examples*', 'dragonfly.test_data*')),
scripts=['bin/dragonfly-script.py'],
install_requires=[
'future',
'numpy',
'scipy',
'six',
],
extras_require={
'extra': ['cython', 'POT'],
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Operating System :: Unix",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
try:
# Try building the Fortran extension.
setup(
ext_modules=[ext1],
cmdclass={"build_ext": construct_build_ext(old_build_ext)},
**setup_options
)
except BuildFailed:
print("")
print("*" * 80)
print("Fortran compilation failed. Falling back on pure Python version.")
print("*" * 80)
setup(**setup_options)
|
{"hexsha": "b35fabc4c1bc087d543a830df0faa5dddc3cdcdb", "size": 2911, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "hase1128/dragonfly", "max_stars_repo_head_hexsha": "4be7e4c539d3edccc4d243ab9f972b1ffb0d9a5c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "setup.py", "max_issues_repo_name": "hase1128/dragonfly", "max_issues_repo_head_hexsha": "4be7e4c539d3edccc4d243ab9f972b1ffb0d9a5c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "setup.py", "max_forks_repo_name": "hase1128/dragonfly", "max_forks_repo_head_hexsha": "4be7e4c539d3edccc4d243ab9f972b1ffb0d9a5c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3010752688, "max_line_length": 103, "alphanum_fraction": 0.6502919959, "include": true, "reason": "from numpy", "num_tokens": 649}
|
# Concrete implementation of the octree, including construction and search
import random
import math
import numpy as np
import time
from result_set import KNNResultSet, RadiusNNResultSet
# Octant: the basic node element that the octree is built from
class Octant:
def __init__(self, children, center, extent, point_indices, is_leaf):
self.children = children
self.center = center
self.extent = extent
self.point_indices = point_indices
self.is_leaf = is_leaf
def __str__(self):
output = ''
output += 'center: [%.2f, %.2f, %.2f], ' % (self.center[0], self.center[1], self.center[2])
output += 'extent: %.2f, ' % self.extent
output += 'is_leaf: %d, ' % self.is_leaf
output += 'children: ' + str([x is not None for x in self.children]) + ", "
output += 'point_indices: ' + str(self.point_indices)
return output
# Function: traverse the octree
# Inputs:
#     root: the constructed octree
#     depth: current depth (one-element list used as a mutable counter)
#     max_depth: maximum depth reached so far
def traverse_octree(root: Octant, depth, max_depth):
depth[0] += 1
if max_depth[0] < depth[0]:
max_depth[0] = depth[0]
if root is None:
pass
elif root.is_leaf:
print(root)
else:
for child in root.children:
traverse_octree(child, depth, max_depth)
depth[0] -= 1
# Function: build the octree recursively
# Inputs:
#     root: root node
#     db: raw data
#     center: center of the octant
#     extent: current half-size of the octant
#     point_indices: indices (keys) of the points in this octant
#     leaf_size: maximum number of points stored in a leaf
#     min_extent: minimum half-size at which splitting stops
def octree_recursive_build(root, db, center, extent, point_indices, leaf_size, min_extent):
if len(point_indices) == 0:
return None
if root is None:
root = Octant([None for i in range(8)], center, extent, point_indices, is_leaf=True)
# determine whether to split this octant
if len(point_indices) <= leaf_size or extent <= min_extent:
root.is_leaf = True
else:
        # Homework 4
        # solution begins
        # root now has children, so it is no longer a leaf
root.is_leaf = False
        # create index buckets for the 8 children
children_point_indices = [[] for i in range(8)]
        # iterate over every point
for point_idx in point_indices:
point_db = db[point_idx]
            # determine which child octant the current point belongs to
morton_code = 0
            # which side of the x axis
if point_db[0] > center[0]:
morton_code = morton_code | 1
            # which side of the y axis
if point_db[1] > center[1]:
morton_code = morton_code | 2
            # which side of the z axis
if point_db[2] > center[2]:
morton_code = morton_code | 4
            # store the point index in the matching child bucket
children_point_indices[morton_code].append(point_idx)
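            # A minimal worked example (hypothetical values): with a center of
            # [0, 0, 0], a point at [0.3, -0.2, 0.5] sets bit 1 (x) and bit 4 (z),
            # giving morton_code = 5, i.e. the (+x, -y, +z) child octant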
# create children
factor = [-0.5, 0.5]
for i in range(8):
            # compute the center coordinate of each child
child_center_x = center[0] + factor[(i & 1) > 0] * extent
child_center_y = center[1] + factor[(i & 2) > 0] * extent
child_center_z = center[2] + factor[(i & 4) > 0] * extent
            # extent (half-size) of the child
child_extent = 0.5 * extent
child_center = np.asarray([child_center_x, child_center_y, child_center_z])
            # recursively build the subtree for this child
root.children[i] = octree_recursive_build(root.children[i],
db,
child_center,
child_extent,
children_point_indices[i],
leaf_size,
min_extent)
        # solution ends
return root
# Function: determine whether the query ball lies entirely inside the octant
# Inputs:
#     query: query point
#     radius: search radius
#     octant: octant to test against
# Output:
#     True/False
def inside(query: np.ndarray, radius: float, octant:Octant):
"""
Determines if the query ball is inside the octant
:param query:
:param radius:
:param octant:
:return:
"""
    # per-axis offset from the query point to the octant center
query_offset = query - octant.center
    # absolute value of the offset
query_offset_abs = np.fabs(query_offset)
possible_space = query_offset_abs + radius
return np.all(possible_space < octant.extent)
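# A minimal sketch of inside() (hypothetical values): an octant centered at the
# origin with extent 1.0 fully contains a query ball of radius 0.5 centered at
# [0.2, 0.1, 0.0], since |offset| + radius < extent holds on every axis:
# _octant = Octant([None] * 8, np.zeros(3), 1.0, [], True)
# inside(np.asarray([0.2, 0.1, 0.0]), 0.5, _octant)  # -> True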
# Function: determine whether the query ball overlaps the octant
# Inputs:
#     query: query point
#     radius: search radius
#     octant: octant to test against
# Output:
#     True/False
def overlaps(query: np.ndarray, radius: float, octant:Octant):
"""
Determines if the query ball overlaps with the octant
:param query:
:param radius:
:param octant:
:return:
"""
query_offset = query - octant.center
query_offset_abs = np.fabs(query_offset)
# completely outside, since query is outside the relevant area
max_dist = radius + octant.extent
if np.any(query_offset_abs > max_dist):
return False
# if pass the above check, consider the case that the ball is contacting the face of the octant
    if np.sum((query_offset_abs < octant.extent).astype(int)) >= 2:
return True
    # consider the case where the ball contacts an edge or corner of the octant
# since the case of the ball center (query) inside octant has been considered,
# we only consider the ball center (query) outside octant
x_diff = max(query_offset_abs[0] - octant.extent, 0)
y_diff = max(query_offset_abs[1] - octant.extent, 0)
z_diff = max(query_offset_abs[2] - octant.extent, 0)
return x_diff * x_diff + y_diff * y_diff + z_diff * z_diff < radius * radius
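# A minimal sketch of the corner case above (hypothetical values): for an
# octant centered at the origin with extent 1.0, a query at [1.2, 1.2, 0.0]
# overhangs by 0.2 on x and y, so it overlaps only if
# radius**2 > 0.2**2 + 0.2**2, i.e. radius > ~0.283:
# _octant = Octant([None] * 8, np.zeros(3), 1.0, [], True)
# overlaps(np.asarray([1.2, 1.2, 0.0]), 0.3, _octant)  # -> True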
# Function: determine whether the query ball contains the octant
# Inputs:
#     query: query point
#     radius: search radius
#     octant: octant to test against
# Output:
#     True/False
def contains(query: np.ndarray, radius: float, octant:Octant):
"""
Determine if the query ball contains the octant
:param query:
:param radius:
:param octant:
:return:
"""
query_offset = query - octant.center
query_offset_abs = np.fabs(query_offset)
query_offset_to_farthest_corner = query_offset_abs + octant.extent
return np.linalg.norm(query_offset_to_farthest_corner) < radius
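# A minimal sketch (hypothetical values): seen from a query at the center of
# an octant with extent 1.0, the farthest corner lies at distance sqrt(3), so
# any radius above ~1.733 contains the whole octant:
# _octant = Octant([None] * 8, np.zeros(3), 1.0, [], True)
# contains(np.zeros(3), 1.8, _octant)  # -> True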
# Function: radius search in the octree (fast variant)
# Inputs:
#     root: octree
#     db: raw data
#     result_set: search results
#     query: query point
def octree_radius_search_fast(root: Octant, db: np.ndarray, result_set: RadiusNNResultSet, query: np.ndarray):
if root is None:
return False
    # Homework 5
    # Hint: reuse the inside, overlaps and contains helpers above
    # solution begins
if contains(query, result_set.worstDist(), root):
# compare the contents of the octant
leaf_points = db[root.point_indices, :]
diff = np.linalg.norm(np.expand_dims(query, 0) - leaf_points, axis=1)
for i in range(diff.shape[0]):
result_set.add_point(diff[i], root.point_indices[i])
# don't need to check any child
return False
if root.is_leaf and len(root.point_indices) > 0:
# compare the contents of a leaf
leaf_points = db[root.point_indices, :]
diff = np.linalg.norm(np.expand_dims(query, 0) - leaf_points, axis=1)
for i in range(diff.shape[0]):
result_set.add_point(diff[i], root.point_indices[i])
# check whether we can stop search now
return inside(query, result_set.worstDist(), root)
    # no need to visit the most relevant child first, since we go through all children anyway
for c, child in enumerate(root.children):
if child is None:
continue
if False == overlaps(query, result_set.worstDist(), child):
continue
if octree_radius_search_fast(child, db, result_set, query):
return True
    # solution ends
return inside(query, result_set.worstDist(), root)
# Function: find the neighbours within a given radius in the octree
# Inputs:
#     root: octree
#     db: raw data
#     result_set: search results
#     query: query point
def octree_radius_search(root: Octant, db: np.ndarray, result_set: RadiusNNResultSet, query: np.ndarray):
if root is None:
return False
if root.is_leaf and len(root.point_indices) > 0:
# compare the contents of a leaf
leaf_points = db[root.point_indices, :]
diff = np.linalg.norm(np.expand_dims(query, 0) - leaf_points, axis=1)
for i in range(diff.shape[0]):
result_set.add_point(diff[i], root.point_indices[i])
# check whether we can stop search now
return inside(query, result_set.worstDist(), root)
    # Homework 6
    # solution begins
# go to the relevant child first
morton_code = 0
if query[0] > root.center[0]:
morton_code = morton_code | 1
if query[1] > root.center[1]:
morton_code = morton_code | 2
if query[2] > root.center[2]:
morton_code = morton_code | 4
if octree_radius_search(root.children[morton_code], db, result_set, query):
return True
# check other children
for c, child in enumerate(root.children):
if c == morton_code or child is None:
continue
if False == overlaps(query, result_set.worstDist(), child):
continue
if octree_radius_search(child, db, result_set, query):
return True
    # solution ends
# final check of if we can stop search
return inside(query, result_set.worstDist(), root)
# Function: find the k nearest neighbours in the octree
# Inputs:
#     root: octree
#     db: raw data
#     result_set: search results
#     query: query point
def octree_knn_search(root: Octant, db: np.ndarray, result_set: KNNResultSet, query: np.ndarray):
if root is None:
return False
    # check whether the current root is a leaf
if root.is_leaf and len(root.point_indices) > 0:
# compare the contents of a leaf
leaf_points = db[root.point_indices, :]
diff = np.linalg.norm(np.expand_dims(query, 0) - leaf_points, axis=1)
for i in range(diff.shape[0]):
            # result_set keeps all current distances and point indices
result_set.add_point(diff[i], root.point_indices[i])
        # check whether we can stop the search now
return inside(query, result_set.worstDist(), root)
    # Homework 7
    # solution begins
# go to the relevant child first
    # find the child octant (octant code) that the query falls into
morton_code = 0
if query[0] > root.center[0]:
morton_code = morton_code | 1
if query[1] > root.center[1]:
morton_code = morton_code | 2
if query[2] > root.center[2]:
morton_code = morton_code | 4
    # recurse into that child octant first
if octree_knn_search(root.children[morton_code], db, result_set, query):
return True
# check other children
    # the call above returned False, i.e. the query ball is not fully inside
    # that child, so the other children may still hold closer points
for c, child in enumerate(root.children):
        # skip the child we already searched, and empty children
if c == morton_code or child is None:
continue
        # skip children that do not overlap the search ball
if False == overlaps(query, result_set.worstDist(), child):
continue
if octree_knn_search(child, db, result_set, query):
return True
    # solution ends
# final check of if we can stop search
return inside(query, result_set.worstDist(), root)
# Function: build the octree; the public interface that calls octree_recursive_build
# Inputs:
#     db_np: raw data
#     leaf_size: maximum number of points stored in a leaf
#     min_extent: minimum half-size at which splitting stops
def octree_construction(db_np, leaf_size, min_extent):
N, dim = db_np.shape[0], db_np.shape[1]
db_np_min = np.amin(db_np, axis=0)
db_np_max = np.amax(db_np, axis=0)
db_extent = np.max(db_np_max - db_np_min) * 0.5
db_center = np.mean(db_np, axis=0)
root = None
root = octree_recursive_build(root, db_np, db_center, db_extent, list(range(N)),
leaf_size, min_extent)
return root
def main():
# configuration
db_size = 64000
dim = 3
leaf_size = 4
min_extent = 0.0001
k = 8
db_np = np.random.rand(db_size, dim)
root = octree_construction(db_np, leaf_size, min_extent)
# depth = [0]
# max_depth = [0]
# traverse_octree(root, depth, max_depth)
# print("tree max depth: %d" % max_depth[0])
# query = np.asarray([0, 0, 0])
# result_set = KNNResultSet(capacity=k)
# octree_knn_search(root, db_np, result_set, query)
# print(result_set)
#
# diff = np.linalg.norm(np.expand_dims(query, 0) - db_np, axis=1)
# nn_idx = np.argsort(diff)
# nn_dist = diff[nn_idx]
# print(nn_idx[0:k])
# print(nn_dist[0:k])
begin_t = time.time()
print("Radius search normal:")
for i in range(100):
query = np.random.rand(3)
result_set = RadiusNNResultSet(radius=0.5)
octree_radius_search(root, db_np, result_set, query)
# print(result_set)
print("Search takes %.3fms\n" % ((time.time() - begin_t) * 1000))
begin_t = time.time()
print("Radius search fast:")
for i in range(100):
query = np.random.rand(3)
result_set = RadiusNNResultSet(radius = 0.5)
octree_radius_search_fast(root, db_np, result_set, query)
# print(result_set)
print("Search takes %.3fms\n" % ((time.time() - begin_t)*1000))
if __name__ == '__main__':
main()
|
{"hexsha": "fc2660dfaafda6024a750716cd80c8a1a449349a", "size": 12689, "ext": "py", "lang": "Python", "max_stars_repo_path": "Homework/Homework II/solution/octree.py", "max_stars_repo_name": "SS47816/3D-PointCloud", "max_stars_repo_head_hexsha": "60b58d09b8c07b5359801e442f9ba70174065827", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-01T20:05:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-01T20:05:23.000Z", "max_issues_repo_path": "Homework/Homework II/solution/octree.py", "max_issues_repo_name": "SS47816/3D-PointCloud", "max_issues_repo_head_hexsha": "60b58d09b8c07b5359801e442f9ba70174065827", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Homework/Homework II/solution/octree.py", "max_forks_repo_name": "SS47816/3D-PointCloud", "max_forks_repo_head_hexsha": "60b58d09b8c07b5359801e442f9ba70174065827", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2536945813, "max_line_length": 110, "alphanum_fraction": 0.6129718654, "include": true, "reason": "import numpy", "num_tokens": 3769}
|
from PIL import Image as PILImage
import math
import torchvision.transforms as T
import torch.nn.functional as F
import torch.nn as nn
import torch
import matplotlib.pyplot as plt
import numpy as np
import random
import time
import os
import sys
"""## Step 1: We initialize the Experience Replay memory"""
class ReplayBuffer(object):
def __init__(self, max_size=1e6):
self.storage = []
self.max_size = max_size
self.ptr = 0
def add(self, transition):
if len(self.storage) == self.max_size:
self.storage[int(self.ptr)] = transition
self.ptr = (self.ptr + 1) % self.max_size
else:
self.storage.append(transition)
def sample(self, batch_size):
ind = np.random.randint(0, len(self.storage), size=batch_size)
batch_states1, batch_states2, batch_next_states1, batch_next_states2, batch_actions, batch_rewards, batch_dones = [], [], [], [], [], [], []
for i in ind:
state1, state2, next_state1, next_state2, action, reward, done = self.storage[i]
batch_states1.append(state1)
batch_states2.append(np.array(state2, copy=False))
batch_next_states1.append(next_state1)
batch_next_states2.append(np.array(next_state2, copy=False))
batch_actions.append(np.array(action, copy=False))
batch_rewards.append(np.array(reward, copy=False))
batch_dones.append(np.array(done, copy=False))
return np.array(batch_states1), np.array(batch_states2), np.array(batch_next_states1), np.array(batch_next_states2), np.array(batch_actions), np.array(batch_rewards).reshape(-1, 1), np.array(batch_dones).reshape(-1, 1)
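# A minimal usage sketch of the replay buffer (hypothetical shapes): each
# transition holds an image state, a vector state, their successors, the
# action, the reward and the done flag.
# buffer = ReplayBuffer()
# buffer.add((s1, s2, next_s1, next_s2, action, reward, done))
# batch = buffer.sample(batch_size=100)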
"""## Step 2: We build one neural network for the Actor model and one neural network for the Actor target"""
# This helps calculate the final output dim of CNN
# def conv2d_size_out(size, kernel_size=3, stride=2):
# return (size - (kernel_size - 1) - 1) // stride + 1
# conv2d_size_out(conv2d_size_out(60))
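# For example: conv2d_size_out(60) = (60 - 2 - 1) // 2 + 1 = 29, and
# conv2d_size_out(conv2d_size_out(60)) = (29 - 2 - 1) // 2 + 1 = 14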
class AC_conv(nn.Module):
def __init__(self, state_dim=1):
super(AC_conv, self).__init__()
self.conv1 = nn.Conv2d(state_dim, 16, kernel_size=3, stride=2) # 16
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=1) # 16
self.bn2 = nn.BatchNorm2d(16)
self.conv3 = nn.Conv2d(16, 9, kernel_size=3, stride=1) # 9 : 15x15
self.bn3 = nn.BatchNorm2d(9) # sq of an odd number, because just!
self.conv4 = nn.Conv2d(9, 1, kernel_size=1) # 1 : 15x15 | combining 9 channels to one
def forward(self, x):
# final output is 5x5 which later be flattened to 25
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = F.relu(self.conv4(x))
return torch.nn.functional.avg_pool2d(x, kernel_size=3, stride=3) # 5x5
# Actor Models
class Actor(AC_conv):
def __init__(self, state_dim, action_dim, max_action):
AC_conv.__init__(self)
super(Actor, self).__init__()
linear_input_size = 25+5
self.layer_1 = nn.Linear(linear_input_size, 30) # if on road or sand
self.layer_2 = nn.Linear(30, 50)
self.layer_3 = nn.Linear(50, action_dim)
self.max_action = max_action
def forward(self, x1, x2):
x1 = AC_conv.forward(self, x1)
x = torch.cat(((x1.view(x1.size(0), -1)),
x2), 1)
x = F.relu(self.layer_1(x))
x = F.relu(self.layer_2(x))
return self.max_action * torch.tanh(self.layer_3(x))
"""## Step 3: We build two neural networks for the two Critic models and two neural networks for the two Critic targets"""
class Critic(AC_conv):
def __init__(self, state_dim, action_dim):
AC_conv.__init__(self)
super(Critic, self).__init__()
# Defining the first Critic neural network
linear_input_size = 25+5 # add state["orientation"]
self.layer_1 = nn.Linear(linear_input_size + action_dim, 30)# if on road or sand
self.layer_2 = nn.Linear(30, 50)
self.layer_3 = nn.Linear(50, 1)
# Defining the second Critic neural network
self.layer_4 = nn.Linear(linear_input_size + action_dim, 30)# if on road or sand
self.layer_5 = nn.Linear(30, 50)
self.layer_6 = nn.Linear(50, 1)
def forward(self, x1, x2, u):
# Forward-Propagation on the first Critic Neural Network
x1_1 = AC_conv.forward(self,x1)
xu_1 = torch.cat(((x1_1.view(x1_1.size(0), -1)),
x2, u),1)
x_1 = F.relu(self.layer_1(xu_1))
x_1 = F.relu(self.layer_2(x_1))
x_1 = self.layer_3(x_1)
# Forward-Propagation on the second Critic Neural Network
x1_2 = AC_conv.forward(self,x1)
        xu_2 = torch.cat(((x1_2.view(x1_2.size(0), -1)),
                          x2, u), 1)
x_2 = F.relu(self.layer_4(xu_2))
x_2 = F.relu(self.layer_5(x_2))
x_2 = self.layer_6(x_2)
return x_1, x_2
def Q1(self, x1, x2, u):
x1_1 = AC_conv.forward(self,x1)
xu_1 = torch.cat(((x1_1.view(x1_1.size(0), -1)),
x2, u),1)
x_1 = F.relu(self.layer_1(xu_1))
x_1 = F.relu(self.layer_2(x_1))
x_1 = self.layer_3(x_1)
return x_1
"""## Steps 4 to 15: Training Process"""
# Selecting the device (CPU or GPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Building the whole Training Process into a class
class TD3(object):
def __init__(self, state_dim, action_dim, max_action):
self.actor = Actor(state_dim, action_dim, max_action).to(device)
self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
self.actor_target.load_state_dict(self.actor.state_dict())
# for name, p in self.actor.named_parameters():
# if "layer" not in name:
# p.requires_grad = False
# self.actor_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.actor.parameters()), lr = 0.001245)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr = 0.0007)
self.critic = Critic(state_dim, action_dim).to(device)
self.critic_target = Critic(state_dim, action_dim).to(device)
self.critic_target.load_state_dict(self.critic.state_dict())
# for name, p in self.critic.named_parameters():
# if "layer" not in name:
# p.requires_grad = False
# self.critic_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.critic.parameters()), lr = 0.001245)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=0.0007)
self.max_action = max_action
def select_action(self, state1, state2):
state1 = torch.from_numpy(state1).float().permute(2, 0, 1).unsqueeze(0).to(device)
state2 = torch.Tensor(state2).unsqueeze(0).to(device)
# print(f'shape of state1: {state1.shape}; state2{state2.shape}')
return self.actor(state1, state2).cpu().data.numpy().flatten()
def train(self, replay_buffer, iterations, batch_size=100, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2):
for it in range(iterations):
# Step 4: We sample a batch of transitions (s, s’, a, r) from the memory
# batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = replay_buffer.sample(batch_size)
batch_states1, batch_states2, batch_next_states1, batch_next_states2, batch_actions, batch_rewards, batch_dones = replay_buffer.sample(
batch_size)
state1 = torch.from_numpy(batch_states1).float().permute(0, 3, 1, 2).to(device)
state2 = torch.Tensor(batch_states2).to(device)
# next_state1 = torch.Tensor(batch_next_states1).to(device)
next_state1 = torch.from_numpy(batch_next_states1).float().permute(0, 3, 1, 2).to(device)
next_state2 = torch.Tensor(batch_next_states2).to(device)
action = torch.Tensor(batch_actions).to(device)
reward = torch.Tensor(batch_rewards).to(device)
done = torch.Tensor(batch_dones).to(device)
# Step 5: From the next state s’, the Actor target plays the next action a’
next_action = self.actor_target(next_state1, next_state2)
# Step 6: We add Gaussian noise to this next action a’ and we clamp it in a range of values supported by the environment
noise = torch.Tensor(batch_actions).data.normal_(0, policy_noise).to(device)
noise = noise.clamp(-noise_clip, noise_clip)
next_action = (next_action + noise).clamp(-self.max_action, self.max_action)
# Step 7: The two Critic targets take each the couple (s’, a’) as input and return two Q-values Qt1(s’,a’) and Qt2(s’,a’) as outputs
target_Q1, target_Q2 = self.critic_target(
next_state1, next_state2, next_action)
# Step 8: We keep the minimum of these two Q-values: min(Qt1, Qt2)
target_Q = torch.min(target_Q1, target_Q2)
# Step 9: We get the final target of the two Critic models, which is: Qt = r + γ * min(Qt1, Qt2), where γ is the discount factor
target_Q = reward + ((1 - done) * discount * target_Q).detach()
# Step 10: The two Critic models take each the couple (s, a) as input and return two Q-values Q1(s,a) and Q2(s,a) as outputs
current_Q1, current_Q2 = self.critic(state1, state2, action)
# Step 11: We compute the loss coming from the two Critic models: Critic Loss = MSE_Loss(Q1(s,a), Qt) + MSE_Loss(Q2(s,a), Qt)
critic_loss = F.mse_loss(
current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Step 12: We backpropagate this Critic loss and update the parameters of the two Critic models with a SGD optimizer
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Step 13: Once every two iterations, we update our Actor model by performing gradient ascent on the output of the first Critic model
if it % policy_freq == 0:
actor_loss = - self.critic.Q1(state1, state2, self.actor(state1, state2)).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Step 14: Still once every two iterations, we update the weights of the Actor target by polyak averaging
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
# Step 15: Still once every two iterations, we update the weights of the Critic target by polyak averaging
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
# Making a save method to save a trained model
def save(self, filename, directory):
torch.save(self.actor.state_dict(), '%s/%s_actor.pth' % (directory, filename))
torch.save(self.critic.state_dict(), '%s/%s_critic.pth' % (directory, filename))
# Making a load method to load a pre-trained model
def load(self, filename, directory):
self.actor.load_state_dict(torch.load(
'%s/%s_actor.pth' % (directory, filename), map_location=lambda storage, loc: storage))
self.critic.load_state_dict(torch.load(
'%s/%s_critic.pth' % (directory, filename), map_location=lambda storage, loc: storage))
|
{"hexsha": "ec2c7bf2959accc8985364afab44d451c9cd053c", "size": 12001, "ext": "py", "lang": "Python", "max_stars_repo_path": "example/ai.py", "max_stars_repo_name": "bhuvnk/myGymEnvs", "max_stars_repo_head_hexsha": "61cd214de05d91100db5a0be52ea919f2b6d0639", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example/ai.py", "max_issues_repo_name": "bhuvnk/myGymEnvs", "max_issues_repo_head_hexsha": "61cd214de05d91100db5a0be52ea919f2b6d0639", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example/ai.py", "max_forks_repo_name": "bhuvnk/myGymEnvs", "max_forks_repo_head_hexsha": "61cd214de05d91100db5a0be52ea919f2b6d0639", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.1165413534, "max_line_length": 227, "alphanum_fraction": 0.6334472127, "include": true, "reason": "import numpy", "num_tokens": 3089}
|
import sys
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from mpl_toolkits import mplot3d
def trajectory_generator(T_final, N, traj=0, show_traj=False):
'''
    Generates a reference trajectory (circular, helical or vertical) given a final time and the number of samples
'''
r = 1 # radius
th = np.linspace(0,6*np.pi,N)
c_x, c_y = [0,0] # center coordinates
## circular trajectory
if traj ==0:
t = np.linspace(0,T_final,N)
x = r * np.cos(th) + c_x
y = r * np.sin(th) + c_y
z = np.ones_like(th)
if show_traj == True:
plt.figure()
ax = plt.axes(projection = "3d")
plt.title('Reference trajectory')
ax.plot3D(x, y, z)
ax.set_xlabel("x[m]")
ax.set_ylabel("y[m]")
ax.set_zlabel("z[m]")
plt.show()
    ## helical trajectory
if traj ==1:
t = np.linspace(0,T_final,N)
x = r * np.cos(th) + c_x
y = r * np.sin(th) + c_y
z = np.linspace(1,2,N)
if show_traj == True:
plt.figure()
ax = plt.axes(projection = "3d")
plt.title('Reference trajectory')
ax.plot3D(x, y, z)
plt.show()
## vertical trajectory
if traj ==2:
t = np.linspace(0,T_final,N)
x = np.ones_like(t)
y = np.zeros_like(t)
z = np.linspace(1,2,N)
if show_traj == True:
plt.figure()
ax = plt.axes(projection = "3d")
plt.title('Reference trajectory')
ax.plot3D(x, y, z)
plt.show()
return t,x,y,z
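# A minimal usage sketch (hypothetical values): a 10 s helical reference
# sampled at 100 points, without plotting:
# t, x, y, z = trajectory_generator(T_final=10.0, N=100, traj=1, show_traj=False)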
def trajectory_generator2D( x0: np.array, # initial position of the quadrotor
N_hover: int, # number of time steps in the hovering phase
N_traj: int, # number of time steps in the simulation
                            N: int, # number of time steps in a single horizon (used to add an additional horizon in order for the closed-loop simulation to work)
radius: float, # radius of the circular trajectory
show_traj=False): # boolean to show trajectory before the simulation
'''
    Generates a circular trajectory with a hovering phase of N_hover steps at
    the start, given the number of trajectory steps and the radius.
'''
# hovering trajectory
y_hover = np.ones(N_hover) * x0[0]
z_hover = np.ones(N_hover) * x0[1]
# circular trajectory parameters
theta = np.linspace(0,4*np.pi, N_traj+N)
c_x, c_y = [4,5] # center coordinates
## circular trajectory
y_circle = radius * np.cos(theta) + c_x
z_circle = radius * np.sin(theta) + c_y
# appending the hovering and the circular trajectories
y = np.append(y_hover, y_circle)
z = np.append(z_hover, z_circle)
if show_traj == True:
fig, ax = plt.subplots()
plt.title('Reference trajectory')
ax.plot(y, z)
ax.set_xlabel("y[m]")
ax.set_ylabel("z[m]")
plt.show()
return y,z
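# A minimal usage sketch (hypothetical values): hover at the initial position
# for 50 steps, then fly circles of radius 1 m around the center (4, 5):
# y_ref, z_ref = trajectory_generator2D(x0=np.array([4.0, 5.0]), N_hover=50,
#                                       N_traj=500, N=50, radius=1.0)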
# trajectory generation with velocities
def trajectory_generotaor2D_with_vel( x0: np.array, # initial position of the quadrotor
N_hover: int, # number of time steps in the hovering phase
                                      model: object, # model of the drone (used to check that the maximum velocity is not exceeded)
radius: float, # radius of the circular trajectory
freq: float, # used to control the speed of the trajectory
                                      T_traj: float, # final time of the trajectory
Tf: float, # control horizon (used to add an additional horizon in order for the closed loop simulation to work)
dt: float):
# hovering trajectory
y_hover = np.ones(N_hover) * x0[0]
z_hover = np.ones(N_hover) * x0[1]
vz_hover = np.zeros(N_hover)
vy_hover = np.zeros(N_hover)
t = np.arange(0,T_traj+Tf,dt)
c_y, c_z = [4,5] # center coordinates
y_circle = radius * np.cos(freq * t) + c_y
z_circle = radius * np.sin(freq * t) + c_z
vy_circle = - radius * freq * np.sin(freq * t)
vz_circle = + radius * freq * np.cos(freq * t)
# appending the hovering and the circular trajectories
y = np.append(y_hover, y_circle)
z = np.append(z_hover, z_circle)
vy = np.append(vy_hover, vy_circle)
vz = np.append(vz_hover, vz_circle)
v = np.sqrt(vy**2 + vz**2)
# maximum velocity in the trajectory
v_max = np.max(v)
if v_max > model.v_max:
sys.exit("The desired trajectory contains velocities that the drone cannot handle.")
else:
return y, z, vy, vz
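# A minimal usage sketch (hypothetical values): a circle of radius 1 m at an
# angular frequency of 0.5 rad/s, whose peak speed radius * freq = 0.5 m/s
# must stay below model.v_max:
# y, z, vy, vz = trajectory_generotaor2D_with_vel(x0, N_hover=50, model=model,
#                                                 radius=1.0, freq=0.5,
#                                                 T_traj=20.0, Tf=1.0, dt=0.02)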
def readTrajectory(T_hover, N):
# import csv file of measX and simU (noisy measurement)
# ref_traj = pd.read_csv('used_data/matlab/ga4/measX.csv')
# ref_U = pd.read_csv('used_data/matlab/ga4/simU.csv')
# ref_traj = pd.read_csv('used_data/matlab/fmincon/J=u1/measX.csv')
# ref_U = pd.read_csv('used_data/matlab/fmincon/J=u1/simU.csv')
ref_traj = pd.read_csv('used_data/matlab/globalsearch_1/measX.csv')
ref_U = pd.read_csv('used_data/matlab/globalsearch_1/simU.csv')
# create references to add for the hovering time
ref_traj_x0 = ref_traj.iloc[[0]*N*T_hover]
ref_u0 = ref_U.iloc[[0]*N*T_hover]
# insert hovering references and inputs into their respective dataframes
ref_traj = pd.concat([pd.DataFrame(ref_traj_x0), ref_traj], ignore_index=True)
ref_U = pd.concat([pd.DataFrame(ref_u0), ref_U], ignore_index=True)
    # append the last reference point 3*N times so the MPC controller still has references at the last iterations (the factor 3 is not strictly necessary, but it improves the results compared to a factor of 1)
ref_traj = ref_traj.append( ref_traj.iloc[[-1]*N*3] )
ref_U = ref_U.append( ref_U.iloc[[-1]*N*3] )
# convert data frames to numpy arrays
ref_traj = ref_traj[['y', 'z', 'phi', 'vy', 'vz', 'phi_dot']].to_numpy()
ref_U = ref_U[['Thrust', 'Torque']].to_numpy()
# computing simulation time
T = (len(ref_traj) ) / N
return T, ref_traj, ref_U
'''
def loop_trajectory(quad, discretization_dt, radius, z, lin_acc, clockwise, yawing, v_max, map_name, plot):
"""
Creates a circular trajectory on the x-y plane that increases speed by 1m/s at every revolution.
:param quad: Quadrotor model
:param discretization_dt: Sampling period of the trajectory.
:param radius: radius of loop trajectory in meters
:param z: z position of loop plane in meters
:param lin_acc: linear acceleration of trajectory (and successive deceleration) in m/s^2
:param clockwise: True if the rotation will be done clockwise.
:param yawing: True if the quadrotor yaws along the trajectory. False for 0 yaw trajectory.
:param v_max: Maximum speed at peak velocity. Revolutions needed will be calculated automatically.
:param map_name: Name of map to load its limits
:param plot: Whether to plot an analysis of the planned trajectory or not.
:return: The full 13-DoF trajectory with time and input vectors
"""
ramp_up_t = 2 # s
# Calculate simulation time to achieve desired maximum velocity with specified acceleration
t_total = 2 * v_max / lin_acc + 2 * ramp_up_t
# Transform to angular acceleration
alpha_acc = lin_acc / radius # rad/s^2
# Generate time and angular acceleration sequences
# Ramp up sequence
ramp_t_vec = np.arange(0, ramp_up_t, discretization_dt)
ramp_up_alpha = alpha_acc * np.sin(np.pi / (2 * ramp_up_t) * ramp_t_vec) ** 2
# Acceleration phase
coasting_duration = (t_total - 4 * ramp_up_t) / 2
coasting_t_vec = ramp_up_t + np.arange(0, coasting_duration, discretization_dt)
coasting_alpha = np.ones_like(coasting_t_vec) * alpha_acc
# Transition phase: decelerate
transition_t_vec = np.arange(0, 2 * ramp_up_t, discretization_dt)
transition_alpha = alpha_acc * np.cos(np.pi / (2 * ramp_up_t) * transition_t_vec)
transition_t_vec += coasting_t_vec[-1] + discretization_dt
# Deceleration phase
down_coasting_t_vec = transition_t_vec[-1] + np.arange(0, coasting_duration, discretization_dt) + discretization_dt
down_coasting_alpha = -np.ones_like(down_coasting_t_vec) * alpha_acc
# Bring to rest phase
ramp_up_t_vec = down_coasting_t_vec[-1] + np.arange(0, ramp_up_t, discretization_dt) + discretization_dt
ramp_up_alpha_end = ramp_up_alpha - alpha_acc
# Concatenate all sequences
t_ref = np.concatenate((ramp_t_vec, coasting_t_vec, transition_t_vec, down_coasting_t_vec, ramp_up_t_vec))
alpha_vec = np.concatenate((
ramp_up_alpha, coasting_alpha, transition_alpha, down_coasting_alpha, ramp_up_alpha_end))
# Calculate derivative of angular acceleration (alpha_vec)
ramp_up_alpha_dt = alpha_acc * np.pi / (2 * ramp_up_t) * np.sin(np.pi / ramp_up_t * ramp_t_vec)
coasting_alpha_dt = np.zeros_like(coasting_alpha)
transition_alpha_dt = - alpha_acc * np.pi / (2 * ramp_up_t) * np.sin(np.pi / (2 * ramp_up_t) * transition_t_vec)
alpha_dt = np.concatenate((
ramp_up_alpha_dt, coasting_alpha_dt, transition_alpha_dt, coasting_alpha_dt, ramp_up_alpha_dt))
if not clockwise:
alpha_vec *= -1
alpha_dt *= -1
# Compute angular integrals
w_vec = np.cumsum(alpha_vec) * discretization_dt
angle_vec = np.cumsum(w_vec) * discretization_dt
# Compute position, velocity, acceleration, jerk
pos_traj_x = radius * np.sin(angle_vec)[np.newaxis, np.newaxis, :]
pos_traj_y = radius * np.cos(angle_vec)[np.newaxis, np.newaxis, :]
pos_traj_z = np.ones_like(pos_traj_x) * z
vel_traj_x = (radius * w_vec * np.cos(angle_vec))[np.newaxis, np.newaxis, :]
vel_traj_y = - (radius * w_vec * np.sin(angle_vec))[np.newaxis, np.newaxis, :]
acc_traj_x = radius * (alpha_vec * np.cos(angle_vec) - w_vec ** 2 * np.sin(angle_vec))[np.newaxis, np.newaxis, :]
acc_traj_y = - radius * (alpha_vec * np.sin(angle_vec) + w_vec ** 2 * np.cos(angle_vec))[np.newaxis, np.newaxis, :]
jerk_traj_x = radius * (alpha_dt * np.cos(angle_vec) - alpha_vec * np.sin(angle_vec) * w_vec -
np.cos(angle_vec) * w_vec ** 3 - 2 * np.sin(angle_vec) * w_vec * alpha_vec)
jerk_traj_y = - radius * (np.cos(angle_vec) * w_vec * alpha_vec + np.sin(angle_vec) * alpha_dt -
np.sin(angle_vec) * w_vec ** 3 + 2 * np.cos(angle_vec) * w_vec * alpha_vec)
jerk_traj_x = jerk_traj_x[np.newaxis, np.newaxis, :]
jerk_traj_y = jerk_traj_y[np.newaxis, np.newaxis, :]
if yawing:
yaw_traj = -angle_vec
else:
yaw_traj = np.zeros_like(angle_vec)
traj = np.concatenate((
np.concatenate((pos_traj_x, pos_traj_y, pos_traj_z), 1),
np.concatenate((vel_traj_x, vel_traj_y, np.zeros_like(vel_traj_x)), 1),
np.concatenate((acc_traj_x, acc_traj_y, np.zeros_like(acc_traj_x)), 1),
np.concatenate((jerk_traj_x, jerk_traj_y, np.zeros_like(jerk_traj_x)), 1)), 0)
yaw = np.concatenate((yaw_traj[np.newaxis, :], w_vec[np.newaxis, :]), 0)
return minimum_snap_trajectory_generator(traj, yaw, t_ref, quad, map_limits, plot)
'''
|
{"hexsha": "b4b57d86083ad35098d957683f2f07aeae9ae245", "size": 11474, "ext": "py", "lang": "Python", "max_stars_repo_path": "planar_mpc/trajectory.py", "max_stars_repo_name": "enhatem/quadrotor_mpc_acados", "max_stars_repo_head_hexsha": "9ca50ecc0a852ba5f9464df0ccd5d40e3ebfc295", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "planar_mpc/trajectory.py", "max_issues_repo_name": "enhatem/quadrotor_mpc_acados", "max_issues_repo_head_hexsha": "9ca50ecc0a852ba5f9464df0ccd5d40e3ebfc295", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "planar_mpc/trajectory.py", "max_forks_repo_name": "enhatem/quadrotor_mpc_acados", "max_forks_repo_head_hexsha": "9ca50ecc0a852ba5f9464df0ccd5d40e3ebfc295", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-29T03:37:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T03:37:01.000Z", "avg_line_length": 41.5724637681, "max_line_length": 228, "alphanum_fraction": 0.6340421823, "include": true, "reason": "import numpy", "num_tokens": 2999}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 30 19:44:02 2017
@author: user
"""
import argparse
import torch
import torch.nn as nn
from flyai.dataset import Dataset
from torch.optim import Adam, SGD
from torch.optim.lr_scheduler import *
import numpy as np
from model import Model
from path import MODEL_PATH
from flyai.utils.log_helper import train_log
from torch.utils.data import DataLoader
from torchvision.models.segmentation import *
from segdataset import SkyData
from albumentations import (
HorizontalFlip,
VerticalFlip,
Resize,
CenterCrop,
Compose,
RandomRotate90,
RandomResizedCrop,
RandomBrightnessContrast,
Normalize
)
'''
Project hyperparameters
'''
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--EPOCHS", default=3, type=int, help="train epochs")
parser.add_argument("-b", "--BATCH", default=8, type=int, help="batch size")
args = parser.parse_args()
'''
Data handling provided by the flyai library.
Pass in the number of training epochs and the batch size.
'''
data = Dataset(epochs=args.EPOCHS, batch=args.BATCH)
model = Model(data)
mean, std = model.get_mean_std()
resize = 600
crop_size = 520
train_aug = Compose([
RandomResizedCrop(height=crop_size, width=crop_size, p=1),
VerticalFlip(p=0.5),
HorizontalFlip(p=0.5),
RandomRotate90(p=0.5),
RandomBrightnessContrast(p=0.3),
Normalize(mean, std)
])
valid_aug = Compose([
Resize(resize, resize),
CenterCrop(crop_size,crop_size),
VerticalFlip(p=0.3),
HorizontalFlip(p=0.3),
RandomRotate90(p=0.3),
RandomBrightnessContrast(p=0.3),
Normalize(mean, std)
])
x_train, y_train, x_val, y_val = data.get_all_data()
print(mean, std)
train_dataset = SkyData(x_train, y_train, transformation=train_aug)
valid_dataset = SkyData(x_val, y_val, transformation=valid_aug)
train_dataloader = DataLoader(train_dataset,batch_size=args.BATCH,shuffle=True)
valid_dataloader = DataLoader(valid_dataset,batch_size=args.BATCH)
# check whether a GPU is available
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
'''
Define the network architecture
'''
# cnn = deeplabv3_resnet101(pretrained=False, num_classes=2)
cnn = fcn_resnet101(pretrained=False, num_classes=2)
cnn = cnn.to(device)
optimizer = SGD(cnn.parameters(), lr=0.005, momentum=0.9, weight_decay=0.0005)
# scheduler = StepLR(optimizer,step_size=7,gamma=0.1)
scheduler = ReduceLROnPlateau(optimizer, 'min', patience=3)
# criterion = nn.BCELoss()  # define the loss function
def criterion(inputs, target):
losses = {}
for name, x in inputs.items():
# x.transpose_(1,2)
# x.transpose_(2,3)
loss_fn = nn.CrossEntropyLoss()
losses[name] = loss_fn(x, target)
return losses['out']
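# A brief note on the criterion above: torchvision segmentation models return
# an OrderedDict of logits per head (e.g. {'out': Nx2xHxW}); cross-entropy is
# computed against the NxHxW integer mask for each head, and only the 'out'
# head's loss is kept.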
'''
dataset.get_step() returns the total number of iterations over the data
'''
best_val_loss = 1e5
for epo in range(args.EPOCHS):
cnn.train()
for x_train,y_train in train_dataloader:
x_train, y_train = x_train.to(device), y_train.to(device)
optimizer.zero_grad()
outputs = cnn(x_train)
loss = criterion(outputs, y_train)
loss.backward()
optimizer.step()
train_log(train_loss=loss.item())
# val
cnn.eval()
loss_m = 0.0
for x_val, y_val in valid_dataloader:
with torch.no_grad():
x_val, y_val = x_val.to(device), y_val.to(device)
outputs = cnn(x_val)
val_loss = criterion(outputs, y_val)
train_log(val_loss=val_loss.item())
loss_m += val_loss.item()
loss_m /= len(valid_dataloader)
if best_val_loss > loss_m:
model.save_model(cnn, MODEL_PATH, overwrite=True)
best_val_loss = loss_m
print("saved model, loss=%f, epo=%d"%(best_val_loss,epo))
else:
print("val_loss=%f"%loss_m)
print("lr=%f"%optimizer.param_groups[0]["lr"])
scheduler.step(loss_m)
print("train finished, best_loss=%f"%best_val_loss)
|
{"hexsha": "7ee2a76fb3567a2a6d2dc582f6d9eed95a57b77b", "size": 3786, "ext": "py", "lang": "Python", "max_stars_repo_path": "Sky_Seg_FlyAI/main.py", "max_stars_repo_name": "invisprints/flyai_match", "max_stars_repo_head_hexsha": "d087279268b10efed156292dc6e5844b03940192", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-06T09:41:38.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-10T02:54:08.000Z", "max_issues_repo_path": "Sky_Seg_FlyAI/main.py", "max_issues_repo_name": "invisprints/flyai_match", "max_issues_repo_head_hexsha": "d087279268b10efed156292dc6e5844b03940192", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Sky_Seg_FlyAI/main.py", "max_forks_repo_name": "invisprints/flyai_match", "max_forks_repo_head_hexsha": "d087279268b10efed156292dc6e5844b03940192", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2537313433, "max_line_length": 83, "alphanum_fraction": 0.6922873745, "include": true, "reason": "import numpy", "num_tokens": 1049}
|
import logging
import math
import matplotlib.pyplot as plt
import heartpy as hp
import numpy as np
import json
logging.basicConfig(filename='bad_data.log',
filemode='w',
level=logging.INFO)
def output_file(metrics, filename):
"""This function writes the output json file for the ECG data
This function takes the dictionary of ECG metrics and the name
of the csv file as inputs. The name of the csv file is split
into a list at the period and the first item of the list is
put into a string followed by .json to create the filename.
Using a with statement and a .dump() command the json file is
written.
Args:
metrics (dict): Dictionary containing ECG metrics including
the time duration, extreme values, number of beats,
            time points of beats, and mean heart rate in bpm.
        filename (str): the name of the input csv file, used to derive
            the name of the .json output file
    """
logging.info('Creating JSON output file')
filename_split = filename.split(".")
file = filename_split[0]
filename = file + ".json"
with open(filename, 'x') as out_file:
json.dump(metrics, out_file)
def group_similar_values(beat_list):
"""This returns a list of time points that represents heart beats
The input argument beat_list contains a list of time values
that correspond to voltages that are above half the max voltage.
The list beat_list has groups of voltages that represent heart beats.
For each group of times the median time is selected to be the heart
beat and is appended to the list median_list which is returned and
used as the list of times when heart beats occur.
Args:
beat_list (list): This list contains all times that correspond
to voltages over one half of the max voltage.
Returns:
list : a list of time points representing heart beats
"""
big_list = [[]]
x = 0
for i in range(len(beat_list)):
diff = beat_list[i] - beat_list[i - 1]
if diff > 0.1:
x += 1
big_list.append([])
big_list[x].append(beat_list[i])
median_list = list()
for i in range(len(big_list)):
val = math.floor(len(big_list[i])/2)
median_list.append(big_list[i][val])
return median_list
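# A minimal worked example (hypothetical values): the times
# [0.10, 0.11, 0.12, 0.95, 0.96] split into two groups at the > 0.1 s gap,
# and the group medians [0.11, 0.96] are returned as the beat times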
def calc_beats(time, volts):
"""This function returns the time points of heart beats by looking
at the voltage values
This function takes the time and voltage lists as input. The max voltage
is stored in the variable maximum by calling the function
calc_voltage_extremes() and selecting the max value. A for
loop loops through the voltage values and stores the time points
where the voltage is greater than half of the max voltage. This list
of times is sent to the function group_similar_values() which
returns the list of times corresponding to heart beats.
Args:
time (list): list of time values for the ECG data
volts (list): list of ECG voltage magnitudes
Returns:
list : list of time values corresponding to heart beats
"""
logging.info('Finding the times that each '
'heart beat occurred')
beat_list = list()
extremes = calc_voltage_extremes(volts)
maximum = extremes[1]
for i in range(len(volts)):
if volts[i] > (maximum / 2):
beat_list.append(time[i])
beat_list = group_similar_values(beat_list)
return beat_list
def calc_mean_hr_bpm(time, volts):
"""This function returns the average heart rate over the ECG data
The two lists time and volts are used as input parameters and are sent
to the function calc_beats which returns the list of time points
corresponding to heart beats. Using a for loop the time between beats
are stored in the list hr_list. The inverse of the time between the
beats is multiplied by 60 to convert beats per second to beats per
minute. The average of this list of heart rates is calculated by
finding the mean of the heart rates.
Args:
time (list): list of time values for the ECG data
volts (list): list of ECG voltage magnitudes
Returns:
float : mean heart rate over the ECG data
"""
logging.info('Calculating the mean heart rate '
'in beats-per-minute')
beat_list = calc_beats(time, volts)
hr_list = list()
for i in range(len(beat_list)):
if i > 0:
diff = beat_list[i] - beat_list[i-1]
hr_list.append(diff)
ave_hr = [(1/x)*60 for x in hr_list]
ave = np.mean(ave_hr)
return ave
def calc_num_beats(time, volt):
"""This returns the number of heart beats over the ECG data.
This function calls the function calc_beats to get the list
of heart beats for the ECG data. The length of this list
returned which is the number of beats in the list.
Args:
time (list): list of time values for the ECG data
volts (list): list of ECG voltage magnitudes
Returns:
int : the number of beats in the ECG data
"""
logging.info('Calculating the number of heart beats')
beats = calc_beats(time, volt)
return len(beats)
def calc_voltage_extremes(volt):
"""This function calculates the extreme values in the ECG data.
    This function takes the volt list (the ECG voltage magnitudes) as input
    and finds the extreme values using the max() and min() functions. The
    max and min values are returned as a tuple.
Args:
volts (list): list of ECG voltage magnitudes
Returns:
tuple: (min, max)
"""
logging.info('Finding max and min ECG values')
maximum = max(volt)
minimum = min(volt)
ans = (minimum, maximum)
return ans
def calc_duration(time):
"""This calculates the time duration of the ECG data.
The time duration is found by subtracting the first time
value from the last time value.
Args:
time (list): list of time values for the ECG data
Returns:
float : duration of ECG data in seconds
"""
logging.info('Calculating ECG duration')
first = time[0]
last = time[-1]
return last - first
def filter_data(time, raw_volt):
"""This function filters out noise below 10 Hz and above 50Hz
This filter takes the time and raw_volt data as input and
    filters out noise below 5 Hz and above 20 Hz using the heartpy
function filter_signal. See documentation on the filter_signal function
at: https://python-heart-rate-analysis-toolkit.readthedocs.io/en/
latest/_modules/heartpy/filtering.html
Args:
time (list): list of time values for the ECG data
volts (list): list of ECG voltage magnitudes
Returns:
list : the filtered ECG voltage values
"""
logging.info('Filtering Data')
sample_rate = 1 / (time[1] - time[0])
volt = hp.filter_signal(raw_volt, [5, 20], sample_rate, 2, 'bandpass')
return volt
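# A minimal usage sketch (hypothetical values): for data sampled every 4 ms
# the sample rate is 1 / 0.004 = 250 Hz, and the 5-20 Hz band-pass keeps most
# of the QRS energy while rejecting baseline wander and mains noise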
def make_dictionary(duration, voltage_extremes, num_beats, mean_hr_bpm, beats):
"""This function returns a dictionary of ECG metric data
This function makes a dictionary containing all of the ECG metric data,
which is passed into the function as the function's input parameters.
Args:
duration (float): the time duration of the ECG data
voltage_extremes (tuple): a tuple containing the min and max voltages
num_beats (int): the number of heart beats in the ECG data
mean_hr_bpm (float): the mean heart rate in beats per minutes
beats (list): the list of times corresponding to heart beats
Returns:
dictionary : dictionary containing ecg metrics
"""
metrics = {"duration": duration, "voltage_extremes": voltage_extremes,
"num_beats": num_beats, "mean_hr_bpm": mean_hr_bpm,
"beats": beats}
return metrics
def plot_data(time, volt, filename):
"""This function plots the ECG data for a file
This function takes three arguments as inputs: time, volt,
and filename. It uses matplotlib.pyplot to plot the time and
voltage pairs and uses the filename as the title.
Args:
time (list): list of time values for the ECG data
volts (list): list of ECG voltage magnitudes
filename (str): the string of the filename to be opened
"""
plt.plot(time, volt)
plt.title(filename)
plt.xlabel("Time (s)")
plt.ylabel("Voltage (mV)")
plt.show()
def calc_metrics(time, volt, filename):
"""This function calls the functions necessary to calculate the
ECG metrics
This function takes the time and volt lists as inputs and calls several
functions to return a dictionary of ECG metrics. First filter_data() is
called to get the voltage data without the high and low noise. Then the
duration is calculated using the function calc_duration(), the voltage
extremes are calculated by calling the function calc_voltage_extremes(),
the number of beats is calculated by calling the function
calc_num_beats(), the average heart rate is calculated by calling the
function calc_mean_hr_bpm(), the list of times corresponding to heart
beats is calculated by calling the function calc_beats(). All of the
metric data is put into a dictionare by calling the function
make_dictionary.
Args:
time (list): list of time values for the ECG data
        volts (list): list of ECG voltage magnitudes
        filename (str): the string of the filename, used as the plot title
Returns:
dictionary : dictionary containing ecg metrics
"""
logging.info('Beginning analysis of ECG data.')
volt = filter_data(time, volt)
duration = calc_duration(time)
voltage_extremes = calc_voltage_extremes(volt)
num_beats = calc_num_beats(time, volt)
mean_hr_bpm = calc_mean_hr_bpm(time, volt)
beats = calc_beats(time, volt)
plot_data(time, volt, filename)
metrics = make_dictionary(duration, voltage_extremes, num_beats,
mean_hr_bpm, beats)
return metrics
def split_data(temp_line):
"""This function recieves a line of input from the
data file and returns the data after it is isolated
ECG test data is read into the software using the function
read_input() which sends the data line by line to this function
to clean it up. The lines of data are stripped and split at the comma
into time and voltage values. If a value is missing, then a blank string
is returned.
Args:
temp_line (str): a string that contains a line of ECG data
Returns:
time (list): list of time values for the ECG data
volts (list): list of ECG voltage magnitudes
"""
temp_line = temp_line.strip("\n")
temp_list = temp_line.split(",")
time = temp_list[0]
time = time.strip(" ")
if len(temp_list) == 2:
volt = temp_list[1]
volt = volt.strip(" ")
else:
volt = ''
return time, volt
def is_a_number(number):
"""This function returns whether a string contains a number
This function uses a try-except statement to check whether a
string is numeric. It does so by trying to take the float of
a string and returning True if it is successful and False if
unsuccessful.
Args:
number (str): a string containing data from the .csv file
Returns:
bool: True or False depending on if the string is numeric
"""
try:
float(number)
except ValueError:
return False
return True
def check_data(temp_time, temp_volt):
"""This function checks whether the .csv data can be used for analysis
This function takes two string arguments, one of the string in the time
location in the csv file and one string that was in the voltage location
    in the csv file. This function returns False if either of the strings is
    empty, non-numeric, or NaN.
Args:
temp_time (str): a string of what should be the time data
        temp_volt (str): a string of what should be the voltage data
    Returns:
        bool: True if the data point is usable, False otherwise
    """
if temp_volt == '':
return False
elif temp_time == '':
return False
elif is_a_number(temp_time) is False:
return False
elif is_a_number(temp_volt) is False:
return False
elif math.isnan(float(temp_time)) is True:
return False
elif math.isnan(float(temp_volt)) is True:
return False
else:
return True
def log_if_bad_data(temp_check):
"""This function logs an error if there is a bad data point.
This function uses the result of temp_check to see if a data point
is usable in the ECG analysis. If the data point prompts check_data()
to return False, then it will log it as a bad data point.
Args:
temp_check (bool): True or False depending on whether the data
can be used
"""
if temp_check is False:
logging.error('Bad data point, '
'skipping to next line')
return
def log_if_data_too_high(volt):
"""This function logs a warning if there are voltages outside of the
normal operating range.
This function takes the volt list as input and finds the max and min of
the list. If the max is above 300 mV or the min is under -300 mV then a
warning is logged.
Args:
volt (list): list of ECG voltage magnitudes
"""
maximum = max(volt)
minimum = min(volt)
if maximum > 300 or minimum < -300:
logging.warning("This file contains a value outside the "
"normal operating range of +/- 300 mV.")
return
def read_input(filename):
"""This function reads the data from an input file
This function uses a with statement to open a file and a while loop to
go through the entire function. Each line is read, and the contents of
the line are cleaned and turned into time and voltage values where they
are then stored in the time and volt lists.
Args:
filename (str): the string of the filename to be opened
Returns:
list : a list of time values
list : a list of voltages
"""
time = list()
volt = list()
with open(filename, 'r') as f:
temp_line = f.readline()
while temp_line != "":
temp_time, temp_volt = split_data(temp_line)
temp_check = check_data(temp_time, temp_volt)
log_if_bad_data(temp_check)
if temp_check is True:
time.append(float(temp_time))
volt.append(float(temp_volt))
temp_line = f.readline()
log_if_data_too_high(volt)
return time, volt
def interface():
"""This function calls the functions that read the data and write json
files
    This function is called when the module is run. It requests that the
user type in the filename that stores the ECG data. This function
then calls the function read_input() that reads the data inside the
given filename and creates a dictionary of ECG metrics. This dictionary
of metrics is then sent to the function output_file() which creates the
json to store the data.
"""
filename = input("Please enter the filename: ")
ecg_time, ecg_volt = read_input(filename)
metrics = calc_metrics(ecg_time, ecg_volt, filename)
output_file(metrics, filename)
if __name__ == '__main__':
interface()
|
{"hexsha": "5cc996ee1e30a98a02b45da5df6e1511061323d7", "size": 15291, "ext": "py", "lang": "Python", "max_stars_repo_path": "ecg_analysis.py", "max_stars_repo_name": "cduncan9/ECG-Analysis", "max_stars_repo_head_hexsha": "17517b86970d320ad749d04b3c54ad0929c286d3", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ecg_analysis.py", "max_issues_repo_name": "cduncan9/ECG-Analysis", "max_issues_repo_head_hexsha": "17517b86970d320ad749d04b3c54ad0929c286d3", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ecg_analysis.py", "max_forks_repo_name": "cduncan9/ECG-Analysis", "max_forks_repo_head_hexsha": "17517b86970d320ad749d04b3c54ad0929c286d3", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.98, "max_line_length": 79, "alphanum_fraction": 0.6745144202, "include": true, "reason": "import numpy", "num_tokens": 3551}
|
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package gl_lib
# general backend for the OpenGL viewer.
#
import sys
try:
from OpenGL.GL import *
from OpenGL.GLU import *
from PyQt5 import QtOpenGL
from PyQt5.QtOpenGL import QGLWidget
open_gl_ok=True
except:
print("opengl error from gl_lib",sys.exc_info()[0])
import random
import numpy as np
from math import pi,acos,sin,cos
from gl_base_object import gl_base_object
stars=[]
def gl_obj_id_starts_with(ids,val):
found=False
for id in ids:
if id.startswith(val)==True:
found=True
break
return found
def val_to_rgb(v,grey=False):
	# clamp v into (0, 1) so the mesh search below stays in range
	if v>1.0:
		v=0.99
	if v<=0.0:
		v=1e-9
if grey==True:
return v,v,v
mesh=[ 0.0, 0.5, 1.0]
colors = [(0, 0, 1.0), (0, 1.0, 0), (1.0, 0, 0)]
i0=0
while(1):
if mesh[i0]<v:
i0=i0+1
else:
i0=i0-1
break
i1 = i0+1
dx=1.0/(len(colors)-1)
	f=(v-mesh[i0])/dx
(r0, g0, b0) = colors[i0]
(r1, g1, b1) = colors[i1]
return r0 + f*(r1-r0), g0 + f*(g1-g0), b0 + f*(b1-b0)
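# Illustrative check (added note, not in the original file): val_to_rgb
# linearly interpolates blue -> green -> red over (0, 1], for example
#   val_to_rgb(0.25) -> (0.0, 0.5, 0.5)
#   val_to_rgb(0.5)  -> (0.0, 1.0, 0.0)
#   val_to_rgb(0.75) -> (0.5, 0.5, 0.0)
# and val_to_rgb(v, grey=True) simply returns the greyscale triple (v, v, v).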
|
{"hexsha": "e320ac19cd3645ac30d95b07031684ce156c41aa", "size": 2015, "ext": "py", "lang": "Python", "max_stars_repo_path": "gpvdm_gui/gui/gl_lib.py", "max_stars_repo_name": "roderickmackenzie/gpvdm", "max_stars_repo_head_hexsha": "914fd2ee93e7202339853acaec1d61d59b789987", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2016-09-13T08:58:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T07:04:52.000Z", "max_issues_repo_path": "gpvdm_gui/gui/gl_lib.py", "max_issues_repo_name": "roderickmackenzie/gpvdm", "max_issues_repo_head_hexsha": "914fd2ee93e7202339853acaec1d61d59b789987", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-11-11T12:33:02.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-08T00:48:08.000Z", "max_forks_repo_path": "gpvdm_gui/gui/gl_lib.py", "max_forks_repo_name": "roderickmackenzie/gpvdm", "max_forks_repo_head_hexsha": "914fd2ee93e7202339853acaec1d61d59b789987", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-01-03T06:17:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-01T15:59:00.000Z", "avg_line_length": 22.3888888889, "max_line_length": 89, "alphanum_fraction": 0.6789081886, "include": true, "reason": "import numpy", "num_tokens": 659}
|
//
// helper.hpp
// arb-avm-cpp
//
// Created by Harry Kalodner on 5/17/20.
//
#ifndef avm_tests_helper_hpp
#define avm_tests_helper_hpp
#include <boost/filesystem.hpp>
#include <string>
extern std::string dbpath;
struct DBDeleter {
~DBDeleter() { boost::filesystem::remove_all(dbpath); }
};
#endif /* avm_tests_helper_hpp */
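// Usage sketch (illustrative, not part of the original header; assumes a
// Catch2-style TEST_CASE macro). Creating a DBDeleter on the stack cleans up
// the database directory even if an assertion throws:
//
//     TEST_CASE("uses a temporary database") {
//         DBDeleter deleter;            // remove_all(dbpath) on scope exit
//         /* ... create and exercise a database rooted at dbpath ... */
//     }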
|
{"hexsha": "e7f172eae57e829a05cc85bf67e6814406dc8ee5", "size": 339, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "packages/arb-avm-cpp/tests/helper.hpp", "max_stars_repo_name": "mrsmkl/arbitrum", "max_stars_repo_head_hexsha": "7941a8c4870f98ed7999357049a5eec4a75d8c78", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "packages/arb-avm-cpp/tests/helper.hpp", "max_issues_repo_name": "mrsmkl/arbitrum", "max_issues_repo_head_hexsha": "7941a8c4870f98ed7999357049a5eec4a75d8c78", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 120.0, "max_issues_repo_issues_event_min_datetime": "2021-02-18T07:19:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T12:08:45.000Z", "max_forks_repo_path": "packages/arb-avm-cpp/tests/helper.hpp", "max_forks_repo_name": "fredlacs/arbitrum", "max_forks_repo_head_hexsha": "d4dc5a2c596880614d41119a13b90c09922d08de", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-09-20T19:25:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-20T19:25:23.000Z", "avg_line_length": 15.4090909091, "max_line_length": 59, "alphanum_fraction": 0.7020648968, "num_tokens": 98}
|
import string
import numpy as np
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
from pandas.api.types import (
is_extension_array_dtype,
pandas_dtype,
)
from .pandas_vb_common import (
datetime_dtypes,
extension_dtypes,
numeric_dtypes,
string_dtypes,
)
_numpy_dtypes = [
np.dtype(dtype) for dtype in (numeric_dtypes + datetime_dtypes + string_dtypes)
]
_dtypes = _numpy_dtypes + extension_dtypes
class Dtypes:
params = _dtypes + list(map(lambda dt: dt.name, _dtypes))
param_names = ["dtype"]
def time_pandas_dtype(self, dtype):
pandas_dtype(dtype)
class DtypesInvalid:
param_names = ["dtype"]
params = ["scalar-string", "scalar-int", "list-string", "array-string"]
data_dict = {
"scalar-string": "foo",
"scalar-int": 1,
"list-string": ["foo"] * 1000,
"array-string": np.array(["foo"] * 1000),
}
def time_pandas_dtype_invalid(self, dtype):
try:
pandas_dtype(self.data_dict[dtype])
except TypeError:
pass
class SelectDtypes:
params = [
tm.ALL_INT_NUMPY_DTYPES
+ tm.ALL_INT_EA_DTYPES
+ tm.FLOAT_NUMPY_DTYPES
+ tm.COMPLEX_DTYPES
+ tm.DATETIME64_DTYPES
+ tm.TIMEDELTA64_DTYPES
+ tm.BOOL_DTYPES
]
param_names = ["dtype"]
def setup(self, dtype):
N, K = 5000, 50
self.index = tm.makeStringIndex(N)
self.columns = tm.makeStringIndex(K)
def create_df(data):
return DataFrame(data, index=self.index, columns=self.columns)
self.df_int = create_df(np.random.randint(low=100, size=(N, K)))
self.df_float = create_df(np.random.randn(N, K))
self.df_bool = create_df(np.random.choice([True, False], size=(N, K)))
self.df_string = create_df(
np.random.choice(list(string.ascii_letters), size=(N, K))
)
def time_select_dtype_int_include(self, dtype):
self.df_int.select_dtypes(include=dtype)
def time_select_dtype_int_exclude(self, dtype):
self.df_int.select_dtypes(exclude=dtype)
def time_select_dtype_float_include(self, dtype):
self.df_float.select_dtypes(include=dtype)
def time_select_dtype_float_exclude(self, dtype):
self.df_float.select_dtypes(exclude=dtype)
def time_select_dtype_bool_include(self, dtype):
self.df_bool.select_dtypes(include=dtype)
def time_select_dtype_bool_exclude(self, dtype):
self.df_bool.select_dtypes(exclude=dtype)
def time_select_dtype_string_include(self, dtype):
self.df_string.select_dtypes(include=dtype)
def time_select_dtype_string_exclude(self, dtype):
self.df_string.select_dtypes(exclude=dtype)
class CheckDtypes:
def setup(self):
self.ext_dtype = pd.Int64Dtype()
self.np_dtype = np.dtype("int64")
def time_is_extension_array_dtype_true(self):
is_extension_array_dtype(self.ext_dtype)
def time_is_extension_array_dtype_false(self):
is_extension_array_dtype(self.np_dtype)
from .pandas_vb_common import setup # noqa: F401 isort:skip
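# Illustrative note (assumption, not part of the suite): airspeed velocity
# (asv) instantiates each class above once per parameter value and times the
# `time_*` methods, so the Dtypes benchmark effectively measures calls such as
#     pandas_dtype(np.dtype("int64"))   # from a dtype object
#     pandas_dtype("int64")             # from the dtype's string name
# Something like `asv run --bench dtypes` would run this file locally.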
|
{"hexsha": "c45d5a0814544af5affbb4bb593bb418a33254af", "size": 3167, "ext": "py", "lang": "Python", "max_stars_repo_path": "asv_bench/benchmarks/dtypes.py", "max_stars_repo_name": "KiranHipparagi/pandas", "max_stars_repo_head_hexsha": "cc743996fe49aab5a9226444d98a6faa423f4aec", "max_stars_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-04-02T06:05:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-07T02:03:50.000Z", "max_issues_repo_path": "asv_bench/benchmarks/dtypes.py", "max_issues_repo_name": "KiranHipparagi/pandas", "max_issues_repo_head_hexsha": "cc743996fe49aab5a9226444d98a6faa423f4aec", "max_issues_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-06-12T00:39:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-12T00:39:36.000Z", "max_forks_repo_path": "asv_bench/benchmarks/dtypes.py", "max_forks_repo_name": "KiranHipparagi/pandas", "max_forks_repo_head_hexsha": "cc743996fe49aab5a9226444d98a6faa423f4aec", "max_forks_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-17T12:55:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T21:46:27.000Z", "avg_line_length": 27.0683760684, "max_line_length": 83, "alphanum_fraction": 0.6741395643, "include": true, "reason": "import numpy", "num_tokens": 760}
|
import os
import struct
from multiprocessing.pool import Pool
import numpy as np
class PointCloud:
"""
PCD format to (x, y, z, intensity) data.
Only binary-based PCD is supported.
Use attribute 'data' to get the numpy array (float32).
"""
def __init__(self, filename: str, use_intensity=True):
self.fields = []
self.formats = []
self.sizes = []
self.types = []
self.counts = []
self.viewpoint = []
self.points = 0
self.use_intensity = use_intensity
with open(filename, 'rb') as f:
lines = f.readlines()
header = [line.decode("utf-8") for line in lines[:11]]
binary_data = bytearray(b''.join(lines[11:]))
self.parse_header(header)
self.data = self.parse_binary_data(binary_data)
def parse_header(self, header):
version = header[1].strip()
assert version == 'VERSION 0.7'
fields = header[2].split()
assert fields[0] == 'FIELDS'
assert 'x' in fields and 'y' in fields and 'z' in fields
if self.use_intensity:
assert 'intensity' in fields
self.fields = fields[1:]
sizes = header[3].split()
assert sizes[0] == 'SIZE' and len(sizes) == len(fields)
self.sizes = [int(size) for size in sizes[1:]]
types = header[4].split()
assert types[0] == 'TYPE' and len(types) == len(fields)
self.types = types[1:]
# convert to struct format
self.formats = [''] * len(self.fields)
for i in range(len(self.fields)):
size = self.sizes[i]
t = self.types[i]
if size == 1:
if t == 'I':
self.formats[i] = 'b'
elif t == 'U':
self.formats[i] = 'B'
else:
raise ValueError
elif size == 2:
if t == 'I':
self.formats[i] = 'h'
elif t == 'U':
self.formats[i] = 'H'
else:
raise ValueError
elif size == 4:
if t == 'I':
self.formats[i] = 'i'
elif t == 'U':
self.formats[i] = 'I'
elif t == 'F':
self.formats[i] = 'f'
else:
raise ValueError
elif size == 8:
if t == 'I':
self.formats[i] = 'q'
elif t == 'U':
self.formats[i] = 'Q'
elif t == 'F':
self.formats[i] = 'd'
else:
raise ValueError
else:
raise ValueError
counts = header[5].split()
assert counts[0] == 'COUNT' and len(counts) == len(fields)
self.counts = [int(count) for count in counts[1:]]
viewpoint = header[8].split()
assert viewpoint[0] == 'VIEWPOINT'
        # VIEWPOINT holds a translation + quaternion, so parse as floats
        self.viewpoint = [float(v) for v in viewpoint[1:]]
points = header[9].split()
assert points[0] == 'POINTS'
self.points = int(points[1])
data = header[10].strip()
assert data == 'DATA binary'
def parse_binary_data(self, binary_data):
output_points = []
ptr = 0
point_len = 4 if self.use_intensity else 3
for p_idx in range(self.points):
point = [0] * point_len
for f_idx, field in enumerate(self.fields):
cur_data_len = self.sizes[f_idx] * self.counts[f_idx]
cur_data = binary_data[ptr:ptr + cur_data_len]
ptr += cur_data_len
# check field of interest
idx = -1
if field == 'x':
idx = 0
elif field == 'y':
idx = 1
elif field == 'z':
idx = 2
elif field == 'intensity' and self.use_intensity:
idx = 3
# assign converted value
if idx != -1:
                    # fields with COUNT > 1 unpack to a tuple; keep the first value
                    point[idx] = struct.unpack(
                        self.formats[f_idx] * self.counts[f_idx], cur_data)[0]
output_points.append(point)
output_points = np.array(output_points).astype(np.float32)
return output_points
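    # Suggested optimization (illustrative, not in the original): when every
    # field has COUNT 1, the per-point loop above can be replaced by a single
    # vectorized read, e.g.
    #     dt = np.dtype([(f, fmt) for f, fmt in zip(self.fields, self.formats)])
    #     arr = np.frombuffer(bytes(binary_data), dtype=dt, count=self.points)
    #     pts = np.stack([arr['x'], arr['y'], arr['z']], axis=1).astype(np.float32)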
def _pcd_to_bin(in_path, out_path):
pc = PointCloud(in_path)
pts = pc.data
# change coordinates
# from x left, y rear, z up, to x front, y left, z up
# pts[:, (0, 1)] = pts[:, (1, 0)]
# pts[:, 0] *= -1
pts.tofile(out_path)
def pcd_to_bin_dir(data_root):
src_path = os.path.join(data_root, 'lidar')
dst_path = os.path.join(data_root, 'lidar_bin')
os.makedirs(dst_path, exist_ok=True)
arguments = []
for sustech_lidar in os.listdir(src_path):
frame = sustech_lidar[:-4]
in_path = os.path.join(src_path, sustech_lidar)
out_path = os.path.join(dst_path, f'{frame}.bin')
arguments.append((in_path, out_path))
print(f'Start converting {len(arguments)} files from {src_path} to {dst_path}')
with Pool(8) as pool:
for _ in pool.starmap(_pcd_to_bin, arguments):
pass
print('Done')
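# Usage sketch (illustrative; the paths are placeholders):
#     pc = PointCloud('frame_000000.pcd')      # pc.data is an (N, 4) float32 array
#     pcd_to_bin_dir('/path/to/dataset_root')  # reads <root>/lidar/*.pcd and
#                                              # writes <root>/lidar_bin/*.bin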
|
{"hexsha": "c96485190f3ce20b02193f3488bd83b1fe3a6e88", "size": 5226, "ext": "py", "lang": "Python", "max_stars_repo_path": "pcdet/datasets/sustech/pcd_utils.py", "max_stars_repo_name": "Kemo-Huang/OpenPCDet", "max_stars_repo_head_hexsha": "2f1c9d46ea8ba342dbbcf1b50054d38f99234dfc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pcdet/datasets/sustech/pcd_utils.py", "max_issues_repo_name": "Kemo-Huang/OpenPCDet", "max_issues_repo_head_hexsha": "2f1c9d46ea8ba342dbbcf1b50054d38f99234dfc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pcdet/datasets/sustech/pcd_utils.py", "max_forks_repo_name": "Kemo-Huang/OpenPCDet", "max_forks_repo_head_hexsha": "2f1c9d46ea8ba342dbbcf1b50054d38f99234dfc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6727272727, "max_line_length": 83, "alphanum_fraction": 0.4955989284, "include": true, "reason": "import numpy", "num_tokens": 1259}
|
#!/usr/bin/python3.6
import os
#import gunicorn
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State, Event
import time
from rq import Queue
from worker import conn
import uuid
from data_import import *
from graph_manipulation import *
from time_aware_splits import *
from popularity_based_rec import *
from personalized_prank import *
from pathsim import *
from simrank import *
from accuracy_evaluation import *
from collections import defaultdict
from collections import Counter
from operator import itemgetter
from background_jobs import create_graph
import pandas as pd
import networkx as nx
from collections import defaultdict
import plotly.graph_objs as go
from datetime import datetime
import numpy as np
import datetime
e = ""
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
datasets = [('test','test'),('German News Provider','german_news'),('Italian News Provider','italian_news'),('German TVBroadcasts Provider','german_tvbroadcasts')]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.layout = html.Div([
html.Div(id='job_id',style={'display':'none'}),
# dcc.Input(id='my-id', value='initial value', type='text'),
dcc.Interval(
id='interval-component',
interval=10 * 1000, # in milliseconds
n_intervals=100
),
html.Div([
html.Center('Comparison of Graph-based algorithms for Session-based Recommendations', style={'color':'blue','font-weight':'bold'})]),
html.Br(),
html.Div([
html.Center('Co-creators: P. Symeonidis, L. Kirjackaja, S. Chairistanidis, and M. Zanker', style={'color':'blue','font-weight':'bold','font-size':12})]),
html.Br(),
html.Div([
html.Div('Select a dataset'),
dcc.Dropdown(
id='xaxis-column',
options=[{'label': i[0], 'value': i[1]} for i in datasets],
value='italian_news'
)]),
html.Div([
html.Div('Number of Time Splits'),
dcc.Input(id='number_of_splits',
placeholder='Number Of Splits',
type='number',
min=1,
step=1,
value=12,
),
html.Div('Time Window Size'),
dcc.Input(id='short_days',
placeholder='Short Days Window',
type='number',
min=1,
step=1,
value=1,
),
html.Div('Number of Recommendations'),
dcc.Input(id='number_recommendations',
placeholder='Number Of Recommendations',
type='number',
min=1,
step=1,
value=1,
),
html.Div('Minimum Number of Items per Session'),
dcc.Input(id='min_items_n',
placeholder='Minimum Number of Items per Session',
type='number',
min=1,
step=1,
value=3,
)
],style={'columnCount': 2}),
html.Br(),
html.Label('Node types',style={'font-weight':'bold'}),
html.Div([
dcc.Checklist(id='nodes',
options=[
{'label': 'Users', 'value': 'U'},
{'label': 'Articles', 'value': 'A'},
{'label': 'Sessions', 'value': 'S'},
{'label': 'Locations', 'value': 'L'},
{'label': 'Categories', 'value': 'C'},
],
values=['U','S', 'A']
),],style={'columnCount': 3}),
html.Br(),
html.Label('Methods Comparison',style={'font-weight':'bold'}),
html.Div([
dcc.Checklist(id='methods',
options=[
{'label': 'Pop', 'value': 'POP'},
{'label': 'RWR', 'value': 'RWR'},
{'label': 'Simrank', 'value': 'Simrank'},
{'label': 'Pathsim', 'value': 'Pathsim'},
{'label':'PathCount','value': 'PathCount'}
# {'label': 'SKNN', 'value': 'SKNN'}
],
values=['RWR','POP']
),],style={'columnCount': 3}),
html.Br(),
html.Button('Execute',id='execute'),
html.Button('Toggle Details',id='hide'),
html.Br(),
html.Div(id='status_wrap',children=[dcc.Markdown(id='status')]),
html.Div([
html.Div(children='', id='dummy-results'),
dcc.Interval(
id='update-interval',
interval=60 * 60 * 5000, # in milliseconds
n_intervals=0
),
dcc.Graph(id = 'evaluation'),
], id='results'),
],)
# @app.callback(
# Output(component_id='my-div', component_property='children'),
# [Input(component_id='my-id', component_property='value')]
# )
# def update_output_div(input_value):
# return 'You\'ve entered "{}"'.format(input_value)
# @app.callback(
# dash.dependencies.Output('job_id', 'children'),
# [dash.dependencies.Input('execute', 'n_clicks')],
#
# state = [State('xaxis-column', 'value'),
# State('number_of_splits', 'value'),
# State('short_days', 'value'),
# State('number_recommendations', 'value'),
# State('min_items_n', 'value'),
# State('nodes', 'values'),
# State('methods', 'values'),
# ]
# )
# def query_submitted(click,data_path,number_splits,short_days,number_recommendations,min_items_n,nodes,methods):
# if click == 0 or click is None:
# return ''
# else:
# # a query was submitted, so queue it up and return job_id
# duration = 20 # pretend the process takes 20 seconds to complete
# q = Queue(connection=conn)
# job_id = str(uuid.uuid4())
# print(f'Job ID when started {job_id}')
# job = q.enqueue_call(func=create_graph,
# args=(data_path,number_splits,short_days,number_recommendations,min_items_n,nodes,methods),
#
# job_id=job_id)
# return job_id
#
#
#
# @app.callback(
# dash.dependencies.Output('update-interval', 'interval'),
# [dash.dependencies.Input('job_id', 'children'),
# dash.dependencies.Input('update-interval', 'n_intervals')])
# def stop_or_start_table_update(job_id, n_intervals):
# q = Queue(connection=conn)
#
# job = q.fetch_job(job_id)
# if job is not None:
# # the job exists - try to get results
# result = job.result
# if result is None:
# # a job is in progress but we're waiting for results
# # therefore regular refreshing is required. You will
# # need to fine tune this interval depending on your
# # environment.
# return 1000
# else:
# # the results are ready, therefore stop regular refreshing
# return 60*60*1000
# else:
# # the job does not exist, therefore stop regular refreshing
# return 60*60*1000
#
#
# # this callback checks if the job result is ready. If it's ready
# # the results return to the table. If it's not ready, it pauses
# # for a short moment, then empty results are returned. If there is
# # no job, then empty results are returned.
# @app.callback(
# dash.dependencies.Output('evaluation', 'figure'),
# [dash.dependencies.Input('update-interval', 'n_intervals')],
# [dash.dependencies.State('job_id', 'children')])
# def update_results_tables(n_intervals, job_id):
# q = Queue(connection=conn)
# print(job_id)
# print(q.count)
# job = q.fetch_job(job_id)
# if job is not None:
# # job exists - try to get result
# result = job.result
# print(f'Result : {result}')
# if result is None:
# # results aren't ready, pause then return empty results
# # You will need to fine tune this interval depending on
# # your environment
# time.sleep(3)
# return {
# 'data': go.Scatter(x=[], y=[]),
# 'layout': go.Layout(
# xaxis={
# 'title': 'Time_Period',
#
# },
# yaxis={
# 'title': 'Precision',
#
# })
#
# }
# if result is not None:
# # results are ready
# traces = []
# for method in result.keys():
# trace = go.Scatter(x=result[method][0], y=result[method][1], name=method)
# traces.append(trace)
# return {
# 'data': traces,
# 'layout': go.Layout(
# xaxis={
# 'title': 'Time_Period',
#
# },
# yaxis={
# 'title': 'Precision',
#
# })
#
# }
# else:
# # no job exists with this id
# return {
# 'data': go.Scatter(x=[], y=[]),
# 'layout': go.Layout(
# xaxis={
# 'title': 'Time_Period',
#
# },
# yaxis={
# 'title': 'Precision',
#
# })
#
# }
#
# @app.callback(
# dash.dependencies.Output('status', 'children'),
# [dash.dependencies.Input('job_id', 'children'),
# dash.dependencies.Input('update-interval', 'n_intervals')])
# def stop_or_start_table_update(job_id, n_intervals):
# q = Queue(connection=conn)
# job = q.fetch_job(job_id)
# if job is not None:
# # the job exists - try to get results
# result = job.result
# if result is None:
# # a job is in progress and we're waiting for results
# global e
# return 'Running query. This might take a moment - don\'t close your browser! ' + str(e)
# else:
# # the results are ready, therefore no message
# return ''
# else:
# # the job does not exist, therefore no message
# return ''
#
@app.callback(Output(component_id='status',component_property='children'),
events=[Event('interval-component','interval')])
def update_text_area():
global e
return e
@app.callback(Output(component_id='status_wrap',component_property='style'),
[Input('hide','n_clicks')])
def toggle_details(n_clicks):
if n_clicks==None:
return {'white-space':'pre-wrap','margin-top':'40px'}
else:
if n_clicks%2==0:
return {'display':'block','white-space':'pre-wrap','margin-top':'40px'}
else:
return {'display':'none'}
@app.callback(
Output(component_id='evaluation', component_property='figure'),
[Input('execute', 'n_clicks')
],state=[ State('xaxis-column','value'),
State('number_of_splits','value'),
State('short_days','value'),
State('number_recommendations','value'),
State('min_items_n','value'),
State('nodes','values'),
State('methods','values'),
]
)
def update_div(n_clicks,data_path,number_splits,short_days,number_recommendations,min_items_n,nodes,methods):
import itertools
if n_clicks == None:
global e
e+=(f'{n_clicks}')
else:
# DATA_PATH = f'./Data/{data_path} - pk_client, pk_session, pk_article, timeview (s), date, time.txt'
# CAT_DATA_PATH = f'./Data/{data_path}-5topics-doc-topics.txt'
# LOC_DATA_PATH = f'./Data/{data_path} - pk_article, pk_district.txt'
# gm = GraphManipulation()
# di = DataImport()
# di.import_user_click_data(DATA_PATH, adjust_pk_names=True)
# print('Sterguis')
# # --- Reduce dataset to 1 month / 1 week / ...
# # di.reduce_timeframe(dt.datetime(2017,3,1), dt.datetime(2017,3,31)) # if G_Video33_1month is selected
# # di.reduce_timeframe(dt.datetime(2017, 3, 1), dt.datetime(2017, 3, 7)) # if G_Video33_1week is selected
#
# # --- Remove inactive users (the ones with small number of sessions in total)
# # di.remove_inactive_users(n_sessions=MIN_N_SESSIONS)
# #
# # ---------- Add categories -----------------------------
# print(f'{datetime.datetime.now()} Import Categories')
# di.import_categories_data(CAT_DATA_PATH)
# print(f'{datetime.datetime.now()} Import Categories End')
#
# print(f'{datetime.datetime.now()} Filter Short Session')
# # ---- Leave only sessions with at least specified number of articles
# di.filter_short_sessions(n_items=min_items_n)
# print(f'{datetime.datetime.now()} Filter Short Session End')
#
#
# # ------ Create a graph on the base of the dataframe ----
# print(f'{datetime.datetime.now()} Graph Manipulation')
# gm = GraphManipulation(G_structure='USAC')
# print(f'{datetime.datetime.now()} Graph Manipulation End')
#
# print(f'{datetime.datetime.now()} Create Graph')
# gm.create_graph(di.user_ses_df)
# print(f'{datetime.datetime.now()} Create Graph End')
#
# # Filter again, because dataframe filtering leaves sessions where the same article is repeatedly read several times
# # gm.filter_sessions(gm.G, n_items=MIN_ITEMS_N)
# # gm.filter_users(gm.G, n_sessions=MIN_N_SESSIONS)
#
# # ---------- Add locations ------------------------------
# di.import_locations_data(LOC_DATA_PATH)
# gm.add_locations_data(di.locations_data)
# G = gm.G
gm = GraphManipulation()
G = nx.read_gpickle(f'./Data/{data_path}.gpickle')
possible_subgraphs = [('U','A'),('S','A'),('A','C'),('A','L'), ('U','S','A'),('U','A','C'),('U','A','L'),('A','C','L'), ('S','A','C'), ('S','A','L'), ('U','S','A','C'), ('U','S','A','L'), ('U','A','C','L'), ('S','A','C','L'), ('U','S','A','C','L')]
subgraph_movies = {('U','A'):(('U','M')),('S','A'):('S','M'),('A','C'):('M','C'),('A','L'):('M','L'),('U','S','A'):('U','S','M'),('U','A','C'):('U','M','C'),('U','A','L'):('U','M','L'),('A','C','L'):('M','C','L'), ('S','A','C'):('S','M','C'), ('S','A','L'):('S','M','L'), ('U','S','A','C'):('U','S','M','C'), ('U','S','A','L'):('U','S','M','L'), ('U','A','C','L'):('U','M','C','L'), ('S','A','C','L'):('S','M','C','L'), ('U','S','A','C','L'):('U','S','M','C','L')}
subgraphs =[]
for length in range(2,len(nodes)+1):
x = list(itertools.permutations(nodes,length))
for item in x:
if item in possible_subgraphs:
subgraphs.append(item)
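        # Example (illustrative): with nodes == ['U', 'S', 'A'] the two loops
        # above keep exactly the orderings listed in possible_subgraphs, i.e.
        # subgraphs == [('U', 'A'), ('S', 'A'), ('U', 'S', 'A')].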
# gm.filter_users(gm.G, n_sessions=min)
gm.G = G
gm.filter_sessions(gm.G, n_items=min_items_n)
e = f'{n_clicks}'
e+=(f'\nGENERAL STATISTICS')
e+=(f'\nNumber of users:{len(gm.get_users(G))}')
e +=(f'\nNumber of sessions:{len(gm.get_sessions(G))}')
e +=(f'\nNumber of articles:{len(gm.get_articles(G))}')
e +=(f'\nNumber of categories:{len(gm.get_categories(G))}')
e +=(f'\nNumber of locations:{len(gm.get_locations(G))}')
art_per_session = gm.get_articles_per_session(gm.G)
e +=(f'\nAvg number of articles per session:{round(np.mean(art_per_session), 2)}')
e +=(f'\nMax number of articles per session:{round(np.max(art_per_session), 2)}')
ses_per_user = gm.get_sessions_per_user(gm.G)
e +=(f'\nAvg number of sessions per user:{round(np.mean(ses_per_user), 2)}')
e +=(f'\nMax number of sessions per user:{round(np.max(ses_per_user), 2)} ')
tas = TimeAwareSplits(G)
tas.create_time_split_graphs(G, num_splits=number_splits)
# tas.create_time_window_graphs(short_days)
tas.create_time_window_graphs(short_days)
_dump_process = True
short_back_timedelta = datetime.timedelta(days=short_days)
e +=(f'\n\nTime span list:\n')
counter = 0
for timespan in tas.time_span_list:
e+=(f'{timespan}\n')
counter+=1
if counter>1:
counter=0
pop = PopularityBasedRec(G, number_recommendations)
# RWR_SA = PersonalizedPageRankBasedRec(number_recommendations)
ae = AccuracyEvaluation(G)
train_set_len = []
train_len_dict = defaultdict(list)
n_articles_train = []
n_recommendation = dict()
sessions_per_user_in_short_term = []
avg_ses_len = defaultdict(list)
for tw_i, tw_iter in enumerate(tas.time_window_graph_list):
e +=(f'\n\n======= Time split{tw_i} =======')
n_recommendation[tw_i] = 0
n_recommendation[f'{tw_i}_correct'] = 0
# long_train_g = tw_iter[0]
tw_iter[1].frozen = False
test_g = tw_iter[1].copy()
# ------ From test_g remove sessions with less or equal number of articles needed for building recommendation
test_g = gm.filter_sessions(test_g, n_items=min_items_n)
if len(test_g) == 0:
continue
# ------ 1. Create a time-ordered list of user sessions
test_sessions = sorted(
[(s, attr['datetime']) for s, attr in test_g.nodes(data=True) if attr['entity'] == 'S'],
key=lambda x: x[1])
sessions_knn_dict = defaultdict(tuple)
# For each step a ranked list of N recommendations is created
for (s, s_datetime) in test_sessions:
user = [n for n in nx.neighbors(test_g, s) if test_g.get_edge_data(s, n)['edge_type'] == 'US'][0]
test_session_G = nx.Graph()
test_session_G.add_node(user, entity='U')
test_session_G.add_node(s, entity='S')
test_session_G.add_edge(user, s, edge_type='US')
# -----------------------------------------------------
articles = sorted(
[n for n in nx.neighbors(test_g, s) if test_g.get_edge_data(s, n)['edge_type'] == 'SA'],
key=lambda x: test_g.get_edge_data(s, x)['reading_datetime'])
avg_ses_len[tw_i].append(len(articles))
# print('----------\narticles:', articles)
# print('session:', s, s_datetime)
for i in range(min_items_n, len(articles)):
methods_to_be_evaluated = []
methos_to_be_evaluated_explainable = []
# ------------ Short-term training set ----
short_train_g = tas.create_short_term_train_set(s_datetime, short_back_timedelta,
test_session_graph=test_session_G)
if len(short_train_g) == 0:
continue
test_session_G.add_nodes_from(articles[:i], entity='A')
for a in articles[:i]:
test_session_G.add_edge(s, a, edge_type='SA')
test_session_G.add_node(gm.map_category(a), entity='C')
test_session_G.add_edge(a, gm.map_category(a), edge_type='AC')
test_session_G.add_node(gm.map_location(a), entity='L')
test_session_G.add_edge(a, gm.map_location(a), edge_type='AL')
# ------------ Short Training Set (containing currently analyzed session!) ---------
# ----------- Long-term user training set ---
users_from_short_train = gm.get_users(short_train_g)
user_long_train_g = tas.create_long_term_user_train_set(user, s, s_datetime, articles[:i],
users_from_short_train)
if len(user_long_train_g) == 0:
continue
train_set_len.append(len(gm.get_sessions(short_train_g)))
train_len_dict[tw_i].append(len(gm.get_sessions(short_train_g)))
n_articles_train.append(len(gm.get_articles(short_train_g)))
ses_per_user = gm.get_sessions_per_user(short_train_g)
sessions_per_user_in_short_term.append(Counter(ses_per_user))
subgraphs_train = []
# --- Create train graphs
for subgraph in subgraphs:
if len(subgraph)==2:
subgraphs_train.append((gm.create_subgraph_of_adjacent_entities(short_train_g,
list_of_entities=[subgraph[0][0], subgraph[1]]),f'{subgraph[0]}_{subgraph[1]}'))
elif len(subgraph)==3:
subgraphs_train.append((gm.create_subgraph_of_adjacent_entities(short_train_g,
list_of_entities=[
subgraph[0],
subgraph[1],subgraph[2]]),f'{subgraph[0]}_{subgraph[1]}_{subgraph[2]}'))
elif len(subgraph)==4:
subgraphs_train.append((gm.create_subgraph_of_adjacent_entities(short_train_g,
list_of_entities=[
subgraph[0],
subgraph[1],
subgraph[2],subgraph[3]]),f'{subgraph[0]}_{subgraph[1]}_{subgraph[2]}_{subgraph[3]}'))
else:
subgraphs_train.append((gm.create_subgraph_of_adjacent_entities(short_train_g,
list_of_entities=[
subgraph[0],
subgraph[1],
subgraph[2],
subgraph[3],subgraph[4]]),f'{subgraph[0]}_{subgraph[1]}_{subgraph[2]}_{subgraph[3]}_{subgraph[4]}'))
# sa_train_g = gm.create_subgraph_of_adjacent_entities(short_train_g, list_of_entities=['S', 'A'])
# usa_train_g = gm.create_subgraph_of_adjacent_entities(short_train_g, list_of_entities=['U', 'S', 'A'])
# sac_train_g = gm.create_subgraph_of_adjacent_entities(short_train_g, list_of_entities=['S', 'A', 'C'])
# sal_train_g = gm.create_subgraph_of_adjacent_entities(short_train_g,
# list_of_entities=['S', 'A', 'L'])
# usac_train_g = gm.create_subgraph_of_adjacent_entities(short_train_g, list_of_entities=['U', 'S', 'A', 'C'])
# usal_train_g = gm.create_subgraph_of_adjacent_entities(short_train_g, list_of_entities=['U', 'S', 'A', 'L'])
# -------------------------------------------------------------------------------
# --------------- SIMILARITIES --------------------------------------------------
# -----------------------------------------------------
# -----------------------------------------------------
# ------------------- SimRank -------------------------
# -----------------------------------------------------
# ------------------- RWR -----------------------------
# --- Run models
# RWR_SA.compute_transition_matrix(sa_train_g)
# RWR_USA.compute_transition_matrix(usa_train_g)
# RWR_SAC.compute_transition_matrix(sac_train_g)
# RWR_SAL.compute_transition_matrix(sal_train_g)
# RWR_USAC.compute_transition_matrix(usac_train_g)
# RWR_USAL.compute_transition_matrix(usal_train_g)
# RWR_USACL.compute_transition_matrix(short_train_g)
# --- Extract SS matrices
# RWR_SA.create_sessionsession_matrix()
# RWR_SA.create_sessionitem_matrix()
# RWR_USA.create_sessionsession_matrix()
# RWR_USA.create_sessionitem_matrix()
# RWR_USA.create_itemitem_matrix()
# RWR_SAC.create_sessionsession_matrix()
# RWR_SAC.create_sessionitem_matrix()
# RWR_SAC.create_itemitem_matrix()
# RWR_SAL.create_sessionsession_matrix()
# RWR_SAL.create_sessionitem_matrix()
# RWR_SAL.create_itemitem_matrix()
# RWR_USAC.create_sessionsession_matrix()
# RWR_USAC.create_sessionitem_matrix()
# RWR_USAC.create_itemitem_matrix()
# RWR_USAL.create_sessionsession_matrix()
# RWR_USAL.create_sessionitem_matrix()
# RWR_USAL.create_itemitem_matrix()
# RWR_USACL.create_sessionsession_matrix()
# RWR_USACL.create_sessionitem_matrix()
# RWR_USACL.create_itemitem_matrix()
# -----------------------------------------------------
# ------------------ PathSim --------------------------
# PathSim_AUA.compute_similarity_matrix(short_train_g, 'A', 'U', 2)
# PathSim_ASA.compute_similarity_matrix(short_train_g, 'A', 'S', 1)
# PathSim_ACA.compute_similarity_matrix(short_train_g, 'A', 'C', 1)
# PathSim_ALA.compute_similarity_matrix(short_train_g, 'A', 'L', 1)
# -----------------------------------------------------
# ------------------ PathCount --------------------------
# PathCount_AUA.compute_similarity_matrix_my(short_train_g, 'A', 'U', 2)
# PathCount_ASA.compute_similarity_matrix_my(short_train_g, 'A', 'S', 1)
# PathCount_ACA.compute_similarity_matrix_my(short_train_g, 'A', 'C', 1)
# PathCount_ALA.compute_similarity_matrix_my(short_train_g, 'A', 'L', 1)
# -----------------------------------------------------
# ------------------- S-S PathSim ---------------------
# SKNN_PathSim_SAS.compute_similarity_matrix(short_train_g, 'S', 'A', 1)
# SKNN_PathSim_SACAS.compute_similarity_matrix(sac_train_g, 'S', 'C', 2)
# SKNN_PathSim_SALAS.compute_similarity_matrix(sal_train_g, 'S', 'L', 2)
# -----------------------------------------------------
# ------------------- S-S PathCounts ------------------
# SKNN_PathCount_SAS.compute_similarity_matrix_my(short_train_g, 'S', 'A', 1)
# SKNN_PathCount_SACAS.compute_similarity_matrix_my(sac_train_g, 'S', 'C', 2)
# SKNN_PathCount_SALAS.compute_similarity_matrix_my(sal_train_g, 'S', 'L', 2)
# -----------------------------------------------------
# ------------------- PathCounts for expl -------------
# PathCount_ASA.compute_similarity_matrix(short_train_g, 'A', 'S', 1)
# PathCount_ACA.compute_similarity_matrix(short_train_g, 'A', 'C', 1)
# PathCount_ALA.compute_similarity_matrix(short_train_g, 'A', 'L', 1)
# PathCount_AUA.compute_similarity_matrix(short_train_g, 'A', 'U', 2)
#
# PathCount_SAS.compute_similarity_matrix(short_train_g, 'S', 'A', 1)
# PathCount_SCS.compute_similarity_matrix(short_train_g, 'S', 'C', 2)
# PathCount_SLS.compute_similarity_matrix(short_train_g, 'S', 'L', 2)
#
# PathCount_UAU.compute_similarity_matrix(user_long_train_g, 'U', 'A', 2)
# PathCount_UCU.compute_similarity_matrix(user_long_train_g, 'U', 'C', 3)
# PathCount_ULU.compute_similarity_matrix(user_long_train_g, 'U', 'L', 3)
# -------------------------------------------------------------------------------
# --------------- RECOMMENDATIONS -----------------------------------------------
session_categories = [gm.map_category(a) for a in articles[:i]]
session_timeviews = [gm.map_timeview(test_g, s, a) for a in articles[:i]]
# ------- POP --------------------------
# ------------------- Popularity ----------------------
if 'POP' in methods:
pop.compute_pop(short_train_g)
pop_rec = pop.predict_next(user, articles[:i])
# if len(pop_rec) == 0:
# continue
# else:
methods_to_be_evaluated.append((pop_rec, 'POP'))
# ------- SimRank ----------------------
if 'Simrank' in methods:
simrank_models = []
for subgraph_train in subgraphs_train:
simrank = SimRankRec(number_recommendations)
simrank.compute_similarity_matrix(subgraph_train[0], max_iter=10)
simrank_models.append((simrank,subgraph_train[1]))
for simrank_model in simrank_models:
recommendation = simrank_model[0].predict_next(user, articles[:i], method=2)
if len(recommendation) == 0:
continue
else:
methods_to_be_evaluated.append((recommendation, f'Simrank_{simrank_model[1]}'))
# ------- RWR --------------------------
if 'RWR' in methods:
rwr_models = []
for subgraph_train in subgraphs_train:
RWR = PersonalizedPageRankBasedRec(number_recommendations)
RWR.compute_transition_matrix(subgraph_train[0])
RWR.create_itemitem_matrix()
rwr_models.append((RWR, subgraph_train[1]))
for rwr_model in rwr_models:
recommendation = rwr_model[0].predict_next(user,articles[:i])
if len(recommendation) == 0:
continue
else:
methods_to_be_evaluated.append((recommendation,f'RWR_{rwr_model[1]}'))
metapaths_a = []
if 'Pathsim' in methods:
pathsim_models = []
for subgraph_train in subgraphs_train:
pathsim = PathSimRec(number_recommendations)
                            # NOTE: this rebinds the outer `nodes` (the UI node-type
                            # selection); later branches such as PathCount will see
                            # the node list of the last subgraph processed here.
                            nodes = subgraph_train[1].split('_')
for node in nodes:
if node !='A':
if f'A_{node}_A' not in metapaths_a:
pathsim.compute_similarity_matrix(short_train_g,'A',node,1)
pathsim_models.append((pathsim,f'Pathsim_A_{node}_A'))
metapaths_a.append(f'A_{node}_A')
if ('U' in nodes) and ('S' in nodes) and ('A' in nodes):
pathsim.compute_similarity_matrix(short_train_g,'A','U',2)
pathsim_models.append((pathsim,f'Pathsim_A_S_U_S_A'))
for pathsim_model in pathsim_models:
recommendation = pathsim_model[0].predict_next(user, articles[:i], method=2)
if len(recommendation)==0:
continue
else:
methods_to_be_evaluated.append((recommendation,pathsim_model[1]))
if 'PathCount' in methods:
pathcounts = []
pathcount_models = []
ab_rec = []
sb_rec = []
if 'A' in nodes:
# for path_len in range(1,len(nodes)-1):
for node in nodes:
if node != 'A':
if f'A_{node}_A' not in pathcounts:
pathcounts.append(f'A_{node}_A')
pathsim = PathSimRec(number_recommendations)
pathsim.compute_similarity_matrix(short_train_g,'A',node,1)
pathcount_rec_dict =pathsim.predict_next_by_AB(articles[:i], option='ib',topN=False)
                                        pathcount_models.append((pathsim,f'PathCount_A_{node}_A',list(pathcount_rec_dict.keys())[:number_recommendations],pathcount_rec_dict))
ab_rec.append(list(pathcount_rec_dict.keys())[:number_recommendations])
if ('U' in nodes) and ('S' in nodes) and ('A' in nodes):
pathcounts.append(f'A_S_U_S_A')
pathsim = PathSimRec(number_recommendations)
pathsim.compute_similarity_matrix(short_train_g, 'A', 'U', 2)
pathcount_rec_dict = pathsim.predict_next_by_AB(articles[:i], option='ib', topN=False)
pathcount_models.append((pathsim, f'PathCount_A_S_U_S_A',
list(pathcount_rec_dict.keys())[:number_recommendations],
pathcount_rec_dict))
ab_rec.append(list(pathcount_rec_dict.keys())[:number_recommendations])
#Combine Recs
rec_ab_df = pd.DataFrame(index=set(x for l in ab_rec for x in l), columns=pathcounts)
for a in rec_ab_df.index:
for pathcount in pathcount_models:
if len(pathcount[2])>0:
rec_ab_df.loc[a, f'{pathcount[1][10:]}'] = pathcount[3][a] if a in list(pathcount[3].keys()) else 0
rec_ab_df = rec_ab_df.fillna(0)
for pathcount in pathcount_models:
recommendation = pathcount[2]
if len(recommendation) == 0:
continue
else:
methos_to_be_evaluated_explainable.append((recommendation, f'{pathcount[1]}',rec_ab_df))
# if any(len(m) == 0 for m in methods_to_be_evaluated):
# continue
n_recommendation[tw_i] += 1
# ------- Measuring accuracy ----------------------
# ae.evaluate_recommendation(rec=pop_rec, truth=articles[i], method='POP', s=s)
# ae.evaluate_recommendation(rec=simrank_sal_s_rec, truth=articles[i], method='SimRank_SAL(s)', s=s)
e += f'\n\nuser:{user}'
active_users = gm.get_users(short_train_g)
# e += f'\nactive users : {user in active_users}'
e += f'\nNext Article : {articles[i]}'
for method in methods_to_be_evaluated:
rec_counter = 0
ae.evaluate_recommendation(rec=method[0],truth=articles[i],method=method[1],s=s)
e += f'\n{method[1]}_rec: ['
for rec in method[0]:
rec_counter+=1
if rec == articles[i]:
e+= f'**{rec}**'
n_recommendation[f'{tw_i}_correct'] +=1
else:
e+=f'{rec}'
if rec_counter<len(method[0]):
e+=', '
e+= ']'
for method in methos_to_be_evaluated_explainable:
rec_counter = 0
ae.evaluate_recommendation(rec=method[0], truth=articles[i], method=method[1], s=s)
e += f'\n{method[1]}_rec: ['
for rec in method[0]:
rec_counter += 1
if rec == articles[i]:
e += f'**{rec}**'
n_recommendation[f'{tw_i}_correct'] += 1
else:
e += f'{rec}'
e += ' explained by '
for index in rec_ab_df.columns:
if rec_ab_df[index][rec] > 0:
e += f'{index}: {rec_ab_df[index][rec]} '
if rec_counter < len(method[0]):
e += ', '
e += '] '
rec_ab_df = method[2]
ae.evaluate_session()
ae.evaluate_tw()
# print('- Number of recommendations made:', n_recommendations)
ae.evaluate_total_performance()
avg_n_ses_per_train_per_period = [round(np.mean(l)) for l in train_len_dict.values()]
avg_ses_len_per_period = [round(np.mean(l), 2) for l in avg_ses_len.values()]
# e +=(f'\n\n\nNumber of sessions per user per short train period:\n{sessions_per_user_in_short_term}')
e +=(f'\nNumber of recommendations per time split:{n_recommendation.values()}')
e +=(f'\nTotal # of recs:{sum(n_recommendation.values())}')
e +=(f'\nAverage # sessions per train per period {avg_n_ses_per_train_per_period}')
        e +=(f'\nAverage # articles per session per period {avg_ses_len_per_period}')
e +=(f'\nAverage # sessions in train:{round(np.mean(train_set_len), 2)}')
e +=(f'\nAverage # articles in train:{round(np.mean(n_articles_train), 2)}')
e+=('\n---------- METHODS EVALUATION -------------')
methods = [k for k, v in sorted(ae.precision.items(), key=itemgetter(1), reverse=True)]
for m in methods:
e +=(f'\n--- {m}: Precision:{ae.precision[m]}, NDCG:{ae.ndcg[m]}, ILD:{ae.diversity[m]},Explainability:{ae.explainability[m]}')
# exit()
# --- Create period index for plotting
p_start = tas.time_span_list[1][0]
p_end = tas.time_span_list[len(tas.time_span_list) - 1][1] + datetime.timedelta(days=1)
month_range = pd.date_range(p_start, p_end, freq='M')
p = []
for period in tas.time_span_list:
p.append(datetime.datetime.strftime(period[1], format='%Y-%m-%d'))
traces =[]
for method in methods:
trace = go.Scatter(x = p,y=ae.tw_precision[method],name=method)
traces.append(trace)
return {
'data':traces,
'layout': go.Layout(
xaxis={
'title': 'Time_Period',
},
yaxis = {
'title': 'Precision',
})
}
if __name__ == '__main__':
app.run_server(debug= False,threaded = True)
|
{"hexsha": "7f2503b1572963d66c1e2ac501f9cd91e6afe338", "size": 40698, "ext": "py", "lang": "Python", "max_stars_repo_path": "toy_example/recommend.py", "max_stars_repo_name": "hericonejito/health_recommendations", "max_stars_repo_head_hexsha": "14f3d98df4ab548441dd3bac730175892722dca9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "toy_example/recommend.py", "max_issues_repo_name": "hericonejito/health_recommendations", "max_issues_repo_head_hexsha": "14f3d98df4ab548441dd3bac730175892722dca9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "toy_example/recommend.py", "max_forks_repo_name": "hericonejito/health_recommendations", "max_forks_repo_head_hexsha": "14f3d98df4ab548441dd3bac730175892722dca9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.4059293044, "max_line_length": 472, "alphanum_fraction": 0.4873458155, "include": true, "reason": "import numpy,import networkx", "num_tokens": 8493}
|
import numpy as np
from src.network_elements.network_element import NetworkElement
class LayersLinker(NetworkElement):
def __init__(self, previous_layer_dimension, next_layer_dimension) -> None:
self.previous_layer_dimension = previous_layer_dimension
self.next_layer_dimension = next_layer_dimension
self.W = self.init_random_uniform_matrix(size=(previous_layer_dimension, next_layer_dimension))
self.B = self.init_random_uniform_matrix(size=(1, next_layer_dimension))
self.previous_layer_activated_output = None
self.dLdW = None
self.dLdB = None
def init_random_uniform_matrix(self, size):
low = - np.sqrt(1 / np.sum(size))
high = np.sqrt(1 / np.sum(size))
return np.random.uniform(low=low, high=high, size=size)
def init_random_gaussian_matrix(self, size, mean=0.0, variance=1.0):
return np.random.normal(loc=mean, scale=np.sqrt(variance), size=size)
def forward_propagate(self, A):
self.previous_layer_activated_output = A
Z = (A @ self.W) + self.B
return Z
def backward_propagate(self, dLdZ):
if self.previous_layer_activated_output is None:
raise ValueError("Please forward propagate information before backward propagating.")
(batch_size, _) = dLdZ.shape
self.dLdW = self.previous_layer_activated_output.T @ dLdZ
self.dLdB = np.ones(batch_size).T @ dLdZ
return dLdZ @ self.W.T
def update_weights_and_bias(self, learning_rate):
if self.dLdW is None and self.dLdB is None:
raise ValueError("Please forward propagate and backward propagate before updating parameters.")
self.W -= learning_rate * self.dLdW
self.B -= learning_rate * self.dLdB
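# --- Minimal smoke test (illustrative, not part of the original module) ---
if __name__ == "__main__":
    np.random.seed(0)
    linker = LayersLinker(previous_layer_dimension=4, next_layer_dimension=3)
    A = np.random.randn(8, 4)               # a batch of 8 activation vectors
    Z = linker.forward_propagate(A)         # (8, 3) pre-activations: A @ W + B
    dLdA = linker.backward_propagate(np.ones_like(Z))   # (8, 4) gradient
    linker.update_weights_and_bias(learning_rate=0.01)  # one SGD step
    print(Z.shape, dLdA.shape)              # -> (8, 3) (8, 4)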
|
{"hexsha": "4153b441f71fa78958caa128a12adb8f1cfdc6d8", "size": 1738, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/network_elements/layers_linker.py", "max_stars_repo_name": "Mathieu-R/neurawine", "max_stars_repo_head_hexsha": "9093662ef7df6d0a8c2de8a6aeb9b5598c63b576", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/network_elements/layers_linker.py", "max_issues_repo_name": "Mathieu-R/neurawine", "max_issues_repo_head_hexsha": "9093662ef7df6d0a8c2de8a6aeb9b5598c63b576", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/network_elements/layers_linker.py", "max_forks_repo_name": "Mathieu-R/neurawine", "max_forks_repo_head_hexsha": "9093662ef7df6d0a8c2de8a6aeb9b5598c63b576", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2083333333, "max_line_length": 101, "alphanum_fraction": 0.7151898734, "include": true, "reason": "import numpy", "num_tokens": 419}
|
using Test
using DataFlowTasks
using DataFlowTasks: R,W,RW, execute_dag
using LinearAlgebra
sch = DataFlowTasks.StaticScheduler()
DataFlowTasks.setscheduler!(sch)
include(joinpath(DataFlowTasks.PROJECT_ROOT,"test","testutils.jl"))
@testset "Static scheduler" begin
@testset "Fork-join" begin
m = 50
s = 0.1
nw = Threads.nthreads()
# create the dag
t = fork_join(m,s)
execute_dag(sch)
fork_join(m,s)
t1 = @elapsed execute_dag(sch)
t2 = (2+ceil(m/nw))*s
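        # (added note, based on the fork_join shape assumed in testutils.jl:)
        # the fork and join stages each sleep s seconds, and the m middle
        # tasks run in ceil(m/nw) rounds on nw threads, hence (2+ceil(m/nw))*s.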
# test that ideal vs actual time are close
@test abs(t1-t2) < 1e-2
end
@testset "Tiled cholesky factorization" begin
m = 1000
bsize = div(m,5)
# create an SPD matrix
A = rand(m,m)
A = (A + adjoint(A))/2
A = A + m*I
t = tiled_cholesky(A,bsize)
execute_dag(sch)
F = fetch(t)
@test F.L*F.U ≈ A
end
@testset "Tiled lu factorization" begin
m = 1000
bsize = div(m,5)
A = rand(m,m)
t = tiled_lu(A,bsize)
execute_dag(sch)
F = fetch(t)
@test F.L*F.U ≈ A
end
end
|
{"hexsha": "d1fc9443883feaaf33a5626545f445bc511235e4", "size": 1160, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/staticscheduler_test.jl", "max_stars_repo_name": "maltezfaria/DataFlowTasks.jl", "max_stars_repo_head_hexsha": "5fda1dfa60f381cdb3f3164c95aa6beb5b2b8ef6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-18T08:02:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T09:08:59.000Z", "max_issues_repo_path": "test/staticscheduler_test.jl", "max_issues_repo_name": "maltezfaria/DataFlowTasks.jl", "max_issues_repo_head_hexsha": "5fda1dfa60f381cdb3f3164c95aa6beb5b2b8ef6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/staticscheduler_test.jl", "max_forks_repo_name": "maltezfaria/DataFlowTasks.jl", "max_forks_repo_head_hexsha": "5fda1dfa60f381cdb3f3164c95aa6beb5b2b8ef6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6734693878, "max_line_length": 67, "alphanum_fraction": 0.5568965517, "num_tokens": 369}
|
from neat.population import Population
from neat.neural_network import NeuralNetwork, CTRNN, Neuron, Connection
from neat.genome import Genome
from neat.evolution import Neat, TrainTask
from hyperneat.substrate import Substrate
from hyperneat.spatial_node import SpatialNode, SpatialNodeType
import json
import copy
import random
import math
import numpy as np
# TODO: make CPPN inputs functions file
# vector: [x1, y1, x2, y2]
def euclidean_distance(vector):
    # straight-line distance between substrate points (x1, y1) and (x2, y2)
    return math.sqrt((vector[0] - vector[2]) ** 2 + (vector[1] - vector[3]) ** 2)
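# Illustrative check: euclidean_distance([0.0, 0.0, 3.0, 4.0]) == 5.0, the
# distance between the points (0, 0) and (3, 4).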
class Hyperneat:
def __init__(self):
self.ea = Neat()
self.substrate = Substrate()
self.connection_threshold = 0.0
self.max_connection_weight = 0.0
# For modular CTRNN
self.max_bias = 0.0
self.max_delay = 0.0
def import_config(self, config_file):
try:
config = json.load(config_file)
except ValueError:
print('HyperNEAT: Invalid config file')
return False
self.connection_threshold = float(config['connectionThreshold'])
self.max_connection_weight = float(config['maxConnectionWeight'])
        self.substrate.import_substrate(config['Substrate'])
        return True
def build_substrate(self, organism, net):
neuron_count = len(self.substrate.nodes)
net.clear()
net.num_inputs = self.substrate.input_count
net.num_outputs = self.substrate.output_count
hidden_count = neuron_count - (net.num_inputs + net.num_outputs)
hidden_offset = net.num_inputs + net.num_outputs
net.neurons = [Neuron(self.substrate.activation_function) for i in range(neuron_count)]
if hidden_count > 0:
for i in range(net.num_inputs):
inputs = [0.0] * 5
inputs[0] = self.substrate.nodes[i].coordinates[0]
inputs[1] = self.substrate.nodes[i].coordinates[1]
for j in range(hidden_count):
inputs[2] = self.substrate.nodes[hidden_offset + j].coordinates[0]
inputs[3] = self.substrate.nodes[hidden_offset + j].coordinates[1]
inputs[4] = euclidean_distance(inputs)
outputs = organism.eval(inputs)
weight = outputs()[0] * self.max_connection_weight
if math.fabs(weight) > self.connection_threshold:
connection = Connection(i, hidden_offset + j, weight)
net.add_connection(connection)
for i in range(hidden_count):
inputs = [0.0] * 5
inputs[0] = self.substrate.nodes[hidden_offset + i].coordinates[0]
inputs[1] = self.substrate.nodes[hidden_offset + i].coordinates[1]
for j in range(net.num_outputs):
inputs[2] = self.substrate.nodes[net.num_inputs + j].coordinates[0]
inputs[3] = self.substrate.nodes[net.num_inputs + j].coordinates[1]
inputs[4] = euclidean_distance(inputs)
outputs = organism.eval(inputs)
weight = outputs()[0] * self.max_connection_weight
if math.fabs(weight) > self.connection_threshold:
connection = Connection(hidden_offset + i, net.num_inputs + j, weight)
net.add_connection(connection)
else:
for i in range(net.num_inputs):
inputs = [0.0] * 5
inputs[0] = self.substrate.nodes[i].coordinates[0]
inputs[1] = self.substrate.nodes[i].coordinates[1]
for j in range(net.num_outputs):
inputs[2] = self.substrate.nodes[net.num_inputs + j].coordinates[0]
inputs[3] = self.substrate.nodes[net.num_inputs + j].coordinates[1]
inputs[4] = euclidean_distance(inputs)
outputs = organism.eval(inputs)
weight = outputs()[0] * self.max_connection_weight
if math.fabs(weight) > self.connection_threshold:
connection = Connection(i, net.num_inputs + j, weight)
net.add_connection(connection)
return True
# TODO: Inheritance
def build_modular_substrate(self, organism, substrate_set, intra_conn_table, inter_conn_table):
# Instance of modular Continuous Recurrent Neural Network
net = CTRNN([], [], 0, 0)
# Total amount of input, output and total neurons
num_inputs = 0
num_outputs = 0
neuron_cnt = 0
# Map of the substrate nodes to the CTRNN
node_gene_map = {}
# Get CPPN network
cppn = organism.build_phenotype()
for idx, s in enumerate(substrate_set):
num_inputs += s.input_count
num_outputs += s.output_count
for idy, n in enumerate(s.nodes):
# Mapping by substrate pos, node pos in the respective substrate
sn_id = (idx, idy)
node_gene_map[sn_id] = neuron_cnt
# Get delay and bias
cppn_input_data = np.zeros(8) # cppn.num_inputs
x1, y1 = n.coordinates
# Just first four inputs set, the rest is zero
cppn_input_data[0] = s.coordinates[0]
cppn_input_data[1] = s.coordinates[1]
cppn_input_data[2] = x1
cppn_input_data[3] = y1
cppn.reset_values()
cppn.input(cppn_input_data)
cppn.concurrent_activation()
                # Neuron delay and bias parameters for CTRNN activation
delay = np.fabs(cppn.output()[1]) * self.max_delay
if delay < 0.1: # TODO: redefine output CPPN limits
delay = 0.1
bias = cppn.output()[2] * self.max_bias
new_neuron = Neuron(n.function, max_output=3.0)
new_neuron.delay = delay
new_neuron.bias = bias
net.neurons.append(new_neuron)
# Register the id of input and output neurons
if n.node_type == SpatialNodeType.INPUT:
net.in_neurons.append(neuron_cnt)
if n.node_type == SpatialNodeType.OUTPUT:
net.neurons[neuron_cnt].max_output = np.deg2rad(60)
net.out_neurons.append(neuron_cnt)
neuron_cnt += 1
# Assuming every substrate module is equal
for c in intra_conn_table[idx]:
cppn_input_data = np.zeros(8) # cppn.num_inputs
x1, y1 = s.nodes[c[0]].coordinates
x2, y2 = s.nodes[c[1]].coordinates
cppn_input_data[0] = s.coordinates[0]
cppn_input_data[1] = s.coordinates[1]
cppn_input_data[2] = x1
cppn_input_data[3] = y1
cppn_input_data[4] = s.coordinates[0]
cppn_input_data[5] = s.coordinates[1]
cppn_input_data[6] = x2
cppn_input_data[7] = y2
cppn.reset_values()
cppn.input(cppn_input_data)
cppn.concurrent_activation()
# Intra substrate connection weight
w = cppn.output()[0] * self.max_connection_weight
if math.fabs(w) > self.connection_threshold:
source = node_gene_map[(idx, c[0])]
target = node_gene_map[(idx, c[1])]
net.connections.append(Connection(source, target, w))
# Compute inter substrate connections
for c in inter_conn_table:
cppn_input_data = np.zeros(8) # cppn.num_inputs
# Source and target substrate
s_substrate, t_substrate = substrate_set[c[0]], substrate_set[c[2]]
xm1, ym1 = s_substrate.coordinates
x1, y1 = s_substrate.nodes[c[1]].coordinates
xm2, ym2 = t_substrate.coordinates
x2, y2 = t_substrate.nodes[c[3]].coordinates
cppn_input_data[0] = xm1
cppn_input_data[1] = ym1
cppn_input_data[2] = x1
cppn_input_data[3] = y1
cppn_input_data[4] = xm2
cppn_input_data[5] = ym2
cppn_input_data[6] = x2
cppn_input_data[7] = y2
cppn.reset_values()
cppn.input(cppn_input_data)
cppn.concurrent_activation()
# Inter substrate connection weight
w = cppn.output()[0] * self.max_connection_weight
if math.fabs(w) > self.connection_threshold:
source = node_gene_map[(c[0], c[1])]
target = node_gene_map[(c[2], c[3])]
net.connections.append(Connection(source, target, w))
net.num_inputs = num_inputs
net.num_outputs = num_outputs
return net
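# --- Usage sketch (illustrative; the config path and the `organism` CPPN
# genome are placeholders, and NeuralNetwork construction is elided) ---
#     hn = Hyperneat()
#     with open('hyperneat_config.json') as f:
#         hn.import_config(f)
#     net = NeuralNetwork(...)            # empty phenotype container
#     hn.build_substrate(organism, net)   # query the CPPN for every node pair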
|
{"hexsha": "0546d3ac37f00397400915b8482160255649c973", "size": 7281, "ext": "py", "lang": "Python", "max_stars_repo_path": "hyperneat/evolution.py", "max_stars_repo_name": "pabloreyesrobles/py-hyperneat", "max_stars_repo_head_hexsha": "3a651b5955fe5d5b4abe2d6abeb161a4d1e6845a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-28T23:20:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-28T23:20:16.000Z", "max_issues_repo_path": "hyperneat/evolution.py", "max_issues_repo_name": "pabloreyesrobles/py-hyperneat", "max_issues_repo_head_hexsha": "3a651b5955fe5d5b4abe2d6abeb161a4d1e6845a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hyperneat/evolution.py", "max_forks_repo_name": "pabloreyesrobles/py-hyperneat", "max_forks_repo_head_hexsha": "3a651b5955fe5d5b4abe2d6abeb161a4d1e6845a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8516949153, "max_line_length": 96, "alphanum_fraction": 0.7033374536, "include": true, "reason": "import numpy", "num_tokens": 2108}
|
/-
Copyright (c) 2022 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import analysis.normed_space.dual
import analysis.normed_space.star.basic
import analysis.complex.basic
import analysis.inner_product_space.adjoint
import algebra.star.subalgebra
/-!
# Von Neumann algebras
We give the "abstract" and "concrete" definitions of a von Neumann algebra.
We still have a major project ahead of us to show the equivalence between these definitions!
An abstract von Neumann algebra `wstar_algebra M` is a C^* algebra with a Banach space predual,
per Sakai (1971).
A concrete von Neumann algebra `von_neumann_algebra H` (where `H` is a Hilbert space)
is a *-closed subalgebra of bounded operators on `H` which is equal to its double commutant.
We'll also need to prove the von Neumann double commutant theorem,
that the concrete definition is equivalent to a *-closed subalgebra which is weakly closed.
-/
universes u v
/--
Sakai's definition of a von Neumann algebra as a C^* algebra with a Banach space predual.
So that we can unambiguously talk about these "abstract" von Neumann algebras
in parallel with the "concrete" ones (weakly closed *-subalgebras of B(H)),
we name this definition `wstar_algebra`.
Note that for now we only assert the mere existence of a predual, rather than picking one.
This may later prove problematic, and need to be revisited.
Picking one may cause problems with definitional unification of different instances.
On the other hand, not picking one means that the weak-* topology
(which depends on a choice of predual) must be defined using the choice,
and we may be unhappy with the resulting opaqueness of the definition.
-/
class wstar_algebra (M : Type u) [normed_ring M] [star_ring M] [cstar_ring M]
[module ℂ M] [normed_algebra ℂ M] [star_module ℂ M] :=
(exists_predual : ∃ (X : Type u) [normed_add_comm_group X] [normed_space ℂ X] [complete_space X],
nonempty (normed_space.dual ℂ X ≃ₗᵢ⋆[ℂ] M))
-- TODO: Without this, `von_neumann_algebra` times out. Why?
set_option old_structure_cmd true
/--
The double commutant definition of a von Neumann algebra,
as a *-closed subalgebra of bounded operators on a Hilbert space,
which is equal to its double commutant.
Note that this definition is parameterised by the Hilbert space
on which the algebra faithfully acts, as is standard in the literature.
See `wstar_algebra` for the abstract notion (a C^*-algebra with Banach space predual).
Note this is a bundled structure, parameterised by the Hilbert space `H`,
rather than a typeclass on the type of elements.
Thus we can't say that the bounded operators `H →L[ℂ] H` form a `von_neumann_algebra`
(although we will later construct the instance `wstar_algebra (H →L[ℂ] H)`),
and instead will use `⊤ : von_neumann_algebra H`.
-/
@[nolint has_nonempty_instance]
structure von_neumann_algebra (H : Type u)
[normed_add_comm_group H] [inner_product_space ℂ H] [complete_space H] extends
star_subalgebra ℂ (H →L[ℂ] H) :=
(centralizer_centralizer' :
set.centralizer (set.centralizer carrier) = carrier)
/--
Consider a von Neumann algebra acting on a Hilbert space `H` as a *-subalgebra of `H →L[ℂ] H`.
(That is, we forget that it is equal to its double commutant
or equivalently that it is closed in the weak and strong operator topologies.)
-/
add_decl_doc von_neumann_algebra.to_star_subalgebra
namespace von_neumann_algebra
variables {H : Type u} [normed_add_comm_group H] [inner_product_space ℂ H] [complete_space H]
instance : set_like (von_neumann_algebra H) (H →L[ℂ] H) :=
⟨von_neumann_algebra.carrier, λ S T h, by cases S; cases T; congr'⟩
instance : star_mem_class (von_neumann_algebra H) (H →L[ℂ] H) :=
{ star_mem := λ s a, s.star_mem' }
instance : subring_class (von_neumann_algebra H) (H →L[ℂ] H) :=
{ add_mem := add_mem',
mul_mem := mul_mem',
one_mem := one_mem',
  zero_mem := zero_mem',
neg_mem := λ s a ha, show -a ∈ s.to_star_subalgebra, from neg_mem ha }
@[simp] lemma mem_carrier {S : von_neumann_algebra H} {x : H →L[ℂ] H}:
x ∈ S.carrier ↔ x ∈ (S : set (H →L[ℂ] H)) := iff.rfl
@[ext] theorem ext {S T : von_neumann_algebra H} (h : ∀ x, x ∈ S ↔ x ∈ T) : S = T :=
set_like.ext h
@[simp] lemma centralizer_centralizer (S : von_neumann_algebra H) :
set.centralizer (set.centralizer (S : set (H →L[ℂ] H))) = S := S.centralizer_centralizer'
/-- The centralizer of a `von_neumann_algebra`, as a `von_neumann_algebra`.-/
def commutant (S : von_neumann_algebra H) : von_neumann_algebra H :=
{ carrier := set.centralizer (S : set (H →L[ℂ] H)),
centralizer_centralizer' := by rw S.centralizer_centralizer,
.. star_subalgebra.centralizer ℂ (S : set (H →L[ℂ] H)) (λ a (ha : a ∈ S), (star_mem ha : _)) }
@[simp] lemma coe_commutant (S : von_neumann_algebra H) :
↑S.commutant = set.centralizer (S : set (H →L[ℂ] H)) := rfl
@[simp] lemma mem_commutant_iff {S : von_neumann_algebra H} {z : H →L[ℂ] H} :
z ∈ S.commutant ↔ ∀ g ∈ S, g * z = z * g :=
iff.rfl
@[simp] lemma commutant_commutant (S : von_neumann_algebra H) :
S.commutant.commutant = S :=
set_like.coe_injective S.centralizer_centralizer'
end von_neumann_algebra
|
{"author": "leanprover-community", "repo": "mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "save_path": "github-repos/lean/leanprover-community-mathlib", "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/analysis/von_neumann_algebra/basic.lean"}
|
import itertools
import os
from tqdm import tqdm
import numpy as np
from absl import flags
from absl import app
import pickle
import util
import sys
import glob
import data
import rouge_functions
FLAGS = flags.FLAGS
if 'dataset_name' not in flags.FLAGS:
flags.DEFINE_string('dataset_name', 'cnn_dm', 'Which dataset to use. Can be {duc_2004, tac_2011, etc}')
if 'dataset_split' not in flags.FLAGS:
flags.DEFINE_string('dataset_split', 'train', 'Which dataset split to use. Must be one of {train, val (or dev), test}')
if 'sentence_limit' not in flags.FLAGS:
flags.DEFINE_integer('sentence_limit', 2, 'Max number of sentences to include for merging.')
if 'num_instances' not in flags.FLAGS:
flags.DEFINE_integer('num_instances', -1,
'Number of instances to run for before stopping. Use -1 to run on all instances.')
if 'mode' not in flags.FLAGS:
flags.DEFINE_string('mode', 'write', 'Can be {write, evaluate}')
if 'train_dataset' not in flags.FLAGS:
flags.DEFINE_string('train_dataset', 'cnn_dm', 'Can be {cnn_dm, gigaword}')
FLAGS(sys.argv)
import convert_data
# import lambdamart_scores_to_summaries
# import preprocess_for_lambdamart_no_flags
data_dir = os.path.expanduser('~') + '/data/tf_data/with_coref_and_ssi'
ssi_dir = 'data/ssi'
names_to_types = [('raw_article_sents', 'string_list'), ('similar_source_indices', 'delimited_list_of_tuples'), ('summary_text', 'string'), ('doc_indices', 'delimited_list')]
if FLAGS.dataset_name == 'duc_2004':
names_to_types[2] = ('summary_text', 'string_list')
# names_to_types = [('raw_article_sents', 'string_list'), ('article', 'string'), ('abstract', 'string_list'), ('doc_indices', 'string')]
# names_to_types = [('raw_article_sents', 'string_list')]
min_matched_tokens = 1
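# In 'write' mode, each (example, source-sentence-indices) pair produces one
# line in <split>.Ndocument (the selected source sentences joined together),
# one line in <split>.Nsummary (the matching ground-truth summary sentence;
# written for non-test splits only), and one line in <split>.Nexampleidx
# (the index of the example the pair came from).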
def main(unused_argv):
print('Running statistics on %s' % FLAGS.dataset_name)
if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly
raise Exception("Problem with flags: %s" % unused_argv)
out_dir = os.path.join(os.path.expanduser('~') + '/data/kaiqiang_data', FLAGS.dataset_name)
if FLAGS.mode == 'write':
util.create_dirs(out_dir)
if FLAGS.dataset_name == 'duc_2004':
dataset_splits = ['test']
elif FLAGS.dataset_split == 'all':
dataset_splits = ['test', 'val', 'train']
else:
dataset_splits = [FLAGS.dataset_split]
for dataset_split in dataset_splits:
if dataset_split == 'test':
ssi_data_path = os.path.join('logs/%s_bert_both_sentemb_artemb_plushidden' % FLAGS.dataset_name, 'ssi.pkl')
                print(util.bcolors.OKGREEN + "Loading SSI from BERT at %s" % ssi_data_path + util.bcolors.ENDC)
                with open(ssi_data_path, 'rb') as f:  # pickle files must be opened in binary mode
ssi_triple_list = pickle.load(f)
source_dir = os.path.join(data_dir, FLAGS.dataset_name)
source_files = sorted(glob.glob(source_dir + '/' + dataset_split + '*'))
total = len(source_files) * 1000 if ('cnn' in FLAGS.dataset_name or 'newsroom' in FLAGS.dataset_name or 'xsum' in FLAGS.dataset_name) else len(source_files)
example_generator = data.example_generator(source_dir + '/' + dataset_split + '*', True, False,
should_check_valid=False)
out_document_path = os.path.join(out_dir, dataset_split + '.Ndocument')
out_summary_path = os.path.join(out_dir, dataset_split + '.Nsummary')
out_example_idx_path = os.path.join(out_dir, dataset_split + '.Nexampleidx')
doc_writer = open(out_document_path, 'w')
if dataset_split != 'test':
sum_writer = open(out_summary_path, 'w')
ex_idx_writer = open(out_example_idx_path, 'w')
for example_idx, example in enumerate(tqdm(example_generator, total=total)):
if FLAGS.num_instances != -1 and example_idx >= FLAGS.num_instances:
break
raw_article_sents, groundtruth_similar_source_indices_list, groundtruth_summary_text, doc_indices = util.unpack_tf_example(
example, names_to_types)
article_sent_tokens = [util.process_sent(sent) for sent in raw_article_sents]
if FLAGS.dataset_name == 'duc_2004':
groundtruth_summ_sents = [[sent.strip() for sent in gt_summ_text.strip().split('\n')] for gt_summ_text in groundtruth_summary_text]
else:
groundtruth_summ_sents = [[sent.strip() for sent in groundtruth_summary_text.strip().split('\n')]]
if doc_indices is None:
doc_indices = [0] * len(util.flatten_list_of_lists(article_sent_tokens))
doc_indices = [int(doc_idx) for doc_idx in doc_indices]
# rel_sent_indices, _, _ = preprocess_for_lambdamart_no_flags.get_rel_sent_indices(doc_indices, article_sent_tokens)
if dataset_split == 'test':
if example_idx >= len(ssi_triple_list):
raise Exception('Len of ssi list (%d) is less than number of examples (>=%d)' % (len(ssi_triple_list), example_idx))
ssi_length_extractive = ssi_triple_list[example_idx][2]
ssi = ssi_triple_list[example_idx][1]
ssi = ssi[:ssi_length_extractive]
groundtruth_similar_source_indices_list = ssi
else:
groundtruth_similar_source_indices_list = util.enforce_sentence_limit(groundtruth_similar_source_indices_list, FLAGS.sentence_limit)
for ssi_idx, ssi in enumerate(groundtruth_similar_source_indices_list):
if len(ssi) == 0:
continue
my_article = ' '.join(util.reorder(raw_article_sents, ssi))
doc_writer.write(my_article + '\n')
if dataset_split != 'test':
sum_writer.write(groundtruth_summ_sents[0][ssi_idx] + '\n')
ex_idx_writer.write(str(example_idx) + '\n')
elif FLAGS.mode == 'evaluate':
summary_dir = '/home/logan/data/kaiqiang_data/logan_ACL/trained_on_' + FLAGS.train_dataset + '/' + FLAGS.dataset_name
out_summary_path = os.path.join(summary_dir, 'test' + 'Summary.txt')
out_example_idx_path = os.path.join(out_dir, 'test' + '.Nexampleidx')
decode_dir = 'logs/kaiqiang_%s_trainedon%s' % (FLAGS.dataset_name, FLAGS.train_dataset)
rouge_ref_dir = os.path.join(decode_dir, 'reference')
rouge_dec_dir = os.path.join(decode_dir, 'decoded')
util.create_dirs(rouge_ref_dir)
util.create_dirs(rouge_dec_dir)
def num_lines_in_file(file_path):
with open(file_path) as f:
num_lines = sum(1 for line in f)
return num_lines
def process_example(sents, ex_idx, groundtruth_summ_sents):
final_decoded_words = []
for sent in sents:
final_decoded_words.extend(sent.split(' '))
rouge_functions.write_for_rouge(groundtruth_summ_sents, None, ex_idx, rouge_ref_dir, rouge_dec_dir, decoded_words=final_decoded_words, log=False)
num_lines_summary = num_lines_in_file(out_summary_path)
num_lines_example_indices = num_lines_in_file(out_example_idx_path)
if num_lines_summary != num_lines_example_indices:
raise Exception('Num lines summary != num lines example indices: (%d, %d)' % (num_lines_summary, num_lines_example_indices))
source_dir = os.path.join(data_dir, FLAGS.dataset_name)
example_generator = data.example_generator(source_dir + '/' + 'test' + '*', True, False,
should_check_valid=False)
        sum_reader = open(out_summary_path)
        ex_idx_reader = open(out_example_idx_path)
        prev_ex_idx = 0
        sents = []
        for line_idx in tqdm(range(num_lines_summary)):
            line = sum_reader.readline()
            ex_idx = int(ex_idx_reader.readline())
if ex_idx == prev_ex_idx:
sents.append(line)
else:
                example = next(example_generator)
raw_article_sents, groundtruth_similar_source_indices_list, groundtruth_summary_text, doc_indices = util.unpack_tf_example(
example, names_to_types)
if FLAGS.dataset_name == 'duc_2004':
groundtruth_summ_sents = [[sent.strip() for sent in gt_summ_text.strip().split('\n')] for gt_summ_text in groundtruth_summary_text]
else:
groundtruth_summ_sents = [[sent.strip() for sent in groundtruth_summary_text.strip().split('\n')]]
process_example(sents, ex_idx, groundtruth_summ_sents)
prev_ex_idx = ex_idx
sents = [line]
        example = next(example_generator)
raw_article_sents, groundtruth_similar_source_indices_list, groundtruth_summary_text, doc_indices = util.unpack_tf_example(
example, names_to_types)
if FLAGS.dataset_name == 'duc_2004':
groundtruth_summ_sents = [[sent.strip() for sent in gt_summ_text.strip().split('\n')] for gt_summ_text in groundtruth_summary_text]
else:
groundtruth_summ_sents = [[sent.strip() for sent in groundtruth_summary_text.strip().split('\n')]]
process_example(sents, ex_idx, groundtruth_summ_sents)
print("Now starting ROUGE eval...")
        # Both branches of the original dataset check set the same value,
        # so a single l_param is used for every dataset.
        l_param = 100
results_dict = rouge_functions.rouge_eval(rouge_ref_dir, rouge_dec_dir, l_param=l_param)
rouge_functions.rouge_log(results_dict, decode_dir)
else:
raise Exception('mode flag was not evaluate or write.')
if __name__ == '__main__':
app.run(main)
|
{"hexsha": "ddd609f6aa67bfffc88d6ae109f9190d319b9425", "size": 10276, "ext": "py", "lang": "Python", "max_stars_repo_path": "kaiqiang_data.py", "max_stars_repo_name": "loganlebanoff/correct_summarization", "max_stars_repo_head_hexsha": "cec0d5401ddb5f7c33aca14f31da68b2f8092c53", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-07-20T14:57:39.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-01T11:14:40.000Z", "max_issues_repo_path": "kaiqiang_data.py", "max_issues_repo_name": "loganlebanoff/correct_summarization", "max_issues_repo_head_hexsha": "cec0d5401ddb5f7c33aca14f31da68b2f8092c53", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kaiqiang_data.py", "max_forks_repo_name": "loganlebanoff/correct_summarization", "max_forks_repo_head_hexsha": "cec0d5401ddb5f7c33aca14f31da68b2f8092c53", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.6425120773, "max_line_length": 175, "alphanum_fraction": 0.6306928766, "include": true, "reason": "import numpy", "num_tokens": 2282}
|
# Adapted from https://github.com/bensadeghi/DecisionTree.jl
__precompile__()
module Estimators
import Base: length, convert, promote_rule, show, start, next, done
export Estimator, Leaf, Node,
depth,
fit_regression_tree,
predict, assign_leaves
float(x) = map(Float64, x)
neg(arr) = map(!, arr) # `.!arr` is invalid in 0.5, and `!arr` triggers a warning in 0.6.
const NO_BEST=(0,0)
# converts a boolean vector into an integer by summing the appropriate powers of 2
boolvec2int(v::Vector{Bool}) = sum(p*b for (p, b) in zip(2.^(0:(length(v)-1)), v))
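# Example: boolvec2int([true, false, true]) == 1*1 + 2*0 + 4*1 == 5 (bit i contributes 2^i).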
abstract type Estimator
end
immutable Leaf <: Estimator
id::Integer
value::Real
end
length(leaf::Leaf) = 1
depth(leaf::Leaf) = 0
Leaf(id::Vector{Bool}, value::Real) = Leaf(boolvec2int(id), value)
Leaf(value::Real) = Leaf(0,value)
immutable Node <: Estimator
node_id::Integer
featid::Integer
featval::Any
left::Union{Leaf,Node}
right::Union{Leaf,Node}
end
length(tree::Node) = length(tree.left) + length(tree.right)
depth(tree::Node) = 1 + max(depth(tree.left), depth(tree.right))
function Node(id::Vector{Bool}, featid::Integer, featval::Any, left::Union{Leaf,Node}, right::Union{Leaf,Node})
return Node(boolvec2int(id), featid, featval, left, right)
end
convert(::Type{Node}, x::Leaf) = Node(0, 0, nothing, x, Leaf(0, 0.0)) # makes a node with that leaf as its left node and a dummy right leaf
promote_rule(::Type{Node}, ::Type{Leaf}) = Node # this tells Julia to call convert(Node, leaf) on the leaf when promotion happens
promote_rule(::Type{Leaf}, ::Type{Node}) = Node
function fit_regression_tree{T<:Float64, U<:Real}(X::Matrix{U}, Y::Vector{T}; min_samples_leaf=5, max_depth=-1, node_id::Vector{Bool}=[true])
if max_depth < -1
error("Unexpected value for max_depth: $(max_depth) (expected: max_depth >= 0, or max_depth = -1 for infinite depth)")
end
if length(Y) <= min_samples_leaf || max_depth==0
return Leaf(node_id, mean(Y))
end
S = _split_mse(X, Y)
if S == NO_BEST
return Leaf(node_id, mean(Y))
end
feat_id, thresh = S
split = X[:,feat_id] .< thresh
return Node(node_id, feat_id, thresh,
fit_regression_tree(X[split,:], Y[split],
min_samples_leaf=min_samples_leaf, max_depth=max(max_depth-1, -1), node_id=vcat(true,node_id)),
fit_regression_tree(X[neg(split),:], Y[neg(split)],
min_samples_leaf=min_samples_leaf, max_depth=max(max_depth-1, -1), node_id=vcat(false,node_id))
)
end
function _split_mse{T<:Float64, U<:Real}(X::Matrix{U}, Y::Vector{T})
N, p = size(X)
best = NO_BEST
best_val = -Inf
for i in 1:p
ord = sortperm(X[:,i])
X_i = X[ord,i]
Y_i = Y[ord]
if N > 100
if VERSION >= v"0.4.0-dev"
domain_i = quantile(X_i, linspace(0.01, 0.99, 99);
sorted=true)
else # sorted=true isn't supported on StatsBase's Julia 0.3 version
domain_i = quantile(X_i, linspace(0.01, 0.99, 99))
end
else
domain_i = X_i
end
value, thresh = _best_mse_loss(X_i, Y_i, domain_i)
if value > best_val
best_val = value
best = (i, thresh)
end
end
return best
end
function _best_mse_loss{T<:Float64, U<:Real}(X::Vector{U}, Y::Vector{T}, domain)
# True, but costly assert. However, see
# https://github.com/JuliaStats/StatsBase.jl/issues/164
# @assert issorted(X) && issorted(domain)
best_val = -Inf
best_thresh = 0.0
s_l = s2_l = zero(T)
su = sum(Y)::T
su2 = zero(T); for l in Y su2 += l*l end # sum of squares
nl = 0
n = length(Y)
i = 1
# Because the `X` are sorted, below is an O(N) algorithm for finding
# the optimal threshold amongst `domain`. We simply iterate through the
# array and update s_l and s_r (= sum(Y) - s_l) as we go. - @cstjean
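    # For each side of a candidate split, s2 - s^2/n equals
    # sum(y^2) - (sum(y))^2/n = n*Var(Y), so `loss` below is the total
    # within-split variance and maximizing -loss implements the MSE criterion.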
@inbounds for thresh in domain
while i <= length(Y) && X[i] < thresh
l = Y[i]
s_l += l
s2_l += l*l
nl += 1
i += 1
end
s_r = su - s_l
s2_r = su2 - s2_l
nr = n - nl
# This check is necessary I think because in theory all Y could
# be the same, then either nl or nr would be 0. - @cstjean
if nr > 0 && nl > 0
loss = s2_l - s_l^2/nl + s2_r - s_r^2/nr
if -loss > best_val
best_val = -loss
best_thresh = thresh
end
end
end
return best_val, best_thresh
end
predict(leaf::Leaf, feature::Vector) = leaf.value
function predict(tree::Node, X::Vector)
if tree.featval == nothing
return predict(tree.left, X)
elseif X[tree.featid] < tree.featval
return predict(tree.left, X)
else
return predict(tree.right, X)
end
end
function predict(tree::Union{Leaf,Node}, X::Matrix)
N = size(X,1)
predictions = Array{Any}(N)
for i in 1:N
predictions[i] = predict(tree, X[i,:])
end
println("predictions ->")
println(predictions)
if typeof(predictions[1]) <: Float64
return float(predictions)
else
return predictions
end
end
function predict(estimator_dict::Dict, X::Matrix)
return Dict(k=>predict(v,X) for (k,v) in estimator_dict)
end
assign_leaves(leaf::Leaf, feature::Vector) = leaf.id
function assign_leaves(tree::Node, X::Vector)
if tree.featval == nothing
return assign_leaves(tree.left, X)
elseif X[tree.featid] < tree.featval
return assign_leaves(tree.left, X)
else
return assign_leaves(tree.right, X)
end
end
function assign_leaves(tree::Union{Leaf,Node}, X::Matrix)
N = size(X,1)
assignments = Array{Int}(N)
for i in 1:N
assignments[i] = assign_leaves(tree, X[i,:])
end
return assignments
end
function assign_leaves(estimator_dict::Dict, X::Matrix)
return Dict(k=>assign_leaves(v,X) for (k,v) in estimator_dict)
end
end #module
|
{"hexsha": "259d065793e1111f43fa668bcd639df736878852", "size": 6104, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Julia/estimators.jl", "max_stars_repo_name": "naskoD/bachelorThesis", "max_stars_repo_head_hexsha": "028ffe0990df9fc72f43024eae67d968dbfb7ae6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-04T09:53:36.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-04T09:53:36.000Z", "max_issues_repo_path": "src/Julia/estimators.jl", "max_issues_repo_name": "naskoD/bachelorThesis", "max_issues_repo_head_hexsha": "028ffe0990df9fc72f43024eae67d968dbfb7ae6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Julia/estimators.jl", "max_forks_repo_name": "naskoD/bachelorThesis", "max_forks_repo_head_hexsha": "028ffe0990df9fc72f43024eae67d968dbfb7ae6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.52, "max_line_length": 141, "alphanum_fraction": 0.6087811271, "num_tokens": 1796}
|
from __future__ import print_function
from scipy.misc import imsave
import image
import os
import struct
import json
class BasicRunner(object):
def __init__(self, config, optimizer):
self.config = config
self.optimizer = optimizer
def run(self, (initial_image, initial_loss)):
self.save_image(initial_image, 'iteration_0.jpg')
for i in range(self.config.iterations):
self.run_once(i + 1)
def run_once(self, iteration):
(img, loss) = self.optimizer.optimize()
file_name = self.save_image(img, 'iteration_%d.jpg' % iteration)
return self.log_img(iteration, file_name, loss)
def log_img(self, iteration, file_name, loss):
return
def save_image(self, img, name):
img = image.deprocess_image(img, self.config.img_size)
file_name = self.config.output_path + '/' + name
imsave(file_name, img)
return os.path.abspath(file_name)
class CLIRunner(BasicRunner):
def log_img(self, iteration, file_name, loss):
print('Iteration %d finished!' % iteration)
print('Image saved as: ', file_name)
print('Current loss: ', loss)
class PortRunner(BasicRunner):
def __init__(self, config, optimizer):
super(PortRunner, self).__init__(config, optimizer)
self.input = 3
self.output = 4
self.packet_size = 4
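        # Wire format used below: each message is a self.packet_size-byte
        # big-endian unsigned length header (struct ">I") followed by that
        # many payload bytes; see receive_input() and send_response().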
def run(self, (initial_image, initial_loss)):
file_name = self.save_image(initial_image, 'iteration_0.jpg')
self.send_response(self.log_img(0, file_name, initial_loss))
n = 0
while True:
input_received = self.receive_input()
if input_received == "CONT":
n += 1
response = self.run_once(n)
self.send_response(response)
else:
break
def log_img(self, iteration, file_name, loss):
response = json.dumps({'iteration': iteration, 'file_name': file_name, 'loss': str(loss)})
f = open(file_name + '.log', 'w')
f.write(str(response))
print(response)
return response
def receive_input(self):
encoded_length = os.read(self.input, self.packet_size)
if encoded_length == "":
return None
else:
(length,) = struct.unpack(">I", encoded_length)
return os.read(self.input, length)
def send_response(self, response):
os.write(self.output, struct.pack(">I", len(response)))
os.write(self.output, response)
|
{"hexsha": "4de87a892f65f08e896e0740089c7e16b838ebba", "size": 2539, "ext": "py", "lang": "Python", "max_stars_repo_path": "apps/pycasso/runner.py", "max_stars_repo_name": "danmarcab/deep_painting", "max_stars_repo_head_hexsha": "860c7d02bd6b112fffa199f715e61d895cba6623", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "apps/pycasso/runner.py", "max_issues_repo_name": "danmarcab/deep_painting", "max_issues_repo_head_hexsha": "860c7d02bd6b112fffa199f715e61d895cba6623", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:19:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:18:18.000Z", "max_forks_repo_path": "apps/pycasso/runner.py", "max_forks_repo_name": "danmarcab/deep_painting", "max_forks_repo_head_hexsha": "860c7d02bd6b112fffa199f715e61d895cba6623", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7375, "max_line_length": 98, "alphanum_fraction": 0.6211106735, "include": true, "reason": "from scipy", "num_tokens": 567}
|
import pandas
import numpy
import json
import seaborn as sns
from pymea import spikelists as sl
from matplotlib import pyplot as plt
from os import path
from argparse import ArgumentParser
def configure_parser():
parser = ArgumentParser(description='Generates lineplots from one or more spike_list.csv files')
parser.add_argument('-f', '--file', nargs='+', type=path.expanduser,
help='Path(s) to the spike_list.csv files to analyze')
    parser.add_argument('-t', '--start_time', nargs='*', type=int, default=[],
                        help='List of start times, one per input file, used to offset spike times')
parser.add_argument('-b', '--bin_size', type=float, default=60.0,
help='Size of time bins to use for spike counting, default = 60 seconds')
parser.add_argument('-o', '--output_path', type=path.expanduser, default='spike_rate_timeseries.png')
parser.add_argument('-p', '--plate_type', default='1',
help='Number of wells in plate that data was taken from. Default = 1. Options = 1, 12, 48, 96')
parser.add_argument('-r', '--time_resolution', type=float, default=1.0,
help='Time resolution of output spike rates. Default = 1 second')
parser.add_argument('-l', '--use_log_scale', action='store_true',
help='Display output on a log2 scale')
return parser
def electrode_line_plot(spike_lists, electrode, bin_size, time_resolution, use_log_scale=False):
"""
Creates a plot of the time-varying spike rate for a specified electrode
"""
print "Creating plot for %s" % electrode
electrode_spike_lists = sl.filter_spike_lists_for_electrode(spike_lists, electrode)
spike_rate, time = get_spike_rate_vector_from_spike_lists(electrode_spike_lists, bin_size, time_resolution)
if use_log_scale:
plt.plot(time, numpy.log2(spike_rate), lw=0.5)
else:
plt.plot(time, spike_rate, lw=0.5)
def get_spike_rate_vector_from_spike_lists(spike_lists, bin_size, time_resolution):
"""
Generates spike_rate vectors for each spike_list in spike_lists, then concatenates them
"""
spike_times = [spike_list['Time (s)'] for spike_list in spike_lists]
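    # sl.get_spike_rate returns one (spike_rate, time) pair per recording;
    # the pairs are concatenated below into a single timeline (assuming
    # per-file start-time offsets were already added to 'Time (s)').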
spike_rate_vectors, time_vectors = zip(*map(lambda st: sl.get_spike_rate(st, bin_size, time_resolution), spike_times))
try:
spike_rate = numpy.concatenate(spike_rate_vectors)
except ValueError:
print "ValueError on concatenate"
print spike_rate_vectors, time_vectors
return spike_rate_vectors[0], time_vectors[0]
time = numpy.concatenate(time_vectors)
print spike_rate.shape, time.shape
return spike_rate, time
def main():
# parse command line args
parser = configure_parser()
args = parser.parse_args()
# Check that the start time input is valid
if len(args.start_time) != 0 and len(args.start_time) != len(args.file):
print "-t, --start_time parameter must be specified with the same number of elements as -f, --file"
return
# load spike_list csvs
spike_lists = map(pandas.read_csv, args.file)
# offset spike times
if len(args.start_time) == len(spike_lists):
for spike_list, offset in zip(spike_lists, args.start_time):
spike_list['Time (s)'] += offset
wells = sl.plate_well_map.get(args.plate_type, None)
if wells is None:
print 'Unrecognized plate type encountered: {}'.format(args.plate_type)
return
subplot_size = sl.plot_dims_map[args.plate_type]
    plt.figure(figsize=(45, 45))
for i, well in enumerate(wells):
plt.subplot(subplot_size[0], subplot_size[1], i + 1)
for ele in sl.electrode_map[args.plate_type]:
ele_name = '{well}_{electrode}'.format(well=well, electrode=ele)
electrode_line_plot(spike_lists, ele_name, args.bin_size, args.time_resolution, args.use_log_scale)
#plt.title(e)
plt.savefig(args.output_path)
if __name__ == '__main__':
main()
# Saving this for later yum yum
#get_ele_list = lambda sl: sl.get_spike_list_for_electrode(sl, ele_name)
#electrode_lists = map(get_ele_list, spike_lists)
#electrode_times = [l['Time (s)'] for l in electrode_lists]
#ele_hist = lambda st: sl.get_spike_histogram(st, bin_size=args.bin_size)
#hists, bins = zip(*map(ele_hist, electrode_times))
#spike_counts = numpy.concatenate(hists)
#time = numpy.concatenate(bins)
#plt.plot(time, spike_counts)
|
{"hexsha": "bc73aab844ceb488e43a198a7eae859738fe664e", "size": 4392, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/generate_lineplots.py", "max_stars_repo_name": "sdrendall/mea_analysis", "max_stars_repo_head_hexsha": "62006e35bcf92b5d9ec19a6f89f4a748ae36bf76", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/generate_lineplots.py", "max_issues_repo_name": "sdrendall/mea_analysis", "max_issues_repo_head_hexsha": "62006e35bcf92b5d9ec19a6f89f4a748ae36bf76", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/generate_lineplots.py", "max_forks_repo_name": "sdrendall/mea_analysis", "max_forks_repo_head_hexsha": "62006e35bcf92b5d9ec19a6f89f4a748ae36bf76", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4339622642, "max_line_length": 122, "alphanum_fraction": 0.6987704918, "include": true, "reason": "import numpy", "num_tokens": 1084}
|
#!/usr/bin/env python3
"""
Author: Jordan R. Abrahams (jabrahams@hmc.edu)
Last Updated: 11 January 2018
This program runs multiple Monte-Carlo simulations for a given execution
strategy.
This file is the primary running access point for the new RobotBrunch
simulator. Thus, it holds the main() function.
This file also holds the multithreading code, allowing large scale
simulation runs to take place.
"""
import sys
if sys.version_info < (3, 0):
    print("Simulations must be run with Python 3 or a later version.")
    sys.exit(1)
import os
import os.path
import time
import multiprocessing
import argparse
import numpy as np
from libheat import functiontimer
from libheat.stntools import load_stn_from_json_file, mitparser
from libheat.montsim import Simulator
from libheat.dmontsim import DecoupledSimulator
import libheat.printers as pr
import libheat.parseindefinite
from libheat import sim2csv
MAX_SEED = 2 ** 31 - 1
"""The maximum number a random seed can be."""
DEFAULT_DECOUPLE = "srea"
"""The default decoupling type for DecoupledSimulator"""
def main():
args = parse_args()
# Set the random seed
if args.seed is not None:
random_seed = int(args.seed)
else:
random_seed = np.random.randint(MAX_SEED)
if args.verbose:
pr.set_verbosity(1)
pr.verbose("Verbosity set to: 1")
sim_count = args.samples
sim_options = {"ar_threshold": args.ar_threshold,
"alp_threshold": args.si_threshold,
"si_threshold": args.si_threshold}
# Check to see if we need to create the ordering pairs from the parsed
# user input.
if args.ordering_pairs is not None:
ordering_pairs = libheat.parseindefinite.parse_ind_arg(
args.ordering_pairs)
else:
ordering_pairs = None
# simulate across multiple paths.
stn_paths = folder_harvest(args.stns, recurse=True, only_json=True)
across_paths(stn_paths, args.execution, args.threads, sim_count,
sim_options,
output=args.output,
live_updates=(not args.no_live),
random_seed=random_seed,
mitparse=args.mit_parse,
start_index=args.start_point,
stop_index=args.stop_point,
ordering_pairs=ordering_pairs)
def across_paths(stn_paths, execution, threads, sim_count, sim_options,
output=None, live_updates=True, random_seed=None,
mitparse=False, start_index=0, stop_index=None,
ordering_pairs=None):
"""Runs multiple simulations for each STN in the provided iterable.
Args:
stn_paths (iterable): iterable (like a List) of strings.
execution (str): Execution strategy to use on each STN.
threads (int): Number of threads to use.
sim_count (int): Number of simulations (samples) to use.
sim_options (dict): Dictionary of simulation options to use.
output (str, optional): Output file path. Default no output.
live_updates (boolean, optional): Whether to provide live updates.
random_seed (int, optional): The random seed to start out with,
defaults to a random... random seed.
mitparse (boolean, optional): Parse STN JSON files as MIT format.
ordering_pairs (list, optional): List of tuples of AR and SC settings.
Each STN will be run with a separate simulation for each tuple.
"""
stn_pairs = []
# Collect the STNs from all the passed in paths
# Make sure we keep the path around though, and keep them in the pair.
for i, path in enumerate(stn_paths):
if mitparse:
mitstns = mitparser.mit2stn(path, add_z=True, connect_origin=True)
stn_pairs += [(path, k) for k in mitstns]
else:
stn = load_stn_from_json_file(path)["stn"]
stn_pairs.append((path, stn))
# We must separate these for loops because MIT stns can hold several
# instances in a single file.
for i, pair in enumerate(stn_pairs):
if i < start_index:
continue
if stop_index is not None:
if i >= stop_index:
break
if ordering_pairs is not None:
for j, execution_setting in enumerate(ordering_pairs):
sim_option_instance = sim_options.copy()
sim_option_instance["ar_threshold"] = execution_setting[0]
sim_option_instance["si_threshold"] = execution_setting[1]
results_dict = _run_stage(pair, execution, sim_count, threads,
random_seed, sim_option_instance)
if live_updates:
_print_results(results_dict,
j + len(ordering_pairs)*i + 1,
len(stn_pairs)*len(ordering_pairs))
if output is not None:
sim2csv.save_csv_row(results_dict, output)
else:
results_dict = _run_stage(pair, execution, sim_count, threads,
random_seed, sim_options)
if live_updates:
_print_results(results_dict, i + 1, len(stn_pairs))
if output is not None:
sim2csv.save_csv_row(results_dict, output)
def _run_stage(pair, execution, sim_count, threads, random_seed, sim_options):
"""Run a single stage of the multiple simulation set up."""
path, stn = pair
start_time = time.time()
response_dict = multiple_simulations(stn, execution, sim_count,
threads=threads,
random_seed=random_seed,
sim_options=sim_options)
runtime = time.time() - start_time
results = response_dict["sample_results"]
reschedules = response_dict["reschedules"]
sent_schedules = response_dict["sent_schedules"]
robustness = results.count(True)/len(results)
vert_count = len(stn.verts)
max_verts_on_agent = max_agent_verts(stn)
mean_verts_on_agent = (len(stn.verts) - 1)/len(stn.agents)
cont_dens = len(stn.contingent_edges)/len(stn.edges)
synchrony = len(stn.interagent_edges)/len(stn.edges)
total_sd = 0
for e in stn.contingent_edges.values():
try:
total_sd += e.sigma
except ValueError:
continue
sd_avg = total_sd / len(stn.contingent_edges)
results_dict = {}
results_dict["execution"] = execution
results_dict["robustness"] = robustness
results_dict["threads"] = threads
results_dict["random_seed"] = random_seed
results_dict["runtime"] = runtime
results_dict["samples"] = sim_count
results_dict["timestamp"] = time.time()
results_dict["stn_path"] = path
results_dict["stn_name"] = stn.name
results_dict["ar_threshold"] = sim_options["ar_threshold"]
results_dict["si_threshold"] = sim_options["si_threshold"]
results_dict["synchronous_density"] = synchrony
results_dict["sd_avg"] = sd_avg
results_dict["vert_count"] = vert_count
results_dict["agents"] = len(stn.agents)
results_dict["mean_verts_agent"] = mean_verts_on_agent
results_dict["max_verts_agent"] = max_verts_on_agent
results_dict["contingent_density"] = cont_dens
results_dict["reschedule_freq"] = sum(reschedules)/len(reschedules)
results_dict["send_freq"] = sum(sent_schedules)/len(sent_schedules)
return results_dict
def _print_results(results_dict, i, stn_count):
"""Pretty print the results of N samples of simulation"""
print("-"*79)
print(" Ran on: {}".format(results_dict["stn_path"]))
print(" Name: {}".format(results_dict["stn_name"]))
print(" Timestamp: {}".format(results_dict["timestamp"]))
print(" Samples: {}".format(results_dict["samples"]))
print(" Threads: {}".format(results_dict["threads"]))
print(" Execution: {}".format(results_dict["execution"]))
print(" AR Threshold: {}".format(results_dict["ar_threshold"]))
print(" SI Threshold: {}".format(results_dict["si_threshold"]))
print(" Robustness: {}".format(results_dict["robustness"]))
print(" Seed: {}".format(results_dict["random_seed"]))
print(" Runtime: {}".format(results_dict["runtime"]))
print(" Vert Count: {}".format(results_dict["vert_count"]))
print(" Agents: {}".format(results_dict["agents"]))
print(" Max verts on Agent: {}".format(results_dict["max_verts_agent"]))
print(" Mean verts on Agent: {}".format(
results_dict["mean_verts_agent"]))
print(" Cont Edge Dens: {}".format(results_dict["contingent_density"]))
print(" Cont SD Avg: {}".format(results_dict["sd_avg"]))
print(" Sync Density: {}".format(results_dict["synchronous_density"]))
print(" Resc Freq: {}".format(results_dict["reschedule_freq"]))
print(" Send Freq: {}".format(results_dict["send_freq"]))
print(" Total Progress: {}/{}".format(i, stn_count))
print("-"*79)
def multiple_simulations(starting_stn, execution_strat,
count, threads=1, random_seed=None,
sim_options={}):
"""Run multiple simulations on a single STN.
Args:
starting_stn (STN): STN to simulate on.
execution_strat (str): Execution strategy to simulate with.
count (int): Number of simulations to run.
threads (int, optional): Number of threads to use.
random_seed (int, optional): The random seed to use. Generates new
seeds from this instance. None indicates a random random-seed.
sim_options (dict): A set of options (usually thresholds) for the
simulator.
Returns:
A response dictionary with three entries in it.
The response dictionary contains the following keys:
* "sample_results": A list of bools of how the simulations went.
* "reschedules": A list of ints counting how many reschedules a sim took.
* "sent_schedules": A list of ints counting how many schedules were sent
for each sim.
"""
# Each thread needs its own simulator, otherwise the progress of one thread
# can overwrite the progress of another
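    # Illustrative shape of the return value for count=3:
    # {"sample_results": [True, False, True],
    #  "reschedules": [2, 5, 1],
    #  "sent_schedules": [3, 6, 2]}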
print("Random seed is: {}".format(random_seed))
if random_seed is not None:
seed_gen = np.random.RandomState(random_seed)
seeds = [seed_gen.randint(MAX_SEED) for i in range(count)]
tasks = _make_simulator_tasks(seeds, starting_stn,
execution_strat, sim_options,
count)
else:
tasks = _make_simulator_tasks(None, starting_stn,
execution_strat, sim_options,
count)
if threads > 1:
print("Using multithreading; threads = {}".format(threads))
try_count = 0
while try_count <= 3:
try_count += 1
response = None
try:
with multiprocessing.Pool(threads) as pool:
response = pool.map(_multisim_thread_helper, tasks)
break
except BlockingIOError:
pr.warning("Got BlockingIOError; attempting to remake threads")
pr.warning("Retrying in 3 seconds...")
time.sleep(3.0)
pr.warning("Retrying now")
else:
print("Using single thread; threads = {}".format(threads))
response = list(map(_multisim_thread_helper, tasks))
# Unzip each of the response values.
sample_results = [r[0] for r in response]
reschedules = [r[1] for r in response]
sent_schedules = [r[2] for r in response]
# Package the response into a nice dict to send back.
response_dict = {"sample_results": sample_results, "reschedules":
reschedules, "sent_schedules": sent_schedules}
return response_dict
def _make_simulator_tasks(seeds, stn, execution_strat, sim_options, count):
"""Helper function to generate a list of tasks for the thread pool"""
if seeds is not None:
if execution_strat == "da":
tasks = [(DecoupledSimulator(seeds[i]), stn,
execution_strat,
sim_options, i)
for i in range(count)]
else:
tasks = [(Simulator(seeds[i]), stn, execution_strat,
sim_options, i)
for i in range(count)]
else:
if execution_strat == "da":
tasks = [(DecoupledSimulator(None), stn,
execution_strat,
sim_options, i)
for i in range(count)]
else:
tasks = [(Simulator(None), stn, execution_strat,
sim_options, i)
for i in range(count)]
return tasks
def _multisim_thread_helper(tup):
""" Helper function to allow passing multiple arguments to the simulator.
"""
simulator = tup[0]
if tup[2] == "da":
ans = simulator.simulate(tup[1], sim_options=tup[3],
decouple_type=DEFAULT_DECOUPLE)
else:
ans = simulator.simulate(tup[1], tup[2], sim_options=tup[3])
reschedule_count = simulator.num_reschedules
sent_count = simulator.num_sent_schedules
pr.verbose("Task: {}".format(tup[4]))
pr.verbose("Assigned Times: {}".format(simulator.get_assigned_times()))
pr.verbose("Successful?: {}".format(ans))
return ans, reschedule_count, sent_count
def folder_harvest(folder_paths: list, recurse=True, only_json=True) -> list:
""" Retrieves a list of STN filepaths given a list of folderpaths.
Args:
folder_paths (list): List of strings that represent file/folder paths.
        recurse (:obj:`bool`, optional): Boolean indicating whether to
            recurse into directories.
        only_json (:obj:`bool`, optional): If True, keep only files with a
            .json extension.
Returns:
Returns a flat list of STN paths.
"""
stn_files = []
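    # Example (hypothetical paths): folder_harvest(["stns"]) might return
    # ["stns/a.json", "stns/sub/b.json"], descending into "stns/sub".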
for folder_path in folder_paths:
if os.path.isfile(folder_path):
# Folder was actually a stn file. Just append it.
if only_json:
_, ext = os.path.splitext(folder_path)
if ext == ".json":
stn_files.append(folder_path)
else:
stn_files.append(folder_path)
elif os.path.isdir(folder_path):
# This was actually a folder this time!
contents = os.listdir(folder_path)
for c in contents:
# Make sure to include the folder path
long_path = folder_path + "/" + c
if os.path.isfile(long_path):
if only_json:
_, ext = os.path.splitext(long_path)
if ext == ".json":
stn_files.append(long_path)
else:
stn_files.append(long_path)
elif os.path.isdir(long_path) and recurse:
stn_files += folder_harvest([long_path], recurse=True)
else:
# This should never happen, but maybe?
pr.warning("STN path was not file or directory: " +
folder_path)
pr.warning("Skipping...")
else:
# This should never happen, but maybe?
pr.warning("STN path was not file or directory: " +
folder_path)
pr.warning("Skipping...")
return stn_files
def max_agent_verts(stn):
"""Returns the maximum amount of vertices belonging to any one agent"""
return max([get_agent_verts(stn, a) for a in stn.agents])
def mean_agent_verts(stn):
counts = [get_agent_verts(stn, a) for a in stn.agents]
return sum(counts)/len(counts)
def get_agent_verts(stn, agent):
"""Returns the number of vertices owned by the provided agent in the given
STN.
Args:
stn (STN): STN to use.
agent (int): Agent ID to get verts of.
Returns:
The number of vertices owned by the provided agent.
"""
count = 0
for vert in stn.verts.values():
if vert.ownerID == agent:
count += 1
return count
def parse_args():
"""Parse the program arguments."""
parser = argparse.ArgumentParser(description="")
parser.add_argument("-v", "--verbose", action="store_true",
help="Turns on more printing")
parser.add_argument("-t", "--threads", type=int, default=1,
help="Number of system threads to use. Default is 1.")
parser.add_argument("-s", "--samples", type=int, default=100,
help="Number of Monte-Carlo samples to use, default"
" is 100")
parser.add_argument("-e", "--execution", type=str, default="early",
help="Set the execution strategy to use. Default is"
" 'early'")
parser.add_argument("-o", "--output", type=str,
help="Write the simulation results to a CSV")
parser.add_argument("--ar-threshold", type=float, default=0.0,
help="AR Threshold to use for AR and ARSI")
parser.add_argument("--si-threshold", type=float, default=0.0,
help="SI Threshold to use for SI, ALP and ARSI")
parser.add_argument("--mit-parse", action="store_true",
help="Use MIT parsing to read in STN JSON files")
parser.add_argument("--seed", default=None, help="Set the random seed")
    parser.add_argument("--ordering-pairs", type=str, help="Flag "
                        "for indefinite ordering. Requires a string "
                        "argument of the form '[(XX,XX), (XX,XX), "
                        " ...]', which creates a new simulation for each "
                        "argument pair and runs each one on a single STN "
                        "before moving on to the next STN. Pairs are (AR,SC).")
parser.add_argument("--start-point", type=int, default=0,
help="Index of STN to begin simulating at, "
"inclusively. Default is '0'. Not thoroughly tested, "
"be warned.")
parser.add_argument("--stop-point", type=int,
help="Index of STN to stop simulating at, "
"exclusively. Do not set if you want to run through "
"the entire data set. Not thoroughly tested, be "
"warned.")
parser.add_argument("--no-live", action="store_true",
help="Turn off live update printing")
parser.add_argument("stns", help="The STN JSON files to run on",
nargs="+")
return parser.parse_args()
if __name__ == "__main__":
main()
print("Time spent per function:")
print(functiontimer.get_times())
|
{"hexsha": "dea3582e221eb8b74502d3ecc016219a2f8e4ea3", "size": 19120, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_simulator.py", "max_stars_repo_name": "HEATlab/DREAM", "max_stars_repo_head_hexsha": "3e63d04ad77bbeefc102a72c7b131bc0a6a33656", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-07T08:03:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-07T08:03:49.000Z", "max_issues_repo_path": "run_simulator.py", "max_issues_repo_name": "HEATlab/DREAM", "max_issues_repo_head_hexsha": "3e63d04ad77bbeefc102a72c7b131bc0a6a33656", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_simulator.py", "max_forks_repo_name": "HEATlab/DREAM", "max_forks_repo_head_hexsha": "3e63d04ad77bbeefc102a72c7b131bc0a6a33656", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-05-17T07:48:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-10T22:27:16.000Z", "avg_line_length": 40.5084745763, "max_line_length": 79, "alphanum_fraction": 0.6030334728, "include": true, "reason": "import numpy", "num_tokens": 4141}
|
import numpy as np
from tqdm import tqdm
import lasagne
import theano
def create_dataset(npts):
""" Sample data uniformly in a [-5,5] x [-5, 5] window"""
# Create data
np.random.seed(20) # set seed for reproducibility
X = np.random.uniform(-5,5, (npts, 2)).astype(np.float32)
return X
def get_list_batches(npts, batch_size):
    """Create batches (i.e. a list of index arrays) such that an array of
    size npts is split into batches of roughly batch_size elements"""
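    # Illustration: npts=10, batch_size=3 gives num_batches=3 (integer
    # division), so np.array_split returns index arrays of sizes [4, 3, 3].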
num_elem = npts
    num_batches = num_elem // batch_size  # integer division: np.array_split needs an int
# list_batches is a list of array. Each array contains the indeces of the batch elements.
list_batches = np.array_split(np.arange(num_elem), num_batches)
return list_batches
def train_network(train_fn, X, list_batches, nb_epoch):
# Store train loss for each epoch
list_loss = []
for epoch in tqdm(range(nb_epoch), desc="Training normally"):
# Store train loss for each batch
epoch_losses = []
# Loop over batches
for batch_idxs in list_batches:
X_batch = X[batch_idxs]
epoch_losses.append(train_fn(X_batch))
list_loss.append(np.mean(epoch_losses))
return list_loss
def train_network_sobolev(train_fn, X, list_batches, nb_epoch):
# Store train loss for each epoch
list_loss = []
list_loss_J = []
for epoch in tqdm(range(nb_epoch), desc="Training with Sobolev"):
epoch_losses = []
epoch_losses_J = []
for batch_idxs in list_batches:
X_batch = X[batch_idxs]
loss, J_loss = train_fn(X_batch)
epoch_losses.append(loss)
epoch_losses_J.append(J_loss)
list_loss.append(np.mean(epoch_losses))
list_loss_J.append(np.mean(epoch_losses_J))
return list_loss, list_loss_J
def get_prediction_fn(input_var, network):
# Create a prediction function
test_prediction = lasagne.layers.get_output(network, deterministic=True)
predict_fn = theano.function([input_var], test_prediction)
return predict_fn
|
{"hexsha": "856be77b5c6fde858b89aabcf50310d1f6f253df", "size": 2055, "ext": "py", "lang": "Python", "max_stars_repo_path": "Sobolev/utils.py", "max_stars_repo_name": "inamori/DeepLearningImplementations", "max_stars_repo_head_hexsha": "8bbd3c5a4a7d24b2c098ba47cfd45fe2c152771d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2010, "max_stars_repo_stars_event_min_datetime": "2016-09-22T22:22:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T02:43:21.000Z", "max_issues_repo_path": "Sobolev/utils.py", "max_issues_repo_name": "inamori/DeepLearningImplementations", "max_issues_repo_head_hexsha": "8bbd3c5a4a7d24b2c098ba47cfd45fe2c152771d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 81, "max_issues_repo_issues_event_min_datetime": "2016-09-13T01:00:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-17T15:10:28.000Z", "max_forks_repo_path": "Sobolev/utils.py", "max_forks_repo_name": "inamori/DeepLearningImplementations", "max_forks_repo_head_hexsha": "8bbd3c5a4a7d24b2c098ba47cfd45fe2c152771d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 777, "max_forks_repo_forks_event_min_datetime": "2016-09-10T12:44:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-16T04:19:55.000Z", "avg_line_length": 25.6875, "max_line_length": 93, "alphanum_fraction": 0.6749391727, "include": true, "reason": "import numpy,import theano", "num_tokens": 480}
|
import random
import numpy as np
import tensorflow as tf
import rlkit.misc.hyperparameter as hyp
from rlkit.envs.multitask.ant_env import GoalXYPosAnt
from rlkit.envs.multitask.pusher2d import CylinderXYPusher2DEnv
from rlkit.envs.multitask.her_half_cheetah import HalfCheetah, \
half_cheetah_cost_fn
from rlkit.envs.multitask.her_pusher_env import Pusher2DEnv, \
pusher2d_cost_fn
from rlkit.envs.multitask.her_reacher_7dof_env import Reacher7Dof, \
reacher7dof_cost_fn
from rlkit.envs.multitask.reacher_7dof import (
Reacher7DofXyzGoalState,
)
from rlkit.envs.multitask.pusher3d import MultitaskPusher3DEnv
from rlkit.envs.multitask.multitask_env import MultitaskToFlatEnv
from rlkit.launchers.launcher_util import run_experiment
def experiment(variant):
from cheetah_env import HalfCheetahEnvNew
from cost_functions import cheetah_cost_fn, \
hopper_cost_fn, \
swimmer_cost_fn
from hopper_env import HopperEnvNew
from main_solution import train_dagger
from rlkit.core import logger
from swimmer_env import SwimmerEnvNew
env_name_or_class = variant['env_name_or_class']
if type(env_name_or_class) == str:
if 'cheetah' in str.lower(env_name_or_class):
env = HalfCheetahEnvNew()
cost_fn = cheetah_cost_fn
elif 'hopper' in str.lower(env_name_or_class):
env = HopperEnvNew()
cost_fn = hopper_cost_fn
elif 'swimmer' in str.lower(env_name_or_class):
env = SwimmerEnvNew()
cost_fn = swimmer_cost_fn
else:
raise NotImplementedError
else:
env = env_name_or_class()
from rlkit.envs.wrappers import NormalizedBoxEnv
env = NormalizedBoxEnv(env)
if env_name_or_class == Pusher2DEnv:
cost_fn = pusher2d_cost_fn
elif env_name_or_class == Reacher7Dof:
cost_fn = reacher7dof_cost_fn
elif env_name_or_class == HalfCheetah:
cost_fn = half_cheetah_cost_fn
else:
if variant['multitask']:
env = MultitaskToFlatEnv(env)
cost_fn = env.cost_fn
train_dagger(
env=env,
cost_fn=cost_fn,
logdir=logger.get_snapshot_dir(),
**variant['dagger_params']
)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
# Experiment meta-params
parser.add_argument('--exp_name', type=str, default='mb_mpc')
parser.add_argument('--seed', type=int, default=3)
parser.add_argument('--render', action='store_true')
# Training args
parser.add_argument('--learning_rate', '-lr', type=float, default=1e-3)
parser.add_argument('--dagger_iters', '-n', type=int, default=10)
parser.add_argument('--dyn_iters', '-nd', type=int, default=60)
parser.add_argument('--batch_size', '-b', type=int, default=512)
# Neural network architecture args
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=300)
# MPC Controller
parser.add_argument('--simulated_paths', '-sp', type=int, default=512)
parser.add_argument('--mpc_horizon', '-m', type=int, default=15)
args = parser.parse_args()
# Set seed
np.random.seed(args.seed)
tf.set_random_seed(args.seed)
n_seeds = 1
mode = "local"
exp_prefix = "dev-abhishek-mb"
n_seeds = 3
mode = "ec2"
exp_prefix = "model-based-reacher-multitask-fixed-2"
num_epochs = 100
num_steps_per_epoch = 1000
max_path_length = 50
variant = dict(
# env='HalfCheetah-v1',
env_name_or_class='HalfCheetah-v1',
dagger_params=dict(
render=args.render,
learning_rate=args.learning_rate,
dagger_iters=num_epochs,
dynamics_iters=args.dyn_iters,
batch_size=args.batch_size,
num_paths_random=num_steps_per_epoch // max_path_length,
num_paths_dagger=num_steps_per_epoch // max_path_length,
num_simulated_paths=args.simulated_paths,
env_horizon=max_path_length,
mpc_horizon=args.mpc_horizon,
n_layers=2,
size=300,
activation=tf.nn.relu,
output_activation=None,
normalize=True,
),
multitask=True,
version="Model-Based - Abhishek",
algorithm="Model-Based",
)
use_gpu = True
if mode != "local":
use_gpu = False
search_space = {
'env_name_or_class': [
Reacher7DofXyzGoalState,
# MultitaskPusher3DEnv,
# GoalXYPosAnt,
# CylinderXYPusher2DEnv,
],
'multitask': [True],
'dagger_params.normalize': [True, False],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
if variant['env_name_or_class'] == CylinderXYPusher2DEnv:
max_path_length = 100
variant['dagger_params']['num_paths_random'] = (
num_steps_per_epoch // max_path_length
)
variant['dagger_params']['num_paths_dagger'] = (
num_steps_per_epoch // max_path_length
)
variant['dagger_params']['env_horizon'] = (
max_path_length
)
for i in range(n_seeds):
seed = random.randint(0, 999999)
run_experiment(
experiment,
mode=mode,
exp_prefix=exp_prefix,
seed=seed,
variant=variant,
exp_id=exp_id,
)
|
{"hexsha": "3852faa016e53dbd37bbf1df122262bf50ccd417", "size": 5729, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/state_distance/baselines/abhishek_mb.py", "max_stars_repo_name": "Asap7772/railrl_evalsawyer", "max_stars_repo_head_hexsha": "baba8ce634d32a48c7dfe4dc03b123e18e96e0a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiments/state_distance/baselines/abhishek_mb.py", "max_issues_repo_name": "Asap7772/railrl_evalsawyer", "max_issues_repo_head_hexsha": "baba8ce634d32a48c7dfe4dc03b123e18e96e0a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/state_distance/baselines/abhishek_mb.py", "max_forks_repo_name": "Asap7772/railrl_evalsawyer", "max_forks_repo_head_hexsha": "baba8ce634d32a48c7dfe4dc03b123e18e96e0a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.899408284, "max_line_length": 75, "alphanum_fraction": 0.6404259033, "include": true, "reason": "import numpy", "num_tokens": 1425}
|
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import scipy.optimize as scop
def get_maxima_minima(xs, ys):
ys_avg = moving_average(ys, n=30)
maxima = []
minima = []
last = ys_avg[0]
status = "up"
for y_idx, y in enumerate(ys_avg):
if status=="up":
if y<last:
maxima.append([xs[y_idx-1],last])
status="down"
elif status=="down":
if y>last:
minima.append([xs[y_idx-1],last])
status="up"
last=y
maxima = np.array(maxima)
minima = np.array(minima)
#return(maxima, minima)
left_min_possible = [m for m in minima if m[0]>349 and m[0]<400]
if len(left_min_possible)!=0:
left_min_min_idx = np.argmin([m[1] for m in left_min_possible])
left_min = left_min_possible[left_min_min_idx]
else:
left_min = [xs[0], ys_avg[0]]
max_1_possible = [m for m in maxima if m[0]>370 and m[0]<470]
max_1_max_idx = np.argmax([m[1] for m in max_1_possible])
max_1 = max_1_possible[max_1_max_idx]
max_2_possible = [m for m in maxima if m[0]>500 and m[0]<620]
max_2_max_idx = np.argmax([m[1] for m in max_2_possible])
max_2 = max_2_possible[max_2_max_idx]
min_1_possible = [m for m in minima if m[0]>420 and m[0]<530]
min_1_min_idx = np.argmin([m[1] for m in min_1_possible])
min_1 = min_1_possible[min_1_min_idx]
right_min_possible = [m for m in minima if m[0]>600 and m[0]<651]
if len(right_min_possible)!=0:
right_min_min_idx = np.argmin([m[1] for m in right_min_possible])
right_min = right_min_possible[right_min_min_idx]
else:
right_min = [xs[-1], ys_avg[-1]]
return(left_min, max_1, min_1, max_2, right_min)
def fit_maxima(name, xs, ys, maxima_minima, num=2):
max_1 = maxima_minima[1]
max_2 = maxima_minima[3]
if num == 2:
a0, m0, sigma0 = max_1[1], max_1[0], 20
a1, m1, sigma1 = max_2[1], max_2[0], 30
x_fit = np.linspace(350, 650, 2000)
y_fit_init = fitfunction_maxima2(x_fit, a0, m0, sigma0, a1, m1, sigma1)
fit_init = [a0, m0, sigma0, a1, m1, sigma1]
try:
fit_opt, fit_cov = scop.curve_fit(fitfunction_maxima2, xs, ys, p0=[a0, m0, sigma0, a1, m1, sigma1])
        except Exception:  # curve_fit raises RuntimeError when the fit fails to converge
print("Fit of %i gaussians to %s did not converge"%(num, name))
fit_opt = [0.0, m0, sigma0, 0.0, m1, sigma1]
fit_cov = None
y_fit = fitfunction_maxima2(x_fit, fit_opt[0], fit_opt[1], fit_opt[2], fit_opt[3], fit_opt[4], fit_opt[5])
#return(x, y_init, y_init, [])
if num == 3:
a0, m0, sigma0 = max_1[1], max_1[0], 20
a1, m1, sigma1 = 0.98*max_2[1], max_2[0]+5, 30
a2, m2, sigma2 = 0.2*max_2[1], max_2[0]-40, 20
x_fit = np.linspace(350, 650, 2000)
y_fit_init = fitfunction_maxima3(x_fit, a0, m0, sigma0, a1, m1, sigma1, a2, m2, sigma2)
fit_init = [a0, m0, sigma0, a1, m1, sigma1, a2, m2, sigma2]
try:
fit_opt, fit_cov = scop.curve_fit(fitfunction_maxima3, xs, ys, p0=[a0, m0, sigma0, a1, m1, sigma1, a2, m2, sigma2])
        except Exception:  # curve_fit raises RuntimeError when the fit fails to converge
print("Fit of %i gaussians to %s did not converge"%(num, name))
fit_opt = [0.0, m0, sigma0, 0.0, m1, sigma1, 0.0, m2, sigma2]
fit_cov = None
y_fit = fitfunction_maxima3(x_fit, fit_opt[0], fit_opt[1], fit_opt[2], fit_opt[3], fit_opt[4], fit_opt[5], fit_opt[6], fit_opt[7], fit_opt[8])
#return(x_fit, y_init, y_init, [])
return(x_fit, y_fit, y_fit_init, fit_init, fit_opt)
def fitfunction_maxima4(x, a0, m0, sigma0, a1, m1, sigma1, a2, m2, sigma2, a3, m3, sigma3):
y = a0*np.exp(-0.5*((x-m0)/(sigma0))**2.0) + a1*np.exp(-0.5*((x-m1)/(sigma1))**2.0) + a2*np.exp(-0.5*((x-m2)/(sigma2))**2.0) + a3*np.exp(-0.5*((x-m3)/(sigma3))**2.0)
return(y)
def fitfunction_maxima3(x, a0, m0, sigma0, a1, m1, sigma1, a2, m2, sigma2):
y = a0*np.exp(-0.5*((x-m0)/(sigma0))**2.0) + a1*np.exp(-0.5*((x-m1)/(sigma1))**2.0) + a2*np.exp(-0.5*((x-m2)/(sigma2))**2.0)
return(y)
def fitfunction_maxima2(x, a0, m0, sigma0, a1, m1, sigma1):
y = a0*np.exp(-0.5*((x-m0)/(sigma0))**2.0) + a1*np.exp(-0.5*((x-m1)/(sigma1))**2.0)
return(y)
def fitfunction_maxima1(x, a0, m0, sigma0):
y = a0*np.exp(-0.5*((x-m0)/(sigma0))**2.0)
return(y)
def get_gaussians(xs, fit_opt):
gaussians=[]
for i in range(len(fit_opt)//3):
a0, m0, sigma0 = fit_opt[3*i], fit_opt[3*i+1], fit_opt[3*i+2]
gaussians.append(fitfunction_maxima1(xs, a0, m0, sigma0))
return(gaussians)
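
# --- Hedged demo (added by the editor, not part of the original module) ---
# Recovering two Gaussian components from noisy synthetic data with the
# helpers above; all parameter values are arbitrary illustrations.
def _demo_two_gaussian_fit():
    xs = np.linspace(350, 650, 600)
    ys = fitfunction_maxima2(xs, 1.0, 420.0, 20.0, 0.8, 560.0, 30.0)
    ys_noisy = ys + 0.02*np.random.randn(len(xs))
    popt, _ = scop.curve_fit(fitfunction_maxima2, xs, ys_noisy,
                             p0=[1.0, 430.0, 25.0, 0.7, 550.0, 25.0])
    return get_gaussians(xs, popt)  # the two fitted components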
def moving_average(a, n=3):
    """Centered moving average of width n, with shrinking windows at the borders."""
m = len(a)
avg = []
for idx in range(m):
if idx<n//2:
avg.append(np.mean(a[0:idx+n//2]))
elif idx > m-n//2:
avg.append(np.mean(a[idx:]))
else:
avg.append(np.mean(a[idx-n//2:idx+n//2]))
avg = np.array(avg)
return avg
def reduce(data_x, data_y, num=10):
    # Downsample by a factor of `num`: average x (angle) within each block,
    # take the max of y to preserve diffraction peaks.
n_new = len(data_y[0])//num
data_x_new = np.zeros((len(data_y), n_new))
data_y_new = np.zeros((len(data_y), n_new))
for i1 in range(len(data_y)):
for i2 in range(n_new):
data_x_new[i1][i2]=np.mean(data_x[i1][i2*num:(i2+1)*num])
data_y_new[i1][i2]=np.max(data_y[i1][i2*num:(i2+1)*num])
if (i1+1)%1000==0 or (i1+1)==len(data_y):
print("reduced %i of %i XRD samples"%(i1+1, len(data_y)))
return(data_x_new, data_y_new)
def data_augmentation(features, labels, factor=2.0):
    # NOTE: appears unfinished -- num_todo is computed (negative whenever
    # factor > 1, suggesting swapped operands) but never used afterwards.
    num_target = int(round(factor*len(features)))
    num_todo = len(features)-num_target
def bg(l):
a, b, c = np.random.random(3)-0.5
xs = np.linspace(-1.0, 1.0, l)
ys = a*xs**3.0 + b*xs**2.0 + c*xs
ys-=np.min(ys)
ys/=np.max(ys)
fluct_noise_level_max = 100
ys*=(np.random.random()*0.8+0.2)*fluct_noise_level_max
base_noise_level_max = 100.0
ys+=(np.random.random()*0.8+0.2)*base_noise_level_max
return(ys)
def disturb_spectrum(to_add, l):
# shift randomly in x
s = np.random.randint(140)-70
to_add_new = []
if s>=0: # shift right -> fill beginning with 0s
for j in range(s):
to_add_new.append(to_add[0])
for j in range(l-s):
to_add_new.append(to_add[j])
if s<0: # shift left
for entry in to_add[-s:]:
to_add_new.append(entry)
for j in range(-s):
to_add_new.append(to_add[-1])
to_add = np.array(to_add_new)
# scale in x
factor_scale_x = np.random.random()*0.05+0.975
to_add_new = []
for j in range(l):
j2 = int(round(factor_scale_x*j))
if j2>=l:
break
to_add_new.append(to_add[j2])
for j in range(l-len(to_add_new)):
to_add_new.append(to_add[-1])
to_add=np.array(to_add_new)
# scale in y
factor_scale_y = np.random.random()*0.8+0.3
to_add *= factor_scale_y
# make wider
w = np.random.randint(10)+1
to_add = np.array([np.max(to_add[max(0,i):min(l,i+w)]) for i in range(l)])
return(to_add)
def generate_fake_data(data_x, data_y, num=100):
print("generate %i fake spectra"%(num))
data_x_fake = []
data_y_fake = []
labels_fake = []
l = len(data_x[0])
probability_pure = 0.5
do_plot=True
if do_plot:
plt.figure(figsize=(30,20))
num_to_plot = 200
num_plotted = 0
#fig, axes = plt.subplots(4,4)
#i1 = 0
#j1 = 0
#num_reference_spectra = len(data_x)
num_reference_spectra = 2
for i in range(num):
x = data_x[0]
# generate some background
y = bg(l)
# add gaussian noise
max_noise = 20
strength = max_noise*(np.random.random()*0.9+0.1)
y += np.random.normal(loc=0.0, scale = 1.0, size=l)*strength
# select a random spectrum
idx = np.random.randint(num_reference_spectra)
to_add = data_y[idx]
to_add-=np.min(to_add)
# disturb it
to_add = disturb_spectrum(to_add, l)
# finally add it
y += to_add
# make a non pure one
additional_peaks = np.zeros((l))
if np.random.random()<probability_pure:
label = "pure"
else:
label = "mixed"
num_gaussians = np.random.randint(10)+2
a0_overall = np.random.random()*0.9+0.1
for j in range(num_gaussians):
a0 = np.abs(np.random.normal())*330.0+43.0
m0 = np.random.random()*40.0+10.0
sigma0 = np.random.random()*0.35+0.15
additional_peak = a0_overall*a0*np.exp(-0.5*((x-m0)/(sigma0))**2.0)
#additional_peak = a0*np.exp(-0.5*((x-m0)/(sigma0))**2.0)
additional_peaks += additional_peak
y+=additional_peaks
# sometimes add small fractions of other spectra
if np.random.random()<0.2:
if num_reference_spectra==1:
other_idx = 1
else:
other_idx = idx
while other_idx==idx:
other_idx = np.random.randint(num_reference_spectra)
to_add = data_y[other_idx]
to_add-=np.min(to_add)
to_add = disturb_spectrum(to_add, l)
to_add *= (np.random.random()*0.2+0.05)
y += to_add
if do_plot:
if num_plotted < num_to_plot:
if label=="mixed":
pass
plt.plot(x, y, "r-")
else:
plt.plot(x, y, "k-")
num_plotted+=1
#else:
# break
#plt.figure()
#plt.plot(x, y, "k-", label="final fake spectrum")
#plt.plot(x, data_y[idx], "r-", label="underlying reference")
#plt.plot(x, additional_peaks, "b-", label="fake peaks")
#axes[i1][j1].plot(x, data_y[idx], "k-")
#axes[i1][j1].plot(x, y, "r-")
#axes[i1][j1].get_xaxis().set_ticks([])
#axes[i1][j1].get_yaxis().set_ticks([])
#i1+=1
#if i1==4:
# i1=0
# j1+=1
labels_fake.append(label)
data_x_fake.append(x.tolist())
data_y_fake.append(y.tolist())
if (i+1)%100==0 or (i+1)==num:
print("%i of %i fake spectra done"%(i+1, num))
#if i==15:
# break
if do_plot:
plt.xlabel("Angle [degree]")
plt.ylabel("XRD intensity [a.u.]")
plt.xlim([10.0, 50.0])
plt.ylim([0.0, 4500.0])
#plt.legend(loc="upper right")
plt.savefig("fake_spectra.png", dpi=300)
plt.close()
data_x_fake = np.array(data_x_fake)
data_y_fake = np.array(data_y_fake)
#axes[3][2].set_xlabel("Angle [degree]")
#axes[2][0].set_ylabel("XRD intensity [a.u.]")
#plt.subplots_adjust(hspace=0, wspace=0)
#plt.savefig("fake_spectra.png", dpi=120)
#plt.close()
#exit()
return(data_x_fake, data_y_fake, labels_fake)
def r2_metric(y_true, y_pred):
"""
Compute r2 metric.
Args:
y_true (tf.tensor): True y-values.
y_pred (tf.tensor): Predicted y-values.
Returns:
tf.tensor: r2 metric.
"""
SS_res = ks.backend.sum(ks.backend.square(y_true - y_pred))
SS_tot = ks.backend.sum(ks.backend.square(y_true-ks.backend.mean(y_true)))
return ( 1 - SS_res/(SS_tot + ks.backend.epsilon()) )
def lr_lin_reduction(learning_rate_start = 1e-3, learning_rate_stop = 1e-5, epo = 10000, epomin= 1000):
"""
Make learning rate schedule function for linear reduction.
Args:
learning_rate_start (float, optional): Learning rate to start with. The default is 1e-3.
learning_rate_stop (float, optional): Final learning rate at the end of epo. The default is 1e-5.
epo (int, optional): Total number of epochs to reduce learning rate towards. The default is 10000.
epomin (int, optional): Minimum number of epochs at beginning to leave learning rate constant. The default is 1000.
Returns:
func: Function to use with LearningRateScheduler.
Example:
lr_schedule_lin = tf.keras.callbacks.LearningRateScheduler(lr_lin_reduction)
"""
def lr_out_lin(epoch):
if(epoch < epomin):
out = learning_rate_start
else:
out = float(learning_rate_start - (learning_rate_start-learning_rate_stop)/(epo-epomin)*(epoch-epomin))
return out
return lr_out_lin
def lr_log_reduction(learning_rate_start = 1e-3, learning_rate_stop = 1e-5, epo = 10000, epomin= 1000):
"""
    Make learning rate schedule function for logarithmic reduction.
Args:
learning_rate_start (float, optional): Learning rate to start with. The default is 1e-3.
learning_rate_stop (float, optional): Final learning rate at the end of epo. The default is 1e-5.
epo (int, optional): Total number of epochs to reduce learning rate towards. The default is 10000.
epomin (int, optional): Minimum number of epochs at beginning to leave learning rate constant. The default is 1000.
Returns:
func: Function to use with LearningRateScheduler.
Example:
        lr_schedule_log = tf.keras.callbacks.LearningRateScheduler(lr_log_reduction)
"""
def lr_out_log(epoch):
if(epoch < epomin):
out = learning_rate_start
else:
out = np.exp(float(np.log(learning_rate_start) - (np.log(learning_rate_start)-np.log(learning_rate_stop))/(epo-epomin)*(epoch-epomin)))
return out
return lr_out_log
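
# --- Hedged demo (added by the editor, not part of the original module) ---
# Visual check of the two schedules above; uses only numpy/matplotlib, which
# this module already imports. Call it manually if needed.
def _demo_lr_schedules():
    sched_lin = lr_lin_reduction(1e-3, 1e-5, epo=500, epomin=50)
    sched_log = lr_log_reduction(1e-3, 1e-5, epo=500, epomin=50)
    epochs = np.arange(600)
    plt.plot(epochs, [sched_lin(e) for e in epochs], label="linear")
    plt.plot(epochs, [sched_log(e) for e in epochs], label="logarithmic")
    plt.xlabel("epoch"); plt.ylabel("learning rate"); plt.legend()
    plt.savefig("lr_schedules.png", dpi=120)
    plt.close()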
|
{"hexsha": "7ca83682af6a18f16e3d66d0ab4f2c23a5a08950", "size": 14006, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/utils.py", "max_stars_repo_name": "aimat-lab/ML4HEOs", "max_stars_repo_head_hexsha": "047f3414e77cbdad2c0264e54f1395b699f7eb31", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/utils.py", "max_issues_repo_name": "aimat-lab/ML4HEOs", "max_issues_repo_head_hexsha": "047f3414e77cbdad2c0264e54f1395b699f7eb31", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/utils.py", "max_forks_repo_name": "aimat-lab/ML4HEOs", "max_forks_repo_head_hexsha": "047f3414e77cbdad2c0264e54f1395b699f7eb31", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0051413882, "max_line_length": 170, "alphanum_fraction": 0.5673996858, "include": true, "reason": "import numpy,import scipy", "num_tokens": 4241}
|
import torch
torch.manual_seed(10)
torch.cuda.manual_seed_all(10)
import os
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity as cs
import Energies
import Leapfrog
import Tangent
from Utils import writer,Off,Obj,Pts,rect_remesh
import header
from torch.autograd import Variable
def hmcExplore(training_params,data_classes,network_params,losses,misc_variables,rounds):
for parameter in network_params.autoEncoder.decoder1.parameters():
parameter.requires_grad = False
shapes = []
meshNames = []
for ind,data in enumerate(data_classes.full_arap):
pts,faces,numNeighbors,accnumNeighbors,neighborsMatrix,weightMatrix,names,_ = data
pts = torch.unsqueeze(torch.from_numpy(pts).float().cuda(),0)
pts = pts[:,:,:network_params.dims]
meshNames.extend(names)
shapes.extend(pts)
names = meshNames[0]
print("Round:",rounds," HMC",flush=True)
network_params.autoEncoder.eval()
network_params.autoEncoder.load_state_dict(torch.load(network_params.weightPath+"_r"+str(rounds)))
if not os.path.exists(misc_variables.sampleDir):
os.makedirs(misc_variables.sampleDir)
def dumpSamples(outDir,samples,corner=-1):
for i in range(len(samples)):
objInstance = Obj.Obj("dummy.obj")
objInstance.setVertices(samples[i])
objInstance.setFaces(faces)
objInstance.saveAs(os.path.join(outDir,str(rounds) + "_" + names.split(".")[0] + "_" + str(i)+".obj"))
hmc = Leapfrog.Leapfrog(training_params.numsteps,0.1)
for ind,data in enumerate(data_classes.full_arap):
samples = []
energies = []
print("HMC Source Data:",ind,len(data_classes.full_arap),flush=True)
pts,faces,numNeighbors,accnumNeighbors,neighborsMatrix,weightMatrix,names,_ = data
query = np.expand_dims(pts[:,:network_params.dims],0)
query = np.reshape(query,(1,-1))
pts = torch.unsqueeze(torch.from_numpy(pts).float().cuda(),0)
pts = pts[:,:,:network_params.dims]
numNeighbors = torch.unsqueeze(torch.from_numpy(numNeighbors).int().cuda(),0)
accnumNeighbors = torch.unsqueeze(torch.from_numpy(accnumNeighbors).int().cuda(),0)
neighborsMatrix = torch.unsqueeze(torch.from_numpy(neighborsMatrix).int().cuda(),0)
weightMatrix = torch.unsqueeze(torch.from_numpy(weightMatrix).float().cuda(),0)
energy_fn = None
        if training_params.energy=='pdist':
            # The original referenced an undefined `p_pts` here; using the
            # current mesh points `pts`, matching the other energy branches.
            energy_fn = Energies.NLLPairwiseDistanceEnergy(pts,network_params.testEnergyWeight)
if training_params.energy=='arap':
energy_fn = Energies.ArapEnergy(pts,neighborsMatrix,numNeighbors,accnumNeighbors,weightMatrix,network_params.testEnergyWeight)
if training_params.energy=='arap2d':
energy_fn = Energies.ArapEnergy2D(pts,neighborsMatrix,numNeighbors,weightMatrix,network_params.testEnergyWeight)
if training_params.energy=='asap':
energy_fn = Energies.AsapEnergy(pts,neighborsMatrix,numNeighbors,weightMatrix,network_params.testEnergyWeight)
if training_params.energy=='asap2d':
energy_fn = Energies.AsapEnergy2D(pts,neighborsMatrix,numNeighbors,weightMatrix,network_params.testEnergyWeight)
if training_params.energy=='carap':
energy_fn = Energies.CArapEnergy(pts,neighborsMatrix,numNeighbors,weightMatrix,misc_variables.alpha,area,network_params.testEnergyWeight)
#seedCode = network_params.autoEncoder.encoder(torch.unsqueeze(pts,1))
seedCode = network_params.autoEncoder.encoder(pts.transpose(2,1))
#network_params.noise.normal_()
#addedNoise = Variable(0.5*network_params.noise[:seedCode.size()[0],:])
#seedCode += addedNoise
recon = network_params.autoEncoder.decoder1(seedCode)
#recon = network_params.autoEncoder.decoder2(pts,recon,neighborsMatrix,numNeighbors,weightMatrix,network_params.testEnergyWeight)
objInstance = Obj.Obj("dummy.obj")
objInstance.setVertices(recon.cpu().detach().numpy()[0])
objInstance.setFaces(faces)
#dumpSamples(misc_variables.sampleDir,selectedSamples,p_pts=original_parent)
objInstance.saveAs(os.path.join(misc_variables.reconDir,str(rounds) + "_" + names.split(".")[0] + "_recon.obj"))
stepSize = training_params.stepsize
        # NOTE: the adaptive step-size variables below are declared but never
        # updated in this script; kept as-is for reference.
        stepsizeMin = 0.001
stepsizeMax = 0.1
stepsizeInc = 1.02
stepsizeDec = 0.98
targetAcceptanceRate = 0.7
avgAcceptanceSlowness = 0.9
avgAcceptance = 0.0
burnin = 0
code = seedCode.clone().detach()
from datetime import datetime
import time
start = datetime.now()
for i in range(training_params.hmcEpochs):
print(i)
newCode,energy,accept = hmc(code,energy_fn,stepSize=stepSize,decoder=network_params.autoEncoder)
newSample = network_params.autoEncoder.decoder1(newCode)
network_params.autoEncoder.zero_grad()
code = newCode.clone().detach()
if network_params.dims == 2:
z = torch.zeros(1,network_params.numPoints,1).float().cuda()
newSample = torch.cat((newSample,z),2)
if accept>0: # and energy<hmcThresh:
print("accepted")
energies.append(energy/network_params.testEnergyWeight.item())
samples.append(newSample.cpu().detach().numpy()[0])
end = datetime.now()
print("HMC Time:",(end-start).seconds)
if len(energies) == 0:
continue
print("min energy:",min(energies))
if len(samples)==0:
continue
start = datetime.now()
selectedSamples = np.array(samples)
print(names)
dumpSamples(misc_variables.sampleDir,selectedSamples)
    for parameter in network_params.autoEncoder.decoder1.parameters():
        # NOTE: this repeats the freezing done at the top of the function;
        # if the intent was to re-enable training afterwards, this should
        # presumably be True.
        parameter.requires_grad = False
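
# --- Editor's hedged sketch (not part of the original file) ---
# A minimal Metropolis-adjusted leapfrog (HMC) step on an arbitrary energy,
# illustrating what Leapfrog.Leapfrog presumably does internally. Uses only
# torch; the function name and toy energy are hypothetical.
def _toy_hmc_step(z0, energy, step=0.1, n_steps=10):
    def grad(z):
        z = z.detach().requires_grad_(True)
        g, = torch.autograd.grad(energy(z), z)
        return g
    z = z0.detach()
    p = torch.randn_like(z)
    h0 = energy(z) + 0.5*(p*p).sum()          # initial Hamiltonian
    p = p - 0.5*step*grad(z)                  # half kick
    for i in range(n_steps):
        z = z + step*p                        # drift
        if i < n_steps - 1:
            p = p - step*grad(z)              # full kick
    p = p - 0.5*step*grad(z)                  # final half kick
    h1 = energy(z) + 0.5*(p*p).sum()
    accept = torch.rand(()) < torch.exp(h0 - h1)   # Metropolis correction
    return (z, True) if accept else (z0, False)

# e.g. sampling a quadratic bowl:
# z, _ = _toy_hmc_step(torch.zeros(2), lambda x: 0.5*(x*x).sum())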
|
{"hexsha": "c888d8092648ae639b43970894029ac72192f17a", "size": 5963, "ext": "py", "lang": "Python", "max_stars_repo_path": "Src/latentSpaceExplore_VanillaHMC.py", "max_stars_repo_name": "sanjeevmk/GLASS", "max_stars_repo_head_hexsha": "91c0954eab87d25d4866fea5c338f79fbca4f79e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-22T17:36:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T05:03:39.000Z", "max_issues_repo_path": "Src/latentSpaceExplore_VanillaHMC.py", "max_issues_repo_name": "sanjeevmk/glass", "max_issues_repo_head_hexsha": "91c0954eab87d25d4866fea5c338f79fbca4f79e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Src/latentSpaceExplore_VanillaHMC.py", "max_forks_repo_name": "sanjeevmk/glass", "max_forks_repo_head_hexsha": "91c0954eab87d25d4866fea5c338f79fbca4f79e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.8345864662, "max_line_length": 149, "alphanum_fraction": 0.6837162502, "include": true, "reason": "import numpy", "num_tokens": 1350}
|
[STATEMENT]
lemma zfact_iso_bij:
"bij_betw (zfact_iso n) {..<n} (carrier (ZFact (int n)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bij_betw (zfact_iso n) {..<n} (carrier (ZFact (int n)))
[PROOF STEP]
using bij_betw_def zfact_iso_inj zfact_iso_ran
[PROOF STATE]
proof (prove)
using this:
bij_betw ?f ?A ?B = (inj_on ?f ?A \<and> ?f ` ?A = ?B)
inj_on (zfact_iso n) {..<n}
zfact_iso n ` {..<n} = carrier (ZFact (int n))
goal (1 subgoal):
1. bij_betw (zfact_iso n) {..<n} (carrier (ZFact (int n)))
[PROOF STEP]
by blast
|
{"llama_tokens": 262, "file": "Finite_Fields_Ring_Characteristic", "length": 2}
|
# coding: utf-8
__author__ = 'Alain Lichnewsky'
__license__ = 'MIT License'
__version__ = '1.0'
# (C) A.Lichnewsky, 2018, 2020
#
# My own library organization (TBD: clean up ?)
import sys
import traceback
sys.path.append("pylib")
from UnitTest import *
# Common toolkit imports
import numpy as NP
import numpy.random as RAND
import scipy.stats as STATS
from scipy import sparse
from scipy import linalg
# Using scikit-learn
import sklearn as SKL
from sklearn import linear_model, model_selection
from sklearn import ensemble, tree, discriminant_analysis, svm, naive_bayes
from sklearn import neighbors
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder
# Using pandas
import pandas as PAN
# To plot pretty figures
import matplotlib as MPL
import matplotlib.pyplot as PLT
import seaborn as SNS
# Python programming
from itertools import cycle
import time as TIME
from time import time
from enum import Enum
from string import ascii_uppercase
import basicUtils as BU
from IPython.display import display
from basicDataCTE import dataModel
import basicDataCTE as BCTE
import lib.utilities as LIBU
#
# ----------------------------------------
# TEST FUNCTIONS
# ++++++++++++++++++++++++++++++++++++++++
#
#
#
# ----------------------------------------
# TEST of Dataframe normalization functions
# ++++++++++++++++++++++++++++++++++++++++
#
def test1():
tframe = PAN.DataFrame([
[12, True, "aa", "large" , 12.0 ],
[1, True, "ab", "large", NP.NaN],
[10, False, NP.NaN, "avg", 34],
[20, True, 12, "avg", -1],
[21, True, 12, "large", NP.NaN],
[22, True, 12, "avg", -23],
],
columns=("vals", "truth", "mixed", "style", "valNAN")
)
print("Test data frame:")
print(tframe)
print("\nTest data frame info:")
tframe.info()
print("\nTest data frame (verbose) info:")
tframe.info(verbose=True, memory_usage='deep', null_counts=True)
print("\nDescription:")
print(tframe.describe(include='all', percentiles= ( 0.5, 1)))
print("\nDisplay:")
display(tframe)
# ----------------------------------------
# Check multiple masking in views
#
display(tframe.loc[:,"mixed"])
mask1 = tframe.loc[:,"mixed"].notna()
display(mask1)
print("after droping all nas")
tframe1 = tframe.dropna(axis="index")
display(tframe1)
print("after droping nas in col mixed and valNAN")
tframe1 = tframe.dropna(axis="index", subset=("mixed","valNAN"))
display(tframe1)
print("after sequential droping nas in col mixed and valNAN")
tframe1 = tframe.copy()
tframe1.dropna(axis="index", subset=("valNAN",), inplace=True)
tframe1.dropna(axis="index", subset=("mixed",), inplace=True)
display(tframe1)
print("Check that original frame not affected")
display(tframe)
# ----------------------------------------
# Back to testing
#
print("\nPresenting in a dataModel:")
myModel = dataModel(tframe)
myModel.showNulls()
print("\nNormalizing")
print("\nKeep 3 rows, 2 cols")
myModel.normalize(rowsMask=NP.array([True, True, False, False, False, True]),
cols=("truth","mixed"))
myModel.DFApply(print)
print("\nKeep all rows, 2 cols")
myModel.normalize( cols=("truth","mixed"), colNorm={"truth": {} })
myModel.DFApply(print)
print("\nKeep all rows, all cols")
myModel.normalize( colNorm={ "truth": {"OneHot" : True,
"ColInsert":("NEW-Truth-1","NEW-Truth-2") },
"style": {"Category": True, "InPlace" : False ,
"ColInsert":"NEW-Style",
"values": ("large","avg","small","bizar","zarbi")},
"vals": {"MeanVar" : True, "ColInsert":"NEW-Vals"},
"mixed": {"MissingInput": {"dropNA" : True,
"InPlace" : True}},
"valNAN": {"MissingInput": {"avgNA" : True,
"InPlace" : True}}
})
myModel.DFApply(print, which=dataModel.DM.NORM)
myModel.normalize( add=True, colNorm={
"NEW-Style": {
"OneHot" : True,
"ColInsert":("NEW-Style-1","NEW-Style-2","NEW-Style-3"),
"values":(0,1,2)
}})
myModel.DFApply(print, which=dataModel.DM.NORM)
print("\nTackle NaNs")
myModel.normalize( cols=("vals","valNAN"),
colNorm={"vals":{ "MeanVar" : True,
"ColInsert":"NEW-Vals"},
"valNAN" :{ "MissingInput": {"dropNA" : True,
"InPlace" : True},
"MeanVar" : True,
"ColInsert":"NEW-Vals"}
})
print("Normalisaton by applying a user provided function")
def see(*args,**kwargs):
" This function allows to understand how it is called... "
print("In function see, args=", args,"\ttype(args)=",type(args),
"\tkwargs=",**kwargs)
return [ str(x)[0] for x in args[0]]
def seemap( *args,**kwargs):
" This function allows to understand how it is called... "
print("In function seemap, args=", args,"\ttype(args)=",type(args),
"\tkwargs=",**kwargs)
return str(args[0])[0]
def seemap2( arg,**kwargs):
" This function has a single argument, simpler to use in practice "
print("In function seemap2, arg=", arg,"\ttype(arg)=",type(arg),
"\tkwargs=",**kwargs)
return str(arg)[0]
myModel.normalize( colNorm={ "mixed": {"Apply":seemap2,
"Map": True,
"ColInsert":"NEW-mixed"},
"style": {"Apply":see,
"ColInsert":"NEW-style"}
})
print("Original dataframe")
myModel.DFApply(print)
print("Normalized dataframe")
myModel.DFApply(print, which = dataModel.DM.NORM)
myModel.normalize( colNorm={ "mixed": {"Apply":seemap,
"Map": True,
"ColInsert":"NEW-mixed"},
"style": {"Apply":see,
"ColInsert":"NEW-style"}
})
print("Normalized dataframe")
myModel.DFApply(print, which = dataModel.DM.NORM)
print(id (myModel.getFrame()))
print(id (myModel.getFrame(which= dataModel.DM.NORM)))
print(id (myModel.getFrame(copy=True)))
print(id (myModel.getFrame(which= dataModel.DM.NORM,copy=True )))
print(id (myModel.getFrame()))
print(id (myModel.getFrame(which= dataModel.DM.NORM)))
print(id (myModel.getFrame(copy=True)))
print(id (myModel.getFrame(which= dataModel.DM.NORM,copy=True )))
# ----------------------------------------
# Check multiple masking in views
#
mask1 = tframe["mixed"] == NP.NaN
tframe1 = tframe[~mask1]
display(tframe1)
#
# Experiment with FunctionTransformer:
# ..... not very useful as far as I understand
#
from sklearn.preprocessing import FunctionTransformer
def see(*args,**kwargs):
print("In function see, args=", *args, "\tkwargs=",**kwargs)
return 1
transformer = FunctionTransformer(see)
r = transformer.transform(tframe.loc[:,["vals","truth"]])
print(r)
#
# Outlier detection
#
N=5
M=10
zmat = PAN.DataFrame(NP.ones((M,N)))
zmat.loc[0,3] = 10000
zmat.loc[2,1] = 10000
zmat.loc[2,0] = 10000
zmat.loc[8,2] = 20000
zmat.loc[8,4] = 20000
zmat.loc[8,3] = 20000
zmat.loc[M-1,N-1] = 30000
zframe =dataModel( zmat)
print(zframe.getFrame().shape)
display(zframe)
display(zframe.outlier_detect())
display(zframe.outlier_detect(showCols=True))
display(zframe.outlier_detect(showCols=True,cols=(2,3,4)))
display(zframe.outlierLocate(n=4,showCols=True,cols=(2,3,4)))
#
# ----------------------------------------
# TEST Frame
# ++++++++++++++++++++++++++++++++++++++++
#
class DModelTest(ALTestFrame):
def runTest(self):
print ("IN DModelTest")
#executes any member which name starts with test, don't bother
#calling
def testOK(self):
ltest = (testInOut( () , out=None ),
)
self.applyTestList(ltest, test1,redirect=True)
def testFail(self):
ltest = (
testInOut( () , out=(), exception=TypeError),
)
self.applyTestListFail(ltest,
1,
redirect=True)
class GraphicTest(ALTestFrameGraphics):
""" Here we perform test of Seaborn features and of functions derived from
them; many tests inspired from Seaborn manual
"""
def start(self):
args = {}
if "--wait" in arguments:
print(f"arguments={arguments}")
if "--wait" in arguments and (arguments["--wait"] is not None ):
args["pause"] = int(arguments["--wait"])
self._start(**args)
def mkDF(addCat=None,**kwargs):
""" Make a dataframe of floats
"""
print(f'In mkDF arguments:{arguments}')
### pandas.DataFrame.apply: returns a <class 'pandas.core.series.Series'>
### w/o .info method etc
### Therefore applymap is used
def strOrNan(x):
if x < -0.5:
return NP.nan
else:
return str(x)
GraphicTest.randseed=981 # make output deterministic
def myRandom():
"This is my deterministic random function, good enough for generating test"
GraphicTest.randseed = (GraphicTest.randseed+320)%1024
return float(GraphicTest.randseed)/512 - 1.0
od = {}
LIBU.setDefaults(od, optDict=kwargs, defaultDict={'nc' : 5, 'nl':8,
'ai':1,'brand':0.1 })
nc,nl,ai,brand = list(map(lambda x: od[x], ("nc","nl","ai","brand")))
print(f"parms = {nc,nl,ai,brand}")
print(od)
array = [ ai*i + brand * myRandom() for i in range(0,nc*nl)]
npA = NP.array(array).reshape((nl,nc))
df = PAN.DataFrame( npA,
index = [ f"row{i:03}" for i in range(1,nl+1)],
columns= [ f"col{i:03}" for i in range(1,nc+1)]
)
if addCat:
print("Miaou (Meow ! )")
if "modulo" in kwargs and kwargs["modulo"]:
imod= kwargs["modulo"]
df.loc[:,"catCol"] = [ f"Meow{(i%imod):03}" for i in range(1,nl+1)]
else:
df.loc[:,"catCol"] = [ f"Meow{i:03}" for i in range(1,nl+1)]
if "modulo" in kwargs and kwargs["modulo"]:
imod= kwargs["modulo"]
df.loc["catRow",:nc] = [ f"Miaou{(i%imod):03}" for i in range(1,nc+1)]
else:
df.loc["catRow",:nc] = [ f"Miaou{i:03}" for i in range(1,nc+1)]
df.iloc[-1,-1] = "MIA-MEOW"
return df
def test_DF(self):
self.start()
df = GraphicTest.mkDF(False, nl=30, ai=0.02, brand=1, modulo=4)
print(f"test_DF\ndf={df}")
df.info()
# makes a figure with a subplot for each column showing an histogram of the column
# here it is important that all values are numerical
df.hist(bins=5,figsize=(10,8),grid=False)
self.show()
self.start()
# here we make a dataframe with categorical data
df = GraphicTest.mkDF(True, nl=30, ai=0.02, brand=1, modulo=4)
# add a second categorical column
df.loc[:,"modCol"]= df.iloc[:-1,:].loc[:,"col001"].apply (lambda x: int(2.78*x) % 3)
df.iloc[-1,-1] = "Added!"
df1 = df.iloc[:-1,:]
# plot a grid of histograms based on the 2 categorical cols of data in col001
g = SNS.FacetGrid(df1, col="catCol", row="modCol", margin_titles=True)
g.map(PLT.hist, "col001", color="green", bins=3, density=True);
# plot a grid of scatter plots based on the 2 categorical cols of data in col001
# could add hue and changing markers..
g = SNS.FacetGrid(df1, col="catCol", row="modCol", margin_titles=True)
g.map(PLT.scatter, "col001", "col002", color="green");
self.show()
self._setAdd("SNS.FacetGrid","PLT.hist","PLT.scatter","PAN.DataFrame.hist")
def test_DF1(self):
self.start()
# here we make a dataframe with categorical data
df = GraphicTest.mkDF(True, nl=30, ai=0.02, brand=1, modulo=4)
# add a second categorical column
df.loc[:,"modCol"]= df.iloc[:-1,:].loc[:,"col001"]\
.apply (lambda x: int(2.78*x) % 3)
df.iloc[-1,-1] = "Added!"
df1 = df.iloc[:-1,:]
print(df1)
df1.info()
df1.describe()
# Still find it difficult to understand what I have got:
SNS.set(font_scale=1)
g = SNS.catplot(x="col001", y="col002", col="catCol",
data=df1, saturation=.5,
kind="bar", ci=None, aspect=.6)
( g.set_axis_labels("", "Cat Col lab")
.set_xticklabels(["Cats", "Chats"])
.set_titles("{col_name} {col_var}")
.set(ylim=(0, 1))
.despine(left=True))
PLT.subplots_adjust(top=0.8)
g.fig.suptitle('How a test was made up');
self.show()
self._setAdd("SNS.set", "SNS.catplot", "PLT.subplots_adjust")
def test_1(self):
self.start()
df = GraphicTest.mkDF()
print(f"test_1\ndf={df}")
df.info()
PLT.figure()
SNS.boxplot(df)
PLT.figure()
SNS.boxplot(x=df.loc["row002"])
PLT.figure()
SNS.boxplot(y=df.loc[:,["col003", "col001"]])
PLT.figure()
SNS.boxplot(x=df)
PLT.figure()
SNS.boxplot(y=df)
self.show()
# This does not look great!
BCTE.doBoxPlots( df, ycols = ["col003", "col001"], xcols=["col002","col004"],
stripIt=True)
self.show()
self._setAdd("SNS.boxplot", "BCTE.doBoxPlots")
def test_2(self):
self._start()
df = GraphicTest.mkDF(True)
print(f"test_2\ndf={df}")
df.info()
PLT.figure()
## make boxplots based on columns
SNS.boxplot(data=df.iloc[:-1,:])
self.show()
self._setAdd("SNS.boxplot")
@unittest.expectedFailure
def test_2F(self):
# same as test_2, shows failure when there are non numerical data in column!
self._start()
df = GraphicTest.mkDF(True)
print(f"test_2\ndf={df}")
df.info()
PLT.figure()
## make boxplots based on columns
SNS.boxplot(data=df)
## make boxplots based on lines (transpose)
PLT.figure()
SNS.boxplot(data=df.drop('catCol',axis=1).transpose())
self.show()
self._setAdd("SNS.boxplot")
def test_3(self):
self._start()
df = GraphicTest.mkDF(False)
print(f"test_3\ndf={df}")
df.info()
snsDtl={"SNSBox+":{"xtickLabels":{"rotation":45}},
"SNSStrip+":{"xtickLabels":{"rotation":45}}}
PLT.figure()
BCTE.boxStripPlot(data=df,x=None,y="col001",
title="Single box and pts, vertical")
PLT.figure()
BCTE.boxStripPlot(data=df,x="col002",y=None,
title="Single box and pts, horizontal")
PLT.figure()
BCTE.boxStripPlot(data=df,x=None,y=None,
title="Looks OK with boxes and points", **snsDtl)
self.show()
self._setAdd("BCTE.boxStripPlot")
def test_4(self):
self._start()
df = GraphicTest.mkDF(True, multiple=True)
print(f"test_4\ndf={df}")
df.info()
BCTE. densityCatPlot(data=df,x=[f"col{i:03}" for i in range(1,6)], cats="catCol",
title = "Title",
legend = ("legend1", "legend2","legend3"))
self.show()
# Now, this tests the seaborn version, the following gives a 2D density
# https://seaborn.pydata.org/generated/seaborn.kdeplot.html
SNS.kdeplot(df.iloc[:,0:2])
self.show()
self._setAdd("BCTE. densityCatPlot", "SNS.kdeplot")
#
# ----------------------------------------
# LAUNCHING TESTS
# ++++++++++++++++++++++++++++++++++++++++
#
__cmdspecs__ = """"
testDataCTE : run tests under unittest environment
    Usage: testDataCTE [ <testcase> ] [ --wait=<wait> ] [ --parm=<parm>]
Options:
--parm=<parm> pass parameter
--wait=<wait> pass parameter
Here testcase is the optional testcase in the form of <class> or <class>.<method>
Please use the form --parm val and NOT --parm=val
"""
from docopt import docopt
if __name__ == '__main__':
# analyze command line args
arguments = docopt(__cmdspecs__)
ALTestFrameGraphics.processDocoptArgs(arguments)
# Now we need to remove docopt argv arguments which unittest.main() cannot handle
print ("Launching test with unittest package/framework")
r= unittest.main()
print ("RESULT=", r)
# ----------------------------------------
# Specializing tests
# ++++++++++++++++++++++++++++++++++++++++
#
# Use syntax: <python>|<script> <class>[.<method>]
# eg.
# python3 ../source/lib/testDataCTE.py GraphicTest.test_boxplot
#
|
{"hexsha": "a767a9922f7c8c53b66021fce8b9643d7ee13bed", "size": 18857, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/lib/testDataCTE.py", "max_stars_repo_name": "AlainLich/COVID-Data", "max_stars_repo_head_hexsha": "43d7f950c86270bfe411af8bc899464f0599f48e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/lib/testDataCTE.py", "max_issues_repo_name": "AlainLich/COVID-Data", "max_issues_repo_head_hexsha": "43d7f950c86270bfe411af8bc899464f0599f48e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-05-16T07:29:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-29T10:04:17.000Z", "max_forks_repo_path": "source/lib/testDataCTE.py", "max_forks_repo_name": "AlainLich/COVID-Data", "max_forks_repo_head_hexsha": "43d7f950c86270bfe411af8bc899464f0599f48e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7334525939, "max_line_length": 94, "alphanum_fraction": 0.5075568754, "include": true, "reason": "import numpy,import scipy", "num_tokens": 4684}
|
############################################################
# joLinearFunction - operator constructors ################
############################################################
# FFT operators: joDFT
include("joLinearFunctionConstructors/joDFT.jl")
# DCT operators: joDCT
include("joLinearFunctionConstructors/joDCT.jl")
# NFFT operators: joNFFT
include("joLinearFunctionConstructors/joNFFT.jl")
# DWT operators: joDWT
include("joLinearFunctionConstructors/joDWT.jl")
# SWT operators: joSWT
include("joLinearFunctionConstructors/joSWT.jl")
# Romberg operator
include("joLinearFunctionConstructors/joRomberg.jl")
# CurveLab operators: joCurvelet2D joCurvelet2DnoFFT
include("joLinearFunctionConstructors/joCurvelet2D.jl")
include("joLinearFunctionConstructors/joCurvelet2DnoFFT.jl")
# Restriction operator
include("joLinearFunctionConstructors/joRestriction.jl")
# Mask operator
include("joLinearFunctionConstructors/joMask.jl")
# Padding/extension operators: joExtend
include("joLinearFunctionConstructors/joExtend.jl")
# Permutation operator: joPermutation
include("joLinearFunctionConstructors/joPermutation.jl")
# Outer product operator: joOuterProd
include("joLinearFunctionConstructors/joOuterProd.jl")
|
{"hexsha": "e154d9e09c18b3a7e89e8f6ebd49a51e06aeec8e", "size": 1227, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/joLinearFunctionConstructors.jl", "max_stars_repo_name": "slimgroup/JOLI.jl", "max_stars_repo_head_hexsha": "c1f669e34353394fd9a4711dc0038cf697bc0ad3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2017-02-28T21:50:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-16T16:03:45.000Z", "max_issues_repo_path": "src/joLinearFunctionConstructors.jl", "max_issues_repo_name": "slimgroup/JOLI.jl", "max_issues_repo_head_hexsha": "c1f669e34353394fd9a4711dc0038cf697bc0ad3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-08-03T21:02:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-31T20:17:27.000Z", "max_forks_repo_path": "src/joLinearFunctionConstructors.jl", "max_forks_repo_name": "slimgroup/JOLI.jl", "max_forks_repo_head_hexsha": "c1f669e34353394fd9a4711dc0038cf697bc0ad3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-11-11T02:00:53.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-05T16:06:20.000Z", "avg_line_length": 29.2142857143, "max_line_length": 60, "alphanum_fraction": 0.739201304, "num_tokens": 302}
|
[STATEMENT]
lemma sort_conv_fold:
"sort xs = fold insort xs []"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sort xs = fold insort xs []
[PROOF STEP]
by (rule sort_key_conv_fold) simp
|
{"llama_tokens": 79, "file": null, "length": 1}
|
from ..utils import unique_row_count
from numpy import (array, atleast_1d, digitize, empty, floor, linspace, log2,
histogramdd, hstack, ndarray, sqrt, vstack)
from scipy.stats import skew
__all__ = ['hist', 'symbolic', 'doanes_rule']
def doanes_rule(x):
"""Convenience function for choosing an optimal number of bins using Doane's Rule.
Parameters
----------
x : numpy.ndarray or list of floats
Data to be binned.
Returns
-------
n_bins : int
"""
if not isinstance(x, ndarray):
x = array(x)
n = x.shape[0]
g1 = atleast_1d(skew(x))
sg1 = sqrt(6 * (n - 2) / ((n + 1) * (n + 3)))
    return int(min(floor(1 + log2(n) + log2(1 + abs(g1)/sg1))))  # cast to int as documented
def hist(n_bins, rng, *args):
"""Convenience function for histogramming N-dimentional data
Parameters
----------
n_bins : int
Number of bins.
rng : list of lists
List of min/max values to bin data over.
args : array_like, shape = (n_samples, )
        Data to histogram.
Returns
-------
    counts : numpy.ndarray, shape = (n_bins**n_dims, )
        Flattened histogram counts.
"""
data = vstack((args)).T
if n_bins is None:
n_bins = doanes_rule(data)
return histogramdd(data, bins=n_bins, range=rng)[0].flatten()
def symbolic(n_bins, rng, *args):
"""Symbolic binning of data
Parameters
----------
rng : list of lists
        List of min/max values for each dimension.
n_bins : int
Number of bins to use.
args : array_like, shape = (n_samples, )
Data of which to calculate entropy. Each array must have the same
number of samples.
Returns
-------
counts : float
"""
labels = empty(0).reshape(args[0].shape[0], 0)
if n_bins is None:
n_bins = min(map(doanes_rule, args))
for i, arg in enumerate(args):
partitions = linspace(rng[i][0], rng[i][1], n_bins + 1)
label = digitize(arg, partitions).reshape(-1, 1)
labels = hstack((labels, label))
return unique_row_count(labels)
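
# --- Hedged helper (added by the editor, not part of the original module) ---
# Turning `hist` counts into a plug-in Shannon entropy estimate (in nats);
# only numpy is assumed, consistent with the imports above.
def _entropy_from_counts(counts):
    from numpy import log
    p = counts / counts.sum()
    p = p[p > 0]
    return float(-(p * log(p)).sum())

# e.g. H = _entropy_from_counts(hist(8, [[0, 1], [0, 1]], x, y))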
|
{"hexsha": "34345a54d5bfdc2dc09994ef452f4bc84e3756fe", "size": 2055, "ext": "py", "lang": "Python", "max_stars_repo_path": "mdentropy/core/binning.py", "max_stars_repo_name": "msmbuilder/mdentropy", "max_stars_repo_head_hexsha": "82d616ddffe11283052b2d870c3b0274736a173c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2017-10-03T00:40:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T14:33:56.000Z", "max_issues_repo_path": "mdentropy/core/binning.py", "max_issues_repo_name": "shozebhaider/mdentropy", "max_issues_repo_head_hexsha": "82d616ddffe11283052b2d870c3b0274736a173c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 46, "max_issues_repo_issues_event_min_datetime": "2016-04-01T15:44:22.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-13T20:04:16.000Z", "max_forks_repo_path": "mdentropy/core/binning.py", "max_forks_repo_name": "shozebhaider/mdentropy", "max_forks_repo_head_hexsha": "82d616ddffe11283052b2d870c3b0274736a173c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2016-03-28T21:45:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T13:21:09.000Z", "avg_line_length": 24.1764705882, "max_line_length": 86, "alphanum_fraction": 0.5902676399, "include": true, "reason": "from numpy,from scipy", "num_tokens": 555}
|
using Plots, LaTeXStrings, Measures; pyplot()
a, c, m = 69069, 1, 2^32
next(z) = (a*z + c) % m
N = 10^6
data = Array{Float64,1}(undef, N)
x = 808
for i in 1:N
data[i] = x/m
global x = next(x)
end
p1 = scatter(1:1000, data[1:1000],
c=:blue, m=4, msw=0, xlabel=L"n", ylabel=L"x_n")
p2 = histogram(data, bins=50, normed=:true,
ylims=(0,1.1), xlabel="Support", ylabel="Density")
plot(p1, p2, size=(800, 400), legend=:none, margin = 5mm)
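
# --- Hedged check (added, not part of the original script): for m = 2^32 the
# Hull-Dobell theorem says the LCG has full period m when c is odd and
# (a - 1) is divisible by 4; both hold for the constants above.
@assert isodd(c) && (a - 1) % 4 == 0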
|
{"hexsha": "4383542e57dd7535966d9ed17b445124fb226e29", "size": 454, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "1_chapter/lcg.jl", "max_stars_repo_name": "Yoshinobu-Ishizaki/StatsWithJuliaBook", "max_stars_repo_head_hexsha": "4c704e96d87b91e680122a6b6fa2d2083c70ea88", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 988, "max_stars_repo_stars_event_min_datetime": "2018-06-21T00:44:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T01:37:47.000Z", "max_issues_repo_path": "1_chapter/lcg.jl", "max_issues_repo_name": "Yoshinobu-Ishizaki/StatsWithJuliaBook", "max_issues_repo_head_hexsha": "4c704e96d87b91e680122a6b6fa2d2083c70ea88", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 41, "max_issues_repo_issues_event_min_datetime": "2019-02-20T05:06:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-23T16:53:08.000Z", "max_forks_repo_path": "1_chapter/lcg.jl", "max_forks_repo_name": "Yoshinobu-Ishizaki/StatsWithJuliaBook", "max_forks_repo_head_hexsha": "4c704e96d87b91e680122a6b6fa2d2083c70ea88", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 264, "max_forks_repo_forks_event_min_datetime": "2018-07-31T03:11:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T16:12:13.000Z", "avg_line_length": 23.8947368421, "max_line_length": 57, "alphanum_fraction": 0.5991189427, "num_tokens": 191}
|
from pymongo import MongoClient, TEXT
import argparse
import numpy as np
parser = argparse.ArgumentParser(description='No description')
parser.add_argument('--embeddings', type=str, help='embeddings txt file', required=True)
parser.add_argument('--port', type=int, help='local mongo instance port', required=True)
args = parser.parse_args()
client = MongoClient('localhost', args.port)
db = client.word_embeddings_300d
collection = db.embeddings
with open(args.embeddings, 'r') as file:
for line in file:
data = line.split()
word = data[0]
        embedding = [float(x) for x in data[1:]]  # store numbers rather than raw strings
collection.insert_one({
'word': word,
'embedding': embedding
})
print(f'Inserted documents: {collection.count_documents({})}')
print(f'Creating text index on "word" key...')
collection.create_index([('word', TEXT)])
print(f'Index creation finished.')
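
# --- Hedged variant (added by the editor, not part of the original script) ---
# For large embedding files, batching writes with insert_many is typically far
# faster than per-document insert_one; the chunk size is an arbitrary choice.
def batched_insert(path, coll, chunk=10000):
    buf = []
    with open(path, 'r') as fh:
        for line in fh:
            word, *vec = line.split()
            buf.append({'word': word, 'embedding': [float(v) for v in vec]})
            if len(buf) >= chunk:
                coll.insert_many(buf)
                buf = []
    if buf:
        coll.insert_many(buf)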
|
{"hexsha": "37e164e408e2f4a03cfc8d4e83df99397e281204", "size": 891, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/create_embeddings_database.py", "max_stars_repo_name": "lffloyd/reddit-topic-modelling", "max_stars_repo_head_hexsha": "b34d7095cdd3ee66dfd95f8319f078449213e26f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-15T03:52:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T03:52:50.000Z", "max_issues_repo_path": "scripts/create_embeddings_database.py", "max_issues_repo_name": "lffloyd/reddit-topic-modelling", "max_issues_repo_head_hexsha": "b34d7095cdd3ee66dfd95f8319f078449213e26f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2021-04-26T22:31:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-02T18:45:07.000Z", "max_forks_repo_path": "scripts/create_embeddings_database.py", "max_forks_repo_name": "lffloyd/reddit-topic-modelling", "max_forks_repo_head_hexsha": "b34d7095cdd3ee66dfd95f8319f078449213e26f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.724137931, "max_line_length": 88, "alphanum_fraction": 0.6924803591, "include": true, "reason": "import numpy", "num_tokens": 192}
|
%
% LibQPEP: A Library for Globally Optimal Solving Quadratic Pose Estimation Problems (QPEPs),
% It also gives highly accurate uncertainty description of the solutions.
%
%
% Article:
% Wu, J., Zheng, Y., Gao, Z., Jiang, Y., Hu, X., Zhu, Y., Jiao, J., Liu, M. (2020)
% Quadratic Pose Estimation Problems: Globally Optimal Solutions,
% Solvability/Observability Analysis and Uncertainty Description.
% IEEE Transactions on Robotics.
% https://doi.org/10.1109/TRO.2022.3155880
%
%
% Authors: Jin Wu and Ming Liu
% Affiliation: Hong Kong University of Science and Technology (HKUST)
% Emails: jin_wu_uestc@hotmail.com; eelium@ust.hk
% Websites: https://zarathustr.github.io
% https://ram-lab.com
%
%
% test_stewart.m: The QPEP illustration of forward kinematics of a hexapod
% Stewart platform
clear all
close all
clc
if(verLessThan('matlab', '8.0.0'))
error('The MATLAB version is too old to be supported.');
end
format long g
addpath('func_files');
addpath('solvers');
addpath('utils');
addpath('homotopy');
R0 = angle2dcm(-8 * pi / 180, 12 * pi / 180, -15 * pi / 180, 'XYZ');
q0 = dcm2quat(R0).';
if(q0(1) < 0)
q0 = - q0;
end
t0 = 1e-2 * randn(3, 1);
X0 = inv([R0, t0;
zeros(1, 3), 1]);
base = [
0.1448888739433600, 1, 0.0388228567653781;
-0.0388228567653781, 1, 0.1448888739433600;
-0.1060660171779820, 1, 0.1060660171779820;
-0.1060660171779820, 1, -0.1060660171779820;
-0.0388228567653781, 1, -0.1448888739433600;
0.1448888739433600, 1, -0.0388228567653781;
].';
plat = [
0.0707106781186548, 1, 0.07071067811865480;
0.0258819045102521, 1, 0.09659258262890680;
-0.0965925826289068, 1, 0.02588190451025210;
-0.0965925826289068, 1, -0.0258819045102521;
0.0258819045102521, 1, -0.0965925826289068;
0.0707106781186548, 1, -0.0707106781186548;
].';
conv = [
1, 0, 0;
0, 0, 1;
0, -1, 0;
];
height = 0.15;
base = conv * base;
plat = conv * plat;
plat(3, :) = plat(3, :) + height;
plat00 = plat;
base00 = base;
leg0 = zeros(6, 1);
plat0 = zeros(3, 6);
for i = 1 : 6
plat0(:, i) = R0 * plat(:, i) + t0;
res = base(:, i) - plat0(:, i);
leg0(i) = sqrt(res.' * res);
end
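% --- Hedged sanity check (added by the editor, not part of the original
% script): each leg imposes || base_i - (R0*plat_i + t0) || = leg_i, the
% quadratic constraint that the symbolic system `eqs` below encodes; the
% residuals printed here should be ~0 up to floating point.
for i = 1 : 6
    fprintf('leg %d residual: %g\n', i, ...
        norm(base(:, i) - (R0 * plat(:, i) + t0)) - leg0(i));
end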
colors = linspecer(8);
figure(1);
subplot(1, 2, 1);
plot3(base(1, :), base(2, :), base(3, :), 'LineStyle', 'None', 'Marker', '.', 'MarkerSize', 10); hold on
plot3(base(1, 1 : 6), base(2, 1 : 6), base(3, 1 : 6), 'LineStyle', '-', 'LineWidth', 2, 'Marker', 'None'); hold on
plot3([base(1, 1), base(1, 6)], [base(2, 1), base(2, 6)], [base(3, 1), base(3, 6)], 'LineStyle', '-', 'LineWidth', 2, 'Marker', 'None'); hold on
plot3(plat0(1, :), plat0(2, :), plat0(3, :), 'LineStyle', 'None', 'Marker', '.', 'MarkerSize', 10); hold on
plot3(plat0(1, 1 : 6), plat0(2, 1 : 6), plat0(3, 1 : 6), 'LineStyle', '-', 'LineWidth', 2, 'Marker', 'None'); hold on
plot3([plat0(1, 1), plat0(1, 6)], [plat0(2, 1), plat0(2, 6)], [plat0(3, 1), plat0(3, 6)], 'LineStyle', '-', 'LineWidth', 2, 'Marker', 'None'); hold on
for i = 1 : 6
plot3([base(1, i), plat0(1, i)], [base(2, i), plat0(2, i)], [base(3, i), plat0(3, i)], 'LineStyle', '-', 'LineWidth', 4, 'Marker', 'None'); hold on
end
hold on
fill3(base(1, :), base(2, :), base(3, :), colors(8, :)); hold on
fill3(plat0(1, :), plat0(2, :), plat0(3, :), colors(5, :)); hold off
grid on
grid minor
title('Reference Result', 'Interpreter', 'LaTeX', 'FontSize', 14);
base_ = base.';
plat_ = plat.';
counter = 1;
base = [];
plat = [];
for i = 1 : 6
base = [base; base_(i, :)];
plat = [plat; plat_(i, :)];
if(i < 6)
tmp = (base_(i, :) + base_(i + 1, :)) / 2;
base = [base; tmp];
tmp = (plat_(i, :) + plat_(i + 1, :)) / 2;
plat = [plat; tmp];
end
end
base = base.';
plat = plat.';
len = size(base, 2);
leg0 = zeros(len, 1);
plat0 = zeros(3, len);
for i = 1 : len
plat0(:, i) = R0 * plat(:, i) + t0;
res = base(:, i) - plat0(:, i);
leg0(i) = sqrt(res.' * res);
end
syms q0 q1 q2 q3
q = [q0; q1; q2; q3];
syms t1 t2 t3;
t = [t1; t2; t3];
R = q2R(q);
syms r1 r2 r3 r4
r = [r1; r2; r3; r4];
rr = r(1 : 3);
eqs = sym(zeros(len + 3, 1));
for i = 1 : len
eqs(i) = base(:, i).' * base(:, i) - 2 * plat(:, i).' * R.' * base(:, i) + ...
plat(:, i).' * plat(:, i) - 2 * base(:, i).' * t + 2 * plat(:, i).' * rr + r4 - leg0(i)^2;
end
eqs(len + 1) = q.' * q - 1;
eqs(len + 2) = rr.' * rr - r4;
eqs(len + 3) = t.' * t - r4;
eqs = expand(eqs);
x = [q; t; r];
H = expand(jacobian(eqs, x).' * eqs);
assumeAlso(q.' * q == 1);
assumeAlso(rr.' * rr - r4 == 0);
assumeAlso(t.' * t - r4 == 0);
eq = vpa(expand(simplify(H)), 32);
eq_ = eq(1 : 4);
G = jacobian(eq(5 : 11), [t; r]);
ss = - pinv(G) * (eq(5 : 11) - G * [t; r]);
ss = neglect_tiny_terms(ss, 32);
ss = ss.';
eq_ = eq(1 : 4);
eq_ = subs(eq_, t1, ss(1));
eq_ = subs(eq_, t2, ss(2));
eq_ = subs(eq_, t3, ss(3));
eq_ = subs(eq_, r1, ss(4));
eq_ = subs(eq_, r2, ss(5));
eq_ = subs(eq_, r3, ss(6));
eq_ = subs(eq_, r4, ss(7));
t_func = matlabFunction([ss(1); ss(2); ss(3)], 'Vars', {q});
r_func = matlabFunction([ss(4); ss(5); ss(6)], 'Vars', {q});
r4_func = matlabFunction(ss(7), 'Vars', {q});
eq_ = vpa(expand(eval(eq_)), 32);
syms lambda
eq_ = [
neglect_tiny_terms(eq_, 32).';
q.' * q - 1;
]
eqs = [
eq_(1 : 4) + lambda * q;
eq_(5);
];
str = '';
for i = 1 : length(eqs)
str = strcat(str, sprintf(' PP{%d} = char(vpa(%%s, 32));', i));
end
str_ = sprintf(str, char(eqs(1)), ...
char(eqs(2)), ...
char(eqs(3)), ...
char(eqs(4)), ...
char(eqs(5)));
eval(str_);
[S, vars] = psolve(PP);
S = S.';
SS = S;
for i = 1 : length(vars)
if(strcmp(vars{i}, 'q0'))
SS(:, 1) = S(:, i);
elseif(strcmp(vars{i}, 'q1'))
SS(:, 2) = S(:, i);
elseif(strcmp(vars{i}, 'q2'))
SS(:, 3) = S(:, i);
elseif(strcmp(vars{i}, 'q3'))
SS(:, 4) = S(:, i);
elseif(strcmp(vars{i}, 'lambda'))
SS(:, 5) = S(:, i);
end
end
S = real(SS);
xs_ = S;
sols = SS.';
num = size(sols, 2);
sol = zeros(4, num);
ts = zeros(3, num);
Ls = 1e50 * ones(num, 1);
q_true = dcm2quat(R0).';
if(q_true(1) < 0)
q_true = - q_true;
end
for i = 1 : num
sol(:, i) = real(sols(1 : 4, i));
sol(:, i) = sol(:, i) ./ norm(sol(:, i));
if(sol(1, i) < 0)
sol(:, i) = - sol(:, i);
end
C = q2R(sol(:, i));
t = t_func(sol(:, i));
r4 = r4_func(sol(:, i));
ts(:, i) = t;
res = abs(q_true - sol(:, i));
loss = res.' * res;
Ls(i) = loss;
end
[~, idx] = sort(Ls);
q_ = sol(:, idx(1)).'
q_true_ = q_true.'
R_ = quat2dcm(sol(:, idx(1)).');
t_ = t0;
plat0 = zeros(3, 6);
for i = 1 : 6
plat0(:, i) = R_ * plat00(:, i) + t_;
end
base = base00;
subplot(1, 2, 2);
plot3(base(1, :), base(2, :), base(3, :), 'LineStyle', 'None', 'Marker', '.', 'MarkerSize', 10); hold on
plot3(base(1, 1 : 6), base(2, 1 : 6), base(3, 1 : 6), 'LineStyle', '-', 'LineWidth', 2, 'Marker', 'None'); hold on
plot3([base(1, 1), base(1, 6)], [base(2, 1), base(2, 6)], [base(3, 1), base(3, 6)], 'LineStyle', '-', 'LineWidth', 2, 'Marker', 'None'); hold on
plot3(plat0(1, :), plat0(2, :), plat0(3, :), 'LineStyle', 'None', 'Marker', '.', 'MarkerSize', 10); hold on
plot3(plat0(1, 1 : 6), plat0(2, 1 : 6), plat0(3, 1 : 6), 'LineStyle', '-', 'LineWidth', 2, 'Marker', 'None'); hold on
plot3([plat0(1, 1), plat0(1, 6)], [plat0(2, 1), plat0(2, 6)], [plat0(3, 1), plat0(3, 6)], 'LineStyle', '-', 'LineWidth', 2, 'Marker', 'None'); hold on
for i = 1 : 6
plot3([base(1, i), plat0(1, i)], [base(2, i), plat0(2, i)], [base(3, i), plat0(3, i)], 'LineStyle', '-', 'LineWidth', 4, 'Marker', 'None'); hold on
end
hold on
fill3(base(1, :), base(2, :), base(3, :), colors(8, :)); hold on
fill3(plat0(1, :), plat0(2, :), plat0(3, :), colors(5, :)); hold off
grid on
grid minor
title('QPEP Result', 'Interpreter', 'LaTeX', 'FontSize', 14);
if(~ispc())
set(gcf, 'Position', [634 780 1159 320])
end
|
{"author": "zarathustr", "repo": "LibQPEP", "sha": "99e5c23e746ace0bac4a86742c31db6fcf7297ba", "save_path": "github-repos/MATLAB/zarathustr-LibQPEP", "path": "github-repos/MATLAB/zarathustr-LibQPEP/LibQPEP-99e5c23e746ace0bac4a86742c31db6fcf7297ba/MATLAB/test_stewart.m"}
|
\section{Other Characteristics}
\label{sec:3_other}
{\bf Lifetime\ } We chose Mozilla to investigate the lifetime of performance bugs,
due to its convenient CVS query interface.
We consider a bug's life to have started when its buggy code
was first written. The 36 Mozilla bugs in our study
took 966 days on average to get discovered, and another 140 days on average
to be fixed.
For comparison, we randomly sampled 36 functional bugs from Mozilla.
These bugs took 252 days on average to be discovered, which is much shorter
than that of performance bugs in Mozilla.
These bugs took another 117
days on average to be fixed, which is similar to the time taken to fix
the performance bugs.
{\bf Location\ }
For each bug, we studied the location of its minimum unit of inefficiency.
Our first finding shows that
most performance bugs happen at call sites, and their fixes change the usage of function calls,
such as replacing old call sequences with new call sequences, conditionally or unconditionally skipping buggy functions or changing parameters, and so on.
For example, {\it Retrieve Unnecessary} (Figure~\ref{fig:Apache45464}), {\it Transparent Draw} (Figure~\ref{fig:Mozilla66461}),
{\it Intensive GC} (Figure~\ref{fig:Mozilla515287}),
{\it Bookmark All} (Figure~\ref{fig:Mozilla490742}), and
{\it Slow Fast-Lock} (Figure~\ref{fig:MySQL38941}) are all fixed by changing function-call usage.
This is probably because developers and compilers have already done a good job in optimizing code within each procedure.
Therefore, future work to detect,
diagnose and fix performance bugs should allocate more effort at call sites and procedure boundaries.
There are also 32 bugs not fixed by changing function-call usage.
These bugs mainly arise from two scenarios.
In one scenario, the buggy code unit itself does not directly waste performance.
Instead, its impact propagates to other places in the software and causes performance loss there.
For example, the {\it No Cache} (Figure~\ref{fig:MySQL26527})
bug happens when MySQL mistakenly does not allocate a cache.
This operation itself does not take time, but it causes performance loss later.
The second scenario is to optimize code units inside functions, like MySQL\#14637,
whose patch replaces byte-wise parsing with
four-byte-wise parsing to accelerate trimming blank characters from the end of a string.
Our second finding shows that
around three quarters of bugs are
located inside either an input-dependent loop or an input-event handler.
For example, the buggy code in Figure~\ref{fig:Mozilla515287}
is executed at every XHR completion.
The bug in Figure~\ref{fig:Mozilla66461} wastes performance
for every transparent image on a web page.
In addition, about half of the performance bugs involve I/O or
other time-consuming system calls.
There are a few bugs whose buggy code units only execute once or twice
during each program execution. For example, the Mozilla\#110555 bug wastes
performance while processing exactly two fixed-size
default configuration files,
userChrome.css and userContent.css, during the startup of a browser.
{\bf Correlation Among Categories}
Following previous empirical studies \citep{LiASID06}, we use a statistical
metric {\it lift} to study the correlation among characteristic categories.
The {\it lift} of category A and category B, denoted as {\it lift(AB)},
is calculated
as $\frac{P(AB)}{P(A)P(B)}$, where P(AB) is the probability of a bug belonging
to both categories A and B. When {\it lift(AB)} equals 1,
category A and category B are independent of each other.
When {\it lift(AB)} is greater than 1, categories A and B are
positively correlated: when a bug belongs to A, it likely
also belongs to B. The larger the {\it lift} is, the more positively A and B
are correlated.
When {\it lift(AB)} is smaller than 1, A and B are negatively
correlated: when a bug belongs to A, it likely does not belong to B.
The smaller the {\it lift} is, the more negatively A and B are correlated.
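As a worked example with purely illustrative numbers: if 20\% of the bugs
have the redundant root cause, 25\% are fixed by memoization, and 10\% fall
into both categories, then the lift is $\frac{0.10}{0.20 \times 0.25} = 2$,
i.e., a redundant bug is twice as likely as an average bug to be fixed by
memoization.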
Root cause categories are highly correlated with fix strategies.
Among all correlations, the redundant root cause and the memoization fix strategy are
the most positively correlated, with a lift of 3.54.
The wrong branch selection root cause is strongly correlated with the change condition
fix strategy with a 2.74 lift. The redundant root cause and the batch fix strategy are the third
most positively correlated pair with a 2.36 lift.
On the other hand, the wrong branch selection root cause has the most negative correlation
with in-place call change, memoization and batch bug-fix strategies.
Their lifts are all 0.
{\bf Server Bugs vs. Client Bugs}
Our study includes 41 bugs from server applications and 69 bugs from client
applications. To understand whether these two types of bugs have different
characteristics, we apply chi-square test
\citep{chisquared} to each
category listed in Table~\ref{tab:3_root}, Table~\ref{tab:3_intro}, Table~\ref{tab:3_exp} and Table~\ref{tab:3_fix}.
We choose 0.01 as the significance level of our chi-square test.
Under this setting, if we conclude that server and client bugs have different
probabilities of falling into a particular characteristic category,
this conclusion only has a 1\% probability to be wrong.
We find that, among all the categories listed in Table~\ref{tab:3_root}, Table~\ref{tab:3_intro}, Table~\ref{tab:3_exp} and Table~\ref{tab:3_fix},
only the synchronization issues category
is significantly different between server bugs
and client bugs ---
synchronization issues have caused 31.7\% of server bugs and only
4.3\% of client bugs.
|
{"hexsha": "ec0ee1d35b75391a23373e0b1ddd8de6a929082a", "size": 5632, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapter-study/8_other.tex", "max_stars_repo_name": "songlh/thesis", "max_stars_repo_head_hexsha": "d5820825a2e9e3c53de37f2925ea0d87b8b2c73b", "max_stars_repo_licenses": ["Artistic-1.0-Perl", "ClArtistic"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapter-study/8_other.tex", "max_issues_repo_name": "songlh/thesis", "max_issues_repo_head_hexsha": "d5820825a2e9e3c53de37f2925ea0d87b8b2c73b", "max_issues_repo_licenses": ["Artistic-1.0-Perl", "ClArtistic"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapter-study/8_other.tex", "max_forks_repo_name": "songlh/thesis", "max_forks_repo_head_hexsha": "d5820825a2e9e3c53de37f2925ea0d87b8b2c73b", "max_forks_repo_licenses": ["Artistic-1.0-Perl", "ClArtistic"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.6796116505, "max_line_length": 155, "alphanum_fraction": 0.7848011364, "num_tokens": 1310}
|
import cv2
import random
import numpy as np
IMG_WIDTH = 1200
IMG_HEIGHT = 800
WATERMARK_WIDTH = 256
WATERMARK_HEIGHT = 256
IMG_SIZE = IMG_HEIGHT * IMG_WIDTH
WATERMARK_SIZE = WATERMARK_HEIGHT * WATERMARK_WIDTH
KEY = 1001
THRESH = 75
def xor(x, y):
    # Binary XOR on pixel values: 255 if exactly one of x, y is nonzero, else 0.
    if (x == 0) != (y == 0):
        return 255
    return 0
def mean_neighbour(img, x, y):
    # Average intensity over the 3x3 neighbourhood centred on (x, y),
    # counting only the cells that lie inside the image bounds.
    val = 0
    num = 0
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            i = x + di
            j = y + dj
            if 0 <= i < IMG_HEIGHT and 0 <= j < IMG_WIDTH:
                val += img[i, j]
                num += 1
    return val / float(num)
og_img = cv2.imread('images/original_image.jpg', 0)    # host image, grayscale
watermark_img = cv2.imread('images/watermark.jpg', 0)  # watermark, grayscale
ret, watermark_img = cv2.threshold(watermark_img, 127, 255, cv2.THRESH_BINARY)
master_img = np.zeros((WATERMARK_WIDTH, WATERMARK_HEIGHT, 1), np.uint8)
owner_img = np.zeros((WATERMARK_WIDTH, WATERMARK_HEIGHT, 1), np.uint8)
# Seed the PRNG with the secret key so the same pixel sample can be
# regenerated later, then pick one host pixel per watermark pixel.
random.seed(a=KEY)
random_points = random.sample(range(IMG_SIZE), WATERMARK_SIZE)
i = 0
j = 0
for k in random_points:
    x = k // IMG_WIDTH
    y = k % IMG_WIDTH
    # Build the master share: white where the local mean around the
    # sampled pixel exceeds THRESH.
    if mean_neighbour(og_img, x, y) > THRESH:
        master_img[i, j] = 255
    j += 1
    if j == WATERMARK_WIDTH:
        j = 0
        i += 1
# Build the ownership share as master XOR watermark.
for i in range(0, WATERMARK_HEIGHT):
    for j in range(0, WATERMARK_WIDTH):
        owner_img[i, j] = xor(master_img[i, j], watermark_img[i, j])
cv2.imshow('M', master_img)
cv2.imshow('O', owner_img)
cv2.imwrite('images/master_img.jpg', master_img)
cv2.imwrite('images/owner_img.jpg', owner_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
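# Recovery sketch (illustrative, not part of the original workflow): since
# XOR is an involution, XOR-ing the master share with the ownership share
# reconstructs the binarised watermark from the in-memory arrays.
recovered = np.zeros((WATERMARK_HEIGHT, WATERMARK_WIDTH, 1), np.uint8)
for i in range(WATERMARK_HEIGHT):
    for j in range(WATERMARK_WIDTH):
        recovered[i, j] = xor(master_img[i, j], owner_img[i, j])
# `recovered` should now match the thresholded watermark_img; note that the
# shares written to disk above are JPEGs, so recovery from the saved files
# would be lossy.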
|
{"hexsha": "159f9b796deee599550ec149ac3c53a5435a1905", "size": 2658, "ext": "py", "lang": "Python", "max_stars_repo_path": "owernership_share_generator.py", "max_stars_repo_name": "Shikhar0051/Visual-Cryptography-for-Copyright-Protection", "max_stars_repo_head_hexsha": "9605b99cdae7c0c3ca398bf3d291cb5a6b7c622d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-11T16:39:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-11T16:39:38.000Z", "max_issues_repo_path": "owernership_share_generator.py", "max_issues_repo_name": "Shikhar0051/Visual-Cryptography-for-Copyright-Protection", "max_issues_repo_head_hexsha": "9605b99cdae7c0c3ca398bf3d291cb5a6b7c622d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "owernership_share_generator.py", "max_forks_repo_name": "Shikhar0051/Visual-Cryptography-for-Copyright-Protection", "max_forks_repo_head_hexsha": "9605b99cdae7c0c3ca398bf3d291cb5a6b7c622d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.3853211009, "max_line_length": 74, "alphanum_fraction": 0.5500376223, "include": true, "reason": "import numpy", "num_tokens": 930}
|
#!/usr/bin/env python
"""
This module is used for carrying out a simple Metropolis Monte Carlo simulation of Lennard Jones particles
"""
import numpy as np
import pint
ureg = pint.UnitRegistry()
Q_ = ureg.Quantity
class MCLJ:
"""
This module is used for carrying out a simple Metropolis Monte Carlo simulation of Lennard Jones particles
"""
def __init__(self):
self.R = Q_(0.001985875, "kcal/mol/K").magnitude
self.p_accept = 1
self.accept = False
self.trajectories = []
self.potential_energies = []
@property
def epsilon(self):
"""
Q_(0.238, "kcal/mol").magnitude for Argon
:return:
"""
try:
return self._epsilon
except AttributeError:
print("Set epsilon!")
@epsilon.setter
def epsilon(self, value):
if isinstance(value, pint.Quantity):
self._epsilon = value.magnitude
elif isinstance(value, float):
self._epsilon = Q_(value, "kcal/mol").magnitude
print("Default unit of epsilon, kcal/mol")
else:
print("Invalid epsilon", type(value))
@property
def sigma(self):
"""
Q_(3.405, ureg.angstrom).magnitude for Argon
:return:
"""
try:
return self._sigma
except AttributeError:
print("Set sigma!")
@sigma.setter
def sigma(self, value):
if isinstance(value, pint.Quantity):
self._sigma = value.magnitude
elif isinstance(value, float):
self._sigma = Q_(value, ureg.angstrom).magnitude
print("Default unit of sigma, angstrom")
else:
print("Invalid sigma", type(value))
@property
def temperature(self):
"""
298 Kelvin for room temperature
:return:
"""
try:
return self._temperature
except AttributeError:
print("Set Temperature!")
@temperature.setter
def temperature(self, value):
if isinstance(value, pint.Quantity):
self._temperature = value.magnitude
elif isinstance(value, (int, float)):
self._temperature = Q_(value, ureg.kelvin).magnitude
else:
print("Invalid temperature", type(value))
@property
def system_size(self):
"""
Units in angstrom
:return:
"""
try:
return self._system_size
except AttributeError:
print("Set system size!")
@system_size.setter
def system_size(self, value):
self._system_size = value
@property
def nparticles(self):
"""
Number of particles in this system
:return:
"""
try:
return self._nparticles
except AttributeError:
print("Define number of particles in the system")
@nparticles.setter
def nparticles(self, value):
self._nparticles = value
    def new_positions(self):
        """
        Draw a fresh random configuration of all particles inside the box
        :return: (nparticles, 3) array of coordinates
        """
        return np.random.uniform(0, self.system_size, size=(self.nparticles, 3))
        # return np.random.uniform(-self.system_size, self.system_size, size=(self.nparticles, 3))
    def _pbcs(self, p1, p2):
        """
        :param p1: position 1
        :param p2: position 2
        :return: minimum-image distance between p1 and p2 under periodic
                 boundary conditions
        """
        new = p1 - p2 - self.system_size * np.round((p1 - p2) / self.system_size, 0)
        # return np.sqrt(np.sum(np.square(new), axis=-1))
        return np.linalg.norm(new)
def pbcs_distance(self, trajectory):
"""
Calculate distance matrix with periodic boundary conditions
:param trajectory: coordinates
:return: this_distance
distance matrix with periodic boundary conditions
"""
this_distance = np.zeros((self.nparticles, self.nparticles))
for atom1 in range(self.nparticles):
for atom2 in range(self.nparticles):
this_distance[atom1, atom2] = self._pbcs(trajectory[atom1], trajectory[atom2])
this_distance = np.tril(this_distance)
return this_distance
    def calc_potential_energy(self, trajectory):
        r"""
        Calculate the potential energy of a Lennard-Jones fluid
        .. math:: V(r) = 4\epsilon[(\sigma/r)^{12} - (\sigma/r)^6]
        smoothed by the switching function
        .. math:: S = 1 - 6x^5 + 15x^4 - 10x^3
        between :math:`2\sigma` and the :math:`3\sigma` cutoff.
        http://docs.openmm.org/latest/userguide/theory.html#lennard-jones-interaction
        :param trajectory: coordinates
        :return: total potential energy (scalar)
        """
this_distance = self.pbcs_distance(trajectory)
shift = lambda x: 1 - 6 * np.power(x, 5) + 15 * np.power(x, 4) - 10 * np.power(x, 3)
this_distance = np.where(this_distance > 3*self.sigma, 0.0, this_distance)
scaled = np.where(this_distance > 2*self.sigma, shift(((this_distance - 2*self.sigma) / (self.sigma))), 1.0)
this_distance = np.where(this_distance == 0.0, 0.0, np.reciprocal(this_distance))
potential_energy = 4 * self.epsilon * (np.power(self.sigma * this_distance, 12) - np.power(self.sigma * this_distance, 6))
potential_energy = np.multiply(scaled, potential_energy)
result = np.sum(potential_energy)
del potential_energy
del this_distance
return result
    def possibility(self, pot_energy1, pot_energy2):
        r"""
        Calculate the probability of accepting a trial move
        .. math:: P_{accept} = P_{trial}/P_i = e^{-U_{trial}/RT}/e^{-U_i/RT} = e^{-(U_{trial} - U_i)/RT}
        :param pot_energy1: potential energy of the last position
        :param pot_energy2: potential energy of the trial position
        :return: acceptance probability
        """
#possibility = np.power(np.e, -(pot_energy2 - pot_energy1) / (self.R * self.temperature))
possibility = np.exp(-(pot_energy2 - pot_energy1)/(self.R*self.temperature))
return possibility
def decision_maker(self, pot_energy1, pot_energy2):
"""
Use Metropolis Monte Carlo algorithm to decide if the trial run will be accepted
:param pot_energy1: Potential Energies of last position
:param pot_energy2: Potential Energies of this position
:return:
"""
if pot_energy1 > pot_energy2:
self.accept = True
else:
p_accept = self.possibility(pot_energy1, pot_energy2)
mc_nu = np.random.uniform(0, 1, size=(1))[0]
if mc_nu < p_accept:
self.accept = True
else:
self.accept = False
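# Minimal usage sketch (illustrative; the parameter values below are
# assumptions for demonstration, not recommendations). Note that
# new_positions() draws an entirely fresh configuration each time, so this
# loop is a crude global-move sampler rather than a production MC scheme.
if __name__ == "__main__":
    mc = MCLJ()
    mc.epsilon = Q_(0.238, "kcal/mol")   # Argon values, per the docstrings above
    mc.sigma = Q_(3.405, ureg.angstrom)
    mc.temperature = 120                 # kelvin
    mc.system_size = 20.0                # angstrom
    mc.nparticles = 10
    current = mc.new_positions()
    current_pe = mc.calc_potential_energy(current)
    for step in range(100):
        trial = mc.new_positions()
        trial_pe = mc.calc_potential_energy(trial)
        mc.decision_maker(current_pe, trial_pe)
        if mc.accept:
            current, current_pe = trial, trial_pe
            mc.trajectories.append(current)
            mc.potential_energies.append(current_pe)
    print("accepted configurations:", len(mc.trajectories))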
|
{"hexsha": "a457412edd24d12a5c92c317bafb99293ca2a908", "size": 6713, "ext": "py", "lang": "Python", "max_stars_repo_path": "lj_mmcmd/mclj.py", "max_stars_repo_name": "wwilla7/lj-mmcmd", "max_stars_repo_head_hexsha": "e7b6e18c0eb2ff9d612e579d6c93b79ef7ec352e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lj_mmcmd/mclj.py", "max_issues_repo_name": "wwilla7/lj-mmcmd", "max_issues_repo_head_hexsha": "e7b6e18c0eb2ff9d612e579d6c93b79ef7ec352e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lj_mmcmd/mclj.py", "max_forks_repo_name": "wwilla7/lj-mmcmd", "max_forks_repo_head_hexsha": "e7b6e18c0eb2ff9d612e579d6c93b79ef7ec352e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6529680365, "max_line_length": 130, "alphanum_fraction": 0.5952629227, "include": true, "reason": "import numpy", "num_tokens": 1557}
|
[STATEMENT]
lemma fbd_inj_iff: "(bd\<^sub>\<F> f = bd\<^sub>\<F> g) = (f = g)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (bd\<^sub>\<F> f = bd\<^sub>\<F> g) = (f = g)
[PROOF STEP]
by (meson injD fbd_inj)
|
{"llama_tokens": 107, "file": "Transformer_Semantics_Kleisli_Transformers", "length": 1}
|
% Normalized Leaky Kernel Affine Projection Algorithm
%
% W. Liu and J.C. Principe, "Kernel Affine Projection Algorithms", EURASIP
% Journal on Advances in Signal Processing, Volume 2008, Article ID 784292,
% 12 pages. http://dx.doi.org/10.1155/2008/784292
%
% Remark: This implementation includes a maximum dictionary size M. With
% M=Inf this algorithm is equivalent to KAPA-4 from the publication. With
% M=Inf and lambda=0 it is equivalent to KAPA-2.
%
% This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
% https://github.com/steven2358/kafbox/
classdef nlkapa < kernel_adaptive_filter
properties (GetAccess = 'public', SetAccess = 'private') % parameters
eta = .05; % learning rate
eps = 1E-4; % Newton regularization
lambda = 1E-2; % Tikhonov regularization
M = 1000; % maximum dictionary size
P = 20; % number of regressors
kerneltype = 'gauss'; % kernel type
kernelpar = 1; % kernel parameter
end
properties (GetAccess = 'protected', SetAccess = 'private') % variables
xmem = []; % input memory
ymem = []; % output memory
dict = []; % dictionary
alpha = []; % expansion coefficients
end
methods
function kaf = nlkapa(parameters) % constructor
if (nargin > 0) % copy valid parameters
for fn = fieldnames(parameters)'
if ismember(fn,fieldnames(kaf))
kaf.(fn{1}) = parameters.(fn{1});
end
end
end
end
function y_est = evaluate(kaf,x) % evaluate the algorithm
if size(kaf.dict,1)>0
k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar);
y_est = k'*kaf.alpha;
else
y_est = zeros(size(x,1),1);
end
end
function train(kaf,x,y) % train the algorithm
if size(kaf.dict,2)==0 % initialize
kaf.dict = x;
kaf.alpha = kaf.eta*y;
kaf.xmem = x;
kaf.ymem = y;
else
if size(kaf.dict,1) < kaf.M
if size(kaf.xmem,1)<kaf.P
% grow memory
kaf.xmem = [kaf.xmem; x];
kaf.ymem = [kaf.ymem; y];
else
% slide memory
kaf.xmem = [kaf.xmem(2:kaf.P,:); x];
kaf.ymem = [kaf.ymem(2:kaf.P); y];
end
ymem_est = kaf.evaluate(kaf.xmem);
e = kaf.ymem - ymem_est;
G = kernel(kaf.xmem,kaf.xmem,...
kaf.kerneltype,kaf.kernelpar);
kaf.dict = [kaf.dict; x]; % grow dictionary
kaf.alpha = [(1-kaf.lambda*kaf.eta)*kaf.alpha; 0];
% leak and grow coefficients
m = size(kaf.alpha,1);
p = size(kaf.xmem,1);
% update p last coefficients
kaf.alpha(m-p+1:m) = kaf.alpha(m-p+1:m) + ...
kaf.eta*inv(G+kaf.eps*eye(p))*e; %#ok<MINV>
% prefer inv to \ to avoid instability
end
end
end
end
end
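% Minimal usage sketch (illustrative; the data and parameter values are
% assumptions, not recommendations). From a script or the command window:
%
%   kaf = nlkapa(struct('eta',0.1,'M',200));
%   x = randn(500,1); y = sin(3*x) + 0.1*randn(500,1);
%   y_est = zeros(500,1);
%   for n = 1:500
%       y_est(n) = kaf.evaluate(x(n,:));  % predict before training
%       kaf.train(x(n,:),y(n));
%   end
%   fprintf('final MSE: %g\n', mean((y(100:end)-y_est(100:end)).^2));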
|
{"author": "steven2358", "repo": "kafbox", "sha": "694cf94df02a9728a90d7bacda1a8520b425f86f", "save_path": "github-repos/MATLAB/steven2358-kafbox", "path": "github-repos/MATLAB/steven2358-kafbox/kafbox-694cf94df02a9728a90d7bacda1a8520b425f86f/lib/nlkapa.m"}
|
#!/usr/bin/python
# Homography transformation
# Solve a system of simultaneous equations with sympy, then use the
# solution to perform a perspective warp with OpenCV.
#
# Copyright 2020 YUUKIToriyama
import cv2
import sympy as sym
import numpy as np
import json
import math
# Load the JSON file of corner points sent from the web page
with open("test.json", "r") as tmp:
    corners = json.load(tmp)  # renamed so the json module is not shadowed

# Side lengths of the target rectangle, taken from the selected quadrilateral
ab = math.floor(np.sqrt((corners[0]["x"] - corners[1]["x"])**2 + (corners[0]["y"] - corners[1]["y"])**2))
bc = math.floor(np.sqrt((corners[1]["x"] - corners[2]["x"])**2 + (corners[1]["y"] - corners[2]["y"])**2))
print((ab, bc))
corners[0].update({"u": 0, "v": 0})
corners[1].update({"u": ab, "v": 0})
corners[2].update({"u": ab, "v": bc})
corners[3].update({"u": 0, "v": bc})
# Solve the simultaneous equations to obtain the transformation matrix
a, b, c, d, e, f, g, h = sym.symbols("a b c d e f g h")
coordinates = corners
equations = []
for i in range(4):
coords = coordinates[i]
equations.append(a * coords["x"] + b * coords["y"] + c - coords["u"] * (g * coords["x"] + h * coords["y"] + 1))
equations.append(d * coords["x"] + e * coords["y"] + f - coords["v"] * (g * coords["x"] + h * coords["y"] + 1))
result = sym.solve(equations)
matrix = np.array([
[result[a], result[b], result[c]],
[result[d], result[e], result[f]],
[result[g], result[h], 1]
], dtype=np.float32)
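# Note (illustrative alternative, not used above): OpenCV can compute the
# same 3x3 perspective matrix directly from the four point correspondences:
#   src = np.float32([[p["x"], p["y"]] for p in coordinates])
#   dst = np.float32([[p["u"], p["v"]] for p in coordinates])
#   matrix = cv2.getPerspectiveTransform(src, dst)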
# Read the image and warp it with the computed homography
image = cv2.imread("tmp/IMG_4081.JPG")
new_image = cv2.warpPerspective(image, matrix, (ab, bc))
cv2.imwrite("tmp/output.png", new_image)
|
{"hexsha": "2deac77dc33f7972e95091b5d70d22c67c44a830", "size": 1330, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "YUUKIToriyama/homograpy-sample", "max_stars_repo_head_hexsha": "6ba52a9675ea69f6d6acf3d1780898315d55d8b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "YUUKIToriyama/homograpy-sample", "max_issues_repo_head_hexsha": "6ba52a9675ea69f6d6acf3d1780898315d55d8b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "YUUKIToriyama/homograpy-sample", "max_forks_repo_head_hexsha": "6ba52a9675ea69f6d6acf3d1780898315d55d8b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6296296296, "max_line_length": 115, "alphanum_fraction": 0.5977443609, "include": true, "reason": "import numpy,import sympy", "num_tokens": 546}
|
import numpy as np
from ..local_interpolation import ThirdOrderHermitePolynomialInterpolation
from .runge_kutta import AbstractESDIRK, ButcherTableau
# This γ notation is from the original paper. All the coefficients are described in
# terms of it.
#
# In passing: DifferentialEquations.jl actually gets this wrong. In her paper Kvaerno
# describes two different 4/3 methods. Both of them use the same 4-3 error estimate;
# the difference is that one is a "normal" 4/3 method, advancing the solution according
# to the 4th-order final stage, whilst the other is a "3/4" method, advancing the
# solution using the 3rd-order penultimate stage. Each approach mandates a different
# value of γ (but the same formulae for a21 etc.).
# DifferentialEquations.jl muddles these two up: it uses the "3/4" value for γ whilst
# advancing the solution according to the final stage.
γ = 0.5728160625
def poly(*args):
return np.polyval(args, γ)
a21 = γ
a31 = poly(144, -180, 81, -15, 1) * γ / poly(12, -6, 1) ** 2
a32 = poly(-36, 39, -15, 2) * γ / poly(12, -6, 1) ** 2
a41 = poly(-144, 396, -330, 117, -18, 1) / (12 * γ**2 * poly(12, -9, 2))
a42 = poly(72, -126, 69, -15, 1) / (12 * γ**2 * poly(3, -1))
a43 = (poly(-6, 6, -1) * poly(12, -6, 1) ** 2) / (
12 * γ**2 * poly(12, -9, 2) * poly(3, -1)
)
a51 = poly(288, -312, 120, -18, 1) / (48 * γ**2 * poly(12, -9, 2))
a52 = poly(24, -12, 1) / (48 * γ**2 * poly(3, -1))
a53 = -poly(12, -6, 1) ** 3 / (
48 * γ**2 * poly(3, -1) * poly(12, -9, 2) * poly(6, -6, 1)
)
a54 = poly(-24, 36, -12, 1) / poly(24, -24, 4)
c2 = γ + a21
c3 = γ + a31 + a32
c4 = 1.0
c5 = 1.0
# See /devdocs/predictor_dirk.md
θ1 = c3 / c2
θ2 = (c4 - c2) / (c3 - c2)
α21 = 1.0
α31 = 1 - θ1
α32 = θ1
α41 = 0
α42 = 1 - θ2
α43 = θ2
α51 = a41
α52 = a42
α53 = a43
α54 = γ
_kvaerno4_tableau = ButcherTableau(
a_lower=(
np.array([a21]),
np.array([a31, a32]),
np.array([a41, a42, a43]),
np.array([a51, a52, a53, a54]),
),
a_predictor=(
np.array([α21]),
np.array([α31, α32]),
np.array([α41, α42, α43]),
np.array([α51, α52, α53, α54]),
),
a_diagonal=np.array([0, γ, γ, γ, γ]),
b_sol=np.array([a51, a52, a53, a54, γ]),
b_error=np.array([a51 - a41, a52 - a42, a53 - a43, a54 - γ, γ]),
c=np.array([c2, c3, c4, c5]),
)
class Kvaerno4(AbstractESDIRK):
r"""Kvaerno's 4/3 method.
A-L stable stiffly accurate 4th order ESDIRK method. Has an embedded 3rd order
method for adaptive step sizing. Uses 5 stages.
When solving an ODE over the interval $[t_0, t_1]$, note that this method will make
some evaluations slightly past $t_1$.
??? cite "Reference"
```bibtex
@article{kvaerno2004singly,
title={Singly diagonally implicit Runge--Kutta methods with an explicit first
stage},
author={Kv{\ae}rn{\o}, Anne},
journal={BIT Numerical Mathematics},
volume={44},
number={3},
pages={489--502},
year={2004},
publisher={Springer}
}
```
"""
tableau = _kvaerno4_tableau
interpolation_cls = ThirdOrderHermitePolynomialInterpolation.from_k
def order(self, terms):
return 4
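# Minimal usage sketch (illustrative; assumes diffrax's top-level API and
# default root-finder settings — adjust for your installed version):
#
#   import diffrax
#   term = diffrax.ODETerm(lambda t, y, args: -y)
#   sol = diffrax.diffeqsolve(
#       term, diffrax.Kvaerno4(), t0=0.0, t1=1.0, dt0=0.1, y0=1.0,
#       stepsize_controller=diffrax.PIDController(rtol=1e-5, atol=1e-5),
#   )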
|
{"hexsha": "f5b15da7f4a096f735b1374558dcbeb5d2869f97", "size": 3237, "ext": "py", "lang": "Python", "max_stars_repo_path": "diffrax/solver/kvaerno4.py", "max_stars_repo_name": "FedericoV/diffrax", "max_stars_repo_head_hexsha": "98b010242394491fea832e77dc94f456b48495fa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 377, "max_stars_repo_stars_event_min_datetime": "2022-02-07T11:13:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T18:35:51.000Z", "max_issues_repo_path": "diffrax/solver/kvaerno4.py", "max_issues_repo_name": "FedericoV/diffrax", "max_issues_repo_head_hexsha": "98b010242394491fea832e77dc94f456b48495fa", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2022-02-08T23:08:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T21:21:18.000Z", "max_forks_repo_path": "diffrax/solver/kvaerno4.py", "max_forks_repo_name": "FedericoV/diffrax", "max_forks_repo_head_hexsha": "98b010242394491fea832e77dc94f456b48495fa", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2022-02-08T04:46:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T20:53:10.000Z", "avg_line_length": 30.2523364486, "max_line_length": 87, "alphanum_fraction": 0.5999382144, "include": true, "reason": "import numpy", "num_tokens": 1187}
|