file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
code.go | /*
Copyright 2022 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vterrors
import (
"fmt"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
var (
VT03001 = errorWithState("VT03001", vtrpcpb.Code_INVALID_ARGUMENT, SyntaxError, "aggregate functions take a single argument '%s'", "This aggregation function only takes a single argument.")
VT03002 = errorWithState("VT03002", vtrpcpb.Code_INVALID_ARGUMENT, ForbidSchemaChange, "changing schema from '%s' to '%s' is not allowed", "This schema change is not allowed. You cannot change the keyspace of a table.")
VT03003 = errorWithState("VT03003", vtrpcpb.Code_INVALID_ARGUMENT, UnknownTable, "unknown table '%s' in MULTI DELETE", "The specified table in this DELETE statement is unknown.")
VT03004 = errorWithState("VT03004", vtrpcpb.Code_INVALID_ARGUMENT, NonUpdateableTable, "the target table %s of the DELETE is not updatable", "You cannot delete something that is not a real MySQL table.")
VT03005 = errorWithState("VT03005", vtrpcpb.Code_INVALID_ARGUMENT, WrongGroupField, "cannot group on '%s'", "The planner does not allow grouping on certain field. For instance, aggregation function.")
VT03006 = errorWithState("VT03006", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueCountOnRow, "column count does not match value count at row 1", "The number of columns you want to insert do not match the number of columns of your SELECT query.")
VT03007 = errorWithoutState("VT03007", vtrpcpb.Code_INVALID_ARGUMENT, "keyspace not specified", "You need to add a keyspace qualifier.")
VT03008 = errorWithState("VT03008", vtrpcpb.Code_INVALID_ARGUMENT, CantUseOptionHere, "incorrect usage/placement of '%s'", "The given token is not usable in this situation. Please refer to the MySQL documentation to learn more about your token's syntax.")
VT03009 = errorWithState("VT03009", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueForVar, "unexpected value type for '%s': %v", "You cannot assign this type to the given variable.")
VT03010 = errorWithState("VT03010", vtrpcpb.Code_INVALID_ARGUMENT, IncorrectGlobalLocalVar, "variable '%s' is a read only variable", "You cannot set the given variable as it is a read-only variable.")
VT03011 = errorWithoutState("VT03011", vtrpcpb.Code_INVALID_ARGUMENT, "invalid value type: %v", "The given value type is not accepted.")
VT03012 = errorWithoutState("VT03012", vtrpcpb.Code_INVALID_ARGUMENT, "invalid syntax: %s", "The syntax is invalid. Please refer to the MySQL documentation for the proper syntax.")
VT03013 = errorWithState("VT03013", vtrpcpb.Code_INVALID_ARGUMENT, NonUniqTable, "not unique table/alias: '%s'", "This table or alias name is already use. Please use another one that is unique.")
VT03014 = errorWithState("VT03014", vtrpcpb.Code_INVALID_ARGUMENT, BadFieldError, "unknown column '%d' in '%s'", "The given column is unknown.")
VT03015 = errorWithoutState("VT03015", vtrpcpb.Code_INVALID_ARGUMENT, "column has duplicate set values: '%v'", "Cannot assign multiple values to a column in an update statement.")
VT03016 = errorWithoutState("VT03016", vtrpcpb.Code_INVALID_ARGUMENT, "unknown vindex column: '%s'", "The given column is unknown in the vindex table.")
VT03017 = errorWithState("VT03017", vtrpcpb.Code_INVALID_ARGUMENT, SyntaxError, "where clause can only be of the type 'pos > <value>'", "This vstream where clause can only be a greater than filter.")
VT03018 = errorWithoutState("VT03018", vtrpcpb.Code_INVALID_ARGUMENT, "NEXT used on a non-sequence table", "You cannot use the NEXT syntax on a table that is not a sequence table.")
VT03019 = errorWithoutState("VT03019", vtrpcpb.Code_INVALID_ARGUMENT, "column %s not found", "The given column was not found or is not available.")
VT03020 = errorWithoutState("VT03020", vtrpcpb.Code_INVALID_ARGUMENT, "column %s not found in subquery", "The given column was not found in the subquery.")
VT03021 = errorWithoutState("VT03021", vtrpcpb.Code_INVALID_ARGUMENT, "ambiguous column reference: %v", "The given column is ambiguous. You can use a table qualifier to make it unambiguous.")
VT03022 = errorWithoutState("VT03022", vtrpcpb.Code_INVALID_ARGUMENT, "column %v not found in %v", "The given column cannot be found.")
VT03023 = errorWithoutState("VT03023", vtrpcpb.Code_INVALID_ARGUMENT, "INSERT not supported when targeting a key range: %s", "When targeting a range of shards, Vitess does not know which shard to send the INSERT to.")
VT03024 = errorWithoutState("VT03024", vtrpcpb.Code_INVALID_ARGUMENT, "'%s' user defined variable does not exists", "The query cannot be prepared using the user defined variable as it does not exists for this session.")
VT03025 = errorWithState("VT03025", vtrpcpb.Code_INVALID_ARGUMENT, WrongArguments, "Incorrect arguments to %s", "The execute statement have wrong number of arguments")
VT05001 = errorWithState("VT05001", vtrpcpb.Code_NOT_FOUND, DbDropExists, "cannot drop database '%s'; database does not exists", "The given database does not exist; Vitess cannot drop it.")
VT05002 = errorWithState("VT05002", vtrpcpb.Code_NOT_FOUND, BadDb, "cannot alter database '%s'; unknown database", "The given database does not exist; Vitess cannot alter it.")
VT05003 = errorWithState("VT05003", vtrpcpb.Code_NOT_FOUND, BadDb, "unknown database '%s' in vschema", "The given database does not exist in the VSchema.")
VT05004 = errorWithState("VT05004", vtrpcpb.Code_NOT_FOUND, UnknownTable, "table '%s' does not exist", "The given table is unknown.")
VT05005 = errorWithState("VT05005", vtrpcpb.Code_NOT_FOUND, NoSuchTable, "table '%s' does not exist in keyspace '%s'", "The given table does not exist in this keyspace.")
VT05006 = errorWithState("VT05006", vtrpcpb.Code_NOT_FOUND, UnknownSystemVariable, "unknown system variable '%s'", "The given system variable is unknown.")
VT05007 = errorWithoutState("VT05007", vtrpcpb.Code_NOT_FOUND, "no table info", "Table information is not available.")
VT06001 = errorWithState("VT06001", vtrpcpb.Code_ALREADY_EXISTS, DbCreateExists, "cannot create database '%s'; database exists", "The given database name already exists.")
VT07001 = errorWithState("VT07001", vtrpcpb.Code_PERMISSION_DENIED, KillDeniedError, "%s", "Kill statement is not allowed. More in docs about how to enable it and its limitations.")
VT09001 = errorWithState("VT09001", vtrpcpb.Code_FAILED_PRECONDITION, RequiresPrimaryKey, PrimaryVindexNotSet, "the table does not have a primary vindex, the operation is impossible.")
VT09002 = errorWithState("VT09002", vtrpcpb.Code_FAILED_PRECONDITION, InnodbReadOnly, "%s statement with a replica target", "This type of DML statement is not allowed on a replica target.")
VT09003 = errorWithoutState("VT09003", vtrpcpb.Code_FAILED_PRECONDITION, "INSERT query does not have primary vindex column '%v' in the column list", "A vindex column is mandatory for the insert, please provide one.")
VT09004 = errorWithoutState("VT09004", vtrpcpb.Code_FAILED_PRECONDITION, "INSERT should contain column list or the table should have authoritative columns in vschema", "You need to provide the list of columns you want to insert, or provide a VSchema with authoritative columns. If schema tracking is disabled you can enable it to automatically have authoritative columns.")
VT09005 = errorWithState("VT09005", vtrpcpb.Code_FAILED_PRECONDITION, NoDB, "no database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)", "A database must be selected.")
VT09006 = errorWithoutState("VT09006", vtrpcpb.Code_FAILED_PRECONDITION, "%s VITESS_MIGRATION works only on primary tablet", "VITESS_MIGRATION commands work only on primary tablets, you must send such commands to a primary tablet.")
VT09007 = errorWithoutState("VT09007", vtrpcpb.Code_FAILED_PRECONDITION, "%s VITESS_THROTTLED_APPS works only on primary tablet", "VITESS_THROTTLED_APPS commands work only on primary tablet, you must send such commands to a primary tablet.")
VT09008 = errorWithoutState("VT09008", vtrpcpb.Code_FAILED_PRECONDITION, "vexplain queries/all will actually run queries", "vexplain queries/all will actually run queries. `/*vt+ EXECUTE_DML_QUERIES */` must be set to run DML queries in vtexplain. Example: `vexplain /*vt+ EXECUTE_DML_QUERIES */ queries delete from t1`")
VT09009 = errorWithoutState("VT09009", vtrpcpb.Code_FAILED_PRECONDITION, "stream is supported only for primary tablet type, current type: %v", "Stream is only supported for primary tablets, please use a stream on those tablets.")
VT09010 = errorWithoutState("VT09010", vtrpcpb.Code_FAILED_PRECONDITION, "SHOW VITESS_THROTTLER STATUS works only on primary tablet", "SHOW VITESS_THROTTLER STATUS works only on primary tablet.")
VT09011 = errorWithState("VT09011", vtrpcpb.Code_FAILED_PRECONDITION, UnknownStmtHandler, "Unknown prepared statement handler (%s) given to %s", "The prepared statement is not available")
VT09012 = errorWithoutState("VT09012", vtrpcpb.Code_FAILED_PRECONDITION, "%s statement with %s tablet not allowed", "This type of statement is not allowed on the given tablet.")
VT09013 = errorWithoutState("VT09013", vtrpcpb.Code_FAILED_PRECONDITION, "semi-sync plugins are not loaded", "Durability policy wants Vitess to use semi-sync, but the MySQL instances don't have the semi-sync plugin loaded.")
VT09014 = errorWithoutState("VT09014", vtrpcpb.Code_FAILED_PRECONDITION, "vindex cannot be modified", "The vindex cannot be used as table in DML statement")
VT09015 = errorWithoutState("VT09015", vtrpcpb.Code_FAILED_PRECONDITION, "schema tracking required", "This query cannot be planned without more information on the SQL schema. Please turn on schema tracking or add authoritative columns information to your VSchema.")
VT09016 = errorWithState("VT09016", vtrpcpb.Code_FAILED_PRECONDITION, RowIsReferenced2, "Cannot delete or update a parent row: a foreign key constraint fails", "SET DEFAULT is not supported by InnoDB")
VT10001 = errorWithoutState("VT10001", vtrpcpb.Code_ABORTED, "foreign key constraints are not allowed", "Foreign key constraints are not allowed, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/.")
VT12001 = errorWithoutState("VT12001", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: %s", "This statement is unsupported by Vitess. Please rewrite your query to use supported syntax.")
VT12002 = errorWithoutState("VT12002", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: cross-shard foreign keys", "Vitess does not support cross shard foreign keys.")
VT12003 = errorWithoutState("VT12002", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: foreign keys management at vitess", "Vitess does not support managing foreign keys tables.")
// VT13001 General Error
VT13001 = errorWithoutState("VT13001", vtrpcpb.Code_INTERNAL, "[BUG] %s", "This error should not happen and is a bug. Please file an issue on GitHub: https://github.com/vitessio/vitess/issues/new/choose.")
VT13002 = errorWithoutState("VT13002", vtrpcpb.Code_INTERNAL, "unexpected AST struct for query: %s", "This error should not happen and is a bug. Please file an issue on GitHub: https://github.com/vitessio/vitess/issues/new/choose.")
VT14001 = errorWithoutState("VT14001", vtrpcpb.Code_UNAVAILABLE, "connection error", "The connection failed.")
VT14002 = errorWithoutState("VT14002", vtrpcpb.Code_UNAVAILABLE, "no available connection", "No available connection.")
VT14003 = errorWithoutState("VT14003", vtrpcpb.Code_UNAVAILABLE, "no connection for tablet %v", "No connection for the given tablet.")
VT14004 = errorWithoutState("VT14004", vtrpcpb.Code_UNAVAILABLE, "cannot find keyspace for: %s", "The specified keyspace could not be found.")
VT14005 = errorWithoutState("VT14005", vtrpcpb.Code_UNAVAILABLE, "cannot lookup sidecar database for keyspace: %s", "Failed to read sidecar database identifier.")
Errors = []func(args ...any) *VitessError{
VT03001,
VT03002,
VT03003,
VT03004,
VT03005,
VT03006,
VT03007,
VT03008,
VT03009,
VT03010,
VT03011,
VT03012,
VT03013,
VT03014,
VT03015,
VT03016,
VT03017,
VT03018,
VT03019,
VT03020,
VT03021,
VT03022,
VT03023,
VT03024,
VT03025,
VT05001,
VT05002,
VT05003,
VT05004,
VT05005,
VT05006,
VT05007,
VT06001,
VT07001,
VT09001,
VT09002,
VT09003,
VT09004,
VT09005,
VT09006,
VT09007,
VT09008,
VT09009,
VT09010,
VT09011,
VT09012,
VT09013,
VT09014,
VT09015,
VT09016,
VT10001,
VT12001,
VT12002,
VT12003,
VT13001,
VT13002,
VT14001,
VT14002,
VT14003,
VT14004,
VT14005,
}
)
type VitessError struct {
Err error
Description string
ID string
State State
}
func (o *VitessError) | () string {
return o.Err.Error()
}
func (o *VitessError) Cause() error {
return o.Err
}
var _ error = (*VitessError)(nil)
func errorWithoutState(id string, code vtrpcpb.Code, short, long string) func(args ...any) *VitessError {
return func(args ...any) *VitessError {
s := short
if len(args) != 0 {
s = fmt.Sprintf(s, args...)
}
return &VitessError{
Err: New(code, id+": "+s),
Description: long,
ID: id,
}
}
}
func errorWithState(id string, code vtrpcpb.Code, state State, short, long string) func(args ...any) *VitessError {
return func(args ...any) *VitessError {
return &VitessError{
Err: NewErrorf(code, state, id+": "+short, args...),
Description: long,
ID: id,
State: state,
}
}
}
| Error | identifier_name |
run_utils.py | #!/usr/bin/env python3
import os
from enum import Enum
import numpy as np
import scipy.sparse as sps
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm, trange
from cython_modules.leave_one_out import train_test_loo_split as __train_test_loo_split_cython
from csv_utils import load_csv, export_csv
from multiprocessing import Pool
from collections import namedtuple
class DataFiles:
TRAIN = 'data/data_train.csv'
TARGET_USERS_TEST = 'data/data_target_users_test.csv'
ICM_ASSET = 'data/data_ICM_asset.csv'
ICM_PRICE = 'data/data_ICM_price.csv'
ICM_SUBCLASS = 'data/data_ICM_sub_class.csv'
UCM_AGE = 'data/data_UCM_age.csv'
UCM_REGION = 'data/data_UCM_region.csv'
CLUSTERS = 'data/user_clustered.csv'
class SplitType(Enum):
PROBABILISTIC = 1
LOO = 2
LOO_CYTHON = 3
def set_seed(seed):
print('seed = {0}'.format(seed))
os.environ['RECSYS_SEED'] = str(seed)
np.random.seed(seed)
def get_seed():
env = os.getenv('RECSYS_SEED')
if env:
return int(env)
return -1
def build_urm():
urm_data = load_csv(DataFiles.TRAIN)
urm_data = [[int(row[i]) if i <= 1 else int(float(row[i])) for i in range(len(row))] for row in urm_data]
users, items, ratings = map(np.array, zip(*urm_data))
return sps.csr_matrix((ratings, (users, items)))
def clusterize():
data = load_csv(DataFiles.CLUSTERS)
data = [[int(row[i]) for i in range(len(row))] for row in data]
_, user_ids, cluster_ids = map(list, zip(*data))
assert len(user_ids) == len(cluster_ids)
data_len = len(user_ids)
clusters = dict()
for n in range(max(cluster_ids) + 1):
clusters[n] = list()
for i in range(data_len):
user_id = user_ids[i]
cluster_id = cluster_ids[i]
clusters[cluster_id].append(user_id)
return clusters
def get_cold_users(urm_train, return_warm=False):
profile_lengths = np.ediff1d(urm_train.indptr)
cold_users = np.where(profile_lengths == 0)[0]
if return_warm:
warm_users = np.where(profile_lengths > 0)[0]
return cold_users, warm_users
return cold_users
def | (n_items):
price_icm_items, _, price_icm_values = __load_icm_csv(DataFiles.ICM_PRICE, third_type=float)
price_icm_values = __encode_values(price_icm_values)
n_features = max(price_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(price_icm_values))
price_icm = sps.csr_matrix((ones, (price_icm_items, price_icm_values)), shape=shape, dtype=int)
return price_icm
def build_asset_icm(n_items):
asset_icm_items, _, asset_icm_values = __load_icm_csv(DataFiles.ICM_ASSET, third_type=float)
asset_icm_values += 1
asset_icm_values = __encode_values(asset_icm_values)
n_features = max(asset_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(asset_icm_values))
asset_icm = sps.csr_matrix((ones, (asset_icm_items, asset_icm_values)), shape=shape, dtype=int)
return asset_icm
def build_subclass_icm(n_items):
subclass_icm_items, subclass_icm_features, subclass_icm_values = __load_icm_csv(DataFiles.ICM_SUBCLASS, third_type=float)
n_features = max(subclass_icm_features) + 1
shape = (n_items, n_features)
subclass_icm = sps.csr_matrix((subclass_icm_values, (subclass_icm_items, subclass_icm_features)), shape=shape, dtype=int)
return subclass_icm
def build_icm(n_items):
price_icm = build_price_icm(n_items)
asset_icm = build_asset_icm(n_items)
subclass_icm = build_subclass_icm(n_items)
return sps.hstack((price_icm, asset_icm, subclass_icm)).tocsr()
def build_age_ucm(n_users):
age_ucm_users, age_ucm_features, age_ucm_values = __load_icm_csv(DataFiles.UCM_AGE, third_type=float)
n_features = max(age_ucm_features) + 1
shape = (n_users, n_features)
age_ucm = sps.csr_matrix((age_ucm_values, (age_ucm_users, age_ucm_features)), shape=shape, dtype=int)
return age_ucm
def build_region_ucm(n_users):
region_ucm_users, region_ucm_features, region_ucm_values = __load_icm_csv(DataFiles.UCM_REGION, third_type=float)
n_features = max(region_ucm_features) + 1
shape = (n_users, n_features)
region_ucm = sps.csr_matrix((region_ucm_values, (region_ucm_users, region_ucm_features)), shape=shape, dtype=int)
return region_ucm
def build_ucm(n_users):
age_ucm = build_age_ucm(n_users)
region_ucm = build_region_ucm(n_users)
return sps.hstack((age_ucm, region_ucm))
def build_target_users():
target_users = load_csv(DataFiles.TARGET_USERS_TEST)
return [int(x[0]) for x in target_users]
def build_all_matrices():
urm = build_urm()
n_users, n_items = urm.shape
icm = build_icm(n_items)
ucm = build_ucm(n_users)
target_users = build_target_users()
return urm, icm, ucm, target_users
def train_test_split(urm, split_type=SplitType.PROBABILISTIC, split=0.8):
if split_type == SplitType.PROBABILISTIC:
return __train_test_split(urm, split)
elif split_type == SplitType.LOO:
return __train_test_loo_split(urm)
elif split_type == SplitType.LOO_CYTHON:
return __train_test_loo_split_cython(urm)
def evaluate(recommender, urm_test, excluded_users=[], cython=False, verbose=True):
from evaluation import evaluate_algorithm
if cython:
if verbose:
print('Ignoring argument excluded_users')
from cython_modules.evaluation import evaluate_cython
if verbose:
print('Using Cython evaluation')
return evaluate_cython(recommender, urm_test, verbose=verbose)
else:
return evaluate_algorithm(recommender, urm_test, excluded_users=excluded_users, verbose=verbose)
def evaluate_mp(recommender, urm_tests, excluded_users=[], cython=False, verbose=True, n_processes=0):
assert type(urm_tests) == list
assert len(urm_tests) >= 1
assert type(n_processes) == int
if n_processes == 0:
n_processes = len(urm_tests)
with Pool(processes=n_processes) as pool:
args = [(recommender, urm_test, excluded_users, cython, verbose) for urm_test in urm_tests]
maps = pool.starmap(evaluate, args, chunksize=1)
maps = [x['MAP'] for x in maps]
return np.mean(maps)
def export(target_users, recommender):
print('Exporting recommendations...')
data = list()
for u_id in tqdm(target_users, desc='Export'):
data.append((u_id, recommender.recommend(u_id, at=10)))
export_csv(('user_id', 'item_list'), data)
print('OK')
def __train_test_split(urm, split=0.8):
print('Using probabilistic splitting ({0:.2f}/{1:.2f})'.format(split, 1-split))
urm = urm.tocoo()
num_interactions = urm.nnz
shape = urm.shape
train_mask = np.random.choice([True, False], num_interactions, p=[split, 1-split])
urm_train = sps.coo_matrix((urm.data[train_mask], (urm.row[train_mask], urm.col[train_mask])), shape=shape)
urm_train = urm_train.tocsr()
test_mask = np.logical_not(train_mask)
urm_test = sps.coo_matrix((urm.data[test_mask], (urm.row[test_mask], urm.col[test_mask])), shape=shape)
urm_test = urm_test.tocsr()
return urm_train, urm_test
def __train_test_loo_split(urm):
print('Using LeaveOneOut')
urm = urm.tocsr()
num_users = urm.shape[0]
num_items = urm.shape[1]
urm_train = urm.copy()
urm_test = sps.lil_matrix((num_users, num_items), dtype=int)
for user_id in trange(num_users, desc='LeaveOneOut'):
start_pos = urm_train.indptr[user_id]
end_pos = urm_train.indptr[user_id + 1]
user_profile = urm_train.indices[start_pos:end_pos]
if user_profile.size > 0:
item_id = np.random.choice(user_profile, 1)
urm_train[user_id, item_id] = 0
urm_test[user_id, item_id] = 1
urm_test = sps.csr_matrix(urm_test, dtype=int, shape=urm.shape)
urm_train.eliminate_zeros()
urm_test.eliminate_zeros()
return urm_train, urm_test
def __load_icm_csv(filename, third_type):
data = load_csv(filename)
data = [[int(row[i]) if i <= 1 else third_type(row[i]) for i in range(len(row))] for row in data]
items, features, values = map(np.array, zip(*data))
return items, features, values
def __encode_values(values):
le = LabelEncoder()
le.fit(values)
return le.transform(values)
group_struct = namedtuple('group_struct', ['in_group', 'not_in_group'])
def user_segmenter(urm_train, n_groups=10):
groups = dict()
users = dict()
profile_length = np.ediff1d(urm_train.indptr)
group_size = int(profile_length.size/n_groups)
sorted_users = np.argsort(profile_length)
for group_id in range(n_groups):
start_pos = group_id * group_size
end_pos = min((group_id + 1) * group_size, len(profile_length))
users_in_group = sorted_users[start_pos:end_pos]
for user in users_in_group:
users[user] = group_id
users_not_in_group_flag = np.isin(sorted_users, users_in_group, invert=True)
users_not_in_group = sorted_users[users_not_in_group_flag]
groups[group_id] = group_struct(in_group=users_in_group, not_in_group=users_not_in_group)
return groups, users
def multiple_splitting(seeds=(4951, 893, 2618, 39, 4947)):
urm, icm, ucm, target_users = build_all_matrices()
trains = list()
tests = list()
for seed in seeds:
set_seed(seed)
urm_train, urm_test = train_test_split(urm)
trains.append(urm_train)
tests.append(urm_test)
return trains, tests, seeds
if __name__ == '__main__':
from evaluation import evaluate_by_cluster
from cf import ItemCFKNNRecommender
from basic_recommenders import TopPopRecommender
np.random.seed(42)
urm, icm, ucm, target_users = build_all_matrices()
urm_train, urm_test = train_test_split(urm, SplitType.PROBABILISTIC)
top_pop = TopPopRecommender()
top_pop.fit(urm_train)
cf = ItemCFKNNRecommender(fallback_recommender=top_pop)
cf.fit(urm_train, top_k=690, shrink=66, normalize=False, similarity='tanimoto')
evaluate_by_cluster(cf, urm_test, clusterise()) | build_price_icm | identifier_name |
run_utils.py | #!/usr/bin/env python3
import os
from enum import Enum
import numpy as np
import scipy.sparse as sps
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm, trange
from cython_modules.leave_one_out import train_test_loo_split as __train_test_loo_split_cython
from csv_utils import load_csv, export_csv
from multiprocessing import Pool
from collections import namedtuple
class DataFiles:
TRAIN = 'data/data_train.csv'
TARGET_USERS_TEST = 'data/data_target_users_test.csv'
ICM_ASSET = 'data/data_ICM_asset.csv'
ICM_PRICE = 'data/data_ICM_price.csv'
ICM_SUBCLASS = 'data/data_ICM_sub_class.csv'
UCM_AGE = 'data/data_UCM_age.csv'
UCM_REGION = 'data/data_UCM_region.csv'
CLUSTERS = 'data/user_clustered.csv'
class SplitType(Enum):
PROBABILISTIC = 1
LOO = 2
LOO_CYTHON = 3
def set_seed(seed):
print('seed = {0}'.format(seed))
os.environ['RECSYS_SEED'] = str(seed)
np.random.seed(seed)
def get_seed():
env = os.getenv('RECSYS_SEED')
if env:
return int(env)
return -1
def build_urm():
urm_data = load_csv(DataFiles.TRAIN)
urm_data = [[int(row[i]) if i <= 1 else int(float(row[i])) for i in range(len(row))] for row in urm_data]
users, items, ratings = map(np.array, zip(*urm_data))
return sps.csr_matrix((ratings, (users, items)))
def clusterize():
data = load_csv(DataFiles.CLUSTERS)
data = [[int(row[i]) for i in range(len(row))] for row in data]
_, user_ids, cluster_ids = map(list, zip(*data))
assert len(user_ids) == len(cluster_ids)
data_len = len(user_ids)
clusters = dict()
for n in range(max(cluster_ids) + 1):
clusters[n] = list()
for i in range(data_len):
user_id = user_ids[i]
cluster_id = cluster_ids[i]
clusters[cluster_id].append(user_id)
return clusters
def get_cold_users(urm_train, return_warm=False):
profile_lengths = np.ediff1d(urm_train.indptr)
cold_users = np.where(profile_lengths == 0)[0]
if return_warm:
warm_users = np.where(profile_lengths > 0)[0]
return cold_users, warm_users
return cold_users
def build_price_icm(n_items):
price_icm_items, _, price_icm_values = __load_icm_csv(DataFiles.ICM_PRICE, third_type=float)
price_icm_values = __encode_values(price_icm_values)
n_features = max(price_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(price_icm_values))
price_icm = sps.csr_matrix((ones, (price_icm_items, price_icm_values)), shape=shape, dtype=int)
return price_icm
def build_asset_icm(n_items):
asset_icm_items, _, asset_icm_values = __load_icm_csv(DataFiles.ICM_ASSET, third_type=float)
asset_icm_values += 1
asset_icm_values = __encode_values(asset_icm_values)
n_features = max(asset_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(asset_icm_values))
asset_icm = sps.csr_matrix((ones, (asset_icm_items, asset_icm_values)), shape=shape, dtype=int)
return asset_icm
def build_subclass_icm(n_items):
subclass_icm_items, subclass_icm_features, subclass_icm_values = __load_icm_csv(DataFiles.ICM_SUBCLASS, third_type=float)
n_features = max(subclass_icm_features) + 1
shape = (n_items, n_features)
subclass_icm = sps.csr_matrix((subclass_icm_values, (subclass_icm_items, subclass_icm_features)), shape=shape, dtype=int)
return subclass_icm
def build_icm(n_items):
price_icm = build_price_icm(n_items)
asset_icm = build_asset_icm(n_items)
subclass_icm = build_subclass_icm(n_items)
return sps.hstack((price_icm, asset_icm, subclass_icm)).tocsr()
def build_age_ucm(n_users):
age_ucm_users, age_ucm_features, age_ucm_values = __load_icm_csv(DataFiles.UCM_AGE, third_type=float)
n_features = max(age_ucm_features) + 1
shape = (n_users, n_features)
age_ucm = sps.csr_matrix((age_ucm_values, (age_ucm_users, age_ucm_features)), shape=shape, dtype=int)
return age_ucm
def build_region_ucm(n_users):
region_ucm_users, region_ucm_features, region_ucm_values = __load_icm_csv(DataFiles.UCM_REGION, third_type=float)
n_features = max(region_ucm_features) + 1
shape = (n_users, n_features)
region_ucm = sps.csr_matrix((region_ucm_values, (region_ucm_users, region_ucm_features)), shape=shape, dtype=int)
return region_ucm
def build_ucm(n_users):
age_ucm = build_age_ucm(n_users)
region_ucm = build_region_ucm(n_users)
return sps.hstack((age_ucm, region_ucm))
def build_target_users():
target_users = load_csv(DataFiles.TARGET_USERS_TEST)
return [int(x[0]) for x in target_users]
def build_all_matrices():
urm = build_urm()
n_users, n_items = urm.shape
icm = build_icm(n_items)
ucm = build_ucm(n_users)
target_users = build_target_users()
return urm, icm, ucm, target_users
def train_test_split(urm, split_type=SplitType.PROBABILISTIC, split=0.8):
if split_type == SplitType.PROBABILISTIC:
return __train_test_split(urm, split)
elif split_type == SplitType.LOO:
return __train_test_loo_split(urm)
elif split_type == SplitType.LOO_CYTHON:
return __train_test_loo_split_cython(urm)
def evaluate(recommender, urm_test, excluded_users=[], cython=False, verbose=True):
from evaluation import evaluate_algorithm
if cython:
if verbose:
print('Ignoring argument excluded_users')
from cython_modules.evaluation import evaluate_cython
if verbose:
print('Using Cython evaluation')
return evaluate_cython(recommender, urm_test, verbose=verbose)
else:
return evaluate_algorithm(recommender, urm_test, excluded_users=excluded_users, verbose=verbose)
def evaluate_mp(recommender, urm_tests, excluded_users=[], cython=False, verbose=True, n_processes=0):
assert type(urm_tests) == list
assert len(urm_tests) >= 1
assert type(n_processes) == int
if n_processes == 0:
n_processes = len(urm_tests)
with Pool(processes=n_processes) as pool:
args = [(recommender, urm_test, excluded_users, cython, verbose) for urm_test in urm_tests]
maps = pool.starmap(evaluate, args, chunksize=1)
maps = [x['MAP'] for x in maps]
return np.mean(maps)
def export(target_users, recommender):
print('Exporting recommendations...')
data = list()
for u_id in tqdm(target_users, desc='Export'):
data.append((u_id, recommender.recommend(u_id, at=10)))
export_csv(('user_id', 'item_list'), data)
print('OK')
def __train_test_split(urm, split=0.8):
print('Using probabilistic splitting ({0:.2f}/{1:.2f})'.format(split, 1-split))
urm = urm.tocoo()
num_interactions = urm.nnz
shape = urm.shape
train_mask = np.random.choice([True, False], num_interactions, p=[split, 1-split])
urm_train = sps.coo_matrix((urm.data[train_mask], (urm.row[train_mask], urm.col[train_mask])), shape=shape)
urm_train = urm_train.tocsr()
test_mask = np.logical_not(train_mask)
urm_test = sps.coo_matrix((urm.data[test_mask], (urm.row[test_mask], urm.col[test_mask])), shape=shape)
urm_test = urm_test.tocsr()
return urm_train, urm_test
def __train_test_loo_split(urm):
print('Using LeaveOneOut')
urm = urm.tocsr()
num_users = urm.shape[0]
num_items = urm.shape[1]
urm_train = urm.copy()
urm_test = sps.lil_matrix((num_users, num_items), dtype=int)
for user_id in trange(num_users, desc='LeaveOneOut'):
start_pos = urm_train.indptr[user_id]
end_pos = urm_train.indptr[user_id + 1]
user_profile = urm_train.indices[start_pos:end_pos]
if user_profile.size > 0:
item_id = np.random.choice(user_profile, 1)
urm_train[user_id, item_id] = 0
urm_test[user_id, item_id] = 1
urm_test = sps.csr_matrix(urm_test, dtype=int, shape=urm.shape)
urm_train.eliminate_zeros()
urm_test.eliminate_zeros()
return urm_train, urm_test
def __load_icm_csv(filename, third_type):
data = load_csv(filename)
data = [[int(row[i]) if i <= 1 else third_type(row[i]) for i in range(len(row))] for row in data]
items, features, values = map(np.array, zip(*data))
return items, features, values
def __encode_values(values):
|
group_struct = namedtuple('group_struct', ['in_group', 'not_in_group'])
def user_segmenter(urm_train, n_groups=10):
groups = dict()
users = dict()
profile_length = np.ediff1d(urm_train.indptr)
group_size = int(profile_length.size/n_groups)
sorted_users = np.argsort(profile_length)
for group_id in range(n_groups):
start_pos = group_id * group_size
end_pos = min((group_id + 1) * group_size, len(profile_length))
users_in_group = sorted_users[start_pos:end_pos]
for user in users_in_group:
users[user] = group_id
users_not_in_group_flag = np.isin(sorted_users, users_in_group, invert=True)
users_not_in_group = sorted_users[users_not_in_group_flag]
groups[group_id] = group_struct(in_group=users_in_group, not_in_group=users_not_in_group)
return groups, users
def multiple_splitting(seeds=(4951, 893, 2618, 39, 4947)):
urm, icm, ucm, target_users = build_all_matrices()
trains = list()
tests = list()
for seed in seeds:
set_seed(seed)
urm_train, urm_test = train_test_split(urm)
trains.append(urm_train)
tests.append(urm_test)
return trains, tests, seeds
if __name__ == '__main__':
from evaluation import evaluate_by_cluster
from cf import ItemCFKNNRecommender
from basic_recommenders import TopPopRecommender
np.random.seed(42)
urm, icm, ucm, target_users = build_all_matrices()
urm_train, urm_test = train_test_split(urm, SplitType.PROBABILISTIC)
top_pop = TopPopRecommender()
top_pop.fit(urm_train)
cf = ItemCFKNNRecommender(fallback_recommender=top_pop)
cf.fit(urm_train, top_k=690, shrink=66, normalize=False, similarity='tanimoto')
evaluate_by_cluster(cf, urm_test, clusterise()) | le = LabelEncoder()
le.fit(values)
return le.transform(values) | identifier_body |
run_utils.py | #!/usr/bin/env python3
import os
from enum import Enum
import numpy as np
import scipy.sparse as sps
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm, trange
from cython_modules.leave_one_out import train_test_loo_split as __train_test_loo_split_cython
from csv_utils import load_csv, export_csv
from multiprocessing import Pool
from collections import namedtuple
class DataFiles:
TRAIN = 'data/data_train.csv'
TARGET_USERS_TEST = 'data/data_target_users_test.csv'
ICM_ASSET = 'data/data_ICM_asset.csv'
ICM_PRICE = 'data/data_ICM_price.csv'
ICM_SUBCLASS = 'data/data_ICM_sub_class.csv'
UCM_AGE = 'data/data_UCM_age.csv'
UCM_REGION = 'data/data_UCM_region.csv'
CLUSTERS = 'data/user_clustered.csv'
class SplitType(Enum):
PROBABILISTIC = 1
LOO = 2
LOO_CYTHON = 3
def set_seed(seed):
print('seed = {0}'.format(seed))
os.environ['RECSYS_SEED'] = str(seed)
np.random.seed(seed)
def get_seed():
env = os.getenv('RECSYS_SEED')
if env:
return int(env)
return -1
def build_urm():
urm_data = load_csv(DataFiles.TRAIN)
urm_data = [[int(row[i]) if i <= 1 else int(float(row[i])) for i in range(len(row))] for row in urm_data]
users, items, ratings = map(np.array, zip(*urm_data))
return sps.csr_matrix((ratings, (users, items)))
def clusterize():
data = load_csv(DataFiles.CLUSTERS)
data = [[int(row[i]) for i in range(len(row))] for row in data]
_, user_ids, cluster_ids = map(list, zip(*data))
assert len(user_ids) == len(cluster_ids)
data_len = len(user_ids)
clusters = dict()
for n in range(max(cluster_ids) + 1):
clusters[n] = list()
for i in range(data_len):
user_id = user_ids[i]
cluster_id = cluster_ids[i]
clusters[cluster_id].append(user_id)
return clusters
def get_cold_users(urm_train, return_warm=False):
profile_lengths = np.ediff1d(urm_train.indptr)
cold_users = np.where(profile_lengths == 0)[0]
if return_warm:
warm_users = np.where(profile_lengths > 0)[0]
return cold_users, warm_users
return cold_users
def build_price_icm(n_items):
price_icm_items, _, price_icm_values = __load_icm_csv(DataFiles.ICM_PRICE, third_type=float)
price_icm_values = __encode_values(price_icm_values)
n_features = max(price_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(price_icm_values))
price_icm = sps.csr_matrix((ones, (price_icm_items, price_icm_values)), shape=shape, dtype=int)
return price_icm
def build_asset_icm(n_items):
asset_icm_items, _, asset_icm_values = __load_icm_csv(DataFiles.ICM_ASSET, third_type=float)
asset_icm_values += 1
asset_icm_values = __encode_values(asset_icm_values)
n_features = max(asset_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(asset_icm_values))
asset_icm = sps.csr_matrix((ones, (asset_icm_items, asset_icm_values)), shape=shape, dtype=int)
return asset_icm
def build_subclass_icm(n_items):
subclass_icm_items, subclass_icm_features, subclass_icm_values = __load_icm_csv(DataFiles.ICM_SUBCLASS, third_type=float)
n_features = max(subclass_icm_features) + 1
shape = (n_items, n_features)
subclass_icm = sps.csr_matrix((subclass_icm_values, (subclass_icm_items, subclass_icm_features)), shape=shape, dtype=int)
return subclass_icm
def build_icm(n_items):
price_icm = build_price_icm(n_items)
asset_icm = build_asset_icm(n_items)
subclass_icm = build_subclass_icm(n_items)
return sps.hstack((price_icm, asset_icm, subclass_icm)).tocsr()
def build_age_ucm(n_users):
age_ucm_users, age_ucm_features, age_ucm_values = __load_icm_csv(DataFiles.UCM_AGE, third_type=float)
n_features = max(age_ucm_features) + 1
shape = (n_users, n_features)
age_ucm = sps.csr_matrix((age_ucm_values, (age_ucm_users, age_ucm_features)), shape=shape, dtype=int)
return age_ucm
def build_region_ucm(n_users):
region_ucm_users, region_ucm_features, region_ucm_values = __load_icm_csv(DataFiles.UCM_REGION, third_type=float)
n_features = max(region_ucm_features) + 1
shape = (n_users, n_features)
region_ucm = sps.csr_matrix((region_ucm_values, (region_ucm_users, region_ucm_features)), shape=shape, dtype=int)
return region_ucm
def build_ucm(n_users):
age_ucm = build_age_ucm(n_users)
region_ucm = build_region_ucm(n_users)
return sps.hstack((age_ucm, region_ucm))
def build_target_users():
target_users = load_csv(DataFiles.TARGET_USERS_TEST)
return [int(x[0]) for x in target_users]
def build_all_matrices():
urm = build_urm()
n_users, n_items = urm.shape
icm = build_icm(n_items)
ucm = build_ucm(n_users)
target_users = build_target_users()
return urm, icm, ucm, target_users
def train_test_split(urm, split_type=SplitType.PROBABILISTIC, split=0.8):
if split_type == SplitType.PROBABILISTIC:
return __train_test_split(urm, split)
elif split_type == SplitType.LOO:
return __train_test_loo_split(urm)
elif split_type == SplitType.LOO_CYTHON:
return __train_test_loo_split_cython(urm)
def evaluate(recommender, urm_test, excluded_users=[], cython=False, verbose=True):
from evaluation import evaluate_algorithm
if cython:
|
else:
return evaluate_algorithm(recommender, urm_test, excluded_users=excluded_users, verbose=verbose)
def evaluate_mp(recommender, urm_tests, excluded_users=[], cython=False, verbose=True, n_processes=0):
assert type(urm_tests) == list
assert len(urm_tests) >= 1
assert type(n_processes) == int
if n_processes == 0:
n_processes = len(urm_tests)
with Pool(processes=n_processes) as pool:
args = [(recommender, urm_test, excluded_users, cython, verbose) for urm_test in urm_tests]
maps = pool.starmap(evaluate, args, chunksize=1)
maps = [x['MAP'] for x in maps]
return np.mean(maps)
def export(target_users, recommender):
print('Exporting recommendations...')
data = list()
for u_id in tqdm(target_users, desc='Export'):
data.append((u_id, recommender.recommend(u_id, at=10)))
export_csv(('user_id', 'item_list'), data)
print('OK')
def __train_test_split(urm, split=0.8):
print('Using probabilistic splitting ({0:.2f}/{1:.2f})'.format(split, 1-split))
urm = urm.tocoo()
num_interactions = urm.nnz
shape = urm.shape
train_mask = np.random.choice([True, False], num_interactions, p=[split, 1-split])
urm_train = sps.coo_matrix((urm.data[train_mask], (urm.row[train_mask], urm.col[train_mask])), shape=shape)
urm_train = urm_train.tocsr()
test_mask = np.logical_not(train_mask)
urm_test = sps.coo_matrix((urm.data[test_mask], (urm.row[test_mask], urm.col[test_mask])), shape=shape)
urm_test = urm_test.tocsr()
return urm_train, urm_test
def __train_test_loo_split(urm):
print('Using LeaveOneOut')
urm = urm.tocsr()
num_users = urm.shape[0]
num_items = urm.shape[1]
urm_train = urm.copy()
urm_test = sps.lil_matrix((num_users, num_items), dtype=int)
for user_id in trange(num_users, desc='LeaveOneOut'):
start_pos = urm_train.indptr[user_id]
end_pos = urm_train.indptr[user_id + 1]
user_profile = urm_train.indices[start_pos:end_pos]
if user_profile.size > 0:
item_id = np.random.choice(user_profile, 1)
urm_train[user_id, item_id] = 0
urm_test[user_id, item_id] = 1
urm_test = sps.csr_matrix(urm_test, dtype=int, shape=urm.shape)
urm_train.eliminate_zeros()
urm_test.eliminate_zeros()
return urm_train, urm_test
def __load_icm_csv(filename, third_type):
data = load_csv(filename)
data = [[int(row[i]) if i <= 1 else third_type(row[i]) for i in range(len(row))] for row in data]
items, features, values = map(np.array, zip(*data))
return items, features, values
def __encode_values(values):
le = LabelEncoder()
le.fit(values)
return le.transform(values)
group_struct = namedtuple('group_struct', ['in_group', 'not_in_group'])
def user_segmenter(urm_train, n_groups=10):
groups = dict()
users = dict()
profile_length = np.ediff1d(urm_train.indptr)
group_size = int(profile_length.size/n_groups)
sorted_users = np.argsort(profile_length)
for group_id in range(n_groups):
start_pos = group_id * group_size
end_pos = min((group_id + 1) * group_size, len(profile_length))
users_in_group = sorted_users[start_pos:end_pos]
for user in users_in_group:
users[user] = group_id
users_not_in_group_flag = np.isin(sorted_users, users_in_group, invert=True)
users_not_in_group = sorted_users[users_not_in_group_flag]
groups[group_id] = group_struct(in_group=users_in_group, not_in_group=users_not_in_group)
return groups, users
def multiple_splitting(seeds=(4951, 893, 2618, 39, 4947)):
urm, icm, ucm, target_users = build_all_matrices()
trains = list()
tests = list()
for seed in seeds:
set_seed(seed)
urm_train, urm_test = train_test_split(urm)
trains.append(urm_train)
tests.append(urm_test)
return trains, tests, seeds
if __name__ == '__main__':
from evaluation import evaluate_by_cluster
from cf import ItemCFKNNRecommender
from basic_recommenders import TopPopRecommender
np.random.seed(42)
urm, icm, ucm, target_users = build_all_matrices()
urm_train, urm_test = train_test_split(urm, SplitType.PROBABILISTIC)
top_pop = TopPopRecommender()
top_pop.fit(urm_train)
cf = ItemCFKNNRecommender(fallback_recommender=top_pop)
cf.fit(urm_train, top_k=690, shrink=66, normalize=False, similarity='tanimoto')
evaluate_by_cluster(cf, urm_test, clusterise()) | if verbose:
print('Ignoring argument excluded_users')
from cython_modules.evaluation import evaluate_cython
if verbose:
print('Using Cython evaluation')
return evaluate_cython(recommender, urm_test, verbose=verbose) | conditional_block |
run_utils.py | #!/usr/bin/env python3
import os
from enum import Enum
import numpy as np
import scipy.sparse as sps
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm, trange
from cython_modules.leave_one_out import train_test_loo_split as __train_test_loo_split_cython
from csv_utils import load_csv, export_csv
from multiprocessing import Pool
from collections import namedtuple
class DataFiles:
TRAIN = 'data/data_train.csv'
TARGET_USERS_TEST = 'data/data_target_users_test.csv'
ICM_ASSET = 'data/data_ICM_asset.csv'
ICM_PRICE = 'data/data_ICM_price.csv'
ICM_SUBCLASS = 'data/data_ICM_sub_class.csv'
UCM_AGE = 'data/data_UCM_age.csv'
UCM_REGION = 'data/data_UCM_region.csv'
CLUSTERS = 'data/user_clustered.csv'
class SplitType(Enum):
PROBABILISTIC = 1
LOO = 2
LOO_CYTHON = 3
def set_seed(seed):
print('seed = {0}'.format(seed))
os.environ['RECSYS_SEED'] = str(seed)
np.random.seed(seed)
def get_seed():
env = os.getenv('RECSYS_SEED')
if env:
return int(env)
return -1
def build_urm():
urm_data = load_csv(DataFiles.TRAIN)
urm_data = [[int(row[i]) if i <= 1 else int(float(row[i])) for i in range(len(row))] for row in urm_data]
users, items, ratings = map(np.array, zip(*urm_data))
return sps.csr_matrix((ratings, (users, items)))
def clusterize():
data = load_csv(DataFiles.CLUSTERS)
data = [[int(row[i]) for i in range(len(row))] for row in data]
_, user_ids, cluster_ids = map(list, zip(*data))
assert len(user_ids) == len(cluster_ids)
data_len = len(user_ids)
clusters = dict()
for n in range(max(cluster_ids) + 1):
clusters[n] = list()
for i in range(data_len):
user_id = user_ids[i]
cluster_id = cluster_ids[i]
clusters[cluster_id].append(user_id)
return clusters
def get_cold_users(urm_train, return_warm=False):
profile_lengths = np.ediff1d(urm_train.indptr)
cold_users = np.where(profile_lengths == 0)[0]
if return_warm:
warm_users = np.where(profile_lengths > 0)[0]
return cold_users, warm_users
return cold_users
def build_price_icm(n_items):
price_icm_items, _, price_icm_values = __load_icm_csv(DataFiles.ICM_PRICE, third_type=float)
price_icm_values = __encode_values(price_icm_values)
n_features = max(price_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(price_icm_values))
price_icm = sps.csr_matrix((ones, (price_icm_items, price_icm_values)), shape=shape, dtype=int)
return price_icm
def build_asset_icm(n_items):
asset_icm_items, _, asset_icm_values = __load_icm_csv(DataFiles.ICM_ASSET, third_type=float)
asset_icm_values += 1
asset_icm_values = __encode_values(asset_icm_values)
n_features = max(asset_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(asset_icm_values))
asset_icm = sps.csr_matrix((ones, (asset_icm_items, asset_icm_values)), shape=shape, dtype=int)
return asset_icm
def build_subclass_icm(n_items):
subclass_icm_items, subclass_icm_features, subclass_icm_values = __load_icm_csv(DataFiles.ICM_SUBCLASS, third_type=float)
n_features = max(subclass_icm_features) + 1
shape = (n_items, n_features)
subclass_icm = sps.csr_matrix((subclass_icm_values, (subclass_icm_items, subclass_icm_features)), shape=shape, dtype=int)
return subclass_icm
def build_icm(n_items):
price_icm = build_price_icm(n_items)
asset_icm = build_asset_icm(n_items)
subclass_icm = build_subclass_icm(n_items)
return sps.hstack((price_icm, asset_icm, subclass_icm)).tocsr()
def build_age_ucm(n_users): | age_ucm = sps.csr_matrix((age_ucm_values, (age_ucm_users, age_ucm_features)), shape=shape, dtype=int)
return age_ucm
def build_region_ucm(n_users):
region_ucm_users, region_ucm_features, region_ucm_values = __load_icm_csv(DataFiles.UCM_REGION, third_type=float)
n_features = max(region_ucm_features) + 1
shape = (n_users, n_features)
region_ucm = sps.csr_matrix((region_ucm_values, (region_ucm_users, region_ucm_features)), shape=shape, dtype=int)
return region_ucm
def build_ucm(n_users):
age_ucm = build_age_ucm(n_users)
region_ucm = build_region_ucm(n_users)
return sps.hstack((age_ucm, region_ucm))
def build_target_users():
target_users = load_csv(DataFiles.TARGET_USERS_TEST)
return [int(x[0]) for x in target_users]
def build_all_matrices():
urm = build_urm()
n_users, n_items = urm.shape
icm = build_icm(n_items)
ucm = build_ucm(n_users)
target_users = build_target_users()
return urm, icm, ucm, target_users
def train_test_split(urm, split_type=SplitType.PROBABILISTIC, split=0.8):
if split_type == SplitType.PROBABILISTIC:
return __train_test_split(urm, split)
elif split_type == SplitType.LOO:
return __train_test_loo_split(urm)
elif split_type == SplitType.LOO_CYTHON:
return __train_test_loo_split_cython(urm)
def evaluate(recommender, urm_test, excluded_users=[], cython=False, verbose=True):
from evaluation import evaluate_algorithm
if cython:
if verbose:
print('Ignoring argument excluded_users')
from cython_modules.evaluation import evaluate_cython
if verbose:
print('Using Cython evaluation')
return evaluate_cython(recommender, urm_test, verbose=verbose)
else:
return evaluate_algorithm(recommender, urm_test, excluded_users=excluded_users, verbose=verbose)
def evaluate_mp(recommender, urm_tests, excluded_users=[], cython=False, verbose=True, n_processes=0):
assert type(urm_tests) == list
assert len(urm_tests) >= 1
assert type(n_processes) == int
if n_processes == 0:
n_processes = len(urm_tests)
with Pool(processes=n_processes) as pool:
args = [(recommender, urm_test, excluded_users, cython, verbose) for urm_test in urm_tests]
maps = pool.starmap(evaluate, args, chunksize=1)
maps = [x['MAP'] for x in maps]
return np.mean(maps)
def export(target_users, recommender):
print('Exporting recommendations...')
data = list()
for u_id in tqdm(target_users, desc='Export'):
data.append((u_id, recommender.recommend(u_id, at=10)))
export_csv(('user_id', 'item_list'), data)
print('OK')
def __train_test_split(urm, split=0.8):
print('Using probabilistic splitting ({0:.2f}/{1:.2f})'.format(split, 1-split))
urm = urm.tocoo()
num_interactions = urm.nnz
shape = urm.shape
train_mask = np.random.choice([True, False], num_interactions, p=[split, 1-split])
urm_train = sps.coo_matrix((urm.data[train_mask], (urm.row[train_mask], urm.col[train_mask])), shape=shape)
urm_train = urm_train.tocsr()
test_mask = np.logical_not(train_mask)
urm_test = sps.coo_matrix((urm.data[test_mask], (urm.row[test_mask], urm.col[test_mask])), shape=shape)
urm_test = urm_test.tocsr()
return urm_train, urm_test
def __train_test_loo_split(urm):
print('Using LeaveOneOut')
urm = urm.tocsr()
num_users = urm.shape[0]
num_items = urm.shape[1]
urm_train = urm.copy()
urm_test = sps.lil_matrix((num_users, num_items), dtype=int)
for user_id in trange(num_users, desc='LeaveOneOut'):
start_pos = urm_train.indptr[user_id]
end_pos = urm_train.indptr[user_id + 1]
user_profile = urm_train.indices[start_pos:end_pos]
if user_profile.size > 0:
item_id = np.random.choice(user_profile, 1)
urm_train[user_id, item_id] = 0
urm_test[user_id, item_id] = 1
urm_test = sps.csr_matrix(urm_test, dtype=int, shape=urm.shape)
urm_train.eliminate_zeros()
urm_test.eliminate_zeros()
return urm_train, urm_test
def __load_icm_csv(filename, third_type):
data = load_csv(filename)
data = [[int(row[i]) if i <= 1 else third_type(row[i]) for i in range(len(row))] for row in data]
items, features, values = map(np.array, zip(*data))
return items, features, values
def __encode_values(values):
le = LabelEncoder()
le.fit(values)
return le.transform(values)
group_struct = namedtuple('group_struct', ['in_group', 'not_in_group'])
def user_segmenter(urm_train, n_groups=10):
groups = dict()
users = dict()
profile_length = np.ediff1d(urm_train.indptr)
group_size = int(profile_length.size/n_groups)
sorted_users = np.argsort(profile_length)
for group_id in range(n_groups):
start_pos = group_id * group_size
end_pos = min((group_id + 1) * group_size, len(profile_length))
users_in_group = sorted_users[start_pos:end_pos]
for user in users_in_group:
users[user] = group_id
users_not_in_group_flag = np.isin(sorted_users, users_in_group, invert=True)
users_not_in_group = sorted_users[users_not_in_group_flag]
groups[group_id] = group_struct(in_group=users_in_group, not_in_group=users_not_in_group)
return groups, users
def multiple_splitting(seeds=(4951, 893, 2618, 39, 4947)):
urm, icm, ucm, target_users = build_all_matrices()
trains = list()
tests = list()
for seed in seeds:
set_seed(seed)
urm_train, urm_test = train_test_split(urm)
trains.append(urm_train)
tests.append(urm_test)
return trains, tests, seeds
if __name__ == '__main__':
from evaluation import evaluate_by_cluster
from cf import ItemCFKNNRecommender
from basic_recommenders import TopPopRecommender
np.random.seed(42)
urm, icm, ucm, target_users = build_all_matrices()
urm_train, urm_test = train_test_split(urm, SplitType.PROBABILISTIC)
top_pop = TopPopRecommender()
top_pop.fit(urm_train)
cf = ItemCFKNNRecommender(fallback_recommender=top_pop)
cf.fit(urm_train, top_k=690, shrink=66, normalize=False, similarity='tanimoto')
evaluate_by_cluster(cf, urm_test, clusterise()) | age_ucm_users, age_ucm_features, age_ucm_values = __load_icm_csv(DataFiles.UCM_AGE, third_type=float)
n_features = max(age_ucm_features) + 1
shape = (n_users, n_features) | random_line_split |
Home.js | import React, { useEffect, useState } from 'react';
import firebaseInstance from "../FirebaseInstance"
import
DevSettings,
{
ActivityIndicator,
StyleSheet,
Modal,
View,
ScrollView
} from 'react-native';
import {Button, Text, Header, Image} from "react-native-elements"
import Icon from "react-native-vector-icons/FontAwesome"
import DayItem from "../components/DayItem";
import ModalContent from "../components/ModalContent";
import {
filter,
randomIndex,
userIsBusy,
getNewCourse,
handleOpenWithWebBrowser } from "../utils/helperFunctions"
import { useStorageContext } from "../context/StorageContext";
export default function Home() {
const [error, setError] = useState(null);
const [isLoading, setIsLoading] = useState(false);
const [weekday, setWeekday] = useState([]);
const [friday, setFriday] = useState([]);
const [sunday, setSunday] = useState([]);
const [fastFood, setFastFood] = useState([]);
const [dinnerList, setDinnerList] = useState(null);
const [filteredData, setFilteredData] = useState([]);
const [database, setDatabase] = useState([])
const [isModal, setIsModal] = useState(false);
const [isChecked, setIsChecked] = useState({
filters:[
{type: "meat", checked: false, text: "Kjøtt"},
{type: "fish", checked: false, text: "Fisk"},
{type: "veg", checked: false, text: "Vegetar"},
{type: "glutenFree", checked: false, text: "Glutenfri"},
{type: "lactoseFree", checked: false, text: "Laktosefri"},
{type: "mon", checked: false, text: "Mandag", index: 0},
{type: "tue", checked: false, text: "Tirsdag", index: 1},
{type: "wed", checked: false, text: "Onsdag", index: 2},
{type: "thu", checked: false, text: "Torsdag", index: 3},
{type: "fri", checked: false, text: "Fredag", index: 4},
{type: "sat", checked: false, text: "Lørdag", index: 5},
{type: "sun", checked: false, text: "Søndag", index:6},
]}
);
const storage = useStorageContext();
//----------------------------------------------------------------useEffects
//Get data from Firebase
useEffect(() => {
setIsLoading(true);
async function readCollection(text) {
| readCollection("dinners")
}, [])
//set filteredData to "database"
useEffect(() => {
setFilteredData([...database])
}, [database]);
//Apply filter when a filter is added/removed
useEffect(() => {
applyFilter();
}, [isChecked]);
//Organize courses in weekday,friday and sunday-lists
useEffect(() => {
let tempArr = [...filteredData];
if (database !== null) {
const course = item => item.time === 1 || item.time === 2 && item.friday === false && item.sunday === false
tempArr = filter(course, filteredData)
setWeekday(tempArr)
const friday = item => item.friday;
tempArr = filter(friday, filteredData)
setFriday(tempArr);
const sunday = item => item.sunday;
tempArr = filter(sunday, filteredData)
setSunday(tempArr);
const fastFood = item => item.time === 1;
tempArr = filter(fastFood, filteredData);
setFastFood(tempArr);
}
}, [filteredData])
//re-fill dinnerList every time the weekday-array changes
useEffect(() => {
if (weekday.length > 0) {
fillDinnerList();
}
}, [weekday]);
//---------------------------------------------------------------------------Fill the dinnerList
//Sets the 7 days dinner list based on the weekday, friday and sunday arrays
const fillDinnerList = () => {
let list = [];
let tempWeek = [...weekday];
let tempFri = [...friday];
let tempSun = [...sunday];
//Push weekday dinners
for (let i=0; i <= 4; i++) {
let index = randomIndex(tempWeek);
let dinner = tempWeek[index];
list.push(dinner)
tempWeek.splice(index, 1);
}
//Push friday dinner
let f = randomIndex(tempFri);
let fDinner = tempFri[f];
list.splice(4, 0, fDinner);
tempFri.splice(f, 1);
//Push sunday dinner
let s = randomIndex(sunday);
let sDinner = tempSun[s];
list.splice(6, 0, sDinner);
tempSun.splice(s, 1)
list = applyBusyDaysFilter(list);
setDinnerList(list);
}
//----------------------------------------------------------------------------Filter courses
const applyFilter = () => {
let tempArr = database !== null ? [...database] : null;
let meatArr = [];
let fishArr = [];
let vegArr = [];
let glutArr = [];
let lactoseArr = [];
//filters meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "meat") {
let meat = item => item.type === "meat";
meatArr = filter(meat, tempArr)
}
if (param.type === "fish") {
let fish = item => item.type === "fish";
fishArr = filter(fish, tempArr);
}
if (param.type === "veg") {
let veg = item => item.type === "veg";
vegArr = filter(veg, tempArr);
}
}
})
if (fishArr.length > 0 || vegArr.length > 0 || meatArr.length > 0) {
tempArr = [...fishArr, ...vegArr, ...meatArr];
}
//filters glutenFree and lactoseFree - based on the tempArr that's already filtered by meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "glutenFree") {
let glut = item => item.glutenFree;
glutArr = filter(glut, tempArr);
}
else if (param.type === "lactoseFree") {
let lac = item => item.lactoseFree;
lactoseArr = filter(lac, tempArr);
}
}
})
if (glutArr.length > 0 || lactoseArr.length > 0) {
tempArr = [...glutArr, ...lactoseArr];
}
setFilteredData(tempArr);
}
//Changes to a course with time===1 on the days where the user is extra busy
const applyBusyDaysFilter = (array) => {
let newDinnerList = array;
const busyDays = userIsBusy(isChecked.filters);
let newCourse;
busyDays.forEach(dayIndex => {
newCourse = getNewCourse(dinnerList, fastFood);
newDinnerList.splice(dayIndex, 1, newCourse);
})
return newDinnerList;
}
//Toggles filter parameters based on which buttons the user has pressed
const toggleFilters = ({type}) => {
let tempArr = isChecked.filters.map(item => {
if (item.type === type) {
return {...item, checked: !item.checked};
}
return item
})
setIsChecked({filters: tempArr});
}
//shows/hides the modal with filter options
const toggleModal = () => {
setIsModal(!isModal);
}
//---------------------------------------------------------------------------------Change courses
//Changes the course of a certain index in the dinnerList
const changeCourse = ({index}) => {
let newArr = [...dinnerList];
if (index < 4 || index === 5) {
newArr[index] = getNewCourse(dinnerList, weekday)
} else if (index === 4) {
newArr[index] = getNewCourse(dinnerList, friday)
} else {
newArr[index] = getNewCourse(dinnerList, sunday)
}
newArr = applyBusyDaysFilter(newArr);
setDinnerList(newArr);
}
if (error) {
return (
<View style={styles.container}>
<Text h3>{error.message}</Text>
<Button title="Prøv igjen" onPress={() => DevSettings.reload()} />
</View>
)
}
if (isLoading) {
return(
<View style={styles.container}>
<ActivityIndicator/>
<Text h3>Siden lastes inn.</Text>
</View>
)
}
return (
<View style={styles.container}>
<Header
placement="right"
containerStyle={{
backgroundColor: "#f9f9f8"
}}
leftComponent={
<Image
accessibility={true}
accessibilityLabel="Logo"
source={require("../assets/logo_purple.png")}
style={{width: 140, height: 50}}
PlaceholderContent={<ActivityIndicator/>}/>
}
centerComponent={
<Button
accessibilityLabel="Lagre listen"
icon={
<Icon name="save" size={35} color="#a96dd8"/>
}
raised={true}
type="outline"
containerStyle={{height: 50,}}
onPress={() => {storage.saveInStorage(dinnerList)}}/>
}
rightComponent={
<Button
accessibilityLabel="Åpne filter"
icon={
<Icon name="filter" size={40} color="#a96dd8"/>
}
raised={true}
onPress={toggleModal}
type="outline"
containerStyle={{height: 50}}/>
}/>
<View style={{flexDirection: "row", alignItems: "center", marginTop: 30}}>
<Text h1 >Lag ukeplan</Text>
</View>
<View style={{marginBottom: 10}}>
{isChecked.filters.map((param, index) => {
let type = param.type;
if (param.checked) {
return(
<Button
accessibilityHint={`Fjern ${param.text} fra filteret`}
title={param.text}
key={"btnKey" + index}
type="outline"
raised={true}
onPress={() => {toggleFilters({type})}}
/>
)
}
})}
</View>
<Modal
transparent={true}
visible={isModal}
onRequestClose={() => {
setIsModal(!isModal);
}}>
<View style={styles.container}>
<ScrollView contentContainerStyle={styles.modalView}>
<Button
accessibilityLabel={"Lukk filteroversikt"}
buttonStyle={{backgroundColor: "#fff"}}
icon={
<Icon name="times-circle" size={50}/>
}
onPress={() => {setIsModal(!isModal)}}
/>
<ModalContent
toggleFilter={toggleFilters}
isChecked={isChecked.filters}
/>
</ScrollView>
</View>
</Modal>
<ScrollView>
<View style={styles.itemWrap}>
{dinnerList !== null && (
dinnerList.map((item, index) => {
return (
<DayItem
handleUrl={handleOpenWithWebBrowser}
data={item}
index={index}
key={index + "dayItem"}
handlePress={changeCourse} />
)
})
)}
</View>
</ScrollView>
</View>
);
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: '#f9f9f8',
alignItems: 'center',
justifyContent: 'center',
padding: 10
},
itemWrap: {
maxWidth: 320,
},
modalView: {
margin: 20,
width: 320,
backgroundColor: "white",
borderRadius: 20,
padding: 35,
alignItems: "center",
shadowColor: "#333",
shadowOffset: {
width: 0,
height: 2
},
shadowOpacity: 0.25,
shadowRadius: 4,
elevation: 5
},
});
| try{
const collection = await firebaseInstance.firestore().collection(text)
const readCollection = await collection.get()
let returnArray = [];
readCollection.forEach(item => {
const itemData = item.data() || {};
returnArray.push({
id: item.id,
...itemData
})
})
setDatabase(returnArray);
setIsLoading(false)
}
catch(error) {
setIsLoading(false);
setError(error);
}
}
| identifier_body |
Home.js | import React, { useEffect, useState } from 'react';
import firebaseInstance from "../FirebaseInstance"
import
DevSettings,
{
ActivityIndicator,
StyleSheet,
Modal,
View,
ScrollView
} from 'react-native';
import {Button, Text, Header, Image} from "react-native-elements"
import Icon from "react-native-vector-icons/FontAwesome"
import DayItem from "../components/DayItem";
import ModalContent from "../components/ModalContent";
import {
filter,
randomIndex,
userIsBusy,
getNewCourse,
handleOpenWithWebBrowser } from "../utils/helperFunctions"
import { useStorageContext } from "../context/StorageContext";
export default function Home() {
const [error, setError] = useState(null);
const [isLoading, setIsLoading] = useState(false);
const [weekday, setWeekday] = useState([]);
const [friday, setFriday] = useState([]);
const [sunday, setSunday] = useState([]);
const [fastFood, setFastFood] = useState([]);
const [dinnerList, setDinnerList] = useState(null);
const [filteredData, setFilteredData] = useState([]);
const [database, setDatabase] = useState([])
const [isModal, setIsModal] = useState(false);
const [isChecked, setIsChecked] = useState({
filters:[
{type: "meat", checked: false, text: "Kjøtt"},
{type: "fish", checked: false, text: "Fisk"},
{type: "veg", checked: false, text: "Vegetar"},
{type: "glutenFree", checked: false, text: "Glutenfri"},
{type: "lactoseFree", checked: false, text: "Laktosefri"},
{type: "mon", checked: false, text: "Mandag", index: 0},
{type: "tue", checked: false, text: "Tirsdag", index: 1},
{type: "wed", checked: false, text: "Onsdag", index: 2},
{type: "thu", checked: false, text: "Torsdag", index: 3},
{type: "fri", checked: false, text: "Fredag", index: 4},
{type: "sat", checked: false, text: "Lørdag", index: 5},
{type: "sun", checked: false, text: "Søndag", index:6},
]}
);
const storage = useStorageContext();
//----------------------------------------------------------------useEffects
//Get data from Firebase
useEffect(() => {
setIsLoading(true);
async function readCollection(text) {
try{
const collection = await firebaseInstance.firestore().collection(text)
const readCollection = await collection.get()
let returnArray = [];
readCollection.forEach(item => {
const itemData = item.data() || {};
returnArray.push({
id: item.id,
...itemData
})
})
setDatabase(returnArray);
setIsLoading(false)
}
catch(error) {
setIsLoading(false);
setError(error);
}
}
readCollection("dinners")
}, [])
//set filteredData to "database"
useEffect(() => {
setFilteredData([...database])
}, [database]);
//Apply filter when a filter is added/removed
useEffect(() => {
applyFilter();
}, [isChecked]);
//Organize courses in weekday,friday and sunday-lists
useEffect(() => {
let tempArr = [...filteredData];
if (database !== null) {
const course = item => item.time === 1 || item.time === 2 && item.friday === false && item.sunday === false
tempArr = filter(course, filteredData)
setWeekday(tempArr)
const friday = item => item.friday;
tempArr = filter(friday, filteredData)
setFriday(tempArr);
const sunday = item => item.sunday;
tempArr = filter(sunday, filteredData)
setSunday(tempArr);
const fastFood = item => item.time === 1;
tempArr = filter(fastFood, filteredData);
setFastFood(tempArr);
}
}, [filteredData])
//re-fill dinnerList every time the weekday-array changes
useEffect(() => {
if (weekday.length > 0) {
fillDinnerList();
}
}, [weekday]);
//---------------------------------------------------------------------------Fill the dinnerList
//Sets the 7 days dinner list based on the weekday, friday and sunday arrays
const fillDinnerList = () => {
let list = [];
let tempWeek = [...weekday];
let tempFri = [...friday];
let tempSun = [...sunday];
//Push weekday dinners
for (let i=0; i <= 4; i++) {
let index = randomIndex(tempWeek);
let dinner = tempWeek[index];
list.push(dinner)
tempWeek.splice(index, 1);
}
//Push friday dinner
let f = randomIndex(tempFri);
let fDinner = tempFri[f];
list.splice(4, 0, fDinner);
tempFri.splice(f, 1);
//Push sunday dinner
let s = randomIndex(sunday);
let sDinner = tempSun[s];
list.splice(6, 0, sDinner);
tempSun.splice(s, 1)
list = applyBusyDaysFilter(list);
setDinnerList(list);
}
//----------------------------------------------------------------------------Filter courses
const applyFilter = () => {
let tempArr = database !== null ? [...database] : null;
let meatArr = [];
let fishArr = [];
let vegArr = [];
let glutArr = [];
let lactoseArr = [];
//filters meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "meat") {
let meat = item => item.type === "meat";
meatArr = filter(meat, tempArr)
}
if (param.type === "fish") {
let fish = item => item.type === "fish";
fishArr = filter(fish, tempArr);
}
if (param.type === "veg") {
| }
})
if (fishArr.length > 0 || vegArr.length > 0 || meatArr.length > 0) {
tempArr = [...fishArr, ...vegArr, ...meatArr];
}
//filters glutenFree and lactoseFree - based on the tempArr that's already filtered by meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "glutenFree") {
let glut = item => item.glutenFree;
glutArr = filter(glut, tempArr);
}
else if (param.type === "lactoseFree") {
let lac = item => item.lactoseFree;
lactoseArr = filter(lac, tempArr);
}
}
})
if (glutArr.length > 0 || lactoseArr.length > 0) {
tempArr = [...glutArr, ...lactoseArr];
}
setFilteredData(tempArr);
}
//Changes to a course with time===1 on the days where the user is extra busy
const applyBusyDaysFilter = (array) => {
let newDinnerList = array;
const busyDays = userIsBusy(isChecked.filters);
let newCourse;
busyDays.forEach(dayIndex => {
newCourse = getNewCourse(dinnerList, fastFood);
newDinnerList.splice(dayIndex, 1, newCourse);
})
return newDinnerList;
}
//Toggles filter parameters based on which buttons the user has pressed
const toggleFilters = ({type}) => {
let tempArr = isChecked.filters.map(item => {
if (item.type === type) {
return {...item, checked: !item.checked};
}
return item
})
setIsChecked({filters: tempArr});
}
//shows/hides the modal with filter options
const toggleModal = () => {
setIsModal(!isModal);
}
//---------------------------------------------------------------------------------Change courses
//Changes the course of a certain index in the dinnerList
const changeCourse = ({index}) => {
let newArr = [...dinnerList];
if (index < 4 || index === 5) {
newArr[index] = getNewCourse(dinnerList, weekday)
} else if (index === 4) {
newArr[index] = getNewCourse(dinnerList, friday)
} else {
newArr[index] = getNewCourse(dinnerList, sunday)
}
newArr = applyBusyDaysFilter(newArr);
setDinnerList(newArr);
}
if (error) {
return (
<View style={styles.container}>
<Text h3>{error.message}</Text>
<Button title="Prøv igjen" onPress={() => DevSettings.reload()} />
</View>
)
}
if (isLoading) {
return(
<View style={styles.container}>
<ActivityIndicator/>
<Text h3>Siden lastes inn.</Text>
</View>
)
}
return (
<View style={styles.container}>
<Header
placement="right"
containerStyle={{
backgroundColor: "#f9f9f8"
}}
leftComponent={
<Image
accessibility={true}
accessibilityLabel="Logo"
source={require("../assets/logo_purple.png")}
style={{width: 140, height: 50}}
PlaceholderContent={<ActivityIndicator/>}/>
}
centerComponent={
<Button
accessibilityLabel="Lagre listen"
icon={
<Icon name="save" size={35} color="#a96dd8"/>
}
raised={true}
type="outline"
containerStyle={{height: 50,}}
onPress={() => {storage.saveInStorage(dinnerList)}}/>
}
rightComponent={
<Button
accessibilityLabel="Åpne filter"
icon={
<Icon name="filter" size={40} color="#a96dd8"/>
}
raised={true}
onPress={toggleModal}
type="outline"
containerStyle={{height: 50}}/>
}/>
<View style={{flexDirection: "row", alignItems: "center", marginTop: 30}}>
<Text h1 >Lag ukeplan</Text>
</View>
<View style={{marginBottom: 10}}>
{isChecked.filters.map((param, index) => {
let type = param.type;
if (param.checked) {
return(
<Button
accessibilityHint={`Fjern ${param.text} fra filteret`}
title={param.text}
key={"btnKey" + index}
type="outline"
raised={true}
onPress={() => {toggleFilters({type})}}
/>
)
}
})}
</View>
<Modal
transparent={true}
visible={isModal}
onRequestClose={() => {
setIsModal(!isModal);
}}>
<View style={styles.container}>
<ScrollView contentContainerStyle={styles.modalView}>
<Button
accessibilityLabel={"Lukk filteroversikt"}
buttonStyle={{backgroundColor: "#fff"}}
icon={
<Icon name="times-circle" size={50}/>
}
onPress={() => {setIsModal(!isModal)}}
/>
<ModalContent
toggleFilter={toggleFilters}
isChecked={isChecked.filters}
/>
</ScrollView>
</View>
</Modal>
<ScrollView>
<View style={styles.itemWrap}>
{dinnerList !== null && (
dinnerList.map((item, index) => {
return (
<DayItem
handleUrl={handleOpenWithWebBrowser}
data={item}
index={index}
key={index + "dayItem"}
handlePress={changeCourse} />
)
})
)}
</View>
</ScrollView>
</View>
);
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: '#f9f9f8',
alignItems: 'center',
justifyContent: 'center',
padding: 10
},
itemWrap: {
maxWidth: 320,
},
modalView: {
margin: 20,
width: 320,
backgroundColor: "white",
borderRadius: 20,
padding: 35,
alignItems: "center",
shadowColor: "#333",
shadowOffset: {
width: 0,
height: 2
},
shadowOpacity: 0.25,
shadowRadius: 4,
elevation: 5
},
});
| let veg = item => item.type === "veg";
vegArr = filter(veg, tempArr);
}
| conditional_block |
Home.js | import React, { useEffect, useState } from 'react';
import firebaseInstance from "../FirebaseInstance"
import
DevSettings,
{
ActivityIndicator,
StyleSheet,
Modal,
View,
ScrollView
} from 'react-native';
import {Button, Text, Header, Image} from "react-native-elements"
import Icon from "react-native-vector-icons/FontAwesome"
import DayItem from "../components/DayItem";
import ModalContent from "../components/ModalContent";
import {
filter,
randomIndex,
userIsBusy,
getNewCourse,
handleOpenWithWebBrowser } from "../utils/helperFunctions"
import { useStorageContext } from "../context/StorageContext";
export default function | () {
const [error, setError] = useState(null);
const [isLoading, setIsLoading] = useState(false);
const [weekday, setWeekday] = useState([]);
const [friday, setFriday] = useState([]);
const [sunday, setSunday] = useState([]);
const [fastFood, setFastFood] = useState([]);
const [dinnerList, setDinnerList] = useState(null);
const [filteredData, setFilteredData] = useState([]);
const [database, setDatabase] = useState([])
const [isModal, setIsModal] = useState(false);
const [isChecked, setIsChecked] = useState({
filters:[
{type: "meat", checked: false, text: "Kjøtt"},
{type: "fish", checked: false, text: "Fisk"},
{type: "veg", checked: false, text: "Vegetar"},
{type: "glutenFree", checked: false, text: "Glutenfri"},
{type: "lactoseFree", checked: false, text: "Laktosefri"},
{type: "mon", checked: false, text: "Mandag", index: 0},
{type: "tue", checked: false, text: "Tirsdag", index: 1},
{type: "wed", checked: false, text: "Onsdag", index: 2},
{type: "thu", checked: false, text: "Torsdag", index: 3},
{type: "fri", checked: false, text: "Fredag", index: 4},
{type: "sat", checked: false, text: "Lørdag", index: 5},
{type: "sun", checked: false, text: "Søndag", index:6},
]}
);
const storage = useStorageContext();
//----------------------------------------------------------------useEffects
//Get data from Firebase
useEffect(() => {
setIsLoading(true);
async function readCollection(text) {
try{
const collection = await firebaseInstance.firestore().collection(text)
const readCollection = await collection.get()
let returnArray = [];
readCollection.forEach(item => {
const itemData = item.data() || {};
returnArray.push({
id: item.id,
...itemData
})
})
setDatabase(returnArray);
setIsLoading(false)
}
catch(error) {
setIsLoading(false);
setError(error);
}
}
readCollection("dinners")
}, [])
//set filteredData to "database"
useEffect(() => {
setFilteredData([...database])
}, [database]);
//Apply filter when a filter is added/removed
useEffect(() => {
applyFilter();
}, [isChecked]);
//Organize courses in weekday,friday and sunday-lists
useEffect(() => {
let tempArr = [...filteredData];
if (database !== null) {
const course = item => item.time === 1 || item.time === 2 && item.friday === false && item.sunday === false
tempArr = filter(course, filteredData)
setWeekday(tempArr)
const friday = item => item.friday;
tempArr = filter(friday, filteredData)
setFriday(tempArr);
const sunday = item => item.sunday;
tempArr = filter(sunday, filteredData)
setSunday(tempArr);
const fastFood = item => item.time === 1;
tempArr = filter(fastFood, filteredData);
setFastFood(tempArr);
}
}, [filteredData])
//re-fill dinnerList every time the weekday-array changes
useEffect(() => {
if (weekday.length > 0) {
fillDinnerList();
}
}, [weekday]);
//---------------------------------------------------------------------------Fill the dinnerList
//Sets the 7 days dinner list based on the weekday, friday and sunday arrays
const fillDinnerList = () => {
let list = [];
let tempWeek = [...weekday];
let tempFri = [...friday];
let tempSun = [...sunday];
//Push weekday dinners
for (let i=0; i <= 4; i++) {
let index = randomIndex(tempWeek);
let dinner = tempWeek[index];
list.push(dinner)
tempWeek.splice(index, 1);
}
//Push friday dinner
let f = randomIndex(tempFri);
let fDinner = tempFri[f];
list.splice(4, 0, fDinner);
tempFri.splice(f, 1);
//Push sunday dinner
let s = randomIndex(sunday);
let sDinner = tempSun[s];
list.splice(6, 0, sDinner);
tempSun.splice(s, 1)
list = applyBusyDaysFilter(list);
setDinnerList(list);
}
//----------------------------------------------------------------------------Filter courses
const applyFilter = () => {
let tempArr = database !== null ? [...database] : null;
let meatArr = [];
let fishArr = [];
let vegArr = [];
let glutArr = [];
let lactoseArr = [];
//filters meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "meat") {
let meat = item => item.type === "meat";
meatArr = filter(meat, tempArr)
}
if (param.type === "fish") {
let fish = item => item.type === "fish";
fishArr = filter(fish, tempArr);
}
if (param.type === "veg") {
let veg = item => item.type === "veg";
vegArr = filter(veg, tempArr);
}
}
})
if (fishArr.length > 0 || vegArr.length > 0 || meatArr.length > 0) {
tempArr = [...fishArr, ...vegArr, ...meatArr];
}
//filters glutenFree and lactoseFree - based on the tempArr that's already filtered by meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "glutenFree") {
let glut = item => item.glutenFree;
glutArr = filter(glut, tempArr);
}
else if (param.type === "lactoseFree") {
let lac = item => item.lactoseFree;
lactoseArr = filter(lac, tempArr);
}
}
})
if (glutArr.length > 0 || lactoseArr.length > 0) {
tempArr = [...glutArr, ...lactoseArr];
}
setFilteredData(tempArr);
}
//Changes to a course with time===1 on the days where the user is extra busy
const applyBusyDaysFilter = (array) => {
let newDinnerList = array;
const busyDays = userIsBusy(isChecked.filters);
let newCourse;
busyDays.forEach(dayIndex => {
newCourse = getNewCourse(dinnerList, fastFood);
newDinnerList.splice(dayIndex, 1, newCourse);
})
return newDinnerList;
}
//Toggles filter parameters based on which buttons the user has pressed
const toggleFilters = ({type}) => {
let tempArr = isChecked.filters.map(item => {
if (item.type === type) {
return {...item, checked: !item.checked};
}
return item
})
setIsChecked({filters: tempArr});
}
//shows/hides the modal with filter options
const toggleModal = () => {
setIsModal(!isModal);
}
//---------------------------------------------------------------------------------Change courses
//Changes the course of a certain index in the dinnerList
const changeCourse = ({index}) => {
let newArr = [...dinnerList];
if (index < 4 || index === 5) {
newArr[index] = getNewCourse(dinnerList, weekday)
} else if (index === 4) {
newArr[index] = getNewCourse(dinnerList, friday)
} else {
newArr[index] = getNewCourse(dinnerList, sunday)
}
newArr = applyBusyDaysFilter(newArr);
setDinnerList(newArr);
}
if (error) {
return (
<View style={styles.container}>
<Text h3>{error.message}</Text>
<Button title="Prøv igjen" onPress={() => DevSettings.reload()} />
</View>
)
}
if (isLoading) {
return(
<View style={styles.container}>
<ActivityIndicator/>
<Text h3>Siden lastes inn.</Text>
</View>
)
}
return (
<View style={styles.container}>
<Header
placement="right"
containerStyle={{
backgroundColor: "#f9f9f8"
}}
leftComponent={
<Image
accessibility={true}
accessibilityLabel="Logo"
source={require("../assets/logo_purple.png")}
style={{width: 140, height: 50}}
PlaceholderContent={<ActivityIndicator/>}/>
}
centerComponent={
<Button
accessibilityLabel="Lagre listen"
icon={
<Icon name="save" size={35} color="#a96dd8"/>
}
raised={true}
type="outline"
containerStyle={{height: 50,}}
onPress={() => {storage.saveInStorage(dinnerList)}}/>
}
rightComponent={
<Button
accessibilityLabel="Åpne filter"
icon={
<Icon name="filter" size={40} color="#a96dd8"/>
}
raised={true}
onPress={toggleModal}
type="outline"
containerStyle={{height: 50}}/>
}/>
<View style={{flexDirection: "row", alignItems: "center", marginTop: 30}}>
<Text h1 >Lag ukeplan</Text>
</View>
<View style={{marginBottom: 10}}>
{isChecked.filters.map((param, index) => {
let type = param.type;
if (param.checked) {
return(
<Button
accessibilityHint={`Fjern ${param.text} fra filteret`}
title={param.text}
key={"btnKey" + index}
type="outline"
raised={true}
onPress={() => {toggleFilters({type})}}
/>
)
}
})}
</View>
<Modal
transparent={true}
visible={isModal}
onRequestClose={() => {
setIsModal(!isModal);
}}>
<View style={styles.container}>
<ScrollView contentContainerStyle={styles.modalView}>
<Button
accessibilityLabel={"Lukk filteroversikt"}
buttonStyle={{backgroundColor: "#fff"}}
icon={
<Icon name="times-circle" size={50}/>
}
onPress={() => {setIsModal(!isModal)}}
/>
<ModalContent
toggleFilter={toggleFilters}
isChecked={isChecked.filters}
/>
</ScrollView>
</View>
</Modal>
<ScrollView>
<View style={styles.itemWrap}>
{dinnerList !== null && (
dinnerList.map((item, index) => {
return (
<DayItem
handleUrl={handleOpenWithWebBrowser}
data={item}
index={index}
key={index + "dayItem"}
handlePress={changeCourse} />
)
})
)}
</View>
</ScrollView>
</View>
);
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: '#f9f9f8',
alignItems: 'center',
justifyContent: 'center',
padding: 10
},
itemWrap: {
maxWidth: 320,
},
modalView: {
margin: 20,
width: 320,
backgroundColor: "white",
borderRadius: 20,
padding: 35,
alignItems: "center",
shadowColor: "#333",
shadowOffset: {
width: 0,
height: 2
},
shadowOpacity: 0.25,
shadowRadius: 4,
elevation: 5
},
});
| Home | identifier_name |
Home.js | import React, { useEffect, useState } from 'react';
import firebaseInstance from "../FirebaseInstance"
import
DevSettings,
{
ActivityIndicator,
StyleSheet,
Modal,
View,
ScrollView
} from 'react-native';
import {Button, Text, Header, Image} from "react-native-elements"
import Icon from "react-native-vector-icons/FontAwesome"
import DayItem from "../components/DayItem";
import ModalContent from "../components/ModalContent";
import {
filter,
randomIndex,
userIsBusy,
getNewCourse,
handleOpenWithWebBrowser } from "../utils/helperFunctions"
import { useStorageContext } from "../context/StorageContext";
export default function Home() {
const [error, setError] = useState(null);
const [isLoading, setIsLoading] = useState(false);
const [weekday, setWeekday] = useState([]);
const [friday, setFriday] = useState([]);
const [sunday, setSunday] = useState([]);
const [fastFood, setFastFood] = useState([]);
const [dinnerList, setDinnerList] = useState(null);
const [filteredData, setFilteredData] = useState([]);
const [database, setDatabase] = useState([])
const [isModal, setIsModal] = useState(false);
const [isChecked, setIsChecked] = useState({
filters:[
{type: "meat", checked: false, text: "Kjøtt"},
{type: "fish", checked: false, text: "Fisk"},
{type: "veg", checked: false, text: "Vegetar"},
{type: "glutenFree", checked: false, text: "Glutenfri"},
{type: "lactoseFree", checked: false, text: "Laktosefri"},
{type: "mon", checked: false, text: "Mandag", index: 0},
{type: "tue", checked: false, text: "Tirsdag", index: 1},
{type: "wed", checked: false, text: "Onsdag", index: 2},
{type: "thu", checked: false, text: "Torsdag", index: 3}, | {type: "fri", checked: false, text: "Fredag", index: 4},
{type: "sat", checked: false, text: "Lørdag", index: 5},
{type: "sun", checked: false, text: "Søndag", index:6},
]}
);
const storage = useStorageContext();
//----------------------------------------------------------------useEffects
//Get data from Firebase
useEffect(() => {
setIsLoading(true);
async function readCollection(text) {
try{
const collection = await firebaseInstance.firestore().collection(text)
const readCollection = await collection.get()
let returnArray = [];
readCollection.forEach(item => {
const itemData = item.data() || {};
returnArray.push({
id: item.id,
...itemData
})
})
setDatabase(returnArray);
setIsLoading(false)
}
catch(error) {
setIsLoading(false);
setError(error);
}
}
readCollection("dinners")
}, [])
//set filteredData to "database"
useEffect(() => {
setFilteredData([...database])
}, [database]);
//Apply filter when a filter is added/removed
useEffect(() => {
applyFilter();
}, [isChecked]);
//Organize courses in weekday,friday and sunday-lists
useEffect(() => {
let tempArr = [...filteredData];
if (database !== null) {
const course = item => item.time === 1 || item.time === 2 && item.friday === false && item.sunday === false
tempArr = filter(course, filteredData)
setWeekday(tempArr)
const friday = item => item.friday;
tempArr = filter(friday, filteredData)
setFriday(tempArr);
const sunday = item => item.sunday;
tempArr = filter(sunday, filteredData)
setSunday(tempArr);
const fastFood = item => item.time === 1;
tempArr = filter(fastFood, filteredData);
setFastFood(tempArr);
}
}, [filteredData])
//re-fill dinnerList every time the weekday-array changes
useEffect(() => {
if (weekday.length > 0) {
fillDinnerList();
}
}, [weekday]);
//---------------------------------------------------------------------------Fill the dinnerList
//Sets the 7 days dinner list based on the weekday, friday and sunday arrays
const fillDinnerList = () => {
let list = [];
let tempWeek = [...weekday];
let tempFri = [...friday];
let tempSun = [...sunday];
//Push weekday dinners
for (let i=0; i <= 4; i++) {
let index = randomIndex(tempWeek);
let dinner = tempWeek[index];
list.push(dinner)
tempWeek.splice(index, 1);
}
//Push friday dinner
let f = randomIndex(tempFri);
let fDinner = tempFri[f];
list.splice(4, 0, fDinner);
tempFri.splice(f, 1);
//Push sunday dinner
let s = randomIndex(sunday);
let sDinner = tempSun[s];
list.splice(6, 0, sDinner);
tempSun.splice(s, 1)
list = applyBusyDaysFilter(list);
setDinnerList(list);
}
//----------------------------------------------------------------------------Filter courses
const applyFilter = () => {
let tempArr = database !== null ? [...database] : null;
let meatArr = [];
let fishArr = [];
let vegArr = [];
let glutArr = [];
let lactoseArr = [];
//filters meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "meat") {
let meat = item => item.type === "meat";
meatArr = filter(meat, tempArr)
}
if (param.type === "fish") {
let fish = item => item.type === "fish";
fishArr = filter(fish, tempArr);
}
if (param.type === "veg") {
let veg = item => item.type === "veg";
vegArr = filter(veg, tempArr);
}
}
})
if (fishArr.length > 0 || vegArr.length > 0 || meatArr.length > 0) {
tempArr = [...fishArr, ...vegArr, ...meatArr];
}
//filters glutenFree and lactoseFree - based on the tempArr that's already filtered by meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "glutenFree") {
let glut = item => item.glutenFree;
glutArr = filter(glut, tempArr);
}
else if (param.type === "lactoseFree") {
let lac = item => item.lactoseFree;
lactoseArr = filter(lac, tempArr);
}
}
})
if (glutArr.length > 0 || lactoseArr.length > 0) {
tempArr = [...glutArr, ...lactoseArr];
}
setFilteredData(tempArr);
}
//Changes to a course with time===1 on the days where the user is extra busy
const applyBusyDaysFilter = (array) => {
let newDinnerList = array;
const busyDays = userIsBusy(isChecked.filters);
let newCourse;
busyDays.forEach(dayIndex => {
newCourse = getNewCourse(dinnerList, fastFood);
newDinnerList.splice(dayIndex, 1, newCourse);
})
return newDinnerList;
}
//Toggles filter parameters based on which buttons the user has pressed
const toggleFilters = ({type}) => {
let tempArr = isChecked.filters.map(item => {
if (item.type === type) {
return {...item, checked: !item.checked};
}
return item
})
setIsChecked({filters: tempArr});
}
//shows/hides the modal with filter options
const toggleModal = () => {
setIsModal(!isModal);
}
//---------------------------------------------------------------------------------Change courses
//Changes the course of a certain index in the dinnerList
const changeCourse = ({index}) => {
let newArr = [...dinnerList];
if (index < 4 || index === 5) {
newArr[index] = getNewCourse(dinnerList, weekday)
} else if (index === 4) {
newArr[index] = getNewCourse(dinnerList, friday)
} else {
newArr[index] = getNewCourse(dinnerList, sunday)
}
newArr = applyBusyDaysFilter(newArr);
setDinnerList(newArr);
}
if (error) {
return (
<View style={styles.container}>
<Text h3>{error.message}</Text>
<Button title="Prøv igjen" onPress={() => DevSettings.reload()} />
</View>
)
}
if (isLoading) {
return(
<View style={styles.container}>
<ActivityIndicator/>
<Text h3>Siden lastes inn.</Text>
</View>
)
}
return (
<View style={styles.container}>
<Header
placement="right"
containerStyle={{
backgroundColor: "#f9f9f8"
}}
leftComponent={
<Image
accessibility={true}
accessibilityLabel="Logo"
source={require("../assets/logo_purple.png")}
style={{width: 140, height: 50}}
PlaceholderContent={<ActivityIndicator/>}/>
}
centerComponent={
<Button
accessibilityLabel="Lagre listen"
icon={
<Icon name="save" size={35} color="#a96dd8"/>
}
raised={true}
type="outline"
containerStyle={{height: 50,}}
onPress={() => {storage.saveInStorage(dinnerList)}}/>
}
rightComponent={
<Button
accessibilityLabel="Åpne filter"
icon={
<Icon name="filter" size={40} color="#a96dd8"/>
}
raised={true}
onPress={toggleModal}
type="outline"
containerStyle={{height: 50}}/>
}/>
<View style={{flexDirection: "row", alignItems: "center", marginTop: 30}}>
<Text h1 >Lag ukeplan</Text>
</View>
<View style={{marginBottom: 10}}>
{isChecked.filters.map((param, index) => {
let type = param.type;
if (param.checked) {
return(
<Button
accessibilityHint={`Fjern ${param.text} fra filteret`}
title={param.text}
key={"btnKey" + index}
type="outline"
raised={true}
onPress={() => {toggleFilters({type})}}
/>
)
}
})}
</View>
<Modal
transparent={true}
visible={isModal}
onRequestClose={() => {
setIsModal(!isModal);
}}>
<View style={styles.container}>
<ScrollView contentContainerStyle={styles.modalView}>
<Button
accessibilityLabel={"Lukk filteroversikt"}
buttonStyle={{backgroundColor: "#fff"}}
icon={
<Icon name="times-circle" size={50}/>
}
onPress={() => {setIsModal(!isModal)}}
/>
<ModalContent
toggleFilter={toggleFilters}
isChecked={isChecked.filters}
/>
</ScrollView>
</View>
</Modal>
<ScrollView>
<View style={styles.itemWrap}>
{dinnerList !== null && (
dinnerList.map((item, index) => {
return (
<DayItem
handleUrl={handleOpenWithWebBrowser}
data={item}
index={index}
key={index + "dayItem"}
handlePress={changeCourse} />
)
})
)}
</View>
</ScrollView>
</View>
);
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: '#f9f9f8',
alignItems: 'center',
justifyContent: 'center',
padding: 10
},
itemWrap: {
maxWidth: 320,
},
modalView: {
margin: 20,
width: 320,
backgroundColor: "white",
borderRadius: 20,
padding: 35,
alignItems: "center",
shadowColor: "#333",
shadowOffset: {
width: 0,
height: 2
},
shadowOpacity: 0.25,
shadowRadius: 4,
elevation: 5
},
}); | random_line_split | |
proxy.rs | // #![deny(warnings)]
extern crate clap;
extern crate futures;
extern crate hyper;
extern crate mproxy;
extern crate openssl;
extern crate pretty_env_logger;
extern crate rmp;
extern crate rmp_serde;
extern crate tokio_io;
extern crate tokio_openssl;
extern crate tokio_tcp;
extern crate tokio_tls;
extern crate uuid;
use futures::future::{err, ok, FutureResult};
use futures::sink::Sink;
use futures::stream::Stream;
use hyper::client::connect::{Connect, Connected};
use hyper::http::uri::Authority;
use hyper::rt::Future;
use hyper::server::conn::Http;
use hyper::service::{service_fn, service_fn_ok};
use hyper::{Body, Client, Method, Request, Response, Server, StatusCode};
use mproxy::ca;
use std::cell::RefCell;
use std::collections::HashMap;
use std::io;
use std::io::{Error, ErrorKind};
use std::net::ToSocketAddrs;
use std::sync::Mutex;
use tokio_io::io::copy;
use clap::{App, Arg, SubCommand};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_tcp::TcpStream;
use openssl::ssl::{SslAcceptor, SslConnector, SslMethod, SslStream};
use tokio_openssl::{SslAcceptorExt, SslConnectorExt};
use futures::sync::{mpsc, oneshot};
use std::sync::Arc;
use mproxy::pool;
// use std::sync::mpsc::{channel,Sender};
struct UpstreamConnect<T: AsyncRead + AsyncWrite + Send + 'static + Sync> {
connect: Mutex<Option<T>>,
}
impl<T> UpstreamConnect<T>
where
T: AsyncRead + AsyncWrite + Send + 'static + Sync,
{
fn new(t: T) -> UpstreamConnect<T> {
UpstreamConnect {
connect: Mutex::new(Some(t)),
}
}
}
impl<T> Connect for UpstreamConnect<T>
where
T: AsyncRead + AsyncWrite + Send + 'static + Sync,
{
type Transport = T;
type Error = io::Error;
type Future = FutureResult<(Self::Transport, Connected), io::Error>;
fn connect(
&self,
_dst: hyper::client::connect::Destination,
) -> <Self as hyper::client::connect::Connect>::Future {
let mut n = self.connect.lock().unwrap();
n.take()
.map(|t| ok((t, Connected::new())))
.unwrap_or(err(Error::new(ErrorKind::Other, "oh no!")))
}
}
fn do_forward<T>(t: T, req: Request<Body>) -> Response<Body>
where
T: AsyncRead + AsyncWrite + Send + 'static + Sync,
{
let uc = UpstreamConnect::new(t);
let mut res = Response::new(Body::empty());
*res.status_mut() = StatusCode::OK;
res
}
fn result_502_resolve_failed<'a>(m: &'a str) -> Response<Body> {
let mut res = Response::new(Body::from(format!("Failed to resolve upstream: {}", m)));
*res.status_mut() = StatusCode::BAD_GATEWAY;
return res;
}
fn result_unboxed(c: u16) -> Response<Body> {
let mut res = Response::new(Body::empty());
// TODO(matt) use constants
*res.status_mut() = StatusCode::from_u16(c).unwrap();
res
}
fn result(c: u16) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let mut res = Response::new(Body::empty());
// TODO(matt) use constants
*res.status_mut() = StatusCode::from_u16(c).unwrap();
Box::new(futures::future::ok(res))
}
fn crappy_log(r: &Request<Body>) {
println!("{:?} {}", r.method(), r.uri())
}
fn normalize_authority(uri: &hyper::Uri) -> String {
// There are 3 forms
let pp = uri.port_u16().unwrap_or(80);
format!("{}:{}", uri.host().unwrap_or(""), pp)
}
pub struct UserIdentity {
pub uuid: String,
pub friendly_name: Option<String>,
pub attributes: Option<HashMap<String, String>>,
}
pub enum Identity {
User(UserIdentity),
Anonymous,
Role(String),
}
pub trait Authenticate {
fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String>;
}
pub enum AuthzResult {
Allow,
Disallow,
}
pub trait Authorize {
fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String>;
}
pub trait SiteAuthorize {
fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String>;
}
#[derive(Clone)]
pub struct AuthConfig<U, S, A>
where
U: Authenticate + Clone,
S: SiteAuthorize + Clone,
A: Authorize + Clone,
{
authenticate: U,
site: S,
authorize: A,
}
fn handle_tls_raw<C: Connect + 'static>(
req_uuid: uuid::Uuid,
_client: &Client<C>,
upstream_addr: std::net::SocketAddr,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let (resp_tx, resp_rx) = oneshot::channel();
// connect, then on_upgrade()
// this needs to be reworked
// there is a panic in upgrade none
let cpair = TcpStream::connect(&upstream_addr)
.map(|upstream| {
println!("Connection established");
let _ = resp_tx.send(()).unwrap();
upstream
})
.map_err(|err| eprintln!("connect: {}", err));
let upgraded = req.into_body().on_upgrade();
let upg2 = upgraded
.map_err(|err| eprintln!("upgrade: {}", err))
.join(cpair)
.and_then(|(upstream, downstream)| {
println!("In up/down");
let (u2dr, u2dw) = upstream.split();
let (d2ur, d2uw) = downstream.split();
let u2df = copy(u2dr, d2uw);
let d2uf = copy(d2ur, u2dw);
d2uf.join(u2df).map_err(|err| eprintln!("connect: {}", err))
})
.map(|_| ())
.map_err(|e| println!("Error {:?}", e));
hyper::rt::spawn(upg2);
Box::new(
resp_rx
.map(|_| 200)
.or_else(|_| Ok(502))
.and_then(|i| result(i)),
)
// result(200)
}
fn is_mitm(r: &Request<Body>, mitm_enabled: bool) -> bool {
true
}
trait RequestFilter {
type Future: Future<Item = Request<Body>>;
fn filter(&self, req: Request<Body>) -> Self::Future;
}
trait ResponseFilter {
type Future: Future<Item = Response<Body>>;
fn filter(&self, req: Response<Body>) -> Self::Future;
}
#[derive(Clone)]
struct AdWareBlock;
impl SiteAuthorize for AdWareBlock {
fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String> {
if url.starts_with("adservice.google.com") {
return Ok(AuthzResult::Disallow);
}
Ok(AuthzResult::Allow)
}
}
#[derive(Clone)]
struct AllowAll;
impl Authorize for AllowAll {
fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String> {
Ok(AuthzResult::Allow)
}
}
#[derive(Clone)]
struct NoAuth;
impl Authenticate for NoAuth {
fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String> |
}
pub enum Trace {
TraceId(String),
TraceSecurity(String, openssl::x509::X509),
TraceRequest(String, Request<Body>),
TraceResponse(String, Request<Body>),
}
fn make_absolute(req: &mut Request<Body>) {
/* RFC 7312 5.4
When a proxy receives a request with an absolute-form of
request-target, the proxy MUST ignore the received Host header field
(if any) and instead replace it with the host information of the
request-target. A proxy that forwards such a request MUST generate a
new Host field-value based on the received request-target rather than
forward the received Host field-value.
*/
match req.method() {
&Method::CONNECT => {}
_ => {
let nhost: Option<String> = { req.uri().authority_part().map(|a| a.as_str().into()) };
if let Some(n) = nhost {
req.headers_mut()
.insert(http::header::HOST, n.parse().unwrap());
return;
}
let nuri = req.headers().get(http::header::HOST).map(|host| {
let autht: Authority = host.to_str().unwrap().parse().unwrap();
let mut builder = hyper::Uri::builder();
builder.authority(autht);
//TODO(matt) do as map[
if let Some(p) = req.uri().path_and_query() {
builder.path_and_query(p.as_str());
}
if let Some(p) = req.uri().scheme_part() {
builder.scheme(p.as_str());
} else {
// Ok so this kind of sketchy, but since this is fixing up a client connection
// we'll never see an https one. Why? https is via CONNECT at the proxy
builder.scheme("http");
}
builder.build().unwrap()
});
match nuri {
Some(n) => *req.uri_mut() = n,
None => {}
}
}
}
}
#[derive(Clone)]
struct Proxy<U, S, A>
where
U: Authenticate + Sync + Send + Clone + 'static,
S: SiteAuthorize + Sync + Send + Clone + 'static,
A: Authorize + Sync + Send + Clone + 'static,
{
//TODO(matt) - trace filter
tracer: Option<mpsc::Sender<Trace>>,
ca: Arc<ca::CertAuthority>,
auth_config: AuthConfig<U, S, A>,
upstream_ssl_pool: Arc<pool::Pool<tokio_openssl::SslStream<tokio_tcp::TcpStream>>>,
}
impl<U, S, A> Proxy<U, S, A>
where
U: Authenticate + Sync + Send + Clone,
S: SiteAuthorize + Sync + Send + Clone,
A: Authorize + Sync + Send + Clone,
{
// Rework this instead of duping proxy do somehting else
fn dup(&self) -> Proxy<U, S, A> {
Proxy {
tracer: self.tracer.iter().map(|t| t.clone()).next(),
ca: self.ca.clone(),
auth_config: self.auth_config.clone(),
upstream_ssl_pool: pool::Pool::empty(100),
}
}
fn handle<C: Connect + 'static>(
&self,
client: &Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let req_uuid = uuid::Uuid::new_v4();
let hostname = normalize_authority(req.uri());
// TODO this is slow and not async, and crappy
let upstream_addr = match hostname.to_socket_addrs() {
Ok(mut addrs) => match addrs.next() {
Some(addr) => addr,
None => return result(502),
},
Err(e) => {
eprintln!("Upstream resolution: ({}): {}", hostname, e);
return Box::new(futures::future::ok(result_502_resolve_failed(&hostname)));
}
};
let uid = self.auth_config.authenticate.authenticate(&req);
let x = uid
.and_then(|u| {
self.auth_config
.site
.authorize(&u, &hostname)
.map(|r| (u, r))
})
.and_then(|(u, site_result)| {
self.auth_config
.authorize
.authorize(&u, &req)
.map(|ar| (u, site_result, ar))
});
let _user = match x {
Ok((u, AuthzResult::Allow, AuthzResult::Allow)) => u,
Err(_) => return result(401),
_ => return result(403),
};
self.handle_inner(req_uuid, upstream_addr, client, req)
}
fn handle_inner<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
upstream_addr: std::net::SocketAddr,
client: &Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
crappy_log(&req);
let mitm_enabled = true;
match req.method() {
&Method::CONNECT => match is_mitm(&req, mitm_enabled) {
true => self.handle_mitm(req_uuid, client.clone(), upstream_addr, req),
false => handle_tls_raw(req_uuid, client, upstream_addr, req),
},
_ => self.handle_http(req_uuid, client, req),
}
}
fn handle_http_forward<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
mut client: Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let client = client.request(req);
match self.tracer.clone() {
Some(tx) => {
let f = tx
.send(Trace::TraceId(format!("{}", req_uuid)))
.map_err(|e| {
println!("Error in trace: {:?}", e);
io::Error::from(io::ErrorKind::Other)
});
Box::new(
f.join(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
}))
.map(|(_, b)| b),
)
}
None => Box::new(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
})),
}
}
fn handle_http<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
client: &Client<C>,
mut req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
make_absolute(&mut req);
let client = client.clone().request(req);
match self.tracer.clone() {
Some(tx) => {
let f = tx
.send(Trace::TraceId(format!("{}", req_uuid)))
.map_err(|e| {
println!("Error in trace: {:?}", e);
io::Error::from(io::ErrorKind::Other)
});
Box::new(
f.join(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
}))
.map(|(_, b)| b),
)
}
None => Box::new(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
})),
}
}
fn handle_mitm<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
client: Client<C>,
upstream_addr: std::net::SocketAddr,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let (resp_tx, resp_rx) = oneshot::channel();
// connect, then on_upgrade()
// this needs to be reworked
// there is a panic in upgrade none
let authority = req.uri().authority_part().unwrap().clone();
let cpair = TcpStream::connect(&upstream_addr)
.map_err(|err| eprintln!("mitm tcp connect: {}", err))
.and_then(move |upstream| {
let cx = SslConnector::builder(SslMethod::tls()).unwrap().build();
cx.connect_async(authority.host(), upstream)
.map(|ssl_conn| {
let _ = resp_tx.send(()).unwrap();
println!("MITM Connection established");
let peer_cert =
{ ssl_conn.get_ref().ssl().peer_certificate().unwrap().clone() };
(ssl_conn, peer_cert)
})
.map_err(|e| println!("tls error: {:}", e))
});
let upgraded = req.into_body().on_upgrade();
let ca = self.ca.clone();
let np = self.clone();
let req_uuid = req_uuid.clone();
let upg2 = upgraded
.map_err(|err| eprintln!("upgrade: {}", err))
.join(cpair)
.and_then(move |tuple| {
let (downstream, (upstream, peer_cert)) = tuple;
let ca = ca;
let req_uuid = req_uuid;
let peer_cert_signed = ca.sign_cert_from_cert(&peer_cert).unwrap();
let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
acceptor.set_private_key(ca.child_key.as_ref()).unwrap();
acceptor.set_certificate(peer_cert_signed.as_ref()).unwrap();
acceptor.check_private_key().unwrap();
let acceptor = acceptor.build();
acceptor
.accept_async(downstream)
.map_err(|e| eprintln!("accept: {}", e))
.and_then(move |tls_downstream| {
// This should cause the pool to have a single entry
// and then magic
let upstream_pool = {
let local_pool = pool::Pool::empty(1);
let pooled_upstream = pool::PoolItem::new(upstream);
pool::PoolItem::attach(pooled_upstream, local_pool.clone());
local_pool
};
Http::new()
.serve_connection(
tls_downstream,
service_fn(move |req: Request<Body>| {
let upstream_pool = upstream_pool.clone();
let uc = Client::builder()
.keep_alive(false)
.build(AlreadyConnected(upstream_pool));
// println!("In inner client handler: {} {:?}", req_uuid, req);
np.handle_http(req_uuid, &uc, req)
}),
)
.map_err(|err| {
eprintln!("Error in inner http: {}", err);
()
})
// This is proxy without analysis, just forward
// serve_connection
// let (u2dr, u2dw) = upstream_conn.split();
// let (d2ur, d2uw) = tls_downstream.split();
// let u2df = copy(u2dr, d2uw);
// let d2uf = copy(d2ur, u2dw);
// d2uf.join(u2df)
// .map_err(|err| eprintln!("mitm forward: {}", err));
})
})
.map(|_| ())
.map_err(|e| println!("Error {:?}", e));
hyper::rt::spawn(upg2);
Box::new(
resp_rx
.map(|_| 200)
.or_else(|_| Ok(502))
.and_then(|i| result(i)),
)
}
}
struct AlreadyConnected<T: Send + 'static + AsyncRead + AsyncWrite + 'static + Sync>(
Arc<pool::Pool<T>>,
);
impl<T: Send + 'static + AsyncRead + AsyncWrite + 'static + Sync> Connect for AlreadyConnected<T> {
type Transport = pool::PoolItem<T>;
/// An error occured when trying to connect.
type Error = io::Error;
/// A Future that will resolve to the connected Transport.
type Future = Box<Future<Item = (Self::Transport, Connected), Error = Self::Error> + Send>;
/// Connect to a destination.
fn connect(&self, _: hyper::client::connect::Destination) -> Self::Future {
let o = pool::Pool::checkout(self.0.clone()).unwrap();
Box::new(futures::future::ok((
o,
hyper::client::connect::Connected::new(),
)))
}
}
fn trace_handler(mut rx: mpsc::Receiver<Trace>) {
let _t = std::thread::spawn(move || {
let done = rx.for_each(|tx| {
match tx {
Trace::TraceId(uuid) => {
println!("Begin Tracing {}", uuid);
}
_ => {}
}
println!("Trace recv");
Ok(())
});
hyper::rt::run(done);
});
}
struct ProxyUserConfig {
key: String,
ca: String,
port: u16,
}
fn parse_options() -> Option<ProxyUserConfig> {
let matches = App::new("My Super Program")
.version("1.0")
.author("Matt Woodyard <matt@mattwoodyard.com>")
.about("Be a proxy")
.arg(
Arg::with_name("cafile")
.short("c")
.long("cafile")
.value_name("CAFILE")
.help("Set the root CA certificate ")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("caprivatekeyfile")
.short("k")
.long("caprivatekey")
.value_name("KEYFILE")
.help("Set the private key file root")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("port")
.short("p")
.long("port")
.value_name("PORT")
.help("which port do you want to run on")
.required(true)
.takes_value(true),
)
.get_matches();
Some(ProxyUserConfig {
ca: String::from(matches
.value_of("cafile")
.expect("Must specify CA root certificate file")),
key: String::from(matches
.value_of("caprivatekeyfile")
.expect("Must specify root key file")),
port: matches
.value_of("port")
.map(|s| s.parse::<u16>().expect("Invalid"))
.unwrap_or(8080),
})
}
fn main() {
pretty_env_logger::init();
let pconfig = parse_options().unwrap();
let ca = Arc::new(ca::CertAuthority::from_files(&pconfig.key, &pconfig.ca).unwrap());
let client = Client::new();
let addr = ([127, 0, 0, 1], pconfig.port).into();
let (tx, rx) = mpsc::channel(1024);
trace_handler(rx);
let proxy = Proxy {
tracer: Some(tx),
ca: ca,
auth_config: AuthConfig {
authenticate: NoAuth,
site: AdWareBlock,
authorize: AllowAll,
},
upstream_ssl_pool: pool::Pool::empty(100),
};
let new_svc = move || {
let proxy = proxy.clone();
let client = client.clone();
service_fn(move |req: Request<Body>| proxy.handle(&client, req))
};
// Need an Http
let server = Server::bind(&addr)
.serve(new_svc)
.map_err(|e| eprintln!("server error: {}", e));
hyper::rt::run(server);
}
| {
Ok(Identity::Anonymous)
} | identifier_body |
proxy.rs | // #![deny(warnings)]
extern crate clap;
extern crate futures;
extern crate hyper;
extern crate mproxy;
extern crate openssl;
extern crate pretty_env_logger;
extern crate rmp;
extern crate rmp_serde;
extern crate tokio_io;
extern crate tokio_openssl;
extern crate tokio_tcp;
extern crate tokio_tls;
extern crate uuid;
use futures::future::{err, ok, FutureResult};
use futures::sink::Sink;
use futures::stream::Stream;
use hyper::client::connect::{Connect, Connected};
use hyper::http::uri::Authority;
use hyper::rt::Future;
use hyper::server::conn::Http;
use hyper::service::{service_fn, service_fn_ok};
use hyper::{Body, Client, Method, Request, Response, Server, StatusCode};
use mproxy::ca;
use std::cell::RefCell;
use std::collections::HashMap;
use std::io;
use std::io::{Error, ErrorKind};
use std::net::ToSocketAddrs;
use std::sync::Mutex;
use tokio_io::io::copy;
use clap::{App, Arg, SubCommand};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_tcp::TcpStream;
use openssl::ssl::{SslAcceptor, SslConnector, SslMethod, SslStream};
use tokio_openssl::{SslAcceptorExt, SslConnectorExt};
use futures::sync::{mpsc, oneshot};
use std::sync::Arc;
use mproxy::pool;
// use std::sync::mpsc::{channel,Sender};
struct UpstreamConnect<T: AsyncRead + AsyncWrite + Send + 'static + Sync> {
connect: Mutex<Option<T>>,
}
impl<T> UpstreamConnect<T>
where
T: AsyncRead + AsyncWrite + Send + 'static + Sync,
{
fn new(t: T) -> UpstreamConnect<T> {
UpstreamConnect {
connect: Mutex::new(Some(t)),
}
}
}
impl<T> Connect for UpstreamConnect<T>
where
T: AsyncRead + AsyncWrite + Send + 'static + Sync,
{
type Transport = T;
type Error = io::Error;
type Future = FutureResult<(Self::Transport, Connected), io::Error>;
fn connect(
&self,
_dst: hyper::client::connect::Destination,
) -> <Self as hyper::client::connect::Connect>::Future {
let mut n = self.connect.lock().unwrap();
n.take()
.map(|t| ok((t, Connected::new())))
.unwrap_or(err(Error::new(ErrorKind::Other, "oh no!")))
}
}
fn do_forward<T>(t: T, req: Request<Body>) -> Response<Body>
where
T: AsyncRead + AsyncWrite + Send + 'static + Sync,
{
let uc = UpstreamConnect::new(t);
let mut res = Response::new(Body::empty());
*res.status_mut() = StatusCode::OK;
res
}
fn result_502_resolve_failed<'a>(m: &'a str) -> Response<Body> {
let mut res = Response::new(Body::from(format!("Failed to resolve upstream: {}", m)));
*res.status_mut() = StatusCode::BAD_GATEWAY;
return res;
}
fn result_unboxed(c: u16) -> Response<Body> {
let mut res = Response::new(Body::empty());
// TODO(matt) use constants
*res.status_mut() = StatusCode::from_u16(c).unwrap();
res
}
fn result(c: u16) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let mut res = Response::new(Body::empty());
// TODO(matt) use constants
*res.status_mut() = StatusCode::from_u16(c).unwrap();
Box::new(futures::future::ok(res))
}
fn crappy_log(r: &Request<Body>) {
println!("{:?} {}", r.method(), r.uri())
}
fn normalize_authority(uri: &hyper::Uri) -> String {
// There are 3 forms
let pp = uri.port_u16().unwrap_or(80);
format!("{}:{}", uri.host().unwrap_or(""), pp)
}
pub struct UserIdentity {
pub uuid: String,
pub friendly_name: Option<String>,
pub attributes: Option<HashMap<String, String>>,
}
pub enum Identity {
User(UserIdentity),
Anonymous,
Role(String),
}
pub trait Authenticate {
fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String>;
}
pub enum AuthzResult {
Allow,
Disallow,
}
pub trait Authorize {
fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String>;
}
pub trait SiteAuthorize {
fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String>;
}
#[derive(Clone)]
pub struct AuthConfig<U, S, A>
where
U: Authenticate + Clone,
S: SiteAuthorize + Clone,
A: Authorize + Clone,
{
authenticate: U,
site: S,
authorize: A,
}
fn handle_tls_raw<C: Connect + 'static>(
req_uuid: uuid::Uuid,
_client: &Client<C>,
upstream_addr: std::net::SocketAddr,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let (resp_tx, resp_rx) = oneshot::channel();
// connect, then on_upgrade()
// this needs to be reworked
// there is a panic in upgrade none
let cpair = TcpStream::connect(&upstream_addr)
.map(|upstream| {
println!("Connection established");
let _ = resp_tx.send(()).unwrap();
upstream
})
.map_err(|err| eprintln!("connect: {}", err));
let upgraded = req.into_body().on_upgrade();
let upg2 = upgraded
.map_err(|err| eprintln!("upgrade: {}", err))
.join(cpair)
.and_then(|(upstream, downstream)| {
println!("In up/down");
let (u2dr, u2dw) = upstream.split();
let (d2ur, d2uw) = downstream.split();
let u2df = copy(u2dr, d2uw);
let d2uf = copy(d2ur, u2dw);
d2uf.join(u2df).map_err(|err| eprintln!("connect: {}", err))
})
.map(|_| ())
.map_err(|e| println!("Error {:?}", e));
hyper::rt::spawn(upg2);
Box::new(
resp_rx
.map(|_| 200)
.or_else(|_| Ok(502))
.and_then(|i| result(i)),
)
// result(200)
}
fn is_mitm(r: &Request<Body>, mitm_enabled: bool) -> bool {
true
}
trait RequestFilter {
type Future: Future<Item = Request<Body>>;
fn filter(&self, req: Request<Body>) -> Self::Future;
}
trait ResponseFilter {
type Future: Future<Item = Response<Body>>;
fn filter(&self, req: Response<Body>) -> Self::Future;
}
#[derive(Clone)]
struct AdWareBlock;
impl SiteAuthorize for AdWareBlock {
fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String> {
if url.starts_with("adservice.google.com") {
return Ok(AuthzResult::Disallow);
}
Ok(AuthzResult::Allow)
}
}
#[derive(Clone)]
struct AllowAll;
impl Authorize for AllowAll {
fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String> {
Ok(AuthzResult::Allow)
}
}
#[derive(Clone)]
struct NoAuth;
impl Authenticate for NoAuth {
fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String> {
Ok(Identity::Anonymous)
}
}
pub enum Trace {
TraceId(String),
TraceSecurity(String, openssl::x509::X509),
TraceRequest(String, Request<Body>),
TraceResponse(String, Request<Body>),
}
fn make_absolute(req: &mut Request<Body>) {
/* RFC 7312 5.4
When a proxy receives a request with an absolute-form of
request-target, the proxy MUST ignore the received Host header field
(if any) and instead replace it with the host information of the
request-target. A proxy that forwards such a request MUST generate a
new Host field-value based on the received request-target rather than
forward the received Host field-value.
*/
match req.method() {
&Method::CONNECT => {}
_ => {
let nhost: Option<String> = { req.uri().authority_part().map(|a| a.as_str().into()) };
if let Some(n) = nhost {
req.headers_mut()
.insert(http::header::HOST, n.parse().unwrap());
return;
}
let nuri = req.headers().get(http::header::HOST).map(|host| {
let autht: Authority = host.to_str().unwrap().parse().unwrap();
let mut builder = hyper::Uri::builder();
builder.authority(autht);
//TODO(matt) do as map[
if let Some(p) = req.uri().path_and_query() {
builder.path_and_query(p.as_str());
}
if let Some(p) = req.uri().scheme_part() {
builder.scheme(p.as_str());
} else {
// Ok so this kind of sketchy, but since this is fixing up a client connection
// we'll never see an https one. Why? https is via CONNECT at the proxy
builder.scheme("http");
}
builder.build().unwrap()
});
match nuri {
Some(n) => *req.uri_mut() = n,
None => {}
}
}
}
}
#[derive(Clone)]
struct Proxy<U, S, A>
where
U: Authenticate + Sync + Send + Clone + 'static,
S: SiteAuthorize + Sync + Send + Clone + 'static,
A: Authorize + Sync + Send + Clone + 'static,
{
//TODO(matt) - trace filter
tracer: Option<mpsc::Sender<Trace>>,
ca: Arc<ca::CertAuthority>,
auth_config: AuthConfig<U, S, A>,
upstream_ssl_pool: Arc<pool::Pool<tokio_openssl::SslStream<tokio_tcp::TcpStream>>>,
}
impl<U, S, A> Proxy<U, S, A>
where
U: Authenticate + Sync + Send + Clone,
S: SiteAuthorize + Sync + Send + Clone,
A: Authorize + Sync + Send + Clone,
{
// Rework this instead of duping proxy do somehting else
fn dup(&self) -> Proxy<U, S, A> {
Proxy {
tracer: self.tracer.iter().map(|t| t.clone()).next(),
ca: self.ca.clone(),
auth_config: self.auth_config.clone(),
upstream_ssl_pool: pool::Pool::empty(100),
}
}
fn handle<C: Connect + 'static>(
&self,
client: &Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let req_uuid = uuid::Uuid::new_v4();
let hostname = normalize_authority(req.uri());
// TODO this is slow and not async, and crappy
let upstream_addr = match hostname.to_socket_addrs() {
Ok(mut addrs) => match addrs.next() {
Some(addr) => addr,
None => return result(502),
},
Err(e) => {
eprintln!("Upstream resolution: ({}): {}", hostname, e);
return Box::new(futures::future::ok(result_502_resolve_failed(&hostname)));
}
};
let uid = self.auth_config.authenticate.authenticate(&req);
let x = uid
.and_then(|u| {
self.auth_config
.site
.authorize(&u, &hostname)
.map(|r| (u, r))
})
.and_then(|(u, site_result)| {
self.auth_config
.authorize
.authorize(&u, &req)
.map(|ar| (u, site_result, ar))
});
let _user = match x {
Ok((u, AuthzResult::Allow, AuthzResult::Allow)) => u,
Err(_) => return result(401),
_ => return result(403),
};
self.handle_inner(req_uuid, upstream_addr, client, req)
}
fn handle_inner<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
upstream_addr: std::net::SocketAddr,
client: &Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
crappy_log(&req);
let mitm_enabled = true;
match req.method() {
&Method::CONNECT => match is_mitm(&req, mitm_enabled) {
true => self.handle_mitm(req_uuid, client.clone(), upstream_addr, req),
false => handle_tls_raw(req_uuid, client, upstream_addr, req),
},
_ => self.handle_http(req_uuid, client, req),
}
}
fn handle_http_forward<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
mut client: Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let client = client.request(req);
match self.tracer.clone() {
Some(tx) => {
let f = tx
.send(Trace::TraceId(format!("{}", req_uuid)))
.map_err(|e| {
println!("Error in trace: {:?}", e);
io::Error::from(io::ErrorKind::Other)
});
Box::new(
f.join(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
}))
.map(|(_, b)| b),
)
}
None => Box::new(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
})),
}
}
fn handle_http<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
client: &Client<C>,
mut req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
make_absolute(&mut req);
let client = client.clone().request(req);
match self.tracer.clone() {
Some(tx) => {
let f = tx
.send(Trace::TraceId(format!("{}", req_uuid)))
.map_err(|e| {
println!("Error in trace: {:?}", e);
io::Error::from(io::ErrorKind::Other)
});
Box::new(
f.join(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
}))
.map(|(_, b)| b),
)
}
None => Box::new(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
})),
}
}
fn handle_mitm<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
client: Client<C>,
upstream_addr: std::net::SocketAddr,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let (resp_tx, resp_rx) = oneshot::channel();
// connect, then on_upgrade()
// this needs to be reworked
// there is a panic in upgrade none
let authority = req.uri().authority_part().unwrap().clone();
let cpair = TcpStream::connect(&upstream_addr)
.map_err(|err| eprintln!("mitm tcp connect: {}", err))
.and_then(move |upstream| {
let cx = SslConnector::builder(SslMethod::tls()).unwrap().build();
cx.connect_async(authority.host(), upstream)
.map(|ssl_conn| {
let _ = resp_tx.send(()).unwrap();
println!("MITM Connection established");
let peer_cert =
{ ssl_conn.get_ref().ssl().peer_certificate().unwrap().clone() };
(ssl_conn, peer_cert)
})
.map_err(|e| println!("tls error: {:}", e))
});
let upgraded = req.into_body().on_upgrade();
let ca = self.ca.clone();
let np = self.clone();
let req_uuid = req_uuid.clone();
let upg2 = upgraded
.map_err(|err| eprintln!("upgrade: {}", err))
.join(cpair)
.and_then(move |tuple| {
let (downstream, (upstream, peer_cert)) = tuple;
let ca = ca;
let req_uuid = req_uuid;
let peer_cert_signed = ca.sign_cert_from_cert(&peer_cert).unwrap();
let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
acceptor.set_private_key(ca.child_key.as_ref()).unwrap();
acceptor.set_certificate(peer_cert_signed.as_ref()).unwrap();
acceptor.check_private_key().unwrap();
let acceptor = acceptor.build();
acceptor
.accept_async(downstream)
.map_err(|e| eprintln!("accept: {}", e))
.and_then(move |tls_downstream| {
// This should cause the pool to have a single entry
// and then magic
let upstream_pool = {
let local_pool = pool::Pool::empty(1);
let pooled_upstream = pool::PoolItem::new(upstream);
pool::PoolItem::attach(pooled_upstream, local_pool.clone());
local_pool
};
Http::new()
.serve_connection(
tls_downstream,
service_fn(move |req: Request<Body>| {
let upstream_pool = upstream_pool.clone();
let uc = Client::builder()
.keep_alive(false)
.build(AlreadyConnected(upstream_pool));
// println!("In inner client handler: {} {:?}", req_uuid, req);
np.handle_http(req_uuid, &uc, req)
}),
)
.map_err(|err| {
eprintln!("Error in inner http: {}", err);
()
})
// This is proxy without analysis, just forward
// serve_connection
// let (u2dr, u2dw) = upstream_conn.split();
// let (d2ur, d2uw) = tls_downstream.split();
// let u2df = copy(u2dr, d2uw);
// let d2uf = copy(d2ur, u2dw);
// d2uf.join(u2df)
// .map_err(|err| eprintln!("mitm forward: {}", err));
})
})
.map(|_| ())
.map_err(|e| println!("Error {:?}", e));
hyper::rt::spawn(upg2);
Box::new(
resp_rx
.map(|_| 200)
.or_else(|_| Ok(502))
.and_then(|i| result(i)),
)
}
}
struct AlreadyConnected<T: Send + 'static + AsyncRead + AsyncWrite + 'static + Sync>(
Arc<pool::Pool<T>>,
);
impl<T: Send + 'static + AsyncRead + AsyncWrite + 'static + Sync> Connect for AlreadyConnected<T> {
type Transport = pool::PoolItem<T>;
/// An error occured when trying to connect.
type Error = io::Error;
/// A Future that will resolve to the connected Transport.
type Future = Box<Future<Item = (Self::Transport, Connected), Error = Self::Error> + Send>;
/// Connect to a destination.
fn connect(&self, _: hyper::client::connect::Destination) -> Self::Future {
let o = pool::Pool::checkout(self.0.clone()).unwrap();
Box::new(futures::future::ok((
o,
hyper::client::connect::Connected::new(),
)))
}
}
fn trace_handler(mut rx: mpsc::Receiver<Trace>) {
let _t = std::thread::spawn(move || {
let done = rx.for_each(|tx| {
match tx {
Trace::TraceId(uuid) => {
println!("Begin Tracing {}", uuid);
}
_ => {} | hyper::rt::run(done);
});
}
struct ProxyUserConfig {
key: String,
ca: String,
port: u16,
}
fn parse_options() -> Option<ProxyUserConfig> {
let matches = App::new("My Super Program")
.version("1.0")
.author("Matt Woodyard <matt@mattwoodyard.com>")
.about("Be a proxy")
.arg(
Arg::with_name("cafile")
.short("c")
.long("cafile")
.value_name("CAFILE")
.help("Set the root CA certificate ")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("caprivatekeyfile")
.short("k")
.long("caprivatekey")
.value_name("KEYFILE")
.help("Set the private key file root")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("port")
.short("p")
.long("port")
.value_name("PORT")
.help("which port do you want to run on")
.required(true)
.takes_value(true),
)
.get_matches();
Some(ProxyUserConfig {
ca: String::from(matches
.value_of("cafile")
.expect("Must specify CA root certificate file")),
key: String::from(matches
.value_of("caprivatekeyfile")
.expect("Must specify root key file")),
port: matches
.value_of("port")
.map(|s| s.parse::<u16>().expect("Invalid"))
.unwrap_or(8080),
})
}
fn main() {
pretty_env_logger::init();
let pconfig = parse_options().unwrap();
let ca = Arc::new(ca::CertAuthority::from_files(&pconfig.key, &pconfig.ca).unwrap());
let client = Client::new();
let addr = ([127, 0, 0, 1], pconfig.port).into();
let (tx, rx) = mpsc::channel(1024);
trace_handler(rx);
let proxy = Proxy {
tracer: Some(tx),
ca: ca,
auth_config: AuthConfig {
authenticate: NoAuth,
site: AdWareBlock,
authorize: AllowAll,
},
upstream_ssl_pool: pool::Pool::empty(100),
};
let new_svc = move || {
let proxy = proxy.clone();
let client = client.clone();
service_fn(move |req: Request<Body>| proxy.handle(&client, req))
};
// Need an Http
let server = Server::bind(&addr)
.serve(new_svc)
.map_err(|e| eprintln!("server error: {}", e));
hyper::rt::run(server);
} | }
println!("Trace recv");
Ok(())
}); | random_line_split |
proxy.rs | // #![deny(warnings)]
extern crate clap;
extern crate futures;
extern crate hyper;
extern crate mproxy;
extern crate openssl;
extern crate pretty_env_logger;
extern crate rmp;
extern crate rmp_serde;
extern crate tokio_io;
extern crate tokio_openssl;
extern crate tokio_tcp;
extern crate tokio_tls;
extern crate uuid;
use futures::future::{err, ok, FutureResult};
use futures::sink::Sink;
use futures::stream::Stream;
use hyper::client::connect::{Connect, Connected};
use hyper::http::uri::Authority;
use hyper::rt::Future;
use hyper::server::conn::Http;
use hyper::service::{service_fn, service_fn_ok};
use hyper::{Body, Client, Method, Request, Response, Server, StatusCode};
use mproxy::ca;
use std::cell::RefCell;
use std::collections::HashMap;
use std::io;
use std::io::{Error, ErrorKind};
use std::net::ToSocketAddrs;
use std::sync::Mutex;
use tokio_io::io::copy;
use clap::{App, Arg, SubCommand};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_tcp::TcpStream;
use openssl::ssl::{SslAcceptor, SslConnector, SslMethod, SslStream};
use tokio_openssl::{SslAcceptorExt, SslConnectorExt};
use futures::sync::{mpsc, oneshot};
use std::sync::Arc;
use mproxy::pool;
// use std::sync::mpsc::{channel,Sender};
struct UpstreamConnect<T: AsyncRead + AsyncWrite + Send + 'static + Sync> {
connect: Mutex<Option<T>>,
}
impl<T> UpstreamConnect<T>
where
T: AsyncRead + AsyncWrite + Send + 'static + Sync,
{
fn new(t: T) -> UpstreamConnect<T> {
UpstreamConnect {
connect: Mutex::new(Some(t)),
}
}
}
impl<T> Connect for UpstreamConnect<T>
where
T: AsyncRead + AsyncWrite + Send + 'static + Sync,
{
type Transport = T;
type Error = io::Error;
type Future = FutureResult<(Self::Transport, Connected), io::Error>;
fn connect(
&self,
_dst: hyper::client::connect::Destination,
) -> <Self as hyper::client::connect::Connect>::Future {
let mut n = self.connect.lock().unwrap();
n.take()
.map(|t| ok((t, Connected::new())))
.unwrap_or(err(Error::new(ErrorKind::Other, "oh no!")))
}
}
fn do_forward<T>(t: T, req: Request<Body>) -> Response<Body>
where
T: AsyncRead + AsyncWrite + Send + 'static + Sync,
{
let uc = UpstreamConnect::new(t);
let mut res = Response::new(Body::empty());
*res.status_mut() = StatusCode::OK;
res
}
fn result_502_resolve_failed<'a>(m: &'a str) -> Response<Body> {
let mut res = Response::new(Body::from(format!("Failed to resolve upstream: {}", m)));
*res.status_mut() = StatusCode::BAD_GATEWAY;
return res;
}
fn result_unboxed(c: u16) -> Response<Body> {
let mut res = Response::new(Body::empty());
// TODO(matt) use constants
*res.status_mut() = StatusCode::from_u16(c).unwrap();
res
}
fn result(c: u16) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let mut res = Response::new(Body::empty());
// TODO(matt) use constants
*res.status_mut() = StatusCode::from_u16(c).unwrap();
Box::new(futures::future::ok(res))
}
fn crappy_log(r: &Request<Body>) {
println!("{:?} {}", r.method(), r.uri())
}
fn normalize_authority(uri: &hyper::Uri) -> String {
// There are 3 forms
let pp = uri.port_u16().unwrap_or(80);
format!("{}:{}", uri.host().unwrap_or(""), pp)
}
pub struct UserIdentity {
pub uuid: String,
pub friendly_name: Option<String>,
pub attributes: Option<HashMap<String, String>>,
}
pub enum Identity {
User(UserIdentity),
Anonymous,
Role(String),
}
pub trait Authenticate {
fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String>;
}
pub enum AuthzResult {
Allow,
Disallow,
}
pub trait Authorize {
fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String>;
}
pub trait SiteAuthorize {
fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String>;
}
#[derive(Clone)]
pub struct AuthConfig<U, S, A>
where
U: Authenticate + Clone,
S: SiteAuthorize + Clone,
A: Authorize + Clone,
{
authenticate: U,
site: S,
authorize: A,
}
fn handle_tls_raw<C: Connect + 'static>(
req_uuid: uuid::Uuid,
_client: &Client<C>,
upstream_addr: std::net::SocketAddr,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let (resp_tx, resp_rx) = oneshot::channel();
// connect, then on_upgrade()
// this needs to be reworked
// there is a panic in upgrade none
let cpair = TcpStream::connect(&upstream_addr)
.map(|upstream| {
println!("Connection established");
let _ = resp_tx.send(()).unwrap();
upstream
})
.map_err(|err| eprintln!("connect: {}", err));
let upgraded = req.into_body().on_upgrade();
let upg2 = upgraded
.map_err(|err| eprintln!("upgrade: {}", err))
.join(cpair)
.and_then(|(upstream, downstream)| {
println!("In up/down");
let (u2dr, u2dw) = upstream.split();
let (d2ur, d2uw) = downstream.split();
let u2df = copy(u2dr, d2uw);
let d2uf = copy(d2ur, u2dw);
d2uf.join(u2df).map_err(|err| eprintln!("connect: {}", err))
})
.map(|_| ())
.map_err(|e| println!("Error {:?}", e));
hyper::rt::spawn(upg2);
Box::new(
resp_rx
.map(|_| 200)
.or_else(|_| Ok(502))
.and_then(|i| result(i)),
)
// result(200)
}
fn is_mitm(r: &Request<Body>, mitm_enabled: bool) -> bool {
true
}
trait RequestFilter {
type Future: Future<Item = Request<Body>>;
fn filter(&self, req: Request<Body>) -> Self::Future;
}
trait ResponseFilter {
type Future: Future<Item = Response<Body>>;
fn filter(&self, req: Response<Body>) -> Self::Future;
}
#[derive(Clone)]
struct AdWareBlock;
impl SiteAuthorize for AdWareBlock {
fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String> {
if url.starts_with("adservice.google.com") {
return Ok(AuthzResult::Disallow);
}
Ok(AuthzResult::Allow)
}
}
#[derive(Clone)]
struct AllowAll;
impl Authorize for AllowAll {
fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String> {
Ok(AuthzResult::Allow)
}
}
#[derive(Clone)]
struct NoAuth;
impl Authenticate for NoAuth {
fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String> {
Ok(Identity::Anonymous)
}
}
pub enum Trace {
TraceId(String),
TraceSecurity(String, openssl::x509::X509),
TraceRequest(String, Request<Body>),
TraceResponse(String, Request<Body>),
}
fn make_absolute(req: &mut Request<Body>) {
/* RFC 7312 5.4
When a proxy receives a request with an absolute-form of
request-target, the proxy MUST ignore the received Host header field
(if any) and instead replace it with the host information of the
request-target. A proxy that forwards such a request MUST generate a
new Host field-value based on the received request-target rather than
forward the received Host field-value.
*/
match req.method() {
&Method::CONNECT => {}
_ => {
let nhost: Option<String> = { req.uri().authority_part().map(|a| a.as_str().into()) };
if let Some(n) = nhost {
req.headers_mut()
.insert(http::header::HOST, n.parse().unwrap());
return;
}
let nuri = req.headers().get(http::header::HOST).map(|host| {
let autht: Authority = host.to_str().unwrap().parse().unwrap();
let mut builder = hyper::Uri::builder();
builder.authority(autht);
//TODO(matt) do as map[
if let Some(p) = req.uri().path_and_query() {
builder.path_and_query(p.as_str());
}
if let Some(p) = req.uri().scheme_part() {
builder.scheme(p.as_str());
} else {
// Ok so this kind of sketchy, but since this is fixing up a client connection
// we'll never see an https one. Why? https is via CONNECT at the proxy
builder.scheme("http");
}
builder.build().unwrap()
});
match nuri {
Some(n) => *req.uri_mut() = n,
None => {}
}
}
}
}
#[derive(Clone)]
struct Proxy<U, S, A>
where
U: Authenticate + Sync + Send + Clone + 'static,
S: SiteAuthorize + Sync + Send + Clone + 'static,
A: Authorize + Sync + Send + Clone + 'static,
{
//TODO(matt) - trace filter
tracer: Option<mpsc::Sender<Trace>>,
ca: Arc<ca::CertAuthority>,
auth_config: AuthConfig<U, S, A>,
upstream_ssl_pool: Arc<pool::Pool<tokio_openssl::SslStream<tokio_tcp::TcpStream>>>,
}
impl<U, S, A> Proxy<U, S, A>
where
U: Authenticate + Sync + Send + Clone,
S: SiteAuthorize + Sync + Send + Clone,
A: Authorize + Sync + Send + Clone,
{
// Rework this instead of duping proxy do somehting else
fn | (&self) -> Proxy<U, S, A> {
Proxy {
tracer: self.tracer.iter().map(|t| t.clone()).next(),
ca: self.ca.clone(),
auth_config: self.auth_config.clone(),
upstream_ssl_pool: pool::Pool::empty(100),
}
}
fn handle<C: Connect + 'static>(
&self,
client: &Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let req_uuid = uuid::Uuid::new_v4();
let hostname = normalize_authority(req.uri());
// TODO this is slow and not async, and crappy
let upstream_addr = match hostname.to_socket_addrs() {
Ok(mut addrs) => match addrs.next() {
Some(addr) => addr,
None => return result(502),
},
Err(e) => {
eprintln!("Upstream resolution: ({}): {}", hostname, e);
return Box::new(futures::future::ok(result_502_resolve_failed(&hostname)));
}
};
let uid = self.auth_config.authenticate.authenticate(&req);
let x = uid
.and_then(|u| {
self.auth_config
.site
.authorize(&u, &hostname)
.map(|r| (u, r))
})
.and_then(|(u, site_result)| {
self.auth_config
.authorize
.authorize(&u, &req)
.map(|ar| (u, site_result, ar))
});
let _user = match x {
Ok((u, AuthzResult::Allow, AuthzResult::Allow)) => u,
Err(_) => return result(401),
_ => return result(403),
};
self.handle_inner(req_uuid, upstream_addr, client, req)
}
fn handle_inner<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
upstream_addr: std::net::SocketAddr,
client: &Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
crappy_log(&req);
let mitm_enabled = true;
match req.method() {
&Method::CONNECT => match is_mitm(&req, mitm_enabled) {
true => self.handle_mitm(req_uuid, client.clone(), upstream_addr, req),
false => handle_tls_raw(req_uuid, client, upstream_addr, req),
},
_ => self.handle_http(req_uuid, client, req),
}
}
fn handle_http_forward<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
mut client: Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let client = client.request(req);
match self.tracer.clone() {
Some(tx) => {
let f = tx
.send(Trace::TraceId(format!("{}", req_uuid)))
.map_err(|e| {
println!("Error in trace: {:?}", e);
io::Error::from(io::ErrorKind::Other)
});
Box::new(
f.join(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
}))
.map(|(_, b)| b),
)
}
None => Box::new(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
})),
}
}
fn handle_http<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
client: &Client<C>,
mut req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
make_absolute(&mut req);
let client = client.clone().request(req);
match self.tracer.clone() {
Some(tx) => {
let f = tx
.send(Trace::TraceId(format!("{}", req_uuid)))
.map_err(|e| {
println!("Error in trace: {:?}", e);
io::Error::from(io::ErrorKind::Other)
});
Box::new(
f.join(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
}))
.map(|(_, b)| b),
)
}
None => Box::new(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
})),
}
}
fn handle_mitm<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
client: Client<C>,
upstream_addr: std::net::SocketAddr,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let (resp_tx, resp_rx) = oneshot::channel();
// connect, then on_upgrade()
// this needs to be reworked
// there is a panic in upgrade none
let authority = req.uri().authority_part().unwrap().clone();
let cpair = TcpStream::connect(&upstream_addr)
.map_err(|err| eprintln!("mitm tcp connect: {}", err))
.and_then(move |upstream| {
let cx = SslConnector::builder(SslMethod::tls()).unwrap().build();
cx.connect_async(authority.host(), upstream)
.map(|ssl_conn| {
let _ = resp_tx.send(()).unwrap();
println!("MITM Connection established");
let peer_cert =
{ ssl_conn.get_ref().ssl().peer_certificate().unwrap().clone() };
(ssl_conn, peer_cert)
})
.map_err(|e| println!("tls error: {:}", e))
});
let upgraded = req.into_body().on_upgrade();
let ca = self.ca.clone();
let np = self.clone();
let req_uuid = req_uuid.clone();
let upg2 = upgraded
.map_err(|err| eprintln!("upgrade: {}", err))
.join(cpair)
.and_then(move |tuple| {
let (downstream, (upstream, peer_cert)) = tuple;
let ca = ca;
let req_uuid = req_uuid;
let peer_cert_signed = ca.sign_cert_from_cert(&peer_cert).unwrap();
let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
acceptor.set_private_key(ca.child_key.as_ref()).unwrap();
acceptor.set_certificate(peer_cert_signed.as_ref()).unwrap();
acceptor.check_private_key().unwrap();
let acceptor = acceptor.build();
acceptor
.accept_async(downstream)
.map_err(|e| eprintln!("accept: {}", e))
.and_then(move |tls_downstream| {
// This should cause the pool to have a single entry
// and then magic
let upstream_pool = {
let local_pool = pool::Pool::empty(1);
let pooled_upstream = pool::PoolItem::new(upstream);
pool::PoolItem::attach(pooled_upstream, local_pool.clone());
local_pool
};
Http::new()
.serve_connection(
tls_downstream,
service_fn(move |req: Request<Body>| {
let upstream_pool = upstream_pool.clone();
let uc = Client::builder()
.keep_alive(false)
.build(AlreadyConnected(upstream_pool));
// println!("In inner client handler: {} {:?}", req_uuid, req);
np.handle_http(req_uuid, &uc, req)
}),
)
.map_err(|err| {
eprintln!("Error in inner http: {}", err);
()
})
// This is proxy without analysis, just forward
// serve_connection
// let (u2dr, u2dw) = upstream_conn.split();
// let (d2ur, d2uw) = tls_downstream.split();
// let u2df = copy(u2dr, d2uw);
// let d2uf = copy(d2ur, u2dw);
// d2uf.join(u2df)
// .map_err(|err| eprintln!("mitm forward: {}", err));
})
})
.map(|_| ())
.map_err(|e| println!("Error {:?}", e));
hyper::rt::spawn(upg2);
Box::new(
resp_rx
.map(|_| 200)
.or_else(|_| Ok(502))
.and_then(|i| result(i)),
)
}
}
struct AlreadyConnected<T: Send + 'static + AsyncRead + AsyncWrite + 'static + Sync>(
Arc<pool::Pool<T>>,
);
impl<T: Send + 'static + AsyncRead + AsyncWrite + 'static + Sync> Connect for AlreadyConnected<T> {
type Transport = pool::PoolItem<T>;
/// An error occured when trying to connect.
type Error = io::Error;
/// A Future that will resolve to the connected Transport.
type Future = Box<Future<Item = (Self::Transport, Connected), Error = Self::Error> + Send>;
/// Connect to a destination.
fn connect(&self, _: hyper::client::connect::Destination) -> Self::Future {
let o = pool::Pool::checkout(self.0.clone()).unwrap();
Box::new(futures::future::ok((
o,
hyper::client::connect::Connected::new(),
)))
}
}
fn trace_handler(mut rx: mpsc::Receiver<Trace>) {
let _t = std::thread::spawn(move || {
let done = rx.for_each(|tx| {
match tx {
Trace::TraceId(uuid) => {
println!("Begin Tracing {}", uuid);
}
_ => {}
}
println!("Trace recv");
Ok(())
});
hyper::rt::run(done);
});
}
struct ProxyUserConfig {
key: String,
ca: String,
port: u16,
}
fn parse_options() -> Option<ProxyUserConfig> {
let matches = App::new("My Super Program")
.version("1.0")
.author("Matt Woodyard <matt@mattwoodyard.com>")
.about("Be a proxy")
.arg(
Arg::with_name("cafile")
.short("c")
.long("cafile")
.value_name("CAFILE")
.help("Set the root CA certificate ")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("caprivatekeyfile")
.short("k")
.long("caprivatekey")
.value_name("KEYFILE")
.help("Set the private key file root")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("port")
.short("p")
.long("port")
.value_name("PORT")
.help("which port do you want to run on")
.required(true)
.takes_value(true),
)
.get_matches();
Some(ProxyUserConfig {
ca: String::from(matches
.value_of("cafile")
.expect("Must specify CA root certificate file")),
key: String::from(matches
.value_of("caprivatekeyfile")
.expect("Must specify root key file")),
port: matches
.value_of("port")
.map(|s| s.parse::<u16>().expect("Invalid"))
.unwrap_or(8080),
})
}
fn main() {
pretty_env_logger::init();
let pconfig = parse_options().unwrap();
let ca = Arc::new(ca::CertAuthority::from_files(&pconfig.key, &pconfig.ca).unwrap());
let client = Client::new();
let addr = ([127, 0, 0, 1], pconfig.port).into();
let (tx, rx) = mpsc::channel(1024);
trace_handler(rx);
let proxy = Proxy {
tracer: Some(tx),
ca: ca,
auth_config: AuthConfig {
authenticate: NoAuth,
site: AdWareBlock,
authorize: AllowAll,
},
upstream_ssl_pool: pool::Pool::empty(100),
};
let new_svc = move || {
let proxy = proxy.clone();
let client = client.clone();
service_fn(move |req: Request<Body>| proxy.handle(&client, req))
};
// Need an Http
let server = Server::bind(&addr)
.serve(new_svc)
.map_err(|e| eprintln!("server error: {}", e));
hyper::rt::run(server);
}
| dup | identifier_name |
partdisk.go | package partdisk
import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"regexp"
"sort"
"strconv"
"strings"
"time"
)
//Max number of files for each size that can be created
const limitFiles uint64 = 25
//Length of random id string
const rstl int = 7
//Holds a representation of the file data
type FileCollection struct {
//Sizes of files in bytes
fileSizes []uint64
//Amount of files per size
fileAmmount []uint64
//Last request ID
flid int64
//Base dir made of random id string
frandi string
}
//Get FileCollection random string
func (fc FileCollection) GetRandStr() string {
return fc.frandi
}
//Get FileCollection fileSizes
func (fc FileCollection) GetFileSizes() []uint64 {
return fc.fileSizes
}
//Initializes a FileCollection struct
func (fc *FileCollection) NewfC(basedir string) {
// 512Kb 2Mb 8Mb 32Mb 128Mb
fc.fileSizes = []uint64{524288, 2097152, 8388608, 33554432, 134217728}
fc.fileAmmount = make([]uint64, len(fc.fileSizes))
fc.frandi = basedir+"/"+randstring(rstl)
}
//Creates a random string made of lower case letters only
func randstring(size int) string {
rval := make([]byte,size)
rand.Seed(time.Now().UnixNano())
for i:=0; i<size; i++ {
rval[i] = byte(rand.Intn(26) + 97)
}
return string(rval)
}
//Creates a random list of bytes with printable ASCII characters
func randbytes(size uint64) []byte {
const blength int = 1024
rval := make([]byte,size)
var base [blength]byte
var counter, index uint64
//Fill up the base array with random printable characters
rand.Seed(time.Now().UnixNano())
for x:=0; x<len(base); x++ {
base[x]=byte(rand.Intn(95) + 32) //ASCII 32 to 126
}
//Fill the rval slice with pseudorandom characters picked from the base array
for i:=uint64(0); i<size; i++ {
//This psuedo random algorith is explained in the documentation
counter += i + uint64(base[i%uint64(blength)])
index = counter%uint64(len(base))
rval[i]=base[index]
if i%uint64(blength) == 0 {
counter = uint64(rand.Intn(blength))
}
}
return rval
}
//Get the total number of bytes used up by the files already created
func (fc FileCollection) totalFileSize() (uint64,error) {
var tfsize uint64
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("totalSizeFiles(): Error listing directory: %s\n%s",directory,err.Error())
return 0,err
}
for _,v := range fileList {
tfsize += uint64(v.Size())
}
}
return tfsize,nil
}
//Compute the number of files of each size required for the size requested
//tsize contains the number of bytes to allocate
//hlimit is the maximum size that can be requested
func DefineFiles(tsize uint64, hilimit uint64, flS *FileCollection) error {
var nfiles, remain uint64
tfs, err := flS.totalFileSize()
if err != nil {
log.Printf("DefineFiles(): Error computing total file size: %s", err.Error())
return err
}
if tsize > tfs && tsize > hilimit { //Trying to add files and the total size exceeds the limit
return fmt.Errorf("Size requested is over the limit: requested %d bytes, limit: %d bytes.", tsize, hilimit)
}
for index, fsize := range flS.fileSizes {
nfiles = tsize / fsize
remain = tsize % fsize
if nfiles > limitFiles { //Use all files of this size, keep adding more files of higher capacities
tsize -= limitFiles * fsize
flS.fileAmmount[index] = limitFiles
} else if nfiles == 0 {
flS.fileAmmount[index] = 0
} else {
tsize -= nfiles * fsize
flS.fileAmmount[index] = nfiles
}
}
if tsize > flS.fileSizes[len(flS.fileSizes)-1] { //The remaining size to allocate is bigger than the biggest file sezie, Add more parts of the maximum size
nfiles = tsize / flS.fileSizes[len(flS.fileSizes)-1]
remain = tsize % flS.fileSizes[len(flS.fileSizes)-1]
flS.fileAmmount[len(flS.fileAmmount)-1] += nfiles
}
if remain > 0 { //The remain must be smaller than the bigger file size.
for index, fsize := range flS.fileSizes {
if remain <= 3*fsize {
signRemain := int(remain)
for signRemain > 0 {
flS.fileAmmount[index]++
signRemain -= int(fsize)
}
break
}
}
}
return nil
}
//Prints the number of _file_ elements defined | for index, value := range fS.fileSizes {
semiTotal += value * fS.fileAmmount[index]
rst += fmt.Sprintf("Files of size: %d, count: %d, total size: %d\n", value, fS.fileAmmount[index], value*fS.fileAmmount[index])
}
rst += fmt.Sprintf("Total size reserved: %d bytes.\n", semiTotal)
return rst
}
//Generate a message with information about the actual ammount and size of the existing files
func (fc FileCollection) GetActFiles() string {
var mensj string
var totalSize int64
mensj += fmt.Sprintf("Last request ID: %d\n",fc.flid)
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("GetActFiles(): Error listing directory: %s\n%s",directory,err.Error())
return "Error getting files information\n"
}
mensj += fmt.Sprintf("Files of size: %d, Count: %d\n", fsize,len(fileList))
for _,fl := range fileList{
totalSize += fl.Size()
}
}
mensj += fmt.Sprintf("Total size: %d bytes.\n",totalSize)
return mensj
}
//Create or remove files to reach the requested number of files of each size
func CreateFiles(fS *FileCollection, ts int64, filelock chan int64) {
var lt time.Time
var err error
select {
case <- time.After(5 * time.Second):
//If 5 seconds pass without getting the proper lock, abort
log.Printf("partdisk.CreateFiles(): timeout waiting for lock\n")
return
case chts := <- filelock:
if chts == ts { //Got the lock and it matches the timestamp received
//Proceed
fS.flid = ts
defer func(){
filelock <- 0 //Release lock
}()
lt = time.Now() //Start counting how long does the parts creation take
log.Printf("CreateFiles(): lock obtained, timestamps match: %d\n",ts)
} else {
log.Printf("CreateFiles(): lock obtained, but timestamps missmatch: %d - %d\n", ts,chts)
filelock <- chts
return
}
}
//Lock obtained proper, create/delete the files
err = adrefiles(fS)
if err != nil {
log.Printf("CreateFiles(): Error creating file: %s\n",err.Error())
return
}
log.Printf("CreateFiles(): Request %d completed in %d seconds\n",ts,int64(time.Since(lt).Seconds()))
}
//Add or remove files match the files definition in the FileCollection struct
func adrefiles(fS *FileCollection) error {
for index,value := range fS.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fS.frandi,value)
//Create a list of files in directory
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("adrefiles(): Error listing directory: %s",directory)
return err
}
//Sort the list of files
sort.Slice(fileList, func(i,j int) bool {
s1 := strings.TrimLeft(fileList[i].Name(),"f-")
s2 := strings.TrimLeft(fileList[j].Name(),"f-")
n1,_ := strconv.ParseInt(s1,10,32)
n2,_ := strconv.ParseInt(s2,10,32)
return n1 < n2
})
//Get the number of the last file created, 0 if none has been
var lastfnum uint64
if len(fileList) > 0 {
lastfnum,_ = strconv.ParseUint(strings.TrimLeft(fileList[len(fileList)-1].Name(),"f-"),10,32)
} else {
lastfnum = 0
}
log.Printf("Last file number: %d",lastfnum)
//Get the total size in bytes consumed by the files
var tfsize,rqsize,deltasize,fdelta uint64
for _,v := range fileList {
tfsize += uint64(v.Size())
//log.Printf("File: %s - Size: %d",v.Name(),v.Size())
}
log.Printf("Total file size in dir %s: %d",directory,tfsize)
rqsize = fS.fileAmmount[index]*value
log.Printf("Requested size: %d",rqsize)
if tfsize > rqsize { //Need to remove files
deltasize = tfsize - rqsize
fdelta = deltasize / value
log.Printf("- Need to remove %d bytes, %d files of size %d",deltasize,fdelta,value)
for n:=0;n<int(fdelta);n++{
filename := fmt.Sprintf("%s/d-%d/f-%d",fS.frandi,value,int(lastfnum)-n)
err = os.Remove(filename)
if err != nil {
log.Printf("adrefiles(): error deleting file %s:",filename)
return err
}
}
} else if tfsize < rqsize { //Need to create files
deltasize = rqsize - tfsize
fdelta = deltasize / value
log.Printf("+ Need to add %d bytes, %d files of size %d",deltasize,fdelta,value)
for n:=1;n<=int(fdelta);n++ {
filename := fmt.Sprintf("%s/d-%d/f-%d",fS.frandi,value,n+int(lastfnum))
err = newFile(filename,value)
if err != nil {
log.Printf("adrefiles(): error creating file %s:",filename)
return err
}
}
} else { //No need to add or remove anything
log.Printf("= No need to add or remove any files")
}
}
return nil
}
//Creates a single file of the indicated size
func newFile(filename string, size uint64) error {
const blength int = 1024
burval := make([]byte,blength)
var base [blength]byte
var counter, index uint64
//Fill up the base array with random printable characters
rand.Seed(time.Now().UnixNano())
for x:=0; x<len(base); x++ {
base[x]=byte(rand.Intn(95) + 32) //ASCII 32 to 126
}
f,err := os.Create(filename)
defer f.Close()
if err != nil {
log.Printf("newFile(): Error creating file: %s",filename)
return err
}
burval = base[:]
for i:=uint64(0); i<size; i++ {
counter += i + uint64(base[i%uint64(blength)])
index = counter%uint64(len(base))
burval[i%uint64(len(base))]=base[index]
if i%uint64(blength) == 0 {
_,err = f.Write(burval)
if err != nil {
log.Printf("newFile(): Error writing to file: %s",filename)
return err
}
counter = uint64(rand.Intn(blength))
}
}
return nil
}
//Returns a list of regular files with the correct name, in the directory specified, without
//directories or other types of files
func getFilesInDir(directory string) ([]os.FileInfo,error) {
entries, err := ioutil.ReadDir(directory)
if err != nil {
log.Printf("getFilesInDir(): Error reading directory: %s",directory)
return nil,err
}
var files []os.FileInfo
for _,entry := range entries {
if entry.Mode().IsRegular() {
match,_ := regexp.Match("f-[0-9]+",[]byte(entry.Name()))
if match {
files = append(files,entry)
}
}
}
return files,nil
} | func GetDefFiles(fS *FileCollection) string {
var semiTotal uint64
var rst string | random_line_split |
partdisk.go | package partdisk
import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"regexp"
"sort"
"strconv"
"strings"
"time"
)
//Max number of files for each size that can be created
const limitFiles uint64 = 25
//Length of random id string
const rstl int = 7
//Holds a representation of the file data
type FileCollection struct {
//Sizes of files in bytes
fileSizes []uint64
//Amount of files per size
fileAmmount []uint64
//Last request ID
flid int64
//Base dir made of random id string
frandi string
}
//Get FileCollection random string
func (fc FileCollection) GetRandStr() string {
return fc.frandi
}
//Get FileCollection fileSizes
func (fc FileCollection) GetFileSizes() []uint64 {
return fc.fileSizes
}
//Initializes a FileCollection struct
func (fc *FileCollection) NewfC(basedir string) {
// 512Kb 2Mb 8Mb 32Mb 128Mb
fc.fileSizes = []uint64{524288, 2097152, 8388608, 33554432, 134217728}
fc.fileAmmount = make([]uint64, len(fc.fileSizes))
fc.frandi = basedir+"/"+randstring(rstl)
}
//Creates a random string made of lower case letters only
func randstring(size int) string {
rval := make([]byte,size)
rand.Seed(time.Now().UnixNano())
for i:=0; i<size; i++ {
rval[i] = byte(rand.Intn(26) + 97)
}
return string(rval)
}
//Creates a random list of bytes with printable ASCII characters
func randbytes(size uint64) []byte {
const blength int = 1024
rval := make([]byte,size)
var base [blength]byte
var counter, index uint64
//Fill up the base array with random printable characters
rand.Seed(time.Now().UnixNano())
for x:=0; x<len(base); x++ {
base[x]=byte(rand.Intn(95) + 32) //ASCII 32 to 126
}
//Fill the rval slice with pseudorandom characters picked from the base array
for i:=uint64(0); i<size; i++ {
//This psuedo random algorith is explained in the documentation
counter += i + uint64(base[i%uint64(blength)])
index = counter%uint64(len(base))
rval[i]=base[index]
if i%uint64(blength) == 0 {
counter = uint64(rand.Intn(blength))
}
}
return rval
}
//Get the total number of bytes used up by the files already created
func (fc FileCollection) totalFileSize() (uint64,error) {
var tfsize uint64
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("totalSizeFiles(): Error listing directory: %s\n%s",directory,err.Error())
return 0,err
}
for _,v := range fileList {
tfsize += uint64(v.Size())
}
}
return tfsize,nil
}
//Compute the number of files of each size required for the size requested
//tsize contains the number of bytes to allocate
//hlimit is the maximum size that can be requested
func DefineFiles(tsize uint64, hilimit uint64, flS *FileCollection) error {
var nfiles, remain uint64
tfs, err := flS.totalFileSize()
if err != nil {
log.Printf("DefineFiles(): Error computing total file size: %s", err.Error())
return err
}
if tsize > tfs && tsize > hilimit { //Trying to add files and the total size exceeds the limit
return fmt.Errorf("Size requested is over the limit: requested %d bytes, limit: %d bytes.", tsize, hilimit)
}
for index, fsize := range flS.fileSizes {
nfiles = tsize / fsize
remain = tsize % fsize
if nfiles > limitFiles { //Use all files of this size, keep adding more files of higher capacities
tsize -= limitFiles * fsize
flS.fileAmmount[index] = limitFiles
} else if nfiles == 0 {
flS.fileAmmount[index] = 0
} else {
tsize -= nfiles * fsize
flS.fileAmmount[index] = nfiles
}
}
if tsize > flS.fileSizes[len(flS.fileSizes)-1] { //The remaining size to allocate is bigger than the biggest file sezie, Add more parts of the maximum size
nfiles = tsize / flS.fileSizes[len(flS.fileSizes)-1]
remain = tsize % flS.fileSizes[len(flS.fileSizes)-1]
flS.fileAmmount[len(flS.fileAmmount)-1] += nfiles
}
if remain > 0 { //The remain must be smaller than the bigger file size.
for index, fsize := range flS.fileSizes {
if remain <= 3*fsize {
signRemain := int(remain)
for signRemain > 0 {
flS.fileAmmount[index]++
signRemain -= int(fsize)
}
break
}
}
}
return nil
}
//Prints the number of _file_ elements defined
func GetDefFiles(fS *FileCollection) string {
var semiTotal uint64
var rst string
for index, value := range fS.fileSizes {
semiTotal += value * fS.fileAmmount[index]
rst += fmt.Sprintf("Files of size: %d, count: %d, total size: %d\n", value, fS.fileAmmount[index], value*fS.fileAmmount[index])
}
rst += fmt.Sprintf("Total size reserved: %d bytes.\n", semiTotal)
return rst
}
//Generate a message with information about the actual ammount and size of the existing files
func (fc FileCollection) GetActFiles() string {
var mensj string
var totalSize int64
mensj += fmt.Sprintf("Last request ID: %d\n",fc.flid)
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("GetActFiles(): Error listing directory: %s\n%s",directory,err.Error())
return "Error getting files information\n"
}
mensj += fmt.Sprintf("Files of size: %d, Count: %d\n", fsize,len(fileList))
for _,fl := range fileList{
totalSize += fl.Size()
}
}
mensj += fmt.Sprintf("Total size: %d bytes.\n",totalSize)
return mensj
}
//Create or remove files to reach the requested number of files of each size
func | (fS *FileCollection, ts int64, filelock chan int64) {
var lt time.Time
var err error
select {
case <- time.After(5 * time.Second):
//If 5 seconds pass without getting the proper lock, abort
log.Printf("partdisk.CreateFiles(): timeout waiting for lock\n")
return
case chts := <- filelock:
if chts == ts { //Got the lock and it matches the timestamp received
//Proceed
fS.flid = ts
defer func(){
filelock <- 0 //Release lock
}()
lt = time.Now() //Start counting how long does the parts creation take
log.Printf("CreateFiles(): lock obtained, timestamps match: %d\n",ts)
} else {
log.Printf("CreateFiles(): lock obtained, but timestamps missmatch: %d - %d\n", ts,chts)
filelock <- chts
return
}
}
//Lock obtained proper, create/delete the files
err = adrefiles(fS)
if err != nil {
log.Printf("CreateFiles(): Error creating file: %s\n",err.Error())
return
}
log.Printf("CreateFiles(): Request %d completed in %d seconds\n",ts,int64(time.Since(lt).Seconds()))
}
//Add or remove files match the files definition in the FileCollection struct
func adrefiles(fS *FileCollection) error {
for index,value := range fS.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fS.frandi,value)
//Create a list of files in directory
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("adrefiles(): Error listing directory: %s",directory)
return err
}
//Sort the list of files
sort.Slice(fileList, func(i,j int) bool {
s1 := strings.TrimLeft(fileList[i].Name(),"f-")
s2 := strings.TrimLeft(fileList[j].Name(),"f-")
n1,_ := strconv.ParseInt(s1,10,32)
n2,_ := strconv.ParseInt(s2,10,32)
return n1 < n2
})
//Get the number of the last file created, 0 if none has been
var lastfnum uint64
if len(fileList) > 0 {
lastfnum,_ = strconv.ParseUint(strings.TrimLeft(fileList[len(fileList)-1].Name(),"f-"),10,32)
} else {
lastfnum = 0
}
log.Printf("Last file number: %d",lastfnum)
//Get the total size in bytes consumed by the files
var tfsize,rqsize,deltasize,fdelta uint64
for _,v := range fileList {
tfsize += uint64(v.Size())
//log.Printf("File: %s - Size: %d",v.Name(),v.Size())
}
log.Printf("Total file size in dir %s: %d",directory,tfsize)
rqsize = fS.fileAmmount[index]*value
log.Printf("Requested size: %d",rqsize)
if tfsize > rqsize { //Need to remove files
deltasize = tfsize - rqsize
fdelta = deltasize / value
log.Printf("- Need to remove %d bytes, %d files of size %d",deltasize,fdelta,value)
for n:=0;n<int(fdelta);n++{
filename := fmt.Sprintf("%s/d-%d/f-%d",fS.frandi,value,int(lastfnum)-n)
err = os.Remove(filename)
if err != nil {
log.Printf("adrefiles(): error deleting file %s:",filename)
return err
}
}
} else if tfsize < rqsize { //Need to create files
deltasize = rqsize - tfsize
fdelta = deltasize / value
log.Printf("+ Need to add %d bytes, %d files of size %d",deltasize,fdelta,value)
for n:=1;n<=int(fdelta);n++ {
filename := fmt.Sprintf("%s/d-%d/f-%d",fS.frandi,value,n+int(lastfnum))
err = newFile(filename,value)
if err != nil {
log.Printf("adrefiles(): error creating file %s:",filename)
return err
}
}
} else { //No need to add or remove anything
log.Printf("= No need to add or remove any files")
}
}
return nil
}
//Creates a single file of the indicated size
func newFile(filename string, size uint64) error {
const blength int = 1024
burval := make([]byte,blength)
var base [blength]byte
var counter, index uint64
//Fill up the base array with random printable characters
rand.Seed(time.Now().UnixNano())
for x:=0; x<len(base); x++ {
base[x]=byte(rand.Intn(95) + 32) //ASCII 32 to 126
}
f,err := os.Create(filename)
defer f.Close()
if err != nil {
log.Printf("newFile(): Error creating file: %s",filename)
return err
}
burval = base[:]
for i:=uint64(0); i<size; i++ {
counter += i + uint64(base[i%uint64(blength)])
index = counter%uint64(len(base))
burval[i%uint64(len(base))]=base[index]
if i%uint64(blength) == 0 {
_,err = f.Write(burval)
if err != nil {
log.Printf("newFile(): Error writing to file: %s",filename)
return err
}
counter = uint64(rand.Intn(blength))
}
}
return nil
}
//Returns a list of regular files with the correct name, in the directory specified, without
//directories or other types of files
func getFilesInDir(directory string) ([]os.FileInfo,error) {
entries, err := ioutil.ReadDir(directory)
if err != nil {
log.Printf("getFilesInDir(): Error reading directory: %s",directory)
return nil,err
}
var files []os.FileInfo
for _,entry := range entries {
if entry.Mode().IsRegular() {
match,_ := regexp.Match("f-[0-9]+",[]byte(entry.Name()))
if match {
files = append(files,entry)
}
}
}
return files,nil
} | CreateFiles | identifier_name |
partdisk.go | package partdisk
import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"regexp"
"sort"
"strconv"
"strings"
"time"
)
//Max number of files for each size that can be created
const limitFiles uint64 = 25
//Length of random id string
const rstl int = 7
//Holds a representation of the file data
type FileCollection struct {
//Sizes of files in bytes
fileSizes []uint64
//Amount of files per size
fileAmmount []uint64
//Last request ID
flid int64
//Base dir made of random id string
frandi string
}
//Get FileCollection random string
func (fc FileCollection) GetRandStr() string {
return fc.frandi
}
//Get FileCollection fileSizes
func (fc FileCollection) GetFileSizes() []uint64 {
return fc.fileSizes
}
//Initializes a FileCollection struct
func (fc *FileCollection) NewfC(basedir string) {
// 512Kb 2Mb 8Mb 32Mb 128Mb
fc.fileSizes = []uint64{524288, 2097152, 8388608, 33554432, 134217728}
fc.fileAmmount = make([]uint64, len(fc.fileSizes))
fc.frandi = basedir+"/"+randstring(rstl)
}
//Creates a random string made of lower case letters only
func randstring(size int) string {
rval := make([]byte,size)
rand.Seed(time.Now().UnixNano())
for i:=0; i<size; i++ {
rval[i] = byte(rand.Intn(26) + 97)
}
return string(rval)
}
//Creates a random list of bytes with printable ASCII characters
func randbytes(size uint64) []byte {
const blength int = 1024
rval := make([]byte,size)
var base [blength]byte
var counter, index uint64
//Fill up the base array with random printable characters
rand.Seed(time.Now().UnixNano())
for x:=0; x<len(base); x++ {
base[x]=byte(rand.Intn(95) + 32) //ASCII 32 to 126
}
//Fill the rval slice with pseudorandom characters picked from the base array
for i:=uint64(0); i<size; i++ {
//This psuedo random algorith is explained in the documentation
counter += i + uint64(base[i%uint64(blength)])
index = counter%uint64(len(base))
rval[i]=base[index]
if i%uint64(blength) == 0 {
counter = uint64(rand.Intn(blength))
}
}
return rval
}
//Get the total number of bytes used up by the files already created
func (fc FileCollection) totalFileSize() (uint64,error) {
var tfsize uint64
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("totalSizeFiles(): Error listing directory: %s\n%s",directory,err.Error())
return 0,err
}
for _,v := range fileList {
tfsize += uint64(v.Size())
}
}
return tfsize,nil
}
//Compute the number of files of each size required for the size requested
//tsize contains the number of bytes to allocate
//hlimit is the maximum size that can be requested
func DefineFiles(tsize uint64, hilimit uint64, flS *FileCollection) error {
var nfiles, remain uint64
tfs, err := flS.totalFileSize()
if err != nil {
log.Printf("DefineFiles(): Error computing total file size: %s", err.Error())
return err
}
if tsize > tfs && tsize > hilimit { //Trying to add files and the total size exceeds the limit
return fmt.Errorf("Size requested is over the limit: requested %d bytes, limit: %d bytes.", tsize, hilimit)
}
for index, fsize := range flS.fileSizes {
nfiles = tsize / fsize
remain = tsize % fsize
if nfiles > limitFiles { //Use all files of this size, keep adding more files of higher capacities
tsize -= limitFiles * fsize
flS.fileAmmount[index] = limitFiles
} else if nfiles == 0 {
flS.fileAmmount[index] = 0
} else {
tsize -= nfiles * fsize
flS.fileAmmount[index] = nfiles
}
}
if tsize > flS.fileSizes[len(flS.fileSizes)-1] { //The remaining size to allocate is bigger than the biggest file sezie, Add more parts of the maximum size
nfiles = tsize / flS.fileSizes[len(flS.fileSizes)-1]
remain = tsize % flS.fileSizes[len(flS.fileSizes)-1]
flS.fileAmmount[len(flS.fileAmmount)-1] += nfiles
}
if remain > 0 { //The remain must be smaller than the bigger file size.
for index, fsize := range flS.fileSizes {
if remain <= 3*fsize {
signRemain := int(remain)
for signRemain > 0 {
flS.fileAmmount[index]++
signRemain -= int(fsize)
}
break
}
}
}
return nil
}
//Prints the number of _file_ elements defined
func GetDefFiles(fS *FileCollection) string {
var semiTotal uint64
var rst string
for index, value := range fS.fileSizes {
semiTotal += value * fS.fileAmmount[index]
rst += fmt.Sprintf("Files of size: %d, count: %d, total size: %d\n", value, fS.fileAmmount[index], value*fS.fileAmmount[index])
}
rst += fmt.Sprintf("Total size reserved: %d bytes.\n", semiTotal)
return rst
}
//Generate a message with information about the actual ammount and size of the existing files
func (fc FileCollection) GetActFiles() string {
var mensj string
var totalSize int64
mensj += fmt.Sprintf("Last request ID: %d\n",fc.flid)
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("GetActFiles(): Error listing directory: %s\n%s",directory,err.Error())
return "Error getting files information\n"
}
mensj += fmt.Sprintf("Files of size: %d, Count: %d\n", fsize,len(fileList))
for _,fl := range fileList{
totalSize += fl.Size()
}
}
mensj += fmt.Sprintf("Total size: %d bytes.\n",totalSize)
return mensj
}
//Create or remove files to reach the requested number of files of each size
func CreateFiles(fS *FileCollection, ts int64, filelock chan int64) |
//Add or remove files match the files definition in the FileCollection struct
func adrefiles(fS *FileCollection) error {
for index,value := range fS.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fS.frandi,value)
//Create a list of files in directory
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("adrefiles(): Error listing directory: %s",directory)
return err
}
//Sort the list of files
sort.Slice(fileList, func(i,j int) bool {
s1 := strings.TrimLeft(fileList[i].Name(),"f-")
s2 := strings.TrimLeft(fileList[j].Name(),"f-")
n1,_ := strconv.ParseInt(s1,10,32)
n2,_ := strconv.ParseInt(s2,10,32)
return n1 < n2
})
//Get the number of the last file created, 0 if none has been
var lastfnum uint64
if len(fileList) > 0 {
lastfnum,_ = strconv.ParseUint(strings.TrimLeft(fileList[len(fileList)-1].Name(),"f-"),10,32)
} else {
lastfnum = 0
}
log.Printf("Last file number: %d",lastfnum)
//Get the total size in bytes consumed by the files
var tfsize,rqsize,deltasize,fdelta uint64
for _,v := range fileList {
tfsize += uint64(v.Size())
//log.Printf("File: %s - Size: %d",v.Name(),v.Size())
}
log.Printf("Total file size in dir %s: %d",directory,tfsize)
rqsize = fS.fileAmmount[index]*value
log.Printf("Requested size: %d",rqsize)
if tfsize > rqsize { //Need to remove files
deltasize = tfsize - rqsize
fdelta = deltasize / value
log.Printf("- Need to remove %d bytes, %d files of size %d",deltasize,fdelta,value)
for n:=0;n<int(fdelta);n++{
filename := fmt.Sprintf("%s/d-%d/f-%d",fS.frandi,value,int(lastfnum)-n)
err = os.Remove(filename)
if err != nil {
log.Printf("adrefiles(): error deleting file %s:",filename)
return err
}
}
} else if tfsize < rqsize { //Need to create files
deltasize = rqsize - tfsize
fdelta = deltasize / value
log.Printf("+ Need to add %d bytes, %d files of size %d",deltasize,fdelta,value)
for n:=1;n<=int(fdelta);n++ {
filename := fmt.Sprintf("%s/d-%d/f-%d",fS.frandi,value,n+int(lastfnum))
err = newFile(filename,value)
if err != nil {
log.Printf("adrefiles(): error creating file %s:",filename)
return err
}
}
} else { //No need to add or remove anything
log.Printf("= No need to add or remove any files")
}
}
return nil
}
//Creates a single file of the indicated size
func newFile(filename string, size uint64) error {
const blength int = 1024
burval := make([]byte,blength)
var base [blength]byte
var counter, index uint64
//Fill up the base array with random printable characters
rand.Seed(time.Now().UnixNano())
for x:=0; x<len(base); x++ {
base[x]=byte(rand.Intn(95) + 32) //ASCII 32 to 126
}
f,err := os.Create(filename)
defer f.Close()
if err != nil {
log.Printf("newFile(): Error creating file: %s",filename)
return err
}
burval = base[:]
for i:=uint64(0); i<size; i++ {
counter += i + uint64(base[i%uint64(blength)])
index = counter%uint64(len(base))
burval[i%uint64(len(base))]=base[index]
if i%uint64(blength) == 0 {
_,err = f.Write(burval)
if err != nil {
log.Printf("newFile(): Error writing to file: %s",filename)
return err
}
counter = uint64(rand.Intn(blength))
}
}
return nil
}
//Returns a list of regular files with the correct name, in the directory specified, without
//directories or other types of files
func getFilesInDir(directory string) ([]os.FileInfo,error) {
entries, err := ioutil.ReadDir(directory)
if err != nil {
log.Printf("getFilesInDir(): Error reading directory: %s",directory)
return nil,err
}
var files []os.FileInfo
for _,entry := range entries {
if entry.Mode().IsRegular() {
match,_ := regexp.Match("f-[0-9]+",[]byte(entry.Name()))
if match {
files = append(files,entry)
}
}
}
return files,nil
} | {
var lt time.Time
var err error
select {
case <- time.After(5 * time.Second):
//If 5 seconds pass without getting the proper lock, abort
log.Printf("partdisk.CreateFiles(): timeout waiting for lock\n")
return
case chts := <- filelock:
if chts == ts { //Got the lock and it matches the timestamp received
//Proceed
fS.flid = ts
defer func(){
filelock <- 0 //Release lock
}()
lt = time.Now() //Start counting how long does the parts creation take
log.Printf("CreateFiles(): lock obtained, timestamps match: %d\n",ts)
} else {
log.Printf("CreateFiles(): lock obtained, but timestamps missmatch: %d - %d\n", ts,chts)
filelock <- chts
return
}
}
//Lock obtained proper, create/delete the files
err = adrefiles(fS)
if err != nil {
log.Printf("CreateFiles(): Error creating file: %s\n",err.Error())
return
}
log.Printf("CreateFiles(): Request %d completed in %d seconds\n",ts,int64(time.Since(lt).Seconds()))
} | identifier_body |
partdisk.go | package partdisk
import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"regexp"
"sort"
"strconv"
"strings"
"time"
)
//Max number of files for each size that can be created
const limitFiles uint64 = 25
//Length of random id string
const rstl int = 7
//Holds a representation of the file data
type FileCollection struct {
//Sizes of files in bytes
fileSizes []uint64
//Amount of files per size
fileAmmount []uint64
//Last request ID
flid int64
//Base dir made of random id string
frandi string
}
//Get FileCollection random string
func (fc FileCollection) GetRandStr() string {
return fc.frandi
}
//Get FileCollection fileSizes
func (fc FileCollection) GetFileSizes() []uint64 {
return fc.fileSizes
}
//Initializes a FileCollection struct
func (fc *FileCollection) NewfC(basedir string) {
// 512Kb 2Mb 8Mb 32Mb 128Mb
fc.fileSizes = []uint64{524288, 2097152, 8388608, 33554432, 134217728}
fc.fileAmmount = make([]uint64, len(fc.fileSizes))
fc.frandi = basedir+"/"+randstring(rstl)
}
//Creates a random string made of lower case letters only
func randstring(size int) string {
rval := make([]byte,size)
rand.Seed(time.Now().UnixNano())
for i:=0; i<size; i++ {
rval[i] = byte(rand.Intn(26) + 97)
}
return string(rval)
}
//Creates a random list of bytes with printable ASCII characters
func randbytes(size uint64) []byte {
const blength int = 1024
rval := make([]byte,size)
var base [blength]byte
var counter, index uint64
//Fill up the base array with random printable characters
rand.Seed(time.Now().UnixNano())
for x:=0; x<len(base); x++ {
base[x]=byte(rand.Intn(95) + 32) //ASCII 32 to 126
}
//Fill the rval slice with pseudorandom characters picked from the base array
for i:=uint64(0); i<size; i++ {
//This psuedo random algorith is explained in the documentation
counter += i + uint64(base[i%uint64(blength)])
index = counter%uint64(len(base))
rval[i]=base[index]
if i%uint64(blength) == 0 {
counter = uint64(rand.Intn(blength))
}
}
return rval
}
//Get the total number of bytes used up by the files already created
func (fc FileCollection) totalFileSize() (uint64,error) {
var tfsize uint64
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("totalSizeFiles(): Error listing directory: %s\n%s",directory,err.Error())
return 0,err
}
for _,v := range fileList {
tfsize += uint64(v.Size())
}
}
return tfsize,nil
}
//Compute the number of files of each size required for the size requested
//tsize contains the number of bytes to allocate
//hlimit is the maximum size that can be requested
func DefineFiles(tsize uint64, hilimit uint64, flS *FileCollection) error {
var nfiles, remain uint64
tfs, err := flS.totalFileSize()
if err != nil {
log.Printf("DefineFiles(): Error computing total file size: %s", err.Error())
return err
}
if tsize > tfs && tsize > hilimit { //Trying to add files and the total size exceeds the limit
return fmt.Errorf("Size requested is over the limit: requested %d bytes, limit: %d bytes.", tsize, hilimit)
}
for index, fsize := range flS.fileSizes {
nfiles = tsize / fsize
remain = tsize % fsize
if nfiles > limitFiles { //Use all files of this size, keep adding more files of higher capacities
tsize -= limitFiles * fsize
flS.fileAmmount[index] = limitFiles
} else if nfiles == 0 {
flS.fileAmmount[index] = 0
} else {
tsize -= nfiles * fsize
flS.fileAmmount[index] = nfiles
}
}
if tsize > flS.fileSizes[len(flS.fileSizes)-1] { //The remaining size to allocate is bigger than the biggest file sezie, Add more parts of the maximum size
nfiles = tsize / flS.fileSizes[len(flS.fileSizes)-1]
remain = tsize % flS.fileSizes[len(flS.fileSizes)-1]
flS.fileAmmount[len(flS.fileAmmount)-1] += nfiles
}
if remain > 0 { //The remain must be smaller than the bigger file size.
for index, fsize := range flS.fileSizes {
if remain <= 3*fsize {
signRemain := int(remain)
for signRemain > 0 {
flS.fileAmmount[index]++
signRemain -= int(fsize)
}
break
}
}
}
return nil
}
//Prints the number of _file_ elements defined
func GetDefFiles(fS *FileCollection) string {
var semiTotal uint64
var rst string
for index, value := range fS.fileSizes {
semiTotal += value * fS.fileAmmount[index]
rst += fmt.Sprintf("Files of size: %d, count: %d, total size: %d\n", value, fS.fileAmmount[index], value*fS.fileAmmount[index])
}
rst += fmt.Sprintf("Total size reserved: %d bytes.\n", semiTotal)
return rst
}
//Generate a message with information about the actual ammount and size of the existing files
func (fc FileCollection) GetActFiles() string {
var mensj string
var totalSize int64
mensj += fmt.Sprintf("Last request ID: %d\n",fc.flid)
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("GetActFiles(): Error listing directory: %s\n%s",directory,err.Error())
return "Error getting files information\n"
}
mensj += fmt.Sprintf("Files of size: %d, Count: %d\n", fsize,len(fileList))
for _,fl := range fileList{
totalSize += fl.Size()
}
}
mensj += fmt.Sprintf("Total size: %d bytes.\n",totalSize)
return mensj
}
//Create or remove files to reach the requested number of files of each size
func CreateFiles(fS *FileCollection, ts int64, filelock chan int64) {
var lt time.Time
var err error
select {
case <- time.After(5 * time.Second):
//If 5 seconds pass without getting the proper lock, abort
log.Printf("partdisk.CreateFiles(): timeout waiting for lock\n")
return
case chts := <- filelock:
if chts == ts { //Got the lock and it matches the timestamp received
//Proceed
fS.flid = ts
defer func(){
filelock <- 0 //Release lock
}()
lt = time.Now() //Start counting how long does the parts creation take
log.Printf("CreateFiles(): lock obtained, timestamps match: %d\n",ts)
} else {
log.Printf("CreateFiles(): lock obtained, but timestamps missmatch: %d - %d\n", ts,chts)
filelock <- chts
return
}
}
//Lock obtained proper, create/delete the files
err = adrefiles(fS)
if err != nil {
log.Printf("CreateFiles(): Error creating file: %s\n",err.Error())
return
}
log.Printf("CreateFiles(): Request %d completed in %d seconds\n",ts,int64(time.Since(lt).Seconds()))
}
//Add or remove files match the files definition in the FileCollection struct
func adrefiles(fS *FileCollection) error {
for index,value := range fS.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fS.frandi,value)
//Create a list of files in directory
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("adrefiles(): Error listing directory: %s",directory)
return err
}
//Sort the list of files
sort.Slice(fileList, func(i,j int) bool {
s1 := strings.TrimLeft(fileList[i].Name(),"f-")
s2 := strings.TrimLeft(fileList[j].Name(),"f-")
n1,_ := strconv.ParseInt(s1,10,32)
n2,_ := strconv.ParseInt(s2,10,32)
return n1 < n2
})
//Get the number of the last file created, 0 if none has been
var lastfnum uint64
if len(fileList) > 0 {
lastfnum,_ = strconv.ParseUint(strings.TrimLeft(fileList[len(fileList)-1].Name(),"f-"),10,32)
} else {
lastfnum = 0
}
log.Printf("Last file number: %d",lastfnum)
//Get the total size in bytes consumed by the files
var tfsize,rqsize,deltasize,fdelta uint64
for _,v := range fileList {
tfsize += uint64(v.Size())
//log.Printf("File: %s - Size: %d",v.Name(),v.Size())
}
log.Printf("Total file size in dir %s: %d",directory,tfsize)
rqsize = fS.fileAmmount[index]*value
log.Printf("Requested size: %d",rqsize)
if tfsize > rqsize { //Need to remove files
deltasize = tfsize - rqsize
fdelta = deltasize / value
log.Printf("- Need to remove %d bytes, %d files of size %d",deltasize,fdelta,value)
for n:=0;n<int(fdelta);n++{
filename := fmt.Sprintf("%s/d-%d/f-%d",fS.frandi,value,int(lastfnum)-n)
err = os.Remove(filename)
if err != nil {
log.Printf("adrefiles(): error deleting file %s:",filename)
return err
}
}
} else if tfsize < rqsize { //Need to create files
deltasize = rqsize - tfsize
fdelta = deltasize / value
log.Printf("+ Need to add %d bytes, %d files of size %d",deltasize,fdelta,value)
for n:=1;n<=int(fdelta);n++ {
filename := fmt.Sprintf("%s/d-%d/f-%d",fS.frandi,value,n+int(lastfnum))
err = newFile(filename,value)
if err != nil {
log.Printf("adrefiles(): error creating file %s:",filename)
return err
}
}
} else { //No need to add or remove anything
log.Printf("= No need to add or remove any files")
}
}
return nil
}
//Creates a single file of the indicated size
func newFile(filename string, size uint64) error {
const blength int = 1024
burval := make([]byte,blength)
var base [blength]byte
var counter, index uint64
//Fill up the base array with random printable characters
rand.Seed(time.Now().UnixNano())
for x:=0; x<len(base); x++ |
f,err := os.Create(filename)
defer f.Close()
if err != nil {
log.Printf("newFile(): Error creating file: %s",filename)
return err
}
burval = base[:]
for i:=uint64(0); i<size; i++ {
counter += i + uint64(base[i%uint64(blength)])
index = counter%uint64(len(base))
burval[i%uint64(len(base))]=base[index]
if i%uint64(blength) == 0 {
_,err = f.Write(burval)
if err != nil {
log.Printf("newFile(): Error writing to file: %s",filename)
return err
}
counter = uint64(rand.Intn(blength))
}
}
return nil
}
//Returns a list of regular files with the correct name, in the directory specified, without
//directories or other types of files
func getFilesInDir(directory string) ([]os.FileInfo,error) {
entries, err := ioutil.ReadDir(directory)
if err != nil {
log.Printf("getFilesInDir(): Error reading directory: %s",directory)
return nil,err
}
var files []os.FileInfo
for _,entry := range entries {
if entry.Mode().IsRegular() {
match,_ := regexp.Match("f-[0-9]+",[]byte(entry.Name()))
if match {
files = append(files,entry)
}
}
}
return files,nil
} | {
base[x]=byte(rand.Intn(95) + 32) //ASCII 32 to 126
} | conditional_block |
main.py | # import libraries
import cv2
import time
import numpy as np
import sqlite3
import matplotlib.pyplot as plt
con = sqlite3.connect("traffic.db")
c = con.cursor()
data = c.execute("""SELECT * FROM data""")
rows = c.fetchall()
df = []
df1 = []
df2 = []
df3 = []
for rows in rows:
df.append(rows[0])
df1.append(rows[1])
df2.append(rows[2])
df3.append(rows[3])
con.commit()
c.close()
con.close()
def capturing(p):
net = cv2.dnn.readNet("yolo/yolov3.weights", "yolo/yolov3.cfg")
counter = 1
with open("yolo/coco.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3)) # to get list of colors for each possible class
# Loading image
with open("{}.jpeg".format(counter), "wb") as f:
f.write(p)
frame = cv2.imread("{}.jpeg".format(counter))
frame = cv2.resize(frame, None, fx=0.4, fy=0.4)
height, width, channels = frame.shape
startingtime = time.time()
frame_id = 0
# Detecting objects
blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
l = ['person', 'car', 'truck', 'bus', 'bike']
m = dict({'person': 1, 'car': 15, 'truck': 20, 'bus': 20, 'bike': 5})
# Showing information on the screen
class_ids = []
confidences = []
boxes = [] # coordinate of bounding box
for out in outs:
for detection in out:
scores = detection[5:] # getting all 80 scores
class_id = np.argmax(scores) # finding the max score
confidence = scores[class_id]
# find out strong predictions greater then. 5
if confidence > 0.5:
# Object detected
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
# Rectangle coordinates
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
count_label = []
count = []
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
if label not in count_label:
if label in l:
count_label.append(label)
count.append(int(1))
else:
tmp = 0
for k in count_label:
if k == label:
count[tmp] = count[tmp] + 1
tmp = tmp + 1
color = colors[class_ids[i]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
cv2.putText(frame, label, (x, y + 30), font, 3, color, 3)
x = 0
for k in range(len(count_label)):
x = x + m[count_label[k]]
elapsed_time = time.time() - startingtime
fps = frame_id / elapsed_time
cv2.putText(frame, "FPS:" + str(fps), (10, 30), font, 3, (0, 0, 0), 1)
cv2.imshow("Image", frame)
key = cv2.waitKey(1) # 0 keeps on hold 1 waits for a millisecond
return x
# define the shape of the environment (i.e., its states)before importing map let's do it in 11 by 11 area
environment_rows = 6
environment_columns = 6
# Create a 3D numpy array to hold the current Q-values for each state and action pair: Q(s, a)
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# as well as a third "action" dimension.
# The "action" dimension consists of 4 layers that will allow us to keep track of
# the Q-values for each possible action in
# each state (see next cell for a description of possible actions).
# The value of each (state, action) pair is initialized to 0.
q_values = np.zeros((environment_rows, environment_columns, 4))
# define actions
# numeric action codes: 0 = up, 1 = right, 2 = down, 3 = left
actions = ['up', 'right', 'down', 'left']
# Create a 2D numpy array to hold the rewards for each state.
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# and each value is initialized to -999999.
rewards = np.full((environment_rows, environment_columns), -999999.)
k = 0
print("Pick the destination Location from the list")
print("Locations :")
for i in range(len(df)):
k = int(capturing(df[i]))
if k == 0:
k = 1
rewards[df1[i]-23, df2[i]-23] = k*(-1)
print(df3[i])
# taking the value of destination
goalone = -1
goaltwo = -1
goallo=input("Enter Destination Location : ")
for i in range(len(df)):
if df3[i] == goallo:
goalone = df1[i]-23
goaltwo = df2[i]-23
if goalone == -1 or goaltwo == -1:
print("Location not found please check for typos and case if you think u entered correct location")
exit()
# set the reward for reaching goal (i.e., the goal) to 999999
rewards[goalone,goaltwo] = 999999.0
# define a function that determines if the specified location is a terminal state
def is_terminal_state(current_row_index, current_column_index):
# if the reward for this location is -1, then it is not a terminal state
# (i.e., it is a path which we can travel)
if rewards[current_row_index, current_column_index] == 999999.0 or rewards[current_row_index, current_column_index] == -999999.0:
return True
else:
return False
# define a function that will choose a random, non-terminal starting location
def | ():
# get a random row and column index
current_row_index = np.random.randint(environment_rows)
current_column_index = np.random.randint(environment_columns)
# continue choosing random row and column indexes until a non-terminal state is identified
# (i.e., until the chosen state is a 'path which we can travel').
while is_terminal_state(current_row_index, current_column_index):
current_row_index = np.random.randint(environment_rows)
current_column_index = np.random.randint(environment_columns)
return current_row_index, current_column_index
# define an epsilon greedy algorithm that will choose which action to take next (i.e., where to move next)
def get_next_action(current_row_index, current_column_index, epsilon):
# if a randomly chosen value between 0 and 1 is less than epsilon,
# then choose the most promising value from the Q-table for this state.
if np.random.random() < epsilon:
return np.argmax(q_values[current_row_index, current_column_index])
else: # choose a random action
return np.random.randint(4)
# define a function that will get the next location based on the chosen action
def get_next_location(current_row_index, current_column_index, action_index):
new_row_index = current_row_index
new_column_index = current_column_index
if actions[action_index] == 'up' and current_row_index > 0:
new_row_index -= 1
elif actions[action_index] == 'right' and current_column_index < environment_columns - 1:
new_column_index += 1
elif actions[action_index] == 'down' and current_row_index < environment_rows - 1:
new_row_index += 1
elif actions[action_index] == 'left' and current_column_index > 0:
new_column_index -= 1
return new_row_index, new_column_index
# Define a function that will get the shortest path between any location within the source that
# the car is allowed to travel and the goal.
def get_shortest_path(start_row_index, start_column_index):
# return immediately if this is an invalid starting location
if is_terminal_state(start_row_index, start_column_index):
print("You are not on road please get to the road first")
return []
else: # if this is a 'legal' starting location
current_row_index, current_column_index = start_row_index, start_column_index
shortest_path = []
shortest_path.append([current_row_index, current_column_index])
# continue moving along the path until we reach the goal (i.e., the item packaging location)
while not is_terminal_state(current_row_index, current_column_index):
# get the best action to take
action_index = get_next_action(current_row_index, current_column_index, 1.)
# move to the next location on the path, and add the new location to the list
current_row_index, current_column_index = get_next_location(current_row_index, current_column_index, action_index)
shortest_path.append([current_row_index, current_column_index])
return shortest_path
# define training parameters
epsilon = 0.9 # the percentage of time when we should take the best action (instead of a random action)
discount_factor = 0.9 # discount factor for future rewards
learning_rate = 0.9 # the rate at which the AI agent should learn
# run through 1000 training episodes
for episode in range(1000):
# get the starting location for this episode
row_index, column_index = get_starting_location()
# continue taking actions (i.e., moving) until we reach a terminal state
# (i.e., until we reach goal or crash )
while not is_terminal_state(row_index, column_index):
# choose which action to take (i.e., where to move next)
action_index = get_next_action(row_index, column_index, epsilon)
# perform the chosen action, and transition to the next state (i.e., move to the next location)
old_row_index, old_column_index = row_index, column_index # store the old row and column indexes
row_index, column_index = get_next_location(row_index, column_index, action_index)
# receive the reward for moving to the new state, and calculate the temporal difference
reward = rewards[row_index, column_index]
old_q_value = q_values[old_row_index, old_column_index, action_index]
temporal_difference = reward + (discount_factor * np.max(q_values[row_index, column_index])) - old_q_value
# update the Q-value for the previous state and action pair
new_q_value = old_q_value + (learning_rate * temporal_difference)
q_values[old_row_index, old_column_index, action_index] = new_q_value
print('Training complete!')
sourceone = -1
sourcetwo = -1
sourcelo = input("Enter the source from same list Location : ")
for i in range(len(df)):
if df3[i] == sourcelo:
sourceone = df1[i]-23
sourcetwo = df2[i]-23
if sourceone == -1 or sourcetwo == -1:
print("Location not found please check for typos and case if you think u entered correct location")
exit()
q = get_shortest_path(sourceone, sourcetwo)
q1 = []
if q == q1:
print("Your are on the Destination :")
exit()
row = np.array(q)
x = []
y = []
for i in range(len(row)):
x.append(23+row[i][0])
y.append(23+row[i][1])
for i in range(len(x)-1):
for j in range(len(df)):
if df1[j] == x[i] and df2[j] == y[i]:
print(df3[j], "-->", end=" ")
print(goallo)
x = []
y = []
for i in range(len(row)):
x.append(row[i][0])
y.append(row[i][1])
# Plotting the Graph
plt.scatter(x, y)
plt.plot(x, y)
plt.xlabel("Latitude (in Minutes X 10^2)")
plt.ylabel("Longitude (in Minutes X 10^2)")
plt.show()
cv2.destroyAllWindows()
| get_starting_location | identifier_name |
main.py | # import libraries
import cv2
import time
import numpy as np
import sqlite3
import matplotlib.pyplot as plt
con = sqlite3.connect("traffic.db")
c = con.cursor()
data = c.execute("""SELECT * FROM data""")
rows = c.fetchall()
df = []
df1 = []
df2 = []
df3 = []
for rows in rows:
df.append(rows[0])
df1.append(rows[1])
df2.append(rows[2])
df3.append(rows[3])
con.commit()
c.close()
con.close()
def capturing(p):
| output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3)) # to get list of colors for each possible class
# Loading image
with open("{}.jpeg".format(counter), "wb") as f:
f.write(p)
frame = cv2.imread("{}.jpeg".format(counter))
frame = cv2.resize(frame, None, fx=0.4, fy=0.4)
height, width, channels = frame.shape
startingtime = time.time()
frame_id = 0
# Detecting objects
blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
l = ['person', 'car', 'truck', 'bus', 'bike']
m = dict({'person': 1, 'car': 15, 'truck': 20, 'bus': 20, 'bike': 5})
# Showing information on the screen
class_ids = []
confidences = []
boxes = [] # coordinate of bounding box
for out in outs:
for detection in out:
scores = detection[5:] # getting all 80 scores
class_id = np.argmax(scores) # finding the max score
confidence = scores[class_id]
# find out strong predictions greater then. 5
if confidence > 0.5:
# Object detected
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
# Rectangle coordinates
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
count_label = []
count = []
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
if label not in count_label:
if label in l:
count_label.append(label)
count.append(int(1))
else:
tmp = 0
for k in count_label:
if k == label:
count[tmp] = count[tmp] + 1
tmp = tmp + 1
color = colors[class_ids[i]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
cv2.putText(frame, label, (x, y + 30), font, 3, color, 3)
x = 0
for k in range(len(count_label)):
x = x + m[count_label[k]]
elapsed_time = time.time() - startingtime
fps = frame_id / elapsed_time
cv2.putText(frame, "FPS:" + str(fps), (10, 30), font, 3, (0, 0, 0), 1)
cv2.imshow("Image", frame)
key = cv2.waitKey(1) # 0 keeps on hold 1 waits for a millisecond
return x
# define the shape of the environment (i.e., its states)before importing map let's do it in 11 by 11 area
environment_rows = 6
environment_columns = 6
# Create a 3D numpy array to hold the current Q-values for each state and action pair: Q(s, a)
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# as well as a third "action" dimension.
# The "action" dimension consists of 4 layers that will allow us to keep track of
# the Q-values for each possible action in
# each state (see next cell for a description of possible actions).
# The value of each (state, action) pair is initialized to 0.
q_values = np.zeros((environment_rows, environment_columns, 4))
# define actions
# numeric action codes: 0 = up, 1 = right, 2 = down, 3 = left
actions = ['up', 'right', 'down', 'left']
# Create a 2D numpy array to hold the rewards for each state.
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# and each value is initialized to -999999.
rewards = np.full((environment_rows, environment_columns), -999999.)
k = 0
print("Pick the destination Location from the list")
print("Locations :")
for i in range(len(df)):
k = int(capturing(df[i]))
if k == 0:
k = 1
rewards[df1[i]-23, df2[i]-23] = k*(-1)
print(df3[i])
# taking the value of destination
goalone = -1
goaltwo = -1
goallo=input("Enter Destination Location : ")
for i in range(len(df)):
if df3[i] == goallo:
goalone = df1[i]-23
goaltwo = df2[i]-23
if goalone == -1 or goaltwo == -1:
print("Location not found please check for typos and case if you think u entered correct location")
exit()
# set the reward for reaching goal (i.e., the goal) to 999999
rewards[goalone,goaltwo] = 999999.0
# define a function that determines if the specified location is a terminal state
def is_terminal_state(current_row_index, current_column_index):
# if the reward for this location is -1, then it is not a terminal state
# (i.e., it is a path which we can travel)
if rewards[current_row_index, current_column_index] == 999999.0 or rewards[current_row_index, current_column_index] == -999999.0:
return True
else:
return False
# define a function that will choose a random, non-terminal starting location
def get_starting_location():
# get a random row and column index
current_row_index = np.random.randint(environment_rows)
current_column_index = np.random.randint(environment_columns)
# continue choosing random row and column indexes until a non-terminal state is identified
# (i.e., until the chosen state is a 'path which we can travel').
while is_terminal_state(current_row_index, current_column_index):
current_row_index = np.random.randint(environment_rows)
current_column_index = np.random.randint(environment_columns)
return current_row_index, current_column_index
# define an epsilon greedy algorithm that will choose which action to take next (i.e., where to move next)
def get_next_action(current_row_index, current_column_index, epsilon):
# if a randomly chosen value between 0 and 1 is less than epsilon,
# then choose the most promising value from the Q-table for this state.
if np.random.random() < epsilon:
return np.argmax(q_values[current_row_index, current_column_index])
else: # choose a random action
return np.random.randint(4)
# define a function that will get the next location based on the chosen action
def get_next_location(current_row_index, current_column_index, action_index):
new_row_index = current_row_index
new_column_index = current_column_index
if actions[action_index] == 'up' and current_row_index > 0:
new_row_index -= 1
elif actions[action_index] == 'right' and current_column_index < environment_columns - 1:
new_column_index += 1
elif actions[action_index] == 'down' and current_row_index < environment_rows - 1:
new_row_index += 1
elif actions[action_index] == 'left' and current_column_index > 0:
new_column_index -= 1
return new_row_index, new_column_index
# Define a function that will get the shortest path between any location within the source that
# the car is allowed to travel and the goal.
def get_shortest_path(start_row_index, start_column_index):
# return immediately if this is an invalid starting location
if is_terminal_state(start_row_index, start_column_index):
print("You are not on road please get to the road first")
return []
else: # if this is a 'legal' starting location
current_row_index, current_column_index = start_row_index, start_column_index
shortest_path = []
shortest_path.append([current_row_index, current_column_index])
# continue moving along the path until we reach the goal (i.e., the item packaging location)
while not is_terminal_state(current_row_index, current_column_index):
# get the best action to take
action_index = get_next_action(current_row_index, current_column_index, 1.)
# move to the next location on the path, and add the new location to the list
current_row_index, current_column_index = get_next_location(current_row_index, current_column_index, action_index)
shortest_path.append([current_row_index, current_column_index])
return shortest_path
# define training parameters
epsilon = 0.9 # the percentage of time when we should take the best action (instead of a random action)
discount_factor = 0.9 # discount factor for future rewards
learning_rate = 0.9 # the rate at which the AI agent should learn
# run through 1000 training episodes
for episode in range(1000):
# get the starting location for this episode
row_index, column_index = get_starting_location()
# continue taking actions (i.e., moving) until we reach a terminal state
# (i.e., until we reach goal or crash )
while not is_terminal_state(row_index, column_index):
# choose which action to take (i.e., where to move next)
action_index = get_next_action(row_index, column_index, epsilon)
# perform the chosen action, and transition to the next state (i.e., move to the next location)
old_row_index, old_column_index = row_index, column_index # store the old row and column indexes
row_index, column_index = get_next_location(row_index, column_index, action_index)
# receive the reward for moving to the new state, and calculate the temporal difference
reward = rewards[row_index, column_index]
old_q_value = q_values[old_row_index, old_column_index, action_index]
temporal_difference = reward + (discount_factor * np.max(q_values[row_index, column_index])) - old_q_value
# update the Q-value for the previous state and action pair
new_q_value = old_q_value + (learning_rate * temporal_difference)
q_values[old_row_index, old_column_index, action_index] = new_q_value
print('Training complete!')
sourceone = -1
sourcetwo = -1
sourcelo = input("Enter the source from same list Location : ")
for i in range(len(df)):
if df3[i] == sourcelo:
sourceone = df1[i]-23
sourcetwo = df2[i]-23
if sourceone == -1 or sourcetwo == -1:
print("Location not found please check for typos and case if you think u entered correct location")
exit()
q = get_shortest_path(sourceone, sourcetwo)
q1 = []
if q == q1:
print("Your are on the Destination :")
exit()
row = np.array(q)
x = []
y = []
for i in range(len(row)):
x.append(23+row[i][0])
y.append(23+row[i][1])
for i in range(len(x)-1):
for j in range(len(df)):
if df1[j] == x[i] and df2[j] == y[i]:
print(df3[j], "-->", end=" ")
print(goallo)
x = []
y = []
for i in range(len(row)):
x.append(row[i][0])
y.append(row[i][1])
# Plotting the Graph
plt.scatter(x, y)
plt.plot(x, y)
plt.xlabel("Latitude (in Minutes X 10^2)")
plt.ylabel("Longitude (in Minutes X 10^2)")
plt.show()
cv2.destroyAllWindows() | net = cv2.dnn.readNet("yolo/yolov3.weights", "yolo/yolov3.cfg")
counter = 1
with open("yolo/coco.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
| random_line_split |
main.py | # import libraries
import cv2
import time
import numpy as np
import sqlite3
import matplotlib.pyplot as plt
con = sqlite3.connect("traffic.db")
c = con.cursor()
data = c.execute("""SELECT * FROM data""")
rows = c.fetchall()
df = []
df1 = []
df2 = []
df3 = []
for rows in rows:
df.append(rows[0])
df1.append(rows[1])
df2.append(rows[2])
df3.append(rows[3])
con.commit()
c.close()
con.close()
def capturing(p):
net = cv2.dnn.readNet("yolo/yolov3.weights", "yolo/yolov3.cfg")
counter = 1
with open("yolo/coco.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3)) # to get list of colors for each possible class
# Loading image
with open("{}.jpeg".format(counter), "wb") as f:
f.write(p)
frame = cv2.imread("{}.jpeg".format(counter))
frame = cv2.resize(frame, None, fx=0.4, fy=0.4)
height, width, channels = frame.shape
startingtime = time.time()
frame_id = 0
# Detecting objects
blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
l = ['person', 'car', 'truck', 'bus', 'bike']
m = dict({'person': 1, 'car': 15, 'truck': 20, 'bus': 20, 'bike': 5})
# Showing information on the screen
class_ids = []
confidences = []
boxes = [] # coordinate of bounding box
for out in outs:
for detection in out:
scores = detection[5:] # getting all 80 scores
class_id = np.argmax(scores) # finding the max score
confidence = scores[class_id]
# find out strong predictions greater then. 5
if confidence > 0.5:
# Object detected
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
# Rectangle coordinates
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
count_label = []
count = []
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
if label not in count_label:
if label in l:
count_label.append(label)
count.append(int(1))
else:
tmp = 0
for k in count_label:
if k == label:
count[tmp] = count[tmp] + 1
tmp = tmp + 1
color = colors[class_ids[i]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
cv2.putText(frame, label, (x, y + 30), font, 3, color, 3)
x = 0
for k in range(len(count_label)):
x = x + m[count_label[k]]
elapsed_time = time.time() - startingtime
fps = frame_id / elapsed_time
cv2.putText(frame, "FPS:" + str(fps), (10, 30), font, 3, (0, 0, 0), 1)
cv2.imshow("Image", frame)
key = cv2.waitKey(1) # 0 keeps on hold 1 waits for a millisecond
return x
# define the shape of the environment (i.e., its states)before importing map let's do it in 11 by 11 area
environment_rows = 6
environment_columns = 6
# Create a 3D numpy array to hold the current Q-values for each state and action pair: Q(s, a)
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# as well as a third "action" dimension.
# The "action" dimension consists of 4 layers that will allow us to keep track of
# the Q-values for each possible action in
# each state (see next cell for a description of possible actions).
# The value of each (state, action) pair is initialized to 0.
q_values = np.zeros((environment_rows, environment_columns, 4))
# define actions
# numeric action codes: 0 = up, 1 = right, 2 = down, 3 = left
actions = ['up', 'right', 'down', 'left']
# Create a 2D numpy array to hold the rewards for each state.
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# and each value is initialized to -999999.
rewards = np.full((environment_rows, environment_columns), -999999.)
k = 0
print("Pick the destination Location from the list")
print("Locations :")
for i in range(len(df)):
k = int(capturing(df[i]))
if k == 0:
k = 1
rewards[df1[i]-23, df2[i]-23] = k*(-1)
print(df3[i])
# taking the value of destination
goalone = -1
goaltwo = -1
goallo=input("Enter Destination Location : ")
for i in range(len(df)):
if df3[i] == goallo:
goalone = df1[i]-23
goaltwo = df2[i]-23
if goalone == -1 or goaltwo == -1:
print("Location not found please check for typos and case if you think u entered correct location")
exit()
# set the reward for reaching goal (i.e., the goal) to 999999
rewards[goalone,goaltwo] = 999999.0
# define a function that determines if the specified location is a terminal state
def is_terminal_state(current_row_index, current_column_index):
# if the reward for this location is -1, then it is not a terminal state
# (i.e., it is a path which we can travel)
if rewards[current_row_index, current_column_index] == 999999.0 or rewards[current_row_index, current_column_index] == -999999.0:
return True
else:
return False
# define a function that will choose a random, non-terminal starting location
def get_starting_location():
# get a random row and column index
current_row_index = np.random.randint(environment_rows)
current_column_index = np.random.randint(environment_columns)
# continue choosing random row and column indexes until a non-terminal state is identified
# (i.e., until the chosen state is a 'path which we can travel').
while is_terminal_state(current_row_index, current_column_index):
current_row_index = np.random.randint(environment_rows)
current_column_index = np.random.randint(environment_columns)
return current_row_index, current_column_index
# define an epsilon greedy algorithm that will choose which action to take next (i.e., where to move next)
def get_next_action(current_row_index, current_column_index, epsilon):
# if a randomly chosen value between 0 and 1 is less than epsilon,
# then choose the most promising value from the Q-table for this state.
if np.random.random() < epsilon:
return np.argmax(q_values[current_row_index, current_column_index])
else: # choose a random action
return np.random.randint(4)
# define a function that will get the next location based on the chosen action
def get_next_location(current_row_index, current_column_index, action_index):
|
# Define a function that will get the shortest path between any location within the source that
# the car is allowed to travel and the goal.
def get_shortest_path(start_row_index, start_column_index):
# return immediately if this is an invalid starting location
if is_terminal_state(start_row_index, start_column_index):
print("You are not on road please get to the road first")
return []
else: # if this is a 'legal' starting location
current_row_index, current_column_index = start_row_index, start_column_index
shortest_path = []
shortest_path.append([current_row_index, current_column_index])
# continue moving along the path until we reach the goal (i.e., the item packaging location)
while not is_terminal_state(current_row_index, current_column_index):
# get the best action to take
action_index = get_next_action(current_row_index, current_column_index, 1.)
# move to the next location on the path, and add the new location to the list
current_row_index, current_column_index = get_next_location(current_row_index, current_column_index, action_index)
shortest_path.append([current_row_index, current_column_index])
return shortest_path
# define training parameters
epsilon = 0.9 # the percentage of time when we should take the best action (instead of a random action)
discount_factor = 0.9 # discount factor for future rewards
learning_rate = 0.9 # the rate at which the AI agent should learn
# run through 1000 training episodes
for episode in range(1000):
# get the starting location for this episode
row_index, column_index = get_starting_location()
# continue taking actions (i.e., moving) until we reach a terminal state
# (i.e., until we reach goal or crash )
while not is_terminal_state(row_index, column_index):
# choose which action to take (i.e., where to move next)
action_index = get_next_action(row_index, column_index, epsilon)
# perform the chosen action, and transition to the next state (i.e., move to the next location)
old_row_index, old_column_index = row_index, column_index # store the old row and column indexes
row_index, column_index = get_next_location(row_index, column_index, action_index)
# receive the reward for moving to the new state, and calculate the temporal difference
reward = rewards[row_index, column_index]
old_q_value = q_values[old_row_index, old_column_index, action_index]
temporal_difference = reward + (discount_factor * np.max(q_values[row_index, column_index])) - old_q_value
# update the Q-value for the previous state and action pair
new_q_value = old_q_value + (learning_rate * temporal_difference)
q_values[old_row_index, old_column_index, action_index] = new_q_value
print('Training complete!')
sourceone = -1
sourcetwo = -1
sourcelo = input("Enter the source from same list Location : ")
for i in range(len(df)):
if df3[i] == sourcelo:
sourceone = df1[i]-23
sourcetwo = df2[i]-23
if sourceone == -1 or sourcetwo == -1:
print("Location not found please check for typos and case if you think u entered correct location")
exit()
q = get_shortest_path(sourceone, sourcetwo)
q1 = []
if q == q1:
print("Your are on the Destination :")
exit()
row = np.array(q)
x = []
y = []
for i in range(len(row)):
x.append(23+row[i][0])
y.append(23+row[i][1])
for i in range(len(x)-1):
for j in range(len(df)):
if df1[j] == x[i] and df2[j] == y[i]:
print(df3[j], "-->", end=" ")
print(goallo)
x = []
y = []
for i in range(len(row)):
x.append(row[i][0])
y.append(row[i][1])
# Plotting the Graph
plt.scatter(x, y)
plt.plot(x, y)
plt.xlabel("Latitude (in Minutes X 10^2)")
plt.ylabel("Longitude (in Minutes X 10^2)")
plt.show()
cv2.destroyAllWindows()
| new_row_index = current_row_index
new_column_index = current_column_index
if actions[action_index] == 'up' and current_row_index > 0:
new_row_index -= 1
elif actions[action_index] == 'right' and current_column_index < environment_columns - 1:
new_column_index += 1
elif actions[action_index] == 'down' and current_row_index < environment_rows - 1:
new_row_index += 1
elif actions[action_index] == 'left' and current_column_index > 0:
new_column_index -= 1
return new_row_index, new_column_index | identifier_body |
main.py | # import libraries
import cv2
import time
import numpy as np
import sqlite3
import matplotlib.pyplot as plt
con = sqlite3.connect("traffic.db")
c = con.cursor()
data = c.execute("""SELECT * FROM data""")
rows = c.fetchall()
df = []
df1 = []
df2 = []
df3 = []
for rows in rows:
df.append(rows[0])
df1.append(rows[1])
df2.append(rows[2])
df3.append(rows[3])
con.commit()
c.close()
con.close()
def capturing(p):
net = cv2.dnn.readNet("yolo/yolov3.weights", "yolo/yolov3.cfg")
counter = 1
with open("yolo/coco.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3)) # to get list of colors for each possible class
# Loading image
with open("{}.jpeg".format(counter), "wb") as f:
f.write(p)
frame = cv2.imread("{}.jpeg".format(counter))
frame = cv2.resize(frame, None, fx=0.4, fy=0.4)
height, width, channels = frame.shape
startingtime = time.time()
frame_id = 0
# Detecting objects
blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
l = ['person', 'car', 'truck', 'bus', 'bike']
m = dict({'person': 1, 'car': 15, 'truck': 20, 'bus': 20, 'bike': 5})
# Showing information on the screen
class_ids = []
confidences = []
boxes = [] # coordinate of bounding box
for out in outs:
for detection in out:
scores = detection[5:] # getting all 80 scores
class_id = np.argmax(scores) # finding the max score
confidence = scores[class_id]
# find out strong predictions greater then. 5
if confidence > 0.5:
# Object detected
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
# Rectangle coordinates
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
count_label = []
count = []
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
if label not in count_label:
if label in l:
count_label.append(label)
count.append(int(1))
else:
tmp = 0
for k in count_label:
if k == label:
count[tmp] = count[tmp] + 1
tmp = tmp + 1
color = colors[class_ids[i]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
cv2.putText(frame, label, (x, y + 30), font, 3, color, 3)
x = 0
for k in range(len(count_label)):
x = x + m[count_label[k]]
elapsed_time = time.time() - startingtime
fps = frame_id / elapsed_time
cv2.putText(frame, "FPS:" + str(fps), (10, 30), font, 3, (0, 0, 0), 1)
cv2.imshow("Image", frame)
key = cv2.waitKey(1) # 0 keeps on hold 1 waits for a millisecond
return x
# define the shape of the environment (i.e., its states)before importing map let's do it in 11 by 11 area
environment_rows = 6
environment_columns = 6
# Create a 3D numpy array to hold the current Q-values for each state and action pair: Q(s, a)
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# as well as a third "action" dimension.
# The "action" dimension consists of 4 layers that will allow us to keep track of
# the Q-values for each possible action in
# each state (see next cell for a description of possible actions).
# The value of each (state, action) pair is initialized to 0.
q_values = np.zeros((environment_rows, environment_columns, 4))
# define actions
# numeric action codes: 0 = up, 1 = right, 2 = down, 3 = left
actions = ['up', 'right', 'down', 'left']
# Create a 2D numpy array to hold the rewards for each state.
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# and each value is initialized to -999999.
rewards = np.full((environment_rows, environment_columns), -999999.)
k = 0
print("Pick the destination Location from the list")
print("Locations :")
for i in range(len(df)):
k = int(capturing(df[i]))
if k == 0:
k = 1
rewards[df1[i]-23, df2[i]-23] = k*(-1)
print(df3[i])
# taking the value of destination
goalone = -1
goaltwo = -1
goallo=input("Enter Destination Location : ")
for i in range(len(df)):
if df3[i] == goallo:
goalone = df1[i]-23
goaltwo = df2[i]-23
if goalone == -1 or goaltwo == -1:
print("Location not found please check for typos and case if you think u entered correct location")
exit()
# set the reward for reaching goal (i.e., the goal) to 999999
rewards[goalone,goaltwo] = 999999.0
# define a function that determines if the specified location is a terminal state
def is_terminal_state(current_row_index, current_column_index):
# if the reward for this location is -1, then it is not a terminal state
# (i.e., it is a path which we can travel)
if rewards[current_row_index, current_column_index] == 999999.0 or rewards[current_row_index, current_column_index] == -999999.0:
return True
else:
return False
# define a function that will choose a random, non-terminal starting location
def get_starting_location():
# get a random row and column index
current_row_index = np.random.randint(environment_rows)
current_column_index = np.random.randint(environment_columns)
# continue choosing random row and column indexes until a non-terminal state is identified
# (i.e., until the chosen state is a 'path which we can travel').
while is_terminal_state(current_row_index, current_column_index):
current_row_index = np.random.randint(environment_rows)
current_column_index = np.random.randint(environment_columns)
return current_row_index, current_column_index
# define an epsilon greedy algorithm that will choose which action to take next (i.e., where to move next)
def get_next_action(current_row_index, current_column_index, epsilon):
# if a randomly chosen value between 0 and 1 is less than epsilon,
# then choose the most promising value from the Q-table for this state.
if np.random.random() < epsilon:
return np.argmax(q_values[current_row_index, current_column_index])
else: # choose a random action
return np.random.randint(4)
# define a function that will get the next location based on the chosen action
def get_next_location(current_row_index, current_column_index, action_index):
new_row_index = current_row_index
new_column_index = current_column_index
if actions[action_index] == 'up' and current_row_index > 0:
new_row_index -= 1
elif actions[action_index] == 'right' and current_column_index < environment_columns - 1:
new_column_index += 1
elif actions[action_index] == 'down' and current_row_index < environment_rows - 1:
new_row_index += 1
elif actions[action_index] == 'left' and current_column_index > 0:
new_column_index -= 1
return new_row_index, new_column_index
# Define a function that will get the shortest path between any location within the source that
# the car is allowed to travel and the goal.
def get_shortest_path(start_row_index, start_column_index):
# return immediately if this is an invalid starting location
if is_terminal_state(start_row_index, start_column_index):
print("You are not on road please get to the road first")
return []
else: # if this is a 'legal' starting location
current_row_index, current_column_index = start_row_index, start_column_index
shortest_path = []
shortest_path.append([current_row_index, current_column_index])
# continue moving along the path until we reach the goal (i.e., the item packaging location)
while not is_terminal_state(current_row_index, current_column_index):
# get the best action to take
action_index = get_next_action(current_row_index, current_column_index, 1.)
# move to the next location on the path, and add the new location to the list
current_row_index, current_column_index = get_next_location(current_row_index, current_column_index, action_index)
shortest_path.append([current_row_index, current_column_index])
return shortest_path
# define training parameters
epsilon = 0.9 # the percentage of time when we should take the best action (instead of a random action)
discount_factor = 0.9 # discount factor for future rewards
learning_rate = 0.9 # the rate at which the AI agent should learn
# run through 1000 training episodes
for episode in range(1000):
# get the starting location for this episode
row_index, column_index = get_starting_location()
# continue taking actions (i.e., moving) until we reach a terminal state
# (i.e., until we reach goal or crash )
while not is_terminal_state(row_index, column_index):
# choose which action to take (i.e., where to move next)
|
print('Training complete!')
sourceone = -1
sourcetwo = -1
sourcelo = input("Enter the source from same list Location : ")
for i in range(len(df)):
if df3[i] == sourcelo:
sourceone = df1[i]-23
sourcetwo = df2[i]-23
if sourceone == -1 or sourcetwo == -1:
print("Location not found please check for typos and case if you think u entered correct location")
exit()
q = get_shortest_path(sourceone, sourcetwo)
q1 = []
if q == q1:
print("Your are on the Destination :")
exit()
row = np.array(q)
x = []
y = []
for i in range(len(row)):
x.append(23+row[i][0])
y.append(23+row[i][1])
for i in range(len(x)-1):
for j in range(len(df)):
if df1[j] == x[i] and df2[j] == y[i]:
print(df3[j], "-->", end=" ")
print(goallo)
x = []
y = []
for i in range(len(row)):
x.append(row[i][0])
y.append(row[i][1])
# Plotting the Graph
plt.scatter(x, y)
plt.plot(x, y)
plt.xlabel("Latitude (in Minutes X 10^2)")
plt.ylabel("Longitude (in Minutes X 10^2)")
plt.show()
cv2.destroyAllWindows()
| action_index = get_next_action(row_index, column_index, epsilon)
# perform the chosen action, and transition to the next state (i.e., move to the next location)
old_row_index, old_column_index = row_index, column_index # store the old row and column indexes
row_index, column_index = get_next_location(row_index, column_index, action_index)
# receive the reward for moving to the new state, and calculate the temporal difference
reward = rewards[row_index, column_index]
old_q_value = q_values[old_row_index, old_column_index, action_index]
temporal_difference = reward + (discount_factor * np.max(q_values[row_index, column_index])) - old_q_value
# update the Q-value for the previous state and action pair
new_q_value = old_q_value + (learning_rate * temporal_difference)
q_values[old_row_index, old_column_index, action_index] = new_q_value | conditional_block |
smart_contract_service_impl.go | package service
import (
"errors"
"it-chain/domain"
"strings"
"os"
"time"
"io/ioutil"
"bytes"
"context"
"docker.io/go-docker"
"io"
"docker.io/go-docker/api/types"
"docker.io/go-docker/api/types/container"
"encoding/json"
"bufio"
"os/exec"
"it-chain/common"
"fmt"
"github.com/spf13/viper"
)
const (
TMP_DIR string = "/tmp"
)
var logger_s = common.GetLogger("smart_contract_service.go")
type SmartContract struct {
Name string
OriginReposPath string
SmartContractPath string
}
type SmartContractServiceImpl struct {
GithubID string
SmartContractDirPath string
SmartContractMap map[string]SmartContract
}
func Init() {
}
func NewSmartContractService(githubID string,smartContractDirPath string) SmartContractService{
return &SmartContractServiceImpl{
GithubID:githubID,
SmartContractDirPath:smartContractDirPath,
SmartContractMap: make(map[string]SmartContract),
}
}
func (scs *SmartContractServiceImpl) PullAllSmartContracts(authenticatedGit string, errorHandler func(error),
completionHandler func()) {
go func() {
repoList, err := domain.GetRepositoryList(authenticatedGit)
if err != nil {
errorHandler(errors.New("An error was occured during getting repository list"))
return
}
for _, repo := range repoList {
localReposPath := scs.SmartContractDirPath + "/" +
strings.Replace(repo.FullName, "/", "_", -1)
err = os.MkdirAll(localReposPath, 0755)
if err != nil {
errorHandler(errors.New("An error was occured during making repository path"))
return
}
commits, err := domain.GetReposCommits(repo.FullName)
if err != nil {
errorHandler(errors.New("An error was occured during getting commit logs"))
return
}
for _, commit := range commits {
if commit.Author.Login == authenticatedGit {
err := domain.CloneReposWithName(repo.FullName, localReposPath, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during cloning with name"))
return
}
err = domain.ResetWithSHA(localReposPath + "/" + commit.Sha, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during resetting with SHA"))
return
}
}
}
}
completionHandler()
return
}()
}
func (scs *SmartContractServiceImpl) Deploy(ReposPath string) (string, error) {
origin_repos_name := strings.Split(ReposPath, "/")[1]
new_repos_name := strings.Replace(ReposPath, "/", "_", -1)
_, ok := scs.keyByValue(ReposPath)
if ok {
// 버전 업데이트 기능 추가 필요
return "", errors.New("Already exist smart contract ID")
}
repos, err := domain.GetRepos(ReposPath)
if err != nil {
return "", errors.New("An error occured while getting repos!")
}
if repos.Message == "Bad credentials" {
return "", errors.New("Not Exist Repos!")
}
err = os.MkdirAll(scs.SmartContractDirPath + "/" + new_repos_name, 0755)
if err != nil {
return "", errors.New("An error occured while make repository's directory!")
}
//todo gitpath이미 존재하는지 확인
err = domain.CloneRepos(ReposPath, scs.SmartContractDirPath + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
common.Log.Println(viper.GetString("smartContract.githubID"))
_, err = domain.CreateRepos(new_repos_name, viper.GetString("smartContract.githubAccessToken"))
if err != nil {
return "", errors.New(err.Error())//"An error occured while creating repos!")
}
err = domain.ChangeRemote(scs.GithubID + "/" + new_repos_name, scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
// 버전 관리를 위한 파일 추가
now := time.Now().Format("2006-01-02 15:04:05");
file, err := os.OpenFile(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name + "/version", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil {
return "", errors.New("An error occured while creating or opening file!")
}
_, err = file.WriteString("Deployed at " + now + "\n")
if err != nil {
return "", errors.New("An error occured while writing file!")
}
err = file.Close()
if err != nil {
return "", errors.New("An error occured while closing file!")
}
err = domain.CommitAndPush(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, "It-Chain Smart Contract \"" + new_repos_name + "\" Deploy")
if err != nil {
return "", errors.New(err.Error())
//return "", errors.New("An error occured while committing and pushing!")
}
githubResponseCommits, err := domain.GetReposCommits(scs.GithubID + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while getting commit log!")
}
reposDirPath := scs.SmartContractDirPath + "/" + new_repos_name + "/" + githubResponseCommits[0].Sha
err = os.Rename(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, reposDirPath)
if err != nil {
return "", errors.New("An error occured while renaming directory!")
}
scs.SmartContractMap[githubResponseCommits[0].Sha] = SmartContract{new_repos_name, ReposPath, ""}
return githubResponseCommits[0].Sha, nil
}
/***************************************************
* 1. smartcontract 검사
* 2. smartcontract -> sc.tar : 애초에 풀 받을 때 압축해 둘 수 있음
* 3. go 버전에 맞는 docker image를 Create
* 4. sc.tar를 docker container로 복사
* 5. docker container Start
* 6. docker에서 smartcontract 실행
****************************************************/
func (scs *SmartContractServiceImpl) Query(transaction domain.Transaction) (error) {
/*** Set Transaction Arg ***/
logger_s.Errorln("query start")
tx_bytes, err := json.Marshal(transaction)
if err != nil {
return errors.New("Tx Marshal Error")
}
sc, ok := scs.SmartContractMap[transaction.TxData.ContractID];
if !ok {
logger_s.Errorln("Not exist contract ID")
return errors.New("Not exist contract ID")
}
_, err = os.Stat(sc.SmartContractPath)
if os.IsNotExist(err) {
logger_s.Errorln("File or Directory Not Exist")
return errors.New("File or Directory Not Exist")
}
/*** smartcontract build ***/
logger_s.Errorln("build start")
cmd := exec.Command("env", "GOOS=linux", "go", "build", "-o", TMP_DIR + "/" + sc.Name, "./" + sc.Name + ".go")
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("SmartContract build error")
return err
}
cmd = exec.Command("chmod", "777", TMP_DIR + "/" + sc.Name)
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("Chmod Error")
return err
}
logger_s.Errorln("make tar")
err = domain.MakeTar(TMP_DIR + "/" + sc.Name, TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving smartcontract file!")
return err
}
err = domain.MakeTar("$GOPATH/src/it-chain/smartcontract/worldstatedb", TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving worldstateDB file!")
return err
}
logger_s.Errorln("exec cmd")
// tar config file
cmd = exec.Command("tar", "-cf", TMP_DIR + "/config.tar", "./it-chain/config.yaml")
cmd.Dir = "../../"
err = cmd.Run()
if err != nil {
logger_s.Errorln("An error occured while archiving config file!")
return err
}
logger_s.Errorln("Pulling image")
// Docker Code
imageName := "docker.io/library/golang:1.9.2-alpine3.6"
tarPath := TMP_DIR + "/" + sc.Name + ".tar"
tarPath_wsdb := TMP_DIR + "/worldstatedb.tar"
tarPath_config := TMP_DIR + "/config.tar"
ctx := context.Background()
cli, err := docker.NewEnvClient()
if err != nil {
logger_s.Errorln("An error occured while creating new Docker Client!")
return err
}
out, err := cli.ImagePull(ctx, imageName, types.ImagePullOptions{})
if err != nil {
logger_s.Errorln("An error oeccured while pulling docker image!")
return err
}
io.Copy(os.Stdout, out)
imageName_splited := strings.Split(imageName, "/")
image := imageName_splited[len(imageName_splited)-1]
resp, err := cli.ContainerCreate(ctx, &container.Config{
Image: image,
Cmd: []string{"/go/src/" + sc.Name, string(tx_bytes)},
Tty: true,
AttachStdout: true,
AttachStderr: true,
}, nil, nil, "")
if err != nil {
logger_s.Errorln("An error occured while creating docker container!")
return err
}
/*** read tar file ***/
file, err := ioutil.ReadFile(tarPath)
if err != nil {
logger_s.Errorln("An error occured while reading smartcontract tar file!")
return err
}
wsdb, err := ioutil.ReadFile(tarPath_wsdb)
if err != nil {
logger_s.Errorln("An error occured while reading worldstateDB tar file!")
return err
}
config, err := ioutil.ReadFil | tar file!")
return err
}
/*** copy file to docker ***/
err = cli.CopyToContainer(ctx, resp.ID, "/go/src/", bytes.NewReader(file), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
logger_s.Errorln("An error occured while copying the smartcontract to the container!")
return err
}
err = cli.CopyToContainer(ctx, resp.ID, "/go/src/", bytes.NewReader(wsdb), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
logger_s.Errorln("An error occured while copying the worldstateDB to the container!")
return err
}
err = cli.CopyToContainer(ctx, resp.ID, "/go/src/", bytes.NewReader(config), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
logger_s.Errorln("An error occured while copying the config to the container!")
return err
}
err = cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
if err != nil {
logger_s.Errorln("An error occured while starting the container!")
return err
}
/* get docker output
----------------------*/
fmt.Println("============<Docker Output>=============")
reader, err := cli.ContainerLogs(context.Background(), resp.ID, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
Timestamps: false,
})
if err != nil {
logger_s.Errorln("An error occured while getting the output!")
return err
}
defer reader.Close()
var output = ""
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
output += scanner.Text() + "\n"
}
fmt.Println(output)
smartContractResponse := &domain.SmartContractResponse{}
err = json.Unmarshal([]byte(output), smartContractResponse)
fmt.Println("----Marshaled Output----")
fmt.Println(smartContractResponse)
if smartContractResponse.Result == domain.SUCCESS {
logger_s.Println("Running smartcontract is success")
// tx hash reset
// real running smartcontract
} else if smartContractResponse.Result == domain.FAIL {
logger_s.Errorln("An error occured while running smartcontract!")
}
return nil
}
func (scs *SmartContractServiceImpl) Invoke() {
}
func (scs *SmartContractServiceImpl) keyByValue(OriginReposPath string) (key string, ok bool) {
contractName := strings.Replace(OriginReposPath, "/", "^", -1)
for k, v := range scs.SmartContractMap {
if contractName == v.OriginReposPath {
key = k
ok = true
return key, ok
}
}
return "", false
} | e(tarPath_config)
if err != nil {
logger_s.Errorln("An error occured while reading config | conditional_block |
smart_contract_service_impl.go | package service
import (
"errors"
"it-chain/domain"
"strings"
"os"
"time"
"io/ioutil"
"bytes"
"context"
"docker.io/go-docker"
"io"
"docker.io/go-docker/api/types"
"docker.io/go-docker/api/types/container"
"encoding/json"
"bufio"
"os/exec"
"it-chain/common"
"fmt"
"github.com/spf13/viper"
)
const (
TMP_DIR string = "/tmp"
)
var logger_s = common.GetLogger("smart_contract_service.go")
type SmartContract struct {
Name string
OriginReposPath string
SmartContractPath string
}
type SmartContractServiceImpl struct {
GithubID string
SmartContractDirPath string
SmartContractMap map[string]SmartContract
}
func | () {
}
func NewSmartContractService(githubID string,smartContractDirPath string) SmartContractService{
return &SmartContractServiceImpl{
GithubID:githubID,
SmartContractDirPath:smartContractDirPath,
SmartContractMap: make(map[string]SmartContract),
}
}
func (scs *SmartContractServiceImpl) PullAllSmartContracts(authenticatedGit string, errorHandler func(error),
completionHandler func()) {
go func() {
repoList, err := domain.GetRepositoryList(authenticatedGit)
if err != nil {
errorHandler(errors.New("An error was occured during getting repository list"))
return
}
for _, repo := range repoList {
localReposPath := scs.SmartContractDirPath + "/" +
strings.Replace(repo.FullName, "/", "_", -1)
err = os.MkdirAll(localReposPath, 0755)
if err != nil {
errorHandler(errors.New("An error was occured during making repository path"))
return
}
commits, err := domain.GetReposCommits(repo.FullName)
if err != nil {
errorHandler(errors.New("An error was occured during getting commit logs"))
return
}
for _, commit := range commits {
if commit.Author.Login == authenticatedGit {
err := domain.CloneReposWithName(repo.FullName, localReposPath, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during cloning with name"))
return
}
err = domain.ResetWithSHA(localReposPath + "/" + commit.Sha, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during resetting with SHA"))
return
}
}
}
}
completionHandler()
return
}()
}
func (scs *SmartContractServiceImpl) Deploy(ReposPath string) (string, error) {
origin_repos_name := strings.Split(ReposPath, "/")[1]
new_repos_name := strings.Replace(ReposPath, "/", "_", -1)
_, ok := scs.keyByValue(ReposPath)
if ok {
// 버전 업데이트 기능 추가 필요
return "", errors.New("Already exist smart contract ID")
}
repos, err := domain.GetRepos(ReposPath)
if err != nil {
return "", errors.New("An error occured while getting repos!")
}
if repos.Message == "Bad credentials" {
return "", errors.New("Not Exist Repos!")
}
err = os.MkdirAll(scs.SmartContractDirPath + "/" + new_repos_name, 0755)
if err != nil {
return "", errors.New("An error occured while make repository's directory!")
}
//todo gitpath이미 존재하는지 확인
err = domain.CloneRepos(ReposPath, scs.SmartContractDirPath + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
common.Log.Println(viper.GetString("smartContract.githubID"))
_, err = domain.CreateRepos(new_repos_name, viper.GetString("smartContract.githubAccessToken"))
if err != nil {
return "", errors.New(err.Error())//"An error occured while creating repos!")
}
err = domain.ChangeRemote(scs.GithubID + "/" + new_repos_name, scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
// 버전 관리를 위한 파일 추가
now := time.Now().Format("2006-01-02 15:04:05");
file, err := os.OpenFile(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name + "/version", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil {
return "", errors.New("An error occured while creating or opening file!")
}
_, err = file.WriteString("Deployed at " + now + "\n")
if err != nil {
return "", errors.New("An error occured while writing file!")
}
err = file.Close()
if err != nil {
return "", errors.New("An error occured while closing file!")
}
err = domain.CommitAndPush(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, "It-Chain Smart Contract \"" + new_repos_name + "\" Deploy")
if err != nil {
return "", errors.New(err.Error())
//return "", errors.New("An error occured while committing and pushing!")
}
githubResponseCommits, err := domain.GetReposCommits(scs.GithubID + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while getting commit log!")
}
reposDirPath := scs.SmartContractDirPath + "/" + new_repos_name + "/" + githubResponseCommits[0].Sha
err = os.Rename(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, reposDirPath)
if err != nil {
return "", errors.New("An error occured while renaming directory!")
}
scs.SmartContractMap[githubResponseCommits[0].Sha] = SmartContract{new_repos_name, ReposPath, ""}
return githubResponseCommits[0].Sha, nil
}
/***************************************************
* 1. smartcontract 검사
* 2. smartcontract -> sc.tar : 애초에 풀 받을 때 압축해 둘 수 있음
* 3. go 버전에 맞는 docker image를 Create
* 4. sc.tar를 docker container로 복사
* 5. docker container Start
* 6. docker에서 smartcontract 실행
****************************************************/
func (scs *SmartContractServiceImpl) Query(transaction domain.Transaction) (error) {
/*** Set Transaction Arg ***/
logger_s.Errorln("query start")
tx_bytes, err := json.Marshal(transaction)
if err != nil {
return errors.New("Tx Marshal Error")
}
sc, ok := scs.SmartContractMap[transaction.TxData.ContractID];
if !ok {
logger_s.Errorln("Not exist contract ID")
return errors.New("Not exist contract ID")
}
_, err = os.Stat(sc.SmartContractPath)
if os.IsNotExist(err) {
logger_s.Errorln("File or Directory Not Exist")
return errors.New("File or Directory Not Exist")
}
/*** smartcontract build ***/
logger_s.Errorln("build start")
cmd := exec.Command("env", "GOOS=linux", "go", "build", "-o", TMP_DIR + "/" + sc.Name, "./" + sc.Name + ".go")
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("SmartContract build error")
return err
}
cmd = exec.Command("chmod", "777", TMP_DIR + "/" + sc.Name)
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("Chmod Error")
return err
}
logger_s.Errorln("make tar")
err = domain.MakeTar(TMP_DIR + "/" + sc.Name, TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving smartcontract file!")
return err
}
err = domain.MakeTar("$GOPATH/src/it-chain/smartcontract/worldstatedb", TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving worldstateDB file!")
return err
}
logger_s.Errorln("exec cmd")
// tar config file
cmd = exec.Command("tar", "-cf", TMP_DIR + "/config.tar", "./it-chain/config.yaml")
cmd.Dir = "../../"
err = cmd.Run()
if err != nil {
logger_s.Errorln("An error occured while archiving config file!")
return err
}
logger_s.Errorln("Pulling image")
// Docker Code
imageName := "docker.io/library/golang:1.9.2-alpine3.6"
tarPath := TMP_DIR + "/" + sc.Name + ".tar"
tarPath_wsdb := TMP_DIR + "/worldstatedb.tar"
tarPath_config := TMP_DIR + "/config.tar"
ctx := context.Background()
cli, err := docker.NewEnvClient()
if err != nil {
logger_s.Errorln("An error occured while creating new Docker Client!")
return err
}
out, err := cli.ImagePull(ctx, imageName, types.ImagePullOptions{})
if err != nil {
logger_s.Errorln("An error oeccured while pulling docker image!")
return err
}
io.Copy(os.Stdout, out)
imageName_splited := strings.Split(imageName, "/")
image := imageName_splited[len(imageName_splited)-1]
resp, err := cli.ContainerCreate(ctx, &container.Config{
Image: image,
Cmd: []string{"/go/src/" + sc.Name, string(tx_bytes)},
Tty: true,
AttachStdout: true,
AttachStderr: true,
}, nil, nil, "")
if err != nil {
logger_s.Errorln("An error occured while creating docker container!")
return err
}
/*** read tar file ***/
file, err := ioutil.ReadFile(tarPath)
if err != nil {
logger_s.Errorln("An error occured while reading smartcontract tar file!")
return err
}
wsdb, err := ioutil.ReadFile(tarPath_wsdb)
if err != nil {
logger_s.Errorln("An error occured while reading worldstateDB tar file!")
return err
}
config, err := ioutil.ReadFile(tarPath_config)
if err != nil {
logger_s.Errorln("An error occured while reading config tar file!")
return err
}
/*** copy file to docker ***/
err = cli.CopyToContainer(ctx, resp.ID, "/go/src/", bytes.NewReader(file), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
logger_s.Errorln("An error occured while copying the smartcontract to the container!")
return err
}
err = cli.CopyToContainer(ctx, resp.ID, "/go/src/", bytes.NewReader(wsdb), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
logger_s.Errorln("An error occured while copying the worldstateDB to the container!")
return err
}
err = cli.CopyToContainer(ctx, resp.ID, "/go/src/", bytes.NewReader(config), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
logger_s.Errorln("An error occured while copying the config to the container!")
return err
}
err = cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
if err != nil {
logger_s.Errorln("An error occured while starting the container!")
return err
}
/* get docker output
----------------------*/
fmt.Println("============<Docker Output>=============")
reader, err := cli.ContainerLogs(context.Background(), resp.ID, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
Timestamps: false,
})
if err != nil {
logger_s.Errorln("An error occured while getting the output!")
return err
}
defer reader.Close()
var output = ""
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
output += scanner.Text() + "\n"
}
fmt.Println(output)
smartContractResponse := &domain.SmartContractResponse{}
err = json.Unmarshal([]byte(output), smartContractResponse)
fmt.Println("----Marshaled Output----")
fmt.Println(smartContractResponse)
if smartContractResponse.Result == domain.SUCCESS {
logger_s.Println("Running smartcontract is success")
// tx hash reset
// real running smartcontract
} else if smartContractResponse.Result == domain.FAIL {
logger_s.Errorln("An error occured while running smartcontract!")
}
return nil
}
func (scs *SmartContractServiceImpl) Invoke() {
}
func (scs *SmartContractServiceImpl) keyByValue(OriginReposPath string) (key string, ok bool) {
contractName := strings.Replace(OriginReposPath, "/", "^", -1)
for k, v := range scs.SmartContractMap {
if contractName == v.OriginReposPath {
key = k
ok = true
return key, ok
}
}
return "", false
} | Init | identifier_name |
smart_contract_service_impl.go | package service
import (
"errors"
"it-chain/domain"
"strings"
"os"
"time"
"io/ioutil"
"bytes"
"context"
"docker.io/go-docker"
"io"
"docker.io/go-docker/api/types"
"docker.io/go-docker/api/types/container"
"encoding/json"
"bufio"
"os/exec"
"it-chain/common"
"fmt"
"github.com/spf13/viper"
)
const (
TMP_DIR string = "/tmp"
)
var logger_s = common.GetLogger("smart_contract_service.go")
type SmartContract struct {
Name string
OriginReposPath string
SmartContractPath string
}
type SmartContractServiceImpl struct {
GithubID string
SmartContractDirPath string
SmartContractMap map[string]SmartContract
}
func Init() {
}
func NewSmartContractService(githubID string,smartContractDirPath string) SmartContractService{
return &SmartContractServiceImpl{
GithubID:githubID,
SmartContractDirPath:smartContractDirPath,
SmartContractMap: make(map[string]SmartContract),
}
}
func (scs *SmartContractServiceImpl) PullAllSmartContracts(authenticatedGit string, errorHandler func(error),
completionHandler func()) {
go func() {
repoList, err := domain.GetRepositoryList(authenticatedGit)
if err != nil {
errorHandler(errors.New("An error was occured during getting repository list"))
return
}
for _, repo := range repoList {
localReposPath := scs.SmartContractDirPath + "/" +
strings.Replace(repo.FullName, "/", "_", -1)
err = os.MkdirAll(localReposPath, 0755)
if err != nil {
errorHandler(errors.New("An error was occured during making repository path"))
return
}
commits, err := domain.GetReposCommits(repo.FullName)
if err != nil {
errorHandler(errors.New("An error was occured during getting commit logs"))
return
}
for _, commit := range commits {
if commit.Author.Login == authenticatedGit {
err := domain.CloneReposWithName(repo.FullName, localReposPath, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during cloning with name"))
return
}
err = domain.ResetWithSHA(localReposPath + "/" + commit.Sha, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during resetting with SHA"))
return
}
}
}
}
completionHandler()
return
}()
}
func (scs *SmartContractServiceImpl) Deploy(ReposPath string) (string, error) {
origin_repos_name := strings.Split(ReposPath, "/")[1]
new_repos_name := strings.Replace(ReposPath, "/", "_", -1)
_, ok := scs.keyByValue(ReposPath)
if ok {
// 버전 업데이트 기능 추가 필요
return "", errors.New("Already exist smart contract ID")
}
repos, err := domain.GetRepos(ReposPath)
if err != nil {
return "", errors.New("An error occured while getting repos!")
}
if repos.Message == "Bad credentials" {
return "", errors.New("Not Exist Repos!")
}
err = os.MkdirAll(scs.SmartContractDirPath + "/" + new_repos_name, 0755)
if err != nil {
return "", errors.New("An error occured while make repository's directory!")
}
//todo gitpath이미 존재하는지 확인
err = domain.CloneRepos(ReposPath, scs.SmartContractDirPath + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
common.Log.Println(viper.GetString("smartContract.githubID"))
_, err = domain.CreateRepos(new_repos_name, viper.GetString("smartContract.githubAccessToken"))
if err != nil {
return "", errors.New(err.Error())//"An error occured while creating repos!")
}
err = domain.ChangeRemote(scs.GithubID + "/" + new_repos_name, scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
// 버전 관리를 위한 파일 추가
now := time.Now().Format("2006-01-02 15:04:05");
file, err := os.OpenFile(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name + "/version", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil {
return "", errors.New("An error occured while creating or opening file!")
}
_, err = file.WriteString("Deployed at " + now + "\n")
if err != nil {
return "", errors.New("An error occured while writing file!")
}
err = file.Close()
if err != nil {
return "", errors.New("An error occured while closing file!")
}
err = domain.CommitAndPush(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, "It-Chain Smart Contract \"" + new_repos_name + "\" Deploy")
if err != nil {
return "", errors.New(err.Error())
//return "", errors.New("An error occured while committing and pushing!")
}
githubResponseCommits, err := domain.GetReposCommits(scs.GithubID + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while getting commit log!")
}
reposDirPath := scs.SmartContractDirPath + "/" + new_repos_name + "/" + githubResponseCommits[0].Sha
err = os.Rename(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, reposDirPath)
if err != nil {
return "", errors.New("An error occured while renaming directory!")
}
scs.SmartContractMap[githubResponseCommits[0].Sha] = SmartContract{new_repos_name, ReposPath, ""}
return githubResponseCommits[0].Sha, nil
}
/***************************************************
* 1. smartcontract 검사
* 2. smartcontract -> sc.tar : 애초에 풀 받을 때 압축해 둘 수 있음
* 3. go 버전에 맞는 docker image를 Create
* 4. sc.tar를 docker container로 복사
* 5. docker container Start
* 6. docker에서 smartcontract 실행
****************************************************/
func (scs *SmartContractServiceImpl) Query(transaction domain.Transaction) (error) {
/*** Set Transaction Arg ***/
logger_s.Errorln("query start")
tx_bytes, err := json.Marshal(transaction)
if err != nil | g) (key string, ok bool) {
contractName := strings.Replace(OriginReposPath, "/", "^", -1)
for k, v := range scs.SmartContractMap {
if contractName == v.OriginReposPath {
key = k
ok = true
return key, ok
}
}
return "", false
} | {
return errors.New("Tx Marshal Error")
}
sc, ok := scs.SmartContractMap[transaction.TxData.ContractID];
if !ok {
logger_s.Errorln("Not exist contract ID")
return errors.New("Not exist contract ID")
}
_, err = os.Stat(sc.SmartContractPath)
if os.IsNotExist(err) {
logger_s.Errorln("File or Directory Not Exist")
return errors.New("File or Directory Not Exist")
}
/*** smartcontract build ***/
logger_s.Errorln("build start")
cmd := exec.Command("env", "GOOS=linux", "go", "build", "-o", TMP_DIR + "/" + sc.Name, "./" + sc.Name + ".go")
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("SmartContract build error")
return err
}
cmd = exec.Command("chmod", "777", TMP_DIR + "/" + sc.Name)
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("Chmod Error")
return err
}
logger_s.Errorln("make tar")
err = domain.MakeTar(TMP_DIR + "/" + sc.Name, TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving smartcontract file!")
return err
}
err = domain.MakeTar("$GOPATH/src/it-chain/smartcontract/worldstatedb", TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving worldstateDB file!")
return err
}
logger_s.Errorln("exec cmd")
// tar config file
cmd = exec.Command("tar", "-cf", TMP_DIR + "/config.tar", "./it-chain/config.yaml")
cmd.Dir = "../../"
err = cmd.Run()
if err != nil {
logger_s.Errorln("An error occured while archiving config file!")
return err
}
logger_s.Errorln("Pulling image")
// Docker Code
imageName := "docker.io/library/golang:1.9.2-alpine3.6"
tarPath := TMP_DIR + "/" + sc.Name + ".tar"
tarPath_wsdb := TMP_DIR + "/worldstatedb.tar"
tarPath_config := TMP_DIR + "/config.tar"
ctx := context.Background()
cli, err := docker.NewEnvClient()
if err != nil {
logger_s.Errorln("An error occured while creating new Docker Client!")
return err
}
out, err := cli.ImagePull(ctx, imageName, types.ImagePullOptions{})
if err != nil {
logger_s.Errorln("An error oeccured while pulling docker image!")
return err
}
io.Copy(os.Stdout, out)
imageName_splited := strings.Split(imageName, "/")
image := imageName_splited[len(imageName_splited)-1]
resp, err := cli.ContainerCreate(ctx, &container.Config{
Image: image,
Cmd: []string{"/go/src/" + sc.Name, string(tx_bytes)},
Tty: true,
AttachStdout: true,
AttachStderr: true,
}, nil, nil, "")
if err != nil {
logger_s.Errorln("An error occured while creating docker container!")
return err
}
/*** read tar file ***/
file, err := ioutil.ReadFile(tarPath)
if err != nil {
logger_s.Errorln("An error occured while reading smartcontract tar file!")
return err
}
wsdb, err := ioutil.ReadFile(tarPath_wsdb)
if err != nil {
logger_s.Errorln("An error occured while reading worldstateDB tar file!")
return err
}
config, err := ioutil.ReadFile(tarPath_config)
if err != nil {
logger_s.Errorln("An error occured while reading config tar file!")
return err
}
/*** copy file to docker ***/
err = cli.CopyToContainer(ctx, resp.ID, "/go/src/", bytes.NewReader(file), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
logger_s.Errorln("An error occured while copying the smartcontract to the container!")
return err
}
err = cli.CopyToContainer(ctx, resp.ID, "/go/src/", bytes.NewReader(wsdb), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
logger_s.Errorln("An error occured while copying the worldstateDB to the container!")
return err
}
err = cli.CopyToContainer(ctx, resp.ID, "/go/src/", bytes.NewReader(config), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
logger_s.Errorln("An error occured while copying the config to the container!")
return err
}
err = cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
if err != nil {
logger_s.Errorln("An error occured while starting the container!")
return err
}
/* get docker output
----------------------*/
fmt.Println("============<Docker Output>=============")
reader, err := cli.ContainerLogs(context.Background(), resp.ID, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
Timestamps: false,
})
if err != nil {
logger_s.Errorln("An error occured while getting the output!")
return err
}
defer reader.Close()
var output = ""
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
output += scanner.Text() + "\n"
}
fmt.Println(output)
smartContractResponse := &domain.SmartContractResponse{}
err = json.Unmarshal([]byte(output), smartContractResponse)
fmt.Println("----Marshaled Output----")
fmt.Println(smartContractResponse)
if smartContractResponse.Result == domain.SUCCESS {
logger_s.Println("Running smartcontract is success")
// tx hash reset
// real running smartcontract
} else if smartContractResponse.Result == domain.FAIL {
logger_s.Errorln("An error occured while running smartcontract!")
}
return nil
}
func (scs *SmartContractServiceImpl) Invoke() {
}
func (scs *SmartContractServiceImpl) keyByValue(OriginReposPath strin | identifier_body |
smart_contract_service_impl.go | package service
import (
"errors"
"it-chain/domain"
"strings"
"os"
"time"
"io/ioutil"
"bytes"
"context"
"docker.io/go-docker"
"io"
"docker.io/go-docker/api/types"
"docker.io/go-docker/api/types/container"
"encoding/json"
"bufio"
"os/exec"
"it-chain/common"
"fmt"
"github.com/spf13/viper"
)
const (
TMP_DIR string = "/tmp"
)
var logger_s = common.GetLogger("smart_contract_service.go")
type SmartContract struct {
Name string
OriginReposPath string
SmartContractPath string
}
type SmartContractServiceImpl struct {
GithubID string
SmartContractDirPath string
SmartContractMap map[string]SmartContract
}
func Init() {
}
func NewSmartContractService(githubID string,smartContractDirPath string) SmartContractService{
return &SmartContractServiceImpl{
GithubID:githubID,
SmartContractDirPath:smartContractDirPath,
SmartContractMap: make(map[string]SmartContract),
}
}
func (scs *SmartContractServiceImpl) PullAllSmartContracts(authenticatedGit string, errorHandler func(error),
completionHandler func()) {
go func() {
repoList, err := domain.GetRepositoryList(authenticatedGit)
if err != nil {
errorHandler(errors.New("An error was occured during getting repository list"))
return
}
for _, repo := range repoList {
localReposPath := scs.SmartContractDirPath + "/" +
strings.Replace(repo.FullName, "/", "_", -1)
err = os.MkdirAll(localReposPath, 0755)
if err != nil {
errorHandler(errors.New("An error was occured during making repository path"))
return
}
commits, err := domain.GetReposCommits(repo.FullName)
if err != nil {
errorHandler(errors.New("An error was occured during getting commit logs"))
return
}
for _, commit := range commits {
if commit.Author.Login == authenticatedGit {
err := domain.CloneReposWithName(repo.FullName, localReposPath, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during cloning with name"))
return
}
err = domain.ResetWithSHA(localReposPath + "/" + commit.Sha, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during resetting with SHA"))
return
}
}
}
}
completionHandler()
return
}()
}
func (scs *SmartContractServiceImpl) Deploy(ReposPath string) (string, error) {
origin_repos_name := strings.Split(ReposPath, "/")[1]
new_repos_name := strings.Replace(ReposPath, "/", "_", -1)
_, ok := scs.keyByValue(ReposPath)
if ok {
// 버전 업데이트 기능 추가 필요
return "", errors.New("Already exist smart contract ID")
}
repos, err := domain.GetRepos(ReposPath)
if err != nil {
return "", errors.New("An error occured while getting repos!")
}
if repos.Message == "Bad credentials" {
return "", errors.New("Not Exist Repos!")
}
err = os.MkdirAll(scs.SmartContractDirPath + "/" + new_repos_name, 0755)
if err != nil {
return "", errors.New("An error occured while make repository's directory!")
}
//todo gitpath이미 존재하는지 확인
err = domain.CloneRepos(ReposPath, scs.SmartContractDirPath + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
common.Log.Println(viper.GetString("smartContract.githubID"))
_, err = domain.CreateRepos(new_repos_name, viper.GetString("smartContract.githubAccessToken"))
if err != nil {
return "", errors.New(err.Error())//"An error occured while creating repos!")
}
err = domain.ChangeRemote(scs.GithubID + "/" + new_repos_name, scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
// 버전 관리를 위한 파일 추가
now := time.Now().Format("2006-01-02 15:04:05");
file, err := os.OpenFile(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name + "/version", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil { | }
_, err = file.WriteString("Deployed at " + now + "\n")
if err != nil {
return "", errors.New("An error occured while writing file!")
}
err = file.Close()
if err != nil {
return "", errors.New("An error occured while closing file!")
}
err = domain.CommitAndPush(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, "It-Chain Smart Contract \"" + new_repos_name + "\" Deploy")
if err != nil {
return "", errors.New(err.Error())
//return "", errors.New("An error occured while committing and pushing!")
}
githubResponseCommits, err := domain.GetReposCommits(scs.GithubID + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while getting commit log!")
}
reposDirPath := scs.SmartContractDirPath + "/" + new_repos_name + "/" + githubResponseCommits[0].Sha
err = os.Rename(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, reposDirPath)
if err != nil {
return "", errors.New("An error occured while renaming directory!")
}
scs.SmartContractMap[githubResponseCommits[0].Sha] = SmartContract{new_repos_name, ReposPath, ""}
return githubResponseCommits[0].Sha, nil
}
/***************************************************
* 1. smartcontract 검사
* 2. smartcontract -> sc.tar : 애초에 풀 받을 때 압축해 둘 수 있음
* 3. go 버전에 맞는 docker image를 Create
* 4. sc.tar를 docker container로 복사
* 5. docker container Start
* 6. docker에서 smartcontract 실행
****************************************************/
func (scs *SmartContractServiceImpl) Query(transaction domain.Transaction) (error) {
/*** Set Transaction Arg ***/
logger_s.Errorln("query start")
tx_bytes, err := json.Marshal(transaction)
if err != nil {
return errors.New("Tx Marshal Error")
}
sc, ok := scs.SmartContractMap[transaction.TxData.ContractID];
if !ok {
logger_s.Errorln("Not exist contract ID")
return errors.New("Not exist contract ID")
}
_, err = os.Stat(sc.SmartContractPath)
if os.IsNotExist(err) {
logger_s.Errorln("File or Directory Not Exist")
return errors.New("File or Directory Not Exist")
}
/*** smartcontract build ***/
logger_s.Errorln("build start")
cmd := exec.Command("env", "GOOS=linux", "go", "build", "-o", TMP_DIR + "/" + sc.Name, "./" + sc.Name + ".go")
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("SmartContract build error")
return err
}
cmd = exec.Command("chmod", "777", TMP_DIR + "/" + sc.Name)
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("Chmod Error")
return err
}
logger_s.Errorln("make tar")
err = domain.MakeTar(TMP_DIR + "/" + sc.Name, TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving smartcontract file!")
return err
}
err = domain.MakeTar("$GOPATH/src/it-chain/smartcontract/worldstatedb", TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving worldstateDB file!")
return err
}
logger_s.Errorln("exec cmd")
// tar config file
cmd = exec.Command("tar", "-cf", TMP_DIR + "/config.tar", "./it-chain/config.yaml")
cmd.Dir = "../../"
err = cmd.Run()
if err != nil {
logger_s.Errorln("An error occured while archiving config file!")
return err
}
logger_s.Errorln("Pulling image")
// Docker Code
imageName := "docker.io/library/golang:1.9.2-alpine3.6"
tarPath := TMP_DIR + "/" + sc.Name + ".tar"
tarPath_wsdb := TMP_DIR + "/worldstatedb.tar"
tarPath_config := TMP_DIR + "/config.tar"
ctx := context.Background()
cli, err := docker.NewEnvClient()
if err != nil {
logger_s.Errorln("An error occured while creating new Docker Client!")
return err
}
out, err := cli.ImagePull(ctx, imageName, types.ImagePullOptions{})
if err != nil {
logger_s.Errorln("An error oeccured while pulling docker image!")
return err
}
io.Copy(os.Stdout, out)
imageName_splited := strings.Split(imageName, "/")
image := imageName_splited[len(imageName_splited)-1]
resp, err := cli.ContainerCreate(ctx, &container.Config{
Image: image,
Cmd: []string{"/go/src/" + sc.Name, string(tx_bytes)},
Tty: true,
AttachStdout: true,
AttachStderr: true,
}, nil, nil, "")
if err != nil {
logger_s.Errorln("An error occured while creating docker container!")
return err
}
/*** read tar file ***/
file, err := ioutil.ReadFile(tarPath)
if err != nil {
logger_s.Errorln("An error occured while reading smartcontract tar file!")
return err
}
wsdb, err := ioutil.ReadFile(tarPath_wsdb)
if err != nil {
logger_s.Errorln("An error occured while reading worldstateDB tar file!")
return err
}
config, err := ioutil.ReadFile(tarPath_config)
if err != nil {
logger_s.Errorln("An error occured while reading config tar file!")
return err
}
/*** copy file to docker ***/
err = cli.CopyToContainer(ctx, resp.ID, "/go/src/", bytes.NewReader(file), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
logger_s.Errorln("An error occured while copying the smartcontract to the container!")
return err
}
err = cli.CopyToContainer(ctx, resp.ID, "/go/src/", bytes.NewReader(wsdb), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
logger_s.Errorln("An error occured while copying the worldstateDB to the container!")
return err
}
err = cli.CopyToContainer(ctx, resp.ID, "/go/src/", bytes.NewReader(config), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
logger_s.Errorln("An error occured while copying the config to the container!")
return err
}
err = cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
if err != nil {
logger_s.Errorln("An error occured while starting the container!")
return err
}
/* get docker output
----------------------*/
fmt.Println("============<Docker Output>=============")
reader, err := cli.ContainerLogs(context.Background(), resp.ID, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
Timestamps: false,
})
if err != nil {
logger_s.Errorln("An error occured while getting the output!")
return err
}
defer reader.Close()
var output = ""
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
output += scanner.Text() + "\n"
}
fmt.Println(output)
smartContractResponse := &domain.SmartContractResponse{}
err = json.Unmarshal([]byte(output), smartContractResponse)
fmt.Println("----Marshaled Output----")
fmt.Println(smartContractResponse)
if smartContractResponse.Result == domain.SUCCESS {
logger_s.Println("Running smartcontract is success")
// tx hash reset
// real running smartcontract
} else if smartContractResponse.Result == domain.FAIL {
logger_s.Errorln("An error occured while running smartcontract!")
}
return nil
}
func (scs *SmartContractServiceImpl) Invoke() {
}
func (scs *SmartContractServiceImpl) keyByValue(OriginReposPath string) (key string, ok bool) {
contractName := strings.Replace(OriginReposPath, "/", "^", -1)
for k, v := range scs.SmartContractMap {
if contractName == v.OriginReposPath {
key = k
ok = true
return key, ok
}
}
return "", false
} | return "", errors.New("An error occured while creating or opening file!") | random_line_split |
main.rs | use std::{collections::HashSet, fs::File, sync::Arc, time::Duration};
use anyhow::{Context as _, Result};
use serenity::{
async_trait,
client::bridge::gateway::GatewayIntents,
framework::standard::{
macros::{command, group},
Args, CommandResult, StandardFramework,
},
futures::StreamExt,
http::Http,
model::{channel::Message, gateway::Ready, id::RoleId},
prelude::*,
utils::{content_safe, ContentSafeOptions, MessageBuilder},
};
use tracing::info;
use types::*;
mod types;
const POSITIVE_REACTION: char = '✅';
const NEGATIVE_REACTION: char = '❌';
const SENT_REACTION: char = '📨';
const REACTION_TIMEOUT: Duration = Duration::from_secs(30 * 60);
struct ConfigContainer;
impl TypeMapKey for ConfigContainer {
type Value = Arc<Config>;
}
struct Handler;
#[async_trait]
impl EventHandler for Handler {
async fn ready(&self, _: Context, ready: Ready) {
info!("{} is connected!", ready.user.name);
}
}
// Simple `split_once` "polyfill" since it's currently unstable.
fn split_once(text: &str, pat: char) -> Option<(&str, &str)> {
let mut iter = text.splitn(2, pat);
Some((iter.next()?, iter.next()?))
}
async fn parse_name_and_discriminator(
args: &mut Args,
) -> Option<Result<(String, u16), &'static str>> {
let mut name = String::new();
while let Ok(arg) = args.single::<String>() {
let mut fragment = arg.as_str();
if name.is_empty() {
match fragment.strip_prefix('@') {
Some(trimmed) => fragment = trimmed,
None => {
args.rewind();
return None;
}
}
}
match split_once(fragment, '#') {
Some((name_tail, discriminator_str)) => {
name.push_str(name_tail);
match discriminator_str.parse() {
Ok(discriminator) if (1..=9999).contains(&discriminator) => {
return Some(Ok((name, discriminator)))
}
_ => return Some(Err("invalid discriminator")),
}
}
None => name.push_str(fragment),
}
}
Some(Err(
"invalid format; mention should be in the form `@username#discriminator`",
))
}
#[group("relay")]
#[commands(forward)]
struct Relay;
#[command("forward")]
async fn forward(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
let config = {
let data = ctx.data.read().await;
data.get::<ConfigContainer>().unwrap().clone()
};
let delegate_member = if let Ok(member) = ctx
.http
.get_member(config.guild_id(), msg.author.id.into())
.await
{
member
} else {
msg.channel_id
.say(ctx, "Umm... have I made your acquaintance?")
.await?;
return Ok(());
};
if !delegate_member
.roles
.contains(&config.delegate_role_id().into())
{
msg.channel_id
.say(ctx, format!("This command is only available to delegates."))
.await?;
return Ok(());
}
let committee = if let Some(committee) = config
.committees()
.iter()
.find(|&committee| delegate_member.roles.contains(&committee.role_id().into()))
{
committee
} else {
msg.channel_id
.say(ctx, "Sorry, but I'm not sure which committee you're on.")
.await?;
return Ok(());
};
let committee_channel = ctx
.cache
.guild_channel(committee.channel_id())
.await
.expect("failed to find committee channel");
let recipient_id = match parse_name_and_discriminator(&mut args).await {
Some(res) => match res {
Ok((name, discriminator)) => {
let members = delegate_member.guild_id.members(ctx, None, None).await?;
match members
.iter()
.map(|member| &member.user)
.find(|&user| user.name == name && user.discriminator == discriminator)
.map(|user| user.id)
{
Some(id) => Some(id),
None => {
msg.channel_id
.say(ctx, "Sorry, I couldn't find that user.")
.await?;
return Ok(());
}
}
}
Err(err) => {
msg.channel_id
.say(
ctx,
format!(
"Sorry, I couldn't understand your mention. Problem: `{}`",
err
),
)
.await?;
return Ok(());
}
},
None => None,
};
let is_external = recipient_id.is_some(); | let committee_msg = committee_channel
.say(
ctx,
&MessageBuilder::new()
.push("Received request from ")
.mention(&msg.author)
.push(if is_external {
format!(
" to forward message to {}",
&recipient_id.unwrap().mention()
)
} else {
String::new()
})
.push_line(":")
.push_quote_line(cleaned_content.clone())
.push_line("")
.push(if is_external {
"Use the reactions below to approve or deny this request. "
} else {
""
})
.push(format!(
"Reply to this message within the next {} minutes{}to send a response.",
REACTION_TIMEOUT.as_secs() / 60, if is_external { " after voting " } else { " " }
))
.build(),
)
.await?;
if is_external {
committee_msg.react(ctx, POSITIVE_REACTION).await?;
committee_msg.react(ctx, NEGATIVE_REACTION).await?;
}
msg.reply(
ctx,
&MessageBuilder::new()
.push("Your message has been forwarded to ")
.push_bold_safe(committee.name())
.push(if is_external { " for approval" } else { "" })
.push(".")
.build(),
)
.await?;
typing.stop();
if is_external {
let approved = if let Some(reaction) = committee_msg
.await_reaction(ctx)
.timeout(REACTION_TIMEOUT)
.await
{
match reaction
.as_inner_ref()
.emoji
.as_data()
.chars()
.next()
.unwrap()
{
POSITIVE_REACTION => {
committee_msg
.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold("approved")
.push(".")
.build(),
)
.await?;
true
}
NEGATIVE_REACTION => {
committee_msg
.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold("rejected")
.push(".")
.build(),
)
.await?;
false
}
_ => {
committee_msg
.reply(ctx, "Invalid reaction; rejecting request.")
.await?;
false
}
}
} else {
committee_msg.delete_reactions(ctx).await?;
committee_msg
.reply(
ctx,
"No consensus reached; rejecting request.",
)
.await?;
false
};
msg.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold(if approved { "approved" } else { "rejected" })
.push(".")
.build(),
)
.await?;
if approved {
recipient_id
.unwrap()
.create_dm_channel(ctx)
.await?
.say(
ctx,
&MessageBuilder::new()
.push("Received message from ")
.mention(&msg.author)
.push_line(":")
.push_quote_line(cleaned_content.clone()),
)
.await?;
}
}
let committee_msg_id = committee_msg.id;
let mut replies = committee_channel
.id
.await_replies(ctx)
.timeout(REACTION_TIMEOUT)
.filter(move |msg| match msg.message_reference {
Some(ref msg_ref) => match msg_ref.message_id {
Some(m) => m == committee_msg_id,
None => false,
},
None => false,
})
.await;
while let Some(reply_msg) = replies.next().await {
let cleaned_content = content_safe(
&ctx.cache,
&reply_msg.content,
&ContentSafeOptions::default(),
)
.await;
msg.channel_id
.say(
ctx,
&MessageBuilder::new()
.push("Received reply from ")
.mention(&reply_msg.author)
.push_line(":")
.push_quote_line(cleaned_content.clone()),
)
.await?;
reply_msg.react(ctx, SENT_REACTION).await?;
}
Ok(())
}
#[group("role")]
#[commands(join)]
struct Role;
#[command("join")]
async fn join(ctx: &Context, msg: &Message, args: Args) -> CommandResult {
let config = {
let data = ctx.data.read().await;
data.get::<ConfigContainer>().unwrap().clone()
};
let in_valid_guild = match msg.guild_id {
Some(id) => id.as_u64() == &config.guild_id(),
None => false,
};
if !in_valid_guild {
msg.channel_id
.say(ctx, "I'm not configured to work here.")
.await?;
return Ok(());
}
let guild = msg.guild(ctx).await.unwrap();
let query = args.rest().to_lowercase();
let committee = if let Some(committee) = config.committees().iter().find(|&committee| {
query == guild.roles[&committee.role_id().into()].name.to_lowercase()
|| query == committee.name()
}) {
committee
} else {
msg.reply(ctx, "Sorry, I couldn't find a committee by that name.")
.await?;
return Ok(());
};
let mut member = msg.member(ctx).await?;
let committee_role_ids: HashSet<RoleId> = config
.committees()
.iter()
.map(|committee| committee.role_id().into())
.collect();
let member_role_ids: HashSet<RoleId> = member.roles.iter().copied().collect();
let other_committee_roles: Vec<_> = committee_role_ids
.intersection(&member_role_ids)
.cloned()
.collect();
if !other_committee_roles.is_empty() {
member.remove_roles(ctx, &other_committee_roles).await?;
}
let committee_role_id: RoleId = committee.role_id().into();
let delegate_role_id: RoleId = config.delegate_role_id().into();
let mut intended_roles = HashSet::with_capacity(2);
intended_roles.insert(committee_role_id);
intended_roles.insert(delegate_role_id);
let roles_to_add: Vec<_> = intended_roles
.difference(&member_role_ids)
.cloned()
.collect();
if !roles_to_add.is_empty() {
member.add_roles(ctx, &roles_to_add).await?;
}
msg.react(ctx, POSITIVE_REACTION).await?;
Ok(())
}
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt::init();
let config_file = File::open("config.ron").context("missing config file")?;
let config: Config = ron::de::from_reader(config_file).context("invalid config file")?;
let bot_id = Http::new_with_token(config.token())
.get_current_application_info()
.await?
.id;
let framework = StandardFramework::new()
.configure(|c| {
c.no_dm_prefix(true)
.with_whitespace(true)
.on_mention(Some(bot_id))
})
.group(&RELAY_GROUP)
.group(&ROLE_GROUP);
let mut client = Client::builder(config.token())
.event_handler(Handler)
.framework(framework)
.intents(
GatewayIntents::DIRECT_MESSAGES
| GatewayIntents::DIRECT_MESSAGE_TYPING
| GatewayIntents::DIRECT_MESSAGE_REACTIONS
| GatewayIntents::GUILDS
| GatewayIntents::GUILD_MESSAGES
| GatewayIntents::GUILD_MESSAGE_TYPING
| GatewayIntents::GUILD_MESSAGE_REACTIONS
| GatewayIntents::GUILD_MEMBERS,
)
.await
.context("failed to create client")?;
{
let mut data = client.data.write().await;
data.insert::<ConfigContainer>(Arc::new(config));
}
client.start().await.context("failed to start client")?;
Ok(())
} |
let cleaned_content = content_safe(ctx, args.rest(), &ContentSafeOptions::default()).await;
let typing = msg.channel_id.start_typing(&ctx.http)?;
| random_line_split |
main.rs | use std::{collections::HashSet, fs::File, sync::Arc, time::Duration};
use anyhow::{Context as _, Result};
use serenity::{
async_trait,
client::bridge::gateway::GatewayIntents,
framework::standard::{
macros::{command, group},
Args, CommandResult, StandardFramework,
},
futures::StreamExt,
http::Http,
model::{channel::Message, gateway::Ready, id::RoleId},
prelude::*,
utils::{content_safe, ContentSafeOptions, MessageBuilder},
};
use tracing::info;
use types::*;
mod types;
const POSITIVE_REACTION: char = '✅';
const NEGATIVE_REACTION: char = '❌';
const SENT_REACTION: char = '📨';
const REACTION_TIMEOUT: Duration = Duration::from_secs(30 * 60);
struct ConfigContainer;
impl TypeMapKey for ConfigContainer {
type Value = Arc<Config>;
}
struct Handler;
#[async_trait]
impl EventHandler for Handler {
async fn ready(&self, _: Context, ready: Ready) {
info!("{} is connected!", ready.user.name);
}
}
// Simple `split_once` "polyfill" since it's currently unstable.
fn split_once(text: &str, pat: char) -> Option<(&str, &str)> {
let mut iter = text.splitn(2, pat);
Some((iter.next()?, iter.next()?))
}
async fn parse_name_and_discriminator(
args: &mut Args,
) -> Option<Result<(String, u16), &'static str>> {
let mut name = String::new();
while let Ok(arg) = args.single::<String>() {
let mut fragment = arg.as_str();
if name.is_empty() {
match fragment.strip_prefix('@') {
Some(trimmed) => fragment = trimmed,
None => {
args.rewind();
return None;
}
}
}
match split_once(fragment, '#') {
Some((name_tail, discriminator_str)) => {
name.push_str(name_tail);
match discriminator_str.parse() {
Ok(discriminator) if (1..=9999).contains(&discriminator) => {
return Some(Ok((name, discriminator)))
}
_ => return Some(Err("invalid discriminator")),
}
}
None => name.push_str(fragment),
}
}
Some(Err(
"invalid format; mention should be in the form `@username#discriminator`",
))
}
#[group("relay")]
#[commands(forward)]
struct Relay;
| mmand("forward")]
async fn forward(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
let config = {
let data = ctx.data.read().await;
data.get::<ConfigContainer>().unwrap().clone()
};
let delegate_member = if let Ok(member) = ctx
.http
.get_member(config.guild_id(), msg.author.id.into())
.await
{
member
} else {
msg.channel_id
.say(ctx, "Umm... have I made your acquaintance?")
.await?;
return Ok(());
};
if !delegate_member
.roles
.contains(&config.delegate_role_id().into())
{
msg.channel_id
.say(ctx, format!("This command is only available to delegates."))
.await?;
return Ok(());
}
let committee = if let Some(committee) = config
.committees()
.iter()
.find(|&committee| delegate_member.roles.contains(&committee.role_id().into()))
{
committee
} else {
msg.channel_id
.say(ctx, "Sorry, but I'm not sure which committee you're on.")
.await?;
return Ok(());
};
let committee_channel = ctx
.cache
.guild_channel(committee.channel_id())
.await
.expect("failed to find committee channel");
let recipient_id = match parse_name_and_discriminator(&mut args).await {
Some(res) => match res {
Ok((name, discriminator)) => {
let members = delegate_member.guild_id.members(ctx, None, None).await?;
match members
.iter()
.map(|member| &member.user)
.find(|&user| user.name == name && user.discriminator == discriminator)
.map(|user| user.id)
{
Some(id) => Some(id),
None => {
msg.channel_id
.say(ctx, "Sorry, I couldn't find that user.")
.await?;
return Ok(());
}
}
}
Err(err) => {
msg.channel_id
.say(
ctx,
format!(
"Sorry, I couldn't understand your mention. Problem: `{}`",
err
),
)
.await?;
return Ok(());
}
},
None => None,
};
let is_external = recipient_id.is_some();
let cleaned_content = content_safe(ctx, args.rest(), &ContentSafeOptions::default()).await;
let typing = msg.channel_id.start_typing(&ctx.http)?;
let committee_msg = committee_channel
.say(
ctx,
&MessageBuilder::new()
.push("Received request from ")
.mention(&msg.author)
.push(if is_external {
format!(
" to forward message to {}",
&recipient_id.unwrap().mention()
)
} else {
String::new()
})
.push_line(":")
.push_quote_line(cleaned_content.clone())
.push_line("")
.push(if is_external {
"Use the reactions below to approve or deny this request. "
} else {
""
})
.push(format!(
"Reply to this message within the next {} minutes{}to send a response.",
REACTION_TIMEOUT.as_secs() / 60, if is_external { " after voting " } else { " " }
))
.build(),
)
.await?;
if is_external {
committee_msg.react(ctx, POSITIVE_REACTION).await?;
committee_msg.react(ctx, NEGATIVE_REACTION).await?;
}
msg.reply(
ctx,
&MessageBuilder::new()
.push("Your message has been forwarded to ")
.push_bold_safe(committee.name())
.push(if is_external { " for approval" } else { "" })
.push(".")
.build(),
)
.await?;
typing.stop();
if is_external {
let approved = if let Some(reaction) = committee_msg
.await_reaction(ctx)
.timeout(REACTION_TIMEOUT)
.await
{
match reaction
.as_inner_ref()
.emoji
.as_data()
.chars()
.next()
.unwrap()
{
POSITIVE_REACTION => {
committee_msg
.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold("approved")
.push(".")
.build(),
)
.await?;
true
}
NEGATIVE_REACTION => {
committee_msg
.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold("rejected")
.push(".")
.build(),
)
.await?;
false
}
_ => {
committee_msg
.reply(ctx, "Invalid reaction; rejecting request.")
.await?;
false
}
}
} else {
committee_msg.delete_reactions(ctx).await?;
committee_msg
.reply(
ctx,
"No consensus reached; rejecting request.",
)
.await?;
false
};
msg.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold(if approved { "approved" } else { "rejected" })
.push(".")
.build(),
)
.await?;
if approved {
recipient_id
.unwrap()
.create_dm_channel(ctx)
.await?
.say(
ctx,
&MessageBuilder::new()
.push("Received message from ")
.mention(&msg.author)
.push_line(":")
.push_quote_line(cleaned_content.clone()),
)
.await?;
}
}
let committee_msg_id = committee_msg.id;
let mut replies = committee_channel
.id
.await_replies(ctx)
.timeout(REACTION_TIMEOUT)
.filter(move |msg| match msg.message_reference {
Some(ref msg_ref) => match msg_ref.message_id {
Some(m) => m == committee_msg_id,
None => false,
},
None => false,
})
.await;
while let Some(reply_msg) = replies.next().await {
let cleaned_content = content_safe(
&ctx.cache,
&reply_msg.content,
&ContentSafeOptions::default(),
)
.await;
msg.channel_id
.say(
ctx,
&MessageBuilder::new()
.push("Received reply from ")
.mention(&reply_msg.author)
.push_line(":")
.push_quote_line(cleaned_content.clone()),
)
.await?;
reply_msg.react(ctx, SENT_REACTION).await?;
}
Ok(())
}
#[group("role")]
#[commands(join)]
struct Role;
#[command("join")]
async fn join(ctx: &Context, msg: &Message, args: Args) -> CommandResult {
let config = {
let data = ctx.data.read().await;
data.get::<ConfigContainer>().unwrap().clone()
};
let in_valid_guild = match msg.guild_id {
Some(id) => id.as_u64() == &config.guild_id(),
None => false,
};
if !in_valid_guild {
msg.channel_id
.say(ctx, "I'm not configured to work here.")
.await?;
return Ok(());
}
let guild = msg.guild(ctx).await.unwrap();
let query = args.rest().to_lowercase();
let committee = if let Some(committee) = config.committees().iter().find(|&committee| {
query == guild.roles[&committee.role_id().into()].name.to_lowercase()
|| query == committee.name()
}) {
committee
} else {
msg.reply(ctx, "Sorry, I couldn't find a committee by that name.")
.await?;
return Ok(());
};
let mut member = msg.member(ctx).await?;
let committee_role_ids: HashSet<RoleId> = config
.committees()
.iter()
.map(|committee| committee.role_id().into())
.collect();
let member_role_ids: HashSet<RoleId> = member.roles.iter().copied().collect();
let other_committee_roles: Vec<_> = committee_role_ids
.intersection(&member_role_ids)
.cloned()
.collect();
if !other_committee_roles.is_empty() {
member.remove_roles(ctx, &other_committee_roles).await?;
}
let committee_role_id: RoleId = committee.role_id().into();
let delegate_role_id: RoleId = config.delegate_role_id().into();
let mut intended_roles = HashSet::with_capacity(2);
intended_roles.insert(committee_role_id);
intended_roles.insert(delegate_role_id);
let roles_to_add: Vec<_> = intended_roles
.difference(&member_role_ids)
.cloned()
.collect();
if !roles_to_add.is_empty() {
member.add_roles(ctx, &roles_to_add).await?;
}
msg.react(ctx, POSITIVE_REACTION).await?;
Ok(())
}
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt::init();
let config_file = File::open("config.ron").context("missing config file")?;
let config: Config = ron::de::from_reader(config_file).context("invalid config file")?;
let bot_id = Http::new_with_token(config.token())
.get_current_application_info()
.await?
.id;
let framework = StandardFramework::new()
.configure(|c| {
c.no_dm_prefix(true)
.with_whitespace(true)
.on_mention(Some(bot_id))
})
.group(&RELAY_GROUP)
.group(&ROLE_GROUP);
let mut client = Client::builder(config.token())
.event_handler(Handler)
.framework(framework)
.intents(
GatewayIntents::DIRECT_MESSAGES
| GatewayIntents::DIRECT_MESSAGE_TYPING
| GatewayIntents::DIRECT_MESSAGE_REACTIONS
| GatewayIntents::GUILDS
| GatewayIntents::GUILD_MESSAGES
| GatewayIntents::GUILD_MESSAGE_TYPING
| GatewayIntents::GUILD_MESSAGE_REACTIONS
| GatewayIntents::GUILD_MEMBERS,
)
.await
.context("failed to create client")?;
{
let mut data = client.data.write().await;
data.insert::<ConfigContainer>(Arc::new(config));
}
client.start().await.context("failed to start client")?;
Ok(())
}
|
#[co | identifier_name |
main.rs | use std::{collections::HashSet, fs::File, sync::Arc, time::Duration};
use anyhow::{Context as _, Result};
use serenity::{
async_trait,
client::bridge::gateway::GatewayIntents,
framework::standard::{
macros::{command, group},
Args, CommandResult, StandardFramework,
},
futures::StreamExt,
http::Http,
model::{channel::Message, gateway::Ready, id::RoleId},
prelude::*,
utils::{content_safe, ContentSafeOptions, MessageBuilder},
};
use tracing::info;
use types::*;
mod types;
const POSITIVE_REACTION: char = '✅';
const NEGATIVE_REACTION: char = '❌';
const SENT_REACTION: char = '📨';
const REACTION_TIMEOUT: Duration = Duration::from_secs(30 * 60);
struct ConfigContainer;
impl TypeMapKey for ConfigContainer {
type Value = Arc<Config>;
}
struct Handler;
#[async_trait]
impl EventHandler for Handler {
async fn ready(&self, _: Context, ready: Ready) {
info!("{} is connected!", ready.user.name);
}
}
// Simple `split_once` "polyfill" since it's currently unstable.
fn split_once(text: &str, pat: char) -> Option<(&str, &str)> {
l | fn parse_name_and_discriminator(
args: &mut Args,
) -> Option<Result<(String, u16), &'static str>> {
let mut name = String::new();
while let Ok(arg) = args.single::<String>() {
let mut fragment = arg.as_str();
if name.is_empty() {
match fragment.strip_prefix('@') {
Some(trimmed) => fragment = trimmed,
None => {
args.rewind();
return None;
}
}
}
match split_once(fragment, '#') {
Some((name_tail, discriminator_str)) => {
name.push_str(name_tail);
match discriminator_str.parse() {
Ok(discriminator) if (1..=9999).contains(&discriminator) => {
return Some(Ok((name, discriminator)))
}
_ => return Some(Err("invalid discriminator")),
}
}
None => name.push_str(fragment),
}
}
Some(Err(
"invalid format; mention should be in the form `@username#discriminator`",
))
}
#[group("relay")]
#[commands(forward)]
struct Relay;
#[command("forward")]
async fn forward(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
let config = {
let data = ctx.data.read().await;
data.get::<ConfigContainer>().unwrap().clone()
};
let delegate_member = if let Ok(member) = ctx
.http
.get_member(config.guild_id(), msg.author.id.into())
.await
{
member
} else {
msg.channel_id
.say(ctx, "Umm... have I made your acquaintance?")
.await?;
return Ok(());
};
if !delegate_member
.roles
.contains(&config.delegate_role_id().into())
{
msg.channel_id
.say(ctx, format!("This command is only available to delegates."))
.await?;
return Ok(());
}
let committee = if let Some(committee) = config
.committees()
.iter()
.find(|&committee| delegate_member.roles.contains(&committee.role_id().into()))
{
committee
} else {
msg.channel_id
.say(ctx, "Sorry, but I'm not sure which committee you're on.")
.await?;
return Ok(());
};
let committee_channel = ctx
.cache
.guild_channel(committee.channel_id())
.await
.expect("failed to find committee channel");
let recipient_id = match parse_name_and_discriminator(&mut args).await {
Some(res) => match res {
Ok((name, discriminator)) => {
let members = delegate_member.guild_id.members(ctx, None, None).await?;
match members
.iter()
.map(|member| &member.user)
.find(|&user| user.name == name && user.discriminator == discriminator)
.map(|user| user.id)
{
Some(id) => Some(id),
None => {
msg.channel_id
.say(ctx, "Sorry, I couldn't find that user.")
.await?;
return Ok(());
}
}
}
Err(err) => {
msg.channel_id
.say(
ctx,
format!(
"Sorry, I couldn't understand your mention. Problem: `{}`",
err
),
)
.await?;
return Ok(());
}
},
None => None,
};
let is_external = recipient_id.is_some();
let cleaned_content = content_safe(ctx, args.rest(), &ContentSafeOptions::default()).await;
let typing = msg.channel_id.start_typing(&ctx.http)?;
let committee_msg = committee_channel
.say(
ctx,
&MessageBuilder::new()
.push("Received request from ")
.mention(&msg.author)
.push(if is_external {
format!(
" to forward message to {}",
&recipient_id.unwrap().mention()
)
} else {
String::new()
})
.push_line(":")
.push_quote_line(cleaned_content.clone())
.push_line("")
.push(if is_external {
"Use the reactions below to approve or deny this request. "
} else {
""
})
.push(format!(
"Reply to this message within the next {} minutes{}to send a response.",
REACTION_TIMEOUT.as_secs() / 60, if is_external { " after voting " } else { " " }
))
.build(),
)
.await?;
if is_external {
committee_msg.react(ctx, POSITIVE_REACTION).await?;
committee_msg.react(ctx, NEGATIVE_REACTION).await?;
}
msg.reply(
ctx,
&MessageBuilder::new()
.push("Your message has been forwarded to ")
.push_bold_safe(committee.name())
.push(if is_external { " for approval" } else { "" })
.push(".")
.build(),
)
.await?;
typing.stop();
if is_external {
let approved = if let Some(reaction) = committee_msg
.await_reaction(ctx)
.timeout(REACTION_TIMEOUT)
.await
{
match reaction
.as_inner_ref()
.emoji
.as_data()
.chars()
.next()
.unwrap()
{
POSITIVE_REACTION => {
committee_msg
.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold("approved")
.push(".")
.build(),
)
.await?;
true
}
NEGATIVE_REACTION => {
committee_msg
.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold("rejected")
.push(".")
.build(),
)
.await?;
false
}
_ => {
committee_msg
.reply(ctx, "Invalid reaction; rejecting request.")
.await?;
false
}
}
} else {
committee_msg.delete_reactions(ctx).await?;
committee_msg
.reply(
ctx,
"No consensus reached; rejecting request.",
)
.await?;
false
};
msg.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold(if approved { "approved" } else { "rejected" })
.push(".")
.build(),
)
.await?;
if approved {
recipient_id
.unwrap()
.create_dm_channel(ctx)
.await?
.say(
ctx,
&MessageBuilder::new()
.push("Received message from ")
.mention(&msg.author)
.push_line(":")
.push_quote_line(cleaned_content.clone()),
)
.await?;
}
}
let committee_msg_id = committee_msg.id;
let mut replies = committee_channel
.id
.await_replies(ctx)
.timeout(REACTION_TIMEOUT)
.filter(move |msg| match msg.message_reference {
Some(ref msg_ref) => match msg_ref.message_id {
Some(m) => m == committee_msg_id,
None => false,
},
None => false,
})
.await;
while let Some(reply_msg) = replies.next().await {
let cleaned_content = content_safe(
&ctx.cache,
&reply_msg.content,
&ContentSafeOptions::default(),
)
.await;
msg.channel_id
.say(
ctx,
&MessageBuilder::new()
.push("Received reply from ")
.mention(&reply_msg.author)
.push_line(":")
.push_quote_line(cleaned_content.clone()),
)
.await?;
reply_msg.react(ctx, SENT_REACTION).await?;
}
Ok(())
}
#[group("role")]
#[commands(join)]
struct Role;
#[command("join")]
async fn join(ctx: &Context, msg: &Message, args: Args) -> CommandResult {
let config = {
let data = ctx.data.read().await;
data.get::<ConfigContainer>().unwrap().clone()
};
let in_valid_guild = match msg.guild_id {
Some(id) => id.as_u64() == &config.guild_id(),
None => false,
};
if !in_valid_guild {
msg.channel_id
.say(ctx, "I'm not configured to work here.")
.await?;
return Ok(());
}
let guild = msg.guild(ctx).await.unwrap();
let query = args.rest().to_lowercase();
let committee = if let Some(committee) = config.committees().iter().find(|&committee| {
query == guild.roles[&committee.role_id().into()].name.to_lowercase()
|| query == committee.name()
}) {
committee
} else {
msg.reply(ctx, "Sorry, I couldn't find a committee by that name.")
.await?;
return Ok(());
};
let mut member = msg.member(ctx).await?;
let committee_role_ids: HashSet<RoleId> = config
.committees()
.iter()
.map(|committee| committee.role_id().into())
.collect();
let member_role_ids: HashSet<RoleId> = member.roles.iter().copied().collect();
let other_committee_roles: Vec<_> = committee_role_ids
.intersection(&member_role_ids)
.cloned()
.collect();
if !other_committee_roles.is_empty() {
member.remove_roles(ctx, &other_committee_roles).await?;
}
let committee_role_id: RoleId = committee.role_id().into();
let delegate_role_id: RoleId = config.delegate_role_id().into();
let mut intended_roles = HashSet::with_capacity(2);
intended_roles.insert(committee_role_id);
intended_roles.insert(delegate_role_id);
let roles_to_add: Vec<_> = intended_roles
.difference(&member_role_ids)
.cloned()
.collect();
if !roles_to_add.is_empty() {
member.add_roles(ctx, &roles_to_add).await?;
}
msg.react(ctx, POSITIVE_REACTION).await?;
Ok(())
}
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt::init();
let config_file = File::open("config.ron").context("missing config file")?;
let config: Config = ron::de::from_reader(config_file).context("invalid config file")?;
let bot_id = Http::new_with_token(config.token())
.get_current_application_info()
.await?
.id;
let framework = StandardFramework::new()
.configure(|c| {
c.no_dm_prefix(true)
.with_whitespace(true)
.on_mention(Some(bot_id))
})
.group(&RELAY_GROUP)
.group(&ROLE_GROUP);
let mut client = Client::builder(config.token())
.event_handler(Handler)
.framework(framework)
.intents(
GatewayIntents::DIRECT_MESSAGES
| GatewayIntents::DIRECT_MESSAGE_TYPING
| GatewayIntents::DIRECT_MESSAGE_REACTIONS
| GatewayIntents::GUILDS
| GatewayIntents::GUILD_MESSAGES
| GatewayIntents::GUILD_MESSAGE_TYPING
| GatewayIntents::GUILD_MESSAGE_REACTIONS
| GatewayIntents::GUILD_MEMBERS,
)
.await
.context("failed to create client")?;
{
let mut data = client.data.write().await;
data.insert::<ConfigContainer>(Arc::new(config));
}
client.start().await.context("failed to start client")?;
Ok(())
}
| et mut iter = text.splitn(2, pat);
Some((iter.next()?, iter.next()?))
}
async | identifier_body |
NeatDatePicker.js | import React, { useState, useEffect, } from 'react'
import { StyleSheet, TouchableOpacity, View, Text, Dimensions } from 'react-native'
import Modal from 'react-native-modal'
import PropTypes from 'prop-types'
import useDaysOfMonth from '../hooks/useDaysOfMonth';
import MDicon from 'react-native-vector-icons/MaterialIcons'
import { getMonthInChinese, getMonthInEnglish } from '../lib/lib';
import ChangeYearModal from './ChangeYearModal';
// import {
// useFonts,
// Roboto_100Thin,
// Roboto_300Light,
// Roboto_400Regular,
// Roboto_500Medium,
// Roboto_700Bold,
// } from '@expo-google-fonts/roboto'
import Key from './Key'
const winY = Dimensions.get('window').height
const NeatDatePicker = ({
isVisible,
initialDate, mode,
onCancel, onConfirm,
minDate, maxDate,
startDate, endDate,
onBackButtonPress, onBackdropPress,
chinese, colorOptions,
}) => {
const [showChangeYearModal, setShowChangeYearModal] = useState(false);
const sevenDays = chinese
? ['日', '一', '二', '三', '四', '五', '六']
: ['S', 'M', 'T', 'W', 'T', 'F', 'S']
// displayTime defines which month is going to be shown onto the screen
// For 'single' mode, displayTime is also the initial selected date when opening DatePicker at the first time.
const [displayTime, setDisplayTime] = useState(initialDate || new Date());
const year = displayTime.getFullYear()
const month = displayTime.getMonth()// 0-base
const date = displayTime.getDate()
const TODAY = new Date(year, month, date)
// output decides which date should be active.
const [output, setOutput] = useState(
mode === 'single'
? { date: TODAY, startDate: null, endDate: null }
: { date: null, startDate: startDate || null, endDate: endDate || null }
);
// If user presses cancel, reset 'output' state to this 'originalOutput'
const [originalOutput, setOriginalOutput] = useState(output);
const minTime = minDate?.getTime()
const maxTime = maxDate?.getTime()
// useDaysOfMonth returns an array that having several objects,
// representing all the days that are going to be rendered on screen.
// Each object contains five properties, 'year', 'month', 'date', 'isCurrentMonth' and 'disabled'.
const daysArray = useDaysOfMonth(year, month, minTime, maxTime)
const onCancelPress = () => {
onCancel()
setTimeout(() => {
// reset output to originalOutput
setOutput(originalOutput)
// originalOutput.startDate will be null only when the user hasn't picked any date using RANGE DatePicker.
// If that's the case, don't reset displayTime to originalOutput but initialDate/new Date()
if (mode === 'range' & !originalOutput.startDate) return setDisplayTime(initialDate || new Date())
// reset displayTime
return (mode === 'single')
? setDisplayTime(originalOutput.date)
: setDisplayTime(originalOutput.startDate)
}, 300);
}
const autoCompleteEndDate = () => {
// set endDate to startDate
output.endDate = output.startDate
// After successfully passing arguments in onConfirm, in next life cycle set endDate to null.
// Therefore, next time when user opens DatePicker he can start from selecting endDate.
setOutput({ ...output, endDate: null })
}
const onConfirmPress = () => {
if (mode === 'single') onConfirm(output.date)
else {
// If have not selected any date, just to onCancel
if (mode === 'range' & !output.startDate) return onCancel()
// If have not selected endDate, set it same as startDate
if (!output.endDate) autoCompleteEndDate()
onConfirm(output.startDate, output.endDate)
}
// Because the selected dates are confirmed, originalOutput should be updated.
setOriginalOutput({ ...output })
// reset displayTime
setTimeout(() => {
return (mode === 'single')
? setDisplayTime(output.date)
: setDisplayTime(output.startDate)
}, 300);
}
const [btnDisabled, setBtnDisabled] = useState(false);
// move to previous month
const onPrev = () => {
setBtnDisabled(true)
setDisplayTime(new Date(year, month - 1, date))
}
// move to next month
const onNext = () => {
setBtnDisabled(true)
setDisplayTime(new Date(year, month + 1, date))
}
// Disable Prev & Next buttons for a while after pressing them.
// Otherwise if the user presses the button rapidly in a short time
// the switching delay of the calendar is not neglectable
useEffect(() => {
setTimeout(setBtnDisabled, 300, false)
}, [btnDisabled])
// destructure colorOptions
const {
backgroundColor,
headerColor,
headerTextColor,
changeYearModalColor,
weekDaysColor,
dateTextColor,
selectedDateTextColor,
selectedDateBackgroundColor,
confirmButtonColor,
} = { ...defaultColorOptions, ...colorOptions }
// const [isFontsLoaded] = useFonts({
// Roboto_100Thin,
// Roboto_300Light,
// Roboto_400Regular,
// Roboto_500Medium,
// Roboto_700Bold,
// })
// if (!isFontsLoaded) return null
return (
<Modal
isVisible={isVisible}
animationIn={'zoomIn'}
animationOut={'zoomOut'}
useNativeDriver
hideModalContentWhileAnimating
onBackButtonPress={onBackButtonPress || onCancelPress}
onBackdropPress={onBackdropPress || onCancelPress}
style={styles.modal}
>
<View style={[styles.container, { backgroundColor: backgroundColor, }]}>
<View style={[styles.header, { backgroundColor: headerColor }]}>
{/* last month */}
<TouchableOpacity style={styles.changeMonthTO} onPress={onPrev} disabled={btnDisabled} >
<MDicon name={'keyboard-arrow-left'} size={32} color={headerTextColor} />
</TouchableOpacity>
{/* displayed year and month */}
<TouchableOpacity onPress={() => { setShowChangeYearModal(true) }}>
<Text style={[styles.header__title, { color: headerTextColor }]}>
{daysArray.length !== 0 && daysArray[10].year + ' '}
{daysArray.length !== 0 && (chinese ? getMonthInChinese(daysArray[10].month) : getMonthInEnglish(daysArray[10].month))}
</Text>
</TouchableOpacity>
{/* next month */}
<TouchableOpacity style={styles.changeMonthTO} onPress={onNext} disabled={btnDisabled} >
<MDicon name={'keyboard-arrow-right'} size={32} color={headerTextColor} />
</TouchableOpacity>
</View>
| {sevenDays.map((weekDay, index) => (
<View style={styles.keys} key={index.toString()}>
<Text style={[styles.weekDays, { color: weekDaysColor }]}>
{weekDay}
</Text>
</View>
))}
{/* every days */}
{daysArray.map((Day, i) => (
<Key key={Day.year.toString() + Day.month.toString() + i.toString()}
Day={Day}
mode={mode}
output={output}
setOutput={setOutput}
colorOptions={{
dateTextColor,
backgroundColor,
selectedDateTextColor,
selectedDateBackgroundColor
}}
/>
))}
</View>
<View style={styles.footer}>
<View style={styles.btn_box}>
<TouchableOpacity style={styles.btn} onPress={onCancelPress}>
<Text style={styles.btn_text}>
{chinese ? '取消' : 'Cancel'}
</Text>
</TouchableOpacity>
<TouchableOpacity style={styles.btn} onPress={onConfirmPress}>
<Text style={[styles.btn_text, { color: confirmButtonColor }]}>
{chinese ? '確定' : 'OK'}
</Text>
</TouchableOpacity>
</View>
</View>
<ChangeYearModal
isVisible={showChangeYearModal}
dismiss={() => { setShowChangeYearModal(false) }}
displayTime={displayTime}
setDisplayTime={setDisplayTime}
colorOptions={{
primary: changeYearModalColor,
backgroundColor
}}
/>
</View>
</Modal>
)
}
NeatDatePicker.proptype = {
isVisible: PropTypes.bool.isRequired,
mode: PropTypes.string.isRequired,
onConfirm: PropTypes.func,
minDate: PropTypes.object,
maxDate: PropTypes.object,
}
NeatDatePicker.defaultProps = {
}
// Notice: only six-digit HEX values are allowed.
const defaultColorOptions = {
backgroundColor: '#ffffff',
headerColor: '#4682E9',
headerTextColor: '#ffffff',
changeYearModalColor: '#4682E9',
weekDaysColor: '#4682E9',
dateTextColor: '#000000',
selectedDateTextColor: '#ffffff',
selectedDateBackgroundColor: '#4682E9',
confirmButtonColor: '#4682E9',
}
export default NeatDatePicker
const styles = StyleSheet.create({
modal: {
flex: 0,
height: winY,
alignItems: 'center',
padding: 0,
margin: 0,
},
container: {
width: 328,
justifyContent: 'center',
alignItems: 'center',
borderRadius: 12,
overflow: 'hidden'
},
header: {
// borderWidth: 1,
flexDirection: 'row',
width: '100%',
height: 68,
paddingHorizontal: 24,
justifyContent: 'space-between',
alignItems: 'center',
marginBottom: 8,
},
header__title: {
// borderWidth: 1,
fontSize: 24,
color: '#fff',
fontWeight: "500"
// fontFamily: 'Roboto_500Medium'
},
keys_container: {
// borderWidth: 1,
width: 300,
height: 264,
justifyContent: 'center',
flexDirection: 'row',
flexWrap: 'wrap',
},
weekDays: {
fontSize: 16,
// fontFamily: 'Roboto_400Regular'
},
keys: {
// borderWidth: 1,
width: 34,
height: 30,
borderRadius: 10,
marginTop: 4,
marginHorizontal: 4,
justifyContent: 'center',
alignItems: 'center',
},
footer: {
// borderWidth: 1,
width: '100%',
height: 52,
flexDirection: 'row',
justifyContent: 'flex-end',
},
btn_box: {
// borderWidth: 1,
height: '100%',
flexDirection: 'row',
alignItems: 'center',
padding: 8,
},
btn: {
// borderWidth: 1,
width: 80,
height: 44,
justifyContent: 'center',
alignItems: 'center',
},
btn_text: {
fontSize: 18,
// fontFamily: 'Roboto_400Regular',
color: '#777',
},
changeMonthTO: {
// borderWidth: 1,
justifyContent: 'center',
alignItems: 'center',
width: 50,
height: 50,
padding: 4,
borderColor: 'black',
}
}); | <View style={styles.keys_container}>
{/* week days */} | random_line_split |
country-card.component.ts | import { Component, OnInit, ElementRef } from '@angular/core';
import { Router, ActivatedRoute, Params } from '@angular/router';
import {Location, LocationStrategy, PathLocationStrategy} from '@angular/common';
import 'rxjs/add/operator/switchMap';
import { CountryCardService } from './country-card.service';
declare var L: any;
declare var Highcharts: any;
@Component({
selector: 'app-country-card',
templateUrl: './country-card.component.html',
styleUrls: ['./country-card.component.css']
})
export class CountryCardComponent implements OnInit {
mapObj: any;
lat: number = 4.565473550710278;
lng: number = 17.2265625;
zoom: number = 1;
maxZoom: number = 10;
minZoom: number = 0;
geoJsonLayer: any;
geoJsonData: any;
countriesList: any;
selectedCountryName: string;
selectedCountryId: string;
selectedCountryFlagId: string;
countryCardData: any;
countryCardMetaData: any;
countryCardDisplayData = [[], [], []]; // it contain objects i.e max year data, min year data, metadata
private sub: any;
shareTitle = "Share:";
fbInner = "<img src='./assets/images/custom-facebook.svg'>";
twitterInner = "<img src='./assets/images/custom-twitter.svg'>";
inInner = "<img src='./assets/images/custom-linkedin.svg'>";
linkToShare;
location: Location;
constructor(
private elementRef: ElementRef,
private route: ActivatedRoute,
private router: Router,
location: Location,
private countryCardService: CountryCardService) { this.location = location; }
ngOnInit() {
this.sub = this.route.params.subscribe(params => {
this.selectedCountryId = params['id'];
this.getCountryData();
this.getShapeFile();
this.getCountriesList();
});
}
ngOnDestroy() {
this.sub.unsubscribe();
}
getShapeFile() {
this.countryCardService
.getShapeFile()
.then(responseObj => {
this.geoJsonData = responseObj;
this.loadmap();
this.loadlayer();
});
}
getCountriesList() {
this.countryCardService
.getCountryList()
.then(responseObj => {
this.countriesList = this.countryCardService.getSortedData(responseObj.features); // first sort the response object
});
}
// country data
getCountryData() {
this.countryCardService
.getCountryCardData().subscribe(
data => {
let yearContiner = [];
this.countryCardData = data[0];
this.countryCardMetaData = data[1];
data[0].filter(e => { // get unique year
if (yearContiner.indexOf(e.year) == -1) {
yearContiner.push(e.year);
}
});
yearContiner.sort(function (a, b) { return b - a });
this.countryCardDisplayData = [[], [], []];
this.countryCardData.filter(i => {
if ((i.year == yearContiner[0] && i.countryISO == this.selectedCountryId) || (i.year == yearContiner[yearContiner.length - 1] && i.countryISO == this.selectedCountryId)) {
if ((i.year == yearContiner[yearContiner.length - 1])) {
this.countryCardDisplayData[1].push(i);
}
if ((i.year == yearContiner[0])) {
this.countryCardDisplayData[0].push(i);
}
}
});
this.countryCardMetaData.filter(k => {
if (k.countryISO == this.selectedCountryId) {
this.countryCardDisplayData[2].push(k);
}
});
this.loadSpiderChart();
}
);
}
// init the leaflet object
loadmap() {
if (this.mapObj != null) return false;
this.mapObj = new L.Map('map-container', {
center: new L.LatLng(this.lat, this.lng),
zoom: this.zoom,
minZoom: this.minZoom,
maxZoom: this.maxZoom,
doubleClickZoom: false
});
}
// load the geojson layer on leaflet
loadlayer() {
this.geoJsonLayer = L.geoJson(this.geoJsonData, {
style: (layer) => {
return {
color: '#eee',
weight: 1,
opacity: 1,
fillColor: layer.properties.ISO_3_CODE == this.selectedCountryId ? '#00a3e0' : '#ffffff',
fillOpacity: 1,
className: ''
};
},
onEachFeature: (layer: any, feature: any) => {
feature.bindTooltip(layer.properties.CNTRY_TERR, { // bind tooptip for on each layer (now leaflet core supported)
direction: 'auto',
sticky: true,
opacity: 0.9
});
feature.on({
mouseover: (e: any) => { // mouse over highlight style
e.target.setStyle({
weight: 2,
color: 'white',
dashArray: '',
fillOpacity: 0.7
});
},
mouseout: (e: any) => { // mouse out reset layer style
this.geoJsonLayer.resetStyle(e.target);
},
click: () => { // click on layer
}
});
}
});
this.mapObj.addLayer(this.geoJsonLayer);
// Zoom selected country
this.geoJsonData.features.filter((layer) => {
if (layer.properties.ISO_3_CODE == this.selectedCountryId) |
});
}
resetStyle(e: any) {
this.geoJsonLayer(e.target);
}
removeGeoLayer = function () {
if (this.geoJsonLayer != undefined) {
this.mapObj.removeLayer(this.geoJsonLayer);
}
}
// chart
loadSpiderChart() {
Highcharts.chart('spider-chart-container', {
chart: {
polar: true,
type: 'line',
spacingLeft: 10,
marginRight: 100
},
credits: {
enabled: false
},
exporting: {
enabled: false
},
title: {
text: '',//this.selectedCountryName,
x: 0,
y: 3
},
pane: {
size: '80%'
},
xAxis: {
categories: ['Regulatory authority', 'Regulatory mandate', 'Regulatory regime', 'Competition framework'],
tickmarkPlacement: 'on',
lineWidth: 2,
labels: {
// distance: 15,
step: 1,
style: {
fontSize: '13px',
fontFamily: 'Verdana, sans-serif',
width: 150,
}
}
},
tooltip: {
shared: true,
crosshairs: true
},
yAxis: {
//gridLineInterpolation: 'polygon',
lineWidth: 2,
"tickInterval": 1,
"min": 0,
"max": 30,
endOnTick: true,
showLastLabel: false
},
legend: {
align: 'left',
verticalAlign: 'top',
y: 3,
layout: 'vertical'
},
plotOptions: {
/* line: {
marker: {
enabled: true
}
} */
series: {
states: {
hover: {
enabled: true,
halo: {
size: 0
}
}
}
}
},
series: [{
name: '2007',
data: [Number(this.countryCardDisplayData[1][0].cluster1RA), Number(this.countryCardDisplayData[1][0].cluster2RM), Number(this.countryCardDisplayData[1][0].cluster3RR), Number(this.countryCardDisplayData[1][0].cluster4CF)],
pointPlacement: 'on',
color: '#318dde',
marker: {
symbol: 'circle',
fillColor: '#318dde',
lineWidth: 1,
lineColor: null // inherit from series
}
}, {
name: '2015',
data: [Number(this.countryCardDisplayData[0][0].cluster1RA), Number(this.countryCardDisplayData[0][0].cluster2RM), Number(this.countryCardDisplayData[0][0].cluster3RR), Number(this.countryCardDisplayData[0][0].cluster4CF)],
pointPlacement: 'on',
color: '#b33226',
marker: {
symbol: 'circle',
fillColor: '#b33226',
lineWidth: 2,
lineColor: null // inherit from series
}
}]
});
}
// to set by country in dropdown
isSelected(country: any) {
if (country.properties.iso_a3 == this.selectedCountryId) {
this.selectedCountryName = country.properties.name;
this.selectedCountryFlagId = country.properties.iso_a2.toLowerCase();
return true;
}
}
// change the country for country card event hadler
onSelect(selection: any) {
this.selectedCountryName = selection.properties.name;
this.selectedCountryId = selection.properties.iso_a3;
this.selectedCountryFlagId = selection.properties.iso_a2.toLowerCase();
this.router.navigate(['/country-card', selection.properties.iso_a3]);
}
prepareFormatForDownloadData() {
return [
{" ":"Country Name", "_": this.countryCardDisplayData[0][0].countryName},
{" ":"Mobile-cellular telephone subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].mc_subs},
{" ":"Fixed broadband subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].fb_subs},
{" ":"GNI per capita (in USD)", "_": this.countryCardDisplayData[2][0].gni},
{" ":"Region", "_": this.countryCardDisplayData[0][0].regionName},
{" ":"Tracker 2015 Rank", "_": this.countryCardDisplayData[0][0].rank},
{" ":"Tracker 2015 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Tracker 2007 Rank", "_": this.countryCardDisplayData[1][0].rank},
{" ":"Tracker 2007 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Cluster 1: REGULATORY AUTHORITY (Max Category Score: 20)", "_": this.countryCardDisplayData[0][0].cluster1RA},
{" ":"Cluster 2: REGULATORY MANDATE (Max Category Score: 22)", "_": this.countryCardDisplayData[0][0].cluster2RM},
{" ":"Cluster 3: REGULATORY REGIME (Max Category Score: 30)", "_": this.countryCardDisplayData[0][0].cluster3RR},
{" ":"Cluster 4: COMPETITION FRAMEWORK (Max Category Score: 28)", "_": this.countryCardDisplayData[0][0].cluster4CF}
];
}
// download data in csv format
download() {
let csvData = this.ConvertToCSV(this.prepareFormatForDownloadData(),"Country Card - " + this.selectedCountryName, true);
let a = document.createElement("a");
a.setAttribute('style', 'display:none;');
document.body.appendChild(a);
let blob = new Blob([csvData], { type: 'text/csv' });
let url = window.URL.createObjectURL(blob);
a.href = url;
a.download = this.selectedCountryName + "_Country_Card" + ".csv";
a.click();
}
ConvertToCSV(JSONData, ReportTitle, ShowLabel ) {
//If JSONData is not an object then JSON.parse will parse the JSON string in an Object
let arrData = typeof JSONData != 'object' ? JSON.parse(JSONData) : JSONData;
let CSV = '';
//Set Report title in first row or line
CSV += ReportTitle + '\r\n\n';
//This condition will generate the Label/Header
if (ShowLabel) {
let row = "";
//This loop will extract the label from 1st index of on array
for (let index in arrData[0]) {
//Now convert each value to string and comma-seprated
row += index + ',';
}
row = row.slice(0, -1);
//append Label row with line break
CSV += row + '\r\n';
}
//1st loop is to extract each row
for (let i = 0; i < arrData.length; i++) {
let row = "";
//2nd loop will extract each column and convert it in string comma-seprated
for (var index in arrData[i]) {
row += '"' + arrData[i][index] + '",';
}
row.slice(0, row.length - 1);
//add a line break after each row
CSV += row + '\r\n';
}
if (CSV == '') {
alert("Invalid data");
return;
}
return CSV;
}
// print page
print(): void {
let docprint = window.open("about:blank", "_blank");
let oTable = document.getElementById("print-section");
docprint.document.open();
docprint.document.write('<html><head><title>Country Card</title>');
docprint.document.write( "<link rel=\"stylesheet\" href=\"./assets/print/bootstrap.min.css\" type=\"text/css\"/>" );
docprint.document.write( "<link rel=\"stylesheet\" href=\"./assets/print/country-card-print.css\" type=\"text/css\" media=\"print\"/>" );
// docprint.document.write( "<link rel=\"stylesheet\" href=\"./assets/print/flag-icon-css/css/flag-icon.min.css\" type=\"text/css\" media=\"all\"/>" );
// docprint.document.write( "<link rel=\"stylesheet\" href=\"styles.bundle.css\" type=\"text/css\" media=\"print\"/>" );
docprint.document.write('</head><body><center>');
docprint.document.write(`
<div class="text-left">
<h3> ` + this.selectedCountryName + ` </h3>
</div>
`);
docprint.document.write(oTable.innerHTML);
docprint.document.write('</center></body></html>');
docprint.document.close();
docprint.onload=function(){
docprint.focus();
setTimeout( () => {
docprint.print();
docprint.close();
},1200);
}
}
}
| {
var currentBounds = L.geoJson(layer).getBounds();
this.mapObj.fitBounds(currentBounds);
setTimeout(() => {
let zoomDiff = this.mapObj.getZoom()
if (this.mapObj.getZoom() > 4) {
zoomDiff = 4;
}
this.mapObj.setView(this.mapObj.getCenter(), zoomDiff);
}, 800);
} | conditional_block |
country-card.component.ts | import { Component, OnInit, ElementRef } from '@angular/core';
import { Router, ActivatedRoute, Params } from '@angular/router';
import {Location, LocationStrategy, PathLocationStrategy} from '@angular/common';
import 'rxjs/add/operator/switchMap';
import { CountryCardService } from './country-card.service';
declare var L: any;
declare var Highcharts: any;
@Component({
selector: 'app-country-card',
templateUrl: './country-card.component.html',
styleUrls: ['./country-card.component.css']
})
export class CountryCardComponent implements OnInit {
mapObj: any;
lat: number = 4.565473550710278;
lng: number = 17.2265625;
zoom: number = 1;
maxZoom: number = 10;
minZoom: number = 0;
geoJsonLayer: any;
geoJsonData: any;
countriesList: any;
selectedCountryName: string;
selectedCountryId: string;
selectedCountryFlagId: string;
countryCardData: any;
countryCardMetaData: any;
countryCardDisplayData = [[], [], []]; // it contain objects i.e max year data, min year data, metadata
private sub: any;
shareTitle = "Share:";
fbInner = "<img src='./assets/images/custom-facebook.svg'>";
twitterInner = "<img src='./assets/images/custom-twitter.svg'>";
inInner = "<img src='./assets/images/custom-linkedin.svg'>";
linkToShare;
location: Location;
constructor(
private elementRef: ElementRef,
private route: ActivatedRoute,
private router: Router,
location: Location,
private countryCardService: CountryCardService) { this.location = location; }
ngOnInit() {
this.sub = this.route.params.subscribe(params => {
this.selectedCountryId = params['id'];
this.getCountryData();
this.getShapeFile();
this.getCountriesList();
});
}
ngOnDestroy() {
this.sub.unsubscribe();
}
getShapeFile() |
getCountriesList() {
this.countryCardService
.getCountryList()
.then(responseObj => {
this.countriesList = this.countryCardService.getSortedData(responseObj.features); // first sort the response object
});
}
// country data
getCountryData() {
this.countryCardService
.getCountryCardData().subscribe(
data => {
let yearContiner = [];
this.countryCardData = data[0];
this.countryCardMetaData = data[1];
data[0].filter(e => { // get unique year
if (yearContiner.indexOf(e.year) == -1) {
yearContiner.push(e.year);
}
});
yearContiner.sort(function (a, b) { return b - a });
this.countryCardDisplayData = [[], [], []];
this.countryCardData.filter(i => {
if ((i.year == yearContiner[0] && i.countryISO == this.selectedCountryId) || (i.year == yearContiner[yearContiner.length - 1] && i.countryISO == this.selectedCountryId)) {
if ((i.year == yearContiner[yearContiner.length - 1])) {
this.countryCardDisplayData[1].push(i);
}
if ((i.year == yearContiner[0])) {
this.countryCardDisplayData[0].push(i);
}
}
});
this.countryCardMetaData.filter(k => {
if (k.countryISO == this.selectedCountryId) {
this.countryCardDisplayData[2].push(k);
}
});
this.loadSpiderChart();
}
);
}
// init the leaflet object
loadmap() {
if (this.mapObj != null) return false;
this.mapObj = new L.Map('map-container', {
center: new L.LatLng(this.lat, this.lng),
zoom: this.zoom,
minZoom: this.minZoom,
maxZoom: this.maxZoom,
doubleClickZoom: false
});
}
// load the geojson layer on leaflet
loadlayer() {
this.geoJsonLayer = L.geoJson(this.geoJsonData, {
style: (layer) => {
return {
color: '#eee',
weight: 1,
opacity: 1,
fillColor: layer.properties.ISO_3_CODE == this.selectedCountryId ? '#00a3e0' : '#ffffff',
fillOpacity: 1,
className: ''
};
},
onEachFeature: (layer: any, feature: any) => {
feature.bindTooltip(layer.properties.CNTRY_TERR, { // bind tooptip for on each layer (now leaflet core supported)
direction: 'auto',
sticky: true,
opacity: 0.9
});
feature.on({
mouseover: (e: any) => { // mouse over highlight style
e.target.setStyle({
weight: 2,
color: 'white',
dashArray: '',
fillOpacity: 0.7
});
},
mouseout: (e: any) => { // mouse out reset layer style
this.geoJsonLayer.resetStyle(e.target);
},
click: () => { // click on layer
}
});
}
});
this.mapObj.addLayer(this.geoJsonLayer);
// Zoom selected country
this.geoJsonData.features.filter((layer) => {
if (layer.properties.ISO_3_CODE == this.selectedCountryId) {
var currentBounds = L.geoJson(layer).getBounds();
this.mapObj.fitBounds(currentBounds);
setTimeout(() => {
let zoomDiff = this.mapObj.getZoom()
if (this.mapObj.getZoom() > 4) {
zoomDiff = 4;
}
this.mapObj.setView(this.mapObj.getCenter(), zoomDiff);
}, 800);
}
});
}
resetStyle(e: any) {
this.geoJsonLayer(e.target);
}
removeGeoLayer = function () {
if (this.geoJsonLayer != undefined) {
this.mapObj.removeLayer(this.geoJsonLayer);
}
}
// chart
loadSpiderChart() {
Highcharts.chart('spider-chart-container', {
chart: {
polar: true,
type: 'line',
spacingLeft: 10,
marginRight: 100
},
credits: {
enabled: false
},
exporting: {
enabled: false
},
title: {
text: '',//this.selectedCountryName,
x: 0,
y: 3
},
pane: {
size: '80%'
},
xAxis: {
categories: ['Regulatory authority', 'Regulatory mandate', 'Regulatory regime', 'Competition framework'],
tickmarkPlacement: 'on',
lineWidth: 2,
labels: {
// distance: 15,
step: 1,
style: {
fontSize: '13px',
fontFamily: 'Verdana, sans-serif',
width: 150,
}
}
},
tooltip: {
shared: true,
crosshairs: true
},
yAxis: {
//gridLineInterpolation: 'polygon',
lineWidth: 2,
"tickInterval": 1,
"min": 0,
"max": 30,
endOnTick: true,
showLastLabel: false
},
legend: {
align: 'left',
verticalAlign: 'top',
y: 3,
layout: 'vertical'
},
plotOptions: {
/* line: {
marker: {
enabled: true
}
} */
series: {
states: {
hover: {
enabled: true,
halo: {
size: 0
}
}
}
}
},
series: [{
name: '2007',
data: [Number(this.countryCardDisplayData[1][0].cluster1RA), Number(this.countryCardDisplayData[1][0].cluster2RM), Number(this.countryCardDisplayData[1][0].cluster3RR), Number(this.countryCardDisplayData[1][0].cluster4CF)],
pointPlacement: 'on',
color: '#318dde',
marker: {
symbol: 'circle',
fillColor: '#318dde',
lineWidth: 1,
lineColor: null // inherit from series
}
}, {
name: '2015',
data: [Number(this.countryCardDisplayData[0][0].cluster1RA), Number(this.countryCardDisplayData[0][0].cluster2RM), Number(this.countryCardDisplayData[0][0].cluster3RR), Number(this.countryCardDisplayData[0][0].cluster4CF)],
pointPlacement: 'on',
color: '#b33226',
marker: {
symbol: 'circle',
fillColor: '#b33226',
lineWidth: 2,
lineColor: null // inherit from series
}
}]
});
}
// to set by country in dropdown
isSelected(country: any) {
if (country.properties.iso_a3 == this.selectedCountryId) {
this.selectedCountryName = country.properties.name;
this.selectedCountryFlagId = country.properties.iso_a2.toLowerCase();
return true;
}
}
// change the country for country card event hadler
onSelect(selection: any) {
this.selectedCountryName = selection.properties.name;
this.selectedCountryId = selection.properties.iso_a3;
this.selectedCountryFlagId = selection.properties.iso_a2.toLowerCase();
this.router.navigate(['/country-card', selection.properties.iso_a3]);
}
prepareFormatForDownloadData() {
return [
{" ":"Country Name", "_": this.countryCardDisplayData[0][0].countryName},
{" ":"Mobile-cellular telephone subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].mc_subs},
{" ":"Fixed broadband subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].fb_subs},
{" ":"GNI per capita (in USD)", "_": this.countryCardDisplayData[2][0].gni},
{" ":"Region", "_": this.countryCardDisplayData[0][0].regionName},
{" ":"Tracker 2015 Rank", "_": this.countryCardDisplayData[0][0].rank},
{" ":"Tracker 2015 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Tracker 2007 Rank", "_": this.countryCardDisplayData[1][0].rank},
{" ":"Tracker 2007 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Cluster 1: REGULATORY AUTHORITY (Max Category Score: 20)", "_": this.countryCardDisplayData[0][0].cluster1RA},
{" ":"Cluster 2: REGULATORY MANDATE (Max Category Score: 22)", "_": this.countryCardDisplayData[0][0].cluster2RM},
{" ":"Cluster 3: REGULATORY REGIME (Max Category Score: 30)", "_": this.countryCardDisplayData[0][0].cluster3RR},
{" ":"Cluster 4: COMPETITION FRAMEWORK (Max Category Score: 28)", "_": this.countryCardDisplayData[0][0].cluster4CF}
];
}
// download data in csv format
download() {
let csvData = this.ConvertToCSV(this.prepareFormatForDownloadData(),"Country Card - " + this.selectedCountryName, true);
let a = document.createElement("a");
a.setAttribute('style', 'display:none;');
document.body.appendChild(a);
let blob = new Blob([csvData], { type: 'text/csv' });
let url = window.URL.createObjectURL(blob);
a.href = url;
a.download = this.selectedCountryName + "_Country_Card" + ".csv";
a.click();
}
ConvertToCSV(JSONData, ReportTitle, ShowLabel ) {
//If JSONData is not an object then JSON.parse will parse the JSON string in an Object
let arrData = typeof JSONData != 'object' ? JSON.parse(JSONData) : JSONData;
let CSV = '';
//Set Report title in first row or line
CSV += ReportTitle + '\r\n\n';
//This condition will generate the Label/Header
if (ShowLabel) {
let row = "";
//This loop will extract the label from 1st index of on array
for (let index in arrData[0]) {
//Now convert each value to string and comma-seprated
row += index + ',';
}
row = row.slice(0, -1);
//append Label row with line break
CSV += row + '\r\n';
}
//1st loop is to extract each row
for (let i = 0; i < arrData.length; i++) {
let row = "";
//2nd loop will extract each column and convert it in string comma-seprated
for (var index in arrData[i]) {
row += '"' + arrData[i][index] + '",';
}
row.slice(0, row.length - 1);
//add a line break after each row
CSV += row + '\r\n';
}
if (CSV == '') {
alert("Invalid data");
return;
}
return CSV;
}
// print page
print(): void {
let docprint = window.open("about:blank", "_blank");
let oTable = document.getElementById("print-section");
docprint.document.open();
docprint.document.write('<html><head><title>Country Card</title>');
docprint.document.write( "<link rel=\"stylesheet\" href=\"./assets/print/bootstrap.min.css\" type=\"text/css\"/>" );
docprint.document.write( "<link rel=\"stylesheet\" href=\"./assets/print/country-card-print.css\" type=\"text/css\" media=\"print\"/>" );
// docprint.document.write( "<link rel=\"stylesheet\" href=\"./assets/print/flag-icon-css/css/flag-icon.min.css\" type=\"text/css\" media=\"all\"/>" );
// docprint.document.write( "<link rel=\"stylesheet\" href=\"styles.bundle.css\" type=\"text/css\" media=\"print\"/>" );
docprint.document.write('</head><body><center>');
docprint.document.write(`
<div class="text-left">
<h3> ` + this.selectedCountryName + ` </h3>
</div>
`);
docprint.document.write(oTable.innerHTML);
docprint.document.write('</center></body></html>');
docprint.document.close();
docprint.onload=function(){
docprint.focus();
setTimeout( () => {
docprint.print();
docprint.close();
},1200);
}
}
}
| {
this.countryCardService
.getShapeFile()
.then(responseObj => {
this.geoJsonData = responseObj;
this.loadmap();
this.loadlayer();
});
} | identifier_body |
country-card.component.ts | import { Component, OnInit, ElementRef } from '@angular/core';
import { Router, ActivatedRoute, Params } from '@angular/router';
import {Location, LocationStrategy, PathLocationStrategy} from '@angular/common';
import 'rxjs/add/operator/switchMap';
import { CountryCardService } from './country-card.service';
declare var L: any;
declare var Highcharts: any;
@Component({
selector: 'app-country-card',
templateUrl: './country-card.component.html',
styleUrls: ['./country-card.component.css']
})
export class CountryCardComponent implements OnInit {
mapObj: any;
lat: number = 4.565473550710278;
lng: number = 17.2265625;
zoom: number = 1;
maxZoom: number = 10;
minZoom: number = 0;
geoJsonLayer: any;
geoJsonData: any;
countriesList: any;
selectedCountryName: string;
selectedCountryId: string;
selectedCountryFlagId: string;
countryCardData: any;
countryCardMetaData: any;
countryCardDisplayData = [[], [], []]; // it contain objects i.e max year data, min year data, metadata
private sub: any;
shareTitle = "Share:";
fbInner = "<img src='./assets/images/custom-facebook.svg'>";
twitterInner = "<img src='./assets/images/custom-twitter.svg'>";
inInner = "<img src='./assets/images/custom-linkedin.svg'>";
linkToShare;
location: Location;
constructor(
private elementRef: ElementRef,
private route: ActivatedRoute,
private router: Router,
location: Location,
private countryCardService: CountryCardService) { this.location = location; }
ngOnInit() {
this.sub = this.route.params.subscribe(params => {
this.selectedCountryId = params['id'];
this.getCountryData();
this.getShapeFile();
this.getCountriesList();
});
}
ngOnDestroy() {
this.sub.unsubscribe();
}
getShapeFile() {
this.countryCardService
.getShapeFile()
.then(responseObj => {
this.geoJsonData = responseObj;
this.loadmap();
this.loadlayer();
});
}
getCountriesList() {
this.countryCardService
.getCountryList()
.then(responseObj => {
this.countriesList = this.countryCardService.getSortedData(responseObj.features); // first sort the response object
});
}
// country data
getCountryData() {
this.countryCardService
.getCountryCardData().subscribe(
data => {
let yearContiner = [];
this.countryCardData = data[0];
this.countryCardMetaData = data[1];
data[0].filter(e => { // get unique year
if (yearContiner.indexOf(e.year) == -1) {
yearContiner.push(e.year);
}
});
yearContiner.sort(function (a, b) { return b - a });
this.countryCardDisplayData = [[], [], []];
this.countryCardData.filter(i => {
if ((i.year == yearContiner[0] && i.countryISO == this.selectedCountryId) || (i.year == yearContiner[yearContiner.length - 1] && i.countryISO == this.selectedCountryId)) {
if ((i.year == yearContiner[yearContiner.length - 1])) {
this.countryCardDisplayData[1].push(i);
}
if ((i.year == yearContiner[0])) {
this.countryCardDisplayData[0].push(i);
}
}
});
this.countryCardMetaData.filter(k => {
if (k.countryISO == this.selectedCountryId) {
this.countryCardDisplayData[2].push(k);
}
});
this.loadSpiderChart();
}
);
}
// init the leaflet object
loadmap() {
if (this.mapObj != null) return false;
this.mapObj = new L.Map('map-container', {
center: new L.LatLng(this.lat, this.lng),
zoom: this.zoom,
minZoom: this.minZoom,
maxZoom: this.maxZoom,
doubleClickZoom: false
});
}
// load the geojson layer on leaflet
loadlayer() {
this.geoJsonLayer = L.geoJson(this.geoJsonData, {
style: (layer) => {
return {
color: '#eee',
weight: 1,
opacity: 1,
fillColor: layer.properties.ISO_3_CODE == this.selectedCountryId ? '#00a3e0' : '#ffffff',
fillOpacity: 1,
className: ''
};
},
onEachFeature: (layer: any, feature: any) => {
feature.bindTooltip(layer.properties.CNTRY_TERR, { // bind tooptip for on each layer (now leaflet core supported)
direction: 'auto',
sticky: true,
opacity: 0.9
});
feature.on({
mouseover: (e: any) => { // mouse over highlight style
e.target.setStyle({
weight: 2,
color: 'white',
dashArray: '',
fillOpacity: 0.7
});
},
mouseout: (e: any) => { // mouse out reset layer style
this.geoJsonLayer.resetStyle(e.target);
},
click: () => { // click on layer
}
});
}
});
this.mapObj.addLayer(this.geoJsonLayer);
// Zoom selected country
this.geoJsonData.features.filter((layer) => {
if (layer.properties.ISO_3_CODE == this.selectedCountryId) {
var currentBounds = L.geoJson(layer).getBounds();
this.mapObj.fitBounds(currentBounds);
setTimeout(() => {
let zoomDiff = this.mapObj.getZoom()
if (this.mapObj.getZoom() > 4) {
zoomDiff = 4;
}
this.mapObj.setView(this.mapObj.getCenter(), zoomDiff);
}, 800);
}
});
}
resetStyle(e: any) {
this.geoJsonLayer(e.target);
}
removeGeoLayer = function () {
if (this.geoJsonLayer != undefined) {
this.mapObj.removeLayer(this.geoJsonLayer);
}
}
// chart
loadSpiderChart() {
Highcharts.chart('spider-chart-container', {
chart: {
polar: true,
type: 'line',
spacingLeft: 10,
marginRight: 100
},
credits: {
enabled: false
},
exporting: {
enabled: false
},
title: {
text: '',//this.selectedCountryName,
x: 0,
y: 3
},
pane: {
size: '80%'
},
xAxis: {
categories: ['Regulatory authority', 'Regulatory mandate', 'Regulatory regime', 'Competition framework'],
tickmarkPlacement: 'on',
lineWidth: 2,
labels: {
// distance: 15,
step: 1,
style: {
fontSize: '13px',
fontFamily: 'Verdana, sans-serif',
width: 150,
}
}
},
tooltip: {
shared: true,
crosshairs: true
},
yAxis: {
//gridLineInterpolation: 'polygon',
lineWidth: 2,
"tickInterval": 1,
"min": 0,
"max": 30,
endOnTick: true,
showLastLabel: false
},
legend: {
align: 'left',
verticalAlign: 'top',
y: 3,
layout: 'vertical'
},
plotOptions: {
/* line: {
marker: {
enabled: true
}
} */
series: {
states: {
hover: {
enabled: true,
halo: {
size: 0
}
}
}
}
},
series: [{
name: '2007',
data: [Number(this.countryCardDisplayData[1][0].cluster1RA), Number(this.countryCardDisplayData[1][0].cluster2RM), Number(this.countryCardDisplayData[1][0].cluster3RR), Number(this.countryCardDisplayData[1][0].cluster4CF)],
pointPlacement: 'on',
color: '#318dde',
marker: {
symbol: 'circle',
fillColor: '#318dde',
lineWidth: 1,
lineColor: null // inherit from series
}
}, {
name: '2015',
data: [Number(this.countryCardDisplayData[0][0].cluster1RA), Number(this.countryCardDisplayData[0][0].cluster2RM), Number(this.countryCardDisplayData[0][0].cluster3RR), Number(this.countryCardDisplayData[0][0].cluster4CF)],
pointPlacement: 'on',
color: '#b33226',
marker: {
symbol: 'circle',
fillColor: '#b33226',
lineWidth: 2,
lineColor: null // inherit from series
}
}]
});
}
// to set by country in dropdown
isSelected(country: any) {
if (country.properties.iso_a3 == this.selectedCountryId) {
this.selectedCountryName = country.properties.name;
this.selectedCountryFlagId = country.properties.iso_a2.toLowerCase();
return true;
}
} |
// change the country for country card event hadler
onSelect(selection: any) {
this.selectedCountryName = selection.properties.name;
this.selectedCountryId = selection.properties.iso_a3;
this.selectedCountryFlagId = selection.properties.iso_a2.toLowerCase();
this.router.navigate(['/country-card', selection.properties.iso_a3]);
}
prepareFormatForDownloadData() {
return [
{" ":"Country Name", "_": this.countryCardDisplayData[0][0].countryName},
{" ":"Mobile-cellular telephone subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].mc_subs},
{" ":"Fixed broadband subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].fb_subs},
{" ":"GNI per capita (in USD)", "_": this.countryCardDisplayData[2][0].gni},
{" ":"Region", "_": this.countryCardDisplayData[0][0].regionName},
{" ":"Tracker 2015 Rank", "_": this.countryCardDisplayData[0][0].rank},
{" ":"Tracker 2015 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Tracker 2007 Rank", "_": this.countryCardDisplayData[1][0].rank},
{" ":"Tracker 2007 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Cluster 1: REGULATORY AUTHORITY (Max Category Score: 20)", "_": this.countryCardDisplayData[0][0].cluster1RA},
{" ":"Cluster 2: REGULATORY MANDATE (Max Category Score: 22)", "_": this.countryCardDisplayData[0][0].cluster2RM},
{" ":"Cluster 3: REGULATORY REGIME (Max Category Score: 30)", "_": this.countryCardDisplayData[0][0].cluster3RR},
{" ":"Cluster 4: COMPETITION FRAMEWORK (Max Category Score: 28)", "_": this.countryCardDisplayData[0][0].cluster4CF}
];
}
// download data in csv format
download() {
let csvData = this.ConvertToCSV(this.prepareFormatForDownloadData(),"Country Card - " + this.selectedCountryName, true);
let a = document.createElement("a");
a.setAttribute('style', 'display:none;');
document.body.appendChild(a);
let blob = new Blob([csvData], { type: 'text/csv' });
let url = window.URL.createObjectURL(blob);
a.href = url;
a.download = this.selectedCountryName + "_Country_Card" + ".csv";
a.click();
}
ConvertToCSV(JSONData, ReportTitle, ShowLabel ) {
//If JSONData is not an object then JSON.parse will parse the JSON string in an Object
let arrData = typeof JSONData != 'object' ? JSON.parse(JSONData) : JSONData;
let CSV = '';
//Set Report title in first row or line
CSV += ReportTitle + '\r\n\n';
//This condition will generate the Label/Header
if (ShowLabel) {
let row = "";
//This loop will extract the label from 1st index of on array
for (let index in arrData[0]) {
//Now convert each value to string and comma-seprated
row += index + ',';
}
row = row.slice(0, -1);
//append Label row with line break
CSV += row + '\r\n';
}
//1st loop is to extract each row
for (let i = 0; i < arrData.length; i++) {
let row = "";
//2nd loop will extract each column and convert it in string comma-seprated
for (var index in arrData[i]) {
row += '"' + arrData[i][index] + '",';
}
row.slice(0, row.length - 1);
//add a line break after each row
CSV += row + '\r\n';
}
if (CSV == '') {
alert("Invalid data");
return;
}
return CSV;
}
// print page
print(): void {
let docprint = window.open("about:blank", "_blank");
let oTable = document.getElementById("print-section");
docprint.document.open();
docprint.document.write('<html><head><title>Country Card</title>');
docprint.document.write( "<link rel=\"stylesheet\" href=\"./assets/print/bootstrap.min.css\" type=\"text/css\"/>" );
docprint.document.write( "<link rel=\"stylesheet\" href=\"./assets/print/country-card-print.css\" type=\"text/css\" media=\"print\"/>" );
// docprint.document.write( "<link rel=\"stylesheet\" href=\"./assets/print/flag-icon-css/css/flag-icon.min.css\" type=\"text/css\" media=\"all\"/>" );
// docprint.document.write( "<link rel=\"stylesheet\" href=\"styles.bundle.css\" type=\"text/css\" media=\"print\"/>" );
docprint.document.write('</head><body><center>');
docprint.document.write(`
<div class="text-left">
<h3> ` + this.selectedCountryName + ` </h3>
</div>
`);
docprint.document.write(oTable.innerHTML);
docprint.document.write('</center></body></html>');
docprint.document.close();
docprint.onload=function(){
docprint.focus();
setTimeout( () => {
docprint.print();
docprint.close();
},1200);
}
}
} | random_line_split | |
country-card.component.ts | import { Component, OnInit, ElementRef } from '@angular/core';
import { Router, ActivatedRoute, Params } from '@angular/router';
import {Location, LocationStrategy, PathLocationStrategy} from '@angular/common';
import 'rxjs/add/operator/switchMap';
import { CountryCardService } from './country-card.service';
declare var L: any;
declare var Highcharts: any;
@Component({
selector: 'app-country-card',
templateUrl: './country-card.component.html',
styleUrls: ['./country-card.component.css']
})
export class CountryCardComponent implements OnInit {
mapObj: any;
lat: number = 4.565473550710278;
lng: number = 17.2265625;
zoom: number = 1;
maxZoom: number = 10;
minZoom: number = 0;
geoJsonLayer: any;
geoJsonData: any;
countriesList: any;
selectedCountryName: string;
selectedCountryId: string;
selectedCountryFlagId: string;
countryCardData: any;
countryCardMetaData: any;
countryCardDisplayData = [[], [], []]; // it contain objects i.e max year data, min year data, metadata
private sub: any;
shareTitle = "Share:";
fbInner = "<img src='./assets/images/custom-facebook.svg'>";
twitterInner = "<img src='./assets/images/custom-twitter.svg'>";
inInner = "<img src='./assets/images/custom-linkedin.svg'>";
linkToShare;
location: Location;
constructor(
private elementRef: ElementRef,
private route: ActivatedRoute,
private router: Router,
location: Location,
private countryCardService: CountryCardService) { this.location = location; }
ngOnInit() {
this.sub = this.route.params.subscribe(params => {
this.selectedCountryId = params['id'];
this.getCountryData();
this.getShapeFile();
this.getCountriesList();
});
}
ngOnDestroy() {
this.sub.unsubscribe();
}
getShapeFile() {
this.countryCardService
.getShapeFile()
.then(responseObj => {
this.geoJsonData = responseObj;
this.loadmap();
this.loadlayer();
});
}
getCountriesList() {
this.countryCardService
.getCountryList()
.then(responseObj => {
this.countriesList = this.countryCardService.getSortedData(responseObj.features); // first sort the response object
});
}
// country data
getCountryData() {
this.countryCardService
.getCountryCardData().subscribe(
data => {
let yearContiner = [];
this.countryCardData = data[0];
this.countryCardMetaData = data[1];
data[0].filter(e => { // get unique year
if (yearContiner.indexOf(e.year) == -1) {
yearContiner.push(e.year);
}
});
yearContiner.sort(function (a, b) { return b - a });
this.countryCardDisplayData = [[], [], []];
this.countryCardData.filter(i => {
if ((i.year == yearContiner[0] && i.countryISO == this.selectedCountryId) || (i.year == yearContiner[yearContiner.length - 1] && i.countryISO == this.selectedCountryId)) {
if ((i.year == yearContiner[yearContiner.length - 1])) {
this.countryCardDisplayData[1].push(i);
}
if ((i.year == yearContiner[0])) {
this.countryCardDisplayData[0].push(i);
}
}
});
this.countryCardMetaData.filter(k => {
if (k.countryISO == this.selectedCountryId) {
this.countryCardDisplayData[2].push(k);
}
});
this.loadSpiderChart();
}
);
}
// init the leaflet object
loadmap() {
if (this.mapObj != null) return false;
this.mapObj = new L.Map('map-container', {
center: new L.LatLng(this.lat, this.lng),
zoom: this.zoom,
minZoom: this.minZoom,
maxZoom: this.maxZoom,
doubleClickZoom: false
});
}
// load the geojson layer on leaflet
loadlayer() {
this.geoJsonLayer = L.geoJson(this.geoJsonData, {
style: (layer) => {
return {
color: '#eee',
weight: 1,
opacity: 1,
fillColor: layer.properties.ISO_3_CODE == this.selectedCountryId ? '#00a3e0' : '#ffffff',
fillOpacity: 1,
className: ''
};
},
onEachFeature: (layer: any, feature: any) => {
feature.bindTooltip(layer.properties.CNTRY_TERR, { // bind tooptip for on each layer (now leaflet core supported)
direction: 'auto',
sticky: true,
opacity: 0.9
});
feature.on({
mouseover: (e: any) => { // mouse over highlight style
e.target.setStyle({
weight: 2,
color: 'white',
dashArray: '',
fillOpacity: 0.7
});
},
mouseout: (e: any) => { // mouse out reset layer style
this.geoJsonLayer.resetStyle(e.target);
},
click: () => { // click on layer
}
});
}
});
this.mapObj.addLayer(this.geoJsonLayer);
// Zoom selected country
this.geoJsonData.features.filter((layer) => {
if (layer.properties.ISO_3_CODE == this.selectedCountryId) {
var currentBounds = L.geoJson(layer).getBounds();
this.mapObj.fitBounds(currentBounds);
setTimeout(() => {
let zoomDiff = this.mapObj.getZoom()
if (this.mapObj.getZoom() > 4) {
zoomDiff = 4;
}
this.mapObj.setView(this.mapObj.getCenter(), zoomDiff);
}, 800);
}
});
}
| (e: any) {
this.geoJsonLayer(e.target);
}
removeGeoLayer = function () {
if (this.geoJsonLayer != undefined) {
this.mapObj.removeLayer(this.geoJsonLayer);
}
}
// chart
loadSpiderChart() {
Highcharts.chart('spider-chart-container', {
chart: {
polar: true,
type: 'line',
spacingLeft: 10,
marginRight: 100
},
credits: {
enabled: false
},
exporting: {
enabled: false
},
title: {
text: '',//this.selectedCountryName,
x: 0,
y: 3
},
pane: {
size: '80%'
},
xAxis: {
categories: ['Regulatory authority', 'Regulatory mandate', 'Regulatory regime', 'Competition framework'],
tickmarkPlacement: 'on',
lineWidth: 2,
labels: {
// distance: 15,
step: 1,
style: {
fontSize: '13px',
fontFamily: 'Verdana, sans-serif',
width: 150,
}
}
},
tooltip: {
shared: true,
crosshairs: true
},
yAxis: {
//gridLineInterpolation: 'polygon',
lineWidth: 2,
"tickInterval": 1,
"min": 0,
"max": 30,
endOnTick: true,
showLastLabel: false
},
legend: {
align: 'left',
verticalAlign: 'top',
y: 3,
layout: 'vertical'
},
plotOptions: {
/* line: {
marker: {
enabled: true
}
} */
series: {
states: {
hover: {
enabled: true,
halo: {
size: 0
}
}
}
}
},
series: [{
name: '2007',
data: [Number(this.countryCardDisplayData[1][0].cluster1RA), Number(this.countryCardDisplayData[1][0].cluster2RM), Number(this.countryCardDisplayData[1][0].cluster3RR), Number(this.countryCardDisplayData[1][0].cluster4CF)],
pointPlacement: 'on',
color: '#318dde',
marker: {
symbol: 'circle',
fillColor: '#318dde',
lineWidth: 1,
lineColor: null // inherit from series
}
}, {
name: '2015',
data: [Number(this.countryCardDisplayData[0][0].cluster1RA), Number(this.countryCardDisplayData[0][0].cluster2RM), Number(this.countryCardDisplayData[0][0].cluster3RR), Number(this.countryCardDisplayData[0][0].cluster4CF)],
pointPlacement: 'on',
color: '#b33226',
marker: {
symbol: 'circle',
fillColor: '#b33226',
lineWidth: 2,
lineColor: null // inherit from series
}
}]
});
}
// to set by country in dropdown
isSelected(country: any) {
if (country.properties.iso_a3 == this.selectedCountryId) {
this.selectedCountryName = country.properties.name;
this.selectedCountryFlagId = country.properties.iso_a2.toLowerCase();
return true;
}
}
// change the country for country card event hadler
onSelect(selection: any) {
this.selectedCountryName = selection.properties.name;
this.selectedCountryId = selection.properties.iso_a3;
this.selectedCountryFlagId = selection.properties.iso_a2.toLowerCase();
this.router.navigate(['/country-card', selection.properties.iso_a3]);
}
prepareFormatForDownloadData() {
return [
{" ":"Country Name", "_": this.countryCardDisplayData[0][0].countryName},
{" ":"Mobile-cellular telephone subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].mc_subs},
{" ":"Fixed broadband subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].fb_subs},
{" ":"GNI per capita (in USD)", "_": this.countryCardDisplayData[2][0].gni},
{" ":"Region", "_": this.countryCardDisplayData[0][0].regionName},
{" ":"Tracker 2015 Rank", "_": this.countryCardDisplayData[0][0].rank},
{" ":"Tracker 2015 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Tracker 2007 Rank", "_": this.countryCardDisplayData[1][0].rank},
{" ":"Tracker 2007 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Cluster 1: REGULATORY AUTHORITY (Max Category Score: 20)", "_": this.countryCardDisplayData[0][0].cluster1RA},
{" ":"Cluster 2: REGULATORY MANDATE (Max Category Score: 22)", "_": this.countryCardDisplayData[0][0].cluster2RM},
{" ":"Cluster 3: REGULATORY REGIME (Max Category Score: 30)", "_": this.countryCardDisplayData[0][0].cluster3RR},
{" ":"Cluster 4: COMPETITION FRAMEWORK (Max Category Score: 28)", "_": this.countryCardDisplayData[0][0].cluster4CF}
];
}
// download data in csv format
download() {
let csvData = this.ConvertToCSV(this.prepareFormatForDownloadData(),"Country Card - " + this.selectedCountryName, true);
let a = document.createElement("a");
a.setAttribute('style', 'display:none;');
document.body.appendChild(a);
let blob = new Blob([csvData], { type: 'text/csv' });
let url = window.URL.createObjectURL(blob);
a.href = url;
a.download = this.selectedCountryName + "_Country_Card" + ".csv";
a.click();
}
ConvertToCSV(JSONData, ReportTitle, ShowLabel ) {
//If JSONData is not an object then JSON.parse will parse the JSON string in an Object
let arrData = typeof JSONData != 'object' ? JSON.parse(JSONData) : JSONData;
let CSV = '';
//Set Report title in first row or line
CSV += ReportTitle + '\r\n\n';
//This condition will generate the Label/Header
if (ShowLabel) {
let row = "";
//This loop will extract the label from 1st index of on array
for (let index in arrData[0]) {
//Now convert each value to string and comma-seprated
row += index + ',';
}
row = row.slice(0, -1);
//append Label row with line break
CSV += row + '\r\n';
}
//1st loop is to extract each row
for (let i = 0; i < arrData.length; i++) {
let row = "";
//2nd loop will extract each column and convert it in string comma-seprated
for (var index in arrData[i]) {
row += '"' + arrData[i][index] + '",';
}
row.slice(0, row.length - 1);
//add a line break after each row
CSV += row + '\r\n';
}
if (CSV == '') {
alert("Invalid data");
return;
}
return CSV;
}
// print page
print(): void {
let docprint = window.open("about:blank", "_blank");
let oTable = document.getElementById("print-section");
docprint.document.open();
docprint.document.write('<html><head><title>Country Card</title>');
docprint.document.write( "<link rel=\"stylesheet\" href=\"./assets/print/bootstrap.min.css\" type=\"text/css\"/>" );
docprint.document.write( "<link rel=\"stylesheet\" href=\"./assets/print/country-card-print.css\" type=\"text/css\" media=\"print\"/>" );
// docprint.document.write( "<link rel=\"stylesheet\" href=\"./assets/print/flag-icon-css/css/flag-icon.min.css\" type=\"text/css\" media=\"all\"/>" );
// docprint.document.write( "<link rel=\"stylesheet\" href=\"styles.bundle.css\" type=\"text/css\" media=\"print\"/>" );
docprint.document.write('</head><body><center>');
docprint.document.write(`
<div class="text-left">
<h3> ` + this.selectedCountryName + ` </h3>
</div>
`);
docprint.document.write(oTable.innerHTML);
docprint.document.write('</center></body></html>');
docprint.document.close();
docprint.onload=function(){
docprint.focus();
setTimeout( () => {
docprint.print();
docprint.close();
},1200);
}
}
}
| resetStyle | identifier_name |
pars_upload.rs | use crate::{
create_manager::models::BlobMetadata,
document_manager::{accept_job, check_in_document_handler, delete_document_handler},
models::{Document, FileSource, JobStatusResponse, UniqueDocumentIdentifier},
multipart_form_data::{collect_fields, Field},
state_manager::{with_state, JobStatusClient, StateManager},
storage_client::{models::StorageFile, AzureBlobStorage, StorageClient},
};
use search_client::models::{DocumentType, TerritoryType, TerritoryTypeParseError};
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, str::FromStr};
use uuid::Uuid;
use warp::{
http::{header, Method},
multipart::FormData,
Filter, Rejection, Reply,
};
pub fn update_handler(
state_manager: StateManager,
pars_origin: &str,
) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[header::AUTHORIZATION])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars" / String)
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(update_pars_handler)
.with(cors)
}
pub fn handler(
state_manager: StateManager,
pars_origin: &str,
) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[
header::AUTHORIZATION,
header::HeaderName::from_bytes(b"username").unwrap(),
])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars")
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(upload_pars_handler)
.with(cors)
}
async fn add_file_to_temporary_blob_storage(
_job_id: Uuid,
file_data: &[u8],
licence_number: &str,
) -> Result<StorageFile, SubmissionError> {
let storage_client = AzureBlobStorage::temporary();
let storage_file = storage_client
.add_file(file_data, licence_number, HashMap::new())
.await
.map_err(|e| SubmissionError::BlobStorageError {
message: format!("Problem talking to temporary blob storage: {:?}", e),
})?;
Ok(storage_file) | }
fn document_from_form_data(storage_file: StorageFile, metadata: BlobMetadata) -> Document {
Document {
id: metadata.file_name.to_string(),
name: metadata.title.to_string(),
document_type: DocumentType::Par,
author: metadata.author.to_string(),
products: metadata.product_names.to_vec_string(),
keywords: match metadata.keywords {
Some(a) => Some(a.to_vec_string()),
None => None,
},
pl_number: metadata.pl_number,
territory: metadata.territory,
active_substances: metadata.active_substances.to_vec_string(),
file_source: FileSource::TemporaryAzureBlobStorage,
file_path: storage_file.name,
}
}
async fn queue_pars_upload(
form_data: FormData,
uploader_email: String,
state_manager: impl JobStatusClient,
) -> Result<Vec<Uuid>, Rejection> {
let (metadatas, file_data) = read_pars_upload(form_data).await.map_err(|e| {
tracing::debug!("Error reading PARS upload: {:?}", e);
warp::reject::custom(e)
})?;
let mut job_ids = Vec::with_capacity(metadatas.len());
for metadata in metadatas {
let job_id = accept_job(&state_manager).await?.id;
job_ids.push(job_id);
let storage_file =
add_file_to_temporary_blob_storage(job_id, &file_data, &metadata.pl_number)
.await
.map_err(warp::reject::custom)?;
let document = document_from_form_data(storage_file, metadata);
check_in_document_handler(document, &state_manager, Some(uploader_email.clone())).await?;
}
Ok(job_ids)
}
async fn update_pars_handler(
existing_par_identifier: String,
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let delete = delete_document_handler(
UniqueDocumentIdentifier::MetadataStorageName(existing_par_identifier),
&state_manager,
Some(username.clone()),
)
.await?;
let upload = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UpdateResponse { delete, upload }))
}
async fn upload_pars_handler(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let job_ids = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UploadResponse { job_ids }))
}
async fn queue_upload_pars_job(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<Vec<Uuid>, Rejection> {
let request_id = Uuid::new_v4();
let span = tracing::info_span!("PARS upload", request_id = request_id.to_string().as_str());
let _enter = span.enter();
tracing::debug!("Received PARS submission");
tracing::info!("Uploader email: {}", username);
Ok(queue_pars_upload(form_data, username, state_manager).await?)
}
#[derive(Debug, Serialize)]
struct UploadResponse {
job_ids: Vec<Uuid>,
}
#[derive(Debug, Serialize)]
struct UpdateResponse {
delete: JobStatusResponse,
upload: Vec<Uuid>,
}
async fn read_pars_upload(
form_data: FormData,
) -> Result<(Vec<BlobMetadata>, Vec<u8>), SubmissionError> {
let fields = collect_fields(form_data)
.await
.map_err(|error| SubmissionError::UploadError { error })?;
let GroupedFields {
products,
file_name,
file_data,
} = groups_fields_by_product(fields)?;
let metadatas = products
.into_iter()
.map(|fields| product_form_data_to_blob_metadata(file_name.clone(), fields))
.collect::<Result<_, _>>()?;
Ok((metadatas, file_data))
}
#[derive(Debug)]
struct GroupedFields {
products: Vec<Vec<Field>>,
file_name: String,
file_data: Vec<u8>,
}
fn groups_fields_by_product(fields: Vec<Field>) -> Result<GroupedFields, SubmissionError> {
let mut products = Vec::new();
let mut file_field = None;
for field in fields {
if field.name == "file" {
file_field = Some(field.value);
continue;
}
if field.name == "product_name" {
products.push(vec![]);
}
match products.last_mut() {
Some(group) => {
group.push(field);
}
None => {
let group = vec![field];
products.push(group);
}
}
}
let file_name = file_field
.as_ref()
.and_then(|field| field.file_name())
.ok_or(SubmissionError::MissingField { name: "file" })?
.to_string();
let file_data = file_field
.and_then(|field| field.into_file_data())
.ok_or(SubmissionError::MissingField { name: "file" })?;
Ok(GroupedFields {
products,
file_name,
file_data,
})
}
fn product_form_data_to_blob_metadata(
file_name: String,
fields: Vec<Field>,
) -> Result<BlobMetadata, SubmissionError> {
let product_name = get_field_as_uppercase_string(&fields, "product_name")?;
let product_names = vec![product_name];
let title = get_field_as_uppercase_string(&fields, "title")?;
let pl_number = get_field_as_uppercase_string(&fields, "licence_number")?;
let active_substances = fields
.iter()
.filter(|field| field.name == "active_substance")
.filter_map(|field| field.value.value())
.map(|s| s.to_uppercase())
.collect::<Vec<String>>();
let territory = fields
.iter()
.find(|field| field.name == "territory")
.and_then(|field| field.value.value())
.map(|s| TerritoryType::from_str(s))
.transpose()?;
let author = "".to_string();
Ok(BlobMetadata::new(
file_name,
DocumentType::Par,
title,
pl_number,
territory,
product_names,
active_substances,
author,
None,
))
}
fn get_field_as_uppercase_string(
fields: &[Field],
field_name: &'static str,
) -> Result<String, SubmissionError> {
fields
.iter()
.find(|field| field.name == field_name)
.and_then(|field| field.value.value())
.ok_or(SubmissionError::MissingField { name: field_name })
.map(|s| s.to_uppercase())
}
#[derive(Debug)]
enum SubmissionError {
UploadError {
error: anyhow::Error,
},
BlobStorageError {
message: String, // should maybe be StorageClientError but that is not
// Send + Sync so then we can't implement warp::reject::Reject
},
MissingField {
name: &'static str,
},
UnknownTerritoryType {
error: TerritoryTypeParseError,
},
}
impl From<TerritoryTypeParseError> for SubmissionError {
fn from(error: TerritoryTypeParseError) -> Self {
SubmissionError::UnknownTerritoryType { error }
}
}
impl warp::reject::Reject for SubmissionError {}
#[derive(Debug, Serialize, Deserialize)]
struct Claims {
sub: String,
preferred_username: String,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::multipart_form_data::UploadFieldValue;
use pretty_assertions::assert_eq;
fn text_field(name: &str, value: &str) -> Field {
Field {
name: name.into(),
value: UploadFieldValue::Text {
value: value.into(),
},
}
}
#[test]
fn converts_form_data_to_metadata() {
let file_name = "file";
let result = product_form_data_to_blob_metadata(
file_name.into(),
vec![
text_field("product_name", "Feel good pills"),
text_field("active_substance", "Ibuprofen"),
text_field("active_substance", "Temazepam"),
text_field(
"title",
"Feel good pills Really Strong High Dose THR 12345/1234",
),
text_field("licence_number", "THR 12345/1234"),
text_field("territory", "UK"),
],
)
.unwrap();
assert_eq!(
result,
BlobMetadata {
file_name: file_name.into(),
doc_type: DocumentType::Par,
title: "FEEL GOOD PILLS REALLY STRONG HIGH DOSE THR 12345/1234".into(),
pl_number: "THR 12345/1234".into(),
territory: Some(TerritoryType::UK),
product_names: vec!["FEEL GOOD PILLS".into()].into(),
active_substances: vec!["IBUPROFEN".into(), "TEMAZEPAM".into()].into(),
author: "".into(),
keywords: None
}
)
}
} | random_line_split | |
pars_upload.rs | use crate::{
create_manager::models::BlobMetadata,
document_manager::{accept_job, check_in_document_handler, delete_document_handler},
models::{Document, FileSource, JobStatusResponse, UniqueDocumentIdentifier},
multipart_form_data::{collect_fields, Field},
state_manager::{with_state, JobStatusClient, StateManager},
storage_client::{models::StorageFile, AzureBlobStorage, StorageClient},
};
use search_client::models::{DocumentType, TerritoryType, TerritoryTypeParseError};
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, str::FromStr};
use uuid::Uuid;
use warp::{
http::{header, Method},
multipart::FormData,
Filter, Rejection, Reply,
};
pub fn update_handler(
state_manager: StateManager,
pars_origin: &str,
) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[header::AUTHORIZATION])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars" / String)
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(update_pars_handler)
.with(cors)
}
pub fn handler(
state_manager: StateManager,
pars_origin: &str,
) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[
header::AUTHORIZATION,
header::HeaderName::from_bytes(b"username").unwrap(),
])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars")
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(upload_pars_handler)
.with(cors)
}
async fn add_file_to_temporary_blob_storage(
_job_id: Uuid,
file_data: &[u8],
licence_number: &str,
) -> Result<StorageFile, SubmissionError> {
let storage_client = AzureBlobStorage::temporary();
let storage_file = storage_client
.add_file(file_data, licence_number, HashMap::new())
.await
.map_err(|e| SubmissionError::BlobStorageError {
message: format!("Problem talking to temporary blob storage: {:?}", e),
})?;
Ok(storage_file)
}
fn document_from_form_data(storage_file: StorageFile, metadata: BlobMetadata) -> Document {
Document {
id: metadata.file_name.to_string(),
name: metadata.title.to_string(),
document_type: DocumentType::Par,
author: metadata.author.to_string(),
products: metadata.product_names.to_vec_string(),
keywords: match metadata.keywords {
Some(a) => Some(a.to_vec_string()),
None => None,
},
pl_number: metadata.pl_number,
territory: metadata.territory,
active_substances: metadata.active_substances.to_vec_string(),
file_source: FileSource::TemporaryAzureBlobStorage,
file_path: storage_file.name,
}
}
async fn queue_pars_upload(
form_data: FormData,
uploader_email: String,
state_manager: impl JobStatusClient,
) -> Result<Vec<Uuid>, Rejection> {
let (metadatas, file_data) = read_pars_upload(form_data).await.map_err(|e| {
tracing::debug!("Error reading PARS upload: {:?}", e);
warp::reject::custom(e)
})?;
let mut job_ids = Vec::with_capacity(metadatas.len());
for metadata in metadatas {
let job_id = accept_job(&state_manager).await?.id;
job_ids.push(job_id);
let storage_file =
add_file_to_temporary_blob_storage(job_id, &file_data, &metadata.pl_number)
.await
.map_err(warp::reject::custom)?;
let document = document_from_form_data(storage_file, metadata);
check_in_document_handler(document, &state_manager, Some(uploader_email.clone())).await?;
}
Ok(job_ids)
}
async fn update_pars_handler(
existing_par_identifier: String,
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let delete = delete_document_handler(
UniqueDocumentIdentifier::MetadataStorageName(existing_par_identifier),
&state_manager,
Some(username.clone()),
)
.await?;
let upload = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UpdateResponse { delete, upload }))
}
async fn upload_pars_handler(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let job_ids = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UploadResponse { job_ids }))
}
async fn queue_upload_pars_job(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<Vec<Uuid>, Rejection> {
let request_id = Uuid::new_v4();
let span = tracing::info_span!("PARS upload", request_id = request_id.to_string().as_str());
let _enter = span.enter();
tracing::debug!("Received PARS submission");
tracing::info!("Uploader email: {}", username);
Ok(queue_pars_upload(form_data, username, state_manager).await?)
}
#[derive(Debug, Serialize)]
struct UploadResponse {
job_ids: Vec<Uuid>,
}
#[derive(Debug, Serialize)]
struct UpdateResponse {
delete: JobStatusResponse,
upload: Vec<Uuid>,
}
async fn read_pars_upload(
form_data: FormData,
) -> Result<(Vec<BlobMetadata>, Vec<u8>), SubmissionError> {
let fields = collect_fields(form_data)
.await
.map_err(|error| SubmissionError::UploadError { error })?;
let GroupedFields {
products,
file_name,
file_data,
} = groups_fields_by_product(fields)?;
let metadatas = products
.into_iter()
.map(|fields| product_form_data_to_blob_metadata(file_name.clone(), fields))
.collect::<Result<_, _>>()?;
Ok((metadatas, file_data))
}
#[derive(Debug)]
struct GroupedFields {
products: Vec<Vec<Field>>,
file_name: String,
file_data: Vec<u8>,
}
fn groups_fields_by_product(fields: Vec<Field>) -> Result<GroupedFields, SubmissionError> |
fn product_form_data_to_blob_metadata(
file_name: String,
fields: Vec<Field>,
) -> Result<BlobMetadata, SubmissionError> {
let product_name = get_field_as_uppercase_string(&fields, "product_name")?;
let product_names = vec![product_name];
let title = get_field_as_uppercase_string(&fields, "title")?;
let pl_number = get_field_as_uppercase_string(&fields, "licence_number")?;
let active_substances = fields
.iter()
.filter(|field| field.name == "active_substance")
.filter_map(|field| field.value.value())
.map(|s| s.to_uppercase())
.collect::<Vec<String>>();
let territory = fields
.iter()
.find(|field| field.name == "territory")
.and_then(|field| field.value.value())
.map(|s| TerritoryType::from_str(s))
.transpose()?;
let author = "".to_string();
Ok(BlobMetadata::new(
file_name,
DocumentType::Par,
title,
pl_number,
territory,
product_names,
active_substances,
author,
None,
))
}
fn get_field_as_uppercase_string(
fields: &[Field],
field_name: &'static str,
) -> Result<String, SubmissionError> {
fields
.iter()
.find(|field| field.name == field_name)
.and_then(|field| field.value.value())
.ok_or(SubmissionError::MissingField { name: field_name })
.map(|s| s.to_uppercase())
}
#[derive(Debug)]
enum SubmissionError {
UploadError {
error: anyhow::Error,
},
BlobStorageError {
message: String, // should maybe be StorageClientError but that is not
// Send + Sync so then we can't implement warp::reject::Reject
},
MissingField {
name: &'static str,
},
UnknownTerritoryType {
error: TerritoryTypeParseError,
},
}
impl From<TerritoryTypeParseError> for SubmissionError {
fn from(error: TerritoryTypeParseError) -> Self {
SubmissionError::UnknownTerritoryType { error }
}
}
impl warp::reject::Reject for SubmissionError {}
#[derive(Debug, Serialize, Deserialize)]
struct Claims {
sub: String,
preferred_username: String,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::multipart_form_data::UploadFieldValue;
use pretty_assertions::assert_eq;
fn text_field(name: &str, value: &str) -> Field {
Field {
name: name.into(),
value: UploadFieldValue::Text {
value: value.into(),
},
}
}
#[test]
fn converts_form_data_to_metadata() {
let file_name = "file";
let result = product_form_data_to_blob_metadata(
file_name.into(),
vec![
text_field("product_name", "Feel good pills"),
text_field("active_substance", "Ibuprofen"),
text_field("active_substance", "Temazepam"),
text_field(
"title",
"Feel good pills Really Strong High Dose THR 12345/1234",
),
text_field("licence_number", "THR 12345/1234"),
text_field("territory", "UK"),
],
)
.unwrap();
assert_eq!(
result,
BlobMetadata {
file_name: file_name.into(),
doc_type: DocumentType::Par,
title: "FEEL GOOD PILLS REALLY STRONG HIGH DOSE THR 12345/1234".into(),
pl_number: "THR 12345/1234".into(),
territory: Some(TerritoryType::UK),
product_names: vec!["FEEL GOOD PILLS".into()].into(),
active_substances: vec!["IBUPROFEN".into(), "TEMAZEPAM".into()].into(),
author: "".into(),
keywords: None
}
)
}
}
| {
let mut products = Vec::new();
let mut file_field = None;
for field in fields {
if field.name == "file" {
file_field = Some(field.value);
continue;
}
if field.name == "product_name" {
products.push(vec![]);
}
match products.last_mut() {
Some(group) => {
group.push(field);
}
None => {
let group = vec![field];
products.push(group);
}
}
}
let file_name = file_field
.as_ref()
.and_then(|field| field.file_name())
.ok_or(SubmissionError::MissingField { name: "file" })?
.to_string();
let file_data = file_field
.and_then(|field| field.into_file_data())
.ok_or(SubmissionError::MissingField { name: "file" })?;
Ok(GroupedFields {
products,
file_name,
file_data,
})
} | identifier_body |
pars_upload.rs | use crate::{
create_manager::models::BlobMetadata,
document_manager::{accept_job, check_in_document_handler, delete_document_handler},
models::{Document, FileSource, JobStatusResponse, UniqueDocumentIdentifier},
multipart_form_data::{collect_fields, Field},
state_manager::{with_state, JobStatusClient, StateManager},
storage_client::{models::StorageFile, AzureBlobStorage, StorageClient},
};
use search_client::models::{DocumentType, TerritoryType, TerritoryTypeParseError};
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, str::FromStr};
use uuid::Uuid;
use warp::{
http::{header, Method},
multipart::FormData,
Filter, Rejection, Reply,
};
pub fn update_handler(
state_manager: StateManager,
pars_origin: &str,
) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[header::AUTHORIZATION])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars" / String)
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(update_pars_handler)
.with(cors)
}
pub fn handler(
state_manager: StateManager,
pars_origin: &str,
) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[
header::AUTHORIZATION,
header::HeaderName::from_bytes(b"username").unwrap(),
])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars")
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(upload_pars_handler)
.with(cors)
}
async fn add_file_to_temporary_blob_storage(
_job_id: Uuid,
file_data: &[u8],
licence_number: &str,
) -> Result<StorageFile, SubmissionError> {
let storage_client = AzureBlobStorage::temporary();
let storage_file = storage_client
.add_file(file_data, licence_number, HashMap::new())
.await
.map_err(|e| SubmissionError::BlobStorageError {
message: format!("Problem talking to temporary blob storage: {:?}", e),
})?;
Ok(storage_file)
}
fn document_from_form_data(storage_file: StorageFile, metadata: BlobMetadata) -> Document {
Document {
id: metadata.file_name.to_string(),
name: metadata.title.to_string(),
document_type: DocumentType::Par,
author: metadata.author.to_string(),
products: metadata.product_names.to_vec_string(),
keywords: match metadata.keywords {
Some(a) => Some(a.to_vec_string()),
None => None,
},
pl_number: metadata.pl_number,
territory: metadata.territory,
active_substances: metadata.active_substances.to_vec_string(),
file_source: FileSource::TemporaryAzureBlobStorage,
file_path: storage_file.name,
}
}
async fn queue_pars_upload(
form_data: FormData,
uploader_email: String,
state_manager: impl JobStatusClient,
) -> Result<Vec<Uuid>, Rejection> {
let (metadatas, file_data) = read_pars_upload(form_data).await.map_err(|e| {
tracing::debug!("Error reading PARS upload: {:?}", e);
warp::reject::custom(e)
})?;
let mut job_ids = Vec::with_capacity(metadatas.len());
for metadata in metadatas {
let job_id = accept_job(&state_manager).await?.id;
job_ids.push(job_id);
let storage_file =
add_file_to_temporary_blob_storage(job_id, &file_data, &metadata.pl_number)
.await
.map_err(warp::reject::custom)?;
let document = document_from_form_data(storage_file, metadata);
check_in_document_handler(document, &state_manager, Some(uploader_email.clone())).await?;
}
Ok(job_ids)
}
async fn update_pars_handler(
existing_par_identifier: String,
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let delete = delete_document_handler(
UniqueDocumentIdentifier::MetadataStorageName(existing_par_identifier),
&state_manager,
Some(username.clone()),
)
.await?;
let upload = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UpdateResponse { delete, upload }))
}
async fn upload_pars_handler(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let job_ids = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UploadResponse { job_ids }))
}
async fn queue_upload_pars_job(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<Vec<Uuid>, Rejection> {
let request_id = Uuid::new_v4();
let span = tracing::info_span!("PARS upload", request_id = request_id.to_string().as_str());
let _enter = span.enter();
tracing::debug!("Received PARS submission");
tracing::info!("Uploader email: {}", username);
Ok(queue_pars_upload(form_data, username, state_manager).await?)
}
#[derive(Debug, Serialize)]
struct UploadResponse {
job_ids: Vec<Uuid>,
}
#[derive(Debug, Serialize)]
struct UpdateResponse {
delete: JobStatusResponse,
upload: Vec<Uuid>,
}
async fn read_pars_upload(
form_data: FormData,
) -> Result<(Vec<BlobMetadata>, Vec<u8>), SubmissionError> {
let fields = collect_fields(form_data)
.await
.map_err(|error| SubmissionError::UploadError { error })?;
let GroupedFields {
products,
file_name,
file_data,
} = groups_fields_by_product(fields)?;
let metadatas = products
.into_iter()
.map(|fields| product_form_data_to_blob_metadata(file_name.clone(), fields))
.collect::<Result<_, _>>()?;
Ok((metadatas, file_data))
}
#[derive(Debug)]
struct GroupedFields {
products: Vec<Vec<Field>>,
file_name: String,
file_data: Vec<u8>,
}
fn groups_fields_by_product(fields: Vec<Field>) -> Result<GroupedFields, SubmissionError> {
let mut products = Vec::new();
let mut file_field = None;
for field in fields {
if field.name == "file" {
file_field = Some(field.value);
continue;
}
if field.name == "product_name" {
products.push(vec![]);
}
match products.last_mut() {
Some(group) => |
None => {
let group = vec![field];
products.push(group);
}
}
}
let file_name = file_field
.as_ref()
.and_then(|field| field.file_name())
.ok_or(SubmissionError::MissingField { name: "file" })?
.to_string();
let file_data = file_field
.and_then(|field| field.into_file_data())
.ok_or(SubmissionError::MissingField { name: "file" })?;
Ok(GroupedFields {
products,
file_name,
file_data,
})
}
fn product_form_data_to_blob_metadata(
file_name: String,
fields: Vec<Field>,
) -> Result<BlobMetadata, SubmissionError> {
let product_name = get_field_as_uppercase_string(&fields, "product_name")?;
let product_names = vec![product_name];
let title = get_field_as_uppercase_string(&fields, "title")?;
let pl_number = get_field_as_uppercase_string(&fields, "licence_number")?;
let active_substances = fields
.iter()
.filter(|field| field.name == "active_substance")
.filter_map(|field| field.value.value())
.map(|s| s.to_uppercase())
.collect::<Vec<String>>();
let territory = fields
.iter()
.find(|field| field.name == "territory")
.and_then(|field| field.value.value())
.map(|s| TerritoryType::from_str(s))
.transpose()?;
let author = "".to_string();
Ok(BlobMetadata::new(
file_name,
DocumentType::Par,
title,
pl_number,
territory,
product_names,
active_substances,
author,
None,
))
}
fn get_field_as_uppercase_string(
fields: &[Field],
field_name: &'static str,
) -> Result<String, SubmissionError> {
fields
.iter()
.find(|field| field.name == field_name)
.and_then(|field| field.value.value())
.ok_or(SubmissionError::MissingField { name: field_name })
.map(|s| s.to_uppercase())
}
#[derive(Debug)]
enum SubmissionError {
UploadError {
error: anyhow::Error,
},
BlobStorageError {
message: String, // should maybe be StorageClientError but that is not
// Send + Sync so then we can't implement warp::reject::Reject
},
MissingField {
name: &'static str,
},
UnknownTerritoryType {
error: TerritoryTypeParseError,
},
}
impl From<TerritoryTypeParseError> for SubmissionError {
fn from(error: TerritoryTypeParseError) -> Self {
SubmissionError::UnknownTerritoryType { error }
}
}
impl warp::reject::Reject for SubmissionError {}
#[derive(Debug, Serialize, Deserialize)]
struct Claims {
sub: String,
preferred_username: String,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::multipart_form_data::UploadFieldValue;
use pretty_assertions::assert_eq;
fn text_field(name: &str, value: &str) -> Field {
Field {
name: name.into(),
value: UploadFieldValue::Text {
value: value.into(),
},
}
}
#[test]
fn converts_form_data_to_metadata() {
let file_name = "file";
let result = product_form_data_to_blob_metadata(
file_name.into(),
vec![
text_field("product_name", "Feel good pills"),
text_field("active_substance", "Ibuprofen"),
text_field("active_substance", "Temazepam"),
text_field(
"title",
"Feel good pills Really Strong High Dose THR 12345/1234",
),
text_field("licence_number", "THR 12345/1234"),
text_field("territory", "UK"),
],
)
.unwrap();
assert_eq!(
result,
BlobMetadata {
file_name: file_name.into(),
doc_type: DocumentType::Par,
title: "FEEL GOOD PILLS REALLY STRONG HIGH DOSE THR 12345/1234".into(),
pl_number: "THR 12345/1234".into(),
territory: Some(TerritoryType::UK),
product_names: vec!["FEEL GOOD PILLS".into()].into(),
active_substances: vec!["IBUPROFEN".into(), "TEMAZEPAM".into()].into(),
author: "".into(),
keywords: None
}
)
}
}
| {
group.push(field);
} | conditional_block |
pars_upload.rs | use crate::{
create_manager::models::BlobMetadata,
document_manager::{accept_job, check_in_document_handler, delete_document_handler},
models::{Document, FileSource, JobStatusResponse, UniqueDocumentIdentifier},
multipart_form_data::{collect_fields, Field},
state_manager::{with_state, JobStatusClient, StateManager},
storage_client::{models::StorageFile, AzureBlobStorage, StorageClient},
};
use search_client::models::{DocumentType, TerritoryType, TerritoryTypeParseError};
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, str::FromStr};
use uuid::Uuid;
use warp::{
http::{header, Method},
multipart::FormData,
Filter, Rejection, Reply,
};
pub fn update_handler(
state_manager: StateManager,
pars_origin: &str,
) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[header::AUTHORIZATION])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars" / String)
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(update_pars_handler)
.with(cors)
}
pub fn handler(
state_manager: StateManager,
pars_origin: &str,
) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[
header::AUTHORIZATION,
header::HeaderName::from_bytes(b"username").unwrap(),
])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars")
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(upload_pars_handler)
.with(cors)
}
async fn add_file_to_temporary_blob_storage(
_job_id: Uuid,
file_data: &[u8],
licence_number: &str,
) -> Result<StorageFile, SubmissionError> {
let storage_client = AzureBlobStorage::temporary();
let storage_file = storage_client
.add_file(file_data, licence_number, HashMap::new())
.await
.map_err(|e| SubmissionError::BlobStorageError {
message: format!("Problem talking to temporary blob storage: {:?}", e),
})?;
Ok(storage_file)
}
fn document_from_form_data(storage_file: StorageFile, metadata: BlobMetadata) -> Document {
Document {
id: metadata.file_name.to_string(),
name: metadata.title.to_string(),
document_type: DocumentType::Par,
author: metadata.author.to_string(),
products: metadata.product_names.to_vec_string(),
keywords: match metadata.keywords {
Some(a) => Some(a.to_vec_string()),
None => None,
},
pl_number: metadata.pl_number,
territory: metadata.territory,
active_substances: metadata.active_substances.to_vec_string(),
file_source: FileSource::TemporaryAzureBlobStorage,
file_path: storage_file.name,
}
}
async fn queue_pars_upload(
form_data: FormData,
uploader_email: String,
state_manager: impl JobStatusClient,
) -> Result<Vec<Uuid>, Rejection> {
let (metadatas, file_data) = read_pars_upload(form_data).await.map_err(|e| {
tracing::debug!("Error reading PARS upload: {:?}", e);
warp::reject::custom(e)
})?;
let mut job_ids = Vec::with_capacity(metadatas.len());
for metadata in metadatas {
let job_id = accept_job(&state_manager).await?.id;
job_ids.push(job_id);
let storage_file =
add_file_to_temporary_blob_storage(job_id, &file_data, &metadata.pl_number)
.await
.map_err(warp::reject::custom)?;
let document = document_from_form_data(storage_file, metadata);
check_in_document_handler(document, &state_manager, Some(uploader_email.clone())).await?;
}
Ok(job_ids)
}
async fn update_pars_handler(
existing_par_identifier: String,
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let delete = delete_document_handler(
UniqueDocumentIdentifier::MetadataStorageName(existing_par_identifier),
&state_manager,
Some(username.clone()),
)
.await?;
let upload = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UpdateResponse { delete, upload }))
}
async fn upload_pars_handler(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let job_ids = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UploadResponse { job_ids }))
}
async fn queue_upload_pars_job(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<Vec<Uuid>, Rejection> {
let request_id = Uuid::new_v4();
let span = tracing::info_span!("PARS upload", request_id = request_id.to_string().as_str());
let _enter = span.enter();
tracing::debug!("Received PARS submission");
tracing::info!("Uploader email: {}", username);
Ok(queue_pars_upload(form_data, username, state_manager).await?)
}
#[derive(Debug, Serialize)]
struct | {
job_ids: Vec<Uuid>,
}
#[derive(Debug, Serialize)]
struct UpdateResponse {
delete: JobStatusResponse,
upload: Vec<Uuid>,
}
async fn read_pars_upload(
form_data: FormData,
) -> Result<(Vec<BlobMetadata>, Vec<u8>), SubmissionError> {
let fields = collect_fields(form_data)
.await
.map_err(|error| SubmissionError::UploadError { error })?;
let GroupedFields {
products,
file_name,
file_data,
} = groups_fields_by_product(fields)?;
let metadatas = products
.into_iter()
.map(|fields| product_form_data_to_blob_metadata(file_name.clone(), fields))
.collect::<Result<_, _>>()?;
Ok((metadatas, file_data))
}
#[derive(Debug)]
struct GroupedFields {
products: Vec<Vec<Field>>,
file_name: String,
file_data: Vec<u8>,
}
fn groups_fields_by_product(fields: Vec<Field>) -> Result<GroupedFields, SubmissionError> {
let mut products = Vec::new();
let mut file_field = None;
for field in fields {
if field.name == "file" {
file_field = Some(field.value);
continue;
}
if field.name == "product_name" {
products.push(vec![]);
}
match products.last_mut() {
Some(group) => {
group.push(field);
}
None => {
let group = vec![field];
products.push(group);
}
}
}
let file_name = file_field
.as_ref()
.and_then(|field| field.file_name())
.ok_or(SubmissionError::MissingField { name: "file" })?
.to_string();
let file_data = file_field
.and_then(|field| field.into_file_data())
.ok_or(SubmissionError::MissingField { name: "file" })?;
Ok(GroupedFields {
products,
file_name,
file_data,
})
}
fn product_form_data_to_blob_metadata(
file_name: String,
fields: Vec<Field>,
) -> Result<BlobMetadata, SubmissionError> {
let product_name = get_field_as_uppercase_string(&fields, "product_name")?;
let product_names = vec![product_name];
let title = get_field_as_uppercase_string(&fields, "title")?;
let pl_number = get_field_as_uppercase_string(&fields, "licence_number")?;
let active_substances = fields
.iter()
.filter(|field| field.name == "active_substance")
.filter_map(|field| field.value.value())
.map(|s| s.to_uppercase())
.collect::<Vec<String>>();
let territory = fields
.iter()
.find(|field| field.name == "territory")
.and_then(|field| field.value.value())
.map(|s| TerritoryType::from_str(s))
.transpose()?;
let author = "".to_string();
Ok(BlobMetadata::new(
file_name,
DocumentType::Par,
title,
pl_number,
territory,
product_names,
active_substances,
author,
None,
))
}
fn get_field_as_uppercase_string(
fields: &[Field],
field_name: &'static str,
) -> Result<String, SubmissionError> {
fields
.iter()
.find(|field| field.name == field_name)
.and_then(|field| field.value.value())
.ok_or(SubmissionError::MissingField { name: field_name })
.map(|s| s.to_uppercase())
}
#[derive(Debug)]
enum SubmissionError {
UploadError {
error: anyhow::Error,
},
BlobStorageError {
message: String, // should maybe be StorageClientError but that is not
// Send + Sync so then we can't implement warp::reject::Reject
},
MissingField {
name: &'static str,
},
UnknownTerritoryType {
error: TerritoryTypeParseError,
},
}
impl From<TerritoryTypeParseError> for SubmissionError {
fn from(error: TerritoryTypeParseError) -> Self {
SubmissionError::UnknownTerritoryType { error }
}
}
impl warp::reject::Reject for SubmissionError {}
#[derive(Debug, Serialize, Deserialize)]
struct Claims {
sub: String,
preferred_username: String,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::multipart_form_data::UploadFieldValue;
use pretty_assertions::assert_eq;
fn text_field(name: &str, value: &str) -> Field {
Field {
name: name.into(),
value: UploadFieldValue::Text {
value: value.into(),
},
}
}
#[test]
fn converts_form_data_to_metadata() {
let file_name = "file";
let result = product_form_data_to_blob_metadata(
file_name.into(),
vec![
text_field("product_name", "Feel good pills"),
text_field("active_substance", "Ibuprofen"),
text_field("active_substance", "Temazepam"),
text_field(
"title",
"Feel good pills Really Strong High Dose THR 12345/1234",
),
text_field("licence_number", "THR 12345/1234"),
text_field("territory", "UK"),
],
)
.unwrap();
assert_eq!(
result,
BlobMetadata {
file_name: file_name.into(),
doc_type: DocumentType::Par,
title: "FEEL GOOD PILLS REALLY STRONG HIGH DOSE THR 12345/1234".into(),
pl_number: "THR 12345/1234".into(),
territory: Some(TerritoryType::UK),
product_names: vec!["FEEL GOOD PILLS".into()].into(),
active_substances: vec!["IBUPROFEN".into(), "TEMAZEPAM".into()].into(),
author: "".into(),
keywords: None
}
)
}
}
| UploadResponse | identifier_name |
calc_CORL2017_Tables.py | # Programmed by Mojtaba Valipour @ Shiraz University - 2018 - vpcom.ir
# Based on the CVC code available in the github repository
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
## Auto table generator for the paper writing
## Tables are based on the CoRL 2017 Carla Team Paper
## This help me to have a very beautiful tables easier
## Output Support: Text, Latex, HTML
# System Test: Ubuntu 16.04 LTS
# Python: 2.7.14, conda 4.3.30
# Environment: perfectEnv.yaml = carlaSimPy2
# Example: python calc_CORL2017_Tables.py --path "./_benchmarks_results/test/" -v -n "CoRL-2017 Carla Paper"
### "./_benchmarks_results/test/" contains the following folders:
### CarlaPaperModel_Test01_CoRL2017_Town01
### CarlaPaperModel_Test01_CoRL2017_Town02
### CarlaPaperModel_Test02_CoRL2017_Town01
### CarlaPaperModel_Test02_CoRL2017_Town02
### CarlaPaperModel_Test03_CoRL2017_Town01
### CarlaPaperModel_Test03_CoRL2017_Town02
##### Each one includes measurements.csv, summary.csv and log_ files
import abc
import argparse
import math
import time
import numpy as np
import logging
import glob
# from tabulate import tabulate # only for the presentation
from carla.driving_benchmark.experiment_suites import CoRL2017
from carla.driving_benchmark.metrics import Metrics
from carla.driving_benchmark import results_printer
# Save tables as html file
htmlWrapper = """
<html>
<head>
<style>
table{
font-family: arial, sans-serif;
border-collapse: collapse;
width: 100%%;
}
td, th {
border: 1px solid #dddddd;
text-align: left;
padding: 8px;
}
tr:nth-child(even){ | </style>
<title> Self-Driving Car Research </title>
<p>By Mojtaba Valipour @ Shiraz University - 2018 </p>
<p><a href="http://vpcom.ir/">vpcom.ir</a></p>
</head>
<body><p>MODEL: %s</a></p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p></body>
</html>
"""
# Tested by latexbase.com
latexWrapper = """
\\documentclass{article}
\\usepackage{graphicx}
\\begin{document}
\\title{Self-Driving Car Research}
\\author{Mojtaba Valipour}
\\maketitle
\\section{Model : %s}
\\subsection{Tables}
\\subsubsection{Percentage of Success}
\\begin{center}
%s
Success rate for the agent (mean and standard deviation shown).
\\end{center}
\\subsubsection{Infractions : Straight}
\\begin{center}
%s
Average number of kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Infractions : One Turn}
\\begin{center}
%s
Average number of kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Infractions : Navigation}
\\begin{center}
%s
Average number of kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Infractions : Navigation With Dynamic Obstacles}
\\begin{center}
%s
Average number of kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Num Infractions : Straight}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\subsubsection{Num Infractions : One Turn}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\subsubsection{Num Infractions : Navigation}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\subsection{Num Infractions : Navigation With Dynamic Obstacles}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\end{document}
"""
if (__name__ == '__main__'):
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-v', '--verbose',
action='store_true',
dest='debug',
help='print debug information')
argparser.add_argument(
'-n', '--model_name',
metavar='T',
default='CoRL2017-Paper',
help='The name of the model for writing in the reports'
)
argparser.add_argument(
'-p', '--path',
metavar='P',
default='test',
help='Path to all log files'
)
args = argparser.parse_args()
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
logging.info('sarting the calculations %s', "0") #TODO: add time instead on zero
experiment_suite = CoRL2017("Town01")
metrics_object = Metrics(experiment_suite.metrics_parameters,
experiment_suite.dynamic_tasks)
# Improve readability by adding a weather dictionary
weather_name_dict = {1: 'Clear Noon', 3: 'After Rain Noon',
6: 'Heavy Rain Noon', 8: 'Clear Sunset',
4: 'Cloudy After Rain', 14: 'Soft Rain Sunset'}
# names for all the test logs
pathNames = {0:'_Test01_CoRL2017_Town01',
1:'_Test02_CoRL2017_Town01',
2:'_Test03_CoRL2017_Town01',
3:'_Test01_CoRL2017_Town02',
4:'_Test02_CoRL2017_Town02',
5:'_Test03_CoRL2017_Town02'}
tasksSuccessRate = {0: 'Straight', 1: 'One Turn', 2: 'Navigation', 3: 'Nav. Dynamic'} # number_of_episodes = len(list(metrics_summary['episodes_fully_completed'].items())[0][1])
tasksInfractions = {0: 'Opposite Lane', 1: 'Sidewalk', 2: 'Collision-static', 3: 'Collision-car', 4:'Collision-pedestrian'} #
states = {0: 'Training Conditions', 1: 'New Town', 2: 'New Weather', 3: 'New Town & Weather'}
statesSettings = {0: {'Path':[pathNames[0],pathNames[1],pathNames[2]], 'Weathers':experiment_suite.train_weathers},
1: {'Path':[pathNames[3],pathNames[4],pathNames[5]], 'Weathers':experiment_suite.train_weathers},
2: {'Path':[pathNames[0],pathNames[1],pathNames[2]], 'Weathers':experiment_suite.test_weathers},
3: {'Path':[pathNames[3],pathNames[4],pathNames[5]], 'Weathers':experiment_suite.train_weathers+experiment_suite.test_weathers}}
# In CoRL-2017 paper, infraction are only computed on the fourth task - "Navigation with dynamic obstacles".
dataSuccessRate = np.zeros((len(tasksSuccessRate),len(states))) # hold the whole table 1 data
dataInfractions = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 2 data
dataNumInfractions = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 3 data
dataSuccessRateSTD = np.zeros((len(tasksSuccessRate),len(states))) # hold the whole table 1 std data
dataInfractionsSTD = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 2 std data
dataNumInfractionsSTD = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 3 std data
# TABLE 1 - CoRL2017 Paper
metrics_to_average = [
'episodes_fully_completed',
'episodes_completion'
]
infraction_metrics = [
'collision_pedestrians',
'collision_vehicles',
'collision_other',
'intersection_offroad',
'intersection_otherlane'
]
# Configuration
table1Flag = True
table2Flag = True
table3Flag = True
# extract the start name of the folders
#TODO: Automatic this extraction process better and smartly
if args.path[-1]=='/':
addSlashFlag = False
allDir = glob.glob(args.path+'*')
else:
addSlashFlag = True
allDir = glob.glob(args.path+'/*')
extractedPath = allDir[0].split('/')[-1].replace(statesSettings[0]['Path'][0],'')
logging.info('Please make sure all the subdirectory of %s start with %s', args.path, extractedPath)
for sIdx, state in enumerate(states):
logging.debug('State: %s', state)
weathers = statesSettings[state]['Weathers']
allPath = statesSettings[state]['Path']
# This will make life easier for calculating std
dataListTable1 = [[] for i in range(len(tasksSuccessRate))]
dataListTable2 = [[[] for i in range(len(tasksSuccessRate))] for i in range(len(tasksInfractions))]
dataListTable3 = [[[] for i in range(len(tasksSuccessRate))] for i in range(len(tasksInfractions))]
#logging.debug("Data list table 2 init: %s",dataListTable2)
# calculate metrics : episodes_fully_completed
for p in allPath:
if addSlashFlag == True:
path = args.path + '/' + extractedPath + p
else:
path = args.path + extractedPath + p
metrics_summary = metrics_object.compute(path)
number_of_tasks = len(list(metrics_summary[metrics_to_average[0]].items())[0][1])
values = metrics_summary[metrics_to_average[0]] # episodes_fully_completed
if(table1Flag):
logging.debug("Working on table 1 ...")
metric_sum_values = np.zeros(number_of_tasks)
for w, tasks in values.items():
if w in set(weathers):
count = 0
for tIdx, t in enumerate(tasks):
#print(weathers[tIdx]) #float(sum(t)) / float(len(t)))
metric_sum_values[count] += (float(sum(t)) / float(len(t))) * 1.0 / float(len(weathers))
count += 1
# array's elements displacement, this is because of std/avg calculation
for j in range(number_of_tasks):
dataListTable1[j].append(metric_sum_values[j])
# table 2
if(table2Flag):
logging.debug("Working on table 2 and 3 ...")
for metricIdx, metric in enumerate(infraction_metrics):
values_driven = metrics_summary['driven_kilometers']
values = metrics_summary[metric]
metric_sum_values = np.zeros(number_of_tasks)
summed_driven_kilometers = np.zeros(number_of_tasks)
for items_metric, items_driven in zip(values.items(), values_driven.items()):
w = items_metric[0] # weather
tasks = items_metric[1]
tasks_driven = items_driven[1]
if w in set(weathers):
count = 0
for t, t_driven in zip(tasks, tasks_driven):
#logging.debug("t_driven: %s \n t: %s \n tSum: %f", t_driven, t, float(sum(t)))
metric_sum_values[count] += float(sum(t))
summed_driven_kilometers[count] += t_driven
count += 1
# array's elements displacement, this is because of std/avg calculation
for i in range(number_of_tasks):
dataListTable3[metricIdx][i].append(metric_sum_values[i])
if metric_sum_values[i] == 0:
dataListTable2[metricIdx][i].append(summed_driven_kilometers[i])
else:
dataListTable2[metricIdx][i].append(summed_driven_kilometers[i] / metric_sum_values[i])
#print(dataListTable2)
if(table1Flag):
# Accumulate the whole results and calculate std and avg
for tIdx, t in enumerate(dataListTable1):
dataSuccessRate[tIdx][sIdx] = np.mean(t)
dataSuccessRateSTD[tIdx][sIdx] = np.std(t)
#print(dataSuccessRate[tIdx][sIdx], ' +/- ', dataSuccessRateSTD[tIdx][sIdx])
if(table2Flag):
for metricIdx in range(len(infraction_metrics)):
tmp = dataListTable2[metricIdx]
for tIdx,t in enumerate(tmp):
# Accumulate the whole results and calculate std and avg
# fill in reverse because infraction matrics is reverse considering the table output
dataInfractions[tIdx][len(infraction_metrics)-1-metricIdx][sIdx] = np.mean(t)
dataInfractionsSTD[tIdx][len(infraction_metrics)-1-metricIdx][sIdx] = np.std(t)
if(table3Flag):
for metricIdx in range(len(infraction_metrics)):
tmp = dataListTable3[metricIdx]
for tIdx,t in enumerate(tmp):
# Accumulate the whole results and calculate std and avg
# fill in reverse because infraction matrics is reverse considering the table output
dataNumInfractions[tIdx][len(infraction_metrics)-1-metricIdx][sIdx] = np.mean(t)
dataNumInfractionsSTD[tIdx][len(infraction_metrics)-1-metricIdx][sIdx] = np.std(t)
# Open external files
fHtml = open(args.path+'/results.html','w')
fLaTex = open(args.path+'/results.laTex','w') # TODO: Fix this later
# This is not an actual laTex format, you should copy this results into a real one ;P
# This is only for a good presentation not for calculation
tableSRRows = []
tableSRHeaders = ['Tasks']
tableSRHeaders.extend(states.values())
allTablesListHtml = []
allTablesListLaTex = []
if(table1Flag):
for tIdx, t in enumerate(dataListTable1): # for each tasks
row = [tasksSuccessRate[tIdx]]
for sIdx, state in enumerate(states): # for each states
row.append("".join([str(round(dataSuccessRate[tIdx][sIdx],2)), ' +/- ', str(round(dataSuccessRateSTD[tIdx][sIdx],2))]))
tableSRRows.append(row)
print("\nPercentage of Success")
print(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'orgtbl'))
allTablesListHtml.append(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'html'))
allTablesListLaTex.append(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'latex'))
# This is only for a good presentation not for calculation
if(table2Flag):
for taskIdx in tasksSuccessRate: # for each tasks
tableSRRows = []
tableSRHeaders = ['Infractions']
tableSRHeaders.extend(states.values())
print("\n Task: %s \n" % tasksSuccessRate[taskIdx])
for metricIdx, metric in enumerate(infraction_metrics):
#print(metricIdx, metric)
row = [tasksInfractions[metricIdx]]
for sIdx, state in enumerate(states): # for each states
row.append("".join([str(round(dataInfractions[taskIdx][metricIdx][sIdx],2)), ' +/- ', str(round(dataInfractionsSTD[taskIdx][metricIdx][sIdx],2))]))
tableSRRows.append(row)
print(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'orgtbl'))
allTablesListHtml.append(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'html'))
allTablesListLaTex.append(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'latex'))
if(table3Flag):
for taskIdx in tasksSuccessRate: # for each tasks
tableSRRows = []
tableSRHeaders = ['Number of Infractions']
tableSRHeaders.extend(states.values())
print("\n Task: %s \n" % tasksSuccessRate[taskIdx])
for metricIdx, metric in enumerate(infraction_metrics):
#print(metricIdx, metric)
row = [tasksInfractions[metricIdx]]
for sIdx, state in enumerate(states): # for each states
row.append("".join([str(round(dataNumInfractions[taskIdx][metricIdx][sIdx],2)), ' +/- ', str(round(dataNumInfractionsSTD[taskIdx][metricIdx][sIdx],2))]))
tableSRRows.append(row)
print(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'orgtbl'))
allTablesListHtml.append(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'html'))
allTablesListLaTex.append(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'latex'))
# stream into the files
htmlBody = htmlWrapper % (args.model_name, allTablesListHtml[0], allTablesListHtml[1], allTablesListHtml[2], allTablesListHtml[3]
,allTablesListHtml[4], allTablesListHtml[5], allTablesListHtml[6], allTablesListHtml[7]
,allTablesListHtml[8]) # TODO: Check if unpacking using * works
latexBody = latexWrapper % (args.model_name, allTablesListLaTex[0], allTablesListLaTex[1], allTablesListLaTex[2], allTablesListLaTex[3]
,allTablesListLaTex[4], allTablesListLaTex[5], allTablesListLaTex[6], allTablesListLaTex[7]
,allTablesListLaTex[8]) # TODO: Check if unpacking using * works
fHtml.write(htmlBody)
fHtml.close()
fLaTex.write(latexBody.replace('+/-','${\pm}$'))
fLaTex.close()
#metrics_summary = metrics_object.compute(args.path)
# # print details
# print("")
# print("")
# print("----- Printing results for training weathers (Seen in Training) -----")
# print("")
# print("")
# results_printer.print_summary(metrics_summary, experiment_suite.train_weathers,
# args.path)
# print("")
# print("")
# print("----- Printing results for test weathers (Unseen in Training) -----")
# print("")
# print("")
# results_printer.print_summary(metrics_summary, experiment_suite.test_weathers,
# args.path) | background-color: #dddddd;
} | random_line_split |
calc_CORL2017_Tables.py | # Programmed by Mojtaba Valipour @ Shiraz University - 2018 - vpcom.ir
# Based on the CVC code available in the github repository
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
## Auto table generator for the paper writing
## Tables are based on the CoRL 2017 Carla Team Paper
## This help me to have a very beautiful tables easier
## Output Support: Text, Latex, HTML
# System Test: Ubuntu 16.04 LTS
# Python: 2.7.14, conda 4.3.30
# Environment: perfectEnv.yaml = carlaSimPy2
# Example: python calc_CORL2017_Tables.py --path "./_benchmarks_results/test/" -v -n "CoRL-2017 Carla Paper"
### "./_benchmarks_results/test/" contains the following folders:
### CarlaPaperModel_Test01_CoRL2017_Town01
### CarlaPaperModel_Test01_CoRL2017_Town02
### CarlaPaperModel_Test02_CoRL2017_Town01
### CarlaPaperModel_Test02_CoRL2017_Town02
### CarlaPaperModel_Test03_CoRL2017_Town01
### CarlaPaperModel_Test03_CoRL2017_Town02
##### Each one includes measurements.csv, summary.csv and log_ files
import abc
import argparse
import math
import time
import numpy as np
import logging
import glob
# from tabulate import tabulate # only for the presentation
from carla.driving_benchmark.experiment_suites import CoRL2017
from carla.driving_benchmark.metrics import Metrics
from carla.driving_benchmark import results_printer
# Save tables as html file
htmlWrapper = """
<html>
<head>
<style>
table{
font-family: arial, sans-serif;
border-collapse: collapse;
width: 100%%;
}
td, th {
border: 1px solid #dddddd;
text-align: left;
padding: 8px;
}
tr:nth-child(even){
background-color: #dddddd;
}
</style>
<title> Self-Driving Car Research </title>
<p>By Mojtaba Valipour @ Shiraz University - 2018 </p>
<p><a href="http://vpcom.ir/">vpcom.ir</a></p>
</head>
<body><p>MODEL: %s</a></p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p></body>
</html>
"""
# Tested by latexbase.com
latexWrapper = """
\\documentclass{article}
\\usepackage{graphicx}
\\begin{document}
\\title{Self-Driving Car Research}
\\author{Mojtaba Valipour}
\\maketitle
\\section{Model : %s}
\\subsection{Tables}
\\subsubsection{Percentage of Success}
\\begin{center}
%s
Success rate for the agent (mean and standard deviation shown).
\\end{center}
\\subsubsection{Infractions : Straight}
\\begin{center}
%s
Average number of kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Infractions : One Turn}
\\begin{center}
%s
Average number of kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Infractions : Navigation}
\\begin{center}
%s
Average number of kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Infractions : Navigation With Dynamic Obstacles}
\\begin{center}
%s
Average number of kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Num Infractions : Straight}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\subsubsection{Num Infractions : One Turn}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\subsubsection{Num Infractions : Navigation}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\subsection{Num Infractions : Navigation With Dynamic Obstacles}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\end{document}
"""
if (__name__ == '__main__'):
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-v', '--verbose',
action='store_true',
dest='debug',
help='print debug information')
argparser.add_argument(
'-n', '--model_name',
metavar='T',
default='CoRL2017-Paper',
help='The name of the model for writing in the reports'
)
argparser.add_argument(
'-p', '--path',
metavar='P',
default='test',
help='Path to all log files'
)
args = argparser.parse_args()
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
logging.info('sarting the calculations %s', "0") #TODO: add time instead on zero
experiment_suite = CoRL2017("Town01")
metrics_object = Metrics(experiment_suite.metrics_parameters,
experiment_suite.dynamic_tasks)
# Improve readability by adding a weather dictionary
weather_name_dict = {1: 'Clear Noon', 3: 'After Rain Noon',
6: 'Heavy Rain Noon', 8: 'Clear Sunset',
4: 'Cloudy After Rain', 14: 'Soft Rain Sunset'}
# names for all the test logs
pathNames = {0:'_Test01_CoRL2017_Town01',
1:'_Test02_CoRL2017_Town01',
2:'_Test03_CoRL2017_Town01',
3:'_Test01_CoRL2017_Town02',
4:'_Test02_CoRL2017_Town02',
5:'_Test03_CoRL2017_Town02'}
tasksSuccessRate = {0: 'Straight', 1: 'One Turn', 2: 'Navigation', 3: 'Nav. Dynamic'} # number_of_episodes = len(list(metrics_summary['episodes_fully_completed'].items())[0][1])
tasksInfractions = {0: 'Opposite Lane', 1: 'Sidewalk', 2: 'Collision-static', 3: 'Collision-car', 4:'Collision-pedestrian'} #
states = {0: 'Training Conditions', 1: 'New Town', 2: 'New Weather', 3: 'New Town & Weather'}
statesSettings = {0: {'Path':[pathNames[0],pathNames[1],pathNames[2]], 'Weathers':experiment_suite.train_weathers},
1: {'Path':[pathNames[3],pathNames[4],pathNames[5]], 'Weathers':experiment_suite.train_weathers},
2: {'Path':[pathNames[0],pathNames[1],pathNames[2]], 'Weathers':experiment_suite.test_weathers},
3: {'Path':[pathNames[3],pathNames[4],pathNames[5]], 'Weathers':experiment_suite.train_weathers+experiment_suite.test_weathers}}
# In CoRL-2017 paper, infraction are only computed on the fourth task - "Navigation with dynamic obstacles".
dataSuccessRate = np.zeros((len(tasksSuccessRate),len(states))) # hold the whole table 1 data
dataInfractions = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 2 data
dataNumInfractions = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 3 data
dataSuccessRateSTD = np.zeros((len(tasksSuccessRate),len(states))) # hold the whole table 1 std data
dataInfractionsSTD = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 2 std data
dataNumInfractionsSTD = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 3 std data
# TABLE 1 - CoRL2017 Paper
metrics_to_average = [
'episodes_fully_completed',
'episodes_completion'
]
infraction_metrics = [
'collision_pedestrians',
'collision_vehicles',
'collision_other',
'intersection_offroad',
'intersection_otherlane'
]
# Configuration
table1Flag = True
table2Flag = True
table3Flag = True
# extract the start name of the folders
#TODO: Automatic this extraction process better and smartly
if args.path[-1]=='/':
addSlashFlag = False
allDir = glob.glob(args.path+'*')
else:
addSlashFlag = True
allDir = glob.glob(args.path+'/*')
extractedPath = allDir[0].split('/')[-1].replace(statesSettings[0]['Path'][0],'')
logging.info('Please make sure all the subdirectory of %s start with %s', args.path, extractedPath)
for sIdx, state in enumerate(states):
logging.debug('State: %s', state)
weathers = statesSettings[state]['Weathers']
allPath = statesSettings[state]['Path']
# This will make life easier for calculating std
dataListTable1 = [[] for i in range(len(tasksSuccessRate))]
dataListTable2 = [[[] for i in range(len(tasksSuccessRate))] for i in range(len(tasksInfractions))]
dataListTable3 = [[[] for i in range(len(tasksSuccessRate))] for i in range(len(tasksInfractions))]
#logging.debug("Data list table 2 init: %s",dataListTable2)
# calculate metrics : episodes_fully_completed
for p in allPath:
if addSlashFlag == True:
path = args.path + '/' + extractedPath + p
else:
path = args.path + extractedPath + p
metrics_summary = metrics_object.compute(path)
number_of_tasks = len(list(metrics_summary[metrics_to_average[0]].items())[0][1])
values = metrics_summary[metrics_to_average[0]] # episodes_fully_completed
if(table1Flag):
logging.debug("Working on table 1 ...")
metric_sum_values = np.zeros(number_of_tasks)
for w, tasks in values.items():
if w in set(weathers):
count = 0
for tIdx, t in enumerate(tasks):
#print(weathers[tIdx]) #float(sum(t)) / float(len(t)))
metric_sum_values[count] += (float(sum(t)) / float(len(t))) * 1.0 / float(len(weathers))
count += 1
# array's elements displacement, this is because of std/avg calculation
for j in range(number_of_tasks):
dataListTable1[j].append(metric_sum_values[j])
# table 2
if(table2Flag):
logging.debug("Working on table 2 and 3 ...")
for metricIdx, metric in enumerate(infraction_metrics):
values_driven = metrics_summary['driven_kilometers']
values = metrics_summary[metric]
metric_sum_values = np.zeros(number_of_tasks)
summed_driven_kilometers = np.zeros(number_of_tasks)
for items_metric, items_driven in zip(values.items(), values_driven.items()):
w = items_metric[0] # weather
tasks = items_metric[1]
tasks_driven = items_driven[1]
if w in set(weathers):
count = 0
for t, t_driven in zip(tasks, tasks_driven):
#logging.debug("t_driven: %s \n t: %s \n tSum: %f", t_driven, t, float(sum(t)))
metric_sum_values[count] += float(sum(t))
summed_driven_kilometers[count] += t_driven
count += 1
# array's elements displacement, this is because of std/avg calculation
for i in range(number_of_tasks):
dataListTable3[metricIdx][i].append(metric_sum_values[i])
if metric_sum_values[i] == 0:
dataListTable2[metricIdx][i].append(summed_driven_kilometers[i])
else:
|
#print(dataListTable2)
if(table1Flag):
# Accumulate the whole results and calculate std and avg
for tIdx, t in enumerate(dataListTable1):
dataSuccessRate[tIdx][sIdx] = np.mean(t)
dataSuccessRateSTD[tIdx][sIdx] = np.std(t)
#print(dataSuccessRate[tIdx][sIdx], ' +/- ', dataSuccessRateSTD[tIdx][sIdx])
if(table2Flag):
for metricIdx in range(len(infraction_metrics)):
tmp = dataListTable2[metricIdx]
for tIdx,t in enumerate(tmp):
# Accumulate the whole results and calculate std and avg
# fill in reverse because infraction matrics is reverse considering the table output
dataInfractions[tIdx][len(infraction_metrics)-1-metricIdx][sIdx] = np.mean(t)
dataInfractionsSTD[tIdx][len(infraction_metrics)-1-metricIdx][sIdx] = np.std(t)
if(table3Flag):
for metricIdx in range(len(infraction_metrics)):
tmp = dataListTable3[metricIdx]
for tIdx,t in enumerate(tmp):
# Accumulate the whole results and calculate std and avg
# fill in reverse because infraction matrics is reverse considering the table output
dataNumInfractions[tIdx][len(infraction_metrics)-1-metricIdx][sIdx] = np.mean(t)
dataNumInfractionsSTD[tIdx][len(infraction_metrics)-1-metricIdx][sIdx] = np.std(t)
# Open external files
fHtml = open(args.path+'/results.html','w')
fLaTex = open(args.path+'/results.laTex','w') # TODO: Fix this later
# This is not an actual laTex format, you should copy this results into a real one ;P
# This is only for a good presentation not for calculation
tableSRRows = []
tableSRHeaders = ['Tasks']
tableSRHeaders.extend(states.values())
allTablesListHtml = []
allTablesListLaTex = []
if(table1Flag):
for tIdx, t in enumerate(dataListTable1): # for each tasks
row = [tasksSuccessRate[tIdx]]
for sIdx, state in enumerate(states): # for each states
row.append("".join([str(round(dataSuccessRate[tIdx][sIdx],2)), ' +/- ', str(round(dataSuccessRateSTD[tIdx][sIdx],2))]))
tableSRRows.append(row)
print("\nPercentage of Success")
print(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'orgtbl'))
allTablesListHtml.append(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'html'))
allTablesListLaTex.append(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'latex'))
# This is only for a good presentation not for calculation
if(table2Flag):
for taskIdx in tasksSuccessRate: # for each tasks
tableSRRows = []
tableSRHeaders = ['Infractions']
tableSRHeaders.extend(states.values())
print("\n Task: %s \n" % tasksSuccessRate[taskIdx])
for metricIdx, metric in enumerate(infraction_metrics):
#print(metricIdx, metric)
row = [tasksInfractions[metricIdx]]
for sIdx, state in enumerate(states): # for each states
row.append("".join([str(round(dataInfractions[taskIdx][metricIdx][sIdx],2)), ' +/- ', str(round(dataInfractionsSTD[taskIdx][metricIdx][sIdx],2))]))
tableSRRows.append(row)
print(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'orgtbl'))
allTablesListHtml.append(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'html'))
allTablesListLaTex.append(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'latex'))
if(table3Flag):
for taskIdx in tasksSuccessRate: # for each tasks
tableSRRows = []
tableSRHeaders = ['Number of Infractions']
tableSRHeaders.extend(states.values())
print("\n Task: %s \n" % tasksSuccessRate[taskIdx])
for metricIdx, metric in enumerate(infraction_metrics):
#print(metricIdx, metric)
row = [tasksInfractions[metricIdx]]
for sIdx, state in enumerate(states): # for each states
row.append("".join([str(round(dataNumInfractions[taskIdx][metricIdx][sIdx],2)), ' +/- ', str(round(dataNumInfractionsSTD[taskIdx][metricIdx][sIdx],2))]))
tableSRRows.append(row)
print(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'orgtbl'))
allTablesListHtml.append(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'html'))
allTablesListLaTex.append(tabulate(tableSRRows, headers=tableSRHeaders, tablefmt = 'latex'))
# stream into the files
htmlBody = htmlWrapper % (args.model_name, allTablesListHtml[0], allTablesListHtml[1], allTablesListHtml[2], allTablesListHtml[3]
,allTablesListHtml[4], allTablesListHtml[5], allTablesListHtml[6], allTablesListHtml[7]
,allTablesListHtml[8]) # TODO: Check if unpacking using * works
latexBody = latexWrapper % (args.model_name, allTablesListLaTex[0], allTablesListLaTex[1], allTablesListLaTex[2], allTablesListLaTex[3]
,allTablesListLaTex[4], allTablesListLaTex[5], allTablesListLaTex[6], allTablesListLaTex[7]
,allTablesListLaTex[8]) # TODO: Check if unpacking using * works
fHtml.write(htmlBody)
fHtml.close()
fLaTex.write(latexBody.replace('+/-','${\pm}$'))
fLaTex.close()
#metrics_summary = metrics_object.compute(args.path)
# # print details
# print("")
# print("")
# print("----- Printing results for training weathers (Seen in Training) -----")
# print("")
# print("")
# results_printer.print_summary(metrics_summary, experiment_suite.train_weathers,
# args.path)
# print("")
# print("")
# print("----- Printing results for test weathers (Unseen in Training) -----")
# print("")
# print("")
# results_printer.print_summary(metrics_summary, experiment_suite.test_weathers,
# args.path)
| dataListTable2[metricIdx][i].append(summed_driven_kilometers[i] / metric_sum_values[i]) | conditional_block |
sketch.js |
// Adding comments because I'm going to forget code
// also in case someone decides to use my code for some odd reason
// like at least use it correctly jeez,
// and change the names of those classes if you really don't want to credit me
//-----Variables-----//
///image variables:
////bunny directions
var bunnyLeft;
var bunnyRight;
var bunnyFront;
var bunnyBack;
////other in game object images
var carrotimg;
var floorimg;
var tomatoimg;
var grassimg;
////game over art
var bunnyEnd;
////font
var font;
///arrays
let spots = [];
let platforms = [];
///Constants for convenience
let WIDTH = 500;
let HEIGHT = 500;
let platformSize = 60;
let movement = platformSize + platformSize/6
let gameState = 0;
let score = 0;
let hiscore = 0;
///actual object variables
var bunnyvar;
var carrotvar;
var spotvar;
///preload, loading all images and font
function preload(){
bunnyLeft = loadImage("bunny/assets/bunnyLeft.png");
bunnyRight = loadImage("bunny/assets/bunnyRight.png");
bunnyFront = loadImage("bunny/assets/bunnyFront.png");
bunnyBack = loadImage("bunny/assets/bunnyBack.png");
carrotimg = loadImage("bunny/assets/carrot.png");
floorimg = loadImage("bunny/assets/floor.png");
tomatoimg = loadImage("bunny/assets/tomato.png");
grassimg = loadImage("bunny/assets/grass.png");
bunnyEnd = loadImage("bunny/assets/bunnyEnd.png");
font = loadFont('bunny/assets/Adelle_Reg.otf');
}
///set it up!
function setup(){
framerate = 20;
var myCanvas = createCanvas(WIDTH, HEIGHT);
myCanvas.parent("js-game");
var randomNumber = floor(random(0,3));
}
function draw(){
///Credit to class game template
if (gameState == 0){
startScreen();
} else if (gameState == 1){
update();
} else if (gameState == 2){
gameOver();
}
}
function startScreen(){
///Starting screen graphics & rules///
//background
imageMode(CORNER);
background(grassimg);
//title
textFont(font);
textSize(50);
fill(255, 170, 0, 200);
textAlign(CENTER);
text("Carrot Collector", WIDTH/2, HEIGHT/4);
//high score text:
fill(255, 200, 0, 200);
textSize(20);
text("High Score: " + hiscore, WIDTH/2, HEIGHT/4 + 30);
//overall description of game
fill(0, 100);
rect(0,185, WIDTH, 150);
fill(255, 200, 0, 200);
textAlign(LEFT);
textSize(20);
text("Instructions: You're a bunny", WIDTH/8, HEIGHT/2 - 20);
image(bunnyFront, 4*WIDTH/6+5, HEIGHT/2 - 45, 40, 40);
text("trying to collect carrots!", WIDTH/8, HEIGHT/2 + 20);
image(carrotimg, 3*WIDTH/5+5, HEIGHT/2, 30, 30);
text("Try to avoid the tomatoes, they hurt!", WIDTH/8, HEIGHT/2 + 60);
image(tomatoimg, 4*WIDTH/5+15, HEIGHT/2 + 40, 30, 30);
//instructions/technical rules
textAlign(CENTER);
fill(255, 170, 0, 200);
text("Use arrow keys to move around.", WIDTH/2, 3*HEIGHT/4);
textSize(25);
text("Press Space to Start!", WIDTH/2, 3*HEIGHT/4 + 30);
//this is to set up the platforms 3 x 3
//maybe I shouldn't have done this with an array, because
//every three they change their placement
for (let i = 0; i < 9; i++) {
let x = (platformSize + 10) * (i%3) + WIDTH/2 - WIDTH/5;
if(i < 3) {
let y = HEIGHT/2 - HEIGHT/5;
platforms[i] = new platform(x, y, platformSize);
}
else if (6 <= i) {
let y = HEIGHT/2 - HEIGHT/5 + (platformSize + 10);
platforms[i] = new platform(x, y, platformSize);
}
else if (3 <= i < 6) {
let y = HEIGHT/2 - HEIGHT/5 + (platformSize + 10) * 2;
platforms[i] = new platform(x, y, platformSize);
}
}
//an array of spots/tomatoes! altered some of class code to fit obstacles
spots = [];
for (let i = 0; i < 3; i++){ // Make a for() loop to create the desired number of Spots
// Add an index [i] to create multiple Spots
spots[i] = new Spot(WIDTH,
0, 0.1 * i + 1, 0);
}
//set up a bunny!
bunnyvar = new Bunny(WIDTH/2, HEIGHT/2, bunnyFront);
//set up a carrot!
carrotvar = new Carrot(floor(random(0,3)) * movement + WIDTH/2 - movement,
floor(random(0,3)) * movement + WIDTH/2 - movement);
//make sure the score is really 0!
score = 0;
}
function update(){
imageMode(CORNER);
//background update
background(grassimg);
noStroke();
//set up for background/information
for (let i = 0; i < platforms.length; i++){
platforms[i].display();
}
textFont(font);
textSize(40);
fill(255, 200, 0, 200);
textAlign(CENTER);
text("Score: " + score, WIDTH/2, 80);
textSize(20);
text("High Score: " + hiscore, WIDTH/2, 110);
//displaying bunny & carrot
bunnyvar.display();
carrotvar.display();
//checks consistently if the carrot is hit by the bunny
carrotvar.hit(bunnyvar);
//displaying and moving spots
for (let i = 0; i < spots.length; i++){ // Make a for() loop to loop through each Spot
spots[i].move(); // Move each object
spots[i].display(); // Display each object
spots[i].check(bunnyvar); // Check for mouse overlap
}
//to make the game harder, add a spot for every 10 points
if(spots.length < 3 + floor(score/10)) {
spots.push(new Spot(WIDTH,
0, 0.1 * spots.length + 1, 0));
}
}
//game over
function gameOver(){
imageMode(CORNER);
//graphics
background(grassimg);
textFont(font);
textSize(50);
fill(255, 200, 0, 200);
textAlign(CENTER);
text("Game Over!", WIDTH/2, HEIGHT/4);
textSize(20);
text("Final Score: " + score, WIDTH/2, HEIGHT/4 + 30);
textSize(25);
text("Press Space to Continue", WIDTH/2, 3*HEIGHT/4);
imageMode(CENTER);
image(bunnyEnd, WIDTH/2, HEIGHT/2, 150, 150);
//to update the high score if necessary
if(score > hiscore) {
// textSize(15);
// text("New High Score!", WIDTH/2, HEIGHT/2 + 55);
hiscore = score;
}
}
//key pressed function
function keyPressed() {
//key press for game states
if (gameState == 0 && key == ' '){ //start to playing
gameState = 1;
} else if (gameState == 2 && key == ' '){ //game over to restart to start again
gameState = 0;
//other key presses are for movement of bunny
} else if (keyCode === LEFT_ARROW) {
bunnyvar.moveTo(LEFT_ARROW);
} else if (keyCode === RIGHT_ARROW) {
bunnyvar.moveTo(RIGHT_ARROW);
}
else if (keyCode === UP_ARROW) {
bunnyvar.moveTo(UP_ARROW);
}
else if (keyCode === DOWN_ARROW) {
bunnyvar.moveTo(DOWN_ARROW);
}
}
//altered a tiny bit of game template's code/spot class
//included a direction so I could alter how the spots move
//Also, direction is a int. It was easier to randomize rather than strings
class Spot {
constructor(_x, _y, _speed, _direction) {
this.x = _x;
this.y = _y;
this.speed = _speed;
this.direction = _direction;
}
///moves the spot from one end to another
move() {
//if I'm going east & I hit the edge...
if(this.direction === 0) {
this.x += this.speed;
if (this.x > (WIDTH)){
this.place()
} //if I'm going west & I hit the edge...
} else if(this.direction === 1) {
this.x -= this.speed;
if (this.x < 0) {
this.place()
}
//if I'm going south & I hit the edge...
} else if(this.direction === 2) {
this.y += this.speed;
if (this.y > (HEIGHT)) {
this.place()
}
//if I'm going north & I hit the edge...
}else if(this.direction === 3) {
this.y -= this.speed;
if (this.y < 0) {
this.place()
}
}
}
//repspawns the spot in a new place
//this could've been placed in the move class
//but also it's a bit wordy
place() {
//decides the random direction of the spot
var randomDirection = floor(random(0,4));
//alter this placement & direction based on the direction
if(randomDirection === 0) { //east
this.x = 0;
this.y = (floor(random(0,3)) * (platformSize + 10) + HEIGHT/2 - movement);
this.direction = 0;
} else if(randomDirection === 1) { //west
this.x = WIDTH;
this.y = (floor(random(0,3)) * (platformSize + 10) + HEIGHT/2 - movement);
this.direction = 1;
} else if(randomDirection === 2) { //south
this.x = (floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement);
this.y = 0;
this.direction = 2;
} else if(randomDirection === 3) { //north
this.x = (floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement);
this.y = HEIGHT;
this.direction = 3;
}
}
display() {
// fill(255, 0 ,0);
// ellipse(this.x, this.y, 25, 25);
imageMode(CENTER);
image(tomatoimg, this.x, this.y, 30, 30);
}
| (Bunny) {
if (abs(this.x - Bunny.x) < 20
&& abs(this.y - Bunny.y) < 20){
gameState = 2;
}
}
}
class platform {
constructor(_x, _y, _length) {
this.x = _x;
this.y = _y;
this.length = _length;
}
display() {
imageMode(CORNER);
// fill(25);
// rect(this.x, this.y, this.length, this.length);
image(floorimg, this.x, this.y, this.length, this.length);
}
}
class Bunny {
constructor(_x, _y, _img) {
this.x = _x;
this.y = _y;
this.img = _img;
}
moveTo(direction) {
if(direction === LEFT_ARROW && this.x > WIDTH/2 - WIDTH/8) {
this.x = this.x - movement;
this.img = bunnyLeft;
}
else if(direction === RIGHT_ARROW && this.x < WIDTH/2 + WIDTH/8) {
this.x = this.x + movement;
this.img = bunnyRight;
}
else if(direction === UP_ARROW && this.y > WIDTH/2 - WIDTH/8) {
this.y = this.y - movement;
this.img = bunnyBack;
}
else if(direction ===DOWN_ARROW && this.y < WIDTH/2 + WIDTH/8) {
this.y = this.y + movement;
this.img = bunnyFront;
}
}
display() {
// fill(255, 200, 100);
// ellipse(this.x, this.y, 30, 30);
imageMode(CENTER);
image(this.img, this.x, this.y, 40, 40);
}
}
class Carrot {
constructor(_x, _y) {
this.x = _x;
this.y = _y;
}
place() {
new Carrot(
this.x = floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement,
this.y = floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement);
}
hit(Bunny) {
if(this.x === Bunny.x && this.y === Bunny.y){
score++
this.place();
}
}
display() {
imageMode(CENTER);
// fill(255, 255, 100);
// ellipse(this.x, this.y, 20, 20);
image(carrotimg, this.x, this.y, 30, 30);
}
}
| check | identifier_name |
sketch.js |
// Adding comments because I'm going to forget code
// also in case someone decides to use my code for some odd reason
// like at least use it correctly jeez,
// and change the names of those classes if you really don't want to credit me
//-----Variables-----//
///image variables:
////bunny directions
var bunnyLeft;
var bunnyRight;
var bunnyFront;
var bunnyBack;
////other in game object images
var carrotimg;
var floorimg;
var tomatoimg;
var grassimg;
////game over art
var bunnyEnd;
////font
var font;
///arrays
let spots = [];
let platforms = [];
///Constants for convenience
let WIDTH = 500;
let HEIGHT = 500;
let platformSize = 60;
let movement = platformSize + platformSize/6
let gameState = 0;
let score = 0;
let hiscore = 0;
///actual object variables
var bunnyvar;
var carrotvar;
var spotvar;
///preload, loading all images and font
function preload(){
bunnyLeft = loadImage("bunny/assets/bunnyLeft.png");
bunnyRight = loadImage("bunny/assets/bunnyRight.png");
bunnyFront = loadImage("bunny/assets/bunnyFront.png");
bunnyBack = loadImage("bunny/assets/bunnyBack.png");
carrotimg = loadImage("bunny/assets/carrot.png");
floorimg = loadImage("bunny/assets/floor.png");
tomatoimg = loadImage("bunny/assets/tomato.png");
grassimg = loadImage("bunny/assets/grass.png");
bunnyEnd = loadImage("bunny/assets/bunnyEnd.png");
font = loadFont('bunny/assets/Adelle_Reg.otf');
}
///set it up!
function setup(){
framerate = 20;
var myCanvas = createCanvas(WIDTH, HEIGHT);
myCanvas.parent("js-game");
var randomNumber = floor(random(0,3));
}
function draw(){
///Credit to class game template
if (gameState == 0){
startScreen();
} else if (gameState == 1){
update();
} else if (gameState == 2){
gameOver();
}
}
function startScreen(){
///Starting screen graphics & rules///
//background
imageMode(CORNER);
background(grassimg);
//title
textFont(font);
textSize(50);
fill(255, 170, 0, 200);
textAlign(CENTER);
text("Carrot Collector", WIDTH/2, HEIGHT/4);
//high score text:
fill(255, 200, 0, 200);
textSize(20);
text("High Score: " + hiscore, WIDTH/2, HEIGHT/4 + 30);
//overall description of game
fill(0, 100);
rect(0,185, WIDTH, 150);
fill(255, 200, 0, 200);
textAlign(LEFT);
textSize(20);
text("Instructions: You're a bunny", WIDTH/8, HEIGHT/2 - 20);
image(bunnyFront, 4*WIDTH/6+5, HEIGHT/2 - 45, 40, 40);
text("trying to collect carrots!", WIDTH/8, HEIGHT/2 + 20);
image(carrotimg, 3*WIDTH/5+5, HEIGHT/2, 30, 30);
text("Try to avoid the tomatoes, they hurt!", WIDTH/8, HEIGHT/2 + 60);
image(tomatoimg, 4*WIDTH/5+15, HEIGHT/2 + 40, 30, 30);
//instructions/technical rules
textAlign(CENTER);
fill(255, 170, 0, 200);
text("Use arrow keys to move around.", WIDTH/2, 3*HEIGHT/4);
textSize(25);
text("Press Space to Start!", WIDTH/2, 3*HEIGHT/4 + 30);
//this is to set up the platforms 3 x 3
//maybe I shouldn't have done this with an array, because
//every three they change their placement
for (let i = 0; i < 9; i++) {
let x = (platformSize + 10) * (i%3) + WIDTH/2 - WIDTH/5;
if(i < 3) |
else if (6 <= i) {
let y = HEIGHT/2 - HEIGHT/5 + (platformSize + 10);
platforms[i] = new platform(x, y, platformSize);
}
else if (3 <= i < 6) {
let y = HEIGHT/2 - HEIGHT/5 + (platformSize + 10) * 2;
platforms[i] = new platform(x, y, platformSize);
}
}
//an array of spots/tomatoes! altered some of class code to fit obstacles
spots = [];
for (let i = 0; i < 3; i++){ // Make a for() loop to create the desired number of Spots
// Add an index [i] to create multiple Spots
spots[i] = new Spot(WIDTH,
0, 0.1 * i + 1, 0);
}
//set up a bunny!
bunnyvar = new Bunny(WIDTH/2, HEIGHT/2, bunnyFront);
//set up a carrot!
carrotvar = new Carrot(floor(random(0,3)) * movement + WIDTH/2 - movement,
floor(random(0,3)) * movement + WIDTH/2 - movement);
//make sure the score is really 0!
score = 0;
}
function update(){
imageMode(CORNER);
//background update
background(grassimg);
noStroke();
//set up for background/information
for (let i = 0; i < platforms.length; i++){
platforms[i].display();
}
textFont(font);
textSize(40);
fill(255, 200, 0, 200);
textAlign(CENTER);
text("Score: " + score, WIDTH/2, 80);
textSize(20);
text("High Score: " + hiscore, WIDTH/2, 110);
//displaying bunny & carrot
bunnyvar.display();
carrotvar.display();
//checks consistently if the carrot is hit by the bunny
carrotvar.hit(bunnyvar);
//displaying and moving spots
for (let i = 0; i < spots.length; i++){ // Make a for() loop to loop through each Spot
spots[i].move(); // Move each object
spots[i].display(); // Display each object
spots[i].check(bunnyvar); // Check for mouse overlap
}
//to make the game harder, add a spot for every 10 points
if(spots.length < 3 + floor(score/10)) {
spots.push(new Spot(WIDTH,
0, 0.1 * spots.length + 1, 0));
}
}
//game over
function gameOver(){
imageMode(CORNER);
//graphics
background(grassimg);
textFont(font);
textSize(50);
fill(255, 200, 0, 200);
textAlign(CENTER);
text("Game Over!", WIDTH/2, HEIGHT/4);
textSize(20);
text("Final Score: " + score, WIDTH/2, HEIGHT/4 + 30);
textSize(25);
text("Press Space to Continue", WIDTH/2, 3*HEIGHT/4);
imageMode(CENTER);
image(bunnyEnd, WIDTH/2, HEIGHT/2, 150, 150);
//to update the high score if necessary
if(score > hiscore) {
// textSize(15);
// text("New High Score!", WIDTH/2, HEIGHT/2 + 55);
hiscore = score;
}
}
//key pressed function
function keyPressed() {
//key press for game states
if (gameState == 0 && key == ' '){ //start to playing
gameState = 1;
} else if (gameState == 2 && key == ' '){ //game over to restart to start again
gameState = 0;
//other key presses are for movement of bunny
} else if (keyCode === LEFT_ARROW) {
bunnyvar.moveTo(LEFT_ARROW);
} else if (keyCode === RIGHT_ARROW) {
bunnyvar.moveTo(RIGHT_ARROW);
}
else if (keyCode === UP_ARROW) {
bunnyvar.moveTo(UP_ARROW);
}
else if (keyCode === DOWN_ARROW) {
bunnyvar.moveTo(DOWN_ARROW);
}
}
//altered a tiny bit of game template's code/spot class
//included a direction so I could alter how the spots move
//Also, direction is a int. It was easier to randomize rather than strings
class Spot {
constructor(_x, _y, _speed, _direction) {
this.x = _x;
this.y = _y;
this.speed = _speed;
this.direction = _direction;
}
///moves the spot from one end to another
move() {
//if I'm going east & I hit the edge...
if(this.direction === 0) {
this.x += this.speed;
if (this.x > (WIDTH)){
this.place()
} //if I'm going west & I hit the edge...
} else if(this.direction === 1) {
this.x -= this.speed;
if (this.x < 0) {
this.place()
}
//if I'm going south & I hit the edge...
} else if(this.direction === 2) {
this.y += this.speed;
if (this.y > (HEIGHT)) {
this.place()
}
//if I'm going north & I hit the edge...
}else if(this.direction === 3) {
this.y -= this.speed;
if (this.y < 0) {
this.place()
}
}
}
//repspawns the spot in a new place
//this could've been placed in the move class
//but also it's a bit wordy
place() {
//decides the random direction of the spot
var randomDirection = floor(random(0,4));
//alter this placement & direction based on the direction
if(randomDirection === 0) { //east
this.x = 0;
this.y = (floor(random(0,3)) * (platformSize + 10) + HEIGHT/2 - movement);
this.direction = 0;
} else if(randomDirection === 1) { //west
this.x = WIDTH;
this.y = (floor(random(0,3)) * (platformSize + 10) + HEIGHT/2 - movement);
this.direction = 1;
} else if(randomDirection === 2) { //south
this.x = (floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement);
this.y = 0;
this.direction = 2;
} else if(randomDirection === 3) { //north
this.x = (floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement);
this.y = HEIGHT;
this.direction = 3;
}
}
display() {
// fill(255, 0 ,0);
// ellipse(this.x, this.y, 25, 25);
imageMode(CENTER);
image(tomatoimg, this.x, this.y, 30, 30);
}
check(Bunny) {
if (abs(this.x - Bunny.x) < 20
&& abs(this.y - Bunny.y) < 20){
gameState = 2;
}
}
}
class platform {
constructor(_x, _y, _length) {
this.x = _x;
this.y = _y;
this.length = _length;
}
display() {
imageMode(CORNER);
// fill(25);
// rect(this.x, this.y, this.length, this.length);
image(floorimg, this.x, this.y, this.length, this.length);
}
}
class Bunny {
constructor(_x, _y, _img) {
this.x = _x;
this.y = _y;
this.img = _img;
}
moveTo(direction) {
if(direction === LEFT_ARROW && this.x > WIDTH/2 - WIDTH/8) {
this.x = this.x - movement;
this.img = bunnyLeft;
}
else if(direction === RIGHT_ARROW && this.x < WIDTH/2 + WIDTH/8) {
this.x = this.x + movement;
this.img = bunnyRight;
}
else if(direction === UP_ARROW && this.y > WIDTH/2 - WIDTH/8) {
this.y = this.y - movement;
this.img = bunnyBack;
}
else if(direction ===DOWN_ARROW && this.y < WIDTH/2 + WIDTH/8) {
this.y = this.y + movement;
this.img = bunnyFront;
}
}
display() {
// fill(255, 200, 100);
// ellipse(this.x, this.y, 30, 30);
imageMode(CENTER);
image(this.img, this.x, this.y, 40, 40);
}
}
class Carrot {
constructor(_x, _y) {
this.x = _x;
this.y = _y;
}
place() {
new Carrot(
this.x = floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement,
this.y = floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement);
}
hit(Bunny) {
if(this.x === Bunny.x && this.y === Bunny.y){
score++
this.place();
}
}
display() {
imageMode(CENTER);
// fill(255, 255, 100);
// ellipse(this.x, this.y, 20, 20);
image(carrotimg, this.x, this.y, 30, 30);
}
}
| {
let y = HEIGHT/2 - HEIGHT/5;
platforms[i] = new platform(x, y, platformSize);
} | conditional_block |
sketch.js | // Adding comments because I'm going to forget code
// also in case someone decides to use my code for some odd reason
// like at least use it correctly jeez,
// and change the names of those classes if you really don't want to credit me
//-----Variables-----//
///image variables:
////bunny directions
var bunnyLeft;
var bunnyRight;
var bunnyFront;
var bunnyBack;
////other in game object images
var carrotimg;
var floorimg;
var tomatoimg;
var grassimg;
////game over art
var bunnyEnd;
////font
var font;
///arrays
let spots = [];
let platforms = [];
///Constants for convenience
let WIDTH = 500;
let HEIGHT = 500;
let platformSize = 60;
let movement = platformSize + platformSize/6
let gameState = 0;
let score = 0;
let hiscore = 0;
///actual object variables
var bunnyvar;
var carrotvar;
var spotvar;
///preload, loading all images and font
function preload(){
bunnyLeft = loadImage("bunny/assets/bunnyLeft.png");
bunnyRight = loadImage("bunny/assets/bunnyRight.png");
bunnyFront = loadImage("bunny/assets/bunnyFront.png");
bunnyBack = loadImage("bunny/assets/bunnyBack.png");
carrotimg = loadImage("bunny/assets/carrot.png");
floorimg = loadImage("bunny/assets/floor.png");
tomatoimg = loadImage("bunny/assets/tomato.png");
grassimg = loadImage("bunny/assets/grass.png");
bunnyEnd = loadImage("bunny/assets/bunnyEnd.png");
font = loadFont('bunny/assets/Adelle_Reg.otf');
}
///set it up!
function setup(){
framerate = 20;
var myCanvas = createCanvas(WIDTH, HEIGHT);
myCanvas.parent("js-game");
var randomNumber = floor(random(0,3));
}
function draw(){
///Credit to class game template
if (gameState == 0){
startScreen();
} else if (gameState == 1){
update();
} else if (gameState == 2){
gameOver();
}
}
function startScreen(){
///Starting screen graphics & rules///
//background
imageMode(CORNER);
background(grassimg);
//title
textFont(font);
textSize(50);
fill(255, 170, 0, 200);
textAlign(CENTER);
text("Carrot Collector", WIDTH/2, HEIGHT/4);
//high score text:
fill(255, 200, 0, 200);
textSize(20);
text("High Score: " + hiscore, WIDTH/2, HEIGHT/4 + 30);
//overall description of game
fill(0, 100);
rect(0,185, WIDTH, 150);
fill(255, 200, 0, 200);
textAlign(LEFT);
textSize(20);
text("Instructions: You're a bunny", WIDTH/8, HEIGHT/2 - 20);
image(bunnyFront, 4*WIDTH/6+5, HEIGHT/2 - 45, 40, 40);
text("trying to collect carrots!", WIDTH/8, HEIGHT/2 + 20);
image(carrotimg, 3*WIDTH/5+5, HEIGHT/2, 30, 30);
text("Try to avoid the tomatoes, they hurt!", WIDTH/8, HEIGHT/2 + 60);
image(tomatoimg, 4*WIDTH/5+15, HEIGHT/2 + 40, 30, 30);
//instructions/technical rules
textAlign(CENTER);
fill(255, 170, 0, 200);
text("Use arrow keys to move around.", WIDTH/2, 3*HEIGHT/4);
textSize(25);
text("Press Space to Start!", WIDTH/2, 3*HEIGHT/4 + 30);
//this is to set up the platforms 3 x 3
//maybe I shouldn't have done this with an array, because
//every three they change their placement
for (let i = 0; i < 9; i++) {
let x = (platformSize + 10) * (i%3) + WIDTH/2 - WIDTH/5;
if(i < 3) {
let y = HEIGHT/2 - HEIGHT/5;
platforms[i] = new platform(x, y, platformSize);
}
else if (6 <= i) {
let y = HEIGHT/2 - HEIGHT/5 + (platformSize + 10);
platforms[i] = new platform(x, y, platformSize);
}
else if (3 <= i < 6) {
let y = HEIGHT/2 - HEIGHT/5 + (platformSize + 10) * 2;
platforms[i] = new platform(x, y, platformSize);
}
}
//an array of spots/tomatoes! altered some of class code to fit obstacles
spots = [];
for (let i = 0; i < 3; i++){ // Make a for() loop to create the desired number of Spots
// Add an index [i] to create multiple Spots
spots[i] = new Spot(WIDTH,
0, 0.1 * i + 1, 0);
}
//set up a bunny!
bunnyvar = new Bunny(WIDTH/2, HEIGHT/2, bunnyFront);
//set up a carrot!
carrotvar = new Carrot(floor(random(0,3)) * movement + WIDTH/2 - movement,
floor(random(0,3)) * movement + WIDTH/2 - movement);
//make sure the score is really 0!
score = 0;
}
function update(){
imageMode(CORNER);
//background update
background(grassimg);
noStroke();
//set up for background/information
for (let i = 0; i < platforms.length; i++){
platforms[i].display();
}
textFont(font);
textSize(40);
fill(255, 200, 0, 200);
textAlign(CENTER);
text("Score: " + score, WIDTH/2, 80);
textSize(20);
text("High Score: " + hiscore, WIDTH/2, 110);
//displaying bunny & carrot
bunnyvar.display();
carrotvar.display();
//checks consistently if the carrot is hit by the bunny
carrotvar.hit(bunnyvar);
//displaying and moving spots
for (let i = 0; i < spots.length; i++){ // Make a for() loop to loop through each Spot
spots[i].move(); // Move each object
spots[i].display(); // Display each object
spots[i].check(bunnyvar); // Check for mouse overlap
}
//to make the game harder, add a spot for every 10 points
if(spots.length < 3 + floor(score/10)) {
spots.push(new Spot(WIDTH,
0, 0.1 * spots.length + 1, 0));
}
}
| function gameOver(){
imageMode(CORNER);
//graphics
background(grassimg);
textFont(font);
textSize(50);
fill(255, 200, 0, 200);
textAlign(CENTER);
text("Game Over!", WIDTH/2, HEIGHT/4);
textSize(20);
text("Final Score: " + score, WIDTH/2, HEIGHT/4 + 30);
textSize(25);
text("Press Space to Continue", WIDTH/2, 3*HEIGHT/4);
imageMode(CENTER);
image(bunnyEnd, WIDTH/2, HEIGHT/2, 150, 150);
//to update the high score if necessary
if(score > hiscore) {
// textSize(15);
// text("New High Score!", WIDTH/2, HEIGHT/2 + 55);
hiscore = score;
}
}
//key pressed function
function keyPressed() {
//key press for game states
if (gameState == 0 && key == ' '){ //start to playing
gameState = 1;
} else if (gameState == 2 && key == ' '){ //game over to restart to start again
gameState = 0;
//other key presses are for movement of bunny
} else if (keyCode === LEFT_ARROW) {
bunnyvar.moveTo(LEFT_ARROW);
} else if (keyCode === RIGHT_ARROW) {
bunnyvar.moveTo(RIGHT_ARROW);
}
else if (keyCode === UP_ARROW) {
bunnyvar.moveTo(UP_ARROW);
}
else if (keyCode === DOWN_ARROW) {
bunnyvar.moveTo(DOWN_ARROW);
}
}
//altered a tiny bit of game template's code/spot class
//included a direction so I could alter how the spots move
//Also, direction is a int. It was easier to randomize rather than strings
class Spot {
constructor(_x, _y, _speed, _direction) {
this.x = _x;
this.y = _y;
this.speed = _speed;
this.direction = _direction;
}
///moves the spot from one end to another
move() {
//if I'm going east & I hit the edge...
if(this.direction === 0) {
this.x += this.speed;
if (this.x > (WIDTH)){
this.place()
} //if I'm going west & I hit the edge...
} else if(this.direction === 1) {
this.x -= this.speed;
if (this.x < 0) {
this.place()
}
//if I'm going south & I hit the edge...
} else if(this.direction === 2) {
this.y += this.speed;
if (this.y > (HEIGHT)) {
this.place()
}
//if I'm going north & I hit the edge...
}else if(this.direction === 3) {
this.y -= this.speed;
if (this.y < 0) {
this.place()
}
}
}
//repspawns the spot in a new place
//this could've been placed in the move class
//but also it's a bit wordy
place() {
//decides the random direction of the spot
var randomDirection = floor(random(0,4));
//alter this placement & direction based on the direction
if(randomDirection === 0) { //east
this.x = 0;
this.y = (floor(random(0,3)) * (platformSize + 10) + HEIGHT/2 - movement);
this.direction = 0;
} else if(randomDirection === 1) { //west
this.x = WIDTH;
this.y = (floor(random(0,3)) * (platformSize + 10) + HEIGHT/2 - movement);
this.direction = 1;
} else if(randomDirection === 2) { //south
this.x = (floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement);
this.y = 0;
this.direction = 2;
} else if(randomDirection === 3) { //north
this.x = (floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement);
this.y = HEIGHT;
this.direction = 3;
}
}
display() {
// fill(255, 0 ,0);
// ellipse(this.x, this.y, 25, 25);
imageMode(CENTER);
image(tomatoimg, this.x, this.y, 30, 30);
}
check(Bunny) {
if (abs(this.x - Bunny.x) < 20
&& abs(this.y - Bunny.y) < 20){
gameState = 2;
}
}
}
class platform {
constructor(_x, _y, _length) {
this.x = _x;
this.y = _y;
this.length = _length;
}
display() {
imageMode(CORNER);
// fill(25);
// rect(this.x, this.y, this.length, this.length);
image(floorimg, this.x, this.y, this.length, this.length);
}
}
class Bunny {
constructor(_x, _y, _img) {
this.x = _x;
this.y = _y;
this.img = _img;
}
moveTo(direction) {
if(direction === LEFT_ARROW && this.x > WIDTH/2 - WIDTH/8) {
this.x = this.x - movement;
this.img = bunnyLeft;
}
else if(direction === RIGHT_ARROW && this.x < WIDTH/2 + WIDTH/8) {
this.x = this.x + movement;
this.img = bunnyRight;
}
else if(direction === UP_ARROW && this.y > WIDTH/2 - WIDTH/8) {
this.y = this.y - movement;
this.img = bunnyBack;
}
else if(direction ===DOWN_ARROW && this.y < WIDTH/2 + WIDTH/8) {
this.y = this.y + movement;
this.img = bunnyFront;
}
}
display() {
// fill(255, 200, 100);
// ellipse(this.x, this.y, 30, 30);
imageMode(CENTER);
image(this.img, this.x, this.y, 40, 40);
}
}
class Carrot {
constructor(_x, _y) {
this.x = _x;
this.y = _y;
}
place() {
new Carrot(
this.x = floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement,
this.y = floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement);
}
hit(Bunny) {
if(this.x === Bunny.x && this.y === Bunny.y){
score++
this.place();
}
}
display() {
imageMode(CENTER);
// fill(255, 255, 100);
// ellipse(this.x, this.y, 20, 20);
image(carrotimg, this.x, this.y, 30, 30);
}
} | //game over | random_line_split |
rbtree.go | // Copyright 2020 wongoo@apache.org. All rights reserved.
// a red-black tree implement
// the node only contains pointers to left/right child, not for the parent, for saving storage space for large tree.
package rbtree
import (
"sync"
"github.com/vogo/goalg/compare"
)
// Color node color
type Color bool
func (c Color) String() string {
if c {
return "red"
}
return "black"
}
// Position tree path position, left or right.
type Position bool
const (
Red = Color(true)
Black = Color(false)
Left = Position(true)
Right = Position(false)
)
// Node the node of red-Black tree.
type Node struct {
Item compare.Lesser
Left, Right *Node
Color Color
}
// Black a node is black if nil or its color is black.
func (n *Node) Black() bool {
return n == nil || n.Color == Black
}
// Red a node is red if nil or its color is red.
func (n *Node) Red() bool {
return n == nil || n.Color == Red
}
// LeftBlack the left child of a node is black if nil or its color is black.
func (n *Node) | () bool {
return n.Left == nil || n.Left.Color == Black
}
// LeftRed the left child of a node is black if not nil and its color is black.
func (n *Node) LeftRed() bool {
return n.Left != nil && n.Left.Color == Red
}
// RightBlack the right child of a node is black if nil or its color is black.
func (n *Node) RightBlack() bool {
return n.Right == nil || n.Right.Color == Black
}
// RightRed the right child of a node is black if not nil and its color is black.
func (n *Node) RightRed() bool {
return n.Right != nil && n.Right.Color == Red
}
// RBTree red-black tree
type RBTree struct {
Node *Node
lock sync.RWMutex
stack *stack
}
// New create a new red-black tree
func New() *RBTree {
return &RBTree{
lock: sync.RWMutex{},
Node: nil,
stack: newStack(nil),
}
}
// LeftRotate left rotate a node.
func LeftRotate(n *Node) *Node {
r := n.Right
if r == nil {
return n
}
n.Right = r.Left
r.Left = n
return r
}
// RightRotate right rotate a node.
func RightRotate(n *Node) *Node {
l := n.Left
if l == nil {
return n
}
n.Left = l.Right
l.Right = n
return l
}
// Add add one key/value node in the tree, replace that if exist
func (t *RBTree) Add(item compare.Lesser) {
t.lock.Lock()
defer t.lock.Unlock()
t.Node = addTreeNode(t.stack, t.Node, item)
}
// Find node
func (t *RBTree) Find(key compare.Lesser) interface{} {
t.lock.RLock()
defer t.lock.RUnlock()
return Find(t.Node, key)
}
// Delete delete node, return the value of deleted node
func (t *RBTree) Delete(key compare.Lesser) (ret interface{}) {
t.lock.Lock()
defer t.lock.Unlock()
t.stack.init(t.Node)
t.Node, ret = deleteTreeNode(t.stack, t.Node, key)
t.stack.reset()
return ret
}
// addTreeNode add a tree node
func addTreeNode(stack *stack, node *Node, item compare.Lesser) *Node {
stack.init(node)
defer stack.reset()
if node == nil {
// case 1: new root
return &Node{
Item: item,
Color: Black,
}
}
for node != nil {
switch {
case item.Less(node.Item):
stack.push(node, Left)
node = node.Left
case node.Item.Less(item):
stack.push(node, Right)
node = node.Right
default:
node.Item = item
return stack.root()
}
}
stack.bindChild(&Node{
Item: item,
Color: Red,
})
addTreeNodeBalance(stack)
root := stack.root()
root.Color = Black
return root
}
// addTreeNodeBalance balance the tree after adding a node
// the pre condition is the child of current stack is red
func addTreeNodeBalance(stack *stack) {
for stack.index > 0 {
p := stack.node()
// case 2: P is black, balance finish
if p.Color == Black {
return
}
// P is red
pp := stack.parent()
// case 1: reach the root
if pp == nil {
return
}
s := stack.sibling()
// case 3: P is red, S is red, PP is black
// execute: set P,S to black, PP to red
// result: black count through PP is not change, continue balance on parent of PP
if s != nil && s.Color == Red {
p.Color = Black
s.Color = Black
pp.Color = Red
stack.pop().pop()
continue
}
// case 4: P is red, S is black, PP is black, the position of N and P are diff.
// execute: rotate up the red child
// result: let match the case 5.
pos, ppos := stack.position(), stack.parentPosition()
if pos != ppos {
if pos == Left {
p = RightRotate(p)
pp.Right = p
} else {
p = LeftRotate(p)
pp.Left = p
}
}
// case 5: P is red, S is black, PP is black, the position of N and P are the same.
// execute: set P to black, PP to red, and rotate P up
// result: black count through P will not change, balance finish.
p.Color = Black
pp.Color = Red
var ppn *Node
if ppos == Left {
ppn = RightRotate(pp)
} else {
ppn = LeftRotate(pp)
}
stack.pop().pop().bindChild(ppn)
return
}
}
// AddNode add new key/value, return the new root node.
// this method add node and balance the tree recursively, not using loop logic.
func AddNode(root *Node, item compare.Lesser) *Node {
return AddNewNode(root, &Node{
Item: item,
})
}
// AddNewNode add new node, return the new root node.
func AddNewNode(root *Node, node *Node) *Node {
// set the new node to red
node.Color = Red
root = addOneNode(root, Left, node)
// reset root color
root.Color = Black
return root
}
// addOneNode recursively down to leaf, and add the new node to the leaf,
// then rebuild the tree from the leaf to root.
// the main purpose is reduce two linked red nodes and keep the black count balance.
//
// code comment use the following terms:
// - N as the balance node
// - L as the left child of N
// - R as the right child of N
// - P as the parent of N
// - LL as the left child of left child of N
// - RR as the right child of right child of N
func addOneNode(node *Node, pos Position, one *Node) *Node {
// case 1: first node
if node == nil {
return one
}
if one.Item.Less(node.Item) {
node.Left = addOneNode(node.Left, Left, one)
// case 2: L is black means it's already balance.
if node.Left.Color == Black {
return node
}
if node.Color == Red {
// case 3: L is red, N is red, N is right child of P
// execute: right rotate up the L
// result: the black count through L,N will not change, but let it match the case 4
if pos == Right {
node = RightRotate(node)
}
// case 4: L is red, N is red, N is left child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
if node.Left.Left != nil && node.Left.Left.Color == Red {
// case 5: N is black, L is red, LL is red
// execute: right rotate N, and make LL to black
// result: black count through N is not change, while that through LL increase 1, tree is now balance.
node = RightRotate(node)
node.Left.Color = Black
}
return node
}
if node.Item.Less(one.Item) {
node.Right = addOneNode(node.Right, Right, one)
// case 2: R is black means it's already balance
if node.Right.Color == Black {
return node
}
if node.Color == Red {
if pos == Left {
// case 3: R is red, N is red, N is left child of P
// execute: left rotate up the R
// result: the black count through R,N will not change, but let it match the case 4
node = LeftRotate(node)
}
// case 4: R is red, N is red, N is right child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
// case 5: N is black, R is red, RR is red
// execute: left rotate N, and make RR to black
// result: black count through N is not change, while that through RR increase 1, tree is now balance.
if node.Right.Right != nil && node.Right.Right.Color == Red {
node = LeftRotate(node)
node.Right.Color = Black
}
return node
}
// case 6: find the exists node, just replace the old value with the new
node.Item = one.Item
return node
}
// Find find the value of a key.
func Find(node *Node, item compare.Lesser) compare.Lesser {
for node != nil {
switch {
case item.Less(node.Item):
node = node.Left
case node.Item.Less(item):
node = node.Right
default:
return node.Item
}
}
return nil
}
// Delete delete a node.
// return the new root node, and the value of the deleted node.
// the new root node will be nil if no node exists in the tree after deleted.
// the deleted node value will be nil if not found.
func Delete(node *Node, item compare.Lesser) (n *Node, ret interface{}) {
if node == nil {
return nil, nil
}
return deleteTreeNode(newStack(node), node, item)
}
// deleteTreeNode delete a node.
// return the new root node, and the value of the deleted node.
// the new root node will be nil if no node exists in the tree after deleted.
// the deleted node value will be nil if not found.
func deleteTreeNode(stack *stack, node *Node, item compare.Lesser) (*Node, interface{}) {
root := node
var ret interface{}
// find the node
FOR:
for node != nil {
switch {
case item.Less(node.Item):
stack.push(node, Left)
node = node.Left
case node.Item.Less(item):
stack.push(node, Right)
node = node.Right
default:
ret = node.Item
break FOR
}
}
// not find
if node == nil {
return root, nil
}
var inorderSuccessor *Node
// find the inorder successor
if node.Right != nil {
stack.push(node, Right)
inorderSuccessor = node.Right
for inorderSuccessor.Left != nil {
stack.push(inorderSuccessor, Left)
inorderSuccessor = inorderSuccessor.Left
}
node.Item = inorderSuccessor.Item
node.Item = inorderSuccessor.Item
node = inorderSuccessor
}
// get the child of node
c := node.Left
if c == nil {
c = node.Right
}
// N has no child
if c == nil {
// delete N
stack.bindChild(nil)
if node.Color == Red {
return root, ret
}
deleteTreeNodeBalance(stack)
root = stack.root()
if root != nil {
root.Color = Black
}
return root, ret
}
// N has one next
// then copy key/value from next to N
node.Item = c.Item
// delete the next
node.Left = nil
node.Right = nil
// N has diff color with next
if node.Color != c.Color {
// set color of N to black
node.Color = Black
return root, ret
}
// the color of N and next are both Black
deleteTreeNodeBalance(stack)
root.Color = Black
return root, ret
}
// deleteTreeNodeBalance balance the tree after deleting.
// code comment use the following terms:
// - N as the balance node
// - P as the father of N
// - PP as the grand father of N
// - S as the sibling of N
// - SL as the left child of S
// - SR as the right child of S
func deleteTreeNodeBalance(stack *stack) {
var (
p, pp, s *Node
pos, ppos Position
)
// case 1: reach the root.
// execute: nothing.
// result: balance finish.
for stack.index > 0 {
p, pp, s = stack.node(), stack.parent(), stack.childSibling()
pos, ppos = stack.position(), stack.parentPosition()
// case 2: S is red.
// execute: rotate S up as the PP of N, and exchange the color of P and S.
// result: the black number not change, but N has a black sibling now.
if s.Color == Red {
p.Color, s.Color = s.Color, p.Color
// np is original S
var np *Node
if pos == Left {
np = LeftRotate(p)
s = p.Right
} else {
np = RightRotate(p)
s = p.Left
}
// insert np in stack
stack.insertBefore(np, pos)
if ppos == Left {
pp.Left = np
} else {
pp.Right = np
}
// reset PP (original S)
pp = np
}
// now S is black.
if s.LeftBlack() && s.RightBlack() {
// case 3: color of P, S, SL, SR are all Black.
// execute: set S to red.
// result: the path through S will reduce one black, and the left and right of P now balance,
// set N to p, and continue execute balance.
if p.Black() {
s.Color = Red
stack.pop()
continue
}
// case4: S, SL, SR are black, P is red.
// execute: exchange the color of S and P.
// result: add one black on the path through N, while that is not change for path through S, balance finish.
p.Color, s.Color = s.Color, p.Color
return
}
// now SL and SR has diff color
if pos == Left {
// case 5: N is left child of P, S is black, SL is red, SR is black.
// execute: right rotate on S, then exchange color of SL(parent of S now) and S.
// result: N has a new black sibling S(original SL), and S has a red right child SR(original S),
// while the black count through S will not change.
if s.LeftRed() {
s = RightRotate(s)
s.Color, s.Right.Color = s.Right.Color, s.Color
p.Right = s
}
// case6: N is left child of P, S is black, SL is black, SR is red.
// execute: set SR to black, left rotate P, the exchange the color of P and S.
// result: S is now the parent of P, the black count through N increase 1,
// the black count through S keep the same,
// balance finish.
s.Right.Color = Black
p.Color, s.Color = s.Color, p.Color
p = LeftRotate(p)
if ppos == Left {
pp.Left = p
} else {
pp.Right = p
}
return
}
// case 5: N is right child of P, S is black, SL is black, SR is red.
// execute: left rotate on S, then exchange color of SR(parent of S now) and S.
// result: N has a new black sibling S(original SR), and S has a red left child SL(original S),
// while the black count through S will not change.
if s.RightRed() {
s = LeftRotate(s)
s.Color, s.Left.Color = s.Left.Color, s.Color
p.Left = s
}
// case6: N is right child of P, S is black, SL is red, SR is black.
// execute: set SL to black, right rotate P, the exchange the color of P and S.
// result: S is now the parent of P, the black count through N increase 1,
// the black count through S keep the same,
// balance finish.
s.Left.Color = Black
p = RightRotate(p)
if ppos == Left {
pp.Left = p
} else {
pp.Right = p
}
p.Color, s.Color = s.Color, p.Color
return
}
}
| LeftBlack | identifier_name |
rbtree.go | // Copyright 2020 wongoo@apache.org. All rights reserved.
// a red-black tree implement
// the node only contains pointers to left/right child, not for the parent, for saving storage space for large tree.
package rbtree
import (
"sync"
"github.com/vogo/goalg/compare"
)
// Color node color
type Color bool
func (c Color) String() string {
if c {
return "red"
}
return "black"
}
// Position tree path position, left or right.
type Position bool
const (
Red = Color(true)
Black = Color(false)
Left = Position(true)
Right = Position(false)
)
// Node the node of red-Black tree.
type Node struct {
Item compare.Lesser
Left, Right *Node
Color Color
}
// Black a node is black if nil or its color is black.
func (n *Node) Black() bool {
return n == nil || n.Color == Black
}
// Red a node is red if nil or its color is red.
func (n *Node) Red() bool {
return n == nil || n.Color == Red
}
// LeftBlack the left child of a node is black if nil or its color is black.
func (n *Node) LeftBlack() bool {
return n.Left == nil || n.Left.Color == Black
}
// LeftRed the left child of a node is black if not nil and its color is black.
func (n *Node) LeftRed() bool {
return n.Left != nil && n.Left.Color == Red
}
// RightBlack the right child of a node is black if nil or its color is black.
func (n *Node) RightBlack() bool {
return n.Right == nil || n.Right.Color == Black
}
// RightRed the right child of a node is black if not nil and its color is black.
func (n *Node) RightRed() bool {
return n.Right != nil && n.Right.Color == Red
}
// RBTree red-black tree
type RBTree struct {
Node *Node
lock sync.RWMutex
stack *stack
}
// New create a new red-black tree
func New() *RBTree {
return &RBTree{
lock: sync.RWMutex{},
Node: nil,
stack: newStack(nil),
}
}
// LeftRotate left rotate a node.
func LeftRotate(n *Node) *Node {
r := n.Right
if r == nil {
return n
}
n.Right = r.Left
r.Left = n
return r
}
// RightRotate right rotate a node.
func RightRotate(n *Node) *Node {
l := n.Left
if l == nil {
return n
}
n.Left = l.Right
l.Right = n
return l
}
// Add add one key/value node in the tree, replace that if exist
func (t *RBTree) Add(item compare.Lesser) {
t.lock.Lock()
defer t.lock.Unlock()
t.Node = addTreeNode(t.stack, t.Node, item)
}
// Find node
func (t *RBTree) Find(key compare.Lesser) interface{} {
t.lock.RLock()
defer t.lock.RUnlock()
return Find(t.Node, key)
}
// Delete delete node, return the value of deleted node
func (t *RBTree) Delete(key compare.Lesser) (ret interface{}) {
t.lock.Lock()
defer t.lock.Unlock()
t.stack.init(t.Node)
t.Node, ret = deleteTreeNode(t.stack, t.Node, key)
t.stack.reset()
return ret
}
// addTreeNode add a tree node
func addTreeNode(stack *stack, node *Node, item compare.Lesser) *Node {
stack.init(node)
defer stack.reset()
if node == nil {
// case 1: new root
return &Node{
Item: item,
Color: Black,
}
}
for node != nil {
switch {
case item.Less(node.Item):
stack.push(node, Left)
node = node.Left
case node.Item.Less(item):
stack.push(node, Right)
node = node.Right
default:
node.Item = item
return stack.root()
}
}
stack.bindChild(&Node{
Item: item,
Color: Red,
})
addTreeNodeBalance(stack)
root := stack.root()
root.Color = Black
return root
}
// addTreeNodeBalance balance the tree after adding a node
// the pre condition is the child of current stack is red
func addTreeNodeBalance(stack *stack) {
for stack.index > 0 {
p := stack.node()
// case 2: P is black, balance finish
if p.Color == Black {
return
}
// P is red
pp := stack.parent()
// case 1: reach the root
if pp == nil |
s := stack.sibling()
// case 3: P is red, S is red, PP is black
// execute: set P,S to black, PP to red
// result: black count through PP is not change, continue balance on parent of PP
if s != nil && s.Color == Red {
p.Color = Black
s.Color = Black
pp.Color = Red
stack.pop().pop()
continue
}
// case 4: P is red, S is black, PP is black, the position of N and P are diff.
// execute: rotate up the red child
// result: let match the case 5.
pos, ppos := stack.position(), stack.parentPosition()
if pos != ppos {
if pos == Left {
p = RightRotate(p)
pp.Right = p
} else {
p = LeftRotate(p)
pp.Left = p
}
}
// case 5: P is red, S is black, PP is black, the position of N and P are the same.
// execute: set P to black, PP to red, and rotate P up
// result: black count through P will not change, balance finish.
p.Color = Black
pp.Color = Red
var ppn *Node
if ppos == Left {
ppn = RightRotate(pp)
} else {
ppn = LeftRotate(pp)
}
stack.pop().pop().bindChild(ppn)
return
}
}
// AddNode add new key/value, return the new root node.
// this method add node and balance the tree recursively, not using loop logic.
func AddNode(root *Node, item compare.Lesser) *Node {
return AddNewNode(root, &Node{
Item: item,
})
}
// AddNewNode add new node, return the new root node.
func AddNewNode(root *Node, node *Node) *Node {
// set the new node to red
node.Color = Red
root = addOneNode(root, Left, node)
// reset root color
root.Color = Black
return root
}
// addOneNode recursively down to leaf, and add the new node to the leaf,
// then rebuild the tree from the leaf to root.
// the main purpose is reduce two linked red nodes and keep the black count balance.
//
// code comment use the following terms:
// - N as the balance node
// - L as the left child of N
// - R as the right child of N
// - P as the parent of N
// - LL as the left child of left child of N
// - RR as the right child of right child of N
func addOneNode(node *Node, pos Position, one *Node) *Node {
// case 1: first node
if node == nil {
return one
}
if one.Item.Less(node.Item) {
node.Left = addOneNode(node.Left, Left, one)
// case 2: L is black means it's already balance.
if node.Left.Color == Black {
return node
}
if node.Color == Red {
// case 3: L is red, N is red, N is right child of P
// execute: right rotate up the L
// result: the black count through L,N will not change, but let it match the case 4
if pos == Right {
node = RightRotate(node)
}
// case 4: L is red, N is red, N is left child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
if node.Left.Left != nil && node.Left.Left.Color == Red {
// case 5: N is black, L is red, LL is red
// execute: right rotate N, and make LL to black
// result: black count through N is not change, while that through LL increase 1, tree is now balance.
node = RightRotate(node)
node.Left.Color = Black
}
return node
}
if node.Item.Less(one.Item) {
node.Right = addOneNode(node.Right, Right, one)
// case 2: R is black means it's already balance
if node.Right.Color == Black {
return node
}
if node.Color == Red {
if pos == Left {
// case 3: R is red, N is red, N is left child of P
// execute: left rotate up the R
// result: the black count through R,N will not change, but let it match the case 4
node = LeftRotate(node)
}
// case 4: R is red, N is red, N is right child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
// case 5: N is black, R is red, RR is red
// execute: left rotate N, and make RR to black
// result: black count through N is not change, while that through RR increase 1, tree is now balance.
if node.Right.Right != nil && node.Right.Right.Color == Red {
node = LeftRotate(node)
node.Right.Color = Black
}
return node
}
// case 6: find the exists node, just replace the old value with the new
node.Item = one.Item
return node
}
// Find find the value of a key.
func Find(node *Node, item compare.Lesser) compare.Lesser {
for node != nil {
switch {
case item.Less(node.Item):
node = node.Left
case node.Item.Less(item):
node = node.Right
default:
return node.Item
}
}
return nil
}
// Delete delete a node.
// return the new root node, and the value of the deleted node.
// the new root node will be nil if no node exists in the tree after deleted.
// the deleted node value will be nil if not found.
func Delete(node *Node, item compare.Lesser) (n *Node, ret interface{}) {
if node == nil {
return nil, nil
}
return deleteTreeNode(newStack(node), node, item)
}
// deleteTreeNode delete a node.
// return the new root node, and the value of the deleted node.
// the new root node will be nil if no node exists in the tree after deleted.
// the deleted node value will be nil if not found.
func deleteTreeNode(stack *stack, node *Node, item compare.Lesser) (*Node, interface{}) {
root := node
var ret interface{}
// find the node
FOR:
for node != nil {
switch {
case item.Less(node.Item):
stack.push(node, Left)
node = node.Left
case node.Item.Less(item):
stack.push(node, Right)
node = node.Right
default:
ret = node.Item
break FOR
}
}
// not find
if node == nil {
return root, nil
}
var inorderSuccessor *Node
// find the inorder successor
if node.Right != nil {
stack.push(node, Right)
inorderSuccessor = node.Right
for inorderSuccessor.Left != nil {
stack.push(inorderSuccessor, Left)
inorderSuccessor = inorderSuccessor.Left
}
node.Item = inorderSuccessor.Item
node.Item = inorderSuccessor.Item
node = inorderSuccessor
}
// get the child of node
c := node.Left
if c == nil {
c = node.Right
}
// N has no child
if c == nil {
// delete N
stack.bindChild(nil)
if node.Color == Red {
return root, ret
}
deleteTreeNodeBalance(stack)
root = stack.root()
if root != nil {
root.Color = Black
}
return root, ret
}
// N has one next
// then copy key/value from next to N
node.Item = c.Item
// delete the next
node.Left = nil
node.Right = nil
// N has diff color with next
if node.Color != c.Color {
// set color of N to black
node.Color = Black
return root, ret
}
// the color of N and next are both Black
deleteTreeNodeBalance(stack)
root.Color = Black
return root, ret
}
// deleteTreeNodeBalance balance the tree after deleting.
// code comment use the following terms:
// - N as the balance node
// - P as the father of N
// - PP as the grand father of N
// - S as the sibling of N
// - SL as the left child of S
// - SR as the right child of S
func deleteTreeNodeBalance(stack *stack) {
var (
p, pp, s *Node
pos, ppos Position
)
// case 1: reach the root.
// execute: nothing.
// result: balance finish.
for stack.index > 0 {
p, pp, s = stack.node(), stack.parent(), stack.childSibling()
pos, ppos = stack.position(), stack.parentPosition()
// case 2: S is red.
// execute: rotate S up as the PP of N, and exchange the color of P and S.
// result: the black number not change, but N has a black sibling now.
if s.Color == Red {
p.Color, s.Color = s.Color, p.Color
// np is original S
var np *Node
if pos == Left {
np = LeftRotate(p)
s = p.Right
} else {
np = RightRotate(p)
s = p.Left
}
// insert np in stack
stack.insertBefore(np, pos)
if ppos == Left {
pp.Left = np
} else {
pp.Right = np
}
// reset PP (original S)
pp = np
}
// now S is black.
if s.LeftBlack() && s.RightBlack() {
// case 3: color of P, S, SL, SR are all Black.
// execute: set S to red.
// result: the path through S will reduce one black, and the left and right of P now balance,
// set N to p, and continue execute balance.
if p.Black() {
s.Color = Red
stack.pop()
continue
}
// case4: S, SL, SR are black, P is red.
// execute: exchange the color of S and P.
// result: add one black on the path through N, while that is not change for path through S, balance finish.
p.Color, s.Color = s.Color, p.Color
return
}
// now SL and SR has diff color
if pos == Left {
// case 5: N is left child of P, S is black, SL is red, SR is black.
// execute: right rotate on S, then exchange color of SL(parent of S now) and S.
// result: N has a new black sibling S(original SL), and S has a red right child SR(original S),
// while the black count through S will not change.
if s.LeftRed() {
s = RightRotate(s)
s.Color, s.Right.Color = s.Right.Color, s.Color
p.Right = s
}
// case6: N is left child of P, S is black, SL is black, SR is red.
// execute: set SR to black, left rotate P, the exchange the color of P and S.
// result: S is now the parent of P, the black count through N increase 1,
// the black count through S keep the same,
// balance finish.
s.Right.Color = Black
p.Color, s.Color = s.Color, p.Color
p = LeftRotate(p)
if ppos == Left {
pp.Left = p
} else {
pp.Right = p
}
return
}
// case 5: N is right child of P, S is black, SL is black, SR is red.
// execute: left rotate on S, then exchange color of SR(parent of S now) and S.
// result: N has a new black sibling S(original SR), and S has a red left child SL(original S),
// while the black count through S will not change.
if s.RightRed() {
s = LeftRotate(s)
s.Color, s.Left.Color = s.Left.Color, s.Color
p.Left = s
}
// case6: N is right child of P, S is black, SL is red, SR is black.
// execute: set SL to black, right rotate P, the exchange the color of P and S.
// result: S is now the parent of P, the black count through N increase 1,
// the black count through S keep the same,
// balance finish.
s.Left.Color = Black
p = RightRotate(p)
if ppos == Left {
pp.Left = p
} else {
pp.Right = p
}
p.Color, s.Color = s.Color, p.Color
return
}
}
| {
return
} | conditional_block |
rbtree.go | // Copyright 2020 wongoo@apache.org. All rights reserved.
// a red-black tree implement
// the node only contains pointers to left/right child, not for the parent, for saving storage space for large tree.
package rbtree
import (
"sync"
"github.com/vogo/goalg/compare"
)
// Color node color
type Color bool
func (c Color) String() string {
if c {
return "red"
}
return "black"
}
// Position tree path position, left or right.
type Position bool
const (
Red = Color(true)
Black = Color(false)
Left = Position(true)
Right = Position(false)
)
// Node the node of red-Black tree.
type Node struct {
Item compare.Lesser
Left, Right *Node
Color Color
}
// Black a node is black if nil or its color is black.
func (n *Node) Black() bool {
return n == nil || n.Color == Black
}
// Red a node is red if nil or its color is red.
func (n *Node) Red() bool {
return n == nil || n.Color == Red
}
// LeftBlack the left child of a node is black if nil or its color is black.
func (n *Node) LeftBlack() bool {
return n.Left == nil || n.Left.Color == Black
}
// LeftRed the left child of a node is black if not nil and its color is black.
func (n *Node) LeftRed() bool {
return n.Left != nil && n.Left.Color == Red
}
// RightBlack the right child of a node is black if nil or its color is black.
func (n *Node) RightBlack() bool {
return n.Right == nil || n.Right.Color == Black
}
// RightRed the right child of a node is black if not nil and its color is black.
func (n *Node) RightRed() bool {
return n.Right != nil && n.Right.Color == Red
}
// RBTree red-black tree
type RBTree struct {
Node *Node
lock sync.RWMutex
stack *stack
}
// New create a new red-black tree
func New() *RBTree {
return &RBTree{
lock: sync.RWMutex{},
Node: nil,
stack: newStack(nil),
}
}
// LeftRotate left rotate a node.
func LeftRotate(n *Node) *Node {
r := n.Right
if r == nil {
return n
}
n.Right = r.Left
r.Left = n
return r
}
// RightRotate right rotate a node.
func RightRotate(n *Node) *Node {
l := n.Left
if l == nil {
return n
}
n.Left = l.Right
l.Right = n
return l
}
// Add add one key/value node in the tree, replace that if exist
func (t *RBTree) Add(item compare.Lesser) {
t.lock.Lock()
defer t.lock.Unlock()
t.Node = addTreeNode(t.stack, t.Node, item)
}
// Find node
func (t *RBTree) Find(key compare.Lesser) interface{} {
t.lock.RLock()
defer t.lock.RUnlock()
return Find(t.Node, key)
}
// Delete delete node, return the value of deleted node
func (t *RBTree) Delete(key compare.Lesser) (ret interface{}) {
t.lock.Lock()
defer t.lock.Unlock()
t.stack.init(t.Node)
t.Node, ret = deleteTreeNode(t.stack, t.Node, key)
t.stack.reset()
return ret
}
// addTreeNode add a tree node
func addTreeNode(stack *stack, node *Node, item compare.Lesser) *Node {
stack.init(node)
defer stack.reset()
if node == nil {
// case 1: new root
return &Node{
Item: item,
Color: Black,
}
}
for node != nil {
switch {
case item.Less(node.Item):
stack.push(node, Left)
node = node.Left
case node.Item.Less(item):
stack.push(node, Right)
node = node.Right
default:
node.Item = item
return stack.root()
}
}
stack.bindChild(&Node{
Item: item,
Color: Red,
})
addTreeNodeBalance(stack)
root := stack.root()
root.Color = Black
return root
}
// addTreeNodeBalance balance the tree after adding a node
// the pre condition is the child of current stack is red
func addTreeNodeBalance(stack *stack) {
for stack.index > 0 {
p := stack.node()
// case 2: P is black, balance finish
if p.Color == Black {
return
}
// P is red
pp := stack.parent()
// case 1: reach the root
if pp == nil {
return
}
s := stack.sibling()
// case 3: P is red, S is red, PP is black
// execute: set P,S to black, PP to red
// result: black count through PP is not change, continue balance on parent of PP
if s != nil && s.Color == Red {
p.Color = Black
s.Color = Black
pp.Color = Red
stack.pop().pop()
continue
}
// case 4: P is red, S is black, PP is black, the position of N and P are diff.
// execute: rotate up the red child
// result: let match the case 5.
pos, ppos := stack.position(), stack.parentPosition()
if pos != ppos {
if pos == Left {
p = RightRotate(p)
pp.Right = p
} else {
p = LeftRotate(p)
pp.Left = p
}
}
// case 5: P is red, S is black, PP is black, the position of N and P are the same.
// execute: set P to black, PP to red, and rotate P up
// result: black count through P will not change, balance finish.
p.Color = Black
pp.Color = Red
var ppn *Node
if ppos == Left {
ppn = RightRotate(pp)
} else {
ppn = LeftRotate(pp)
}
stack.pop().pop().bindChild(ppn)
return
}
}
// AddNode add new key/value, return the new root node.
// this method add node and balance the tree recursively, not using loop logic.
func AddNode(root *Node, item compare.Lesser) *Node {
return AddNewNode(root, &Node{
Item: item,
})
}
// AddNewNode add new node, return the new root node.
func AddNewNode(root *Node, node *Node) *Node {
// set the new node to red
node.Color = Red
root = addOneNode(root, Left, node)
// reset root color
root.Color = Black
return root
}
// addOneNode recursively down to leaf, and add the new node to the leaf,
// then rebuild the tree from the leaf to root.
// the main purpose is reduce two linked red nodes and keep the black count balance.
//
// code comment use the following terms:
// - N as the balance node
// - L as the left child of N
// - R as the right child of N
// - P as the parent of N
// - LL as the left child of left child of N
// - RR as the right child of right child of N
func addOneNode(node *Node, pos Position, one *Node) *Node {
// case 1: first node
if node == nil {
return one
}
if one.Item.Less(node.Item) {
node.Left = addOneNode(node.Left, Left, one)
// case 2: L is black means it's already balance.
if node.Left.Color == Black {
return node
}
if node.Color == Red {
// case 3: L is red, N is red, N is right child of P
// execute: right rotate up the L
// result: the black count through L,N will not change, but let it match the case 4
if pos == Right {
node = RightRotate(node)
}
// case 4: L is red, N is red, N is left child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
if node.Left.Left != nil && node.Left.Left.Color == Red {
// case 5: N is black, L is red, LL is red
// execute: right rotate N, and make LL to black
// result: black count through N is not change, while that through LL increase 1, tree is now balance.
node = RightRotate(node)
node.Left.Color = Black
}
return node
}
if node.Item.Less(one.Item) {
node.Right = addOneNode(node.Right, Right, one)
// case 2: R is black means it's already balance
if node.Right.Color == Black {
return node
}
if node.Color == Red {
if pos == Left {
// case 3: R is red, N is red, N is left child of P
// execute: left rotate up the R
// result: the black count through R,N will not change, but let it match the case 4
node = LeftRotate(node)
}
// case 4: R is red, N is red, N is right child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
// case 5: N is black, R is red, RR is red
// execute: left rotate N, and make RR to black
// result: black count through N is not change, while that through RR increase 1, tree is now balance.
if node.Right.Right != nil && node.Right.Right.Color == Red {
node = LeftRotate(node)
node.Right.Color = Black
}
return node
}
// case 6: find the exists node, just replace the old value with the new
node.Item = one.Item
return node
}
// Find find the value of a key.
func Find(node *Node, item compare.Lesser) compare.Lesser {
for node != nil {
switch {
case item.Less(node.Item):
node = node.Left
case node.Item.Less(item):
node = node.Right
default:
return node.Item
}
}
return nil
}
// Delete delete a node.
// return the new root node, and the value of the deleted node.
// the new root node will be nil if no node exists in the tree after deleted.
// the deleted node value will be nil if not found.
func Delete(node *Node, item compare.Lesser) (n *Node, ret interface{}) {
if node == nil {
return nil, nil
}
return deleteTreeNode(newStack(node), node, item)
}
// deleteTreeNode delete a node.
// return the new root node, and the value of the deleted node.
// the new root node will be nil if no node exists in the tree after deleted.
// the deleted node value will be nil if not found.
func deleteTreeNode(stack *stack, node *Node, item compare.Lesser) (*Node, interface{}) {
root := node
var ret interface{}
// find the node | node = node.Left
case node.Item.Less(item):
stack.push(node, Right)
node = node.Right
default:
ret = node.Item
break FOR
}
}
// not find
if node == nil {
return root, nil
}
var inorderSuccessor *Node
// find the inorder successor
if node.Right != nil {
stack.push(node, Right)
inorderSuccessor = node.Right
for inorderSuccessor.Left != nil {
stack.push(inorderSuccessor, Left)
inorderSuccessor = inorderSuccessor.Left
}
node.Item = inorderSuccessor.Item
node.Item = inorderSuccessor.Item
node = inorderSuccessor
}
// get the child of node
c := node.Left
if c == nil {
c = node.Right
}
// N has no child
if c == nil {
// delete N
stack.bindChild(nil)
if node.Color == Red {
return root, ret
}
deleteTreeNodeBalance(stack)
root = stack.root()
if root != nil {
root.Color = Black
}
return root, ret
}
// N has one next
// then copy key/value from next to N
node.Item = c.Item
// delete the next
node.Left = nil
node.Right = nil
// N has diff color with next
if node.Color != c.Color {
// set color of N to black
node.Color = Black
return root, ret
}
// the color of N and next are both Black
deleteTreeNodeBalance(stack)
root.Color = Black
return root, ret
}
// deleteTreeNodeBalance balance the tree after deleting.
// code comment use the following terms:
// - N as the balance node
// - P as the father of N
// - PP as the grand father of N
// - S as the sibling of N
// - SL as the left child of S
// - SR as the right child of S
func deleteTreeNodeBalance(stack *stack) {
var (
p, pp, s *Node
pos, ppos Position
)
// case 1: reach the root.
// execute: nothing.
// result: balance finish.
for stack.index > 0 {
p, pp, s = stack.node(), stack.parent(), stack.childSibling()
pos, ppos = stack.position(), stack.parentPosition()
// case 2: S is red.
// execute: rotate S up as the PP of N, and exchange the color of P and S.
// result: the black number not change, but N has a black sibling now.
if s.Color == Red {
p.Color, s.Color = s.Color, p.Color
// np is original S
var np *Node
if pos == Left {
np = LeftRotate(p)
s = p.Right
} else {
np = RightRotate(p)
s = p.Left
}
// insert np in stack
stack.insertBefore(np, pos)
if ppos == Left {
pp.Left = np
} else {
pp.Right = np
}
// reset PP (original S)
pp = np
}
// now S is black.
if s.LeftBlack() && s.RightBlack() {
// case 3: color of P, S, SL, SR are all Black.
// execute: set S to red.
// result: the path through S will reduce one black, and the left and right of P now balance,
// set N to p, and continue execute balance.
if p.Black() {
s.Color = Red
stack.pop()
continue
}
// case4: S, SL, SR are black, P is red.
// execute: exchange the color of S and P.
// result: add one black on the path through N, while that is not change for path through S, balance finish.
p.Color, s.Color = s.Color, p.Color
return
}
// now SL and SR has diff color
if pos == Left {
// case 5: N is left child of P, S is black, SL is red, SR is black.
// execute: right rotate on S, then exchange color of SL(parent of S now) and S.
// result: N has a new black sibling S(original SL), and S has a red right child SR(original S),
// while the black count through S will not change.
if s.LeftRed() {
s = RightRotate(s)
s.Color, s.Right.Color = s.Right.Color, s.Color
p.Right = s
}
// case6: N is left child of P, S is black, SL is black, SR is red.
// execute: set SR to black, left rotate P, the exchange the color of P and S.
// result: S is now the parent of P, the black count through N increase 1,
// the black count through S keep the same,
// balance finish.
s.Right.Color = Black
p.Color, s.Color = s.Color, p.Color
p = LeftRotate(p)
if ppos == Left {
pp.Left = p
} else {
pp.Right = p
}
return
}
// case 5: N is right child of P, S is black, SL is black, SR is red.
// execute: left rotate on S, then exchange color of SR(parent of S now) and S.
// result: N has a new black sibling S(original SR), and S has a red left child SL(original S),
// while the black count through S will not change.
if s.RightRed() {
s = LeftRotate(s)
s.Color, s.Left.Color = s.Left.Color, s.Color
p.Left = s
}
// case6: N is right child of P, S is black, SL is red, SR is black.
// execute: set SL to black, right rotate P, the exchange the color of P and S.
// result: S is now the parent of P, the black count through N increase 1,
// the black count through S keep the same,
// balance finish.
s.Left.Color = Black
p = RightRotate(p)
if ppos == Left {
pp.Left = p
} else {
pp.Right = p
}
p.Color, s.Color = s.Color, p.Color
return
}
} | FOR:
for node != nil {
switch {
case item.Less(node.Item):
stack.push(node, Left) | random_line_split |
rbtree.go | // Copyright 2020 wongoo@apache.org. All rights reserved.
// a red-black tree implement
// the node only contains pointers to left/right child, not for the parent, for saving storage space for large tree.
package rbtree
import (
"sync"
"github.com/vogo/goalg/compare"
)
// Color node color
type Color bool
func (c Color) String() string {
if c {
return "red"
}
return "black"
}
// Position tree path position, left or right.
type Position bool
const (
Red = Color(true)
Black = Color(false)
Left = Position(true)
Right = Position(false)
)
// Node the node of red-Black tree.
type Node struct {
Item compare.Lesser
Left, Right *Node
Color Color
}
// Black a node is black if nil or its color is black.
func (n *Node) Black() bool {
return n == nil || n.Color == Black
}
// Red a node is red if nil or its color is red.
func (n *Node) Red() bool {
return n == nil || n.Color == Red
}
// LeftBlack the left child of a node is black if nil or its color is black.
func (n *Node) LeftBlack() bool {
return n.Left == nil || n.Left.Color == Black
}
// LeftRed the left child of a node is black if not nil and its color is black.
func (n *Node) LeftRed() bool {
return n.Left != nil && n.Left.Color == Red
}
// RightBlack the right child of a node is black if nil or its color is black.
func (n *Node) RightBlack() bool |
// RightRed the right child of a node is black if not nil and its color is black.
func (n *Node) RightRed() bool {
return n.Right != nil && n.Right.Color == Red
}
// RBTree red-black tree
type RBTree struct {
Node *Node
lock sync.RWMutex
stack *stack
}
// New create a new red-black tree
func New() *RBTree {
return &RBTree{
lock: sync.RWMutex{},
Node: nil,
stack: newStack(nil),
}
}
// LeftRotate left rotate a node.
func LeftRotate(n *Node) *Node {
r := n.Right
if r == nil {
return n
}
n.Right = r.Left
r.Left = n
return r
}
// RightRotate right rotate a node.
func RightRotate(n *Node) *Node {
l := n.Left
if l == nil {
return n
}
n.Left = l.Right
l.Right = n
return l
}
// Add add one key/value node in the tree, replace that if exist
func (t *RBTree) Add(item compare.Lesser) {
t.lock.Lock()
defer t.lock.Unlock()
t.Node = addTreeNode(t.stack, t.Node, item)
}
// Find node
func (t *RBTree) Find(key compare.Lesser) interface{} {
t.lock.RLock()
defer t.lock.RUnlock()
return Find(t.Node, key)
}
// Delete delete node, return the value of deleted node
func (t *RBTree) Delete(key compare.Lesser) (ret interface{}) {
t.lock.Lock()
defer t.lock.Unlock()
t.stack.init(t.Node)
t.Node, ret = deleteTreeNode(t.stack, t.Node, key)
t.stack.reset()
return ret
}
// addTreeNode add a tree node
func addTreeNode(stack *stack, node *Node, item compare.Lesser) *Node {
stack.init(node)
defer stack.reset()
if node == nil {
// case 1: new root
return &Node{
Item: item,
Color: Black,
}
}
for node != nil {
switch {
case item.Less(node.Item):
stack.push(node, Left)
node = node.Left
case node.Item.Less(item):
stack.push(node, Right)
node = node.Right
default:
node.Item = item
return stack.root()
}
}
stack.bindChild(&Node{
Item: item,
Color: Red,
})
addTreeNodeBalance(stack)
root := stack.root()
root.Color = Black
return root
}
// addTreeNodeBalance balance the tree after adding a node
// the pre condition is the child of current stack is red
func addTreeNodeBalance(stack *stack) {
for stack.index > 0 {
p := stack.node()
// case 2: P is black, balance finish
if p.Color == Black {
return
}
// P is red
pp := stack.parent()
// case 1: reach the root
if pp == nil {
return
}
s := stack.sibling()
// case 3: P is red, S is red, PP is black
// execute: set P,S to black, PP to red
// result: black count through PP is not change, continue balance on parent of PP
if s != nil && s.Color == Red {
p.Color = Black
s.Color = Black
pp.Color = Red
stack.pop().pop()
continue
}
// case 4: P is red, S is black, PP is black, the position of N and P are diff.
// execute: rotate up the red child
// result: let match the case 5.
pos, ppos := stack.position(), stack.parentPosition()
if pos != ppos {
if pos == Left {
p = RightRotate(p)
pp.Right = p
} else {
p = LeftRotate(p)
pp.Left = p
}
}
// case 5: P is red, S is black, PP is black, the position of N and P are the same.
// execute: set P to black, PP to red, and rotate P up
// result: black count through P will not change, balance finish.
p.Color = Black
pp.Color = Red
var ppn *Node
if ppos == Left {
ppn = RightRotate(pp)
} else {
ppn = LeftRotate(pp)
}
stack.pop().pop().bindChild(ppn)
return
}
}
// AddNode add new key/value, return the new root node.
// this method add node and balance the tree recursively, not using loop logic.
func AddNode(root *Node, item compare.Lesser) *Node {
return AddNewNode(root, &Node{
Item: item,
})
}
// AddNewNode add new node, return the new root node.
func AddNewNode(root *Node, node *Node) *Node {
// set the new node to red
node.Color = Red
root = addOneNode(root, Left, node)
// reset root color
root.Color = Black
return root
}
// addOneNode recursively down to leaf, and add the new node to the leaf,
// then rebuild the tree from the leaf to root.
// the main purpose is reduce two linked red nodes and keep the black count balance.
//
// code comment use the following terms:
// - N as the balance node
// - L as the left child of N
// - R as the right child of N
// - P as the parent of N
// - LL as the left child of left child of N
// - RR as the right child of right child of N
func addOneNode(node *Node, pos Position, one *Node) *Node {
// case 1: first node
if node == nil {
return one
}
if one.Item.Less(node.Item) {
node.Left = addOneNode(node.Left, Left, one)
// case 2: L is black means it's already balance.
if node.Left.Color == Black {
return node
}
if node.Color == Red {
// case 3: L is red, N is red, N is right child of P
// execute: right rotate up the L
// result: the black count through L,N will not change, but let it match the case 4
if pos == Right {
node = RightRotate(node)
}
// case 4: L is red, N is red, N is left child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
if node.Left.Left != nil && node.Left.Left.Color == Red {
// case 5: N is black, L is red, LL is red
// execute: right rotate N, and make LL to black
// result: black count through N is not change, while that through LL increase 1, tree is now balance.
node = RightRotate(node)
node.Left.Color = Black
}
return node
}
if node.Item.Less(one.Item) {
node.Right = addOneNode(node.Right, Right, one)
// case 2: R is black means it's already balance
if node.Right.Color == Black {
return node
}
if node.Color == Red {
if pos == Left {
// case 3: R is red, N is red, N is left child of P
// execute: left rotate up the R
// result: the black count through R,N will not change, but let it match the case 4
node = LeftRotate(node)
}
// case 4: R is red, N is red, N is right child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
// case 5: N is black, R is red, RR is red
// execute: left rotate N, and make RR to black
// result: black count through N is not change, while that through RR increase 1, tree is now balance.
if node.Right.Right != nil && node.Right.Right.Color == Red {
node = LeftRotate(node)
node.Right.Color = Black
}
return node
}
// case 6: find the exists node, just replace the old value with the new
node.Item = one.Item
return node
}
// Find find the value of a key.
func Find(node *Node, item compare.Lesser) compare.Lesser {
for node != nil {
switch {
case item.Less(node.Item):
node = node.Left
case node.Item.Less(item):
node = node.Right
default:
return node.Item
}
}
return nil
}
// Delete delete a node.
// return the new root node, and the value of the deleted node.
// the new root node will be nil if no node exists in the tree after deleted.
// the deleted node value will be nil if not found.
func Delete(node *Node, item compare.Lesser) (n *Node, ret interface{}) {
if node == nil {
return nil, nil
}
return deleteTreeNode(newStack(node), node, item)
}
// deleteTreeNode delete a node.
// return the new root node, and the value of the deleted node.
// the new root node will be nil if no node exists in the tree after deleted.
// the deleted node value will be nil if not found.
func deleteTreeNode(stack *stack, node *Node, item compare.Lesser) (*Node, interface{}) {
root := node
var ret interface{}
// find the node
FOR:
for node != nil {
switch {
case item.Less(node.Item):
stack.push(node, Left)
node = node.Left
case node.Item.Less(item):
stack.push(node, Right)
node = node.Right
default:
ret = node.Item
break FOR
}
}
// not find
if node == nil {
return root, nil
}
var inorderSuccessor *Node
// find the inorder successor
if node.Right != nil {
stack.push(node, Right)
inorderSuccessor = node.Right
for inorderSuccessor.Left != nil {
stack.push(inorderSuccessor, Left)
inorderSuccessor = inorderSuccessor.Left
}
node.Item = inorderSuccessor.Item
node.Item = inorderSuccessor.Item
node = inorderSuccessor
}
// get the child of node
c := node.Left
if c == nil {
c = node.Right
}
// N has no child
if c == nil {
// delete N
stack.bindChild(nil)
if node.Color == Red {
return root, ret
}
deleteTreeNodeBalance(stack)
root = stack.root()
if root != nil {
root.Color = Black
}
return root, ret
}
// N has one next
// then copy key/value from next to N
node.Item = c.Item
// delete the next
node.Left = nil
node.Right = nil
// N has diff color with next
if node.Color != c.Color {
// set color of N to black
node.Color = Black
return root, ret
}
// the color of N and next are both Black
deleteTreeNodeBalance(stack)
root.Color = Black
return root, ret
}
// deleteTreeNodeBalance balance the tree after deleting.
// code comment use the following terms:
// - N as the balance node
// - P as the father of N
// - PP as the grand father of N
// - S as the sibling of N
// - SL as the left child of S
// - SR as the right child of S
func deleteTreeNodeBalance(stack *stack) {
var (
p, pp, s *Node
pos, ppos Position
)
// case 1: reach the root.
// execute: nothing.
// result: balance finish.
for stack.index > 0 {
p, pp, s = stack.node(), stack.parent(), stack.childSibling()
pos, ppos = stack.position(), stack.parentPosition()
// case 2: S is red.
// execute: rotate S up as the PP of N, and exchange the color of P and S.
// result: the black number not change, but N has a black sibling now.
if s.Color == Red {
p.Color, s.Color = s.Color, p.Color
// np is original S
var np *Node
if pos == Left {
np = LeftRotate(p)
s = p.Right
} else {
np = RightRotate(p)
s = p.Left
}
// insert np in stack
stack.insertBefore(np, pos)
if ppos == Left {
pp.Left = np
} else {
pp.Right = np
}
// reset PP (original S)
pp = np
}
// now S is black.
if s.LeftBlack() && s.RightBlack() {
// case 3: color of P, S, SL, SR are all Black.
// execute: set S to red.
// result: the path through S will reduce one black, and the left and right of P now balance,
// set N to p, and continue execute balance.
if p.Black() {
s.Color = Red
stack.pop()
continue
}
// case4: S, SL, SR are black, P is red.
// execute: exchange the color of S and P.
// result: add one black on the path through N, while that is not change for path through S, balance finish.
p.Color, s.Color = s.Color, p.Color
return
}
// now SL and SR has diff color
if pos == Left {
// case 5: N is left child of P, S is black, SL is red, SR is black.
// execute: right rotate on S, then exchange color of SL(parent of S now) and S.
// result: N has a new black sibling S(original SL), and S has a red right child SR(original S),
// while the black count through S will not change.
if s.LeftRed() {
s = RightRotate(s)
s.Color, s.Right.Color = s.Right.Color, s.Color
p.Right = s
}
// case6: N is left child of P, S is black, SL is black, SR is red.
// execute: set SR to black, left rotate P, the exchange the color of P and S.
// result: S is now the parent of P, the black count through N increase 1,
// the black count through S keep the same,
// balance finish.
s.Right.Color = Black
p.Color, s.Color = s.Color, p.Color
p = LeftRotate(p)
if ppos == Left {
pp.Left = p
} else {
pp.Right = p
}
return
}
// case 5: N is right child of P, S is black, SL is black, SR is red.
// execute: left rotate on S, then exchange color of SR(parent of S now) and S.
// result: N has a new black sibling S(original SR), and S has a red left child SL(original S),
// while the black count through S will not change.
if s.RightRed() {
s = LeftRotate(s)
s.Color, s.Left.Color = s.Left.Color, s.Color
p.Left = s
}
// case6: N is right child of P, S is black, SL is red, SR is black.
// execute: set SL to black, right rotate P, the exchange the color of P and S.
// result: S is now the parent of P, the black count through N increase 1,
// the black count through S keep the same,
// balance finish.
s.Left.Color = Black
p = RightRotate(p)
if ppos == Left {
pp.Left = p
} else {
pp.Right = p
}
p.Color, s.Color = s.Color, p.Color
return
}
}
| {
return n.Right == nil || n.Right.Color == Black
} | identifier_body |
Estimate_Hazard.py | import csv
import numpy as np
import scipy as sp
from scipy.stats import exponweib
import matplotlib
import matplotlib.pyplot as plt
import pandas
import thinkstats2
import thinkbayes2
import survival
import thinkplot
import random
import math
houselist=['Wildling','None','Night\'s Watch','Lannister','House Lannister','Stark','House Stark','Tully','House Tully', 'Arryn','House Arryn',
'Tyrell', 'House Tyrell', 'Targaryen','House Targaryen','Martell','House Martell','Baratheon','House Baratheon','Greyjoy','House Greyjoy']
# houselist_short=['Stark','Baratheon','None','Lannister','Tully','Arryn','Targaryen','Greyjoy','Wildling','Night\'s Watch','Tyrell','Martell']
houselist_short1=['Arryn','Stark','Lannister','None']
houselist_short2=['Tyrell','Baratheon','Tully','Night\'s Watch']
houselist_short3=['Martell','Targaryen','Wildling','Greyjoy']
houselist_short=['Arryn','Stark','Lannister','None','Tyrell','Baratheon','Tully','Night\'s Watch','Martell','Targaryen','Wildling','Greyjoy']
def Init_List_Struct():
list_str=[['dead', ['nobles', ['men'], ['women']], ['smallfolk', ['men'], ['women']]], ['alive', ['nobles', ['men'], ['women']], ['smallfolk', ['men'], ['women']]]]
return list_str
colordict={'Stark':['SlateGrey','DimGrey','Silver'],'Baratheon':['DarkOrange','Red','Orange'],'None':['MediumTurquoise','Teal','DarkSeaGreen'],
'Lannister':['Maroon','DarkGoldenRod','Gold'],'Tully':['FireBrick','RoyalBlue','LightSteelBlue'],'Arryn':['LightSkyBlue','LightSlateGrey','MidnightBlue'],
'Targaryen':['DarkRed','Black','Brown'],'Greyjoy':['DarkSlateGrey','GoldenRod','Khaki'],'Wildling':['Indigo','BlueViolet','Plum'],
'Night\'s Watch':['Black','LightGrey','Grey'],'Tyrell':['DarkGreen','Yellow','YellowGreen'],'Martell':['PaleGoldenRod','SandyBrown','Tomato']}
No=Init_List_Struct()
Lannister=Init_List_Struct()
Stark=Init_List_Struct()
Tully=Init_List_Struct()
Arryn=Init_List_Struct()
Tyrell=Init_List_Struct() #WTF
Targaryen=Init_List_Struct()
Martell=Init_List_Struct()#WTF2.5559687549462544
Baratheon=Init_List_Struct()
Greyjoy=Init_List_Struct()
Wildling=Init_List_Struct()
NW=Init_List_Struct()
hd={'Wildling':Wildling,'None': No,'Night\'s Watch':NW,'Lannister':Lannister,'House Lannister':Lannister,'Stark':Stark,'House Stark':Stark,
'Tully':Tully,'House Tully':Tully, 'Arryn':Arryn,'House Arryn':Arryn,'Tyrell':Tyrell, 'House Tyrell':Tyrell, 'Targaryen':Targaryen,
'House Targaryen':Targaryen,'Martell':Martell,'House Martell':Martell,'Baratheon':Baratheon,'House Baratheon':Baratheon,'Greyjoy':Greyjoy,'House Greyjoy':Greyjoy}
data=[]
with open('char_final.csv', 'r') as dataset:
reader=csv.reader(dataset)
for row in reader:
data.append(row)
data.pop(0)
data.pop(0)
data.pop(0)
data.pop(0)
def house_list(House_Name,info):
if info [1]==House_Name:
if info[3]!='': #if they are dead
if info[8]=='1':#if they are noble
if info[7]=='1': #if they are male
hd[House_Name][0][1][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][0][1][2].append(info)
else:
print 'a',info
elif info[8]=='0': #if they are smallfolk
if info[7]=='1': #if they are male
hd[House_Name][0][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][0][2][2].append(info)
else:
print 'b',info
else:
print 'bb',info
print info[8]
elif info[3]=='': #if they are alive
if info[8]=='1':#if they are noble
if info[7]=='1': #if they are male
hd[House_Name][1][1][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][1][2].append(info)
else:
print 'c',info
elif info[8]=='0': #if they are smallfolk
if info[7]=='1': #if they are male
hd[House_Name][1][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][2][2].append(info)
else:
print 'd',info
else:
print 'e',info
for info in data:
for key in houselist:
house_list(key,info)
def ages(alive,dead):
got=72
cok=69
sos=81
ffc=45
dwd=72
bd={'got':got,'cok':cok,'sos':sos,'ffc':ffc,'dwd':dwd}
bnd={'got':0,'cok':1,'sos':2,'ffc':3,'dwd':4}
introductions=[]
lifetimes=[]
for pers in dead:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[13]=='1':
end='dwd'
elif pers[12]=='1':
end='ffc'
elif pers[11]=='1':
end='sos'
elif pers[10]=='1':
end='cok'
elif pers[9]=='1':
end='got'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
if pers[4]=='':
death=bnd[end]+1
else:
death=((float(pers[4])+1)/bd[end])+bnd[end]
life=death-birth
lifetimes.append(life)
for pers in alive:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
introductions.append(5-birth)
return introductions,lifetimes
def SurvivalHaz(introductions,lifetimes):
haz=survival.EstimateHazardFunction(lifetimes, introductions)
sf=haz.MakeSurvival()
# thinkplot.plot(sf,color='Grey')
# plt.xlabel("Age (books)")
# plt.ylabel("Probability of Surviving")
# plt.title('Survial Function')
# thinkplot.show()
# thinkplot.plot(haz,color='Grey')
# plt.title('Hazard Function')
# plt.xlabel("Age (books)")
# plt.ylabel("Percent of Lives That End")
# thinkplot.show()
return sf,haz
class GOT(thinkbayes2.Suite, thinkbayes2.Joint):
def Likelihood(self, data, hypo):
age, alive = data
k, lam = hypo
if alive:
prob = 1-exponweib.cdf(age, k, lam)
else:
prob = exponweib.pdf(age, k, lam)
return prob
def | (k, lam, age, alive):
joint = thinkbayes2.MakeJoint(k, lam)
suite = GOT(joint)
suite.Update((age, alive))
k, lam = suite.Marginal(0, label=k.label), suite.Marginal(1, label=lam.label)
return k, lam
def makePMF(k,lam):
k.label = 'K'
lam.label = 'Lam'
print("Updating deaths")
numDead = len(dead)
ticks = math.ceil(numDead/100)
i = 0
for age in lifetimes:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
# age = float(pers[-1])
k, lam = Update(k, lam, age, False)
print('Updating alives')
numAlive = len(alive)
ticks = math.ceil(numAlive/100)
i = 0
for age in introductions:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
k, lam = Update(k, lam, age, True)
return k,lam
def WriteFile(k,lam,House):
intervalk = k.Percentile(5), k.Percentile(95)
intervallam = lam.Percentile(5), lam.Percentile(95)
good = raw_input('Good? y/n')
if good=='y':
file = open("klam.txt", "a")
Words=[House,'\n','K\n',str(k),'\n','\n','lam\n',str(lam),'\n','\n','K-90per cred\n',str(intervalk),'\n','\n','Lam-90per cred\n',str(intervallam),'\n','\n',]
file.writelines(Words)
file.close()
def cred_params(house):
file = open('house_all_alivef.txt', 'r')
i=-1
cred_param=[['Stark'],['Baratheon'],['None'],['Lannister'],['Tully'],['Arryn'],['Targaryen'],['Greyjoy'],['Wildling'],['Night\'s Watch'],['Tyrell'],['Martell']]
linelist=[]
for line in file:
if line[0] =='(':
linelist.append(line)
j=0
for i in range(len(linelist)):
if i%2==0:
kl=float(linelist[i][1:19])
kh=float(linelist[i][21:38])
cred_param[j].append(kl)
cred_param[j].append(kh)
j+=1
j=0
for i in range(len(linelist)):
if i%2!=0:
ll=float(linelist[i][1:19])
lh=float(linelist[i][22:38])
cred_param[j].append(ll)
cred_param[j].append(lh)
j+=1
for i in range(len(cred_param)):
if cred_param[i][0]==house:
return cred_param[i][1],cred_param[i][2],cred_param[i][3],cred_param[i][4]
def CredIntPlt(sf,kl,kh,ll,lh,house,mk,ml,Title):
listcol=colordict[house]
Dark=listcol[0]
Mid=listcol[1]
Light=listcol[2]
arr=np.linspace(0,7,num=100)
weibSurv2 = exponweib.cdf(arr, kl, lh)
weibSurv4 = exponweib.cdf(arr, kh, ll)
weibSurv1 = exponweib.cdf(arr, mk, ml)
# p4,=plt.plot(arr, 1-weibSurv2,color=Dark,linewidth=3)
p1,=plt.plot(arr, 1-weibSurv2,color=Light,linewidth=4)
# p2,=plt.plot(arr, 1-weibSurv1,color=Mid,linewidth=3,linestyle='--')
p3,=plt.plot(arr, 1-weibSurv4,color=Light,linewidth=4)
plt.fill_between(arr,1-weibSurv2,1-weibSurv4, facecolor=Light, alpha=.3)
# thinkplot.plot(sf,color=Dark)
plt.xlabel('Age in Books')
plt.ylabel('Probability of Survival')
plt.ylim([.0,1])
plt.text(6.3,0.95,'Theon',color='Khaki')
plt.text(5.3,0.4,'Lord Walder Frey',color='DarkSeaGreen')
# plt.legend([p1,p2,p4],['90 Percent Credible Interval','Best Estimate','Data'])
plt.title(Title)
def char_lists(house,Gender,Class):
cur_house=hd[house]
alive1=cur_house[1][1][1] #Noble Men
alive2=cur_house[1][1][2] #Noble Women
alive3=cur_house[1][2][1] #Small Men
alive4=cur_house[1][2][2] #Small Women
alive1.pop(0)
alive2.pop(0)
alive3.pop(0)
alive4.pop(0)
dead1=cur_house[0][1][1]
dead2=cur_house[0][1][2]
dead3=cur_house[0][2][1]
dead4=cur_house[0][2][2]
dead1.pop(0)
dead2.pop(0)
dead3.pop(0)
dead4.pop(0)
if Gender=='M' and Class=='Noble':
alive=alive1
dead=dead1
elif Gender=='M' and Class=='Small':
alive=alive3
dead=dead3
elif Gender=='M' and Class=='All':
alive=alive1+alive3
dead=dead1+dead3
elif Gender=='F' and Class=='Noble':
alive=alive2
dead=dead2
elif Gender=='F' and Class=='Small':
alive=alive4
dead=dead4
elif Gender=='F' and Class=='All':
alive=alive2+alive4
dead=dead2+dead4
elif Gender=='All' and Class=='All':
dead=dead1+dead2+dead3+dead4
alive=alive1+alive2+alive3+alive4
else:
print ('Check your entries')
if len(dead)<=5:
print ('There are less than 5 dead in this category. Results may not be meaningful')
return alive,dead
# for house in ['Martell','None']
# alive,dead=char_lists(house)
# introductions,lifetimes=ages(alive,dead)
# sf,haz=SurvivalHaz(introductions,lifetimes)
# # kal,kah,lal,lah=cred_params(house)
# # CredIntPlt(sf,kal,kah,lal,lah,house,2.5559687549462544,0.26786495258406434) #NW all
# if house=='Martell':
# kal,kah,lal,lah=3.4324324324324325, 4.0851351351351353,0.20135135135135135, 0.25810810810810814
# if house=='None':
# kal,kah,lal,lah=2.5878378378378377, 3.0337837837837838,0.17770270270270272, 0.21554054054054056
# CredIntPlt(sf,kal,kah,lal,lah,house,2.3113471123606892,0.44672574344173971) #NW nobles
# plt.show()
def Specific_Character(House,Gender,Class,ksweep,lamsweep,Title=''):
alive,dead=char_lists(House,Gender,Class)
print 'alive', len (alive)
print alive
print 'dead', len (dead)
introductions,lifetimes=ages(alive,dead)
sf,haz=SurvivalHaz(introductions,lifetimes)
lam= thinkbayes2.MakeUniformPmf(lamsweep[0],lamsweep[1],lamsweep[2])
k = thinkbayes2.MakeUniformPmf(ksweep[0],ksweep[1],ksweep[2])
k.label = 'K'
lam.label = 'Lam'
print('Updating alives')
numAlive = len(alive)
i = 0
for pers in introductions:
i += 1
age = pers
k, lam = Update(k, lam, age, True)
print("Updating deaths")
numDead = len(dead)
i = 0
for pers in lifetimes:
i += 1
age = pers
k, lam = Update(k, lam, age, False)
# thinkplot.PrePlot(2)
# thinkplot.Pdfs([k, lam])
# plt.xlabel('Value')
# plt.ylabel('Probability')
# plt.title('Posterior Distributions')
# print ('If these distributions look chopped off, adjust kweep and lamsweep')
# thinkplot.Show()
mk = k.Mean()
ml = lam.Mean()
kl,kh = k.Percentile(5), k.Percentile(95)
ll,lh = lam.Percentile(5), lam.Percentile(95)
CredIntPlt(sf,kl,kh,ll,lh,House,mk,ml,Title)
# plt.show()
# arya and sansa
# Cersi
# Val
# Quaithe
# ksweep=[1.5,11,75]
# lsweep=[.0001,1,75]
# Specific_Character('Stark','F','All',ksweep,lsweep)
# ksweep=[.5,7,75]
# lsweep=[.0001,1,75]
# Specific_Character('Lannister','F','All',ksweep,lsweep,)
# ksweep=[.5,4.5,75]
# lsweep=[.0001,1,75]
# Specific_Character('Wildling','F','All',ksweep,lsweep)
# ksweep=[2,6,75]
# lsweep=[.0001,.5,75]
# Specific_Character('None','F','All',ksweep,lsweep,'Some Minor Characters')
# plt.show()
# dario
# mance
# theon
# Tyrion+ Jamie
# Frey
# ksweep=[1,8,75]
# lsweep=[.0001,.7,75]
# Specific_Character('Targaryen','M','Small',ksweep,lsweep)
# ksweep=[2.5,7.5,75]
# lsweep=[.0001,1,75]
# Specific_Character('Lannister','M','Noble',ksweep,lsweep,)
# ksweep=[.5,3.5,75]
# lsweep=[.0001,1,75]
# Specific_Character('Wildling','M','All',ksweep,lsweep,'Some Characters I Would Like to Live')
ksweep=[1.5,5,75]
lsweep=[.0001,.5,75]
Specific_Character('None','M','Noble',ksweep,lsweep)
ksweep=[2,9.5,75]
lsweep=[.0001,.5,75]
Specific_Character('Greyjoy','M','Noble',ksweep,lsweep,'Some Characters I Would Like to Die')
plt.show()
# i = 0
# for pers in introductions:
# i += 1
# age = pers
# k, lam = Update(k, lam, age, True)
# print("Updating deaths")
# numDead = len(dead)
# i = 0
# for pers in lifetimes:
# i += 1
# age = pers
# k, lam = Update(k, lam, age, False)
# thinkplot.PrePlot(2)
# thinkplot.Pdfs([k, lam])
# plt.xlabel('Value')
# plt.ylabel('Probability')
# plt.title('Posterior Distributions')
# thinkplot.Show()
# bestK = k.Mean()
# bestLam = lam.Mean()
# print("K:", bestK, "Lam:", bestLam)
# # k = thinkbayes2.MakeUniformPmf(.5,2.5,75)
# # lam = thinkbayes2.MakeUniformPmf(.000001,2,75)
# # k,lam=makePMF(k,lam)
# # thinkplot.PrePlot(2)
# # thinkplot.Pdfs([k, lam])
# # thinkplot.Show()
# # bestK = k.Mean()
# # bestLam = lam.Mean()
# # print("K:", bestK, "Lam:", bestLam)
# arr=np.linspace(0,7,num=100)
# weibSurv = exponweib.cdf(arr, bestK, bestLam)
# # # weibDeath = exponweib.pdf(arr, bestK, bestLam)
# # p1,=plt.plot(arr, 1-weibSurv, label="Survival Function")
# intervalk = k.Percentile(5), k.Percentile(95)
# intervallam = lam.Percentile(5), lam.Percentile(95)
# print intervalk
# print intervallam
# # (1.3175675675675675, 3.6216216216216219)
# # (0.24675675675675679, 0.66805405405405416)
# # thinkplot.plot(sf)
# # p2,=plt.plot(arr, weibDeath, label="Probability of Death")
# # thinkplot.plot(haz)
# # plt.xlabel('Age (books)')
# # plt.ylabel('Rate of Survival or Death')
# # plt.legend([p1,p2],['Survival Function','Probability of Death'])
# plt.show()
# House='NW All \n'
# WriteFile(k,lam,House) | Update | identifier_name |
Estimate_Hazard.py | import csv
import numpy as np
import scipy as sp
from scipy.stats import exponweib
import matplotlib
import matplotlib.pyplot as plt
import pandas
import thinkstats2
import thinkbayes2
import survival
import thinkplot
import random
import math
houselist=['Wildling','None','Night\'s Watch','Lannister','House Lannister','Stark','House Stark','Tully','House Tully', 'Arryn','House Arryn',
'Tyrell', 'House Tyrell', 'Targaryen','House Targaryen','Martell','House Martell','Baratheon','House Baratheon','Greyjoy','House Greyjoy']
# houselist_short=['Stark','Baratheon','None','Lannister','Tully','Arryn','Targaryen','Greyjoy','Wildling','Night\'s Watch','Tyrell','Martell']
houselist_short1=['Arryn','Stark','Lannister','None']
houselist_short2=['Tyrell','Baratheon','Tully','Night\'s Watch']
houselist_short3=['Martell','Targaryen','Wildling','Greyjoy']
houselist_short=['Arryn','Stark','Lannister','None','Tyrell','Baratheon','Tully','Night\'s Watch','Martell','Targaryen','Wildling','Greyjoy']
def Init_List_Struct():
list_str=[['dead', ['nobles', ['men'], ['women']], ['smallfolk', ['men'], ['women']]], ['alive', ['nobles', ['men'], ['women']], ['smallfolk', ['men'], ['women']]]]
return list_str
colordict={'Stark':['SlateGrey','DimGrey','Silver'],'Baratheon':['DarkOrange','Red','Orange'],'None':['MediumTurquoise','Teal','DarkSeaGreen'],
'Lannister':['Maroon','DarkGoldenRod','Gold'],'Tully':['FireBrick','RoyalBlue','LightSteelBlue'],'Arryn':['LightSkyBlue','LightSlateGrey','MidnightBlue'],
'Targaryen':['DarkRed','Black','Brown'],'Greyjoy':['DarkSlateGrey','GoldenRod','Khaki'],'Wildling':['Indigo','BlueViolet','Plum'],
'Night\'s Watch':['Black','LightGrey','Grey'],'Tyrell':['DarkGreen','Yellow','YellowGreen'],'Martell':['PaleGoldenRod','SandyBrown','Tomato']}
No=Init_List_Struct()
Lannister=Init_List_Struct()
Stark=Init_List_Struct()
Tully=Init_List_Struct()
Arryn=Init_List_Struct()
Tyrell=Init_List_Struct() #WTF
Targaryen=Init_List_Struct()
Martell=Init_List_Struct()#WTF2.5559687549462544
Baratheon=Init_List_Struct()
Greyjoy=Init_List_Struct()
Wildling=Init_List_Struct()
NW=Init_List_Struct()
hd={'Wildling':Wildling,'None': No,'Night\'s Watch':NW,'Lannister':Lannister,'House Lannister':Lannister,'Stark':Stark,'House Stark':Stark,
'Tully':Tully,'House Tully':Tully, 'Arryn':Arryn,'House Arryn':Arryn,'Tyrell':Tyrell, 'House Tyrell':Tyrell, 'Targaryen':Targaryen,
'House Targaryen':Targaryen,'Martell':Martell,'House Martell':Martell,'Baratheon':Baratheon,'House Baratheon':Baratheon,'Greyjoy':Greyjoy,'House Greyjoy':Greyjoy}
data=[]
with open('char_final.csv', 'r') as dataset:
reader=csv.reader(dataset)
for row in reader:
data.append(row)
data.pop(0)
data.pop(0)
data.pop(0)
data.pop(0)
def house_list(House_Name,info):
if info [1]==House_Name:
if info[3]!='': #if they are dead
if info[8]=='1':#if they are noble
if info[7]=='1': #if they are male
hd[House_Name][0][1][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][0][1][2].append(info)
else:
print 'a',info
elif info[8]=='0': #if they are smallfolk
if info[7]=='1': #if they are male
hd[House_Name][0][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][0][2][2].append(info)
else:
print 'b',info
else:
print 'bb',info
print info[8]
elif info[3]=='': #if they are alive
if info[8]=='1':#if they are noble
if info[7]=='1': #if they are male
hd[House_Name][1][1][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][1][2].append(info)
else:
print 'c',info
elif info[8]=='0': #if they are smallfolk
if info[7]=='1': #if they are male
hd[House_Name][1][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][2][2].append(info)
else:
print 'd',info
else:
|
for info in data:
for key in houselist:
house_list(key,info)
def ages(alive,dead):
got=72
cok=69
sos=81
ffc=45
dwd=72
bd={'got':got,'cok':cok,'sos':sos,'ffc':ffc,'dwd':dwd}
bnd={'got':0,'cok':1,'sos':2,'ffc':3,'dwd':4}
introductions=[]
lifetimes=[]
for pers in dead:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[13]=='1':
end='dwd'
elif pers[12]=='1':
end='ffc'
elif pers[11]=='1':
end='sos'
elif pers[10]=='1':
end='cok'
elif pers[9]=='1':
end='got'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
if pers[4]=='':
death=bnd[end]+1
else:
death=((float(pers[4])+1)/bd[end])+bnd[end]
life=death-birth
lifetimes.append(life)
for pers in alive:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
introductions.append(5-birth)
return introductions,lifetimes
def SurvivalHaz(introductions,lifetimes):
haz=survival.EstimateHazardFunction(lifetimes, introductions)
sf=haz.MakeSurvival()
# thinkplot.plot(sf,color='Grey')
# plt.xlabel("Age (books)")
# plt.ylabel("Probability of Surviving")
# plt.title('Survial Function')
# thinkplot.show()
# thinkplot.plot(haz,color='Grey')
# plt.title('Hazard Function')
# plt.xlabel("Age (books)")
# plt.ylabel("Percent of Lives That End")
# thinkplot.show()
return sf,haz
class GOT(thinkbayes2.Suite, thinkbayes2.Joint):
def Likelihood(self, data, hypo):
age, alive = data
k, lam = hypo
if alive:
prob = 1-exponweib.cdf(age, k, lam)
else:
prob = exponweib.pdf(age, k, lam)
return prob
def Update(k, lam, age, alive):
joint = thinkbayes2.MakeJoint(k, lam)
suite = GOT(joint)
suite.Update((age, alive))
k, lam = suite.Marginal(0, label=k.label), suite.Marginal(1, label=lam.label)
return k, lam
def makePMF(k,lam):
k.label = 'K'
lam.label = 'Lam'
print("Updating deaths")
numDead = len(dead)
ticks = math.ceil(numDead/100)
i = 0
for age in lifetimes:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
# age = float(pers[-1])
k, lam = Update(k, lam, age, False)
print('Updating alives')
numAlive = len(alive)
ticks = math.ceil(numAlive/100)
i = 0
for age in introductions:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
k, lam = Update(k, lam, age, True)
return k,lam
def WriteFile(k,lam,House):
intervalk = k.Percentile(5), k.Percentile(95)
intervallam = lam.Percentile(5), lam.Percentile(95)
good = raw_input('Good? y/n')
if good=='y':
file = open("klam.txt", "a")
Words=[House,'\n','K\n',str(k),'\n','\n','lam\n',str(lam),'\n','\n','K-90per cred\n',str(intervalk),'\n','\n','Lam-90per cred\n',str(intervallam),'\n','\n',]
file.writelines(Words)
file.close()
def cred_params(house):
file = open('house_all_alivef.txt', 'r')
i=-1
cred_param=[['Stark'],['Baratheon'],['None'],['Lannister'],['Tully'],['Arryn'],['Targaryen'],['Greyjoy'],['Wildling'],['Night\'s Watch'],['Tyrell'],['Martell']]
linelist=[]
for line in file:
if line[0] =='(':
linelist.append(line)
j=0
for i in range(len(linelist)):
if i%2==0:
kl=float(linelist[i][1:19])
kh=float(linelist[i][21:38])
cred_param[j].append(kl)
cred_param[j].append(kh)
j+=1
j=0
for i in range(len(linelist)):
if i%2!=0:
ll=float(linelist[i][1:19])
lh=float(linelist[i][22:38])
cred_param[j].append(ll)
cred_param[j].append(lh)
j+=1
for i in range(len(cred_param)):
if cred_param[i][0]==house:
return cred_param[i][1],cred_param[i][2],cred_param[i][3],cred_param[i][4]
def CredIntPlt(sf,kl,kh,ll,lh,house,mk,ml,Title):
listcol=colordict[house]
Dark=listcol[0]
Mid=listcol[1]
Light=listcol[2]
arr=np.linspace(0,7,num=100)
weibSurv2 = exponweib.cdf(arr, kl, lh)
weibSurv4 = exponweib.cdf(arr, kh, ll)
weibSurv1 = exponweib.cdf(arr, mk, ml)
# p4,=plt.plot(arr, 1-weibSurv2,color=Dark,linewidth=3)
p1,=plt.plot(arr, 1-weibSurv2,color=Light,linewidth=4)
# p2,=plt.plot(arr, 1-weibSurv1,color=Mid,linewidth=3,linestyle='--')
p3,=plt.plot(arr, 1-weibSurv4,color=Light,linewidth=4)
plt.fill_between(arr,1-weibSurv2,1-weibSurv4, facecolor=Light, alpha=.3)
# thinkplot.plot(sf,color=Dark)
plt.xlabel('Age in Books')
plt.ylabel('Probability of Survival')
plt.ylim([.0,1])
plt.text(6.3,0.95,'Theon',color='Khaki')
plt.text(5.3,0.4,'Lord Walder Frey',color='DarkSeaGreen')
# plt.legend([p1,p2,p4],['90 Percent Credible Interval','Best Estimate','Data'])
plt.title(Title)
def char_lists(house,Gender,Class):
cur_house=hd[house]
alive1=cur_house[1][1][1] #Noble Men
alive2=cur_house[1][1][2] #Noble Women
alive3=cur_house[1][2][1] #Small Men
alive4=cur_house[1][2][2] #Small Women
alive1.pop(0)
alive2.pop(0)
alive3.pop(0)
alive4.pop(0)
dead1=cur_house[0][1][1]
dead2=cur_house[0][1][2]
dead3=cur_house[0][2][1]
dead4=cur_house[0][2][2]
dead1.pop(0)
dead2.pop(0)
dead3.pop(0)
dead4.pop(0)
if Gender=='M' and Class=='Noble':
alive=alive1
dead=dead1
elif Gender=='M' and Class=='Small':
alive=alive3
dead=dead3
elif Gender=='M' and Class=='All':
alive=alive1+alive3
dead=dead1+dead3
elif Gender=='F' and Class=='Noble':
alive=alive2
dead=dead2
elif Gender=='F' and Class=='Small':
alive=alive4
dead=dead4
elif Gender=='F' and Class=='All':
alive=alive2+alive4
dead=dead2+dead4
elif Gender=='All' and Class=='All':
dead=dead1+dead2+dead3+dead4
alive=alive1+alive2+alive3+alive4
else:
print ('Check your entries')
if len(dead)<=5:
print ('There are less than 5 dead in this category. Results may not be meaningful')
return alive,dead
# for house in ['Martell','None']
# alive,dead=char_lists(house)
# introductions,lifetimes=ages(alive,dead)
# sf,haz=SurvivalHaz(introductions,lifetimes)
# # kal,kah,lal,lah=cred_params(house)
# # CredIntPlt(sf,kal,kah,lal,lah,house,2.5559687549462544,0.26786495258406434) #NW all
# if house=='Martell':
# kal,kah,lal,lah=3.4324324324324325, 4.0851351351351353,0.20135135135135135, 0.25810810810810814
# if house=='None':
# kal,kah,lal,lah=2.5878378378378377, 3.0337837837837838,0.17770270270270272, 0.21554054054054056
# CredIntPlt(sf,kal,kah,lal,lah,house,2.3113471123606892,0.44672574344173971) #NW nobles
# plt.show()
def Specific_Character(House,Gender,Class,ksweep,lamsweep,Title=''):
alive,dead=char_lists(House,Gender,Class)
print 'alive', len (alive)
print alive
print 'dead', len (dead)
introductions,lifetimes=ages(alive,dead)
sf,haz=SurvivalHaz(introductions,lifetimes)
lam= thinkbayes2.MakeUniformPmf(lamsweep[0],lamsweep[1],lamsweep[2])
k = thinkbayes2.MakeUniformPmf(ksweep[0],ksweep[1],ksweep[2])
k.label = 'K'
lam.label = 'Lam'
print('Updating alives')
numAlive = len(alive)
i = 0
for pers in introductions:
i += 1
age = pers
k, lam = Update(k, lam, age, True)
print("Updating deaths")
numDead = len(dead)
i = 0
for pers in lifetimes:
i += 1
age = pers
k, lam = Update(k, lam, age, False)
# thinkplot.PrePlot(2)
# thinkplot.Pdfs([k, lam])
# plt.xlabel('Value')
# plt.ylabel('Probability')
# plt.title('Posterior Distributions')
# print ('If these distributions look chopped off, adjust kweep and lamsweep')
# thinkplot.Show()
mk = k.Mean()
ml = lam.Mean()
kl,kh = k.Percentile(5), k.Percentile(95)
ll,lh = lam.Percentile(5), lam.Percentile(95)
CredIntPlt(sf,kl,kh,ll,lh,House,mk,ml,Title)
# plt.show()
# arya and sansa
# Cersi
# Val
# Quaithe
# ksweep=[1.5,11,75]
# lsweep=[.0001,1,75]
# Specific_Character('Stark','F','All',ksweep,lsweep)
# ksweep=[.5,7,75]
# lsweep=[.0001,1,75]
# Specific_Character('Lannister','F','All',ksweep,lsweep,)
# ksweep=[.5,4.5,75]
# lsweep=[.0001,1,75]
# Specific_Character('Wildling','F','All',ksweep,lsweep)
# ksweep=[2,6,75]
# lsweep=[.0001,.5,75]
# Specific_Character('None','F','All',ksweep,lsweep,'Some Minor Characters')
# plt.show()
# dario
# mance
# theon
# Tyrion+ Jamie
# Frey
# ksweep=[1,8,75]
# lsweep=[.0001,.7,75]
# Specific_Character('Targaryen','M','Small',ksweep,lsweep)
# ksweep=[2.5,7.5,75]
# lsweep=[.0001,1,75]
# Specific_Character('Lannister','M','Noble',ksweep,lsweep,)
# ksweep=[.5,3.5,75]
# lsweep=[.0001,1,75]
# Specific_Character('Wildling','M','All',ksweep,lsweep,'Some Characters I Would Like to Live')
ksweep=[1.5,5,75]
lsweep=[.0001,.5,75]
Specific_Character('None','M','Noble',ksweep,lsweep)
ksweep=[2,9.5,75]
lsweep=[.0001,.5,75]
Specific_Character('Greyjoy','M','Noble',ksweep,lsweep,'Some Characters I Would Like to Die')
plt.show()
# i = 0
# for pers in introductions:
# i += 1
# age = pers
# k, lam = Update(k, lam, age, True)
# print("Updating deaths")
# numDead = len(dead)
# i = 0
# for pers in lifetimes:
# i += 1
# age = pers
# k, lam = Update(k, lam, age, False)
# thinkplot.PrePlot(2)
# thinkplot.Pdfs([k, lam])
# plt.xlabel('Value')
# plt.ylabel('Probability')
# plt.title('Posterior Distributions')
# thinkplot.Show()
# bestK = k.Mean()
# bestLam = lam.Mean()
# print("K:", bestK, "Lam:", bestLam)
# # k = thinkbayes2.MakeUniformPmf(.5,2.5,75)
# # lam = thinkbayes2.MakeUniformPmf(.000001,2,75)
# # k,lam=makePMF(k,lam)
# # thinkplot.PrePlot(2)
# # thinkplot.Pdfs([k, lam])
# # thinkplot.Show()
# # bestK = k.Mean()
# # bestLam = lam.Mean()
# # print("K:", bestK, "Lam:", bestLam)
# arr=np.linspace(0,7,num=100)
# weibSurv = exponweib.cdf(arr, bestK, bestLam)
# # # weibDeath = exponweib.pdf(arr, bestK, bestLam)
# # p1,=plt.plot(arr, 1-weibSurv, label="Survival Function")
# intervalk = k.Percentile(5), k.Percentile(95)
# intervallam = lam.Percentile(5), lam.Percentile(95)
# print intervalk
# print intervallam
# # (1.3175675675675675, 3.6216216216216219)
# # (0.24675675675675679, 0.66805405405405416)
# # thinkplot.plot(sf)
# # p2,=plt.plot(arr, weibDeath, label="Probability of Death")
# # thinkplot.plot(haz)
# # plt.xlabel('Age (books)')
# # plt.ylabel('Rate of Survival or Death')
# # plt.legend([p1,p2],['Survival Function','Probability of Death'])
# plt.show()
# House='NW All \n'
# WriteFile(k,lam,House) | print 'e',info | conditional_block |
Estimate_Hazard.py | import csv
import numpy as np
import scipy as sp
from scipy.stats import exponweib
import matplotlib
import matplotlib.pyplot as plt
import pandas
import thinkstats2
import thinkbayes2
import survival
import thinkplot
import random
import math
houselist=['Wildling','None','Night\'s Watch','Lannister','House Lannister','Stark','House Stark','Tully','House Tully', 'Arryn','House Arryn',
'Tyrell', 'House Tyrell', 'Targaryen','House Targaryen','Martell','House Martell','Baratheon','House Baratheon','Greyjoy','House Greyjoy']
# houselist_short=['Stark','Baratheon','None','Lannister','Tully','Arryn','Targaryen','Greyjoy','Wildling','Night\'s Watch','Tyrell','Martell']
houselist_short1=['Arryn','Stark','Lannister','None']
houselist_short2=['Tyrell','Baratheon','Tully','Night\'s Watch']
houselist_short3=['Martell','Targaryen','Wildling','Greyjoy']
houselist_short=['Arryn','Stark','Lannister','None','Tyrell','Baratheon','Tully','Night\'s Watch','Martell','Targaryen','Wildling','Greyjoy']
def Init_List_Struct():
list_str=[['dead', ['nobles', ['men'], ['women']], ['smallfolk', ['men'], ['women']]], ['alive', ['nobles', ['men'], ['women']], ['smallfolk', ['men'], ['women']]]]
return list_str
colordict={'Stark':['SlateGrey','DimGrey','Silver'],'Baratheon':['DarkOrange','Red','Orange'],'None':['MediumTurquoise','Teal','DarkSeaGreen'],
'Lannister':['Maroon','DarkGoldenRod','Gold'],'Tully':['FireBrick','RoyalBlue','LightSteelBlue'],'Arryn':['LightSkyBlue','LightSlateGrey','MidnightBlue'],
'Targaryen':['DarkRed','Black','Brown'],'Greyjoy':['DarkSlateGrey','GoldenRod','Khaki'],'Wildling':['Indigo','BlueViolet','Plum'],
'Night\'s Watch':['Black','LightGrey','Grey'],'Tyrell':['DarkGreen','Yellow','YellowGreen'],'Martell':['PaleGoldenRod','SandyBrown','Tomato']}
No=Init_List_Struct()
Lannister=Init_List_Struct()
Stark=Init_List_Struct()
Tully=Init_List_Struct()
Arryn=Init_List_Struct()
Tyrell=Init_List_Struct() #WTF
Targaryen=Init_List_Struct()
Martell=Init_List_Struct()#WTF2.5559687549462544
Baratheon=Init_List_Struct()
Greyjoy=Init_List_Struct()
Wildling=Init_List_Struct()
NW=Init_List_Struct()
hd={'Wildling':Wildling,'None': No,'Night\'s Watch':NW,'Lannister':Lannister,'House Lannister':Lannister,'Stark':Stark,'House Stark':Stark,
'Tully':Tully,'House Tully':Tully, 'Arryn':Arryn,'House Arryn':Arryn,'Tyrell':Tyrell, 'House Tyrell':Tyrell, 'Targaryen':Targaryen,
'House Targaryen':Targaryen,'Martell':Martell,'House Martell':Martell,'Baratheon':Baratheon,'House Baratheon':Baratheon,'Greyjoy':Greyjoy,'House Greyjoy':Greyjoy}
data=[]
with open('char_final.csv', 'r') as dataset:
reader=csv.reader(dataset)
for row in reader:
data.append(row)
data.pop(0)
data.pop(0)
data.pop(0)
data.pop(0)
def house_list(House_Name,info):
if info [1]==House_Name:
if info[3]!='': #if they are dead
if info[8]=='1':#if they are noble
if info[7]=='1': #if they are male
hd[House_Name][0][1][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][0][1][2].append(info)
else:
print 'a',info
elif info[8]=='0': #if they are smallfolk
if info[7]=='1': #if they are male
hd[House_Name][0][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][0][2][2].append(info)
else:
print 'b',info
else:
print 'bb',info
print info[8]
elif info[3]=='': #if they are alive
if info[8]=='1':#if they are noble
if info[7]=='1': #if they are male
hd[House_Name][1][1][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][1][2].append(info)
else:
print 'c',info
elif info[8]=='0': #if they are smallfolk
if info[7]=='1': #if they are male
hd[House_Name][1][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][2][2].append(info)
else:
print 'd',info
else: | for key in houselist:
house_list(key,info)
def ages(alive,dead):
got=72
cok=69
sos=81
ffc=45
dwd=72
bd={'got':got,'cok':cok,'sos':sos,'ffc':ffc,'dwd':dwd}
bnd={'got':0,'cok':1,'sos':2,'ffc':3,'dwd':4}
introductions=[]
lifetimes=[]
for pers in dead:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[13]=='1':
end='dwd'
elif pers[12]=='1':
end='ffc'
elif pers[11]=='1':
end='sos'
elif pers[10]=='1':
end='cok'
elif pers[9]=='1':
end='got'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
if pers[4]=='':
death=bnd[end]+1
else:
death=((float(pers[4])+1)/bd[end])+bnd[end]
life=death-birth
lifetimes.append(life)
for pers in alive:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
introductions.append(5-birth)
return introductions,lifetimes
def SurvivalHaz(introductions,lifetimes):
haz=survival.EstimateHazardFunction(lifetimes, introductions)
sf=haz.MakeSurvival()
# thinkplot.plot(sf,color='Grey')
# plt.xlabel("Age (books)")
# plt.ylabel("Probability of Surviving")
# plt.title('Survial Function')
# thinkplot.show()
# thinkplot.plot(haz,color='Grey')
# plt.title('Hazard Function')
# plt.xlabel("Age (books)")
# plt.ylabel("Percent of Lives That End")
# thinkplot.show()
return sf,haz
class GOT(thinkbayes2.Suite, thinkbayes2.Joint):
def Likelihood(self, data, hypo):
age, alive = data
k, lam = hypo
if alive:
prob = 1-exponweib.cdf(age, k, lam)
else:
prob = exponweib.pdf(age, k, lam)
return prob
def Update(k, lam, age, alive):
joint = thinkbayes2.MakeJoint(k, lam)
suite = GOT(joint)
suite.Update((age, alive))
k, lam = suite.Marginal(0, label=k.label), suite.Marginal(1, label=lam.label)
return k, lam
def makePMF(k,lam):
k.label = 'K'
lam.label = 'Lam'
print("Updating deaths")
numDead = len(dead)
ticks = math.ceil(numDead/100)
i = 0
for age in lifetimes:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
# age = float(pers[-1])
k, lam = Update(k, lam, age, False)
print('Updating alives')
numAlive = len(alive)
ticks = math.ceil(numAlive/100)
i = 0
for age in introductions:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
k, lam = Update(k, lam, age, True)
return k,lam
def WriteFile(k,lam,House):
intervalk = k.Percentile(5), k.Percentile(95)
intervallam = lam.Percentile(5), lam.Percentile(95)
good = raw_input('Good? y/n')
if good=='y':
file = open("klam.txt", "a")
Words=[House,'\n','K\n',str(k),'\n','\n','lam\n',str(lam),'\n','\n','K-90per cred\n',str(intervalk),'\n','\n','Lam-90per cred\n',str(intervallam),'\n','\n',]
file.writelines(Words)
file.close()
def cred_params(house):
file = open('house_all_alivef.txt', 'r')
i=-1
cred_param=[['Stark'],['Baratheon'],['None'],['Lannister'],['Tully'],['Arryn'],['Targaryen'],['Greyjoy'],['Wildling'],['Night\'s Watch'],['Tyrell'],['Martell']]
linelist=[]
for line in file:
if line[0] =='(':
linelist.append(line)
j=0
for i in range(len(linelist)):
if i%2==0:
kl=float(linelist[i][1:19])
kh=float(linelist[i][21:38])
cred_param[j].append(kl)
cred_param[j].append(kh)
j+=1
j=0
for i in range(len(linelist)):
if i%2!=0:
ll=float(linelist[i][1:19])
lh=float(linelist[i][22:38])
cred_param[j].append(ll)
cred_param[j].append(lh)
j+=1
for i in range(len(cred_param)):
if cred_param[i][0]==house:
return cred_param[i][1],cred_param[i][2],cred_param[i][3],cred_param[i][4]
def CredIntPlt(sf,kl,kh,ll,lh,house,mk,ml,Title):
listcol=colordict[house]
Dark=listcol[0]
Mid=listcol[1]
Light=listcol[2]
arr=np.linspace(0,7,num=100)
weibSurv2 = exponweib.cdf(arr, kl, lh)
weibSurv4 = exponweib.cdf(arr, kh, ll)
weibSurv1 = exponweib.cdf(arr, mk, ml)
# p4,=plt.plot(arr, 1-weibSurv2,color=Dark,linewidth=3)
p1,=plt.plot(arr, 1-weibSurv2,color=Light,linewidth=4)
# p2,=plt.plot(arr, 1-weibSurv1,color=Mid,linewidth=3,linestyle='--')
p3,=plt.plot(arr, 1-weibSurv4,color=Light,linewidth=4)
plt.fill_between(arr,1-weibSurv2,1-weibSurv4, facecolor=Light, alpha=.3)
# thinkplot.plot(sf,color=Dark)
plt.xlabel('Age in Books')
plt.ylabel('Probability of Survival')
plt.ylim([.0,1])
plt.text(6.3,0.95,'Theon',color='Khaki')
plt.text(5.3,0.4,'Lord Walder Frey',color='DarkSeaGreen')
# plt.legend([p1,p2,p4],['90 Percent Credible Interval','Best Estimate','Data'])
plt.title(Title)
def char_lists(house,Gender,Class):
cur_house=hd[house]
alive1=cur_house[1][1][1] #Noble Men
alive2=cur_house[1][1][2] #Noble Women
alive3=cur_house[1][2][1] #Small Men
alive4=cur_house[1][2][2] #Small Women
alive1.pop(0)
alive2.pop(0)
alive3.pop(0)
alive4.pop(0)
dead1=cur_house[0][1][1]
dead2=cur_house[0][1][2]
dead3=cur_house[0][2][1]
dead4=cur_house[0][2][2]
dead1.pop(0)
dead2.pop(0)
dead3.pop(0)
dead4.pop(0)
if Gender=='M' and Class=='Noble':
alive=alive1
dead=dead1
elif Gender=='M' and Class=='Small':
alive=alive3
dead=dead3
elif Gender=='M' and Class=='All':
alive=alive1+alive3
dead=dead1+dead3
elif Gender=='F' and Class=='Noble':
alive=alive2
dead=dead2
elif Gender=='F' and Class=='Small':
alive=alive4
dead=dead4
elif Gender=='F' and Class=='All':
alive=alive2+alive4
dead=dead2+dead4
elif Gender=='All' and Class=='All':
dead=dead1+dead2+dead3+dead4
alive=alive1+alive2+alive3+alive4
else:
print ('Check your entries')
if len(dead)<=5:
print ('There are less than 5 dead in this category. Results may not be meaningful')
return alive,dead
# for house in ['Martell','None']
# alive,dead=char_lists(house)
# introductions,lifetimes=ages(alive,dead)
# sf,haz=SurvivalHaz(introductions,lifetimes)
# # kal,kah,lal,lah=cred_params(house)
# # CredIntPlt(sf,kal,kah,lal,lah,house,2.5559687549462544,0.26786495258406434) #NW all
# if house=='Martell':
# kal,kah,lal,lah=3.4324324324324325, 4.0851351351351353,0.20135135135135135, 0.25810810810810814
# if house=='None':
# kal,kah,lal,lah=2.5878378378378377, 3.0337837837837838,0.17770270270270272, 0.21554054054054056
# CredIntPlt(sf,kal,kah,lal,lah,house,2.3113471123606892,0.44672574344173971) #NW nobles
# plt.show()
def Specific_Character(House,Gender,Class,ksweep,lamsweep,Title=''):
alive,dead=char_lists(House,Gender,Class)
print 'alive', len (alive)
print alive
print 'dead', len (dead)
introductions,lifetimes=ages(alive,dead)
sf,haz=SurvivalHaz(introductions,lifetimes)
lam= thinkbayes2.MakeUniformPmf(lamsweep[0],lamsweep[1],lamsweep[2])
k = thinkbayes2.MakeUniformPmf(ksweep[0],ksweep[1],ksweep[2])
k.label = 'K'
lam.label = 'Lam'
print('Updating alives')
numAlive = len(alive)
i = 0
for pers in introductions:
i += 1
age = pers
k, lam = Update(k, lam, age, True)
print("Updating deaths")
numDead = len(dead)
i = 0
for pers in lifetimes:
i += 1
age = pers
k, lam = Update(k, lam, age, False)
# thinkplot.PrePlot(2)
# thinkplot.Pdfs([k, lam])
# plt.xlabel('Value')
# plt.ylabel('Probability')
# plt.title('Posterior Distributions')
# print ('If these distributions look chopped off, adjust kweep and lamsweep')
# thinkplot.Show()
mk = k.Mean()
ml = lam.Mean()
kl,kh = k.Percentile(5), k.Percentile(95)
ll,lh = lam.Percentile(5), lam.Percentile(95)
CredIntPlt(sf,kl,kh,ll,lh,House,mk,ml,Title)
# plt.show()
# arya and sansa
# Cersi
# Val
# Quaithe
# ksweep=[1.5,11,75]
# lsweep=[.0001,1,75]
# Specific_Character('Stark','F','All',ksweep,lsweep)
# ksweep=[.5,7,75]
# lsweep=[.0001,1,75]
# Specific_Character('Lannister','F','All',ksweep,lsweep,)
# ksweep=[.5,4.5,75]
# lsweep=[.0001,1,75]
# Specific_Character('Wildling','F','All',ksweep,lsweep)
# ksweep=[2,6,75]
# lsweep=[.0001,.5,75]
# Specific_Character('None','F','All',ksweep,lsweep,'Some Minor Characters')
# plt.show()
# dario
# mance
# theon
# Tyrion+ Jamie
# Frey
# ksweep=[1,8,75]
# lsweep=[.0001,.7,75]
# Specific_Character('Targaryen','M','Small',ksweep,lsweep)
# ksweep=[2.5,7.5,75]
# lsweep=[.0001,1,75]
# Specific_Character('Lannister','M','Noble',ksweep,lsweep,)
# ksweep=[.5,3.5,75]
# lsweep=[.0001,1,75]
# Specific_Character('Wildling','M','All',ksweep,lsweep,'Some Characters I Would Like to Live')
ksweep=[1.5,5,75]
lsweep=[.0001,.5,75]
Specific_Character('None','M','Noble',ksweep,lsweep)
ksweep=[2,9.5,75]
lsweep=[.0001,.5,75]
Specific_Character('Greyjoy','M','Noble',ksweep,lsweep,'Some Characters I Would Like to Die')
plt.show()
# i = 0
# for pers in introductions:
# i += 1
# age = pers
# k, lam = Update(k, lam, age, True)
# print("Updating deaths")
# numDead = len(dead)
# i = 0
# for pers in lifetimes:
# i += 1
# age = pers
# k, lam = Update(k, lam, age, False)
# thinkplot.PrePlot(2)
# thinkplot.Pdfs([k, lam])
# plt.xlabel('Value')
# plt.ylabel('Probability')
# plt.title('Posterior Distributions')
# thinkplot.Show()
# bestK = k.Mean()
# bestLam = lam.Mean()
# print("K:", bestK, "Lam:", bestLam)
# # k = thinkbayes2.MakeUniformPmf(.5,2.5,75)
# # lam = thinkbayes2.MakeUniformPmf(.000001,2,75)
# # k,lam=makePMF(k,lam)
# # thinkplot.PrePlot(2)
# # thinkplot.Pdfs([k, lam])
# # thinkplot.Show()
# # bestK = k.Mean()
# # bestLam = lam.Mean()
# # print("K:", bestK, "Lam:", bestLam)
# arr=np.linspace(0,7,num=100)
# weibSurv = exponweib.cdf(arr, bestK, bestLam)
# # # weibDeath = exponweib.pdf(arr, bestK, bestLam)
# # p1,=plt.plot(arr, 1-weibSurv, label="Survival Function")
# intervalk = k.Percentile(5), k.Percentile(95)
# intervallam = lam.Percentile(5), lam.Percentile(95)
# print intervalk
# print intervallam
# # (1.3175675675675675, 3.6216216216216219)
# # (0.24675675675675679, 0.66805405405405416)
# # thinkplot.plot(sf)
# # p2,=plt.plot(arr, weibDeath, label="Probability of Death")
# # thinkplot.plot(haz)
# # plt.xlabel('Age (books)')
# # plt.ylabel('Rate of Survival or Death')
# # plt.legend([p1,p2],['Survival Function','Probability of Death'])
# plt.show()
# House='NW All \n'
# WriteFile(k,lam,House) | print 'e',info
for info in data: | random_line_split |
Estimate_Hazard.py | import csv
import numpy as np
import scipy as sp
from scipy.stats import exponweib
import matplotlib
import matplotlib.pyplot as plt
import pandas
import thinkstats2
import thinkbayes2
import survival
import thinkplot
import random
import math
houselist=['Wildling','None','Night\'s Watch','Lannister','House Lannister','Stark','House Stark','Tully','House Tully', 'Arryn','House Arryn',
'Tyrell', 'House Tyrell', 'Targaryen','House Targaryen','Martell','House Martell','Baratheon','House Baratheon','Greyjoy','House Greyjoy']
# houselist_short=['Stark','Baratheon','None','Lannister','Tully','Arryn','Targaryen','Greyjoy','Wildling','Night\'s Watch','Tyrell','Martell']
houselist_short1=['Arryn','Stark','Lannister','None']
houselist_short2=['Tyrell','Baratheon','Tully','Night\'s Watch']
houselist_short3=['Martell','Targaryen','Wildling','Greyjoy']
houselist_short=['Arryn','Stark','Lannister','None','Tyrell','Baratheon','Tully','Night\'s Watch','Martell','Targaryen','Wildling','Greyjoy']
def Init_List_Struct():
list_str=[['dead', ['nobles', ['men'], ['women']], ['smallfolk', ['men'], ['women']]], ['alive', ['nobles', ['men'], ['women']], ['smallfolk', ['men'], ['women']]]]
return list_str
colordict={'Stark':['SlateGrey','DimGrey','Silver'],'Baratheon':['DarkOrange','Red','Orange'],'None':['MediumTurquoise','Teal','DarkSeaGreen'],
'Lannister':['Maroon','DarkGoldenRod','Gold'],'Tully':['FireBrick','RoyalBlue','LightSteelBlue'],'Arryn':['LightSkyBlue','LightSlateGrey','MidnightBlue'],
'Targaryen':['DarkRed','Black','Brown'],'Greyjoy':['DarkSlateGrey','GoldenRod','Khaki'],'Wildling':['Indigo','BlueViolet','Plum'],
'Night\'s Watch':['Black','LightGrey','Grey'],'Tyrell':['DarkGreen','Yellow','YellowGreen'],'Martell':['PaleGoldenRod','SandyBrown','Tomato']}
No=Init_List_Struct()
Lannister=Init_List_Struct()
Stark=Init_List_Struct()
Tully=Init_List_Struct()
Arryn=Init_List_Struct()
Tyrell=Init_List_Struct() #WTF
Targaryen=Init_List_Struct()
Martell=Init_List_Struct()#WTF2.5559687549462544
Baratheon=Init_List_Struct()
Greyjoy=Init_List_Struct()
Wildling=Init_List_Struct()
NW=Init_List_Struct()
hd={'Wildling':Wildling,'None': No,'Night\'s Watch':NW,'Lannister':Lannister,'House Lannister':Lannister,'Stark':Stark,'House Stark':Stark,
'Tully':Tully,'House Tully':Tully, 'Arryn':Arryn,'House Arryn':Arryn,'Tyrell':Tyrell, 'House Tyrell':Tyrell, 'Targaryen':Targaryen,
'House Targaryen':Targaryen,'Martell':Martell,'House Martell':Martell,'Baratheon':Baratheon,'House Baratheon':Baratheon,'Greyjoy':Greyjoy,'House Greyjoy':Greyjoy}
data=[]
with open('char_final.csv', 'r') as dataset:
reader=csv.reader(dataset)
for row in reader:
data.append(row)
data.pop(0)
data.pop(0)
data.pop(0)
data.pop(0)
def house_list(House_Name,info):
if info [1]==House_Name:
if info[3]!='': #if they are dead
if info[8]=='1':#if they are noble
if info[7]=='1': #if they are male
hd[House_Name][0][1][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][0][1][2].append(info)
else:
print 'a',info
elif info[8]=='0': #if they are smallfolk
if info[7]=='1': #if they are male
hd[House_Name][0][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][0][2][2].append(info)
else:
print 'b',info
else:
print 'bb',info
print info[8]
elif info[3]=='': #if they are alive
if info[8]=='1':#if they are noble
if info[7]=='1': #if they are male
hd[House_Name][1][1][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][1][2].append(info)
else:
print 'c',info
elif info[8]=='0': #if they are smallfolk
if info[7]=='1': #if they are male
hd[House_Name][1][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][2][2].append(info)
else:
print 'd',info
else:
print 'e',info
for info in data:
for key in houselist:
house_list(key,info)
def ages(alive,dead):
got=72
cok=69
sos=81
ffc=45
dwd=72
bd={'got':got,'cok':cok,'sos':sos,'ffc':ffc,'dwd':dwd}
bnd={'got':0,'cok':1,'sos':2,'ffc':3,'dwd':4}
introductions=[]
lifetimes=[]
for pers in dead:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[13]=='1':
end='dwd'
elif pers[12]=='1':
end='ffc'
elif pers[11]=='1':
end='sos'
elif pers[10]=='1':
end='cok'
elif pers[9]=='1':
end='got'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
if pers[4]=='':
death=bnd[end]+1
else:
death=((float(pers[4])+1)/bd[end])+bnd[end]
life=death-birth
lifetimes.append(life)
for pers in alive:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
introductions.append(5-birth)
return introductions,lifetimes
def SurvivalHaz(introductions,lifetimes):
haz=survival.EstimateHazardFunction(lifetimes, introductions)
sf=haz.MakeSurvival()
# thinkplot.plot(sf,color='Grey')
# plt.xlabel("Age (books)")
# plt.ylabel("Probability of Surviving")
# plt.title('Survial Function')
# thinkplot.show()
# thinkplot.plot(haz,color='Grey')
# plt.title('Hazard Function')
# plt.xlabel("Age (books)")
# plt.ylabel("Percent of Lives That End")
# thinkplot.show()
return sf,haz
class GOT(thinkbayes2.Suite, thinkbayes2.Joint):
def Likelihood(self, data, hypo):
age, alive = data
k, lam = hypo
if alive:
prob = 1-exponweib.cdf(age, k, lam)
else:
prob = exponweib.pdf(age, k, lam)
return prob
def Update(k, lam, age, alive):
joint = thinkbayes2.MakeJoint(k, lam)
suite = GOT(joint)
suite.Update((age, alive))
k, lam = suite.Marginal(0, label=k.label), suite.Marginal(1, label=lam.label)
return k, lam
def makePMF(k,lam):
k.label = 'K'
lam.label = 'Lam'
print("Updating deaths")
numDead = len(dead)
ticks = math.ceil(numDead/100)
i = 0
for age in lifetimes:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
# age = float(pers[-1])
k, lam = Update(k, lam, age, False)
print('Updating alives')
numAlive = len(alive)
ticks = math.ceil(numAlive/100)
i = 0
for age in introductions:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
k, lam = Update(k, lam, age, True)
return k,lam
def WriteFile(k,lam,House):
intervalk = k.Percentile(5), k.Percentile(95)
intervallam = lam.Percentile(5), lam.Percentile(95)
good = raw_input('Good? y/n')
if good=='y':
file = open("klam.txt", "a")
Words=[House,'\n','K\n',str(k),'\n','\n','lam\n',str(lam),'\n','\n','K-90per cred\n',str(intervalk),'\n','\n','Lam-90per cred\n',str(intervallam),'\n','\n',]
file.writelines(Words)
file.close()
def cred_params(house):
file = open('house_all_alivef.txt', 'r')
i=-1
cred_param=[['Stark'],['Baratheon'],['None'],['Lannister'],['Tully'],['Arryn'],['Targaryen'],['Greyjoy'],['Wildling'],['Night\'s Watch'],['Tyrell'],['Martell']]
linelist=[]
for line in file:
if line[0] =='(':
linelist.append(line)
j=0
for i in range(len(linelist)):
if i%2==0:
kl=float(linelist[i][1:19])
kh=float(linelist[i][21:38])
cred_param[j].append(kl)
cred_param[j].append(kh)
j+=1
j=0
for i in range(len(linelist)):
if i%2!=0:
ll=float(linelist[i][1:19])
lh=float(linelist[i][22:38])
cred_param[j].append(ll)
cred_param[j].append(lh)
j+=1
for i in range(len(cred_param)):
if cred_param[i][0]==house:
return cred_param[i][1],cred_param[i][2],cred_param[i][3],cred_param[i][4]
def CredIntPlt(sf,kl,kh,ll,lh,house,mk,ml,Title):
listcol=colordict[house]
Dark=listcol[0]
Mid=listcol[1]
Light=listcol[2]
arr=np.linspace(0,7,num=100)
weibSurv2 = exponweib.cdf(arr, kl, lh)
weibSurv4 = exponweib.cdf(arr, kh, ll)
weibSurv1 = exponweib.cdf(arr, mk, ml)
# p4,=plt.plot(arr, 1-weibSurv2,color=Dark,linewidth=3)
p1,=plt.plot(arr, 1-weibSurv2,color=Light,linewidth=4)
# p2,=plt.plot(arr, 1-weibSurv1,color=Mid,linewidth=3,linestyle='--')
p3,=plt.plot(arr, 1-weibSurv4,color=Light,linewidth=4)
plt.fill_between(arr,1-weibSurv2,1-weibSurv4, facecolor=Light, alpha=.3)
# thinkplot.plot(sf,color=Dark)
plt.xlabel('Age in Books')
plt.ylabel('Probability of Survival')
plt.ylim([.0,1])
plt.text(6.3,0.95,'Theon',color='Khaki')
plt.text(5.3,0.4,'Lord Walder Frey',color='DarkSeaGreen')
# plt.legend([p1,p2,p4],['90 Percent Credible Interval','Best Estimate','Data'])
plt.title(Title)
def char_lists(house,Gender,Class):
|
# for house in ['Martell','None']
# alive,dead=char_lists(house)
# introductions,lifetimes=ages(alive,dead)
# sf,haz=SurvivalHaz(introductions,lifetimes)
# # kal,kah,lal,lah=cred_params(house)
# # CredIntPlt(sf,kal,kah,lal,lah,house,2.5559687549462544,0.26786495258406434) #NW all
# if house=='Martell':
# kal,kah,lal,lah=3.4324324324324325, 4.0851351351351353,0.20135135135135135, 0.25810810810810814
# if house=='None':
# kal,kah,lal,lah=2.5878378378378377, 3.0337837837837838,0.17770270270270272, 0.21554054054054056
# CredIntPlt(sf,kal,kah,lal,lah,house,2.3113471123606892,0.44672574344173971) #NW nobles
# plt.show()
def Specific_Character(House,Gender,Class,ksweep,lamsweep,Title=''):
alive,dead=char_lists(House,Gender,Class)
print 'alive', len (alive)
print alive
print 'dead', len (dead)
introductions,lifetimes=ages(alive,dead)
sf,haz=SurvivalHaz(introductions,lifetimes)
lam= thinkbayes2.MakeUniformPmf(lamsweep[0],lamsweep[1],lamsweep[2])
k = thinkbayes2.MakeUniformPmf(ksweep[0],ksweep[1],ksweep[2])
k.label = 'K'
lam.label = 'Lam'
print('Updating alives')
numAlive = len(alive)
i = 0
for pers in introductions:
i += 1
age = pers
k, lam = Update(k, lam, age, True)
print("Updating deaths")
numDead = len(dead)
i = 0
for pers in lifetimes:
i += 1
age = pers
k, lam = Update(k, lam, age, False)
# thinkplot.PrePlot(2)
# thinkplot.Pdfs([k, lam])
# plt.xlabel('Value')
# plt.ylabel('Probability')
# plt.title('Posterior Distributions')
# print ('If these distributions look chopped off, adjust kweep and lamsweep')
# thinkplot.Show()
mk = k.Mean()
ml = lam.Mean()
kl,kh = k.Percentile(5), k.Percentile(95)
ll,lh = lam.Percentile(5), lam.Percentile(95)
CredIntPlt(sf,kl,kh,ll,lh,House,mk,ml,Title)
# plt.show()
# arya and sansa
# Cersi
# Val
# Quaithe
# ksweep=[1.5,11,75]
# lsweep=[.0001,1,75]
# Specific_Character('Stark','F','All',ksweep,lsweep)
# ksweep=[.5,7,75]
# lsweep=[.0001,1,75]
# Specific_Character('Lannister','F','All',ksweep,lsweep,)
# ksweep=[.5,4.5,75]
# lsweep=[.0001,1,75]
# Specific_Character('Wildling','F','All',ksweep,lsweep)
# ksweep=[2,6,75]
# lsweep=[.0001,.5,75]
# Specific_Character('None','F','All',ksweep,lsweep,'Some Minor Characters')
# plt.show()
# dario
# mance
# theon
# Tyrion+ Jamie
# Frey
# ksweep=[1,8,75]
# lsweep=[.0001,.7,75]
# Specific_Character('Targaryen','M','Small',ksweep,lsweep)
# ksweep=[2.5,7.5,75]
# lsweep=[.0001,1,75]
# Specific_Character('Lannister','M','Noble',ksweep,lsweep,)
# ksweep=[.5,3.5,75]
# lsweep=[.0001,1,75]
# Specific_Character('Wildling','M','All',ksweep,lsweep,'Some Characters I Would Like to Live')
ksweep=[1.5,5,75]
lsweep=[.0001,.5,75]
Specific_Character('None','M','Noble',ksweep,lsweep)
ksweep=[2,9.5,75]
lsweep=[.0001,.5,75]
Specific_Character('Greyjoy','M','Noble',ksweep,lsweep,'Some Characters I Would Like to Die')
plt.show()
# i = 0
# for pers in introductions:
# i += 1
# age = pers
# k, lam = Update(k, lam, age, True)
# print("Updating deaths")
# numDead = len(dead)
# i = 0
# for pers in lifetimes:
# i += 1
# age = pers
# k, lam = Update(k, lam, age, False)
# thinkplot.PrePlot(2)
# thinkplot.Pdfs([k, lam])
# plt.xlabel('Value')
# plt.ylabel('Probability')
# plt.title('Posterior Distributions')
# thinkplot.Show()
# bestK = k.Mean()
# bestLam = lam.Mean()
# print("K:", bestK, "Lam:", bestLam)
# # k = thinkbayes2.MakeUniformPmf(.5,2.5,75)
# # lam = thinkbayes2.MakeUniformPmf(.000001,2,75)
# # k,lam=makePMF(k,lam)
# # thinkplot.PrePlot(2)
# # thinkplot.Pdfs([k, lam])
# # thinkplot.Show()
# # bestK = k.Mean()
# # bestLam = lam.Mean()
# # print("K:", bestK, "Lam:", bestLam)
# arr=np.linspace(0,7,num=100)
# weibSurv = exponweib.cdf(arr, bestK, bestLam)
# # # weibDeath = exponweib.pdf(arr, bestK, bestLam)
# # p1,=plt.plot(arr, 1-weibSurv, label="Survival Function")
# intervalk = k.Percentile(5), k.Percentile(95)
# intervallam = lam.Percentile(5), lam.Percentile(95)
# print intervalk
# print intervallam
# # (1.3175675675675675, 3.6216216216216219)
# # (0.24675675675675679, 0.66805405405405416)
# # thinkplot.plot(sf)
# # p2,=plt.plot(arr, weibDeath, label="Probability of Death")
# # thinkplot.plot(haz)
# # plt.xlabel('Age (books)')
# # plt.ylabel('Rate of Survival or Death')
# # plt.legend([p1,p2],['Survival Function','Probability of Death'])
# plt.show()
# House='NW All \n'
# WriteFile(k,lam,House) | cur_house=hd[house]
alive1=cur_house[1][1][1] #Noble Men
alive2=cur_house[1][1][2] #Noble Women
alive3=cur_house[1][2][1] #Small Men
alive4=cur_house[1][2][2] #Small Women
alive1.pop(0)
alive2.pop(0)
alive3.pop(0)
alive4.pop(0)
dead1=cur_house[0][1][1]
dead2=cur_house[0][1][2]
dead3=cur_house[0][2][1]
dead4=cur_house[0][2][2]
dead1.pop(0)
dead2.pop(0)
dead3.pop(0)
dead4.pop(0)
if Gender=='M' and Class=='Noble':
alive=alive1
dead=dead1
elif Gender=='M' and Class=='Small':
alive=alive3
dead=dead3
elif Gender=='M' and Class=='All':
alive=alive1+alive3
dead=dead1+dead3
elif Gender=='F' and Class=='Noble':
alive=alive2
dead=dead2
elif Gender=='F' and Class=='Small':
alive=alive4
dead=dead4
elif Gender=='F' and Class=='All':
alive=alive2+alive4
dead=dead2+dead4
elif Gender=='All' and Class=='All':
dead=dead1+dead2+dead3+dead4
alive=alive1+alive2+alive3+alive4
else:
print ('Check your entries')
if len(dead)<=5:
print ('There are less than 5 dead in this category. Results may not be meaningful')
return alive,dead | identifier_body |
main.rs | /// easiGrow
///
/// by Paul White (Nov 2014--2017)
/// written in rust (www.rust-lang.org)
///
/// A program to match crack growth predictions to measurements.
///
/// The program calculates fatigue crack growth rates and finds the
/// optimum parameters of a crack growth model to match predictions
/// with measurements.
///
/// **easiGrow** is a standalone program but most of the calculations
/// are done through calls to the associated **fatigue** library which
/// is included. The main program is for doing anything that
/// explicitly uses the command line flags inlcuding the optimisation
/// module. These flages are used to build the **EasiOptions** data
/// structure which is then used to generate the crack growth
/// history. The optimisation generates a crack growth curve which it
/// compares with a fractography file. It finds the error between
/// these measurements and tries to minimise the sum errors through
/// minimisation routines.
///
/// Currently, none of the models has a memory effect, so it is ok to
/// just start growing the crack from an iniital crack size that is
/// smaller than the initial fracto data. The struct `grow::CrackState`
/// also contains parameters that are passed along with the applied
/// loading _kmin_ and _kmax_, so any memory variables should be added to
/// this struct and will be availabe to be used by the _da/dn_ equation.
/// The simplest memory effect that is included in the `CrackState`
/// data is the plastic zone size, but there are no dadn equations
/// currently using this. The memory effect does not appear to be
/// strong in AA7050 material.
///
/// Think of the program flow as
///
/// 1. Read in data
/// 2. Filter the sequence (turning point, rainflow, risefall, deadband etc.) and convert to cycles
/// 3. Filter the list of cycles
/// 4. If required, optimise any parameters
/// 5. Perform a crack growth calculation
/// 6. Write out requested output
#[macro_use]
extern crate clap;
extern crate fatigue;
extern crate log;
extern crate env_logger;
use std::f64::consts::FRAC_PI_2;
use std::process;
use std::collections::BTreeSet;
use fatigue::{beta, cycle, dadn, fracto, grow, io, material, table, tag};
use options_clap::get_options_clap;
use options::{OptimMethod, TerminatingOutput};
use fatigue::dadn::DaDn;
use fatigue::COMMENT;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use log::error;
use std::io::Write;
mod list;
mod optimise;
mod sweep;
mod factors;
mod options;
mod options_clap;
mod nelder;
mod numbers;
mod vector;
#[cfg(feature = "GSL")]
mod optimise_gsl;
fn main() {
env_logger::init();
// get all the data
let materials = material::get_all_dadns();
let mut options = options::get_default_options();
get_options_clap("", &mut options);
println!("{}easiGrow: version {}", COMMENT, crate_version!());
println!("{}", COMMENT);
if options.verbosity == options::Verbosity::Verbose {
println!("{}Options: ", COMMENT);
println!("{}", options);
}
options::read_all_files(&mut options);
// process all the modifications to the sequence
options.sequence = cycle::process_seq_mods(&options.sequence, &options.seq_mods);
// Get the cycles from either the external sequence file, command line or the cycle file.
if options.cycle_infile != "" && options.seq_infile != "" {
error!("Error: you have specified a sequence file '{}' as well as a cycle file '{}'. Specify only one.",
options.seq_infile, options.cycle_infile);
std::process::exit(2)
}
let unclosed = if options.cycle_infile == "" {
let (cycles, left) = cycle::cycles_from_sequence(&options.sequence, &options.cycle_method);
options.cycles = cycles;
left
} else {
Vec::new()
};
// process all the modifications to the cycles
options.cycles = cycle::process_cycle_mods(&options.cycles, &options.cycle_mods);
// Only keep those cycles that remain after filtering the cycles
// and mark the turning points associated with those cycles. This
// section is only for writing out the modified sequence, since
// the filtered cycles are all that is used for crack growth.
if options.seq_mods.cycles {
let mut keep = vec![false; options.sequence.len()];
for cycle in &options.cycles {
keep[cycle.max.index] = true;
keep[cycle.min.index] = true;
}
options.sequence.retain(|s| keep[s.index])
}
// Any request for file or info output will result in program
// termination. This policy is to reduce the complexity for the
// user as to what the program does.
// Write out the sequence file.
if let Some(outfile) = options.seq_mods.outfile {
io::write_sequence(&outfile, &options.sequence);
std::process::exit(0);
}
// Write out the cycles file.
if let Some(outfile) = options.cycle_mods.outfile {
io::write_cycles(&outfile, &options.cycles);
std::process::exit(0);
}
// write out the beta by converting to a beta table. This can be
// then read back in using the file: option for beta selection.
if options.beta_outfile != "" {
let beta = beta::get_beta_fn(&options.beta, &options.component);
let table_beta = beta.as_table();
// need to write to file
let path = Path::new(&options.beta_outfile);
let display = path.display();
let mut file = match File::create(&path) {
// The `description` method of `io::Error` returns a string that
// describes the error
Err(why) => {
error!(
"Error: could not create the file '{}': {}.",
display,
Error::description(&why)
);
std::process::exit(1)
}
Ok(file) => file,
};
let _ = write!(file, "{}", table_beta);
std::process::exit(0);
}
// write out summary information of the sequence
match options.output {
TerminatingOutput::Summary => {
let seq_source = if options.seq_infile != "" {
options.seq_infile
} else {
// This is a little vague as the sequence could be either
// the default sequence or overwritten with a supplied sequence.
String::from("(Used specified sequence)")
};
cycle::summarise_sequence(&seq_source, &options.sequence, &options.seq_mods);
let cycle_source = if options.cycle_infile != "" {
options.cycle_infile
} else {
format!(
"(Obtained from sequence using '{:?}' method)",
options.cycle_method
)
};
cycle::summarise_cycles(
&cycle_source,
&options.cycles,
&unclosed,
&options.cycle_mods,
);
std::process::exit(0)
}
// write out extended list of options and methods
TerminatingOutput::List => {
list::print_list();
std::process::exit(0);
}
_ => (),
}
// get the correct material parameters for the dadn equation or
// from the command line. If the params are not given, then get the
// dadn material constants from the internal database.
let mut params = options.params.clone();
if params.is_empty() {
// extract out the appropriate material parameters from a file
params = if options.dadn.starts_with("file:") {
let filename = options.dadn.trim_start_matches("file:");
println!(
"{}No parameters given, using the dk values in the dadn file {}",
COMMENT, filename
);
let table = table::Table::read_file(filename, true);
// collapse down the dks and use these as the parameters for optimising
table.variables()
// or from the internal database.
} else {
println!(
"{}No parameters given, obtaining from material library for {}",
COMMENT, options.dadn
);
match materials.iter().find(|m| options.dadn.starts_with(m.name)) {
Some(m) => m.eqn.variables(),
None => {
error!("Error: Unknown dadn model {}", options.dadn);
process::exit(1);
}
}
}
};
// Optimise the parameters to match the predicted crack growth
// rates with the associated measured crack growth rates.
if options.optimise.file != "" {
// optimisation scaling factors
options.params = params.clone();
println!(
"{}Now starting the optimisation with params {:?} ...",
COMMENT, options.params
);
let mut factors = vec![1.0; params.len()]; // non-dimensionalised factors used for optimisation
optimise_error(&options, &mut factors);
println!("{}...finished the optimisation. ", COMMENT);
println!("{}The normalised factors are {:?}", COMMENT, factors);
// Rescale the parameters to include the optimised factors
params = options
.params
.iter()
.zip(factors)
.map(|(p, f)| p * f)
.collect::<Vec<f64>>();
println!("{}The scaled optimised factors are: {:?}", COMMENT, params);
if options.scale == 0.0 {
std::process::exit(0); // not an error if we have performed an optimisation
}
}
// Grow the crack
let history_all = generate_crack_history(&options, ¶ms);
// Lastly, now that we've grown the crack, check if we need to
// generate and write out a pseudo image.
if options.image.file != "" {
println!("Making a pseudo image...");
if options.image.file.ends_with(".svg") {
fracto::write_svg_pseudo_image(&history_all, &options.image);
println!("Image written to file '{}'", options.image.file);
} else {
error!("Error: Currently easigo can only generate svg. Please use a '.svg' suffix");
}
}
}
#[cfg(not(feature = "GSL"))]
fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors)
}
};
}
#[cfg(feature = "GSL")]
fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) |
// Finally grow the crack with the current parameters which may have been optimised.
// We exit here if the scale has not been set. Otherwise we
// would go through and do a default calculation which confuses
// people if they just want to start the program to see how to get
// help.
fn generate_crack_history(options: &options::EasiOptions, params: &[f64]) -> Vec<grow::History> {
let dadn_eqn = dadn::make_model(&options.dadn, ¶ms, String::from("unknown"));
println!("{}da/dN equation: {}", COMMENT, dadn_eqn);
let beta = beta::get_beta_fn(&options.beta, &options.component);
if options.scale == 0.0 {
error!(
"Error: The sequence scale factor is 0. You need to set the scale factor
(i.e. load or stress level) in order to perform a crack growth calculation.
Try\n easigrow --help"
);
std::process::exit(1);
}
if options.cycles.is_empty() {
println!("Error: There are no closed cycles in sequence. Perhaps try the re-order sequence option -r");
std::process::exit(1);
}
// We define the initial state. If any memory effect is to be
// included in the crack growth model, the meory should be in this
// data structure.
let init_crack = grow::CrackState::new(options.a.clone());
let mut history_all = Vec::new();
grow::display_history_header(&options.output_vars);
// Non-dimensional ratios for beta factor
let c = options.a[options.a.len() - 1];
let a_on_c = options.a[0] / c;
let a_on_d = options.a[0] / options.component.forward;
let c_on_b = c / options.component.sideways;
let a_on_r = options.a[0] / options.component.radius;
// phis is a vector of angles around the crack front. It depends
// on the beta whether any or all of the angles are used. Most
// just use the first and some use the last as well.
let phis = vec![0.0, FRAC_PI_2];
// Initialise the history
let init_history = grow::History {
block: 0.0,
stress: 0.0,
cycle: cycle::Cycle {
max: tag::Tag {
value: 0.0,
index: 0,
},
min: tag::Tag {
value: 0.0,
index: 0,
},
},
k: vec![0.0, 0.0],
dk: vec![0.0, 0.0],
beta: beta.beta(a_on_d, a_on_c, c_on_b, a_on_r, &phis),
da: vec![0.0, 0.0],
crack: init_crack,
};
grow::display_history_line(&init_history, &options.output_vars, &options.component);
let component = grow::FatigueTest {
history: init_history,
component: options.component.clone(),
scale: options.scale,
cycles: options.cycles.clone(),
a_limit: options.a_limit.clone(),
block_limit: options.block_limit,
next_cycle: 0,
dadn: dadn_eqn,
beta,
output_vars: options.output_vars.clone(),
};
// make a hash set of the lines that are required for output
let mut output_lines: BTreeSet<usize> = options.output_lines.iter().cloned().collect();
// if there are no lines in the output then put in the line for the first cycle
if options
.cycles
.iter()
.filter(|c| output_lines.contains(&c.max.index) || output_lines.contains(&c.min.index))
.count() == 0
{
println!("output_lines {:?}", output_lines);
println!(
"
Warning: There are no sequence lines in the cycle list and so there
will be no crack growth output. Consider closing up cycles
with re-order to use all sequence lines or include specific
sequence lines that are in the cycle. Meanwhile, the output will
be for the squence line in the first cycle at line {}.",
options.cycles[0].max.index
);
output_lines.insert(options.cycles[0].max.index);
}
// Start the crack growth. This loop steps through each cycle
// repeating the cycles until a terminating condition stops the
// growth and ends the for loop.
for (cycle_no, history) in component.enumerate() {
if grow::output_cycle_history(&history, options.output_every, &output_lines, cycle_no) {
grow::display_history_line(&history, &options.output_vars, &options.component);
}
// Only keep the history if we are producing a fracto image.
if options.image.file != "" {
history_all.push(history);
}
}
history_all
}
| {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(&options, &mut factors),
OptimMethod::Levenberg => optimise_gsl::gsl_match_crack(&options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors);
optimise_gsl::gsl_match_crack(options, &mut factors)
}
};
} | identifier_body |
main.rs | /// easiGrow
///
/// by Paul White (Nov 2014--2017)
/// written in rust (www.rust-lang.org)
///
/// A program to match crack growth predictions to measurements.
///
/// The program calculates fatigue crack growth rates and finds the
/// optimum parameters of a crack growth model to match predictions
/// with measurements.
///
/// **easiGrow** is a standalone program but most of the calculations
/// are done through calls to the associated **fatigue** library which
/// is included. The main program is for doing anything that
/// explicitly uses the command line flags inlcuding the optimisation
/// module. These flages are used to build the **EasiOptions** data
/// structure which is then used to generate the crack growth
/// history. The optimisation generates a crack growth curve which it
/// compares with a fractography file. It finds the error between
/// these measurements and tries to minimise the sum errors through
/// minimisation routines.
///
/// Currently, none of the models has a memory effect, so it is ok to
/// just start growing the crack from an iniital crack size that is
/// smaller than the initial fracto data. The struct `grow::CrackState`
/// also contains parameters that are passed along with the applied
/// loading _kmin_ and _kmax_, so any memory variables should be added to
/// this struct and will be availabe to be used by the _da/dn_ equation.
/// The simplest memory effect that is included in the `CrackState`
/// data is the plastic zone size, but there are no dadn equations
/// currently using this. The memory effect does not appear to be
/// strong in AA7050 material.
///
/// Think of the program flow as
///
/// 1. Read in data
/// 2. Filter the sequence (turning point, rainflow, risefall, deadband etc.) and convert to cycles
/// 3. Filter the list of cycles
/// 4. If required, optimise any parameters
/// 5. Perform a crack growth calculation
/// 6. Write out requested output
#[macro_use]
extern crate clap;
extern crate fatigue;
extern crate log;
extern crate env_logger;
use std::f64::consts::FRAC_PI_2;
use std::process;
use std::collections::BTreeSet;
use fatigue::{beta, cycle, dadn, fracto, grow, io, material, table, tag};
use options_clap::get_options_clap;
use options::{OptimMethod, TerminatingOutput};
use fatigue::dadn::DaDn;
use fatigue::COMMENT;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use log::error;
use std::io::Write;
mod list;
mod optimise;
mod sweep;
mod factors;
mod options;
mod options_clap;
mod nelder;
mod numbers;
mod vector;
#[cfg(feature = "GSL")]
mod optimise_gsl;
fn main() {
env_logger::init();
// get all the data
let materials = material::get_all_dadns();
let mut options = options::get_default_options();
get_options_clap("", &mut options);
println!("{}easiGrow: version {}", COMMENT, crate_version!());
println!("{}", COMMENT);
if options.verbosity == options::Verbosity::Verbose {
println!("{}Options: ", COMMENT);
println!("{}", options);
}
options::read_all_files(&mut options);
// process all the modifications to the sequence
options.sequence = cycle::process_seq_mods(&options.sequence, &options.seq_mods);
// Get the cycles from either the external sequence file, command line or the cycle file.
if options.cycle_infile != "" && options.seq_infile != "" {
error!("Error: you have specified a sequence file '{}' as well as a cycle file '{}'. Specify only one.",
options.seq_infile, options.cycle_infile);
std::process::exit(2)
}
let unclosed = if options.cycle_infile == "" {
let (cycles, left) = cycle::cycles_from_sequence(&options.sequence, &options.cycle_method);
options.cycles = cycles;
left
} else {
Vec::new()
};
// process all the modifications to the cycles
options.cycles = cycle::process_cycle_mods(&options.cycles, &options.cycle_mods);
// Only keep those cycles that remain after filtering the cycles
// and mark the turning points associated with those cycles. This
// section is only for writing out the modified sequence, since
// the filtered cycles are all that is used for crack growth.
if options.seq_mods.cycles {
let mut keep = vec![false; options.sequence.len()];
for cycle in &options.cycles {
keep[cycle.max.index] = true;
keep[cycle.min.index] = true;
}
options.sequence.retain(|s| keep[s.index])
}
// Any request for file or info output will result in program
// termination. This policy is to reduce the complexity for the
// user as to what the program does.
// Write out the sequence file.
if let Some(outfile) = options.seq_mods.outfile {
io::write_sequence(&outfile, &options.sequence);
std::process::exit(0);
}
// Write out the cycles file.
if let Some(outfile) = options.cycle_mods.outfile {
io::write_cycles(&outfile, &options.cycles);
std::process::exit(0);
}
// write out the beta by converting to a beta table. This can be
// then read back in using the file: option for beta selection.
if options.beta_outfile != "" {
let beta = beta::get_beta_fn(&options.beta, &options.component);
let table_beta = beta.as_table();
// need to write to file
let path = Path::new(&options.beta_outfile);
let display = path.display();
let mut file = match File::create(&path) {
// The `description` method of `io::Error` returns a string that
// describes the error
Err(why) => {
error!(
"Error: could not create the file '{}': {}.",
display,
Error::description(&why)
);
std::process::exit(1)
}
Ok(file) => file,
};
let _ = write!(file, "{}", table_beta);
std::process::exit(0);
}
// write out summary information of the sequence
match options.output {
TerminatingOutput::Summary => {
let seq_source = if options.seq_infile != "" {
options.seq_infile
} else {
// This is a little vague as the sequence could be either
// the default sequence or overwritten with a supplied sequence.
String::from("(Used specified sequence)")
};
cycle::summarise_sequence(&seq_source, &options.sequence, &options.seq_mods);
let cycle_source = if options.cycle_infile != "" {
options.cycle_infile
} else {
format!(
"(Obtained from sequence using '{:?}' method)",
options.cycle_method
)
};
cycle::summarise_cycles(
&cycle_source,
&options.cycles,
&unclosed,
&options.cycle_mods,
);
std::process::exit(0)
}
// write out extended list of options and methods
TerminatingOutput::List => {
list::print_list();
std::process::exit(0);
}
_ => (),
}
// get the correct material parameters for the dadn equation or
// from the command line. If the params are not given, then get the
// dadn material constants from the internal database.
let mut params = options.params.clone();
if params.is_empty() {
// extract out the appropriate material parameters from a file
params = if options.dadn.starts_with("file:") {
let filename = options.dadn.trim_start_matches("file:");
println!(
"{}No parameters given, using the dk values in the dadn file {}",
COMMENT, filename
);
let table = table::Table::read_file(filename, true);
// collapse down the dks and use these as the parameters for optimising
table.variables()
// or from the internal database.
} else {
println!(
"{}No parameters given, obtaining from material library for {}",
COMMENT, options.dadn
);
match materials.iter().find(|m| options.dadn.starts_with(m.name)) {
Some(m) => m.eqn.variables(),
None => {
error!("Error: Unknown dadn model {}", options.dadn);
process::exit(1);
}
}
}
};
// Optimise the parameters to match the predicted crack growth
// rates with the associated measured crack growth rates.
if options.optimise.file != "" {
// optimisation scaling factors
options.params = params.clone();
println!(
"{}Now starting the optimisation with params {:?} ...",
COMMENT, options.params
);
let mut factors = vec![1.0; params.len()]; // non-dimensionalised factors used for optimisation
optimise_error(&options, &mut factors);
println!("{}...finished the optimisation. ", COMMENT);
println!("{}The normalised factors are {:?}", COMMENT, factors);
// Rescale the parameters to include the optimised factors
params = options
.params
.iter()
.zip(factors)
.map(|(p, f)| p * f)
.collect::<Vec<f64>>();
println!("{}The scaled optimised factors are: {:?}", COMMENT, params);
if options.scale == 0.0 {
std::process::exit(0); // not an error if we have performed an optimisation
}
}
// Grow the crack
let history_all = generate_crack_history(&options, ¶ms);
// Lastly, now that we've grown the crack, check if we need to
// generate and write out a pseudo image.
if options.image.file != "" {
println!("Making a pseudo image...");
if options.image.file.ends_with(".svg") {
fracto::write_svg_pseudo_image(&history_all, &options.image);
println!("Image written to file '{}'", options.image.file);
} else {
error!("Error: Currently easigo can only generate svg. Please use a '.svg' suffix");
}
}
}
#[cfg(not(feature = "GSL"))]
fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors)
}
};
}
#[cfg(feature = "GSL")] | OptimMethod::Nelder => optimise::nelder_match_crack(&options, &mut factors),
OptimMethod::Levenberg => optimise_gsl::gsl_match_crack(&options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors);
optimise_gsl::gsl_match_crack(options, &mut factors)
}
};
}
// Finally grow the crack with the current parameters which may have been optimised.
// We exit here if the scale has not been set. Otherwise we
// would go through and do a default calculation which confuses
// people if they just want to start the program to see how to get
// help.
fn generate_crack_history(options: &options::EasiOptions, params: &[f64]) -> Vec<grow::History> {
let dadn_eqn = dadn::make_model(&options.dadn, ¶ms, String::from("unknown"));
println!("{}da/dN equation: {}", COMMENT, dadn_eqn);
let beta = beta::get_beta_fn(&options.beta, &options.component);
if options.scale == 0.0 {
error!(
"Error: The sequence scale factor is 0. You need to set the scale factor
(i.e. load or stress level) in order to perform a crack growth calculation.
Try\n easigrow --help"
);
std::process::exit(1);
}
if options.cycles.is_empty() {
println!("Error: There are no closed cycles in sequence. Perhaps try the re-order sequence option -r");
std::process::exit(1);
}
// We define the initial state. If any memory effect is to be
// included in the crack growth model, the meory should be in this
// data structure.
let init_crack = grow::CrackState::new(options.a.clone());
let mut history_all = Vec::new();
grow::display_history_header(&options.output_vars);
// Non-dimensional ratios for beta factor
let c = options.a[options.a.len() - 1];
let a_on_c = options.a[0] / c;
let a_on_d = options.a[0] / options.component.forward;
let c_on_b = c / options.component.sideways;
let a_on_r = options.a[0] / options.component.radius;
// phis is a vector of angles around the crack front. It depends
// on the beta whether any or all of the angles are used. Most
// just use the first and some use the last as well.
let phis = vec![0.0, FRAC_PI_2];
// Initialise the history
let init_history = grow::History {
block: 0.0,
stress: 0.0,
cycle: cycle::Cycle {
max: tag::Tag {
value: 0.0,
index: 0,
},
min: tag::Tag {
value: 0.0,
index: 0,
},
},
k: vec![0.0, 0.0],
dk: vec![0.0, 0.0],
beta: beta.beta(a_on_d, a_on_c, c_on_b, a_on_r, &phis),
da: vec![0.0, 0.0],
crack: init_crack,
};
grow::display_history_line(&init_history, &options.output_vars, &options.component);
let component = grow::FatigueTest {
history: init_history,
component: options.component.clone(),
scale: options.scale,
cycles: options.cycles.clone(),
a_limit: options.a_limit.clone(),
block_limit: options.block_limit,
next_cycle: 0,
dadn: dadn_eqn,
beta,
output_vars: options.output_vars.clone(),
};
// make a hash set of the lines that are required for output
let mut output_lines: BTreeSet<usize> = options.output_lines.iter().cloned().collect();
// if there are no lines in the output then put in the line for the first cycle
if options
.cycles
.iter()
.filter(|c| output_lines.contains(&c.max.index) || output_lines.contains(&c.min.index))
.count() == 0
{
println!("output_lines {:?}", output_lines);
println!(
"
Warning: There are no sequence lines in the cycle list and so there
will be no crack growth output. Consider closing up cycles
with re-order to use all sequence lines or include specific
sequence lines that are in the cycle. Meanwhile, the output will
be for the squence line in the first cycle at line {}.",
options.cycles[0].max.index
);
output_lines.insert(options.cycles[0].max.index);
}
// Start the crack growth. This loop steps through each cycle
// repeating the cycles until a terminating condition stops the
// growth and ends the for loop.
for (cycle_no, history) in component.enumerate() {
if grow::output_cycle_history(&history, options.output_every, &output_lines, cycle_no) {
grow::display_history_line(&history, &options.output_vars, &options.component);
}
// Only keep the history if we are producing a fracto image.
if options.image.file != "" {
history_all.push(history);
}
}
history_all
} | fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors), | random_line_split |
main.rs | /// easiGrow
///
/// by Paul White (Nov 2014--2017)
/// written in rust (www.rust-lang.org)
///
/// A program to match crack growth predictions to measurements.
///
/// The program calculates fatigue crack growth rates and finds the
/// optimum parameters of a crack growth model to match predictions
/// with measurements.
///
/// **easiGrow** is a standalone program but most of the calculations
/// are done through calls to the associated **fatigue** library which
/// is included. The main program is for doing anything that
/// explicitly uses the command line flags inlcuding the optimisation
/// module. These flages are used to build the **EasiOptions** data
/// structure which is then used to generate the crack growth
/// history. The optimisation generates a crack growth curve which it
/// compares with a fractography file. It finds the error between
/// these measurements and tries to minimise the sum errors through
/// minimisation routines.
///
/// Currently, none of the models has a memory effect, so it is ok to
/// just start growing the crack from an iniital crack size that is
/// smaller than the initial fracto data. The struct `grow::CrackState`
/// also contains parameters that are passed along with the applied
/// loading _kmin_ and _kmax_, so any memory variables should be added to
/// this struct and will be availabe to be used by the _da/dn_ equation.
/// The simplest memory effect that is included in the `CrackState`
/// data is the plastic zone size, but there are no dadn equations
/// currently using this. The memory effect does not appear to be
/// strong in AA7050 material.
///
/// Think of the program flow as
///
/// 1. Read in data
/// 2. Filter the sequence (turning point, rainflow, risefall, deadband etc.) and convert to cycles
/// 3. Filter the list of cycles
/// 4. If required, optimise any parameters
/// 5. Perform a crack growth calculation
/// 6. Write out requested output
#[macro_use]
extern crate clap;
extern crate fatigue;
extern crate log;
extern crate env_logger;
use std::f64::consts::FRAC_PI_2;
use std::process;
use std::collections::BTreeSet;
use fatigue::{beta, cycle, dadn, fracto, grow, io, material, table, tag};
use options_clap::get_options_clap;
use options::{OptimMethod, TerminatingOutput};
use fatigue::dadn::DaDn;
use fatigue::COMMENT;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use log::error;
use std::io::Write;
mod list;
mod optimise;
mod sweep;
mod factors;
mod options;
mod options_clap;
mod nelder;
mod numbers;
mod vector;
#[cfg(feature = "GSL")]
mod optimise_gsl;
fn main() {
env_logger::init();
// get all the data
let materials = material::get_all_dadns();
let mut options = options::get_default_options();
get_options_clap("", &mut options);
println!("{}easiGrow: version {}", COMMENT, crate_version!());
println!("{}", COMMENT);
if options.verbosity == options::Verbosity::Verbose {
println!("{}Options: ", COMMENT);
println!("{}", options);
}
options::read_all_files(&mut options);
// process all the modifications to the sequence
options.sequence = cycle::process_seq_mods(&options.sequence, &options.seq_mods);
// Get the cycles from either the external sequence file, command line or the cycle file.
if options.cycle_infile != "" && options.seq_infile != "" {
error!("Error: you have specified a sequence file '{}' as well as a cycle file '{}'. Specify only one.",
options.seq_infile, options.cycle_infile);
std::process::exit(2)
}
let unclosed = if options.cycle_infile == "" {
let (cycles, left) = cycle::cycles_from_sequence(&options.sequence, &options.cycle_method);
options.cycles = cycles;
left
} else {
Vec::new()
};
// process all the modifications to the cycles
options.cycles = cycle::process_cycle_mods(&options.cycles, &options.cycle_mods);
// Only keep those cycles that remain after filtering the cycles
// and mark the turning points associated with those cycles. This
// section is only for writing out the modified sequence, since
// the filtered cycles are all that is used for crack growth.
if options.seq_mods.cycles {
let mut keep = vec![false; options.sequence.len()];
for cycle in &options.cycles {
keep[cycle.max.index] = true;
keep[cycle.min.index] = true;
}
options.sequence.retain(|s| keep[s.index])
}
// Any request for file or info output will result in program
// termination. This policy is to reduce the complexity for the
// user as to what the program does.
// Write out the sequence file.
if let Some(outfile) = options.seq_mods.outfile {
io::write_sequence(&outfile, &options.sequence);
std::process::exit(0);
}
// Write out the cycles file.
if let Some(outfile) = options.cycle_mods.outfile {
io::write_cycles(&outfile, &options.cycles);
std::process::exit(0);
}
// write out the beta by converting to a beta table. This can be
// then read back in using the file: option for beta selection.
if options.beta_outfile != "" {
let beta = beta::get_beta_fn(&options.beta, &options.component);
let table_beta = beta.as_table();
// need to write to file
let path = Path::new(&options.beta_outfile);
let display = path.display();
let mut file = match File::create(&path) {
// The `description` method of `io::Error` returns a string that
// describes the error
Err(why) => {
error!(
"Error: could not create the file '{}': {}.",
display,
Error::description(&why)
);
std::process::exit(1)
}
Ok(file) => file,
};
let _ = write!(file, "{}", table_beta);
std::process::exit(0);
}
// write out summary information of the sequence
match options.output {
TerminatingOutput::Summary => {
let seq_source = if options.seq_infile != "" {
options.seq_infile
} else {
// This is a little vague as the sequence could be either
// the default sequence or overwritten with a supplied sequence.
String::from("(Used specified sequence)")
};
cycle::summarise_sequence(&seq_source, &options.sequence, &options.seq_mods);
let cycle_source = if options.cycle_infile != "" {
options.cycle_infile
} else {
format!(
"(Obtained from sequence using '{:?}' method)",
options.cycle_method
)
};
cycle::summarise_cycles(
&cycle_source,
&options.cycles,
&unclosed,
&options.cycle_mods,
);
std::process::exit(0)
}
// write out extended list of options and methods
TerminatingOutput::List => {
list::print_list();
std::process::exit(0);
}
_ => (),
}
// get the correct material parameters for the dadn equation or
// from the command line. If the params are not given, then get the
// dadn material constants from the internal database.
let mut params = options.params.clone();
if params.is_empty() {
// extract out the appropriate material parameters from a file
params = if options.dadn.starts_with("file:") {
let filename = options.dadn.trim_start_matches("file:");
println!(
"{}No parameters given, using the dk values in the dadn file {}",
COMMENT, filename
);
let table = table::Table::read_file(filename, true);
// collapse down the dks and use these as the parameters for optimising
table.variables()
// or from the internal database.
} else {
println!(
"{}No parameters given, obtaining from material library for {}",
COMMENT, options.dadn
);
match materials.iter().find(|m| options.dadn.starts_with(m.name)) {
Some(m) => m.eqn.variables(),
None => {
error!("Error: Unknown dadn model {}", options.dadn);
process::exit(1);
}
}
}
};
// Optimise the parameters to match the predicted crack growth
// rates with the associated measured crack growth rates.
if options.optimise.file != "" {
// optimisation scaling factors
options.params = params.clone();
println!(
"{}Now starting the optimisation with params {:?} ...",
COMMENT, options.params
);
let mut factors = vec![1.0; params.len()]; // non-dimensionalised factors used for optimisation
optimise_error(&options, &mut factors);
println!("{}...finished the optimisation. ", COMMENT);
println!("{}The normalised factors are {:?}", COMMENT, factors);
// Rescale the parameters to include the optimised factors
params = options
.params
.iter()
.zip(factors)
.map(|(p, f)| p * f)
.collect::<Vec<f64>>();
println!("{}The scaled optimised factors are: {:?}", COMMENT, params);
if options.scale == 0.0 {
std::process::exit(0); // not an error if we have performed an optimisation
}
}
// Grow the crack
let history_all = generate_crack_history(&options, ¶ms);
// Lastly, now that we've grown the crack, check if we need to
// generate and write out a pseudo image.
if options.image.file != "" {
println!("Making a pseudo image...");
if options.image.file.ends_with(".svg") {
fracto::write_svg_pseudo_image(&history_all, &options.image);
println!("Image written to file '{}'", options.image.file);
} else {
error!("Error: Currently easigo can only generate svg. Please use a '.svg' suffix");
}
}
}
#[cfg(not(feature = "GSL"))]
fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors)
}
};
}
#[cfg(feature = "GSL")]
fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(&options, &mut factors),
OptimMethod::Levenberg => optimise_gsl::gsl_match_crack(&options, &mut factors),
OptimMethod::All => |
};
}
// Finally grow the crack with the current parameters which may have been optimised.
// We exit here if the scale has not been set. Otherwise we
// would go through and do a default calculation which confuses
// people if they just want to start the program to see how to get
// help.
fn generate_crack_history(options: &options::EasiOptions, params: &[f64]) -> Vec<grow::History> {
let dadn_eqn = dadn::make_model(&options.dadn, ¶ms, String::from("unknown"));
println!("{}da/dN equation: {}", COMMENT, dadn_eqn);
let beta = beta::get_beta_fn(&options.beta, &options.component);
if options.scale == 0.0 {
error!(
"Error: The sequence scale factor is 0. You need to set the scale factor
(i.e. load or stress level) in order to perform a crack growth calculation.
Try\n easigrow --help"
);
std::process::exit(1);
}
if options.cycles.is_empty() {
println!("Error: There are no closed cycles in sequence. Perhaps try the re-order sequence option -r");
std::process::exit(1);
}
// We define the initial state. If any memory effect is to be
// included in the crack growth model, the meory should be in this
// data structure.
let init_crack = grow::CrackState::new(options.a.clone());
let mut history_all = Vec::new();
grow::display_history_header(&options.output_vars);
// Non-dimensional ratios for beta factor
let c = options.a[options.a.len() - 1];
let a_on_c = options.a[0] / c;
let a_on_d = options.a[0] / options.component.forward;
let c_on_b = c / options.component.sideways;
let a_on_r = options.a[0] / options.component.radius;
// phis is a vector of angles around the crack front. It depends
// on the beta whether any or all of the angles are used. Most
// just use the first and some use the last as well.
let phis = vec![0.0, FRAC_PI_2];
// Initialise the history
let init_history = grow::History {
block: 0.0,
stress: 0.0,
cycle: cycle::Cycle {
max: tag::Tag {
value: 0.0,
index: 0,
},
min: tag::Tag {
value: 0.0,
index: 0,
},
},
k: vec![0.0, 0.0],
dk: vec![0.0, 0.0],
beta: beta.beta(a_on_d, a_on_c, c_on_b, a_on_r, &phis),
da: vec![0.0, 0.0],
crack: init_crack,
};
grow::display_history_line(&init_history, &options.output_vars, &options.component);
let component = grow::FatigueTest {
history: init_history,
component: options.component.clone(),
scale: options.scale,
cycles: options.cycles.clone(),
a_limit: options.a_limit.clone(),
block_limit: options.block_limit,
next_cycle: 0,
dadn: dadn_eqn,
beta,
output_vars: options.output_vars.clone(),
};
// make a hash set of the lines that are required for output
let mut output_lines: BTreeSet<usize> = options.output_lines.iter().cloned().collect();
// if there are no lines in the output then put in the line for the first cycle
if options
.cycles
.iter()
.filter(|c| output_lines.contains(&c.max.index) || output_lines.contains(&c.min.index))
.count() == 0
{
println!("output_lines {:?}", output_lines);
println!(
"
Warning: There are no sequence lines in the cycle list and so there
will be no crack growth output. Consider closing up cycles
with re-order to use all sequence lines or include specific
sequence lines that are in the cycle. Meanwhile, the output will
be for the squence line in the first cycle at line {}.",
options.cycles[0].max.index
);
output_lines.insert(options.cycles[0].max.index);
}
// Start the crack growth. This loop steps through each cycle
// repeating the cycles until a terminating condition stops the
// growth and ends the for loop.
for (cycle_no, history) in component.enumerate() {
if grow::output_cycle_history(&history, options.output_every, &output_lines, cycle_no) {
grow::display_history_line(&history, &options.output_vars, &options.component);
}
// Only keep the history if we are producing a fracto image.
if options.image.file != "" {
history_all.push(history);
}
}
history_all
}
| {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors);
optimise_gsl::gsl_match_crack(options, &mut factors)
} | conditional_block |
main.rs | /// easiGrow
///
/// by Paul White (Nov 2014--2017)
/// written in rust (www.rust-lang.org)
///
/// A program to match crack growth predictions to measurements.
///
/// The program calculates fatigue crack growth rates and finds the
/// optimum parameters of a crack growth model to match predictions
/// with measurements.
///
/// **easiGrow** is a standalone program but most of the calculations
/// are done through calls to the associated **fatigue** library which
/// is included. The main program is for doing anything that
/// explicitly uses the command line flags inlcuding the optimisation
/// module. These flages are used to build the **EasiOptions** data
/// structure which is then used to generate the crack growth
/// history. The optimisation generates a crack growth curve which it
/// compares with a fractography file. It finds the error between
/// these measurements and tries to minimise the sum errors through
/// minimisation routines.
///
/// Currently, none of the models has a memory effect, so it is ok to
/// just start growing the crack from an iniital crack size that is
/// smaller than the initial fracto data. The struct `grow::CrackState`
/// also contains parameters that are passed along with the applied
/// loading _kmin_ and _kmax_, so any memory variables should be added to
/// this struct and will be availabe to be used by the _da/dn_ equation.
/// The simplest memory effect that is included in the `CrackState`
/// data is the plastic zone size, but there are no dadn equations
/// currently using this. The memory effect does not appear to be
/// strong in AA7050 material.
///
/// Think of the program flow as
///
/// 1. Read in data
/// 2. Filter the sequence (turning point, rainflow, risefall, deadband etc.) and convert to cycles
/// 3. Filter the list of cycles
/// 4. If required, optimise any parameters
/// 5. Perform a crack growth calculation
/// 6. Write out requested output
#[macro_use]
extern crate clap;
extern crate fatigue;
extern crate log;
extern crate env_logger;
use std::f64::consts::FRAC_PI_2;
use std::process;
use std::collections::BTreeSet;
use fatigue::{beta, cycle, dadn, fracto, grow, io, material, table, tag};
use options_clap::get_options_clap;
use options::{OptimMethod, TerminatingOutput};
use fatigue::dadn::DaDn;
use fatigue::COMMENT;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use log::error;
use std::io::Write;
mod list;
mod optimise;
mod sweep;
mod factors;
mod options;
mod options_clap;
mod nelder;
mod numbers;
mod vector;
#[cfg(feature = "GSL")]
mod optimise_gsl;
fn main() {
env_logger::init();
// get all the data
let materials = material::get_all_dadns();
let mut options = options::get_default_options();
get_options_clap("", &mut options);
println!("{}easiGrow: version {}", COMMENT, crate_version!());
println!("{}", COMMENT);
if options.verbosity == options::Verbosity::Verbose {
println!("{}Options: ", COMMENT);
println!("{}", options);
}
options::read_all_files(&mut options);
// process all the modifications to the sequence
options.sequence = cycle::process_seq_mods(&options.sequence, &options.seq_mods);
// Get the cycles from either the external sequence file, command line or the cycle file.
if options.cycle_infile != "" && options.seq_infile != "" {
error!("Error: you have specified a sequence file '{}' as well as a cycle file '{}'. Specify only one.",
options.seq_infile, options.cycle_infile);
std::process::exit(2)
}
let unclosed = if options.cycle_infile == "" {
let (cycles, left) = cycle::cycles_from_sequence(&options.sequence, &options.cycle_method);
options.cycles = cycles;
left
} else {
Vec::new()
};
// process all the modifications to the cycles
options.cycles = cycle::process_cycle_mods(&options.cycles, &options.cycle_mods);
// Only keep those cycles that remain after filtering the cycles
// and mark the turning points associated with those cycles. This
// section is only for writing out the modified sequence, since
// the filtered cycles are all that is used for crack growth.
if options.seq_mods.cycles {
let mut keep = vec![false; options.sequence.len()];
for cycle in &options.cycles {
keep[cycle.max.index] = true;
keep[cycle.min.index] = true;
}
options.sequence.retain(|s| keep[s.index])
}
// Any request for file or info output will result in program
// termination. This policy is to reduce the complexity for the
// user as to what the program does.
// Write out the sequence file.
if let Some(outfile) = options.seq_mods.outfile {
io::write_sequence(&outfile, &options.sequence);
std::process::exit(0);
}
// Write out the cycles file.
if let Some(outfile) = options.cycle_mods.outfile {
io::write_cycles(&outfile, &options.cycles);
std::process::exit(0);
}
// write out the beta by converting to a beta table. This can be
// then read back in using the file: option for beta selection.
if options.beta_outfile != "" {
let beta = beta::get_beta_fn(&options.beta, &options.component);
let table_beta = beta.as_table();
// need to write to file
let path = Path::new(&options.beta_outfile);
let display = path.display();
let mut file = match File::create(&path) {
// The `description` method of `io::Error` returns a string that
// describes the error
Err(why) => {
error!(
"Error: could not create the file '{}': {}.",
display,
Error::description(&why)
);
std::process::exit(1)
}
Ok(file) => file,
};
let _ = write!(file, "{}", table_beta);
std::process::exit(0);
}
// write out summary information of the sequence
match options.output {
TerminatingOutput::Summary => {
let seq_source = if options.seq_infile != "" {
options.seq_infile
} else {
// This is a little vague as the sequence could be either
// the default sequence or overwritten with a supplied sequence.
String::from("(Used specified sequence)")
};
cycle::summarise_sequence(&seq_source, &options.sequence, &options.seq_mods);
let cycle_source = if options.cycle_infile != "" {
options.cycle_infile
} else {
format!(
"(Obtained from sequence using '{:?}' method)",
options.cycle_method
)
};
cycle::summarise_cycles(
&cycle_source,
&options.cycles,
&unclosed,
&options.cycle_mods,
);
std::process::exit(0)
}
// write out extended list of options and methods
TerminatingOutput::List => {
list::print_list();
std::process::exit(0);
}
_ => (),
}
// get the correct material parameters for the dadn equation or
// from the command line. If the params are not given, then get the
// dadn material constants from the internal database.
let mut params = options.params.clone();
if params.is_empty() {
// extract out the appropriate material parameters from a file
params = if options.dadn.starts_with("file:") {
let filename = options.dadn.trim_start_matches("file:");
println!(
"{}No parameters given, using the dk values in the dadn file {}",
COMMENT, filename
);
let table = table::Table::read_file(filename, true);
// collapse down the dks and use these as the parameters for optimising
table.variables()
// or from the internal database.
} else {
println!(
"{}No parameters given, obtaining from material library for {}",
COMMENT, options.dadn
);
match materials.iter().find(|m| options.dadn.starts_with(m.name)) {
Some(m) => m.eqn.variables(),
None => {
error!("Error: Unknown dadn model {}", options.dadn);
process::exit(1);
}
}
}
};
// Optimise the parameters to match the predicted crack growth
// rates with the associated measured crack growth rates.
if options.optimise.file != "" {
// optimisation scaling factors
options.params = params.clone();
println!(
"{}Now starting the optimisation with params {:?} ...",
COMMENT, options.params
);
let mut factors = vec![1.0; params.len()]; // non-dimensionalised factors used for optimisation
optimise_error(&options, &mut factors);
println!("{}...finished the optimisation. ", COMMENT);
println!("{}The normalised factors are {:?}", COMMENT, factors);
// Rescale the parameters to include the optimised factors
params = options
.params
.iter()
.zip(factors)
.map(|(p, f)| p * f)
.collect::<Vec<f64>>();
println!("{}The scaled optimised factors are: {:?}", COMMENT, params);
if options.scale == 0.0 {
std::process::exit(0); // not an error if we have performed an optimisation
}
}
// Grow the crack
let history_all = generate_crack_history(&options, ¶ms);
// Lastly, now that we've grown the crack, check if we need to
// generate and write out a pseudo image.
if options.image.file != "" {
println!("Making a pseudo image...");
if options.image.file.ends_with(".svg") {
fracto::write_svg_pseudo_image(&history_all, &options.image);
println!("Image written to file '{}'", options.image.file);
} else {
error!("Error: Currently easigo can only generate svg. Please use a '.svg' suffix");
}
}
}
#[cfg(not(feature = "GSL"))]
fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors)
}
};
}
#[cfg(feature = "GSL")]
fn | (options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(&options, &mut factors),
OptimMethod::Levenberg => optimise_gsl::gsl_match_crack(&options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors);
optimise_gsl::gsl_match_crack(options, &mut factors)
}
};
}
// Finally grow the crack with the current parameters which may have been optimised.
// We exit here if the scale has not been set. Otherwise we
// would go through and do a default calculation which confuses
// people if they just want to start the program to see how to get
// help.
fn generate_crack_history(options: &options::EasiOptions, params: &[f64]) -> Vec<grow::History> {
let dadn_eqn = dadn::make_model(&options.dadn, ¶ms, String::from("unknown"));
println!("{}da/dN equation: {}", COMMENT, dadn_eqn);
let beta = beta::get_beta_fn(&options.beta, &options.component);
if options.scale == 0.0 {
error!(
"Error: The sequence scale factor is 0. You need to set the scale factor
(i.e. load or stress level) in order to perform a crack growth calculation.
Try\n easigrow --help"
);
std::process::exit(1);
}
if options.cycles.is_empty() {
println!("Error: There are no closed cycles in sequence. Perhaps try the re-order sequence option -r");
std::process::exit(1);
}
// We define the initial state. If any memory effect is to be
// included in the crack growth model, the meory should be in this
// data structure.
let init_crack = grow::CrackState::new(options.a.clone());
let mut history_all = Vec::new();
grow::display_history_header(&options.output_vars);
// Non-dimensional ratios for beta factor
let c = options.a[options.a.len() - 1];
let a_on_c = options.a[0] / c;
let a_on_d = options.a[0] / options.component.forward;
let c_on_b = c / options.component.sideways;
let a_on_r = options.a[0] / options.component.radius;
// phis is a vector of angles around the crack front. It depends
// on the beta whether any or all of the angles are used. Most
// just use the first and some use the last as well.
let phis = vec![0.0, FRAC_PI_2];
// Initialise the history
let init_history = grow::History {
block: 0.0,
stress: 0.0,
cycle: cycle::Cycle {
max: tag::Tag {
value: 0.0,
index: 0,
},
min: tag::Tag {
value: 0.0,
index: 0,
},
},
k: vec![0.0, 0.0],
dk: vec![0.0, 0.0],
beta: beta.beta(a_on_d, a_on_c, c_on_b, a_on_r, &phis),
da: vec![0.0, 0.0],
crack: init_crack,
};
grow::display_history_line(&init_history, &options.output_vars, &options.component);
let component = grow::FatigueTest {
history: init_history,
component: options.component.clone(),
scale: options.scale,
cycles: options.cycles.clone(),
a_limit: options.a_limit.clone(),
block_limit: options.block_limit,
next_cycle: 0,
dadn: dadn_eqn,
beta,
output_vars: options.output_vars.clone(),
};
// make a hash set of the lines that are required for output
let mut output_lines: BTreeSet<usize> = options.output_lines.iter().cloned().collect();
// if there are no lines in the output then put in the line for the first cycle
if options
.cycles
.iter()
.filter(|c| output_lines.contains(&c.max.index) || output_lines.contains(&c.min.index))
.count() == 0
{
println!("output_lines {:?}", output_lines);
println!(
"
Warning: There are no sequence lines in the cycle list and so there
will be no crack growth output. Consider closing up cycles
with re-order to use all sequence lines or include specific
sequence lines that are in the cycle. Meanwhile, the output will
be for the squence line in the first cycle at line {}.",
options.cycles[0].max.index
);
output_lines.insert(options.cycles[0].max.index);
}
// Start the crack growth. This loop steps through each cycle
// repeating the cycles until a terminating condition stops the
// growth and ends the for loop.
for (cycle_no, history) in component.enumerate() {
if grow::output_cycle_history(&history, options.output_every, &output_lines, cycle_no) {
grow::display_history_line(&history, &options.output_vars, &options.component);
}
// Only keep the history if we are producing a fracto image.
if options.image.file != "" {
history_all.push(history);
}
}
history_all
}
| optimise_error | identifier_name |
player.go | // Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"encoding/json"
"log"
"net/http"
"strings"
"time"
"github.com/gorilla/websocket"
)
const (
// Time allowed to write a message to the peer.
writeWait = 10 * time.Second
// Time allowed to read the next pong message from the peer.
pongWait = 60 * time.Second
// Send pings to peer with this period. Must be less than pongWait.
pingPeriod = (pongWait * 9) / 10
// Maximum message size allowed from peer.
maxMessageSize = 512
)
var (
newline = "\n"
space = " "
)
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(_ *http.Request) bool {return true},
}
// player is a middleman between the websocket connection and the hub.
type player struct {
room *Room
// The websocket connection.
conn *websocket.Conn
// Events channels
sendMove chan []byte
sendChat chan message
oppRanOut chan bool
disconnect chan bool
// Action channels
drawOffer chan bool
oppAcceptedDraw chan bool
oppResigned chan bool
rematchOffer chan bool
oppAcceptedRematch chan bool
oppReady chan bool
oppDisconnected chan bool
oppGone chan bool
oppReconnected chan bool
cleanup func()
switchColors func()
color string
gameId string
timeLeft time.Duration
clock *time.Timer
lastMove time.Time
username string
userId string
}
type move struct {
Color string `json:"color"`
Pgn string `json:"pgn"`
move []byte
}
// Chat message
type message struct {
Move move `json:"move,omitempty"`
Text string `json:"chat"`
Username string `json:"from"`
Resign bool `json:"resign"`
DrawOffer bool `json:"drawOffer"`
AcceptDraw bool `json:"acceptDraw"`
GameOver bool `json:"gameOver"`
RematchOffer bool `json:"rematchOffer"`
AcceptRematch bool `json:"acceptRematch"`
FinishRoom bool `json:"finishRoom"`
userId string
}
// readPump pumps messages from the websocket connection to the room's hub.
//
// The application runs readPump in a per-connection goroutine. The application
// ensures that there is at most one reader on a connection by executing all
// reads from this goroutine.
func (p *player) readPump() {
defer func() {
if p.room != nil {
p.room.disconnect<- p
}
p.sendMove = nil
p.conn.Close()
}()
p.conn.SetReadLimit(maxMessageSize)
p.conn.SetReadDeadline(time.Now().Add(pongWait))
p.conn.SetPongHandler(func(string) error { p.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })
for {
_, msg, err := p.conn.ReadMessage()
if err != nil {
if websocket.IsUnexpectedCloseError(err,
websocket.CloseGoingAway,
websocket.CloseAbnormalClosure,
websocket.CloseNormalClosure,
) {
log.Printf("%v player connection is gone with error: %v", p.color, err)
}
break
}
// Unmarshal message just to get the color.
m := message{}
if err = json.Unmarshal(msg, &m); err != nil {
log.Println("Could not unmarshal msg:", err)
break
}
switch {
case m.Move.Color != "":
// It's a move
m.Move.move = msg
p.room.broadcastMove<- m.Move
case m.Text != "":
// It's a chat message
text := strings.TrimSpace(strings.Replace(m.Text, newline, space, -1))
p.room.broadcastChat<- message{
Text: text,
Username: p.username,
userId: p.userId,
}
case m.Resign:
p.room.broadcastResign<- p.color
case m.DrawOffer:
p.room.broadcastDrawOffer<- p.color
case m.AcceptDraw:
p.room.broadcastAcceptDraw<- p.color
case m.GameOver:
p.room.stopClocks<- true
case m.RematchOffer:
p.room.broadcastRematchOffer<- p.color
case m.AcceptRematch:
p.room.broadcastAcceptRematch<- p.color
case m.FinishRoom:
return
default:
log.Println("Unexpected message", m)
}
}
}
// writePump pumps messages from the room's hub to the websocket connection.
//
// A goroutine running writePump is started for each connection. The
// application ensures that there is at most one writer to a connection by
// executing all writes from this goroutine.
func (p *player) writePump() {
ticker := time.NewTicker(pingPeriod)
defer func() {
ticker.Stop()
p.conn.Close()
}()
for {
select {
case <-p.disconnect:
// Finish this goroutine to not to send messages anymore
return
case move, ok := <-p.sendMove: // Opponent moved a piece
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
payload := websocket.FormatCloseMessage(1001, "")
p.conn.WriteMessage(websocket.CloseMessage, payload)
return
}
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
return
}
w.Write(move)
if err := w.Close(); err != nil {
return
}
case msg, ok := <-p.sendChat: // Chat msg
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
p.conn.WriteMessage(websocket.CloseMessage, []byte{})
return
}
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
log.Println("Could not make next writer:", err)
return
}
w.Write(msgB)
// Add queued chat messages to the current websocket message.
n := len(p.sendChat)
for i := 0; i < n; i++ {
msg = <-p.sendChat
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w.Write([]byte(newline))
w.Write(msgB)
}
if err := w.Close(); err != nil {
log.Println("Could not close writer:", err)
return
}
case <-ticker.C: // ping
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := p.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
log.Println("Could not ping:", err)
return
}
case <-p.clock.C: // Player ran out ouf time
// Inform the opponent about this
p.room.broadcastNoTime<- p.color
data := map[string]string{
"OOT": "MY_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppRanOut: // Opponent ran out ouf time
data := map[string]string{
"OOT": "OPP_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.drawOffer: // Opponent offered draw
data := map[string]string{
"drawOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedDraw: // opponent accepted draw
data := map[string]string{
"oppAcceptedDraw": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppResigned: // opponent resigned
data := map[string]string{
"oppResigned": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.rematchOffer: // Opponent offered rematch
data := map[string]string{
"rematchOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedRematch: // opponent accepted rematch
data := map[string]string{
"oppAcceptedRematch": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReady: // opponent ready
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppDisconnected: // opponent disconnected
data := map[string]string{
"waitingOpp": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReconnected: // opponent reconnected
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppGone: // opponent is gone
data := map[string]string{
"oppGone": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
}
}
}
// JSON-marshal and send message to the connection.
func sendTextMsg(data map[string]string, conn *websocket.Conn) error {
dataB, err := json.Marshal(data)
if err != nil {
return err
}
conn.SetWriteDeadline(time.Now().Add(writeWait))
w, err := conn.NextWriter(websocket.TextMessage)
if err != nil {
return err
}
w.Write(dataB)
return w.Close()
}
// serveGame handles websocket requests from the peer.
func (rout *router) | (w http.ResponseWriter, r *http.Request,
gameId, color string, minutes int, cleanup, switchColors func(),
username, userId string) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Println(err)
http.Error(w, "Could not upgrade conn", http.StatusInternalServerError)
return
}
playerClock := time.NewTimer(time.Duration(minutes) * time.Minute)
playerClock.Stop()
p := &player{
cleanup: cleanup,
clock: playerClock,
color: color,
conn: conn,
gameId: gameId,
oppRanOut: make(chan bool, 1),
disconnect: make(chan bool),
drawOffer: make(chan bool, 1),
oppAcceptedDraw: make(chan bool, 1),
oppResigned: make(chan bool, 1),
rematchOffer: make(chan bool, 1),
oppAcceptedRematch: make(chan bool, 1),
oppReady: make(chan bool, 1),
oppDisconnected: make(chan bool, 1),
oppGone: make(chan bool, 1),
oppReconnected: make(chan bool, 1),
sendMove: make(chan []byte, 2), // one for the clock, one for the move
sendChat: make(chan message, 128),
switchColors: switchColors,
timeLeft: time.Duration(minutes) * time.Minute,
userId: userId,
username: username,
}
switch minutes {
case 1:
rout.rm.registerPlayer1Min<- p
case 3:
rout.rm.registerPlayer3Min<- p
case 5:
rout.rm.registerPlayer5Min<- p
case 10:
rout.rm.registerPlayer10Min<- p
default:
log.Println("Invalid clock time:", minutes)
http.Error(w, "Invalid clock time", http.StatusBadRequest)
return
}
// Allow collection of memory referenced by the caller by doing all work in
// new goroutines.
go p.writePump()
go p.readPump()
rout.ldHub.joinPlayer<- userId
}
| serveGame | identifier_name |
player.go | // Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"encoding/json"
"log"
"net/http"
"strings"
"time"
"github.com/gorilla/websocket"
)
const (
// Time allowed to write a message to the peer.
writeWait = 10 * time.Second
// Time allowed to read the next pong message from the peer.
pongWait = 60 * time.Second
// Send pings to peer with this period. Must be less than pongWait.
pingPeriod = (pongWait * 9) / 10
// Maximum message size allowed from peer.
maxMessageSize = 512
)
var (
newline = "\n"
space = " "
)
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(_ *http.Request) bool {return true},
}
// player is a middleman between the websocket connection and the hub.
type player struct {
room *Room
// The websocket connection.
conn *websocket.Conn
// Events channels
sendMove chan []byte
sendChat chan message
oppRanOut chan bool
disconnect chan bool
// Action channels
drawOffer chan bool
oppAcceptedDraw chan bool
oppResigned chan bool
rematchOffer chan bool
oppAcceptedRematch chan bool
oppReady chan bool
oppDisconnected chan bool
oppGone chan bool
oppReconnected chan bool
cleanup func()
switchColors func()
color string
gameId string
timeLeft time.Duration
clock *time.Timer
lastMove time.Time
username string
userId string
}
type move struct {
Color string `json:"color"`
Pgn string `json:"pgn"`
move []byte
}
// Chat message
type message struct {
Move move `json:"move,omitempty"`
Text string `json:"chat"`
Username string `json:"from"`
Resign bool `json:"resign"`
DrawOffer bool `json:"drawOffer"`
AcceptDraw bool `json:"acceptDraw"`
GameOver bool `json:"gameOver"`
RematchOffer bool `json:"rematchOffer"`
AcceptRematch bool `json:"acceptRematch"`
FinishRoom bool `json:"finishRoom"`
userId string
}
// readPump pumps messages from the websocket connection to the room's hub.
//
// The application runs readPump in a per-connection goroutine. The application
// ensures that there is at most one reader on a connection by executing all
// reads from this goroutine.
func (p *player) readPump() {
defer func() {
if p.room != nil {
p.room.disconnect<- p
}
p.sendMove = nil
p.conn.Close()
}()
p.conn.SetReadLimit(maxMessageSize)
p.conn.SetReadDeadline(time.Now().Add(pongWait))
p.conn.SetPongHandler(func(string) error { p.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })
for {
_, msg, err := p.conn.ReadMessage()
if err != nil {
if websocket.IsUnexpectedCloseError(err,
websocket.CloseGoingAway,
websocket.CloseAbnormalClosure,
websocket.CloseNormalClosure,
) {
log.Printf("%v player connection is gone with error: %v", p.color, err)
}
break
}
// Unmarshal message just to get the color.
m := message{}
if err = json.Unmarshal(msg, &m); err != nil {
log.Println("Could not unmarshal msg:", err)
break
}
switch {
case m.Move.Color != "":
// It's a move
m.Move.move = msg
p.room.broadcastMove<- m.Move
case m.Text != "":
// It's a chat message
text := strings.TrimSpace(strings.Replace(m.Text, newline, space, -1))
p.room.broadcastChat<- message{
Text: text,
Username: p.username,
userId: p.userId,
}
case m.Resign:
p.room.broadcastResign<- p.color
case m.DrawOffer:
p.room.broadcastDrawOffer<- p.color
case m.AcceptDraw:
p.room.broadcastAcceptDraw<- p.color
case m.GameOver:
p.room.stopClocks<- true
case m.RematchOffer:
p.room.broadcastRematchOffer<- p.color
case m.AcceptRematch:
p.room.broadcastAcceptRematch<- p.color
case m.FinishRoom:
return
default:
log.Println("Unexpected message", m)
}
}
}
// writePump pumps messages from the room's hub to the websocket connection.
//
// A goroutine running writePump is started for each connection. The
// application ensures that there is at most one writer to a connection by
// executing all writes from this goroutine.
func (p *player) writePump() |
// JSON-marshal and send message to the connection.
func sendTextMsg(data map[string]string, conn *websocket.Conn) error {
dataB, err := json.Marshal(data)
if err != nil {
return err
}
conn.SetWriteDeadline(time.Now().Add(writeWait))
w, err := conn.NextWriter(websocket.TextMessage)
if err != nil {
return err
}
w.Write(dataB)
return w.Close()
}
// serveGame handles websocket requests from the peer.
func (rout *router) serveGame(w http.ResponseWriter, r *http.Request,
gameId, color string, minutes int, cleanup, switchColors func(),
username, userId string) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Println(err)
http.Error(w, "Could not upgrade conn", http.StatusInternalServerError)
return
}
playerClock := time.NewTimer(time.Duration(minutes) * time.Minute)
playerClock.Stop()
p := &player{
cleanup: cleanup,
clock: playerClock,
color: color,
conn: conn,
gameId: gameId,
oppRanOut: make(chan bool, 1),
disconnect: make(chan bool),
drawOffer: make(chan bool, 1),
oppAcceptedDraw: make(chan bool, 1),
oppResigned: make(chan bool, 1),
rematchOffer: make(chan bool, 1),
oppAcceptedRematch: make(chan bool, 1),
oppReady: make(chan bool, 1),
oppDisconnected: make(chan bool, 1),
oppGone: make(chan bool, 1),
oppReconnected: make(chan bool, 1),
sendMove: make(chan []byte, 2), // one for the clock, one for the move
sendChat: make(chan message, 128),
switchColors: switchColors,
timeLeft: time.Duration(minutes) * time.Minute,
userId: userId,
username: username,
}
switch minutes {
case 1:
rout.rm.registerPlayer1Min<- p
case 3:
rout.rm.registerPlayer3Min<- p
case 5:
rout.rm.registerPlayer5Min<- p
case 10:
rout.rm.registerPlayer10Min<- p
default:
log.Println("Invalid clock time:", minutes)
http.Error(w, "Invalid clock time", http.StatusBadRequest)
return
}
// Allow collection of memory referenced by the caller by doing all work in
// new goroutines.
go p.writePump()
go p.readPump()
rout.ldHub.joinPlayer<- userId
}
| {
ticker := time.NewTicker(pingPeriod)
defer func() {
ticker.Stop()
p.conn.Close()
}()
for {
select {
case <-p.disconnect:
// Finish this goroutine to not to send messages anymore
return
case move, ok := <-p.sendMove: // Opponent moved a piece
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
payload := websocket.FormatCloseMessage(1001, "")
p.conn.WriteMessage(websocket.CloseMessage, payload)
return
}
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
return
}
w.Write(move)
if err := w.Close(); err != nil {
return
}
case msg, ok := <-p.sendChat: // Chat msg
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
p.conn.WriteMessage(websocket.CloseMessage, []byte{})
return
}
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
log.Println("Could not make next writer:", err)
return
}
w.Write(msgB)
// Add queued chat messages to the current websocket message.
n := len(p.sendChat)
for i := 0; i < n; i++ {
msg = <-p.sendChat
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w.Write([]byte(newline))
w.Write(msgB)
}
if err := w.Close(); err != nil {
log.Println("Could not close writer:", err)
return
}
case <-ticker.C: // ping
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := p.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
log.Println("Could not ping:", err)
return
}
case <-p.clock.C: // Player ran out ouf time
// Inform the opponent about this
p.room.broadcastNoTime<- p.color
data := map[string]string{
"OOT": "MY_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppRanOut: // Opponent ran out ouf time
data := map[string]string{
"OOT": "OPP_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.drawOffer: // Opponent offered draw
data := map[string]string{
"drawOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedDraw: // opponent accepted draw
data := map[string]string{
"oppAcceptedDraw": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppResigned: // opponent resigned
data := map[string]string{
"oppResigned": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.rematchOffer: // Opponent offered rematch
data := map[string]string{
"rematchOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedRematch: // opponent accepted rematch
data := map[string]string{
"oppAcceptedRematch": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReady: // opponent ready
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppDisconnected: // opponent disconnected
data := map[string]string{
"waitingOpp": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReconnected: // opponent reconnected
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppGone: // opponent is gone
data := map[string]string{
"oppGone": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
}
}
} | identifier_body |
player.go | // Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"encoding/json"
"log"
"net/http"
"strings"
"time"
"github.com/gorilla/websocket"
)
const (
// Time allowed to write a message to the peer.
writeWait = 10 * time.Second
// Time allowed to read the next pong message from the peer.
pongWait = 60 * time.Second
// Send pings to peer with this period. Must be less than pongWait.
pingPeriod = (pongWait * 9) / 10
// Maximum message size allowed from peer.
maxMessageSize = 512
)
var (
newline = "\n"
space = " "
)
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(_ *http.Request) bool {return true},
}
// player is a middleman between the websocket connection and the hub.
type player struct {
room *Room
// The websocket connection.
conn *websocket.Conn
// Events channels
sendMove chan []byte
sendChat chan message
oppRanOut chan bool
disconnect chan bool
// Action channels
drawOffer chan bool
oppAcceptedDraw chan bool
oppResigned chan bool
rematchOffer chan bool
oppAcceptedRematch chan bool
oppReady chan bool
oppDisconnected chan bool
oppGone chan bool
oppReconnected chan bool
cleanup func()
switchColors func()
color string
gameId string
timeLeft time.Duration
clock *time.Timer
lastMove time.Time
username string
userId string
}
type move struct {
Color string `json:"color"`
Pgn string `json:"pgn"`
move []byte
}
// Chat message
type message struct {
Move move `json:"move,omitempty"`
Text string `json:"chat"`
Username string `json:"from"`
Resign bool `json:"resign"`
DrawOffer bool `json:"drawOffer"`
AcceptDraw bool `json:"acceptDraw"`
GameOver bool `json:"gameOver"`
RematchOffer bool `json:"rematchOffer"`
AcceptRematch bool `json:"acceptRematch"`
FinishRoom bool `json:"finishRoom"`
userId string
}
// readPump pumps messages from the websocket connection to the room's hub.
//
// The application runs readPump in a per-connection goroutine. The application
// ensures that there is at most one reader on a connection by executing all
// reads from this goroutine.
func (p *player) readPump() {
defer func() {
if p.room != nil {
p.room.disconnect<- p
}
p.sendMove = nil
p.conn.Close()
}()
p.conn.SetReadLimit(maxMessageSize)
p.conn.SetReadDeadline(time.Now().Add(pongWait))
p.conn.SetPongHandler(func(string) error { p.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })
for {
_, msg, err := p.conn.ReadMessage()
if err != nil {
if websocket.IsUnexpectedCloseError(err,
websocket.CloseGoingAway,
websocket.CloseAbnormalClosure,
websocket.CloseNormalClosure,
) {
log.Printf("%v player connection is gone with error: %v", p.color, err)
}
break
}
// Unmarshal message just to get the color.
m := message{}
if err = json.Unmarshal(msg, &m); err != nil {
log.Println("Could not unmarshal msg:", err)
break
}
switch {
case m.Move.Color != "":
// It's a move
m.Move.move = msg
p.room.broadcastMove<- m.Move
case m.Text != "":
// It's a chat message
text := strings.TrimSpace(strings.Replace(m.Text, newline, space, -1))
p.room.broadcastChat<- message{
Text: text,
Username: p.username,
userId: p.userId,
}
case m.Resign:
p.room.broadcastResign<- p.color
case m.DrawOffer:
p.room.broadcastDrawOffer<- p.color
case m.AcceptDraw:
p.room.broadcastAcceptDraw<- p.color
case m.GameOver:
p.room.stopClocks<- true
case m.RematchOffer:
p.room.broadcastRematchOffer<- p.color
case m.AcceptRematch:
p.room.broadcastAcceptRematch<- p.color
case m.FinishRoom:
return
default:
log.Println("Unexpected message", m)
}
}
}
// writePump pumps messages from the room's hub to the websocket connection.
//
// A goroutine running writePump is started for each connection. The
// application ensures that there is at most one writer to a connection by
// executing all writes from this goroutine.
func (p *player) writePump() {
ticker := time.NewTicker(pingPeriod)
defer func() {
ticker.Stop()
p.conn.Close()
}()
for {
select {
case <-p.disconnect:
// Finish this goroutine to not to send messages anymore
return
case move, ok := <-p.sendMove: // Opponent moved a piece
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
payload := websocket.FormatCloseMessage(1001, "")
p.conn.WriteMessage(websocket.CloseMessage, payload)
return
}
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
return
}
w.Write(move)
if err := w.Close(); err != nil {
return
}
case msg, ok := <-p.sendChat: // Chat msg
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
p.conn.WriteMessage(websocket.CloseMessage, []byte{})
return
}
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil |
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
log.Println("Could not make next writer:", err)
return
}
w.Write(msgB)
// Add queued chat messages to the current websocket message.
n := len(p.sendChat)
for i := 0; i < n; i++ {
msg = <-p.sendChat
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w.Write([]byte(newline))
w.Write(msgB)
}
if err := w.Close(); err != nil {
log.Println("Could not close writer:", err)
return
}
case <-ticker.C: // ping
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := p.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
log.Println("Could not ping:", err)
return
}
case <-p.clock.C: // Player ran out ouf time
// Inform the opponent about this
p.room.broadcastNoTime<- p.color
data := map[string]string{
"OOT": "MY_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppRanOut: // Opponent ran out ouf time
data := map[string]string{
"OOT": "OPP_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.drawOffer: // Opponent offered draw
data := map[string]string{
"drawOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedDraw: // opponent accepted draw
data := map[string]string{
"oppAcceptedDraw": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppResigned: // opponent resigned
data := map[string]string{
"oppResigned": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.rematchOffer: // Opponent offered rematch
data := map[string]string{
"rematchOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedRematch: // opponent accepted rematch
data := map[string]string{
"oppAcceptedRematch": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReady: // opponent ready
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppDisconnected: // opponent disconnected
data := map[string]string{
"waitingOpp": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReconnected: // opponent reconnected
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppGone: // opponent is gone
data := map[string]string{
"oppGone": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
}
}
}
// JSON-marshal and send message to the connection.
func sendTextMsg(data map[string]string, conn *websocket.Conn) error {
dataB, err := json.Marshal(data)
if err != nil {
return err
}
conn.SetWriteDeadline(time.Now().Add(writeWait))
w, err := conn.NextWriter(websocket.TextMessage)
if err != nil {
return err
}
w.Write(dataB)
return w.Close()
}
// serveGame handles websocket requests from the peer.
func (rout *router) serveGame(w http.ResponseWriter, r *http.Request,
gameId, color string, minutes int, cleanup, switchColors func(),
username, userId string) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Println(err)
http.Error(w, "Could not upgrade conn", http.StatusInternalServerError)
return
}
playerClock := time.NewTimer(time.Duration(minutes) * time.Minute)
playerClock.Stop()
p := &player{
cleanup: cleanup,
clock: playerClock,
color: color,
conn: conn,
gameId: gameId,
oppRanOut: make(chan bool, 1),
disconnect: make(chan bool),
drawOffer: make(chan bool, 1),
oppAcceptedDraw: make(chan bool, 1),
oppResigned: make(chan bool, 1),
rematchOffer: make(chan bool, 1),
oppAcceptedRematch: make(chan bool, 1),
oppReady: make(chan bool, 1),
oppDisconnected: make(chan bool, 1),
oppGone: make(chan bool, 1),
oppReconnected: make(chan bool, 1),
sendMove: make(chan []byte, 2), // one for the clock, one for the move
sendChat: make(chan message, 128),
switchColors: switchColors,
timeLeft: time.Duration(minutes) * time.Minute,
userId: userId,
username: username,
}
switch minutes {
case 1:
rout.rm.registerPlayer1Min<- p
case 3:
rout.rm.registerPlayer3Min<- p
case 5:
rout.rm.registerPlayer5Min<- p
case 10:
rout.rm.registerPlayer10Min<- p
default:
log.Println("Invalid clock time:", minutes)
http.Error(w, "Invalid clock time", http.StatusBadRequest)
return
}
// Allow collection of memory referenced by the caller by doing all work in
// new goroutines.
go p.writePump()
go p.readPump()
rout.ldHub.joinPlayer<- userId
}
| {
log.Println("Could not marshal data:", err)
break
} | conditional_block |
player.go | // Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"encoding/json"
"log"
"net/http"
"strings"
"time"
"github.com/gorilla/websocket"
)
const (
// Time allowed to write a message to the peer.
writeWait = 10 * time.Second
// Time allowed to read the next pong message from the peer.
pongWait = 60 * time.Second
// Send pings to peer with this period. Must be less than pongWait.
pingPeriod = (pongWait * 9) / 10
// Maximum message size allowed from peer.
maxMessageSize = 512
)
var (
newline = "\n"
space = " "
)
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(_ *http.Request) bool {return true},
}
// player is a middleman between the websocket connection and the hub.
type player struct {
room *Room
// The websocket connection.
conn *websocket.Conn
// Events channels
sendMove chan []byte
sendChat chan message
oppRanOut chan bool
disconnect chan bool
// Action channels
drawOffer chan bool
oppAcceptedDraw chan bool
oppResigned chan bool
rematchOffer chan bool
oppAcceptedRematch chan bool
oppReady chan bool
oppDisconnected chan bool
oppGone chan bool
oppReconnected chan bool
cleanup func()
switchColors func()
color string
gameId string
timeLeft time.Duration
clock *time.Timer
lastMove time.Time
username string
userId string
}
type move struct {
Color string `json:"color"`
Pgn string `json:"pgn"`
move []byte
}
// Chat message
type message struct {
Move move `json:"move,omitempty"`
Text string `json:"chat"`
Username string `json:"from"`
Resign bool `json:"resign"`
DrawOffer bool `json:"drawOffer"`
AcceptDraw bool `json:"acceptDraw"`
GameOver bool `json:"gameOver"`
RematchOffer bool `json:"rematchOffer"`
AcceptRematch bool `json:"acceptRematch"`
FinishRoom bool `json:"finishRoom"`
userId string
}
// readPump pumps messages from the websocket connection to the room's hub.
//
// The application runs readPump in a per-connection goroutine. The application
// ensures that there is at most one reader on a connection by executing all
// reads from this goroutine.
func (p *player) readPump() {
defer func() {
if p.room != nil {
p.room.disconnect<- p
}
p.sendMove = nil
p.conn.Close()
}()
p.conn.SetReadLimit(maxMessageSize)
p.conn.SetReadDeadline(time.Now().Add(pongWait))
p.conn.SetPongHandler(func(string) error { p.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })
for {
_, msg, err := p.conn.ReadMessage()
if err != nil {
if websocket.IsUnexpectedCloseError(err,
websocket.CloseGoingAway,
websocket.CloseAbnormalClosure,
websocket.CloseNormalClosure,
) {
log.Printf("%v player connection is gone with error: %v", p.color, err)
}
break
}
// Unmarshal message just to get the color.
m := message{}
if err = json.Unmarshal(msg, &m); err != nil {
log.Println("Could not unmarshal msg:", err)
break
}
switch {
case m.Move.Color != "":
// It's a move
m.Move.move = msg
p.room.broadcastMove<- m.Move
case m.Text != "":
// It's a chat message
text := strings.TrimSpace(strings.Replace(m.Text, newline, space, -1))
p.room.broadcastChat<- message{
Text: text,
Username: p.username,
userId: p.userId,
}
case m.Resign:
p.room.broadcastResign<- p.color
case m.DrawOffer:
p.room.broadcastDrawOffer<- p.color
case m.AcceptDraw:
p.room.broadcastAcceptDraw<- p.color
case m.GameOver:
p.room.stopClocks<- true
case m.RematchOffer:
p.room.broadcastRematchOffer<- p.color
case m.AcceptRematch:
p.room.broadcastAcceptRematch<- p.color
case m.FinishRoom:
return
default:
log.Println("Unexpected message", m)
}
}
}
// writePump pumps messages from the room's hub to the websocket connection.
//
// A goroutine running writePump is started for each connection. The
// application ensures that there is at most one writer to a connection by
// executing all writes from this goroutine.
func (p *player) writePump() {
ticker := time.NewTicker(pingPeriod)
defer func() {
ticker.Stop()
p.conn.Close()
}()
for {
select {
case <-p.disconnect:
// Finish this goroutine to not to send messages anymore
return
case move, ok := <-p.sendMove: // Opponent moved a piece
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
payload := websocket.FormatCloseMessage(1001, "")
p.conn.WriteMessage(websocket.CloseMessage, payload)
return
}
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
return
}
w.Write(move)
if err := w.Close(); err != nil {
return
}
case msg, ok := <-p.sendChat: // Chat msg
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
p.conn.WriteMessage(websocket.CloseMessage, []byte{})
return
}
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
log.Println("Could not make next writer:", err)
return
}
w.Write(msgB)
// Add queued chat messages to the current websocket message.
n := len(p.sendChat)
for i := 0; i < n; i++ {
msg = <-p.sendChat
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w.Write([]byte(newline))
w.Write(msgB)
}
if err := w.Close(); err != nil {
log.Println("Could not close writer:", err)
return
}
case <-ticker.C: // ping
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := p.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
log.Println("Could not ping:", err)
return
}
case <-p.clock.C: // Player ran out ouf time
// Inform the opponent about this
p.room.broadcastNoTime<- p.color
data := map[string]string{
"OOT": "MY_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppRanOut: // Opponent ran out ouf time
data := map[string]string{
"OOT": "OPP_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.drawOffer: // Opponent offered draw
data := map[string]string{
"drawOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedDraw: // opponent accepted draw
data := map[string]string{
"oppAcceptedDraw": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppResigned: // opponent resigned
data := map[string]string{
"oppResigned": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.rematchOffer: // Opponent offered rematch
data := map[string]string{
"rematchOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedRematch: // opponent accepted rematch
data := map[string]string{
"oppAcceptedRematch": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReady: // opponent ready
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppDisconnected: // opponent disconnected
data := map[string]string{
"waitingOpp": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReconnected: // opponent reconnected
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppGone: // opponent is gone
data := map[string]string{
"oppGone": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
}
}
}
// JSON-marshal and send message to the connection.
func sendTextMsg(data map[string]string, conn *websocket.Conn) error {
dataB, err := json.Marshal(data)
if err != nil {
return err
}
conn.SetWriteDeadline(time.Now().Add(writeWait))
w, err := conn.NextWriter(websocket.TextMessage)
if err != nil {
return err
}
w.Write(dataB)
return w.Close()
}
// serveGame handles websocket requests from the peer.
func (rout *router) serveGame(w http.ResponseWriter, r *http.Request,
gameId, color string, minutes int, cleanup, switchColors func(),
username, userId string) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Println(err)
http.Error(w, "Could not upgrade conn", http.StatusInternalServerError)
return
}
playerClock := time.NewTimer(time.Duration(minutes) * time.Minute)
playerClock.Stop()
p := &player{ | clock: playerClock,
color: color,
conn: conn,
gameId: gameId,
oppRanOut: make(chan bool, 1),
disconnect: make(chan bool),
drawOffer: make(chan bool, 1),
oppAcceptedDraw: make(chan bool, 1),
oppResigned: make(chan bool, 1),
rematchOffer: make(chan bool, 1),
oppAcceptedRematch: make(chan bool, 1),
oppReady: make(chan bool, 1),
oppDisconnected: make(chan bool, 1),
oppGone: make(chan bool, 1),
oppReconnected: make(chan bool, 1),
sendMove: make(chan []byte, 2), // one for the clock, one for the move
sendChat: make(chan message, 128),
switchColors: switchColors,
timeLeft: time.Duration(minutes) * time.Minute,
userId: userId,
username: username,
}
switch minutes {
case 1:
rout.rm.registerPlayer1Min<- p
case 3:
rout.rm.registerPlayer3Min<- p
case 5:
rout.rm.registerPlayer5Min<- p
case 10:
rout.rm.registerPlayer10Min<- p
default:
log.Println("Invalid clock time:", minutes)
http.Error(w, "Invalid clock time", http.StatusBadRequest)
return
}
// Allow collection of memory referenced by the caller by doing all work in
// new goroutines.
go p.writePump()
go p.readPump()
rout.ldHub.joinPlayer<- userId
} | cleanup: cleanup, | random_line_split |
plugin.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"time"
"golang.org/x/sync/singleflight"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
credentialproviderapi "k8s.io/kubelet/pkg/apis/credentialprovider"
"k8s.io/kubelet/pkg/apis/credentialprovider/install"
credentialproviderv1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1"
credentialproviderv1alpha1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1alpha1"
credentialproviderv1beta1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1beta1"
"k8s.io/kubernetes/pkg/credentialprovider"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubeletconfigv1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1"
kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1alpha1"
kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1"
"k8s.io/utils/clock"
)
const (
globalCacheKey = "global"
cachePurgeInterval = time.Minute * 15
)
var (
scheme = runtime.NewScheme()
codecs = serializer.NewCodecFactory(scheme)
apiVersions = map[string]schema.GroupVersion{
credentialproviderv1alpha1.SchemeGroupVersion.String(): credentialproviderv1alpha1.SchemeGroupVersion,
credentialproviderv1beta1.SchemeGroupVersion.String(): credentialproviderv1beta1.SchemeGroupVersion,
credentialproviderv1.SchemeGroupVersion.String(): credentialproviderv1.SchemeGroupVersion,
}
)
func init() {
install.Install(scheme)
kubeletconfig.AddToScheme(scheme)
kubeletconfigv1alpha1.AddToScheme(scheme)
kubeletconfigv1beta1.AddToScheme(scheme)
kubeletconfigv1.AddToScheme(scheme)
}
// RegisterCredentialProviderPlugins is called from kubelet to register external credential provider
// plugins according to the CredentialProviderConfig config file.
func RegisterCredentialProviderPlugins(pluginConfigFile, pluginBinDir string) error {
if _, err := os.Stat(pluginBinDir); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary directory %s did not exist", pluginBinDir)
}
return fmt.Errorf("error inspecting binary directory %s: %w", pluginBinDir, err)
}
credentialProviderConfig, err := readCredentialProviderConfigFile(pluginConfigFile)
if err != nil {
return err
}
errs := validateCredentialProviderConfig(credentialProviderConfig)
if len(errs) > 0 {
return fmt.Errorf("failed to validate credential provider config: %v", errs.ToAggregate())
}
// Register metrics for credential providers
registerMetrics()
for _, provider := range credentialProviderConfig.Providers {
pluginBin := filepath.Join(pluginBinDir, provider.Name)
if _, err := os.Stat(pluginBin); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary executable %s did not exist", pluginBin)
}
return fmt.Errorf("error inspecting binary executable %s: %w", pluginBin, err)
}
plugin, err := newPluginProvider(pluginBinDir, provider)
if err != nil {
return fmt.Errorf("error initializing plugin provider %s: %w", provider.Name, err)
}
credentialprovider.RegisterCredentialProvider(provider.Name, plugin)
}
return nil
}
// newPluginProvider returns a new pluginProvider based on the credential provider config.
func newPluginProvider(pluginBinDir string, provider kubeletconfig.CredentialProvider) (*pluginProvider, error) {
mediaType := "application/json"
info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
if !ok {
return nil, fmt.Errorf("unsupported media type %q", mediaType)
}
gv, ok := apiVersions[provider.APIVersion]
if !ok {
return nil, fmt.Errorf("invalid apiVersion: %q", provider.APIVersion)
}
clock := clock.RealClock{}
return &pluginProvider{
clock: clock,
matchImages: provider.MatchImages,
cache: cache.NewExpirationStore(cacheKeyFunc, &cacheExpirationPolicy{clock: clock}),
defaultCacheDuration: provider.DefaultCacheDuration.Duration,
lastCachePurge: clock.Now(),
plugin: &execPlugin{
name: provider.Name,
apiVersion: provider.APIVersion,
encoder: codecs.EncoderForVersion(info.Serializer, gv),
pluginBinDir: pluginBinDir,
args: provider.Args,
envVars: provider.Env,
environ: os.Environ,
},
}, nil
}
// pluginProvider is the plugin-based implementation of the DockerConfigProvider interface.
type pluginProvider struct {
clock clock.Clock
sync.Mutex
group singleflight.Group
// matchImages defines the matching image URLs this plugin should operate against.
// The plugin provider will not return any credentials for images that do not match
// against this list of match URLs.
matchImages []string
// cache stores DockerConfig entries with an expiration time based on the cache duration
// returned from the credential provider plugin.
cache cache.Store
// defaultCacheDuration is the default duration credentials are cached in-memory if the auth plugin
// response did not provide a cache duration for credentials.
defaultCacheDuration time.Duration
// plugin is the exec implementation of the credential providing plugin.
plugin Plugin
// lastCachePurge is the last time cache is cleaned for expired entries.
lastCachePurge time.Time
}
// cacheEntry is the cache object that will be stored in cache.Store.
type cacheEntry struct {
key string
credentials credentialprovider.DockerConfig
expiresAt time.Time
}
// cacheKeyFunc extracts AuthEntry.MatchKey as the cache key function for the plugin provider.
func cacheKeyFunc(obj interface{}) (string, error) {
key := obj.(*cacheEntry).key
return key, nil
}
// cacheExpirationPolicy defines implements cache.ExpirationPolicy, determining expiration based on the expiresAt timestamp.
type cacheExpirationPolicy struct {
clock clock.Clock
}
// IsExpired returns true if the current time is after cacheEntry.expiresAt, which is determined by the
// cache duration returned from the credential provider plugin response.
func (c *cacheExpirationPolicy) IsExpired(entry *cache.TimestampedEntry) bool {
return c.clock.Now().After(entry.Obj.(*cacheEntry).expiresAt)
}
// Provide returns a credentialprovider.DockerConfig based on the credentials returned
// from cache or the exec plugin.
func (p *pluginProvider) Provide(image string) credentialprovider.DockerConfig {
if !p.isImageAllowed(image) {
return credentialprovider.DockerConfig{}
}
cachedConfig, found, err := p.getCachedCredentials(image)
if err != nil {
klog.Errorf("Failed to get cached docker config: %v", err)
return credentialprovider.DockerConfig{}
}
if found {
return cachedConfig
}
// ExecPlugin is wrapped in single flight to exec plugin once for concurrent same image request.
// The caveat here is we don't know cacheKeyType yet, so if cacheKeyType is registry/global and credentials saved in cache
// on per registry/global basis then exec will be called for all requests if requests are made concurrently.
// foo.bar.registry
// foo.bar.registry/image1
// foo.bar.registry/image2
res, err, _ := p.group.Do(image, func() (interface{}, error) {
return p.plugin.ExecPlugin(context.Background(), image)
})
if err != nil {
klog.Errorf("Failed getting credential from external registry credential provider: %v", err)
return credentialprovider.DockerConfig{}
}
response, ok := res.(*credentialproviderapi.CredentialProviderResponse)
if !ok {
klog.Errorf("Invalid response type returned by external credential provider")
return credentialprovider.DockerConfig{}
}
var cacheKey string
switch cacheKeyType := response.CacheKeyType; cacheKeyType {
case credentialproviderapi.ImagePluginCacheKeyType:
cacheKey = image
case credentialproviderapi.RegistryPluginCacheKeyType:
registry := parseRegistry(image)
cacheKey = registry
case credentialproviderapi.GlobalPluginCacheKeyType:
cacheKey = globalCacheKey
default:
klog.Errorf("credential provider plugin did not return a valid cacheKeyType: %q", cacheKeyType)
return credentialprovider.DockerConfig{}
}
dockerConfig := make(credentialprovider.DockerConfig, len(response.Auth))
for matchImage, authConfig := range response.Auth {
dockerConfig[matchImage] = credentialprovider.DockerConfigEntry{
Username: authConfig.Username,
Password: authConfig.Password,
}
}
// cache duration was explicitly 0 so don't cache this response at all.
if response.CacheDuration != nil && response.CacheDuration.Duration == 0 {
return dockerConfig
}
var expiresAt time.Time
// nil cache duration means use the default cache duration
if response.CacheDuration == nil {
if p.defaultCacheDuration == 0 {
return dockerConfig
}
expiresAt = p.clock.Now().Add(p.defaultCacheDuration)
} else {
expiresAt = p.clock.Now().Add(response.CacheDuration.Duration)
}
cachedEntry := &cacheEntry{
key: cacheKey,
credentials: dockerConfig,
expiresAt: expiresAt,
}
if err := p.cache.Add(cachedEntry); err != nil {
klog.Errorf("Error adding auth entry to cache: %v", err)
}
return dockerConfig
}
// Enabled always returns true since registration of the plugin via kubelet implies it should be enabled.
func (p *pluginProvider) Enabled() bool |
// isImageAllowed returns true if the image matches against the list of allowed matches by the plugin.
func (p *pluginProvider) isImageAllowed(image string) bool {
for _, matchImage := range p.matchImages {
if matched, _ := credentialprovider.URLsMatchStr(matchImage, image); matched {
return true
}
}
return false
}
// getCachedCredentials returns a credentialprovider.DockerConfig if cached from the plugin.
func (p *pluginProvider) getCachedCredentials(image string) (credentialprovider.DockerConfig, bool, error) {
p.Lock()
if p.clock.Now().After(p.lastCachePurge.Add(cachePurgeInterval)) {
// NewExpirationCache purges expired entries when List() is called
// The expired entry in the cache is removed only when Get or List called on it.
// List() is called on some interval to remove those expired entries on which Get is never called.
_ = p.cache.List()
p.lastCachePurge = p.clock.Now()
}
p.Unlock()
obj, found, err := p.cache.GetByKey(image)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
registry := parseRegistry(image)
obj, found, err = p.cache.GetByKey(registry)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
obj, found, err = p.cache.GetByKey(globalCacheKey)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
return nil, false, nil
}
// Plugin is the interface calling ExecPlugin. This is mainly for testability
// so tests don't have to actually exec any processes.
type Plugin interface {
ExecPlugin(ctx context.Context, image string) (*credentialproviderapi.CredentialProviderResponse, error)
}
// execPlugin is the implementation of the Plugin interface that execs a credential provider plugin based
// on it's name provided in CredentialProviderConfig. It is assumed that the executable is available in the
// plugin directory provided by the kubelet.
type execPlugin struct {
name string
apiVersion string
encoder runtime.Encoder
args []string
envVars []kubeletconfig.ExecEnvVar
pluginBinDir string
environ func() []string
}
// ExecPlugin executes the plugin binary with arguments and environment variables specified in CredentialProviderConfig:
//
// $ ENV_NAME=ENV_VALUE <plugin-name> args[0] args[1] <<<request
//
// The plugin is expected to receive the CredentialProviderRequest API via stdin from the kubelet and
// return CredentialProviderResponse via stdout.
func (e *execPlugin) ExecPlugin(ctx context.Context, image string) (*credentialproviderapi.CredentialProviderResponse, error) {
klog.V(5).Infof("Getting image %s credentials from external exec plugin %s", image, e.name)
authRequest := &credentialproviderapi.CredentialProviderRequest{Image: image}
data, err := e.encodeRequest(authRequest)
if err != nil {
return nil, fmt.Errorf("failed to encode auth request: %w", err)
}
stdout := &bytes.Buffer{}
stderr := &bytes.Buffer{}
stdin := bytes.NewBuffer(data)
// Use a catch-all timeout of 1 minute for all exec-based plugins, this should leave enough
// head room in case a plugin needs to retry a failed request while ensuring an exec plugin
// does not run forever. In the future we may want this timeout to be tweakable from the plugin
// config file.
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
cmd := exec.CommandContext(ctx, filepath.Join(e.pluginBinDir, e.name), e.args...)
cmd.Stdout, cmd.Stderr, cmd.Stdin = stdout, stderr, stdin
var configEnvVars []string
for _, v := range e.envVars {
configEnvVars = append(configEnvVars, fmt.Sprintf("%s=%s", v.Name, v.Value))
}
// Append current system environment variables, to the ones configured in the
// credential provider file. Failing to do so may result in unsuccessful execution
// of the provider binary, see https://github.com/kubernetes/kubernetes/issues/102750
// also, this behaviour is inline with Credential Provider Config spec
cmd.Env = mergeEnvVars(e.environ(), configEnvVars)
if err = e.runPlugin(ctx, cmd, image); err != nil {
return nil, fmt.Errorf("%w: %s", err, stderr.String())
}
data = stdout.Bytes()
// check that the response apiVersion matches what is expected
gvk, err := json.DefaultMetaFactory.Interpret(data)
if err != nil {
return nil, fmt.Errorf("error reading GVK from response: %w", err)
}
if gvk.GroupVersion().String() != e.apiVersion {
return nil, fmt.Errorf("apiVersion from credential plugin response did not match expected apiVersion:%s, actual apiVersion:%s", e.apiVersion, gvk.GroupVersion().String())
}
response, err := e.decodeResponse(data)
if err != nil {
// err is explicitly not wrapped since it may contain credentials in the response.
return nil, errors.New("error decoding credential provider plugin response from stdout")
}
return response, nil
}
func (e *execPlugin) runPlugin(ctx context.Context, cmd *exec.Cmd, image string) error {
startTime := time.Now()
defer func() {
kubeletCredentialProviderPluginDuration.WithLabelValues(e.name).Observe(time.Since(startTime).Seconds())
}()
err := cmd.Run()
if ctx.Err() != nil {
kubeletCredentialProviderPluginErrors.WithLabelValues(e.name).Inc()
return fmt.Errorf("error execing credential provider plugin %s for image %s: %w", e.name, image, ctx.Err())
}
if err != nil {
kubeletCredentialProviderPluginErrors.WithLabelValues(e.name).Inc()
return fmt.Errorf("error execing credential provider plugin %s for image %s: %w", e.name, image, err)
}
return nil
}
// encodeRequest encodes the internal CredentialProviderRequest type into the v1alpha1 version in json
func (e *execPlugin) encodeRequest(request *credentialproviderapi.CredentialProviderRequest) ([]byte, error) {
data, err := runtime.Encode(e.encoder, request)
if err != nil {
return nil, fmt.Errorf("error encoding request: %w", err)
}
return data, nil
}
// decodeResponse decodes data into the internal CredentialProviderResponse type
func (e *execPlugin) decodeResponse(data []byte) (*credentialproviderapi.CredentialProviderResponse, error) {
obj, gvk, err := codecs.UniversalDecoder().Decode(data, nil, nil)
if err != nil {
return nil, err
}
if gvk.Kind != "CredentialProviderResponse" {
return nil, fmt.Errorf("failed to decode CredentialProviderResponse, unexpected Kind: %q", gvk.Kind)
}
if gvk.Group != credentialproviderapi.GroupName {
return nil, fmt.Errorf("failed to decode CredentialProviderResponse, unexpected Group: %s", gvk.Group)
}
if internalResponse, ok := obj.(*credentialproviderapi.CredentialProviderResponse); ok {
return internalResponse, nil
}
return nil, fmt.Errorf("unable to convert %T to *CredentialProviderResponse", obj)
}
// parseRegistry extracts the registry hostname of an image (including port if specified).
func parseRegistry(image string) string {
imageParts := strings.Split(image, "/")
return imageParts[0]
}
// mergedEnvVars overlays system defined env vars with credential provider env vars,
// it gives priority to the credential provider vars allowing user to override system
// env vars
func mergeEnvVars(sysEnvVars, credProviderVars []string) []string {
mergedEnvVars := sysEnvVars
for _, credProviderVar := range credProviderVars {
mergedEnvVars = append(mergedEnvVars, credProviderVar)
}
return mergedEnvVars
}
| {
return true
} | identifier_body |
plugin.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"time"
"golang.org/x/sync/singleflight"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
credentialproviderapi "k8s.io/kubelet/pkg/apis/credentialprovider"
"k8s.io/kubelet/pkg/apis/credentialprovider/install"
credentialproviderv1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1"
credentialproviderv1alpha1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1alpha1"
credentialproviderv1beta1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1beta1"
"k8s.io/kubernetes/pkg/credentialprovider"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubeletconfigv1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1"
kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1alpha1"
kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1"
"k8s.io/utils/clock"
)
const (
globalCacheKey = "global"
cachePurgeInterval = time.Minute * 15
)
var (
scheme = runtime.NewScheme()
codecs = serializer.NewCodecFactory(scheme)
apiVersions = map[string]schema.GroupVersion{
credentialproviderv1alpha1.SchemeGroupVersion.String(): credentialproviderv1alpha1.SchemeGroupVersion,
credentialproviderv1beta1.SchemeGroupVersion.String(): credentialproviderv1beta1.SchemeGroupVersion,
credentialproviderv1.SchemeGroupVersion.String(): credentialproviderv1.SchemeGroupVersion,
}
)
func init() {
install.Install(scheme)
kubeletconfig.AddToScheme(scheme)
kubeletconfigv1alpha1.AddToScheme(scheme)
kubeletconfigv1beta1.AddToScheme(scheme)
kubeletconfigv1.AddToScheme(scheme)
}
// RegisterCredentialProviderPlugins is called from kubelet to register external credential provider
// plugins according to the CredentialProviderConfig config file.
func RegisterCredentialProviderPlugins(pluginConfigFile, pluginBinDir string) error {
if _, err := os.Stat(pluginBinDir); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary directory %s did not exist", pluginBinDir)
}
return fmt.Errorf("error inspecting binary directory %s: %w", pluginBinDir, err)
}
credentialProviderConfig, err := readCredentialProviderConfigFile(pluginConfigFile)
if err != nil {
return err
}
errs := validateCredentialProviderConfig(credentialProviderConfig)
if len(errs) > 0 {
return fmt.Errorf("failed to validate credential provider config: %v", errs.ToAggregate())
}
// Register metrics for credential providers
registerMetrics()
for _, provider := range credentialProviderConfig.Providers {
pluginBin := filepath.Join(pluginBinDir, provider.Name)
if _, err := os.Stat(pluginBin); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary executable %s did not exist", pluginBin)
}
return fmt.Errorf("error inspecting binary executable %s: %w", pluginBin, err)
}
plugin, err := newPluginProvider(pluginBinDir, provider)
if err != nil {
return fmt.Errorf("error initializing plugin provider %s: %w", provider.Name, err)
}
credentialprovider.RegisterCredentialProvider(provider.Name, plugin)
}
return nil
}
// newPluginProvider returns a new pluginProvider based on the credential provider config.
func newPluginProvider(pluginBinDir string, provider kubeletconfig.CredentialProvider) (*pluginProvider, error) {
mediaType := "application/json"
info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
if !ok {
return nil, fmt.Errorf("unsupported media type %q", mediaType)
}
gv, ok := apiVersions[provider.APIVersion]
if !ok {
return nil, fmt.Errorf("invalid apiVersion: %q", provider.APIVersion)
}
clock := clock.RealClock{}
return &pluginProvider{
clock: clock,
matchImages: provider.MatchImages,
cache: cache.NewExpirationStore(cacheKeyFunc, &cacheExpirationPolicy{clock: clock}),
defaultCacheDuration: provider.DefaultCacheDuration.Duration,
lastCachePurge: clock.Now(),
plugin: &execPlugin{
name: provider.Name,
apiVersion: provider.APIVersion,
encoder: codecs.EncoderForVersion(info.Serializer, gv),
pluginBinDir: pluginBinDir,
args: provider.Args,
envVars: provider.Env,
environ: os.Environ,
},
}, nil
}
// pluginProvider is the plugin-based implementation of the DockerConfigProvider interface.
type pluginProvider struct {
clock clock.Clock
sync.Mutex
group singleflight.Group
// matchImages defines the matching image URLs this plugin should operate against.
// The plugin provider will not return any credentials for images that do not match
// against this list of match URLs.
matchImages []string
// cache stores DockerConfig entries with an expiration time based on the cache duration
// returned from the credential provider plugin.
cache cache.Store
// defaultCacheDuration is the default duration credentials are cached in-memory if the auth plugin
// response did not provide a cache duration for credentials.
defaultCacheDuration time.Duration
// plugin is the exec implementation of the credential providing plugin.
plugin Plugin
// lastCachePurge is the last time cache is cleaned for expired entries.
lastCachePurge time.Time
}
// cacheEntry is the cache object that will be stored in cache.Store.
type cacheEntry struct {
key string
credentials credentialprovider.DockerConfig
expiresAt time.Time
}
// cacheKeyFunc extracts AuthEntry.MatchKey as the cache key function for the plugin provider.
func cacheKeyFunc(obj interface{}) (string, error) {
key := obj.(*cacheEntry).key
return key, nil
}
// cacheExpirationPolicy defines implements cache.ExpirationPolicy, determining expiration based on the expiresAt timestamp.
type cacheExpirationPolicy struct {
clock clock.Clock
}
// IsExpired returns true if the current time is after cacheEntry.expiresAt, which is determined by the
// cache duration returned from the credential provider plugin response.
func (c *cacheExpirationPolicy) IsExpired(entry *cache.TimestampedEntry) bool {
return c.clock.Now().After(entry.Obj.(*cacheEntry).expiresAt)
}
// Provide returns a credentialprovider.DockerConfig based on the credentials returned
// from cache or the exec plugin.
func (p *pluginProvider) Provide(image string) credentialprovider.DockerConfig {
if !p.isImageAllowed(image) {
return credentialprovider.DockerConfig{}
}
cachedConfig, found, err := p.getCachedCredentials(image)
if err != nil {
klog.Errorf("Failed to get cached docker config: %v", err)
return credentialprovider.DockerConfig{}
}
if found {
return cachedConfig
}
// ExecPlugin is wrapped in single flight to exec plugin once for concurrent same image request.
// The caveat here is we don't know cacheKeyType yet, so if cacheKeyType is registry/global and credentials saved in cache
// on per registry/global basis then exec will be called for all requests if requests are made concurrently.
// foo.bar.registry
// foo.bar.registry/image1
// foo.bar.registry/image2
res, err, _ := p.group.Do(image, func() (interface{}, error) {
return p.plugin.ExecPlugin(context.Background(), image)
})
if err != nil { | if !ok {
klog.Errorf("Invalid response type returned by external credential provider")
return credentialprovider.DockerConfig{}
}
var cacheKey string
switch cacheKeyType := response.CacheKeyType; cacheKeyType {
case credentialproviderapi.ImagePluginCacheKeyType:
cacheKey = image
case credentialproviderapi.RegistryPluginCacheKeyType:
registry := parseRegistry(image)
cacheKey = registry
case credentialproviderapi.GlobalPluginCacheKeyType:
cacheKey = globalCacheKey
default:
klog.Errorf("credential provider plugin did not return a valid cacheKeyType: %q", cacheKeyType)
return credentialprovider.DockerConfig{}
}
dockerConfig := make(credentialprovider.DockerConfig, len(response.Auth))
for matchImage, authConfig := range response.Auth {
dockerConfig[matchImage] = credentialprovider.DockerConfigEntry{
Username: authConfig.Username,
Password: authConfig.Password,
}
}
// cache duration was explicitly 0 so don't cache this response at all.
if response.CacheDuration != nil && response.CacheDuration.Duration == 0 {
return dockerConfig
}
var expiresAt time.Time
// nil cache duration means use the default cache duration
if response.CacheDuration == nil {
if p.defaultCacheDuration == 0 {
return dockerConfig
}
expiresAt = p.clock.Now().Add(p.defaultCacheDuration)
} else {
expiresAt = p.clock.Now().Add(response.CacheDuration.Duration)
}
cachedEntry := &cacheEntry{
key: cacheKey,
credentials: dockerConfig,
expiresAt: expiresAt,
}
if err := p.cache.Add(cachedEntry); err != nil {
klog.Errorf("Error adding auth entry to cache: %v", err)
}
return dockerConfig
}
// Enabled always returns true since registration of the plugin via kubelet implies it should be enabled.
func (p *pluginProvider) Enabled() bool {
return true
}
// isImageAllowed returns true if the image matches against the list of allowed matches by the plugin.
func (p *pluginProvider) isImageAllowed(image string) bool {
for _, matchImage := range p.matchImages {
if matched, _ := credentialprovider.URLsMatchStr(matchImage, image); matched {
return true
}
}
return false
}
// getCachedCredentials returns a credentialprovider.DockerConfig if cached from the plugin.
func (p *pluginProvider) getCachedCredentials(image string) (credentialprovider.DockerConfig, bool, error) {
p.Lock()
if p.clock.Now().After(p.lastCachePurge.Add(cachePurgeInterval)) {
// NewExpirationCache purges expired entries when List() is called
// The expired entry in the cache is removed only when Get or List called on it.
// List() is called on some interval to remove those expired entries on which Get is never called.
_ = p.cache.List()
p.lastCachePurge = p.clock.Now()
}
p.Unlock()
obj, found, err := p.cache.GetByKey(image)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
registry := parseRegistry(image)
obj, found, err = p.cache.GetByKey(registry)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
obj, found, err = p.cache.GetByKey(globalCacheKey)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
return nil, false, nil
}
// Plugin is the interface calling ExecPlugin. This is mainly for testability
// so tests don't have to actually exec any processes.
type Plugin interface {
ExecPlugin(ctx context.Context, image string) (*credentialproviderapi.CredentialProviderResponse, error)
}
// execPlugin is the implementation of the Plugin interface that execs a credential provider plugin based
// on it's name provided in CredentialProviderConfig. It is assumed that the executable is available in the
// plugin directory provided by the kubelet.
type execPlugin struct {
name string
apiVersion string
encoder runtime.Encoder
args []string
envVars []kubeletconfig.ExecEnvVar
pluginBinDir string
environ func() []string
}
// ExecPlugin executes the plugin binary with arguments and environment variables specified in CredentialProviderConfig:
//
// $ ENV_NAME=ENV_VALUE <plugin-name> args[0] args[1] <<<request
//
// The plugin is expected to receive the CredentialProviderRequest API via stdin from the kubelet and
// return CredentialProviderResponse via stdout.
func (e *execPlugin) ExecPlugin(ctx context.Context, image string) (*credentialproviderapi.CredentialProviderResponse, error) {
klog.V(5).Infof("Getting image %s credentials from external exec plugin %s", image, e.name)
authRequest := &credentialproviderapi.CredentialProviderRequest{Image: image}
data, err := e.encodeRequest(authRequest)
if err != nil {
return nil, fmt.Errorf("failed to encode auth request: %w", err)
}
stdout := &bytes.Buffer{}
stderr := &bytes.Buffer{}
stdin := bytes.NewBuffer(data)
// Use a catch-all timeout of 1 minute for all exec-based plugins, this should leave enough
// head room in case a plugin needs to retry a failed request while ensuring an exec plugin
// does not run forever. In the future we may want this timeout to be tweakable from the plugin
// config file.
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
cmd := exec.CommandContext(ctx, filepath.Join(e.pluginBinDir, e.name), e.args...)
cmd.Stdout, cmd.Stderr, cmd.Stdin = stdout, stderr, stdin
var configEnvVars []string
for _, v := range e.envVars {
configEnvVars = append(configEnvVars, fmt.Sprintf("%s=%s", v.Name, v.Value))
}
// Append current system environment variables, to the ones configured in the
// credential provider file. Failing to do so may result in unsuccessful execution
// of the provider binary, see https://github.com/kubernetes/kubernetes/issues/102750
// also, this behaviour is inline with Credential Provider Config spec
cmd.Env = mergeEnvVars(e.environ(), configEnvVars)
if err = e.runPlugin(ctx, cmd, image); err != nil {
return nil, fmt.Errorf("%w: %s", err, stderr.String())
}
data = stdout.Bytes()
// check that the response apiVersion matches what is expected
gvk, err := json.DefaultMetaFactory.Interpret(data)
if err != nil {
return nil, fmt.Errorf("error reading GVK from response: %w", err)
}
if gvk.GroupVersion().String() != e.apiVersion {
return nil, fmt.Errorf("apiVersion from credential plugin response did not match expected apiVersion:%s, actual apiVersion:%s", e.apiVersion, gvk.GroupVersion().String())
}
response, err := e.decodeResponse(data)
if err != nil {
// err is explicitly not wrapped since it may contain credentials in the response.
return nil, errors.New("error decoding credential provider plugin response from stdout")
}
return response, nil
}
func (e *execPlugin) runPlugin(ctx context.Context, cmd *exec.Cmd, image string) error {
startTime := time.Now()
defer func() {
kubeletCredentialProviderPluginDuration.WithLabelValues(e.name).Observe(time.Since(startTime).Seconds())
}()
err := cmd.Run()
if ctx.Err() != nil {
kubeletCredentialProviderPluginErrors.WithLabelValues(e.name).Inc()
return fmt.Errorf("error execing credential provider plugin %s for image %s: %w", e.name, image, ctx.Err())
}
if err != nil {
kubeletCredentialProviderPluginErrors.WithLabelValues(e.name).Inc()
return fmt.Errorf("error execing credential provider plugin %s for image %s: %w", e.name, image, err)
}
return nil
}
// encodeRequest encodes the internal CredentialProviderRequest type into the v1alpha1 version in json
func (e *execPlugin) encodeRequest(request *credentialproviderapi.CredentialProviderRequest) ([]byte, error) {
data, err := runtime.Encode(e.encoder, request)
if err != nil {
return nil, fmt.Errorf("error encoding request: %w", err)
}
return data, nil
}
// decodeResponse decodes data into the internal CredentialProviderResponse type
func (e *execPlugin) decodeResponse(data []byte) (*credentialproviderapi.CredentialProviderResponse, error) {
obj, gvk, err := codecs.UniversalDecoder().Decode(data, nil, nil)
if err != nil {
return nil, err
}
if gvk.Kind != "CredentialProviderResponse" {
return nil, fmt.Errorf("failed to decode CredentialProviderResponse, unexpected Kind: %q", gvk.Kind)
}
if gvk.Group != credentialproviderapi.GroupName {
return nil, fmt.Errorf("failed to decode CredentialProviderResponse, unexpected Group: %s", gvk.Group)
}
if internalResponse, ok := obj.(*credentialproviderapi.CredentialProviderResponse); ok {
return internalResponse, nil
}
return nil, fmt.Errorf("unable to convert %T to *CredentialProviderResponse", obj)
}
// parseRegistry extracts the registry hostname of an image (including port if specified).
func parseRegistry(image string) string {
imageParts := strings.Split(image, "/")
return imageParts[0]
}
// mergedEnvVars overlays system defined env vars with credential provider env vars,
// it gives priority to the credential provider vars allowing user to override system
// env vars
func mergeEnvVars(sysEnvVars, credProviderVars []string) []string {
mergedEnvVars := sysEnvVars
for _, credProviderVar := range credProviderVars {
mergedEnvVars = append(mergedEnvVars, credProviderVar)
}
return mergedEnvVars
} | klog.Errorf("Failed getting credential from external registry credential provider: %v", err)
return credentialprovider.DockerConfig{}
}
response, ok := res.(*credentialproviderapi.CredentialProviderResponse) | random_line_split |
plugin.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"time"
"golang.org/x/sync/singleflight"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
credentialproviderapi "k8s.io/kubelet/pkg/apis/credentialprovider"
"k8s.io/kubelet/pkg/apis/credentialprovider/install"
credentialproviderv1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1"
credentialproviderv1alpha1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1alpha1"
credentialproviderv1beta1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1beta1"
"k8s.io/kubernetes/pkg/credentialprovider"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubeletconfigv1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1"
kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1alpha1"
kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1"
"k8s.io/utils/clock"
)
const (
globalCacheKey = "global"
cachePurgeInterval = time.Minute * 15
)
var (
scheme = runtime.NewScheme()
codecs = serializer.NewCodecFactory(scheme)
apiVersions = map[string]schema.GroupVersion{
credentialproviderv1alpha1.SchemeGroupVersion.String(): credentialproviderv1alpha1.SchemeGroupVersion,
credentialproviderv1beta1.SchemeGroupVersion.String(): credentialproviderv1beta1.SchemeGroupVersion,
credentialproviderv1.SchemeGroupVersion.String(): credentialproviderv1.SchemeGroupVersion,
}
)
func init() {
install.Install(scheme)
kubeletconfig.AddToScheme(scheme)
kubeletconfigv1alpha1.AddToScheme(scheme)
kubeletconfigv1beta1.AddToScheme(scheme)
kubeletconfigv1.AddToScheme(scheme)
}
// RegisterCredentialProviderPlugins is called from kubelet to register external credential provider
// plugins according to the CredentialProviderConfig config file.
func RegisterCredentialProviderPlugins(pluginConfigFile, pluginBinDir string) error {
if _, err := os.Stat(pluginBinDir); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary directory %s did not exist", pluginBinDir)
}
return fmt.Errorf("error inspecting binary directory %s: %w", pluginBinDir, err)
}
credentialProviderConfig, err := readCredentialProviderConfigFile(pluginConfigFile)
if err != nil {
return err
}
errs := validateCredentialProviderConfig(credentialProviderConfig)
if len(errs) > 0 {
return fmt.Errorf("failed to validate credential provider config: %v", errs.ToAggregate())
}
// Register metrics for credential providers
registerMetrics()
for _, provider := range credentialProviderConfig.Providers {
pluginBin := filepath.Join(pluginBinDir, provider.Name)
if _, err := os.Stat(pluginBin); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary executable %s did not exist", pluginBin)
}
return fmt.Errorf("error inspecting binary executable %s: %w", pluginBin, err)
}
plugin, err := newPluginProvider(pluginBinDir, provider)
if err != nil {
return fmt.Errorf("error initializing plugin provider %s: %w", provider.Name, err)
}
credentialprovider.RegisterCredentialProvider(provider.Name, plugin)
}
return nil
}
// newPluginProvider returns a new pluginProvider based on the credential provider config.
func | (pluginBinDir string, provider kubeletconfig.CredentialProvider) (*pluginProvider, error) {
mediaType := "application/json"
info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
if !ok {
return nil, fmt.Errorf("unsupported media type %q", mediaType)
}
gv, ok := apiVersions[provider.APIVersion]
if !ok {
return nil, fmt.Errorf("invalid apiVersion: %q", provider.APIVersion)
}
clock := clock.RealClock{}
return &pluginProvider{
clock: clock,
matchImages: provider.MatchImages,
cache: cache.NewExpirationStore(cacheKeyFunc, &cacheExpirationPolicy{clock: clock}),
defaultCacheDuration: provider.DefaultCacheDuration.Duration,
lastCachePurge: clock.Now(),
plugin: &execPlugin{
name: provider.Name,
apiVersion: provider.APIVersion,
encoder: codecs.EncoderForVersion(info.Serializer, gv),
pluginBinDir: pluginBinDir,
args: provider.Args,
envVars: provider.Env,
environ: os.Environ,
},
}, nil
}
// pluginProvider is the plugin-based implementation of the DockerConfigProvider interface.
type pluginProvider struct {
clock clock.Clock
sync.Mutex
group singleflight.Group
// matchImages defines the matching image URLs this plugin should operate against.
// The plugin provider will not return any credentials for images that do not match
// against this list of match URLs.
matchImages []string
// cache stores DockerConfig entries with an expiration time based on the cache duration
// returned from the credential provider plugin.
cache cache.Store
// defaultCacheDuration is the default duration credentials are cached in-memory if the auth plugin
// response did not provide a cache duration for credentials.
defaultCacheDuration time.Duration
// plugin is the exec implementation of the credential providing plugin.
plugin Plugin
// lastCachePurge is the last time cache is cleaned for expired entries.
lastCachePurge time.Time
}
// cacheEntry is the cache object that will be stored in cache.Store.
type cacheEntry struct {
key string
credentials credentialprovider.DockerConfig
expiresAt time.Time
}
// cacheKeyFunc extracts AuthEntry.MatchKey as the cache key function for the plugin provider.
func cacheKeyFunc(obj interface{}) (string, error) {
key := obj.(*cacheEntry).key
return key, nil
}
// cacheExpirationPolicy defines implements cache.ExpirationPolicy, determining expiration based on the expiresAt timestamp.
type cacheExpirationPolicy struct {
clock clock.Clock
}
// IsExpired returns true if the current time is after cacheEntry.expiresAt, which is determined by the
// cache duration returned from the credential provider plugin response.
// NOTE(review): the type assertion is unchecked; entries are only ever added
// by Provide as *cacheEntry, but any other object in the store would panic here.
func (c *cacheExpirationPolicy) IsExpired(entry *cache.TimestampedEntry) bool {
	return c.clock.Now().After(entry.Obj.(*cacheEntry).expiresAt)
}
// Provide returns a credentialprovider.DockerConfig based on the credentials returned
// from cache or the exec plugin. On any error it logs and returns an empty
// config rather than failing the image pull outright.
func (p *pluginProvider) Provide(image string) credentialprovider.DockerConfig {
	// Never return credentials for images outside the configured match list.
	if !p.isImageAllowed(image) {
		return credentialprovider.DockerConfig{}
	}
	cachedConfig, found, err := p.getCachedCredentials(image)
	if err != nil {
		klog.Errorf("Failed to get cached docker config: %v", err)
		return credentialprovider.DockerConfig{}
	}
	if found {
		return cachedConfig
	}
	// ExecPlugin is wrapped in single flight to exec plugin once for concurrent same image request.
	// The caveat here is we don't know cacheKeyType yet, so if cacheKeyType is registry/global and credentials saved in cache
	// on per registry/global basis then exec will be called for all requests if requests are made concurrently.
	// foo.bar.registry
	// foo.bar.registry/image1
	// foo.bar.registry/image2
	res, err, _ := p.group.Do(image, func() (interface{}, error) {
		return p.plugin.ExecPlugin(context.Background(), image)
	})
	if err != nil {
		klog.Errorf("Failed getting credential from external registry credential provider: %v", err)
		return credentialprovider.DockerConfig{}
	}
	response, ok := res.(*credentialproviderapi.CredentialProviderResponse)
	if !ok {
		klog.Errorf("Invalid response type returned by external credential provider")
		return credentialprovider.DockerConfig{}
	}
	// Pick the cache key at the granularity the plugin requested:
	// per-image, per-registry, or one global entry.
	var cacheKey string
	switch cacheKeyType := response.CacheKeyType; cacheKeyType {
	case credentialproviderapi.ImagePluginCacheKeyType:
		cacheKey = image
	case credentialproviderapi.RegistryPluginCacheKeyType:
		registry := parseRegistry(image)
		cacheKey = registry
	case credentialproviderapi.GlobalPluginCacheKeyType:
		cacheKey = globalCacheKey
	default:
		klog.Errorf("credential provider plugin did not return a valid cacheKeyType: %q", cacheKeyType)
		return credentialprovider.DockerConfig{}
	}
	// Convert the plugin's auth map into the kubelet's DockerConfig shape.
	dockerConfig := make(credentialprovider.DockerConfig, len(response.Auth))
	for matchImage, authConfig := range response.Auth {
		dockerConfig[matchImage] = credentialprovider.DockerConfigEntry{
			Username: authConfig.Username,
			Password: authConfig.Password,
		}
	}
	// cache duration was explicitly 0 so don't cache this response at all.
	if response.CacheDuration != nil && response.CacheDuration.Duration == 0 {
		return dockerConfig
	}
	var expiresAt time.Time
	// nil cache duration means use the default cache duration
	if response.CacheDuration == nil {
		if p.defaultCacheDuration == 0 {
			return dockerConfig
		}
		expiresAt = p.clock.Now().Add(p.defaultCacheDuration)
	} else {
		expiresAt = p.clock.Now().Add(response.CacheDuration.Duration)
	}
	cachedEntry := &cacheEntry{
		key:         cacheKey,
		credentials: dockerConfig,
		expiresAt:   expiresAt,
	}
	// Cache failures are non-fatal: credentials are still returned and the
	// next request simply re-execs the plugin.
	if err := p.cache.Add(cachedEntry); err != nil {
		klog.Errorf("Error adding auth entry to cache: %v", err)
	}
	return dockerConfig
}
// Enabled always returns true since registration of the plugin via kubelet implies it should be enabled.
func (p *pluginProvider) Enabled() bool {
	return true
}
// isImageAllowed reports whether the image matches at least one of the
// plugin's configured match patterns.
func (p *pluginProvider) isImageAllowed(image string) bool {
	for _, pattern := range p.matchImages {
		// A match error is treated as "no match": a malformed pattern simply
		// fails to authorize the image.
		matched, _ := credentialprovider.URLsMatchStr(pattern, image)
		if matched {
			return true
		}
	}
	return false
}
// getCachedCredentials returns a credentialprovider.DockerConfig if cached from the plugin.
// The boolean result reports whether an unexpired entry was found.
func (p *pluginProvider) getCachedCredentials(image string) (credentialprovider.DockerConfig, bool, error) {
	p.Lock()
	if p.clock.Now().After(p.lastCachePurge.Add(cachePurgeInterval)) {
		// NewExpirationCache purges expired entries when List() is called
		// The expired entry in the cache is removed only when Get or List called on it.
		// List() is called on some interval to remove those expired entries on which Get is never called.
		_ = p.cache.List()
		p.lastCachePurge = p.clock.Now()
	}
	p.Unlock()

	// The key granularity the plugin chose (image, registry, or global) is not
	// known here, so probe the cache from most to least specific key.
	for _, key := range []string{image, parseRegistry(image), globalCacheKey} {
		obj, found, err := p.cache.GetByKey(key)
		if err != nil {
			return nil, false, err
		}
		if found {
			return obj.(*cacheEntry).credentials, true, nil
		}
	}
	return nil, false, nil
}
// Plugin is the interface calling ExecPlugin. This is mainly for testability
// so tests don't have to actually exec any processes.
type Plugin interface {
	// ExecPlugin requests credentials for the given image and returns the
	// decoded plugin response.
	ExecPlugin(ctx context.Context, image string) (*credentialproviderapi.CredentialProviderResponse, error)
}
// execPlugin is the implementation of the Plugin interface that execs a credential provider plugin based
// on its name provided in CredentialProviderConfig. It is assumed that the executable is available in the
// plugin directory provided by the kubelet.
type execPlugin struct {
	// name is both the provider name and the binary file name inside pluginBinDir.
	name string
	// apiVersion is the credentialprovider API group/version the plugin speaks.
	apiVersion string
	// encoder serializes requests into the plugin's apiVersion.
	encoder runtime.Encoder
	// args are extra command-line arguments passed to the binary.
	args []string
	// envVars are extra environment variables from the provider config.
	envVars []kubeletconfig.ExecEnvVar
	// pluginBinDir is the directory containing the plugin executable.
	pluginBinDir string
	// environ returns the current process environment; injectable for tests.
	environ func() []string
}
// ExecPlugin executes the plugin binary with arguments and environment variables specified in CredentialProviderConfig:
//
//	$ ENV_NAME=ENV_VALUE <plugin-name> args[0] args[1] <<<request
//
// The plugin is expected to receive the CredentialProviderRequest API via stdin from the kubelet and
// return CredentialProviderResponse via stdout.
func (e *execPlugin) ExecPlugin(ctx context.Context, image string) (*credentialproviderapi.CredentialProviderResponse, error) {
	klog.V(5).Infof("Getting image %s credentials from external exec plugin %s", image, e.name)
	authRequest := &credentialproviderapi.CredentialProviderRequest{Image: image}
	data, err := e.encodeRequest(authRequest)
	if err != nil {
		return nil, fmt.Errorf("failed to encode auth request: %w", err)
	}
	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}
	stdin := bytes.NewBuffer(data)
	// Use a catch-all timeout of 1 minute for all exec-based plugins, this should leave enough
	// head room in case a plugin needs to retry a failed request while ensuring an exec plugin
	// does not run forever. In the future we may want this timeout to be tweakable from the plugin
	// config file.
	ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
	defer cancel()
	cmd := exec.CommandContext(ctx, filepath.Join(e.pluginBinDir, e.name), e.args...)
	cmd.Stdout, cmd.Stderr, cmd.Stdin = stdout, stderr, stdin
	var configEnvVars []string
	for _, v := range e.envVars {
		configEnvVars = append(configEnvVars, fmt.Sprintf("%s=%s", v.Name, v.Value))
	}
	// Append current system environment variables, to the ones configured in the
	// credential provider file. Failing to do so may result in unsuccessful execution
	// of the provider binary, see https://github.com/kubernetes/kubernetes/issues/102750
	// also, this behaviour is inline with Credential Provider Config spec
	cmd.Env = mergeEnvVars(e.environ(), configEnvVars)
	if err = e.runPlugin(ctx, cmd, image); err != nil {
		return nil, fmt.Errorf("%w: %s", err, stderr.String())
	}
	data = stdout.Bytes()
	// check that the response apiVersion matches what is expected, before
	// spending any effort decoding the full response body
	gvk, err := json.DefaultMetaFactory.Interpret(data)
	if err != nil {
		return nil, fmt.Errorf("error reading GVK from response: %w", err)
	}
	if gvk.GroupVersion().String() != e.apiVersion {
		return nil, fmt.Errorf("apiVersion from credential plugin response did not match expected apiVersion:%s, actual apiVersion:%s", e.apiVersion, gvk.GroupVersion().String())
	}
	response, err := e.decodeResponse(data)
	if err != nil {
		// err is explicitly not wrapped since it may contain credentials in the response.
		return nil, errors.New("error decoding credential provider plugin response from stdout")
	}
	return response, nil
}
// runPlugin runs the prepared command, recording duration and error metrics
// for the plugin. A context error (timeout or cancellation) takes precedence
// over the raw exec error in the returned message.
func (e *execPlugin) runPlugin(ctx context.Context, cmd *exec.Cmd, image string) error {
	startTime := time.Now()
	defer func() {
		kubeletCredentialProviderPluginDuration.WithLabelValues(e.name).Observe(time.Since(startTime).Seconds())
	}()

	runErr := cmd.Run()
	if ctxErr := ctx.Err(); ctxErr != nil {
		runErr = ctxErr
	}
	if runErr != nil {
		kubeletCredentialProviderPluginErrors.WithLabelValues(e.name).Inc()
		return fmt.Errorf("error execing credential provider plugin %s for image %s: %w", e.name, image, runErr)
	}
	return nil
}
// encodeRequest serializes the internal CredentialProviderRequest into the
// JSON wire form of the plugin's configured API version.
func (e *execPlugin) encodeRequest(request *credentialproviderapi.CredentialProviderRequest) ([]byte, error) {
	encoded, err := runtime.Encode(e.encoder, request)
	if err != nil {
		return nil, fmt.Errorf("error encoding request: %w", err)
	}
	return encoded, nil
}
// decodeResponse deserializes plugin stdout into the internal
// CredentialProviderResponse type, validating the Kind and Group first.
func (e *execPlugin) decodeResponse(data []byte) (*credentialproviderapi.CredentialProviderResponse, error) {
	obj, gvk, err := codecs.UniversalDecoder().Decode(data, nil, nil)
	if err != nil {
		return nil, err
	}
	// Reject anything that is not a CredentialProviderResponse in our group
	// before attempting the concrete type conversion.
	switch {
	case gvk.Kind != "CredentialProviderResponse":
		return nil, fmt.Errorf("failed to decode CredentialProviderResponse, unexpected Kind: %q", gvk.Kind)
	case gvk.Group != credentialproviderapi.GroupName:
		return nil, fmt.Errorf("failed to decode CredentialProviderResponse, unexpected Group: %s", gvk.Group)
	}
	response, ok := obj.(*credentialproviderapi.CredentialProviderResponse)
	if !ok {
		return nil, fmt.Errorf("unable to convert %T to *CredentialProviderResponse", obj)
	}
	return response, nil
}
// parseRegistry extracts the registry hostname of an image (including port if
// specified), i.e. everything before the first '/'. The input is returned
// unchanged when it contains no '/'. Using IndexByte avoids the slice
// allocation strings.Split would make on this per-image-pull path.
func parseRegistry(image string) string {
	if i := strings.IndexByte(image, '/'); i >= 0 {
		return image[:i]
	}
	return image
}
// mergeEnvVars overlays system defined env vars with credential provider env
// vars. Provider vars are appended last so they take priority, allowing the
// user to override system env vars.
func mergeEnvVars(sysEnvVars, credProviderVars []string) []string {
	return append(sysEnvVars, credProviderVars...)
}
| newPluginProvider | identifier_name |
plugin.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"time"
"golang.org/x/sync/singleflight"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
credentialproviderapi "k8s.io/kubelet/pkg/apis/credentialprovider"
"k8s.io/kubelet/pkg/apis/credentialprovider/install"
credentialproviderv1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1"
credentialproviderv1alpha1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1alpha1"
credentialproviderv1beta1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1beta1"
"k8s.io/kubernetes/pkg/credentialprovider"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubeletconfigv1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1"
kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1alpha1"
kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1"
"k8s.io/utils/clock"
)
const (
globalCacheKey = "global"
cachePurgeInterval = time.Minute * 15
)
var (
scheme = runtime.NewScheme()
codecs = serializer.NewCodecFactory(scheme)
apiVersions = map[string]schema.GroupVersion{
credentialproviderv1alpha1.SchemeGroupVersion.String(): credentialproviderv1alpha1.SchemeGroupVersion,
credentialproviderv1beta1.SchemeGroupVersion.String(): credentialproviderv1beta1.SchemeGroupVersion,
credentialproviderv1.SchemeGroupVersion.String(): credentialproviderv1.SchemeGroupVersion,
}
)
// init registers the credential provider API versions and the kubelet config
// types into the package-level scheme used to (de)serialize plugin requests,
// responses, and the provider config file.
func init() {
	install.Install(scheme)
	// NOTE(review): the AddToScheme error returns are ignored below; presumably
	// they can only fail on duplicate registration — confirm this is intentional
	// rather than wrapping with utilruntime.Must.
	kubeletconfig.AddToScheme(scheme)
	kubeletconfigv1alpha1.AddToScheme(scheme)
	kubeletconfigv1beta1.AddToScheme(scheme)
	kubeletconfigv1.AddToScheme(scheme)
}
// RegisterCredentialProviderPlugins is called from kubelet to register external credential provider
// plugins according to the CredentialProviderConfig config file. It validates
// that the plugin binary directory and every configured plugin binary exist
// before registering each provider with the credentialprovider registry.
func RegisterCredentialProviderPlugins(pluginConfigFile, pluginBinDir string) error {
	if _, err := os.Stat(pluginBinDir); err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("plugin binary directory %s did not exist", pluginBinDir)
		}
		return fmt.Errorf("error inspecting binary directory %s: %w", pluginBinDir, err)
	}
	credentialProviderConfig, err := readCredentialProviderConfigFile(pluginConfigFile)
	if err != nil {
		return err
	}
	errs := validateCredentialProviderConfig(credentialProviderConfig)
	if len(errs) > 0 {
		return fmt.Errorf("failed to validate credential provider config: %v", errs.ToAggregate())
	}
	// Register metrics for credential providers
	registerMetrics()
	for _, provider := range credentialProviderConfig.Providers {
		// Each provider's binary must exist under pluginBinDir with the provider's name.
		pluginBin := filepath.Join(pluginBinDir, provider.Name)
		if _, err := os.Stat(pluginBin); err != nil {
			if os.IsNotExist(err) {
				return fmt.Errorf("plugin binary executable %s did not exist", pluginBin)
			}
			return fmt.Errorf("error inspecting binary executable %s: %w", pluginBin, err)
		}
		plugin, err := newPluginProvider(pluginBinDir, provider)
		if err != nil {
			return fmt.Errorf("error initializing plugin provider %s: %w", provider.Name, err)
		}
		credentialprovider.RegisterCredentialProvider(provider.Name, plugin)
	}
	return nil
}
// newPluginProvider returns a new pluginProvider based on the credential provider config.
func newPluginProvider(pluginBinDir string, provider kubeletconfig.CredentialProvider) (*pluginProvider, error) {
mediaType := "application/json"
info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
if !ok |
gv, ok := apiVersions[provider.APIVersion]
if !ok {
return nil, fmt.Errorf("invalid apiVersion: %q", provider.APIVersion)
}
clock := clock.RealClock{}
return &pluginProvider{
clock: clock,
matchImages: provider.MatchImages,
cache: cache.NewExpirationStore(cacheKeyFunc, &cacheExpirationPolicy{clock: clock}),
defaultCacheDuration: provider.DefaultCacheDuration.Duration,
lastCachePurge: clock.Now(),
plugin: &execPlugin{
name: provider.Name,
apiVersion: provider.APIVersion,
encoder: codecs.EncoderForVersion(info.Serializer, gv),
pluginBinDir: pluginBinDir,
args: provider.Args,
envVars: provider.Env,
environ: os.Environ,
},
}, nil
}
// pluginProvider is the plugin-based implementation of the DockerConfigProvider interface.
type pluginProvider struct {
clock clock.Clock
sync.Mutex
group singleflight.Group
// matchImages defines the matching image URLs this plugin should operate against.
// The plugin provider will not return any credentials for images that do not match
// against this list of match URLs.
matchImages []string
// cache stores DockerConfig entries with an expiration time based on the cache duration
// returned from the credential provider plugin.
cache cache.Store
// defaultCacheDuration is the default duration credentials are cached in-memory if the auth plugin
// response did not provide a cache duration for credentials.
defaultCacheDuration time.Duration
// plugin is the exec implementation of the credential providing plugin.
plugin Plugin
// lastCachePurge is the last time cache is cleaned for expired entries.
lastCachePurge time.Time
}
// cacheEntry is the cache object that will be stored in cache.Store.
type cacheEntry struct {
key string
credentials credentialprovider.DockerConfig
expiresAt time.Time
}
// cacheKeyFunc extracts AuthEntry.MatchKey as the cache key function for the plugin provider.
func cacheKeyFunc(obj interface{}) (string, error) {
key := obj.(*cacheEntry).key
return key, nil
}
// cacheExpirationPolicy defines implements cache.ExpirationPolicy, determining expiration based on the expiresAt timestamp.
type cacheExpirationPolicy struct {
clock clock.Clock
}
// IsExpired returns true if the current time is after cacheEntry.expiresAt, which is determined by the
// cache duration returned from the credential provider plugin response.
func (c *cacheExpirationPolicy) IsExpired(entry *cache.TimestampedEntry) bool {
return c.clock.Now().After(entry.Obj.(*cacheEntry).expiresAt)
}
// Provide returns a credentialprovider.DockerConfig based on the credentials returned
// from cache or the exec plugin.
func (p *pluginProvider) Provide(image string) credentialprovider.DockerConfig {
if !p.isImageAllowed(image) {
return credentialprovider.DockerConfig{}
}
cachedConfig, found, err := p.getCachedCredentials(image)
if err != nil {
klog.Errorf("Failed to get cached docker config: %v", err)
return credentialprovider.DockerConfig{}
}
if found {
return cachedConfig
}
// ExecPlugin is wrapped in single flight to exec plugin once for concurrent same image request.
// The caveat here is we don't know cacheKeyType yet, so if cacheKeyType is registry/global and credentials saved in cache
// on per registry/global basis then exec will be called for all requests if requests are made concurrently.
// foo.bar.registry
// foo.bar.registry/image1
// foo.bar.registry/image2
res, err, _ := p.group.Do(image, func() (interface{}, error) {
return p.plugin.ExecPlugin(context.Background(), image)
})
if err != nil {
klog.Errorf("Failed getting credential from external registry credential provider: %v", err)
return credentialprovider.DockerConfig{}
}
response, ok := res.(*credentialproviderapi.CredentialProviderResponse)
if !ok {
klog.Errorf("Invalid response type returned by external credential provider")
return credentialprovider.DockerConfig{}
}
var cacheKey string
switch cacheKeyType := response.CacheKeyType; cacheKeyType {
case credentialproviderapi.ImagePluginCacheKeyType:
cacheKey = image
case credentialproviderapi.RegistryPluginCacheKeyType:
registry := parseRegistry(image)
cacheKey = registry
case credentialproviderapi.GlobalPluginCacheKeyType:
cacheKey = globalCacheKey
default:
klog.Errorf("credential provider plugin did not return a valid cacheKeyType: %q", cacheKeyType)
return credentialprovider.DockerConfig{}
}
dockerConfig := make(credentialprovider.DockerConfig, len(response.Auth))
for matchImage, authConfig := range response.Auth {
dockerConfig[matchImage] = credentialprovider.DockerConfigEntry{
Username: authConfig.Username,
Password: authConfig.Password,
}
}
// cache duration was explicitly 0 so don't cache this response at all.
if response.CacheDuration != nil && response.CacheDuration.Duration == 0 {
return dockerConfig
}
var expiresAt time.Time
// nil cache duration means use the default cache duration
if response.CacheDuration == nil {
if p.defaultCacheDuration == 0 {
return dockerConfig
}
expiresAt = p.clock.Now().Add(p.defaultCacheDuration)
} else {
expiresAt = p.clock.Now().Add(response.CacheDuration.Duration)
}
cachedEntry := &cacheEntry{
key: cacheKey,
credentials: dockerConfig,
expiresAt: expiresAt,
}
if err := p.cache.Add(cachedEntry); err != nil {
klog.Errorf("Error adding auth entry to cache: %v", err)
}
return dockerConfig
}
// Enabled always returns true since registration of the plugin via kubelet implies it should be enabled.
func (p *pluginProvider) Enabled() bool {
return true
}
// isImageAllowed returns true if the image matches against the list of allowed matches by the plugin.
func (p *pluginProvider) isImageAllowed(image string) bool {
for _, matchImage := range p.matchImages {
if matched, _ := credentialprovider.URLsMatchStr(matchImage, image); matched {
return true
}
}
return false
}
// getCachedCredentials returns a credentialprovider.DockerConfig if cached from the plugin.
func (p *pluginProvider) getCachedCredentials(image string) (credentialprovider.DockerConfig, bool, error) {
p.Lock()
if p.clock.Now().After(p.lastCachePurge.Add(cachePurgeInterval)) {
// NewExpirationCache purges expired entries when List() is called
// The expired entry in the cache is removed only when Get or List called on it.
// List() is called on some interval to remove those expired entries on which Get is never called.
_ = p.cache.List()
p.lastCachePurge = p.clock.Now()
}
p.Unlock()
obj, found, err := p.cache.GetByKey(image)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
registry := parseRegistry(image)
obj, found, err = p.cache.GetByKey(registry)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
obj, found, err = p.cache.GetByKey(globalCacheKey)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
return nil, false, nil
}
// Plugin is the interface calling ExecPlugin. This is mainly for testability
// so tests don't have to actually exec any processes.
type Plugin interface {
ExecPlugin(ctx context.Context, image string) (*credentialproviderapi.CredentialProviderResponse, error)
}
// execPlugin is the implementation of the Plugin interface that execs a credential provider plugin based
// on it's name provided in CredentialProviderConfig. It is assumed that the executable is available in the
// plugin directory provided by the kubelet.
type execPlugin struct {
name string
apiVersion string
encoder runtime.Encoder
args []string
envVars []kubeletconfig.ExecEnvVar
pluginBinDir string
environ func() []string
}
// ExecPlugin executes the plugin binary with arguments and environment variables specified in CredentialProviderConfig:
//
// $ ENV_NAME=ENV_VALUE <plugin-name> args[0] args[1] <<<request
//
// The plugin is expected to receive the CredentialProviderRequest API via stdin from the kubelet and
// return CredentialProviderResponse via stdout.
func (e *execPlugin) ExecPlugin(ctx context.Context, image string) (*credentialproviderapi.CredentialProviderResponse, error) {
klog.V(5).Infof("Getting image %s credentials from external exec plugin %s", image, e.name)
authRequest := &credentialproviderapi.CredentialProviderRequest{Image: image}
data, err := e.encodeRequest(authRequest)
if err != nil {
return nil, fmt.Errorf("failed to encode auth request: %w", err)
}
stdout := &bytes.Buffer{}
stderr := &bytes.Buffer{}
stdin := bytes.NewBuffer(data)
// Use a catch-all timeout of 1 minute for all exec-based plugins, this should leave enough
// head room in case a plugin needs to retry a failed request while ensuring an exec plugin
// does not run forever. In the future we may want this timeout to be tweakable from the plugin
// config file.
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
cmd := exec.CommandContext(ctx, filepath.Join(e.pluginBinDir, e.name), e.args...)
cmd.Stdout, cmd.Stderr, cmd.Stdin = stdout, stderr, stdin
var configEnvVars []string
for _, v := range e.envVars {
configEnvVars = append(configEnvVars, fmt.Sprintf("%s=%s", v.Name, v.Value))
}
// Append current system environment variables, to the ones configured in the
// credential provider file. Failing to do so may result in unsuccessful execution
// of the provider binary, see https://github.com/kubernetes/kubernetes/issues/102750
// also, this behaviour is inline with Credential Provider Config spec
cmd.Env = mergeEnvVars(e.environ(), configEnvVars)
if err = e.runPlugin(ctx, cmd, image); err != nil {
return nil, fmt.Errorf("%w: %s", err, stderr.String())
}
data = stdout.Bytes()
// check that the response apiVersion matches what is expected
gvk, err := json.DefaultMetaFactory.Interpret(data)
if err != nil {
return nil, fmt.Errorf("error reading GVK from response: %w", err)
}
if gvk.GroupVersion().String() != e.apiVersion {
return nil, fmt.Errorf("apiVersion from credential plugin response did not match expected apiVersion:%s, actual apiVersion:%s", e.apiVersion, gvk.GroupVersion().String())
}
response, err := e.decodeResponse(data)
if err != nil {
// err is explicitly not wrapped since it may contain credentials in the response.
return nil, errors.New("error decoding credential provider plugin response from stdout")
}
return response, nil
}
func (e *execPlugin) runPlugin(ctx context.Context, cmd *exec.Cmd, image string) error {
startTime := time.Now()
defer func() {
kubeletCredentialProviderPluginDuration.WithLabelValues(e.name).Observe(time.Since(startTime).Seconds())
}()
err := cmd.Run()
if ctx.Err() != nil {
kubeletCredentialProviderPluginErrors.WithLabelValues(e.name).Inc()
return fmt.Errorf("error execing credential provider plugin %s for image %s: %w", e.name, image, ctx.Err())
}
if err != nil {
kubeletCredentialProviderPluginErrors.WithLabelValues(e.name).Inc()
return fmt.Errorf("error execing credential provider plugin %s for image %s: %w", e.name, image, err)
}
return nil
}
// encodeRequest encodes the internal CredentialProviderRequest type into the v1alpha1 version in json
func (e *execPlugin) encodeRequest(request *credentialproviderapi.CredentialProviderRequest) ([]byte, error) {
data, err := runtime.Encode(e.encoder, request)
if err != nil {
return nil, fmt.Errorf("error encoding request: %w", err)
}
return data, nil
}
// decodeResponse decodes data into the internal CredentialProviderResponse type
func (e *execPlugin) decodeResponse(data []byte) (*credentialproviderapi.CredentialProviderResponse, error) {
obj, gvk, err := codecs.UniversalDecoder().Decode(data, nil, nil)
if err != nil {
return nil, err
}
if gvk.Kind != "CredentialProviderResponse" {
return nil, fmt.Errorf("failed to decode CredentialProviderResponse, unexpected Kind: %q", gvk.Kind)
}
if gvk.Group != credentialproviderapi.GroupName {
return nil, fmt.Errorf("failed to decode CredentialProviderResponse, unexpected Group: %s", gvk.Group)
}
if internalResponse, ok := obj.(*credentialproviderapi.CredentialProviderResponse); ok {
return internalResponse, nil
}
return nil, fmt.Errorf("unable to convert %T to *CredentialProviderResponse", obj)
}
// parseRegistry extracts the registry hostname of an image (including port if specified).
func parseRegistry(image string) string {
imageParts := strings.Split(image, "/")
return imageParts[0]
}
// mergedEnvVars overlays system defined env vars with credential provider env vars,
// it gives priority to the credential provider vars allowing user to override system
// env vars
func mergeEnvVars(sysEnvVars, credProviderVars []string) []string {
mergedEnvVars := sysEnvVars
for _, credProviderVar := range credProviderVars {
mergedEnvVars = append(mergedEnvVars, credProviderVar)
}
return mergedEnvVars
}
| {
return nil, fmt.Errorf("unsupported media type %q", mediaType)
} | conditional_block |
Article.js | import React from "react";
import { makeStyles } from "@material-ui/core/styles";
import Grid from "@material-ui/core/Grid";
import Paper from "@material-ui/core/Paper";
import { Link } from "react-router-dom";
import HomeBar from "./Home-bar";
import Claps from "./Article-part/Claps";
import Avatar from "@material-ui/core/Avatar";
import "./App.css";
import "./Home/Home.css";
// useStyles resolves the JSS style rules for the Article page through
// Material-UI's Hook API; `theme` is the active MUI theme object.
const useStyles = makeStyles(theme => ({
  root: {
    flexGrow: 1
  },
  // Fixed-size, chrome-less Paper used in the related-posts grid.
  paper: {
    height: 480,
    width: 400,
    boxShadow: "none",
    background: "none"
  },
  control: {
    padding: theme.spacing(2)
  },
  underRelated: {
    background: "#424242"
  },
  // Author row: avatar and name aligned on one line.
  relatedProfile: {
    display: "flex",
    alignItems: "center",
    fontSize: "20px"
  },
  bottom: {
    background: "none",
    boxShadow: "none",
    color: "#fff",
    width: "380px",
    textAlign: "left"
  },
  // Plain-looking link for related-post titles.
  linkRelated: {
    textDecoration: "none",
    color: "black",
    fontFamily: "Baskerville Old face",
    textAlign: "left"
  },
  relatedPostAvatar: {
    bottom: "0",
    marginRight: "10px"
  }
}));
function | () {
var [ claps, setClaps] = React.useState(0);
const follows = [
{
image:
"https://upload.wikimedia.org/wikipedia/commons/a/a7/20180602_FIFA_Friendly_Match_Austria_vs._Germany_Mesut_%C3%96zil_850_0704.jpg",
nama: "Amin Subagiyo",
comment:
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
},
{
image:
"https://cdn.i-scmp.com/sites/default/files/styles/768x768/public/d8/images/methode/2019/12/13/6b06cb22-1ca7-11ea-8971-922fdc94075f_image_hires_132744.jpg?itok=XditGQBc&v=1576214873",
nama: "King Salman",
comment:
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
}
];
const [spacing, setSpacing] = React.useState(2);
const classes = useStyles();
const handleChange = event => {
setSpacing(Number(event.target.value));
};
return (
<div className="post">
<HomeBar />
<div>
<br></br>
<br></br>
<br></br>
<img src="https://miro.medium.com/max/2000/1*-T8oo_JoKkMxfnPKLt_Ciw.jpeg" alt="-" />
<h1>If You Only Read A Few Books In 2018, Read These</h1>
<p id="main">
If you’d liked to be jerked around less, provoked less, and more
productive and inwardly focused, where should you start? To me, the
answer is obvious: by turning to wisdom. Below is a list of 21 books
that will help lead you to a better, stronger 2018. Deep Work: Rules
for Focused Success in a Distracted World by Cal Newport Media
consumption went way up in 2017. For most of us, that meant happiness
and productivity went way down. The world is becoming noisier and will
become more so every day. If you can’t cultivate the ability to have
quiet, insightful, deeply focused periods of productive work, you’re
going to get screwed. This is a book that explains how to cultivate
and protect that skill — the ability to do deep work. I strongly urge
you to begin this practice in 2018— if you want to get anything done
or perform your best. The Subtle Art of Not Giving a F*ck: A
Counterintuitive Approach to Living a Good Life by Mark Manson To me,
practical philosophy has always been the art knowing what to — and
what not to — give a fuck about. That’s what Mark’s book is about.
It’s not about apathy. It’s about cultivating indifference to things
that don’t matter. Be careful, as Marcus Aurelius warns, not to give
the little things more time and thought they deserved. Maybe looking
back at this year reveals how much effort you’ve frittered away
worrying about the trivial. If so, let 2018 be a year that you only
devote energy to things that truly matter — get the important things
right by ignoring the insignificant. The Way to Love: The Last
Meditations of Anthony de Mello by Anthony de Mello Coach Shaka Smart
recommended this little book (and it’s a little book, probably the
smallest I’ve ever read. It fits in your palm). But it’s an incredibly
wise and helpful read. Written by a Catholic Priest who’d lived in
India, the book has this unusual convergence of eastern and western
thought. One of my favorite lines: “The question to ask is not ‘What’s
wrong with this person?’ but ‘What does this irritation tell me about
myself?’ I plan on regularly revisiting it throughout 2018. But What
If We’re Wrong by Chuck Klosterman It’s always good to remind
ourselves that almost everything we’re certain about will probably be
eventually proven wrong. Klosterman’s subtitle — Thinking About the
Present As If It Were the Past — is a brilliant exercise for getting
some perspective in 2018. Whether you think it’s going to be a year of
radical change for the better or a horrible year of excesses of
dangerous precedent, you’re probably wrong. You’re probably not even
in the ballpark. This book shows you why, not with lectures about
politics, but with a bunch of awesome thought experiments about music,
books, movies and science. Rules for Radicals: A Practical Primer for
Realistic Radicals by Saul Alinsky If Hillary Clinton had remembered
the lessons of Saul Alinsky (who she wrote her college thesis on), the
election may have turned out differently. Why? A notorious strategist
and community organizer, Alinsky was a die hard pragmatist, but he
also knew how to tell a story and create a collective cause. He could
work within the system but knew how to shake it up and generate
attention. This book is a classic and woefully underrated. Whatever
you set out to do in 2018, this book can provide you with strategic
guidance and insight. The Filter Bubble by Eli Pariser / Trust Me I’m
Lying by Ryan Holiday / The Brass Check by Upton Sinclair I strongly
recommend that you take the time in 2018 to read these books. In light
of this year, you owe it to yourself to study and better understand
how our media system works. In The Filter Bubble, Eli Pariser warns of
the danger of living in bubbles of personalization that reinforce and
insulate our worldview. Though Sinclair’s The Brass Check has been
almost entirely forgotten by history, it’s not only fascinating but a
timeless perspective. Sinclair deeply understood the economic
incentives of early 20th century journalism and thus could predict and
analyze the manipulative effect it had on The Truth. I used that book
as a model for my expose of the media system, Trust Me, I’m Lying.
Today, the incentives and pressures are different but they warp our
information in a similar way. In almost every substantial charge Upton
leveled against the yellow press, you could, today, sub in blogs and
the cable news cycle and be even more correct. 48 Laws of Power / 33
Strategies of War by Robert Greene Robert Greene is a master of human
psychology and human dynamics — he has a profound ability to explain
timeless truths through story and example. You can read the classics
and not always understand the lessons. But if you read Robert’s books,
I promise you will leave not just with actionable lessons but an
indelible sense of what to do in many trying and confusing situations.
I wrote earlier this year that strategic wisdom is not something we
are born with — but the lessons are there for us to pick up. Pick
these two up before the year ends and operate the next with a
strategic mindset and clarity. Conspiracy: Peter Thiel, Hulk Hogan,
Gawker, and the Anatomy of Intrigue by Ryan Holiday — If you want to
immerse yourself in the above topics of media and strategy, and are
looking for one book to teach you lessons in both, my book on the
nearly decade-long conspiracy that billionaire Peter Thiel waged
against Gawker will do this for you. This is a stunning story about
how power works in the modern age, and is a masterclass in strategy
and how to accomplish wildly ambitious aims. The Road To Character by
David Brooks When General Stanley McChrystal was asked on the Tim
Ferriss podcastwhat was a recent purchase that had most positively
impacted his life, he pointed to this book. I agree. It can be a bit
stilted and dense at times, but it should be assigned reading to any
young person today (a little challenge is a good thing). Illustrating
with examples and stories from great men and women, Brooks admonishes
the reader to undertake their own journey of character perfection. In
my own book, I explore the same topic (humility) from a different
angle using similar stories — I’m attacking ego, he’s building up
character. Both will be important for the next year. The Dip by Seth
Godin This book is a short 70 pages and it looks like something
someone would give as a joke gift, but it’s anything but. Godin talks
frankly about quitting and pushing through — and when to do each. Quit
when you’ll be mediocre, when the returns aren’t worth the investment,
when you no longer think you’ll enjoy the ends. Stick when the dip is
the obstacle that creates scarcity, when you’re simply bridging the
gap between beginner’s luck and mastery. I promise, next year you are
guaranteed to find yourself in moments when you don’t know what is the
right answer. This book will help you find it. Hillbilly Elegy: A
Memoir of a Family and Culture in Crisis by J. D. Vance / Strangers in
Their Own Land: Anger and Mourning on the American Right by Arlie
Russell Hochschild You might describe Hillbilly Elegy as a Ta-Nehisi
Coates style memoir about a community that — at least in progressive
circles — gets a lot less attention: disaffected, impoverished whites
(particularly in the mid-east and South). I thought the book was
empathetic, self-aware and inspiring. The author pokes some holes in
the concept of “white privilege” — certainly a third or fourth
generation hillbilly in Kentucky doesn’t walk around feeling like they
have it easy — and an explanation of some of the phenomenon behind
Donald Trump (notice I said explanation, not an excuse).
</p>
<Claps />
<hr width="800px" color="lightgrey"></hr>
{follows.map((follow, index) => (
<div id={index} className="follow">
<div className="image">
<img src={follow.image} alt="-" />
</div>
<div className="comment">
<strong>{follow.nama}</strong>
<p>{follow.comment}</p>
</div>
<div className="follow-button">
<div>Follow</div>
</div>
</div>
))}
<div className="response-button">
<p>
<Link to="/Comment" id="link-response">
Response
</Link>
</p>
</div>
<br></br>
<br></br>
<div className="related-post">
<h3 style={{ float: "left", marginLeft: "40px" }}>
More From Medium
</h3>
<Grid container className={classes.root} spacing={2}>
<Grid item xs={12}>
<Grid container justifyContent="center" spacing={spacing}>
<Grid item>
<Paper className={classes.paper}>
<img src="http://bisnisbandung.com/wp-content/uploads/2017/12/youtube-crowd-uproar-protest-ss-19201920.jpg" alt="-" />
<Link className={classes.linkRelated}>
<h2>We Can Visit You Tube to Get Much of Video</h2>
</Link>
<div className={classes.relatedProfile}>
<Avatar
alt="Remy Sharp"
src="/broken-image.jpg"
className={classes.relatedPostAvatar}
>
<img
style={{ height: "100%" }}
src="https://miro.medium.com/max/960/1*K7Gi5y5g882yBNyhAEv84A.jpeg"
alt="-"
/>
</Avatar>
<p>Jason Voorhees</p>
</div>
</Paper>
</Grid>
<Grid item>
<Paper className={classes.paper}>
<img src="https://cnet1.cbsistatic.com/img/eOEoOQ4eSDQPxbPCJASBnbsjpmQ=/1092x0/2019/08/14/d9363e04-fd4f-4a81-97ae-88a25d5feef0/gettyimages-858489898.jpg" alt="-" />
<Link className={classes.linkRelated}>
<h2>You Can Meet Your Favourite Artist Here</h2>
</Link>
<div className={classes.relatedProfile}>
<Avatar
alt="Remy Sharp"
src="/broken-image.jpg"
className={classes.relatedPostAvatar}
>
<img
style={{ height: "100%" }}
src="https://upload.wikimedia.org/wikipedia/commons/0/01/CoreyTaylorGmm.jpg"
alt="-"
/>
</Avatar>
<p>Corey Taylor</p>
</div>
</Paper>
</Grid>
<Grid item>
<Paper className={classes.paper}>
<img src="https://devclass.com/wp-content/uploads/2018/12/Oracle.jpg" alt="-" />
<Link className={classes.linkRelated}>
<h2>Oracle is The World Most Bigger DBMS</h2>
</Link>
<div className={classes.relatedProfile}>
<Avatar
alt="Remy Sharp"
className={classes.relatedPostAvatar}
>
<img
style={{ height: "100%" }}
src="https://gossipgist.com/uploads/19/lionel-messi.jpg"
alt="-"
/>
</Avatar>
<p>Lionel Messi</p>
</div>
</Paper>
</Grid>
</Grid>
</Grid>
</Grid>
<div className={classes.underRelated}>
<Grid container className={classes.root} spacing={2}>
<Grid item xs={12}>
<Grid container justifyContent="center" spacing={spacing}>
<Grid item>
<Paper className={classes.bottom}>
<h3>Discover Medium</h3>
<p>
Welcome to a place where words matter. On Medium, smart
voices and original ideas take center stage - with no
ads in sight.
</p>
</Paper>
</Grid>
<Grid item>
<Paper className={classes.bottom}>
<h3>Make Medium Yours</h3>
<p>
Follow all the topics you care about, and we’ll deliver
the best stories for you to your homepage and inbox.
Explore
</p>
</Paper>
</Grid>
<Grid item>
<Paper className={classes.bottom}>
<h3>Become a Member</h3>
<p>
Get unlimited access to the best stories on Medium — and
support writers while you’re at it. Just $5/month.
Upgrade
</p>
</Paper>
</Grid>
</Grid>
</Grid>
</Grid>
<hr
style={{ width: "90%", marginTop: 30, color: "lightgrey" }}
></hr>
<br></br>
<br></br>
<h2
style={{
textAlign: "left",
color: "#fff",
fontFamily: "Baskerville Old Face",
position: "relative",
left: "85px"
}}
>
Medium
</h2>
<br></br>
<br></br>
</div>
</div>
</div>
</div>
);
}
export default Article;
| Article | identifier_name |
Article.js | import React from "react";
import { makeStyles } from "@material-ui/core/styles";
import Grid from "@material-ui/core/Grid";
import Paper from "@material-ui/core/Paper";
import { Link } from "react-router-dom";
import HomeBar from "./Home-bar";
import Claps from "./Article-part/Claps";
import Avatar from "@material-ui/core/Avatar";
import "./App.css";
import "./Home/Home.css";
const useStyles = makeStyles(theme => ({
root: {
flexGrow: 1
},
paper: {
height: 480,
width: 400,
boxShadow: "none",
background: "none"
},
control: {
padding: theme.spacing(2)
},
underRelated: {
background: "#424242"
},
relatedProfile: {
display: "flex",
alignItems: "center",
fontSize: "20px"
},
bottom: {
background: "none",
boxShadow: "none",
color: "#fff",
width: "380px",
textAlign: "left"
},
linkRelated: {
textDecoration: "none",
color: "black",
fontFamily: "Baskerville Old face",
textAlign: "left"
},
relatedPostAvatar: {
bottom: "0",
marginRight: "10px"
}
}));
function Article() {
var [ claps, setClaps] = React.useState(0);
const follows = [
{
image:
"https://upload.wikimedia.org/wikipedia/commons/a/a7/20180602_FIFA_Friendly_Match_Austria_vs._Germany_Mesut_%C3%96zil_850_0704.jpg",
nama: "Amin Subagiyo",
comment:
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
},
{
image:
"https://cdn.i-scmp.com/sites/default/files/styles/768x768/public/d8/images/methode/2019/12/13/6b06cb22-1ca7-11ea-8971-922fdc94075f_image_hires_132744.jpg?itok=XditGQBc&v=1576214873",
nama: "King Salman",
comment:
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
}
];
const [spacing, setSpacing] = React.useState(2);
const classes = useStyles();
const handleChange = event => {
setSpacing(Number(event.target.value));
};
return (
<div className="post">
<HomeBar />
<div>
<br></br>
<br></br>
<br></br>
<img src="https://miro.medium.com/max/2000/1*-T8oo_JoKkMxfnPKLt_Ciw.jpeg" alt="-" />
<h1>If You Only Read A Few Books In 2018, Read These</h1>
<p id="main">
If you’d liked to be jerked around less, provoked less, and more
productive and inwardly focused, where should you start? To me, the
answer is obvious: by turning to wisdom. Below is a list of 21 books
that will help lead you to a better, stronger 2018. Deep Work: Rules
for Focused Success in a Distracted World by Cal Newport Media
consumption went way up in 2017. For most of us, that meant happiness
and productivity went way down. The world is becoming noisier and will
become more so every day. If you can’t cultivate the ability to have
quiet, insightful, deeply focused periods of productive work, you’re
going to get screwed. This is a book that explains how to cultivate
and protect that skill — the ability to do deep work. I strongly urge
you to begin this practice in 2018— if you want to get anything done
or perform your best. The Subtle Art of Not Giving a F*ck: A
Counterintuitive Approach to Living a Good Life by Mark Manson To me,
practical philosophy has always been the art knowing what to — and
what not to — give a fuck about. That’s what Mark’s book is about.
It’s not about apathy. It’s about cultivating indifference to things
that don’t matter. Be careful, as Marcus Aurelius warns, not to give
the little things more time and thought they deserved. Maybe looking
back at this year reveals how much effort you’ve frittered away
worrying about the trivial. If so, let 2018 be a year that you only
devote energy to things that truly matter — get the important things
right by ignoring the insignificant. The Way to Love: The Last
Meditations of Anthony de Mello by Anthony de Mello Coach Shaka Smart
recommended this little book (and it’s a little book, probably the
smallest I’ve ever read. It fits in your palm). But it’s an incredibly
wise and helpful read. Written by a Catholic Priest who’d lived in
India, the book has this unusual convergence of eastern and western
thought. One of my favorite lines: “The question to ask is not ‘What’s
wrong with this person?’ but ‘What does this irritation tell me about
myself?’ I plan on regularly revisiting it throughout 2018. But What
If We’re Wrong by Chuck Klosterman It’s always good to remind
ourselves that almost everything we’re certain about will probably be
eventually proven wrong. Klosterman’s subtitle — Thinking About the
Present As If It Were the Past — is a brilliant exercise for getting
some perspective in 2018. Whether you think it’s going to be a year of
radical change for the better or a horrible year of excesses of
dangerous precedent, you’re probably wrong. You’re probably not even
in the ballpark. This book shows you why, not with lectures about
politics, but with a bunch of awesome thought experiments about music,
books, movies and science. Rules for Radicals: A Practical Primer for
Realistic Radicals by Saul Alinsky If Hillary Clinton had remembered
the lessons of Saul Alinsky (who she wrote her college thesis on), the
election may have turned out differently. Why? A notorious strategist
and community organizer, Alinsky was a die hard pragmatist, but he | Lying by Ryan Holiday / The Brass Check by Upton Sinclair I strongly
recommend that you take the time in 2018 to read these books. In light
of this year, you owe it to yourself to study and better understand
how our media system works. In The Filter Bubble, Eli Pariser warns of
the danger of living in bubbles of personalization that reinforce and
insulate our worldview. Though Sinclair’s The Brass Check has been
almost entirely forgotten by history, it’s not only fascinating but a
timeless perspective. Sinclair deeply understood the economic
incentives of early 20th century journalism and thus could predict and
analyze the manipulative effect it had on The Truth. I used that book
as a model for my expose of the media system, Trust Me, I’m Lying.
Today, the incentives and pressures are different but they warp our
information in a similar way. In almost every substantial charge Upton
leveled against the yellow press, you could, today, sub in blogs and
the cable news cycle and be even more correct. 48 Laws of Power / 33
Strategies of War by Robert Greene Robert Greene is a master of human
psychology and human dynamics — he has a profound ability to explain
timeless truths through story and example. You can read the classics
and not always understand the lessons. But if you read Robert’s books,
I promise you will leave not just with actionable lessons but an
indelible sense of what to do in many trying and confusing situations.
I wrote earlier this year that strategic wisdom is not something we
are born with — but the lessons are there for us to pick up. Pick
these two up before the year ends and operate the next with a
strategic mindset and clarity. Conspiracy: Peter Thiel, Hulk Hogan,
Gawker, and the Anatomy of Intrigue by Ryan Holiday — If you want to
immerse yourself in the above topics of media and strategy, and are
looking for one book to teach you lessons in both, my book on the
nearly decade-long conspiracy that billionaire Peter Thiel waged
against Gawker will do this for you. This is a stunning story about
how power works in the modern age, and is a masterclass in strategy
and how to accomplish wildly ambitious aims. The Road To Character by
David Brooks When General Stanley McChrystal was asked on the Tim
Ferriss podcastwhat was a recent purchase that had most positively
impacted his life, he pointed to this book. I agree. It can be a bit
stilted and dense at times, but it should be assigned reading to any
young person today (a little challenge is a good thing). Illustrating
with examples and stories from great men and women, Brooks admonishes
the reader to undertake their own journey of character perfection. In
my own book, I explore the same topic (humility) from a different
angle using similar stories — I’m attacking ego, he’s building up
character. Both will be important for the next year. The Dip by Seth
Godin This book is a short 70 pages and it looks like something
someone would give as a joke gift, but it’s anything but. Godin talks
frankly about quitting and pushing through — and when to do each. Quit
when you’ll be mediocre, when the returns aren’t worth the investment,
when you no longer think you’ll enjoy the ends. Stick when the dip is
the obstacle that creates scarcity, when you’re simply bridging the
gap between beginner’s luck and mastery. I promise, next year you are
guaranteed to find yourself in moments when you don’t know what is the
right answer. This book will help you find it. Hillbilly Elegy: A
Memoir of a Family and Culture in Crisis by J. D. Vance / Strangers in
Their Own Land: Anger and Mourning on the American Right by Arlie
Russell Hochschild You might describe Hillbilly Elegy as a Ta-Nehisi
Coates style memoir about a community that — at least in progressive
circles — gets a lot less attention: disaffected, impoverished whites
(particularly in the mid-east and South). I thought the book was
empathetic, self-aware and inspiring. The author pokes some holes in
the concept of “white privilege” — certainly a third or fourth
generation hillbilly in Kentucky doesn’t walk around feeling like they
have it easy — and an explanation of some of the phenomenon behind
Donald Trump (notice I said explanation, not an excuse).
</p>
<Claps />
<hr width="800px" color="lightgrey"></hr>
{follows.map((follow, index) => (
<div id={index} className="follow">
<div className="image">
<img src={follow.image} alt="-" />
</div>
<div className="comment">
<strong>{follow.nama}</strong>
<p>{follow.comment}</p>
</div>
<div className="follow-button">
<div>Follow</div>
</div>
</div>
))}
<div className="response-button">
<p>
<Link to="/Comment" id="link-response">
Response
</Link>
</p>
</div>
<br></br>
<br></br>
<div className="related-post">
<h3 style={{ float: "left", marginLeft: "40px" }}>
More From Medium
</h3>
<Grid container className={classes.root} spacing={2}>
<Grid item xs={12}>
<Grid container justifyContent="center" spacing={spacing}>
<Grid item>
<Paper className={classes.paper}>
<img src="http://bisnisbandung.com/wp-content/uploads/2017/12/youtube-crowd-uproar-protest-ss-19201920.jpg" alt="-" />
<Link className={classes.linkRelated}>
<h2>We Can Visit You Tube to Get Much of Video</h2>
</Link>
<div className={classes.relatedProfile}>
<Avatar
alt="Remy Sharp"
src="/broken-image.jpg"
className={classes.relatedPostAvatar}
>
<img
style={{ height: "100%" }}
src="https://miro.medium.com/max/960/1*K7Gi5y5g882yBNyhAEv84A.jpeg"
alt="-"
/>
</Avatar>
<p>Jason Voorhees</p>
</div>
</Paper>
</Grid>
<Grid item>
<Paper className={classes.paper}>
<img src="https://cnet1.cbsistatic.com/img/eOEoOQ4eSDQPxbPCJASBnbsjpmQ=/1092x0/2019/08/14/d9363e04-fd4f-4a81-97ae-88a25d5feef0/gettyimages-858489898.jpg" alt="-" />
<Link className={classes.linkRelated}>
<h2>You Can Meet Your Favourite Artist Here</h2>
</Link>
<div className={classes.relatedProfile}>
<Avatar
alt="Remy Sharp"
src="/broken-image.jpg"
className={classes.relatedPostAvatar}
>
<img
style={{ height: "100%" }}
src="https://upload.wikimedia.org/wikipedia/commons/0/01/CoreyTaylorGmm.jpg"
alt="-"
/>
</Avatar>
<p>Corey Taylor</p>
</div>
</Paper>
</Grid>
<Grid item>
<Paper className={classes.paper}>
<img src="https://devclass.com/wp-content/uploads/2018/12/Oracle.jpg" alt="-" />
<Link className={classes.linkRelated}>
<h2>Oracle is The World Most Bigger DBMS</h2>
</Link>
<div className={classes.relatedProfile}>
<Avatar
alt="Remy Sharp"
className={classes.relatedPostAvatar}
>
<img
style={{ height: "100%" }}
src="https://gossipgist.com/uploads/19/lionel-messi.jpg"
alt="-"
/>
</Avatar>
<p>Lionel Messi</p>
</div>
</Paper>
</Grid>
</Grid>
</Grid>
</Grid>
<div className={classes.underRelated}>
<Grid container className={classes.root} spacing={2}>
<Grid item xs={12}>
<Grid container justifyContent="center" spacing={spacing}>
<Grid item>
<Paper className={classes.bottom}>
<h3>Discover Medium</h3>
<p>
Welcome to a place where words matter. On Medium, smart
voices and original ideas take center stage - with no
ads in sight.
</p>
</Paper>
</Grid>
<Grid item>
<Paper className={classes.bottom}>
<h3>Make Medium Yours</h3>
<p>
Follow all the topics you care about, and we’ll deliver
the best stories for you to your homepage and inbox.
Explore
</p>
</Paper>
</Grid>
<Grid item>
<Paper className={classes.bottom}>
<h3>Become a Member</h3>
<p>
Get unlimited access to the best stories on Medium — and
support writers while you’re at it. Just $5/month.
Upgrade
</p>
</Paper>
</Grid>
</Grid>
</Grid>
</Grid>
<hr
style={{ width: "90%", marginTop: 30, color: "lightgrey" }}
></hr>
<br></br>
<br></br>
<h2
style={{
textAlign: "left",
color: "#fff",
fontFamily: "Baskerville Old Face",
position: "relative",
left: "85px"
}}
>
Medium
</h2>
<br></br>
<br></br>
</div>
</div>
</div>
</div>
);
}
export default Article; | also knew how to tell a story and create a collective cause. He could
work within the system but knew how to shake it up and generate
attention. This book is a classic and woefully underrated. Whatever
you set out to do in 2018, this book can provide you with strategic
guidance and insight. The Filter Bubble by Eli Pariser / Trust Me I’m | random_line_split |
Article.js | import React from "react";
import { makeStyles } from "@material-ui/core/styles";
import Grid from "@material-ui/core/Grid";
import Paper from "@material-ui/core/Paper";
import { Link } from "react-router-dom";
import HomeBar from "./Home-bar";
import Claps from "./Article-part/Claps";
import Avatar from "@material-ui/core/Avatar";
import "./App.css";
import "./Home/Home.css";
const useStyles = makeStyles(theme => ({
root: {
flexGrow: 1
},
paper: {
height: 480,
width: 400,
boxShadow: "none",
background: "none"
},
control: {
padding: theme.spacing(2)
},
underRelated: {
background: "#424242"
},
relatedProfile: {
display: "flex",
alignItems: "center",
fontSize: "20px"
},
bottom: {
background: "none",
boxShadow: "none",
color: "#fff",
width: "380px",
textAlign: "left"
},
linkRelated: {
textDecoration: "none",
color: "black",
fontFamily: "Baskerville Old face",
textAlign: "left"
},
relatedPostAvatar: {
bottom: "0",
marginRight: "10px"
}
}));
function Article() | {
var [ claps, setClaps] = React.useState(0);
const follows = [
{
image:
"https://upload.wikimedia.org/wikipedia/commons/a/a7/20180602_FIFA_Friendly_Match_Austria_vs._Germany_Mesut_%C3%96zil_850_0704.jpg",
nama: "Amin Subagiyo",
comment:
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
},
{
image:
"https://cdn.i-scmp.com/sites/default/files/styles/768x768/public/d8/images/methode/2019/12/13/6b06cb22-1ca7-11ea-8971-922fdc94075f_image_hires_132744.jpg?itok=XditGQBc&v=1576214873",
nama: "King Salman",
comment:
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
}
];
const [spacing, setSpacing] = React.useState(2);
const classes = useStyles();
const handleChange = event => {
setSpacing(Number(event.target.value));
};
return (
<div className="post">
<HomeBar />
<div>
<br></br>
<br></br>
<br></br>
<img src="https://miro.medium.com/max/2000/1*-T8oo_JoKkMxfnPKLt_Ciw.jpeg" alt="-" />
<h1>If You Only Read A Few Books In 2018, Read These</h1>
<p id="main">
If you’d liked to be jerked around less, provoked less, and more
productive and inwardly focused, where should you start? To me, the
answer is obvious: by turning to wisdom. Below is a list of 21 books
that will help lead you to a better, stronger 2018. Deep Work: Rules
for Focused Success in a Distracted World by Cal Newport Media
consumption went way up in 2017. For most of us, that meant happiness
and productivity went way down. The world is becoming noisier and will
become more so every day. If you can’t cultivate the ability to have
quiet, insightful, deeply focused periods of productive work, you’re
going to get screwed. This is a book that explains how to cultivate
and protect that skill — the ability to do deep work. I strongly urge
you to begin this practice in 2018— if you want to get anything done
or perform your best. The Subtle Art of Not Giving a F*ck: A
Counterintuitive Approach to Living a Good Life by Mark Manson To me,
practical philosophy has always been the art knowing what to — and
what not to — give a fuck about. That’s what Mark’s book is about.
It’s not about apathy. It’s about cultivating indifference to things
that don’t matter. Be careful, as Marcus Aurelius warns, not to give
the little things more time and thought they deserved. Maybe looking
back at this year reveals how much effort you’ve frittered away
worrying about the trivial. If so, let 2018 be a year that you only
devote energy to things that truly matter — get the important things
right by ignoring the insignificant. The Way to Love: The Last
Meditations of Anthony de Mello by Anthony de Mello Coach Shaka Smart
recommended this little book (and it’s a little book, probably the
smallest I’ve ever read. It fits in your palm). But it’s an incredibly
wise and helpful read. Written by a Catholic Priest who’d lived in
India, the book has this unusual convergence of eastern and western
thought. One of my favorite lines: “The question to ask is not ‘What’s
wrong with this person?’ but ‘What does this irritation tell me about
myself?’ I plan on regularly revisiting it throughout 2018. But What
If We’re Wrong by Chuck Klosterman It’s always good to remind
ourselves that almost everything we’re certain about will probably be
eventually proven wrong. Klosterman’s subtitle — Thinking About the
Present As If It Were the Past — is a brilliant exercise for getting
some perspective in 2018. Whether you think it’s going to be a year of
radical change for the better or a horrible year of excesses of
dangerous precedent, you’re probably wrong. You’re probably not even
in the ballpark. This book shows you why, not with lectures about
politics, but with a bunch of awesome thought experiments about music,
books, movies and science. Rules for Radicals: A Practical Primer for
Realistic Radicals by Saul Alinsky If Hillary Clinton had remembered
the lessons of Saul Alinsky (who she wrote her college thesis on), the
election may have turned out differently. Why? A notorious strategist
and community organizer, Alinsky was a die hard pragmatist, but he
also knew how to tell a story and create a collective cause. He could
work within the system but knew how to shake it up and generate
attention. This book is a classic and woefully underrated. Whatever
you set out to do in 2018, this book can provide you with strategic
guidance and insight. The Filter Bubble by Eli Pariser / Trust Me I’m
Lying by Ryan Holiday / The Brass Check by Upton Sinclair I strongly
recommend that you take the time in 2018 to read these books. In light
of this year, you owe it to yourself to study and better understand
how our media system works. In The Filter Bubble, Eli Pariser warns of
the danger of living in bubbles of personalization that reinforce and
insulate our worldview. Though Sinclair’s The Brass Check has been
almost entirely forgotten by history, it’s not only fascinating but a
timeless perspective. Sinclair deeply understood the economic
incentives of early 20th century journalism and thus could predict and
analyze the manipulative effect it had on The Truth. I used that book
as a model for my expose of the media system, Trust Me, I’m Lying.
Today, the incentives and pressures are different but they warp our
information in a similar way. In almost every substantial charge Upton
leveled against the yellow press, you could, today, sub in blogs and
the cable news cycle and be even more correct. 48 Laws of Power / 33
Strategies of War by Robert Greene Robert Greene is a master of human
psychology and human dynamics — he has a profound ability to explain
timeless truths through story and example. You can read the classics
and not always understand the lessons. But if you read Robert’s books,
I promise you will leave not just with actionable lessons but an
indelible sense of what to do in many trying and confusing situations.
I wrote earlier this year that strategic wisdom is not something we
are born with — but the lessons are there for us to pick up. Pick
these two up before the year ends and operate the next with a
strategic mindset and clarity. Conspiracy: Peter Thiel, Hulk Hogan,
Gawker, and the Anatomy of Intrigue by Ryan Holiday — If you want to
immerse yourself in the above topics of media and strategy, and are
looking for one book to teach you lessons in both, my book on the
nearly decade-long conspiracy that billionaire Peter Thiel waged
against Gawker will do this for you. This is a stunning story about
how power works in the modern age, and is a masterclass in strategy
and how to accomplish wildly ambitious aims. The Road To Character by
David Brooks When General Stanley McChrystal was asked on the Tim
Ferriss podcastwhat was a recent purchase that had most positively
impacted his life, he pointed to this book. I agree. It can be a bit
stilted and dense at times, but it should be assigned reading to any
young person today (a little challenge is a good thing). Illustrating
with examples and stories from great men and women, Brooks admonishes
the reader to undertake their own journey of character perfection. In
my own book, I explore the same topic (humility) from a different
angle using similar stories — I’m attacking ego, he’s building up
character. Both will be important for the next year. The Dip by Seth
Godin This book is a short 70 pages and it looks like something
someone would give as a joke gift, but it’s anything but. Godin talks
frankly about quitting and pushing through — and when to do each. Quit
when you’ll be mediocre, when the returns aren’t worth the investment,
when you no longer think you’ll enjoy the ends. Stick when the dip is
the obstacle that creates scarcity, when you’re simply bridging the
gap between beginner’s luck and mastery. I promise, next year you are
guaranteed to find yourself in moments when you don’t know what is the
right answer. This book will help you find it. Hillbilly Elegy: A
Memoir of a Family and Culture in Crisis by J. D. Vance / Strangers in
Their Own Land: Anger and Mourning on the American Right by Arlie
Russell Hochschild You might describe Hillbilly Elegy as a Ta-Nehisi
Coates style memoir about a community that — at least in progressive
circles — gets a lot less attention: disaffected, impoverished whites
(particularly in the mid-east and South). I thought the book was
empathetic, self-aware and inspiring. The author pokes some holes in
the concept of “white privilege” — certainly a third or fourth
generation hillbilly in Kentucky doesn’t walk around feeling like they
have it easy — and an explanation of some of the phenomenon behind
Donald Trump (notice I said explanation, not an excuse).
</p>
<Claps />
<hr width="800px" color="lightgrey"></hr>
{follows.map((follow, index) => (
<div id={index} className="follow">
<div className="image">
<img src={follow.image} alt="-" />
</div>
<div className="comment">
<strong>{follow.nama}</strong>
<p>{follow.comment}</p>
</div>
<div className="follow-button">
<div>Follow</div>
</div>
</div>
))}
<div className="response-button">
<p>
<Link to="/Comment" id="link-response">
Response
</Link>
</p>
</div>
<br></br>
<br></br>
<div className="related-post">
<h3 style={{ float: "left", marginLeft: "40px" }}>
More From Medium
</h3>
<Grid container className={classes.root} spacing={2}>
<Grid item xs={12}>
<Grid container justifyContent="center" spacing={spacing}>
<Grid item>
<Paper className={classes.paper}>
<img src="http://bisnisbandung.com/wp-content/uploads/2017/12/youtube-crowd-uproar-protest-ss-19201920.jpg" alt="-" />
<Link className={classes.linkRelated}>
<h2>We Can Visit You Tube to Get Much of Video</h2>
</Link>
<div className={classes.relatedProfile}>
<Avatar
alt="Remy Sharp"
src="/broken-image.jpg"
className={classes.relatedPostAvatar}
>
<img
style={{ height: "100%" }}
src="https://miro.medium.com/max/960/1*K7Gi5y5g882yBNyhAEv84A.jpeg"
alt="-"
/>
</Avatar>
<p>Jason Voorhees</p>
</div>
</Paper>
</Grid>
<Grid item>
<Paper className={classes.paper}>
<img src="https://cnet1.cbsistatic.com/img/eOEoOQ4eSDQPxbPCJASBnbsjpmQ=/1092x0/2019/08/14/d9363e04-fd4f-4a81-97ae-88a25d5feef0/gettyimages-858489898.jpg" alt="-" />
<Link className={classes.linkRelated}>
<h2>You Can Meet Your Favourite Artist Here</h2>
</Link>
<div className={classes.relatedProfile}>
<Avatar
alt="Remy Sharp"
src="/broken-image.jpg"
className={classes.relatedPostAvatar}
>
<img
style={{ height: "100%" }}
src="https://upload.wikimedia.org/wikipedia/commons/0/01/CoreyTaylorGmm.jpg"
alt="-"
/>
</Avatar>
<p>Corey Taylor</p>
</div>
</Paper>
</Grid>
<Grid item>
<Paper className={classes.paper}>
<img src="https://devclass.com/wp-content/uploads/2018/12/Oracle.jpg" alt="-" />
<Link className={classes.linkRelated}>
<h2>Oracle is The World Most Bigger DBMS</h2>
</Link>
<div className={classes.relatedProfile}>
<Avatar
alt="Remy Sharp"
className={classes.relatedPostAvatar}
>
<img
style={{ height: "100%" }}
src="https://gossipgist.com/uploads/19/lionel-messi.jpg"
alt="-"
/>
</Avatar>
<p>Lionel Messi</p>
</div>
</Paper>
</Grid>
</Grid>
</Grid>
</Grid>
<div className={classes.underRelated}>
<Grid container className={classes.root} spacing={2}>
<Grid item xs={12}>
<Grid container justifyContent="center" spacing={spacing}>
<Grid item>
<Paper className={classes.bottom}>
<h3>Discover Medium</h3>
<p>
Welcome to a place where words matter. On Medium, smart
voices and original ideas take center stage - with no
ads in sight.
</p>
</Paper>
</Grid>
<Grid item>
<Paper className={classes.bottom}>
<h3>Make Medium Yours</h3>
<p>
Follow all the topics you care about, and we’ll deliver
the best stories for you to your homepage and inbox.
Explore
</p>
</Paper>
</Grid>
<Grid item>
<Paper className={classes.bottom}>
<h3>Become a Member</h3>
<p>
Get unlimited access to the best stories on Medium — and
support writers while you’re at it. Just $5/month.
Upgrade
</p>
</Paper>
</Grid>
</Grid>
</Grid>
</Grid>
<hr
style={{ width: "90%", marginTop: 30, color: "lightgrey" }}
></hr>
<br></br>
<br></br>
<h2
style={{
textAlign: "left",
color: "#fff",
fontFamily: "Baskerville Old Face",
position: "relative",
left: "85px"
}}
>
Medium
</h2>
<br></br>
<br></br>
</div>
</div>
</div>
</div>
);
}
export default Article;
| identifier_body | |
__init__.py | import types
def escape_text(text=''):
text = str(text)
return text.replace("\'", "\\'")
class GaqHub(object):
data_struct = None
def __init__(self, account_id, single_push=False):
"""Sets up self.data_struct dict which we use for storage.
You'd probably have something like this in your base controller:
class Handler(object):
def __init__(self, request):
self.request = request
h.gaq_setup(self.request, 'AccountId')
All of the other commands in the module accept an optional 'request' kwarg.
If no 'request' is submitted, it will call pyramid.threadlocal.get_current_request()
This should allow you to easily and cleanly call this within templates, and not just handler methods.
"""
self.data_struct = {
'__singlePush': single_push,
'__setAccountAdditional': set({}),
'_setAccount': account_id,
'_setCustomVar': dict((i, None) for i in range(1, 6)),
'_setDomainName': False,
'_setAllowLinker': False,
'_addTrans': [],
'_addItem': [],
'_trackTrans': False,
'_trackEvent': [],
}
def setAccount(self, account_id):
"""This should really never be called, best to setup during __init__, where it is required"""
self.data_struct['_setAccount'] = account_id
def setAccountAdditional_add(self, account_id):
"""add an additional account id to send the data to. please note - this is only tested to work with the async method.
"""
self.data_struct['__setAccountAdditional'].add(account_id)
def setAccountAdditional_del(self, account_id):
try:
self.data_struct['__setAccountAdditional'].remove(account_id)
except KeyError:
pass
def setSinglePush(self, bool_value):
"""GA supports a single 'push' event. """
self.data_struct['__singlePush'] = bool_value
def trackEvent(self, track_dict):
"""'Constructs and sends the event tracking call to the Google Analytics Tracking Code. Use this to track visitor behavior on your website that is not related to a web page visit, such as interaction with a Flash video movie control or any user event that does not trigger a page request. For more information on Event Tracking, see the Event Tracking Guide.
You can use any of the following optional parameters: opt_label, opt_value or opt_noninteraction. If you want to provide a value only for the second or 3rd optional parameter, you need to pass in undefined for the preceding optional parameter.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEventTracking.html#_gat.GA_EventTracker_._trackEvent
"""
clean = []
for i in ['category', 'actions', 'opt_label', 'opt_value', 'opt_noninteraction']:
if i in track_dict:
clean.append("'%s'" % track_dict[i])
else:
clean.append('undefined')
self.data_struct['_trackEvent'].append("""['_trackEvent',%s]""" % ','.join(clean))
def setCustomVar(self, index, name, value, opt_scope=None):
"""_setCustomVar(index, name, value, opt_scope)
'Sets a custom variable with the supplied name, value, and scope for the variable. There is a 64-byte character limit for the name and value combined.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiBasicConfiguration.html#_gat.GA_Tracker_._setCustomVar
"""
self.data_struct['_setCustomVar'][index] = (escape_text(name), escape_text(value), opt_scope)
def setDomainName(self, domain_name):
"""_setDomainName(newDomainName)
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setDomainName
"""
self.data_struct['_setDomainName'] = domain_name
def setAllowLinker(self, bool_allow):
"""_setAllowLinker(bool)
http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setAllowLinker
"""
self.data_struct['_setAllowLinker'] = bool_allow
def addTrans(self, track_dict):
"""'Creates a transaction object with the given values. As with _addItem(), this method handles only transaction tracking and provides no additional ecommerce functionality. Therefore, if the transaction is a duplicate of an existing transaction for that session, the old transaction values are over-written with the new transaction values. Arguments for this method are matched by position, so be sure to supply all parameters, even if some of them have an empty value.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addTrans
"""
for i in ['order_id', 'total']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['opt_affiliation', 'opt_tax', 'opt_shipping', 'opt_city', 'opt_state', 'opt_country']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addTrans'].append("""['_addTrans',%(order_id)s,'%(opt_affiliation)s','%(total)s','%(opt_tax)s','%(opt_shipping)s','%(opt_city)s','%(opt_state)s','%(opt_country)s']""" % track_dict)
def addItem(self, track_dict):
"""'Use this method to track items purchased by visitors to your ecommerce site. This method tracks individual items by their SKU. This means that the sku parameter is required. This method then associates the item to the parent transaction object via the orderId argument'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addItem
"""
for i in ['order_id', 'sku', 'name', 'price', 'quantity']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['category']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addItem'].append("""['_addItem',%(order_id)s,'%(sku)s','%(name)s','%(category)s','%(price)s','%(quantity)s']""" % track_dict)
def trackTrans(self):
"""gaq_trackTrans(request=None)- You merely have to call this to enable it. I decided to require this, instead of automatically calling it if a transaction exists, because this must be explicitly called in the ga.js API and its safer to reinforce this behavior.
'Sends both the transaction and item data to the Google Analytics server. This method should be called after _trackPageview(), and used in conjunction with the _addItem() and addTrans() methods. It should be called after items and transaction elements have been set up.'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._trackTrans
"""
self.data_struct['_trackTrans'] = True
def _inner_render(self, single_push, single_pushes, script, account_id, is_secondary_account=False):
# start the single push if we elected
if single_push:
script.append(u"""_gaq.push(""")
# according to GA docs, the order to submit via javascript is:
# # _setAccount
# # _setDomainName
# # _setAllowLinker
# #
# # cross domain tracking reference
# # http://code.google.com/apis/analytics/docs/tracking/gaTrackingSite.html
# _setAccount
if single_push:
single_pushes.append(u"""['_setAccount', '%s']""" % account_id)
else:
script.append(u"""_gaq.push(['_setAccount', '%s']);""" % account_id)
# _setDomainName
if self.data_struct['_setDomainName']:
if single_push:
single_pushes.append(u"""['_setDomainName', '%s']""" % self.data_struct['_setDomainName'])
else:
script.append(u"""_gaq.push(['_setDomainName', '%s']);""" % self.data_struct['_setDomainName'])
# _setAllowLinker
if self.data_struct['_setAllowLinker']:
if single_push:
single_pushes.append(u"""['_setAllowLinker', %s]""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
else:
script.append(u"""_gaq.push(['_setAllowLinker', %s]);""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
# _setCustomVar is next
for index in self.data_struct['_setCustomVar'].keys():
_payload = self.data_struct['_setCustomVar'][index]
if not _payload: continue
_payload = (index, ) + _payload
if _payload[3]:
formatted = u"""['_setCustomVar',%s,'%s','%s',%s]""" % _payload
else:
formatted = u"""['_setCustomVar',%s,'%s','%s']""" % _payload[:3]
if single_push:
single_pushes.append(formatted)
else:
script.append(u"""_gaq.push(%s);""" % formatted)
if single_push:
single_pushes.append(u"""['_trackPageview']""")
else:
script.append(u"""_gaq.push(['_trackPageview']);""")
# according to GA docs, the order to submit via javascript is:
# # _trackPageview
# # _addTrans
# # _addItem
# # _trackTrans
for category in ['_addTrans', '_addItem']: |
if self.data_struct['_trackTrans']:
if single_push:
single_pushes.append(u"""['_trackTrans']""")
else:
script.append(u"""_gaq.push(['_trackTrans']);""")
# events seem to be on their own.
for category in ['_trackEvent']:
for i in self.data_struct[category]:
if single_push:
single_pushes.append(i)
else:
script.append(u"""_gaq.push(%s);""" % i)
return single_pushes, script
def as_html(self):
"""helper function. prints out GA code for you, in the right order.
You'd probably call it like this in a Mako template:
<head>
${h.as_html()|n}
</head>
Notice that you have to escape under Mako. For more information on mako escape options - http://www.makotemplates.org/docs/filtering.html
"""
single_push = self.data_struct['__singlePush']
single_pushes = []
script = [
u"""<script type="text/javascript">""",
u"""var _gaq = _gaq || [];""",
]
(single_pushes, script) = self._inner_render(single_push, single_pushes, script, self.data_struct['_setAccount'], is_secondary_account=False)
for account_id in self.data_struct['__setAccountAdditional']:
(single_pushes, script) = self._inner_render(single_push, single_pushes, script, account_id, is_secondary_account=True)
# close the single push if we elected
if single_push:
script.append(u""",\n""".join(single_pushes))
script.append(u""");""")
script.append(u"""(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl': 'http://www') + '.google-analytics.com/analytics.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();""")
script.append(u"""</script>""")
return u"""\n""".join(script) | for i in self.data_struct[category]:
if single_push:
single_pushes.append(i)
else:
script.append(u"""_gaq.push(%s);""" % i) | random_line_split |
__init__.py | import types
def escape_text(text=''):
text = str(text)
return text.replace("\'", "\\'")
class GaqHub(object):
data_struct = None
def __init__(self, account_id, single_push=False):
"""Sets up self.data_struct dict which we use for storage.
You'd probably have something like this in your base controller:
class Handler(object):
def __init__(self, request):
self.request = request
h.gaq_setup(self.request, 'AccountId')
All of the other commands in the module accept an optional 'request' kwarg.
If no 'request' is submitted, it will call pyramid.threadlocal.get_current_request()
This should allow you to easily and cleanly call this within templates, and not just handler methods.
"""
self.data_struct = {
'__singlePush': single_push,
'__setAccountAdditional': set({}),
'_setAccount': account_id,
'_setCustomVar': dict((i, None) for i in range(1, 6)),
'_setDomainName': False,
'_setAllowLinker': False,
'_addTrans': [],
'_addItem': [],
'_trackTrans': False,
'_trackEvent': [],
}
def setAccount(self, account_id):
"""This should really never be called, best to setup during __init__, where it is required"""
self.data_struct['_setAccount'] = account_id
def setAccountAdditional_add(self, account_id):
"""add an additional account id to send the data to. please note - this is only tested to work with the async method.
"""
self.data_struct['__setAccountAdditional'].add(account_id)
def setAccountAdditional_del(self, account_id):
try:
self.data_struct['__setAccountAdditional'].remove(account_id)
except KeyError:
pass
def setSinglePush(self, bool_value):
"""GA supports a single 'push' event. """
self.data_struct['__singlePush'] = bool_value
def trackEvent(self, track_dict):
"""'Constructs and sends the event tracking call to the Google Analytics Tracking Code. Use this to track visitor behavior on your website that is not related to a web page visit, such as interaction with a Flash video movie control or any user event that does not trigger a page request. For more information on Event Tracking, see the Event Tracking Guide.
You can use any of the following optional parameters: opt_label, opt_value or opt_noninteraction. If you want to provide a value only for the second or 3rd optional parameter, you need to pass in undefined for the preceding optional parameter.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEventTracking.html#_gat.GA_EventTracker_._trackEvent
"""
clean = []
for i in ['category', 'actions', 'opt_label', 'opt_value', 'opt_noninteraction']:
if i in track_dict:
clean.append("'%s'" % track_dict[i])
else:
clean.append('undefined')
self.data_struct['_trackEvent'].append("""['_trackEvent',%s]""" % ','.join(clean))
def setCustomVar(self, index, name, value, opt_scope=None):
"""_setCustomVar(index, name, value, opt_scope)
'Sets a custom variable with the supplied name, value, and scope for the variable. There is a 64-byte character limit for the name and value combined.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiBasicConfiguration.html#_gat.GA_Tracker_._setCustomVar
"""
self.data_struct['_setCustomVar'][index] = (escape_text(name), escape_text(value), opt_scope)
def setDomainName(self, domain_name):
"""_setDomainName(newDomainName)
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setDomainName
"""
self.data_struct['_setDomainName'] = domain_name
def setAllowLinker(self, bool_allow):
"""_setAllowLinker(bool)
http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setAllowLinker
"""
self.data_struct['_setAllowLinker'] = bool_allow
def addTrans(self, track_dict):
"""'Creates a transaction object with the given values. As with _addItem(), this method handles only transaction tracking and provides no additional ecommerce functionality. Therefore, if the transaction is a duplicate of an existing transaction for that session, the old transaction values are over-written with the new transaction values. Arguments for this method are matched by position, so be sure to supply all parameters, even if some of them have an empty value.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addTrans
"""
for i in ['order_id', 'total']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['opt_affiliation', 'opt_tax', 'opt_shipping', 'opt_city', 'opt_state', 'opt_country']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addTrans'].append("""['_addTrans',%(order_id)s,'%(opt_affiliation)s','%(total)s','%(opt_tax)s','%(opt_shipping)s','%(opt_city)s','%(opt_state)s','%(opt_country)s']""" % track_dict)
def addItem(self, track_dict):
"""'Use this method to track items purchased by visitors to your ecommerce site. This method tracks individual items by their SKU. This means that the sku parameter is required. This method then associates the item to the parent transaction object via the orderId argument'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addItem
"""
for i in ['order_id', 'sku', 'name', 'price', 'quantity']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['category']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addItem'].append("""['_addItem',%(order_id)s,'%(sku)s','%(name)s','%(category)s','%(price)s','%(quantity)s']""" % track_dict)
def trackTrans(self):
"""gaq_trackTrans(request=None)- You merely have to call this to enable it. I decided to require this, instead of automatically calling it if a transaction exists, because this must be explicitly called in the ga.js API and its safer to reinforce this behavior.
'Sends both the transaction and item data to the Google Analytics server. This method should be called after _trackPageview(), and used in conjunction with the _addItem() and addTrans() methods. It should be called after items and transaction elements have been set up.'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._trackTrans
"""
self.data_struct['_trackTrans'] = True
def _inner_render(self, single_push, single_pushes, script, account_id, is_secondary_account=False):
# start the single push if we elected
|
def as_html(self):
"""helper function. prints out GA code for you, in the right order.
You'd probably call it like this in a Mako template:
<head>
${h.as_html()|n}
</head>
Notice that you have to escape under Mako. For more information on mako escape options - http://www.makotemplates.org/docs/filtering.html
"""
single_push = self.data_struct['__singlePush']
single_pushes = []
script = [
u"""<script type="text/javascript">""",
u"""var _gaq = _gaq || [];""",
]
(single_pushes, script) = self._inner_render(single_push, single_pushes, script, self.data_struct['_setAccount'], is_secondary_account=False)
for account_id in self.data_struct['__setAccountAdditional']:
(single_pushes, script) = self._inner_render(single_push, single_pushes, script, account_id, is_secondary_account=True)
# close the single push if we elected
if single_push:
script.append(u""",\n""".join(single_pushes))
script.append(u""");""")
script.append(u"""(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl': 'http://www') + '.google-analytics.com/analytics.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();""")
script.append(u"""</script>""")
return u"""\n""".join(script)
| if single_push:
script.append(u"""_gaq.push(""")
# according to GA docs, the order to submit via javascript is:
# # _setAccount
# # _setDomainName
# # _setAllowLinker
# #
# # cross domain tracking reference
# # http://code.google.com/apis/analytics/docs/tracking/gaTrackingSite.html
# _setAccount
if single_push:
single_pushes.append(u"""['_setAccount', '%s']""" % account_id)
else:
script.append(u"""_gaq.push(['_setAccount', '%s']);""" % account_id)
# _setDomainName
if self.data_struct['_setDomainName']:
if single_push:
single_pushes.append(u"""['_setDomainName', '%s']""" % self.data_struct['_setDomainName'])
else:
script.append(u"""_gaq.push(['_setDomainName', '%s']);""" % self.data_struct['_setDomainName'])
# _setAllowLinker
if self.data_struct['_setAllowLinker']:
if single_push:
single_pushes.append(u"""['_setAllowLinker', %s]""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
else:
script.append(u"""_gaq.push(['_setAllowLinker', %s]);""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
# _setCustomVar is next
for index in self.data_struct['_setCustomVar'].keys():
_payload = self.data_struct['_setCustomVar'][index]
if not _payload: continue
_payload = (index, ) + _payload
if _payload[3]:
formatted = u"""['_setCustomVar',%s,'%s','%s',%s]""" % _payload
else:
formatted = u"""['_setCustomVar',%s,'%s','%s']""" % _payload[:3]
if single_push:
single_pushes.append(formatted)
else:
script.append(u"""_gaq.push(%s);""" % formatted)
if single_push:
single_pushes.append(u"""['_trackPageview']""")
else:
script.append(u"""_gaq.push(['_trackPageview']);""")
# according to GA docs, the order to submit via javascript is:
# # _trackPageview
# # _addTrans
# # _addItem
# # _trackTrans
for category in ['_addTrans', '_addItem']:
for i in self.data_struct[category]:
if single_push:
single_pushes.append(i)
else:
script.append(u"""_gaq.push(%s);""" % i)
if self.data_struct['_trackTrans']:
if single_push:
single_pushes.append(u"""['_trackTrans']""")
else:
script.append(u"""_gaq.push(['_trackTrans']);""")
# events seem to be on their own.
for category in ['_trackEvent']:
for i in self.data_struct[category]:
if single_push:
single_pushes.append(i)
else:
script.append(u"""_gaq.push(%s);""" % i)
return single_pushes, script | identifier_body |
__init__.py | import types
def escape_text(text=''):
text = str(text)
return text.replace("\'", "\\'")
class GaqHub(object):
data_struct = None
def __init__(self, account_id, single_push=False):
"""Sets up self.data_struct dict which we use for storage.
You'd probably have something like this in your base controller:
class Handler(object):
def __init__(self, request):
self.request = request
h.gaq_setup(self.request, 'AccountId')
All of the other commands in the module accept an optional 'request' kwarg.
If no 'request' is submitted, it will call pyramid.threadlocal.get_current_request()
This should allow you to easily and cleanly call this within templates, and not just handler methods.
"""
self.data_struct = {
'__singlePush': single_push,
'__setAccountAdditional': set({}),
'_setAccount': account_id,
'_setCustomVar': dict((i, None) for i in range(1, 6)),
'_setDomainName': False,
'_setAllowLinker': False,
'_addTrans': [],
'_addItem': [],
'_trackTrans': False,
'_trackEvent': [],
}
def setAccount(self, account_id):
"""This should really never be called, best to setup during __init__, where it is required"""
self.data_struct['_setAccount'] = account_id
def setAccountAdditional_add(self, account_id):
"""add an additional account id to send the data to. please note - this is only tested to work with the async method.
"""
self.data_struct['__setAccountAdditional'].add(account_id)
def setAccountAdditional_del(self, account_id):
try:
self.data_struct['__setAccountAdditional'].remove(account_id)
except KeyError:
pass
def setSinglePush(self, bool_value):
"""GA supports a single 'push' event. """
self.data_struct['__singlePush'] = bool_value
def trackEvent(self, track_dict):
"""'Constructs and sends the event tracking call to the Google Analytics Tracking Code. Use this to track visitor behavior on your website that is not related to a web page visit, such as interaction with a Flash video movie control or any user event that does not trigger a page request. For more information on Event Tracking, see the Event Tracking Guide.
You can use any of the following optional parameters: opt_label, opt_value or opt_noninteraction. If you want to provide a value only for the second or 3rd optional parameter, you need to pass in undefined for the preceding optional parameter.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEventTracking.html#_gat.GA_EventTracker_._trackEvent
"""
clean = []
for i in ['category', 'actions', 'opt_label', 'opt_value', 'opt_noninteraction']:
if i in track_dict:
clean.append("'%s'" % track_dict[i])
else:
clean.append('undefined')
self.data_struct['_trackEvent'].append("""['_trackEvent',%s]""" % ','.join(clean))
def setCustomVar(self, index, name, value, opt_scope=None):
"""_setCustomVar(index, name, value, opt_scope)
'Sets a custom variable with the supplied name, value, and scope for the variable. There is a 64-byte character limit for the name and value combined.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiBasicConfiguration.html#_gat.GA_Tracker_._setCustomVar
"""
self.data_struct['_setCustomVar'][index] = (escape_text(name), escape_text(value), opt_scope)
def setDomainName(self, domain_name):
"""_setDomainName(newDomainName)
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setDomainName
"""
self.data_struct['_setDomainName'] = domain_name
def setAllowLinker(self, bool_allow):
"""_setAllowLinker(bool)
http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setAllowLinker
"""
self.data_struct['_setAllowLinker'] = bool_allow
def addTrans(self, track_dict):
"""'Creates a transaction object with the given values. As with _addItem(), this method handles only transaction tracking and provides no additional ecommerce functionality. Therefore, if the transaction is a duplicate of an existing transaction for that session, the old transaction values are over-written with the new transaction values. Arguments for this method are matched by position, so be sure to supply all parameters, even if some of them have an empty value.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addTrans
"""
for i in ['order_id', 'total']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['opt_affiliation', 'opt_tax', 'opt_shipping', 'opt_city', 'opt_state', 'opt_country']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addTrans'].append("""['_addTrans',%(order_id)s,'%(opt_affiliation)s','%(total)s','%(opt_tax)s','%(opt_shipping)s','%(opt_city)s','%(opt_state)s','%(opt_country)s']""" % track_dict)
def addItem(self, track_dict):
"""'Use this method to track items purchased by visitors to your ecommerce site. This method tracks individual items by their SKU. This means that the sku parameter is required. This method then associates the item to the parent transaction object via the orderId argument'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addItem
"""
for i in ['order_id', 'sku', 'name', 'price', 'quantity']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['category']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addItem'].append("""['_addItem',%(order_id)s,'%(sku)s','%(name)s','%(category)s','%(price)s','%(quantity)s']""" % track_dict)
def trackTrans(self):
"""gaq_trackTrans(request=None)- You merely have to call this to enable it. I decided to require this, instead of automatically calling it if a transaction exists, because this must be explicitly called in the ga.js API and its safer to reinforce this behavior.
'Sends both the transaction and item data to the Google Analytics server. This method should be called after _trackPageview(), and used in conjunction with the _addItem() and addTrans() methods. It should be called after items and transaction elements have been set up.'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._trackTrans
"""
self.data_struct['_trackTrans'] = True
def _inner_render(self, single_push, single_pushes, script, account_id, is_secondary_account=False):
# start the single push if we elected
if single_push:
script.append(u"""_gaq.push(""")
# according to GA docs, the order to submit via javascript is:
# # _setAccount
# # _setDomainName
# # _setAllowLinker
# #
# # cross domain tracking reference
# # http://code.google.com/apis/analytics/docs/tracking/gaTrackingSite.html
# _setAccount
if single_push:
single_pushes.append(u"""['_setAccount', '%s']""" % account_id)
else:
script.append(u"""_gaq.push(['_setAccount', '%s']);""" % account_id)
# _setDomainName
if self.data_struct['_setDomainName']:
if single_push:
single_pushes.append(u"""['_setDomainName', '%s']""" % self.data_struct['_setDomainName'])
else:
script.append(u"""_gaq.push(['_setDomainName', '%s']);""" % self.data_struct['_setDomainName'])
# _setAllowLinker
if self.data_struct['_setAllowLinker']:
if single_push:
single_pushes.append(u"""['_setAllowLinker', %s]""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
else:
script.append(u"""_gaq.push(['_setAllowLinker', %s]);""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
# _setCustomVar is next
for index in self.data_struct['_setCustomVar'].keys():
_payload = self.data_struct['_setCustomVar'][index]
if not _payload: continue
_payload = (index, ) + _payload
if _payload[3]:
formatted = u"""['_setCustomVar',%s,'%s','%s',%s]""" % _payload
else:
formatted = u"""['_setCustomVar',%s,'%s','%s']""" % _payload[:3]
if single_push:
single_pushes.append(formatted)
else:
script.append(u"""_gaq.push(%s);""" % formatted)
if single_push:
single_pushes.append(u"""['_trackPageview']""")
else:
script.append(u"""_gaq.push(['_trackPageview']);""")
# according to GA docs, the order to submit via javascript is:
# # _trackPageview
# # _addTrans
# # _addItem
# # _trackTrans
for category in ['_addTrans', '_addItem']:
for i in self.data_struct[category]:
if single_push:
single_pushes.append(i)
else:
script.append(u"""_gaq.push(%s);""" % i)
if self.data_struct['_trackTrans']:
if single_push:
|
else:
script.append(u"""_gaq.push(['_trackTrans']);""")
# events seem to be on their own.
for category in ['_trackEvent']:
for i in self.data_struct[category]:
if single_push:
single_pushes.append(i)
else:
script.append(u"""_gaq.push(%s);""" % i)
return single_pushes, script
def as_html(self):
"""helper function. prints out GA code for you, in the right order.
You'd probably call it like this in a Mako template:
<head>
${h.as_html()|n}
</head>
Notice that you have to escape under Mako. For more information on mako escape options - http://www.makotemplates.org/docs/filtering.html
"""
single_push = self.data_struct['__singlePush']
single_pushes = []
script = [
u"""<script type="text/javascript">""",
u"""var _gaq = _gaq || [];""",
]
(single_pushes, script) = self._inner_render(single_push, single_pushes, script, self.data_struct['_setAccount'], is_secondary_account=False)
for account_id in self.data_struct['__setAccountAdditional']:
(single_pushes, script) = self._inner_render(single_push, single_pushes, script, account_id, is_secondary_account=True)
# close the single push if we elected
if single_push:
script.append(u""",\n""".join(single_pushes))
script.append(u""");""")
script.append(u"""(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl': 'http://www') + '.google-analytics.com/analytics.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();""")
script.append(u"""</script>""")
return u"""\n""".join(script)
| single_pushes.append(u"""['_trackTrans']""") | conditional_block |
__init__.py | import types
def | (text=''):
text = str(text)
return text.replace("\'", "\\'")
class GaqHub(object):
data_struct = None
def __init__(self, account_id, single_push=False):
"""Sets up self.data_struct dict which we use for storage.
You'd probably have something like this in your base controller:
class Handler(object):
def __init__(self, request):
self.request = request
h.gaq_setup(self.request, 'AccountId')
All of the other commands in the module accept an optional 'request' kwarg.
If no 'request' is submitted, it will call pyramid.threadlocal.get_current_request()
This should allow you to easily and cleanly call this within templates, and not just handler methods.
"""
self.data_struct = {
'__singlePush': single_push,
'__setAccountAdditional': set({}),
'_setAccount': account_id,
'_setCustomVar': dict((i, None) for i in range(1, 6)),
'_setDomainName': False,
'_setAllowLinker': False,
'_addTrans': [],
'_addItem': [],
'_trackTrans': False,
'_trackEvent': [],
}
def setAccount(self, account_id):
"""This should really never be called, best to setup during __init__, where it is required"""
self.data_struct['_setAccount'] = account_id
def setAccountAdditional_add(self, account_id):
"""add an additional account id to send the data to. please note - this is only tested to work with the async method.
"""
self.data_struct['__setAccountAdditional'].add(account_id)
def setAccountAdditional_del(self, account_id):
try:
self.data_struct['__setAccountAdditional'].remove(account_id)
except KeyError:
pass
def setSinglePush(self, bool_value):
"""GA supports a single 'push' event. """
self.data_struct['__singlePush'] = bool_value
def trackEvent(self, track_dict):
"""'Constructs and sends the event tracking call to the Google Analytics Tracking Code. Use this to track visitor behavior on your website that is not related to a web page visit, such as interaction with a Flash video movie control or any user event that does not trigger a page request. For more information on Event Tracking, see the Event Tracking Guide.
You can use any of the following optional parameters: opt_label, opt_value or opt_noninteraction. If you want to provide a value only for the second or 3rd optional parameter, you need to pass in undefined for the preceding optional parameter.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEventTracking.html#_gat.GA_EventTracker_._trackEvent
"""
clean = []
for i in ['category', 'actions', 'opt_label', 'opt_value', 'opt_noninteraction']:
if i in track_dict:
clean.append("'%s'" % track_dict[i])
else:
clean.append('undefined')
self.data_struct['_trackEvent'].append("""['_trackEvent',%s]""" % ','.join(clean))
def setCustomVar(self, index, name, value, opt_scope=None):
"""_setCustomVar(index, name, value, opt_scope)
'Sets a custom variable with the supplied name, value, and scope for the variable. There is a 64-byte character limit for the name and value combined.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiBasicConfiguration.html#_gat.GA_Tracker_._setCustomVar
"""
self.data_struct['_setCustomVar'][index] = (escape_text(name), escape_text(value), opt_scope)
def setDomainName(self, domain_name):
"""_setDomainName(newDomainName)
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setDomainName
"""
self.data_struct['_setDomainName'] = domain_name
def setAllowLinker(self, bool_allow):
"""_setAllowLinker(bool)
http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setAllowLinker
"""
self.data_struct['_setAllowLinker'] = bool_allow
def addTrans(self, track_dict):
"""'Creates a transaction object with the given values. As with _addItem(), this method handles only transaction tracking and provides no additional ecommerce functionality. Therefore, if the transaction is a duplicate of an existing transaction for that session, the old transaction values are over-written with the new transaction values. Arguments for this method are matched by position, so be sure to supply all parameters, even if some of them have an empty value.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addTrans
"""
for i in ['order_id', 'total']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['opt_affiliation', 'opt_tax', 'opt_shipping', 'opt_city', 'opt_state', 'opt_country']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addTrans'].append("""['_addTrans',%(order_id)s,'%(opt_affiliation)s','%(total)s','%(opt_tax)s','%(opt_shipping)s','%(opt_city)s','%(opt_state)s','%(opt_country)s']""" % track_dict)
def addItem(self, track_dict):
"""'Use this method to track items purchased by visitors to your ecommerce site. This method tracks individual items by their SKU. This means that the sku parameter is required. This method then associates the item to the parent transaction object via the orderId argument'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addItem
"""
for i in ['order_id', 'sku', 'name', 'price', 'quantity']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['category']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addItem'].append("""['_addItem',%(order_id)s,'%(sku)s','%(name)s','%(category)s','%(price)s','%(quantity)s']""" % track_dict)
def trackTrans(self):
"""gaq_trackTrans(request=None)- You merely have to call this to enable it. I decided to require this, instead of automatically calling it if a transaction exists, because this must be explicitly called in the ga.js API and its safer to reinforce this behavior.
'Sends both the transaction and item data to the Google Analytics server. This method should be called after _trackPageview(), and used in conjunction with the _addItem() and addTrans() methods. It should be called after items and transaction elements have been set up.'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._trackTrans
"""
self.data_struct['_trackTrans'] = True
def _inner_render(self, single_push, single_pushes, script, account_id, is_secondary_account=False):
# start the single push if we elected
if single_push:
script.append(u"""_gaq.push(""")
# according to GA docs, the order to submit via javascript is:
# # _setAccount
# # _setDomainName
# # _setAllowLinker
# #
# # cross domain tracking reference
# # http://code.google.com/apis/analytics/docs/tracking/gaTrackingSite.html
# _setAccount
if single_push:
single_pushes.append(u"""['_setAccount', '%s']""" % account_id)
else:
script.append(u"""_gaq.push(['_setAccount', '%s']);""" % account_id)
# _setDomainName
if self.data_struct['_setDomainName']:
if single_push:
single_pushes.append(u"""['_setDomainName', '%s']""" % self.data_struct['_setDomainName'])
else:
script.append(u"""_gaq.push(['_setDomainName', '%s']);""" % self.data_struct['_setDomainName'])
# _setAllowLinker
if self.data_struct['_setAllowLinker']:
if single_push:
single_pushes.append(u"""['_setAllowLinker', %s]""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
else:
script.append(u"""_gaq.push(['_setAllowLinker', %s]);""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
# _setCustomVar is next
for index in self.data_struct['_setCustomVar'].keys():
_payload = self.data_struct['_setCustomVar'][index]
if not _payload: continue
_payload = (index, ) + _payload
if _payload[3]:
formatted = u"""['_setCustomVar',%s,'%s','%s',%s]""" % _payload
else:
formatted = u"""['_setCustomVar',%s,'%s','%s']""" % _payload[:3]
if single_push:
single_pushes.append(formatted)
else:
script.append(u"""_gaq.push(%s);""" % formatted)
if single_push:
single_pushes.append(u"""['_trackPageview']""")
else:
script.append(u"""_gaq.push(['_trackPageview']);""")
# according to GA docs, the order to submit via javascript is:
# # _trackPageview
# # _addTrans
# # _addItem
# # _trackTrans
for category in ['_addTrans', '_addItem']:
for i in self.data_struct[category]:
if single_push:
single_pushes.append(i)
else:
script.append(u"""_gaq.push(%s);""" % i)
if self.data_struct['_trackTrans']:
if single_push:
single_pushes.append(u"""['_trackTrans']""")
else:
script.append(u"""_gaq.push(['_trackTrans']);""")
# events seem to be on their own.
for category in ['_trackEvent']:
for i in self.data_struct[category]:
if single_push:
single_pushes.append(i)
else:
script.append(u"""_gaq.push(%s);""" % i)
return single_pushes, script
def as_html(self):
"""helper function. prints out GA code for you, in the right order.
You'd probably call it like this in a Mako template:
<head>
${h.as_html()|n}
</head>
Notice that you have to escape under Mako. For more information on mako escape options - http://www.makotemplates.org/docs/filtering.html
"""
single_push = self.data_struct['__singlePush']
single_pushes = []
script = [
u"""<script type="text/javascript">""",
u"""var _gaq = _gaq || [];""",
]
(single_pushes, script) = self._inner_render(single_push, single_pushes, script, self.data_struct['_setAccount'], is_secondary_account=False)
for account_id in self.data_struct['__setAccountAdditional']:
(single_pushes, script) = self._inner_render(single_push, single_pushes, script, account_id, is_secondary_account=True)
# close the single push if we elected
if single_push:
script.append(u""",\n""".join(single_pushes))
script.append(u""");""")
script.append(u"""(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl': 'http://www') + '.google-analytics.com/analytics.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();""")
script.append(u"""</script>""")
return u"""\n""".join(script)
| escape_text | identifier_name |
aio.rs | use std::default::Default;
use std::io;
use std::thread::{self, JoinHandle};
use std::time::SystemTime;
use mio;
use core::pin::Pin;
use futures::channel::{mpsc, oneshot};
use futures::executor;
use futures::stream::Stream;
use futures::task::Context;
use futures::{Future, Poll};
use bytes::BytesMut;
use eventfd::EventFD;
use libaio::directio::DirectFile;
use libaio::raw::{IoOp, Iocontext};
use std::os::unix::io::AsRawFd;
use tokio::runtime::current_thread;
use tokio_net::util::PollEvented;
use libc;
use slab::Slab;
use log::{info, trace};
#[derive(Debug)]
pub enum Message {
PRead(
DirectFile,
usize,
usize,
BytesMut,
oneshot::Sender<io::Result<(BytesMut, Option<io::Error>)>>,
),
PWrite(
DirectFile,
usize,
BytesMut,
oneshot::Sender<io::Result<(BytesMut, Option<io::Error>)>>,
),
}
#[derive(Debug)]
pub struct Session {
pub inner: mpsc::Sender<Message>,
thread: JoinHandle<()>,
pthread: libc::pthread_t,
}
#[derive(Debug, Clone)]
struct SessionHandle {
inner: mpsc::Sender<Message>,
}
impl Session {
pub fn new(max_queue_depth: usize) -> io::Result<Session> {
// Users of session interact with us by sending messages.
let (tx, rx) = mpsc::channel::<Message>(max_queue_depth);
let (tid_tx, tid_rx) = oneshot::channel();
// Spawn a thread with it's own event loop dedicated to AIO
let t = thread::spawn(move || {
let mut core = current_thread::Runtime::new().unwrap();
// Return the pthread id so the main thread can bind this
// thread to a specific core
tid_tx.send(unsafe { libc::pthread_self() }).unwrap();
let mut ctx = match Iocontext::<usize, BytesMut, BytesMut>::new(max_queue_depth) {
Ok(ctx) => ctx,
Err(e) => panic!("could not create Iocontext: {}", e),
};
// Using an eventfd, the kernel can notify us when there's
// one or more AIO results ready. See 'man eventfd'
match ctx.get_evfd_stream() {
Ok(_) => (),
Err(e) => panic!("get_evfd_stream failed: {}", e),
};
let evfd = ctx.evfd.as_ref().unwrap().clone();
// Add the eventfd to the file descriptors we are
// interested in. This will use epoll under the hood.
let source = AioEventFd { inner: evfd };
let stream = PollEvented::new(source);
let fut = AioThread {
rx: rx,
ctx: ctx,
stream: stream,
handles_pread: Slab::with_capacity(max_queue_depth),
handles_pwrite: Slab::with_capacity(max_queue_depth),
last_report_ts: SystemTime::now(),
stats: AioStats {
..Default::default()
},
};
core.spawn(fut);
core.run().unwrap();
});
let tid = executor::block_on(tid_rx).unwrap();
Ok(Session {
inner: tx,
thread: t,
pthread: tid,
})
}
pub fn thread_id(&self) -> libc::pthread_t {
self.pthread
}
}
struct AioThread {
rx: mpsc::Receiver<Message>,
ctx: Iocontext<usize, BytesMut, BytesMut>,
stream: PollEvented<AioEventFd>,
// Handles to outstanding requests
handles_pread: Slab<HandleEntry>,
handles_pwrite: Slab<HandleEntry>,
last_report_ts: SystemTime,
stats: AioStats,
}
struct HandleEntry {
complete: oneshot::Sender<io::Result<(BytesMut, Option<io::Error>)>>,
}
#[derive(Default)]
struct AioStats {
curr_polls: u64,
curr_preads: u64,
curr_pwrites: u64,
prev_polls: u64,
prev_preads: u64,
prev_pwrites: u64,
}
impl Future for AioThread {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
trace!(
"============ AioThread.poll (inflight_preads:{} inflight_pwrites:{})",
self.handles_pread.len(),
self.handles_pwrite.len()
);
self.stats.curr_polls += 1;
// If there are any responses from the kernel available, read
// as many as we can without blocking.
let ready = mio::Ready::readable();
if Pin::new(&mut self.stream)
.poll_read_ready(cx, ready)
.is_ready()
{
match self.ctx.results(0, 100, None) {
Ok(res) => {
trace!(" got {} AIO responses", res.len());
for (op, result) in res.into_iter() {
match op {
IoOp::Pread(retbuf, token) => {
trace!(
" got pread response, token {}, is error? {}",
token,
result.is_err()
);
match result {
Ok(_) => {
let entry = self.handles_pread.remove(token); //? .unwrap();
//let elapsed = entry.timestamp.elapsed().expect("Time drift!");
//trace!("pread returned in {} us", ((elapsed.as_secs() * 1_000_000_000) + elapsed.subsec_nanos() as u64) / 1000);
//entry.complete.send(Ok((retbuf, None))).expect("Could not send AioSession response");
entry.complete.send(Ok((retbuf, None)));
}
Err(e) => panic!("pread error {:?}", e),
}
}
IoOp::Pwrite(retbuf, token) => {
trace!(
" got pwrite response, token {}, is error? {}",
token,
result.is_err()
);
match result {
Ok(_) => {
let entry = self.handles_pwrite.remove(token); //? .unwrap();
entry.complete.send(Ok((retbuf, None)));
}
Err(e) => panic!("pwrite error {:?}", e),
}
}
_ => (),
}
}
}
Err(e) => panic!("ctx.results failed: {:?}", e),
}
};
// Read all available incoming requests, enqueue in AIO batch
loop {
let msg = match Pin::new(&mut self.rx).poll_next(cx) {
Poll::Ready(Some(msg)) => msg,
Poll::Ready(None) => break,
Poll::Pending => break, // AioThread.poll is automatically scheduled
};
match msg {
Message::PRead(file, offset, len, buf, complete) => {
self.stats.curr_preads += 1;
// The self is a Pin<&mut Self>. Obtaining mutable references to the fields
// will require going through DerefMut, which requires unique borrow.
// You can avoid the issue by dereferencing self once on entry to the method
// let this = &mut *self, and then continue accessing it
// through this.
// The basic idea is that each access to self.deref_mut()
// basically will create a new mutable reference to self, if
// you do it multiple times you get the error, so by
// effectively calling deref_mut by hand I can save the
// reference once and use it when needed.
let this = &mut *self;
let entry = this.handles_pread.vacant_entry();
let key = entry.key();
match this.ctx.pread(&file, buf, offset as i64, len, key) {
Ok(()) => {
entry.insert(HandleEntry { complete: complete });
}
Err((buf, _token)) => {
complete
.send(Ok((
buf,
Some(io::Error::new(io::ErrorKind::Other, "pread failed")),
)))
.expect("Could not send AioThread error response");
}
};
}
Message::PWrite(file, offset, buf, complete) => {
self.stats.curr_pwrites += 1;
let this = &mut *self;
let entry = this.handles_pwrite.vacant_entry();
let key = entry.key();
match this.ctx.pwrite(&file, buf, offset as i64, key) {
Ok(()) => {
entry.insert(HandleEntry { complete: complete });
}
Err((buf, _token)) => {
complete
.send(Ok((
buf,
Some(io::Error::new(io::ErrorKind::Other, "pread failed")),
)))
.expect("Could not send AioThread error response");
}
}
}
}
// TODO: If max queue depth is reached, do not receive any
// more messages, will cause clients to block
}
// TODO: Need busywait for submit timeout
trace!(" batch size {}", self.ctx.batched());
while self.ctx.batched() > 0 {
if let Err(e) = self.ctx.submit() {
panic!("batch submit failed {:?}", e);
}
}
let need_read = self.handles_pread.len() > 0 || self.handles_pwrite.len() > 0;
if need_read {
// Not sure I totally understand how the old need_read works vs the
// new clear_read_ready call.
trace!(" calling stream.clear_read_ready()");
Pin::new(&mut self.stream).clear_read_ready(cx, ready);
}
// Print some useful stats
if self.stats.curr_polls % 10000 == 0 {
let elapsed = self.last_report_ts.elapsed().expect("Time drift!");
let elapsed_ms = ((elapsed.as_secs() * 1_000_000_000) as f64
+ elapsed.subsec_nanos() as f64)
/ 1000000.0;
let polls = self.stats.curr_polls - self.stats.prev_polls;
let preads = self.stats.curr_preads - self.stats.prev_preads;
let pwrites = self.stats.curr_pwrites - self.stats.prev_pwrites;
let preads_inflight = self.handles_pread.len();
let pwrites_inflight = self.handles_pwrite.len();
let thread_id = unsafe { libc::pthread_self() };
info!("threadid:{} polls:{:.0}/sec preads:{:.0}/sec pwrites:{:.0}/sec, inflight:({},{}) reqs/poll:{:.2}",
thread_id,
polls as f64 / elapsed_ms * 1000.0,
preads as f64 / elapsed_ms * 1000.0,
pwrites as f64 / elapsed_ms * 1000.0,
preads_inflight,
pwrites_inflight,
(preads as f64 + pwrites as f64) / polls as f64);
self.stats.prev_polls = self.stats.curr_polls;
self.stats.prev_preads = self.stats.curr_preads;
self.stats.prev_pwrites = self.stats.curr_pwrites;
self.last_report_ts = SystemTime::now();
}
// Run forever
Poll::Pending
}
}
// Register the eventfd with mio
struct AioEventFd {
inner: EventFD,
}
impl mio::Evented for AioEventFd {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
trace!("AioEventFd.register");
mio::unix::EventedFd(&self.inner.as_raw_fd()).register(poll, token, interest, opts)
}
fn reregister(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
trace!("AioEventFd.reregister");
mio::unix::EventedFd(&self.inner.as_raw_fd()).reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
trace!("AioEventFd.deregister");
mio::unix::EventedFd(&self.inner.as_raw_fd()).deregister(poll)
}
}
#[cfg(test)]
mod tests {
extern crate env_logger;
extern crate tempdir;
extern crate uuid;
use self::tempdir::TempDir;
use byteorder::{BigEndian, ByteOrder};
use std::fs::File;
use std::io;
use std::io::Write;
use std::path::Path;
use aio::{Message, Session};
use bytes::{Buf, BufMut, BytesMut, IntoBuf};
use libaio::directio::{DirectFile, FileAccess, Mode};
use futures::channel::oneshot;
use futures::{stream, Future, Sink, Stream};
#[test]
fn test_init() {
let session = Session::new(512);
assert!(session.is_ok());
}
// TODO: Test max queue depth
#[test]
fn test_pread() {
env_logger::init().unwrap();
let path = new_file_with_sequential_u64("pread", 1024);
let file = DirectFile::open(path, Mode::Open, FileAccess::Read, 4096).unwrap();
let session = Session::new(2).unwrap();
let mut buf = BytesMut::with_capacity(512);
unsafe { buf.set_len(512) };
let (tx, rx) = oneshot::channel();
let fut = session.inner.send(Message::PRead(file, 0, 512, buf, tx));
fut.wait();
let res = rx.wait();
assert!(res.is_ok());
let res = res.unwrap();
assert!(res.is_ok());
let (mut buf, err) = res.unwrap();
assert!(err.is_none());
for i in 0..(512 / 8) {
assert_eq!(i, buf.split_to(8).into_buf().get_u64::<BigEndian>());
}
assert_eq!(0, buf.len());
}
#[test]
fn test_pread_many() {
//env_logger::init().unwrap();
let path = new_file_with_sequential_u64("pread", 10240);
let session = Session::new(4).unwrap();
//let handle1 = session.handle();
//let handle2 = session.handle(); | // let file = DirectFile::open(path.clone(), Mode::Open, FileAccess::Read, 4096).unwrap();
// let mut buf = BytesMut::with_capacity(512);
// unsafe { buf.set_len(512) };
// let (tx, rx) = oneshot::channel();
// session.inner.send(Message::PRead(file, 0, 512, buf, tx))
// });
// let file1 = DirectFile::open(path.clone(), Mode::Open, FileAccess::Read, 4096).unwrap();
// let file2 = DirectFile::open(path.clone(), Mode::Open, FileAccess::Read, 4096).unwrap();
// let mut buf1 = BytesMut::with_capacity(512);
// let mut buf2 = BytesMut::with_capacity(512);
// unsafe { buf1.set_len(512) };
// unsafe { buf2.set_len(512) };
// let req1 = handle1.pread(file1, 0, 512, buf1);
// let req2 = handle2.pread(file2, 0, 512, buf2);
// //session.inner.clone().send(Message::PRead(file2, 0, 512, buf2, tx2));
// let res = req1.wait();
//let stream: Stream<Item=Message, Error=io::Error> = stream::iter(reads);
//let stream: Stream<Item=Message, Error=io::Error> = stream::iter((0..5).map(Ok));
//let responses = session.inner.send_all(stream);
}
fn new_file_with_sequential_u64(name: &str, num: usize) -> String {
let tmp = TempDir::new("test").unwrap();
let mut path = tmp.into_path();
path.push(name);
let mut data = BytesMut::with_capacity(num * 8);
for i in 0..num {
data.put_u64::<BigEndian>(i as u64);
}
let data = data.freeze();
let mut file = File::create(path.clone()).expect("Could not create dummy_clustermap");
file.write_all(data.as_ref()).unwrap();
path.to_str().unwrap().to_owned()
}
} |
// let reads = (0..5).map(|_| {
// println!("foo"); | random_line_split |
aio.rs | use std::default::Default;
use std::io;
use std::thread::{self, JoinHandle};
use std::time::SystemTime;
use mio;
use core::pin::Pin;
use futures::channel::{mpsc, oneshot};
use futures::executor;
use futures::stream::Stream;
use futures::task::Context;
use futures::{Future, Poll};
use bytes::BytesMut;
use eventfd::EventFD;
use libaio::directio::DirectFile;
use libaio::raw::{IoOp, Iocontext};
use std::os::unix::io::AsRawFd;
use tokio::runtime::current_thread;
use tokio_net::util::PollEvented;
use libc;
use slab::Slab;
use log::{info, trace};
#[derive(Debug)]
pub enum Message {
PRead(
DirectFile,
usize,
usize,
BytesMut,
oneshot::Sender<io::Result<(BytesMut, Option<io::Error>)>>,
),
PWrite(
DirectFile,
usize,
BytesMut,
oneshot::Sender<io::Result<(BytesMut, Option<io::Error>)>>,
),
}
#[derive(Debug)]
pub struct Session {
pub inner: mpsc::Sender<Message>,
thread: JoinHandle<()>,
pthread: libc::pthread_t,
}
#[derive(Debug, Clone)]
struct SessionHandle {
inner: mpsc::Sender<Message>,
}
impl Session {
pub fn new(max_queue_depth: usize) -> io::Result<Session> {
// Users of session interact with us by sending messages.
let (tx, rx) = mpsc::channel::<Message>(max_queue_depth);
let (tid_tx, tid_rx) = oneshot::channel();
// Spawn a thread with it's own event loop dedicated to AIO
let t = thread::spawn(move || {
let mut core = current_thread::Runtime::new().unwrap();
// Return the pthread id so the main thread can bind this
// thread to a specific core
tid_tx.send(unsafe { libc::pthread_self() }).unwrap();
let mut ctx = match Iocontext::<usize, BytesMut, BytesMut>::new(max_queue_depth) {
Ok(ctx) => ctx,
Err(e) => panic!("could not create Iocontext: {}", e),
};
// Using an eventfd, the kernel can notify us when there's
// one or more AIO results ready. See 'man eventfd'
match ctx.get_evfd_stream() {
Ok(_) => (),
Err(e) => panic!("get_evfd_stream failed: {}", e),
};
let evfd = ctx.evfd.as_ref().unwrap().clone();
// Add the eventfd to the file descriptors we are
// interested in. This will use epoll under the hood.
let source = AioEventFd { inner: evfd };
let stream = PollEvented::new(source);
let fut = AioThread {
rx: rx,
ctx: ctx,
stream: stream,
handles_pread: Slab::with_capacity(max_queue_depth),
handles_pwrite: Slab::with_capacity(max_queue_depth),
last_report_ts: SystemTime::now(),
stats: AioStats {
..Default::default()
},
};
core.spawn(fut);
core.run().unwrap();
});
let tid = executor::block_on(tid_rx).unwrap();
Ok(Session {
inner: tx,
thread: t,
pthread: tid,
})
}
pub fn thread_id(&self) -> libc::pthread_t {
self.pthread
}
}
struct AioThread {
rx: mpsc::Receiver<Message>,
ctx: Iocontext<usize, BytesMut, BytesMut>,
stream: PollEvented<AioEventFd>,
// Handles to outstanding requests
handles_pread: Slab<HandleEntry>,
handles_pwrite: Slab<HandleEntry>,
last_report_ts: SystemTime,
stats: AioStats,
}
struct HandleEntry {
complete: oneshot::Sender<io::Result<(BytesMut, Option<io::Error>)>>,
}
#[derive(Default)]
struct | {
curr_polls: u64,
curr_preads: u64,
curr_pwrites: u64,
prev_polls: u64,
prev_preads: u64,
prev_pwrites: u64,
}
impl Future for AioThread {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
trace!(
"============ AioThread.poll (inflight_preads:{} inflight_pwrites:{})",
self.handles_pread.len(),
self.handles_pwrite.len()
);
self.stats.curr_polls += 1;
// If there are any responses from the kernel available, read
// as many as we can without blocking.
let ready = mio::Ready::readable();
if Pin::new(&mut self.stream)
.poll_read_ready(cx, ready)
.is_ready()
{
match self.ctx.results(0, 100, None) {
Ok(res) => {
trace!(" got {} AIO responses", res.len());
for (op, result) in res.into_iter() {
match op {
IoOp::Pread(retbuf, token) => {
trace!(
" got pread response, token {}, is error? {}",
token,
result.is_err()
);
match result {
Ok(_) => {
let entry = self.handles_pread.remove(token); //? .unwrap();
//let elapsed = entry.timestamp.elapsed().expect("Time drift!");
//trace!("pread returned in {} us", ((elapsed.as_secs() * 1_000_000_000) + elapsed.subsec_nanos() as u64) / 1000);
//entry.complete.send(Ok((retbuf, None))).expect("Could not send AioSession response");
entry.complete.send(Ok((retbuf, None)));
}
Err(e) => panic!("pread error {:?}", e),
}
}
IoOp::Pwrite(retbuf, token) => {
trace!(
" got pwrite response, token {}, is error? {}",
token,
result.is_err()
);
match result {
Ok(_) => {
let entry = self.handles_pwrite.remove(token); //? .unwrap();
entry.complete.send(Ok((retbuf, None)));
}
Err(e) => panic!("pwrite error {:?}", e),
}
}
_ => (),
}
}
}
Err(e) => panic!("ctx.results failed: {:?}", e),
}
};
// Read all available incoming requests, enqueue in AIO batch
loop {
let msg = match Pin::new(&mut self.rx).poll_next(cx) {
Poll::Ready(Some(msg)) => msg,
Poll::Ready(None) => break,
Poll::Pending => break, // AioThread.poll is automatically scheduled
};
match msg {
Message::PRead(file, offset, len, buf, complete) => {
self.stats.curr_preads += 1;
// The self is a Pin<&mut Self>. Obtaining mutable references to the fields
// will require going through DerefMut, which requires unique borrow.
// You can avoid the issue by dereferencing self once on entry to the method
// let this = &mut *self, and then continue accessing it
// through this.
// The basic idea is that each access to self.deref_mut()
// basically will create a new mutable reference to self, if
// you do it multiple times you get the error, so by
// effectively calling deref_mut by hand I can save the
// reference once and use it when needed.
let this = &mut *self;
let entry = this.handles_pread.vacant_entry();
let key = entry.key();
match this.ctx.pread(&file, buf, offset as i64, len, key) {
Ok(()) => {
entry.insert(HandleEntry { complete: complete });
}
Err((buf, _token)) => {
complete
.send(Ok((
buf,
Some(io::Error::new(io::ErrorKind::Other, "pread failed")),
)))
.expect("Could not send AioThread error response");
}
};
}
Message::PWrite(file, offset, buf, complete) => {
self.stats.curr_pwrites += 1;
let this = &mut *self;
let entry = this.handles_pwrite.vacant_entry();
let key = entry.key();
match this.ctx.pwrite(&file, buf, offset as i64, key) {
Ok(()) => {
entry.insert(HandleEntry { complete: complete });
}
Err((buf, _token)) => {
complete
.send(Ok((
buf,
Some(io::Error::new(io::ErrorKind::Other, "pread failed")),
)))
.expect("Could not send AioThread error response");
}
}
}
}
// TODO: If max queue depth is reached, do not receive any
// more messages, will cause clients to block
}
// TODO: Need busywait for submit timeout
trace!(" batch size {}", self.ctx.batched());
while self.ctx.batched() > 0 {
if let Err(e) = self.ctx.submit() {
panic!("batch submit failed {:?}", e);
}
}
let need_read = self.handles_pread.len() > 0 || self.handles_pwrite.len() > 0;
if need_read {
// Not sure I totally understand how the old need_read works vs the
// new clear_read_ready call.
trace!(" calling stream.clear_read_ready()");
Pin::new(&mut self.stream).clear_read_ready(cx, ready);
}
// Print some useful stats
if self.stats.curr_polls % 10000 == 0 {
let elapsed = self.last_report_ts.elapsed().expect("Time drift!");
let elapsed_ms = ((elapsed.as_secs() * 1_000_000_000) as f64
+ elapsed.subsec_nanos() as f64)
/ 1000000.0;
let polls = self.stats.curr_polls - self.stats.prev_polls;
let preads = self.stats.curr_preads - self.stats.prev_preads;
let pwrites = self.stats.curr_pwrites - self.stats.prev_pwrites;
let preads_inflight = self.handles_pread.len();
let pwrites_inflight = self.handles_pwrite.len();
let thread_id = unsafe { libc::pthread_self() };
info!("threadid:{} polls:{:.0}/sec preads:{:.0}/sec pwrites:{:.0}/sec, inflight:({},{}) reqs/poll:{:.2}",
thread_id,
polls as f64 / elapsed_ms * 1000.0,
preads as f64 / elapsed_ms * 1000.0,
pwrites as f64 / elapsed_ms * 1000.0,
preads_inflight,
pwrites_inflight,
(preads as f64 + pwrites as f64) / polls as f64);
self.stats.prev_polls = self.stats.curr_polls;
self.stats.prev_preads = self.stats.curr_preads;
self.stats.prev_pwrites = self.stats.curr_pwrites;
self.last_report_ts = SystemTime::now();
}
// Run forever
Poll::Pending
}
}
// Register the eventfd with mio
struct AioEventFd {
inner: EventFD,
}
impl mio::Evented for AioEventFd {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
trace!("AioEventFd.register");
mio::unix::EventedFd(&self.inner.as_raw_fd()).register(poll, token, interest, opts)
}
fn reregister(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
trace!("AioEventFd.reregister");
mio::unix::EventedFd(&self.inner.as_raw_fd()).reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
trace!("AioEventFd.deregister");
mio::unix::EventedFd(&self.inner.as_raw_fd()).deregister(poll)
}
}
#[cfg(test)]
mod tests {
extern crate env_logger;
extern crate tempdir;
extern crate uuid;
use self::tempdir::TempDir;
use byteorder::{BigEndian, ByteOrder};
use std::fs::File;
use std::io;
use std::io::Write;
use std::path::Path;
use aio::{Message, Session};
use bytes::{Buf, BufMut, BytesMut, IntoBuf};
use libaio::directio::{DirectFile, FileAccess, Mode};
use futures::channel::oneshot;
use futures::{stream, Future, Sink, Stream};
#[test]
fn test_init() {
let session = Session::new(512);
assert!(session.is_ok());
}
// TODO: Test max queue depth
#[test]
fn test_pread() {
env_logger::init().unwrap();
let path = new_file_with_sequential_u64("pread", 1024);
let file = DirectFile::open(path, Mode::Open, FileAccess::Read, 4096).unwrap();
let session = Session::new(2).unwrap();
let mut buf = BytesMut::with_capacity(512);
unsafe { buf.set_len(512) };
let (tx, rx) = oneshot::channel();
let fut = session.inner.send(Message::PRead(file, 0, 512, buf, tx));
fut.wait();
let res = rx.wait();
assert!(res.is_ok());
let res = res.unwrap();
assert!(res.is_ok());
let (mut buf, err) = res.unwrap();
assert!(err.is_none());
for i in 0..(512 / 8) {
assert_eq!(i, buf.split_to(8).into_buf().get_u64::<BigEndian>());
}
assert_eq!(0, buf.len());
}
#[test]
fn test_pread_many() {
//env_logger::init().unwrap();
let path = new_file_with_sequential_u64("pread", 10240);
let session = Session::new(4).unwrap();
//let handle1 = session.handle();
//let handle2 = session.handle();
// let reads = (0..5).map(|_| {
// println!("foo");
// let file = DirectFile::open(path.clone(), Mode::Open, FileAccess::Read, 4096).unwrap();
// let mut buf = BytesMut::with_capacity(512);
// unsafe { buf.set_len(512) };
// let (tx, rx) = oneshot::channel();
// session.inner.send(Message::PRead(file, 0, 512, buf, tx))
// });
// let file1 = DirectFile::open(path.clone(), Mode::Open, FileAccess::Read, 4096).unwrap();
// let file2 = DirectFile::open(path.clone(), Mode::Open, FileAccess::Read, 4096).unwrap();
// let mut buf1 = BytesMut::with_capacity(512);
// let mut buf2 = BytesMut::with_capacity(512);
// unsafe { buf1.set_len(512) };
// unsafe { buf2.set_len(512) };
// let req1 = handle1.pread(file1, 0, 512, buf1);
// let req2 = handle2.pread(file2, 0, 512, buf2);
// //session.inner.clone().send(Message::PRead(file2, 0, 512, buf2, tx2));
// let res = req1.wait();
//let stream: Stream<Item=Message, Error=io::Error> = stream::iter(reads);
//let stream: Stream<Item=Message, Error=io::Error> = stream::iter((0..5).map(Ok));
//let responses = session.inner.send_all(stream);
}
fn new_file_with_sequential_u64(name: &str, num: usize) -> String {
let tmp = TempDir::new("test").unwrap();
let mut path = tmp.into_path();
path.push(name);
let mut data = BytesMut::with_capacity(num * 8);
for i in 0..num {
data.put_u64::<BigEndian>(i as u64);
}
let data = data.freeze();
let mut file = File::create(path.clone()).expect("Could not create dummy_clustermap");
file.write_all(data.as_ref()).unwrap();
path.to_str().unwrap().to_owned()
}
}
| AioStats | identifier_name |
workflow.py | """
Base implementation for high level workflow.
The goal of this design is to make it easy to share
code among different variants of the Inferelator workflow.
"""
from __future__ import unicode_literals, print_function
from inferelator import utils
from inferelator.utils import Validator as check
from inferelator import default
from inferelator.preprocessing.prior_gs_split_workflow import split_for_cv, remove_prior_circularity
from inferelator.regression.base_regression import RegressionWorkflow
from inferelator.distributed.inferelator_mp import MPControl
import inspect
import numpy as np
import os
import datetime
import pandas as pd
# Python 2/3 compatible string checking
try:
basestring
except NameError:
basestring = str
class WorkflowBase(object):
# Paths to the input and output locations
input_dir = None
output_dir = None
# Settings that will be used by pd.read_table to import data files
file_format_settings = default.DEFAULT_PD_INPUT_SETTINGS
# A dict, keyed by file name, of settings to override the defaults in file_format_settings
# Used when input files are perhaps not processed into perfect TSVs
file_format_overrides = dict()
# File names for each of the data files which can be used in the inference workflow
expression_matrix_file = default.DEFAULT_EXPRESSION_FILE
tf_names_file = default.DEFAULT_TFNAMES_FILE
meta_data_file = default.DEFAULT_METADATA_FILE
priors_file = default.DEFAULT_PRIORS_FILE
gold_standard_file = default.DEFAULT_GOLDSTANDARD_FILE
# The random seed for sampling, etc
random_seed = default.DEFAULT_RANDOM_SEED
# The number of inference bootstraps to run
num_bootstraps = default.DEFAULT_NUM_BOOTSTRAPS
# Flags to control splitting priors into a prior/gold-standard set
split_priors_for_gold_standard = False
split_gold_standard_for_crossvalidation = False
cv_split_ratio = default.DEFAULT_GS_SPLIT_RATIO
cv_split_axis = default.DEFAULT_GS_SPLIT_AXIS
shuffle_prior_axis = None
# Computed data structures [G: Genes, K: Predictors, N: Conditions
expression_matrix = None # expression_matrix dataframe [G x N]
tf_names = None # tf_names list [k,]
meta_data = None # meta data dataframe [G x ?]
priors_data = None # priors data dataframe [G x K]
gold_standard = None # gold standard dataframe [G x K]
# Multiprocessing controller
initialize_mp = True
multiprocessing_controller = None
def __init__(self):
# Get environment variables
self.get_environmentals()
def initialize_multiprocessing(self):
"""
Register the multiprocessing controller if set and run .connect()
"""
if self.multiprocessing_controller is not None:
MPControl.set_multiprocess_engine(self.multiprocessing_controller)
MPControl.connect()
def get_environmentals(self):
"""
Load environmental variables into class variables
"""
for k, v in utils.slurm_envs(default.SBATCH_VARS_FOR_WORKFLOW).items():
setattr(self, k, v)
def startup(self):
"""
Startup by preprocessing all data into a ready format for regression.
"""
if self.initialize_mp:
self.initialize_multiprocessing()
self.startup_run()
self.startup_finish()
def startup_run(self):
"""
Execute any data preprocessing necessary before regression. Startup_run is mostly for reading in data
"""
raise NotImplementedError # implement in subclass
def startup_finish(self):
"""
Execute any data preprocessing necessary before regression. Startup_finish is mostly for preprocessing data
prior to regression
"""
raise NotImplementedError # implement in subclass
def run(self):
"""
Execute workflow, after all configuration.
"""
raise NotImplementedError # implement in subclass
def get_data(self):
"""
Read data files in to data structures.
"""
self.read_expression()
self.read_tfs()
self.read_metadata()
self.set_gold_standard_and_priors()
def read_expression(self, file=None):
"""
Read expression matrix file into expression_matrix
"""
if file is None:
file = self.expression_matrix_file
self.expression_matrix = self.input_dataframe(file)
def read_tfs(self, file=None):
"""
Read tf names file into tf_names
"""
# Load the class variable if no file is passed
file = self.tf_names_file if file is None else file
# Read in a dataframe with no header or index
tfs = self.input_dataframe(file, header=None, index_col=None)
# Cast the dataframe into a list
assert tfs.shape[1] == 1
self.tf_names = tfs.values.flatten().tolist()
def read_metadata(self, file=None):
"""
Read metadata file into meta_data or make fake metadata
"""
if file is None:
file = self.meta_data_file
try:
self.meta_data = self.input_dataframe(file, index_col=None)
except IOError:
self.meta_data = self.create_default_meta_data(self.expression_matrix)
def set_gold_standard_and_priors(self):
"""
Read priors file into priors_data and gold standard file into gold_standard
"""
self.priors_data = self.input_dataframe(self.priors_file)
if self.split_priors_for_gold_standard:
self.split_priors_into_gold_standard()
else:
self.gold_standard = self.input_dataframe(self.gold_standard_file)
if self.split_gold_standard_for_crossvalidation:
self.cross_validate_gold_standard()
try:
check.index_values_unique(self.priors_data.index)
except ValueError as v_err:
utils.Debug.vprint("Duplicate gene(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
try:
check.index_values_unique(self.priors_data.columns)
except ValueError as v_err:
utils.Debug.vprint("Duplicate tf(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
def split_priors_into_gold_standard(self):
"""
Break priors_data in half and give half to the gold standard
"""
if self.gold_standard is not None:
utils.Debug.vprint("Existing gold standard is being replaced by a split from the prior", level=0)
self.priors_data, self.gold_standard = split_for_cv(self.priors_data,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
utils.Debug.vprint("Prior split into a prior {pr} and a gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape),
level=0)
def cross_validate_gold_standard(self):
"""
Sample the gold standard for crossvalidation, and then remove the new gold standard from the priors
"""
utils.Debug.vprint("Resampling prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
_, self.gold_standard = split_for_cv(self.gold_standard,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
self.priors_data, self.gold_standard = remove_prior_circularity(self.priors_data, self.gold_standard,
split_axis=self.cv_split_axis)
utils.Debug.vprint("Selected prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
def shuffle_priors(self):
"""
Shuffle prior labels if shuffle_prior_axis is set
"""
if self.shuffle_prior_axis is None:
return None
elif self.shuffle_prior_axis == 0:
# Shuffle index (genes) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] gene data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.index.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=0, random_state=self.random_seed)
self.priors_data.index = prior_index
elif self.shuffle_prior_axis == 1:
# Shuffle columns (TFs) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] TF data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.columns.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=1, random_state=self.random_seed)
self.priors_data.columns = prior_index
else:
raise ValueError("shuffle_prior_axis must be 0 or 1")
def input_path(self, filename):
"""
Join filename to input_dir
"""
return os.path.abspath(os.path.expanduser(os.path.join(self.input_dir, filename)))
def input_dataframe(self, filename, **kwargs):
"""
Read a file in as a pandas dataframe
"""
# Set defaults for index_col and header
kwargs['index_col'] = kwargs.pop('index_col', 0)
kwargs['header'] = kwargs.pop('header', 0)
# Use any kwargs for this function and any file settings from default
file_settings = self.file_format_settings.copy()
file_settings.update(kwargs)
# Update the file settings with anything that's in file-specific overrides
if filename in self.file_format_overrides:
file_settings.update(self.file_format_overrides[filename])
# Load a dataframe
return pd.read_csv(self.input_path(filename), **file_settings)
def append_to_path(self, var_name, to_append):
"""
Add a string to an existing path variable in class
"""
path = getattr(self, var_name, None)
if path is None:
raise ValueError("Cannot append {to_append} to {var_name} (Which is None)".format(to_append=to_append,
var_name=var_name))
setattr(self, var_name, os.path.join(path, to_append))
@staticmethod
def create_default_meta_data(expression_matrix):
"""
Create a meta_data dataframe from basic defaults
"""
metadata_rows = expression_matrix.columns.tolist()
metadata_defaults = {"isTs": "FALSE", "is1stLast": "e", "prevCol": "NA", "del.t": "NA", "condName": None}
data = {}
for key in metadata_defaults.keys():
data[key] = pd.Series(data=[metadata_defaults[key] if metadata_defaults[key] else i for i in metadata_rows])
return pd.DataFrame(data)
def filter_expression_and_priors(self):
"""
Guarantee that each row of the prior is in the expression and vice versa.
Also filter the priors to only includes columns, transcription factors, that are in the tf_names list
"""
expressed_targets = self.expression_matrix.index
expressed_or_prior = expressed_targets.union(self.priors_data.columns)
keeper_regulators = expressed_or_prior.intersection(self.tf_names)
if len(keeper_regulators) == 0 or len(expressed_targets) == 0:
raise ValueError("Filtering will result in a priors with at least one axis of 0 length")
self.priors_data = self.priors_data.reindex(expressed_targets, axis=0)
self.priors_data = self.priors_data.reindex(keeper_regulators, axis=1)
self.priors_data = pd.DataFrame.fillna(self.priors_data, 0)
self.shuffle_priors()
def get_bootstraps(self):
"""
Generate sequence of bootstrap parameter objects for run.
"""
col_range = range(self.response.shape[1])
random_state = np.random.RandomState(seed=self.random_seed)
return random_state.choice(col_range, size=(self.num_bootstraps, self.response.shape[1])).tolist()
def emit_results(self, betas, rescaled_betas, gold_standard, priors):
"""
Output result report(s) for workflow run.
"""
raise NotImplementedError # implement in subclass
def is_master(self):
"""
Return True if this is the master thread
"""
return MPControl.is_master
def create_output_dir(self):
"""
Set a default output_dir if nothing is set. Create the path if it doesn't exist.
"""
if self.output_dir is None:
new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))
try:
os.makedirs(self.output_dir)
except OSError:
pass
def create_inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):
"""
This is the factory method to create workflow ckasses that combine preprocessing and postprocessing (from workflow)
with a regression method (from regression)
:param regression: RegressionWorkflow subclass
A class object which implements the run_regression and run_bootstrap methods for a specific regression strategy
:param workflow: WorkflowBase subclass
A class object which implements the necessary data loading and preprocessing to create design & response data
for the regression strategy, and then the postprocessing to turn regression betas into a network
:return RegressWorkflow:
This returns an uninstantiated class which is the multi-inheritance result of both the regression workflow and
the preprocessing/postprocessing workflow
"""
# Decide which preprocessing/postprocessing workflow to use
# String arguments are parsed for convenience in the run script
if isinstance(workflow, basestring):
if workflow == "base":
workflow_class = WorkflowBase
elif workflow == "tfa":
from inferelator.tfa_workflow import TFAWorkFlow
workflow_class = TFAWorkFlow
elif workflow == "amusr":
from inferelator.amusr_workflow import SingleCellMultiTask
workflow_class = SingleCellMultiTask
elif workflow == "single-cell":
from inferelator.single_cell_workflow import SingleCellWorkflow
workflow_class = SingleCellWorkflow
else:
raise ValueError("{val} is not a string that can be mapped to a workflow class".format(val=workflow))
# Or just use a workflow class directly
elif inspect.isclass(workflow) and issubclass(workflow, WorkflowBase):
workflow_class = workflow
else:
raise ValueError("Workflow must be a string that maps to a workflow class or an actual workflow class")
# Decide which regression workflow to use
# Return just the workflow if regression is set to None
if regression is None:
return workflow_class
# String arguments are parsed for convenience in the run script
elif isinstance(regression, basestring):
if regression == "bbsr":
from inferelator.regression.bbsr_python import BBSRRegressionWorkflow
regression_class = BBSRRegressionWorkflow
elif regression == "elasticnet":
from inferelator.regression.elasticnet_python import ElasticNetWorkflow
regression_class = ElasticNetWorkflow
elif regression == "amusr":
from inferelator.regression.amusr_regression import AMUSRRegressionWorkflow
regression_class = AMUSRRegressionWorkflow
else:
raise ValueError("{val} is not a string that can be mapped to a regression class".format(val=regression))
# Or just use a regression class directly
elif inspect.isclass(regression) and issubclass(regression, RegressionWorkflow):
regression_class = regression
else:
raise ValueError("Regression must be a string that maps to a regression class or an actual regression class")
class RegressWorkflow(regression_class, workflow_class):
regression_type = regression_class
return RegressWorkflow
def inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):
"""
Create and instantiate a workflow
:param regression: RegressionWorkflow subclass
A class object which implements the run_regression and run_bootstrap methods for a specific regression strategy
:param workflow: WorkflowBase subclass
A class object which implements the necessary data loading and preprocessing to create design & response data
for the regression strategy, and then the postprocessing to turn regression betas into a network
:return RegressWorkflow:
This returns an initialized object which is the multi-inheritance result of both the regression workflow and | the preprocessing/postprocessing workflow
"""
return create_inferelator_workflow(regression=regression, workflow=workflow)() | random_line_split | |
workflow.py | """
Base implementation for high level workflow.
The goal of this design is to make it easy to share
code among different variants of the Inferelator workflow.
"""
from __future__ import unicode_literals, print_function
from inferelator import utils
from inferelator.utils import Validator as check
from inferelator import default
from inferelator.preprocessing.prior_gs_split_workflow import split_for_cv, remove_prior_circularity
from inferelator.regression.base_regression import RegressionWorkflow
from inferelator.distributed.inferelator_mp import MPControl
import inspect
import numpy as np
import os
import datetime
import pandas as pd
# Python 2/3 compatible string checking
try:
basestring
except NameError:
basestring = str
class WorkflowBase(object):
# Paths to the input and output locations
input_dir = None
output_dir = None
# Settings that will be used by pd.read_table to import data files
file_format_settings = default.DEFAULT_PD_INPUT_SETTINGS
# A dict, keyed by file name, of settings to override the defaults in file_format_settings
# Used when input files are perhaps not processed into perfect TSVs
file_format_overrides = dict()
# File names for each of the data files which can be used in the inference workflow
expression_matrix_file = default.DEFAULT_EXPRESSION_FILE
tf_names_file = default.DEFAULT_TFNAMES_FILE
meta_data_file = default.DEFAULT_METADATA_FILE
priors_file = default.DEFAULT_PRIORS_FILE
gold_standard_file = default.DEFAULT_GOLDSTANDARD_FILE
# The random seed for sampling, etc
random_seed = default.DEFAULT_RANDOM_SEED
# The number of inference bootstraps to run
num_bootstraps = default.DEFAULT_NUM_BOOTSTRAPS
# Flags to control splitting priors into a prior/gold-standard set
split_priors_for_gold_standard = False
split_gold_standard_for_crossvalidation = False
cv_split_ratio = default.DEFAULT_GS_SPLIT_RATIO
cv_split_axis = default.DEFAULT_GS_SPLIT_AXIS
shuffle_prior_axis = None
# Computed data structures [G: Genes, K: Predictors, N: Conditions
expression_matrix = None # expression_matrix dataframe [G x N]
tf_names = None # tf_names list [k,]
meta_data = None # meta data dataframe [G x ?]
priors_data = None # priors data dataframe [G x K]
gold_standard = None # gold standard dataframe [G x K]
# Multiprocessing controller
initialize_mp = True
multiprocessing_controller = None
def __init__(self):
# Get environment variables
self.get_environmentals()
def initialize_multiprocessing(self):
"""
Register the multiprocessing controller if set and run .connect()
"""
if self.multiprocessing_controller is not None:
MPControl.set_multiprocess_engine(self.multiprocessing_controller)
MPControl.connect()
def get_environmentals(self):
"""
Load environmental variables into class variables
"""
for k, v in utils.slurm_envs(default.SBATCH_VARS_FOR_WORKFLOW).items():
setattr(self, k, v)
def startup(self):
"""
Startup by preprocessing all data into a ready format for regression.
"""
if self.initialize_mp:
self.initialize_multiprocessing()
self.startup_run()
self.startup_finish()
def startup_run(self):
"""
Execute any data preprocessing necessary before regression. Startup_run is mostly for reading in data
"""
raise NotImplementedError # implement in subclass
def startup_finish(self):
"""
Execute any data preprocessing necessary before regression. Startup_finish is mostly for preprocessing data
prior to regression
"""
raise NotImplementedError # implement in subclass
def run(self):
"""
Execute workflow, after all configuration.
"""
raise NotImplementedError # implement in subclass
def get_data(self):
"""
Read data files in to data structures.
"""
self.read_expression()
self.read_tfs()
self.read_metadata()
self.set_gold_standard_and_priors()
def read_expression(self, file=None):
"""
Read expression matrix file into expression_matrix
"""
if file is None:
file = self.expression_matrix_file
self.expression_matrix = self.input_dataframe(file)
def read_tfs(self, file=None):
"""
Read tf names file into tf_names
"""
# Load the class variable if no file is passed
file = self.tf_names_file if file is None else file
# Read in a dataframe with no header or index
tfs = self.input_dataframe(file, header=None, index_col=None)
# Cast the dataframe into a list
assert tfs.shape[1] == 1
self.tf_names = tfs.values.flatten().tolist()
def read_metadata(self, file=None):
"""
Read metadata file into meta_data or make fake metadata
"""
if file is None:
file = self.meta_data_file
try:
self.meta_data = self.input_dataframe(file, index_col=None)
except IOError:
self.meta_data = self.create_default_meta_data(self.expression_matrix)
def set_gold_standard_and_priors(self):
"""
Read priors file into priors_data and gold standard file into gold_standard
"""
self.priors_data = self.input_dataframe(self.priors_file)
if self.split_priors_for_gold_standard:
self.split_priors_into_gold_standard()
else:
self.gold_standard = self.input_dataframe(self.gold_standard_file)
if self.split_gold_standard_for_crossvalidation:
self.cross_validate_gold_standard()
try:
check.index_values_unique(self.priors_data.index)
except ValueError as v_err:
utils.Debug.vprint("Duplicate gene(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
try:
check.index_values_unique(self.priors_data.columns)
except ValueError as v_err:
utils.Debug.vprint("Duplicate tf(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
def split_priors_into_gold_standard(self):
"""
Break priors_data in half and give half to the gold standard
"""
if self.gold_standard is not None:
utils.Debug.vprint("Existing gold standard is being replaced by a split from the prior", level=0)
self.priors_data, self.gold_standard = split_for_cv(self.priors_data,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
utils.Debug.vprint("Prior split into a prior {pr} and a gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape),
level=0)
def cross_validate_gold_standard(self):
"""
Sample the gold standard for crossvalidation, and then remove the new gold standard from the priors
"""
utils.Debug.vprint("Resampling prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
_, self.gold_standard = split_for_cv(self.gold_standard,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
self.priors_data, self.gold_standard = remove_prior_circularity(self.priors_data, self.gold_standard,
split_axis=self.cv_split_axis)
utils.Debug.vprint("Selected prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
def shuffle_priors(self):
"""
Shuffle prior labels if shuffle_prior_axis is set
"""
if self.shuffle_prior_axis is None:
return None
elif self.shuffle_prior_axis == 0:
# Shuffle index (genes) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] gene data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.index.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=0, random_state=self.random_seed)
self.priors_data.index = prior_index
elif self.shuffle_prior_axis == 1:
# Shuffle columns (TFs) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] TF data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.columns.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=1, random_state=self.random_seed)
self.priors_data.columns = prior_index
else:
raise ValueError("shuffle_prior_axis must be 0 or 1")
def input_path(self, filename):
"""
Join filename to input_dir
"""
return os.path.abspath(os.path.expanduser(os.path.join(self.input_dir, filename)))
def input_dataframe(self, filename, **kwargs):
"""
Read a file in as a pandas dataframe
"""
# Set defaults for index_col and header
kwargs['index_col'] = kwargs.pop('index_col', 0)
kwargs['header'] = kwargs.pop('header', 0)
# Use any kwargs for this function and any file settings from default
file_settings = self.file_format_settings.copy()
file_settings.update(kwargs)
# Update the file settings with anything that's in file-specific overrides
if filename in self.file_format_overrides:
file_settings.update(self.file_format_overrides[filename])
# Load a dataframe
return pd.read_csv(self.input_path(filename), **file_settings)
def append_to_path(self, var_name, to_append):
"""
Add a string to an existing path variable in class
"""
path = getattr(self, var_name, None)
if path is None:
raise ValueError("Cannot append {to_append} to {var_name} (Which is None)".format(to_append=to_append,
var_name=var_name))
setattr(self, var_name, os.path.join(path, to_append))
@staticmethod
def create_default_meta_data(expression_matrix):
"""
Create a meta_data dataframe from basic defaults
"""
metadata_rows = expression_matrix.columns.tolist()
metadata_defaults = {"isTs": "FALSE", "is1stLast": "e", "prevCol": "NA", "del.t": "NA", "condName": None}
data = {}
for key in metadata_defaults.keys():
data[key] = pd.Series(data=[metadata_defaults[key] if metadata_defaults[key] else i for i in metadata_rows])
return pd.DataFrame(data)
def filter_expression_and_priors(self):
"""
Guarantee that each row of the prior is in the expression and vice versa.
Also filter the priors to only includes columns, transcription factors, that are in the tf_names list
"""
expressed_targets = self.expression_matrix.index
expressed_or_prior = expressed_targets.union(self.priors_data.columns)
keeper_regulators = expressed_or_prior.intersection(self.tf_names)
if len(keeper_regulators) == 0 or len(expressed_targets) == 0:
raise ValueError("Filtering will result in a priors with at least one axis of 0 length")
self.priors_data = self.priors_data.reindex(expressed_targets, axis=0)
self.priors_data = self.priors_data.reindex(keeper_regulators, axis=1)
self.priors_data = pd.DataFrame.fillna(self.priors_data, 0)
self.shuffle_priors()
def get_bootstraps(self):
"""
Generate sequence of bootstrap parameter objects for run.
"""
col_range = range(self.response.shape[1])
random_state = np.random.RandomState(seed=self.random_seed)
return random_state.choice(col_range, size=(self.num_bootstraps, self.response.shape[1])).tolist()
def emit_results(self, betas, rescaled_betas, gold_standard, priors):
"""
Output result report(s) for workflow run.
"""
raise NotImplementedError # implement in subclass
def is_master(self):
"""
Return True if this is the master thread
"""
return MPControl.is_master
def create_output_dir(self):
"""
Set a default output_dir if nothing is set. Create the path if it doesn't exist.
"""
if self.output_dir is None:
new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))
try:
os.makedirs(self.output_dir)
except OSError:
pass
def create_inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):
"""
This is the factory method to create workflow ckasses that combine preprocessing and postprocessing (from workflow)
with a regression method (from regression)
:param regression: RegressionWorkflow subclass
A class object which implements the run_regression and run_bootstrap methods for a specific regression strategy
:param workflow: WorkflowBase subclass
A class object which implements the necessary data loading and preprocessing to create design & response data
for the regression strategy, and then the postprocessing to turn regression betas into a network
:return RegressWorkflow:
This returns an uninstantiated class which is the multi-inheritance result of both the regression workflow and
the preprocessing/postprocessing workflow
"""
# Decide which preprocessing/postprocessing workflow to use
# String arguments are parsed for convenience in the run script
if isinstance(workflow, basestring):
if workflow == "base":
workflow_class = WorkflowBase
elif workflow == "tfa":
from inferelator.tfa_workflow import TFAWorkFlow
workflow_class = TFAWorkFlow
elif workflow == "amusr":
from inferelator.amusr_workflow import SingleCellMultiTask
workflow_class = SingleCellMultiTask
elif workflow == "single-cell":
from inferelator.single_cell_workflow import SingleCellWorkflow
workflow_class = SingleCellWorkflow
else:
raise ValueError("{val} is not a string that can be mapped to a workflow class".format(val=workflow))
# Or just use a workflow class directly
elif inspect.isclass(workflow) and issubclass(workflow, WorkflowBase):
|
else:
raise ValueError("Workflow must be a string that maps to a workflow class or an actual workflow class")
# Decide which regression workflow to use
# Return just the workflow if regression is set to None
if regression is None:
return workflow_class
# String arguments are parsed for convenience in the run script
elif isinstance(regression, basestring):
if regression == "bbsr":
from inferelator.regression.bbsr_python import BBSRRegressionWorkflow
regression_class = BBSRRegressionWorkflow
elif regression == "elasticnet":
from inferelator.regression.elasticnet_python import ElasticNetWorkflow
regression_class = ElasticNetWorkflow
elif regression == "amusr":
from inferelator.regression.amusr_regression import AMUSRRegressionWorkflow
regression_class = AMUSRRegressionWorkflow
else:
raise ValueError("{val} is not a string that can be mapped to a regression class".format(val=regression))
# Or just use a regression class directly
elif inspect.isclass(regression) and issubclass(regression, RegressionWorkflow):
regression_class = regression
else:
raise ValueError("Regression must be a string that maps to a regression class or an actual regression class")
class RegressWorkflow(regression_class, workflow_class):
regression_type = regression_class
return RegressWorkflow
def inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):
"""
Create and instantiate a workflow
:param regression: RegressionWorkflow subclass
A class object which implements the run_regression and run_bootstrap methods for a specific regression strategy
:param workflow: WorkflowBase subclass
A class object which implements the necessary data loading and preprocessing to create design & response data
for the regression strategy, and then the postprocessing to turn regression betas into a network
:return RegressWorkflow:
This returns an initialized object which is the multi-inheritance result of both the regression workflow and
the preprocessing/postprocessing workflow
"""
return create_inferelator_workflow(regression=regression, workflow=workflow)()
| workflow_class = workflow | conditional_block |
workflow.py | """
Base implementation for high level workflow.
The goal of this design is to make it easy to share
code among different variants of the Inferelator workflow.
"""
from __future__ import unicode_literals, print_function
from inferelator import utils
from inferelator.utils import Validator as check
from inferelator import default
from inferelator.preprocessing.prior_gs_split_workflow import split_for_cv, remove_prior_circularity
from inferelator.regression.base_regression import RegressionWorkflow
from inferelator.distributed.inferelator_mp import MPControl
import inspect
import numpy as np
import os
import datetime
import pandas as pd
# Python 2/3 compatible string checking
try:
basestring
except NameError:
basestring = str
class WorkflowBase(object):
# Paths to the input and output locations
input_dir = None
output_dir = None
# Settings that will be used by pd.read_table to import data files
file_format_settings = default.DEFAULT_PD_INPUT_SETTINGS
# A dict, keyed by file name, of settings to override the defaults in file_format_settings
# Used when input files are perhaps not processed into perfect TSVs
file_format_overrides = dict()
# File names for each of the data files which can be used in the inference workflow
expression_matrix_file = default.DEFAULT_EXPRESSION_FILE
tf_names_file = default.DEFAULT_TFNAMES_FILE
meta_data_file = default.DEFAULT_METADATA_FILE
priors_file = default.DEFAULT_PRIORS_FILE
gold_standard_file = default.DEFAULT_GOLDSTANDARD_FILE
# The random seed for sampling, etc
random_seed = default.DEFAULT_RANDOM_SEED
# The number of inference bootstraps to run
num_bootstraps = default.DEFAULT_NUM_BOOTSTRAPS
# Flags to control splitting priors into a prior/gold-standard set
split_priors_for_gold_standard = False
split_gold_standard_for_crossvalidation = False
cv_split_ratio = default.DEFAULT_GS_SPLIT_RATIO
cv_split_axis = default.DEFAULT_GS_SPLIT_AXIS
shuffle_prior_axis = None
# Computed data structures [G: Genes, K: Predictors, N: Conditions
expression_matrix = None # expression_matrix dataframe [G x N]
tf_names = None # tf_names list [k,]
meta_data = None # meta data dataframe [G x ?]
priors_data = None # priors data dataframe [G x K]
gold_standard = None # gold standard dataframe [G x K]
# Multiprocessing controller
initialize_mp = True
multiprocessing_controller = None
def __init__(self):
# Get environment variables
self.get_environmentals()
def initialize_multiprocessing(self):
"""
Register the multiprocessing controller if set and run .connect()
"""
if self.multiprocessing_controller is not None:
MPControl.set_multiprocess_engine(self.multiprocessing_controller)
MPControl.connect()
def get_environmentals(self):
"""
Load environmental variables into class variables
"""
for k, v in utils.slurm_envs(default.SBATCH_VARS_FOR_WORKFLOW).items():
setattr(self, k, v)
def startup(self):
"""
Startup by preprocessing all data into a ready format for regression.
"""
if self.initialize_mp:
self.initialize_multiprocessing()
self.startup_run()
self.startup_finish()
def startup_run(self):
"""
Execute any data preprocessing necessary before regression. Startup_run is mostly for reading in data
"""
raise NotImplementedError # implement in subclass
def startup_finish(self):
"""
Execute any data preprocessing necessary before regression. Startup_finish is mostly for preprocessing data
prior to regression
"""
raise NotImplementedError # implement in subclass
def run(self):
"""
Execute workflow, after all configuration.
"""
raise NotImplementedError # implement in subclass
def get_data(self):
"""
Read data files in to data structures.
"""
self.read_expression()
self.read_tfs()
self.read_metadata()
self.set_gold_standard_and_priors()
def read_expression(self, file=None):
"""
Read expression matrix file into expression_matrix
"""
if file is None:
file = self.expression_matrix_file
self.expression_matrix = self.input_dataframe(file)
def read_tfs(self, file=None):
"""
Read tf names file into tf_names
"""
# Load the class variable if no file is passed
file = self.tf_names_file if file is None else file
# Read in a dataframe with no header or index
tfs = self.input_dataframe(file, header=None, index_col=None)
# Cast the dataframe into a list
assert tfs.shape[1] == 1
self.tf_names = tfs.values.flatten().tolist()
def read_metadata(self, file=None):
"""
Read metadata file into meta_data or make fake metadata
"""
if file is None:
file = self.meta_data_file
try:
self.meta_data = self.input_dataframe(file, index_col=None)
except IOError:
self.meta_data = self.create_default_meta_data(self.expression_matrix)
def set_gold_standard_and_priors(self):
"""
Read priors file into priors_data and gold standard file into gold_standard
"""
self.priors_data = self.input_dataframe(self.priors_file)
if self.split_priors_for_gold_standard:
self.split_priors_into_gold_standard()
else:
self.gold_standard = self.input_dataframe(self.gold_standard_file)
if self.split_gold_standard_for_crossvalidation:
self.cross_validate_gold_standard()
try:
check.index_values_unique(self.priors_data.index)
except ValueError as v_err:
utils.Debug.vprint("Duplicate gene(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
try:
check.index_values_unique(self.priors_data.columns)
except ValueError as v_err:
utils.Debug.vprint("Duplicate tf(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
def split_priors_into_gold_standard(self):
"""
Break priors_data in half and give half to the gold standard
"""
if self.gold_standard is not None:
utils.Debug.vprint("Existing gold standard is being replaced by a split from the prior", level=0)
self.priors_data, self.gold_standard = split_for_cv(self.priors_data,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
utils.Debug.vprint("Prior split into a prior {pr} and a gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape),
level=0)
def cross_validate_gold_standard(self):
"""
Sample the gold standard for crossvalidation, and then remove the new gold standard from the priors
"""
utils.Debug.vprint("Resampling prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
_, self.gold_standard = split_for_cv(self.gold_standard,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
self.priors_data, self.gold_standard = remove_prior_circularity(self.priors_data, self.gold_standard,
split_axis=self.cv_split_axis)
utils.Debug.vprint("Selected prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
def shuffle_priors(self):
"""
Shuffle prior labels if shuffle_prior_axis is set
"""
if self.shuffle_prior_axis is None:
return None
elif self.shuffle_prior_axis == 0:
# Shuffle index (genes) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] gene data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.index.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=0, random_state=self.random_seed)
self.priors_data.index = prior_index
elif self.shuffle_prior_axis == 1:
# Shuffle columns (TFs) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] TF data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.columns.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=1, random_state=self.random_seed)
self.priors_data.columns = prior_index
else:
raise ValueError("shuffle_prior_axis must be 0 or 1")
def input_path(self, filename):
"""
Join filename to input_dir
"""
return os.path.abspath(os.path.expanduser(os.path.join(self.input_dir, filename)))
def input_dataframe(self, filename, **kwargs):
"""
Read a file in as a pandas dataframe
"""
# Set defaults for index_col and header
kwargs['index_col'] = kwargs.pop('index_col', 0)
kwargs['header'] = kwargs.pop('header', 0)
# Use any kwargs for this function and any file settings from default
file_settings = self.file_format_settings.copy()
file_settings.update(kwargs)
# Update the file settings with anything that's in file-specific overrides
if filename in self.file_format_overrides:
file_settings.update(self.file_format_overrides[filename])
# Load a dataframe
return pd.read_csv(self.input_path(filename), **file_settings)
def append_to_path(self, var_name, to_append):
"""
Add a string to an existing path variable in class
"""
path = getattr(self, var_name, None)
if path is None:
raise ValueError("Cannot append {to_append} to {var_name} (Which is None)".format(to_append=to_append,
var_name=var_name))
setattr(self, var_name, os.path.join(path, to_append))
@staticmethod
def | (expression_matrix):
"""
Create a meta_data dataframe from basic defaults
"""
metadata_rows = expression_matrix.columns.tolist()
metadata_defaults = {"isTs": "FALSE", "is1stLast": "e", "prevCol": "NA", "del.t": "NA", "condName": None}
data = {}
for key in metadata_defaults.keys():
data[key] = pd.Series(data=[metadata_defaults[key] if metadata_defaults[key] else i for i in metadata_rows])
return pd.DataFrame(data)
def filter_expression_and_priors(self):
"""
Guarantee that each row of the prior is in the expression and vice versa.
Also filter the priors to only includes columns, transcription factors, that are in the tf_names list
"""
expressed_targets = self.expression_matrix.index
expressed_or_prior = expressed_targets.union(self.priors_data.columns)
keeper_regulators = expressed_or_prior.intersection(self.tf_names)
if len(keeper_regulators) == 0 or len(expressed_targets) == 0:
raise ValueError("Filtering will result in a priors with at least one axis of 0 length")
self.priors_data = self.priors_data.reindex(expressed_targets, axis=0)
self.priors_data = self.priors_data.reindex(keeper_regulators, axis=1)
self.priors_data = pd.DataFrame.fillna(self.priors_data, 0)
self.shuffle_priors()
def get_bootstraps(self):
"""
Generate sequence of bootstrap parameter objects for run.
"""
col_range = range(self.response.shape[1])
random_state = np.random.RandomState(seed=self.random_seed)
return random_state.choice(col_range, size=(self.num_bootstraps, self.response.shape[1])).tolist()
def emit_results(self, betas, rescaled_betas, gold_standard, priors):
"""
Output result report(s) for workflow run.
"""
raise NotImplementedError # implement in subclass
def is_master(self):
"""
Return True if this is the master thread
"""
return MPControl.is_master
def create_output_dir(self):
"""
Set a default output_dir if nothing is set. Create the path if it doesn't exist.
"""
if self.output_dir is None:
new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))
try:
os.makedirs(self.output_dir)
except OSError:
pass
def create_inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):
"""
This is the factory method to create workflow ckasses that combine preprocessing and postprocessing (from workflow)
with a regression method (from regression)
:param regression: RegressionWorkflow subclass
A class object which implements the run_regression and run_bootstrap methods for a specific regression strategy
:param workflow: WorkflowBase subclass
A class object which implements the necessary data loading and preprocessing to create design & response data
for the regression strategy, and then the postprocessing to turn regression betas into a network
:return RegressWorkflow:
This returns an uninstantiated class which is the multi-inheritance result of both the regression workflow and
the preprocessing/postprocessing workflow
"""
# Decide which preprocessing/postprocessing workflow to use
# String arguments are parsed for convenience in the run script
if isinstance(workflow, basestring):
if workflow == "base":
workflow_class = WorkflowBase
elif workflow == "tfa":
from inferelator.tfa_workflow import TFAWorkFlow
workflow_class = TFAWorkFlow
elif workflow == "amusr":
from inferelator.amusr_workflow import SingleCellMultiTask
workflow_class = SingleCellMultiTask
elif workflow == "single-cell":
from inferelator.single_cell_workflow import SingleCellWorkflow
workflow_class = SingleCellWorkflow
else:
raise ValueError("{val} is not a string that can be mapped to a workflow class".format(val=workflow))
# Or just use a workflow class directly
elif inspect.isclass(workflow) and issubclass(workflow, WorkflowBase):
workflow_class = workflow
else:
raise ValueError("Workflow must be a string that maps to a workflow class or an actual workflow class")
# Decide which regression workflow to use
# Return just the workflow if regression is set to None
if regression is None:
return workflow_class
# String arguments are parsed for convenience in the run script
elif isinstance(regression, basestring):
if regression == "bbsr":
from inferelator.regression.bbsr_python import BBSRRegressionWorkflow
regression_class = BBSRRegressionWorkflow
elif regression == "elasticnet":
from inferelator.regression.elasticnet_python import ElasticNetWorkflow
regression_class = ElasticNetWorkflow
elif regression == "amusr":
from inferelator.regression.amusr_regression import AMUSRRegressionWorkflow
regression_class = AMUSRRegressionWorkflow
else:
raise ValueError("{val} is not a string that can be mapped to a regression class".format(val=regression))
# Or just use a regression class directly
elif inspect.isclass(regression) and issubclass(regression, RegressionWorkflow):
regression_class = regression
else:
raise ValueError("Regression must be a string that maps to a regression class or an actual regression class")
class RegressWorkflow(regression_class, workflow_class):
regression_type = regression_class
return RegressWorkflow
def inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):
"""
Create and instantiate a workflow
:param regression: RegressionWorkflow subclass
A class object which implements the run_regression and run_bootstrap methods for a specific regression strategy
:param workflow: WorkflowBase subclass
A class object which implements the necessary data loading and preprocessing to create design & response data
for the regression strategy, and then the postprocessing to turn regression betas into a network
:return RegressWorkflow:
This returns an initialized object which is the multi-inheritance result of both the regression workflow and
the preprocessing/postprocessing workflow
"""
return create_inferelator_workflow(regression=regression, workflow=workflow)()
| create_default_meta_data | identifier_name |
workflow.py | """
Base implementation for high level workflow.
The goal of this design is to make it easy to share
code among different variants of the Inferelator workflow.
"""
from __future__ import unicode_literals, print_function
from inferelator import utils
from inferelator.utils import Validator as check
from inferelator import default
from inferelator.preprocessing.prior_gs_split_workflow import split_for_cv, remove_prior_circularity
from inferelator.regression.base_regression import RegressionWorkflow
from inferelator.distributed.inferelator_mp import MPControl
import inspect
import numpy as np
import os
import datetime
import pandas as pd
# Python 2/3 compatible string checking
try:
basestring
except NameError:
basestring = str
class WorkflowBase(object):
# Paths to the input and output locations
input_dir = None
output_dir = None
# Settings that will be used by pd.read_table to import data files
file_format_settings = default.DEFAULT_PD_INPUT_SETTINGS
# A dict, keyed by file name, of settings to override the defaults in file_format_settings
# Used when input files are perhaps not processed into perfect TSVs
file_format_overrides = dict()
# File names for each of the data files which can be used in the inference workflow
expression_matrix_file = default.DEFAULT_EXPRESSION_FILE
tf_names_file = default.DEFAULT_TFNAMES_FILE
meta_data_file = default.DEFAULT_METADATA_FILE
priors_file = default.DEFAULT_PRIORS_FILE
gold_standard_file = default.DEFAULT_GOLDSTANDARD_FILE
# The random seed for sampling, etc
random_seed = default.DEFAULT_RANDOM_SEED
# The number of inference bootstraps to run
num_bootstraps = default.DEFAULT_NUM_BOOTSTRAPS
# Flags to control splitting priors into a prior/gold-standard set
split_priors_for_gold_standard = False
split_gold_standard_for_crossvalidation = False
cv_split_ratio = default.DEFAULT_GS_SPLIT_RATIO
cv_split_axis = default.DEFAULT_GS_SPLIT_AXIS
shuffle_prior_axis = None
# Computed data structures [G: Genes, K: Predictors, N: Conditions
expression_matrix = None # expression_matrix dataframe [G x N]
tf_names = None # tf_names list [k,]
meta_data = None # meta data dataframe [G x ?]
priors_data = None # priors data dataframe [G x K]
gold_standard = None # gold standard dataframe [G x K]
# Multiprocessing controller
initialize_mp = True
multiprocessing_controller = None
def __init__(self):
# Get environment variables
self.get_environmentals()
def initialize_multiprocessing(self):
"""
Register the multiprocessing controller if set and run .connect()
"""
if self.multiprocessing_controller is not None:
MPControl.set_multiprocess_engine(self.multiprocessing_controller)
MPControl.connect()
def get_environmentals(self):
"""
Load environmental variables into class variables
"""
for k, v in utils.slurm_envs(default.SBATCH_VARS_FOR_WORKFLOW).items():
setattr(self, k, v)
def startup(self):
"""
Startup by preprocessing all data into a ready format for regression.
"""
if self.initialize_mp:
self.initialize_multiprocessing()
self.startup_run()
self.startup_finish()
def startup_run(self):
"""
Execute any data preprocessing necessary before regression. Startup_run is mostly for reading in data
"""
raise NotImplementedError # implement in subclass
def startup_finish(self):
"""
Execute any data preprocessing necessary before regression. Startup_finish is mostly for preprocessing data
prior to regression
"""
raise NotImplementedError # implement in subclass
def run(self):
"""
Execute workflow, after all configuration.
"""
raise NotImplementedError # implement in subclass
def get_data(self):
"""
Read data files in to data structures.
"""
self.read_expression()
self.read_tfs()
self.read_metadata()
self.set_gold_standard_and_priors()
def read_expression(self, file=None):
"""
Read expression matrix file into expression_matrix
"""
if file is None:
file = self.expression_matrix_file
self.expression_matrix = self.input_dataframe(file)
def read_tfs(self, file=None):
"""
Read tf names file into tf_names
"""
# Load the class variable if no file is passed
file = self.tf_names_file if file is None else file
# Read in a dataframe with no header or index
tfs = self.input_dataframe(file, header=None, index_col=None)
# Cast the dataframe into a list
assert tfs.shape[1] == 1
self.tf_names = tfs.values.flatten().tolist()
def read_metadata(self, file=None):
"""
Read metadata file into meta_data or make fake metadata
"""
if file is None:
file = self.meta_data_file
try:
self.meta_data = self.input_dataframe(file, index_col=None)
except IOError:
self.meta_data = self.create_default_meta_data(self.expression_matrix)
def set_gold_standard_and_priors(self):
"""
Read priors file into priors_data and gold standard file into gold_standard
"""
self.priors_data = self.input_dataframe(self.priors_file)
if self.split_priors_for_gold_standard:
self.split_priors_into_gold_standard()
else:
self.gold_standard = self.input_dataframe(self.gold_standard_file)
if self.split_gold_standard_for_crossvalidation:
self.cross_validate_gold_standard()
try:
check.index_values_unique(self.priors_data.index)
except ValueError as v_err:
utils.Debug.vprint("Duplicate gene(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
try:
check.index_values_unique(self.priors_data.columns)
except ValueError as v_err:
utils.Debug.vprint("Duplicate tf(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
def split_priors_into_gold_standard(self):
"""
Break priors_data in half and give half to the gold standard
"""
if self.gold_standard is not None:
utils.Debug.vprint("Existing gold standard is being replaced by a split from the prior", level=0)
self.priors_data, self.gold_standard = split_for_cv(self.priors_data,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
utils.Debug.vprint("Prior split into a prior {pr} and a gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape),
level=0)
def cross_validate_gold_standard(self):
"""
Sample the gold standard for crossvalidation, and then remove the new gold standard from the priors
"""
utils.Debug.vprint("Resampling prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
_, self.gold_standard = split_for_cv(self.gold_standard,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
self.priors_data, self.gold_standard = remove_prior_circularity(self.priors_data, self.gold_standard,
split_axis=self.cv_split_axis)
utils.Debug.vprint("Selected prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
def shuffle_priors(self):
"""
Shuffle prior labels if shuffle_prior_axis is set
"""
if self.shuffle_prior_axis is None:
return None
elif self.shuffle_prior_axis == 0:
# Shuffle index (genes) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] gene data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.index.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=0, random_state=self.random_seed)
self.priors_data.index = prior_index
elif self.shuffle_prior_axis == 1:
# Shuffle columns (TFs) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] TF data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.columns.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=1, random_state=self.random_seed)
self.priors_data.columns = prior_index
else:
raise ValueError("shuffle_prior_axis must be 0 or 1")
def input_path(self, filename):
"""
Join filename to input_dir
"""
return os.path.abspath(os.path.expanduser(os.path.join(self.input_dir, filename)))
def input_dataframe(self, filename, **kwargs):
"""
Read a file in as a pandas dataframe
"""
# Set defaults for index_col and header
kwargs['index_col'] = kwargs.pop('index_col', 0)
kwargs['header'] = kwargs.pop('header', 0)
# Use any kwargs for this function and any file settings from default
file_settings = self.file_format_settings.copy()
file_settings.update(kwargs)
# Update the file settings with anything that's in file-specific overrides
if filename in self.file_format_overrides:
file_settings.update(self.file_format_overrides[filename])
# Load a dataframe
return pd.read_csv(self.input_path(filename), **file_settings)
def append_to_path(self, var_name, to_append):
"""
Add a string to an existing path variable in class
"""
path = getattr(self, var_name, None)
if path is None:
raise ValueError("Cannot append {to_append} to {var_name} (Which is None)".format(to_append=to_append,
var_name=var_name))
setattr(self, var_name, os.path.join(path, to_append))
@staticmethod
def create_default_meta_data(expression_matrix):
"""
Create a meta_data dataframe from basic defaults
"""
metadata_rows = expression_matrix.columns.tolist()
metadata_defaults = {"isTs": "FALSE", "is1stLast": "e", "prevCol": "NA", "del.t": "NA", "condName": None}
data = {}
for key in metadata_defaults.keys():
data[key] = pd.Series(data=[metadata_defaults[key] if metadata_defaults[key] else i for i in metadata_rows])
return pd.DataFrame(data)
def filter_expression_and_priors(self):
"""
Guarantee that each row of the prior is in the expression and vice versa.
Also filter the priors to only includes columns, transcription factors, that are in the tf_names list
"""
expressed_targets = self.expression_matrix.index
expressed_or_prior = expressed_targets.union(self.priors_data.columns)
keeper_regulators = expressed_or_prior.intersection(self.tf_names)
if len(keeper_regulators) == 0 or len(expressed_targets) == 0:
raise ValueError("Filtering will result in a priors with at least one axis of 0 length")
self.priors_data = self.priors_data.reindex(expressed_targets, axis=0)
self.priors_data = self.priors_data.reindex(keeper_regulators, axis=1)
self.priors_data = pd.DataFrame.fillna(self.priors_data, 0)
self.shuffle_priors()
def get_bootstraps(self):
"""
Generate sequence of bootstrap parameter objects for run.
"""
col_range = range(self.response.shape[1])
random_state = np.random.RandomState(seed=self.random_seed)
return random_state.choice(col_range, size=(self.num_bootstraps, self.response.shape[1])).tolist()
def emit_results(self, betas, rescaled_betas, gold_standard, priors):
"""
Output result report(s) for workflow run.
"""
raise NotImplementedError # implement in subclass
def is_master(self):
"""
Return True if this is the master thread
"""
return MPControl.is_master
def create_output_dir(self):
"""
Set a default output_dir if nothing is set. Create the path if it doesn't exist.
"""
if self.output_dir is None:
new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))
try:
os.makedirs(self.output_dir)
except OSError:
pass
def create_inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):
|
def inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):
"""
Create and instantiate a workflow
:param regression: RegressionWorkflow subclass
A class object which implements the run_regression and run_bootstrap methods for a specific regression strategy
:param workflow: WorkflowBase subclass
A class object which implements the necessary data loading and preprocessing to create design & response data
for the regression strategy, and then the postprocessing to turn regression betas into a network
:return RegressWorkflow:
This returns an initialized object which is the multi-inheritance result of both the regression workflow and
the preprocessing/postprocessing workflow
"""
return create_inferelator_workflow(regression=regression, workflow=workflow)()
| """
This is the factory method to create workflow ckasses that combine preprocessing and postprocessing (from workflow)
with a regression method (from regression)
:param regression: RegressionWorkflow subclass
A class object which implements the run_regression and run_bootstrap methods for a specific regression strategy
:param workflow: WorkflowBase subclass
A class object which implements the necessary data loading and preprocessing to create design & response data
for the regression strategy, and then the postprocessing to turn regression betas into a network
:return RegressWorkflow:
This returns an uninstantiated class which is the multi-inheritance result of both the regression workflow and
the preprocessing/postprocessing workflow
"""
# Decide which preprocessing/postprocessing workflow to use
# String arguments are parsed for convenience in the run script
if isinstance(workflow, basestring):
if workflow == "base":
workflow_class = WorkflowBase
elif workflow == "tfa":
from inferelator.tfa_workflow import TFAWorkFlow
workflow_class = TFAWorkFlow
elif workflow == "amusr":
from inferelator.amusr_workflow import SingleCellMultiTask
workflow_class = SingleCellMultiTask
elif workflow == "single-cell":
from inferelator.single_cell_workflow import SingleCellWorkflow
workflow_class = SingleCellWorkflow
else:
raise ValueError("{val} is not a string that can be mapped to a workflow class".format(val=workflow))
# Or just use a workflow class directly
elif inspect.isclass(workflow) and issubclass(workflow, WorkflowBase):
workflow_class = workflow
else:
raise ValueError("Workflow must be a string that maps to a workflow class or an actual workflow class")
# Decide which regression workflow to use
# Return just the workflow if regression is set to None
if regression is None:
return workflow_class
# String arguments are parsed for convenience in the run script
elif isinstance(regression, basestring):
if regression == "bbsr":
from inferelator.regression.bbsr_python import BBSRRegressionWorkflow
regression_class = BBSRRegressionWorkflow
elif regression == "elasticnet":
from inferelator.regression.elasticnet_python import ElasticNetWorkflow
regression_class = ElasticNetWorkflow
elif regression == "amusr":
from inferelator.regression.amusr_regression import AMUSRRegressionWorkflow
regression_class = AMUSRRegressionWorkflow
else:
raise ValueError("{val} is not a string that can be mapped to a regression class".format(val=regression))
# Or just use a regression class directly
elif inspect.isclass(regression) and issubclass(regression, RegressionWorkflow):
regression_class = regression
else:
raise ValueError("Regression must be a string that maps to a regression class or an actual regression class")
class RegressWorkflow(regression_class, workflow_class):
regression_type = regression_class
return RegressWorkflow | identifier_body |
handshake.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The handshake module implements the handshake part of the protocol.
//! This module also implements additional anti-DoS mitigation,
//! by including a timestamp in each handshake initialization message.
//! Refer to the module's documentation for more information.
//! A successful handshake returns a `NoiseStream` which is defined in [socket] module.
//!
//! [socket]: network::noise_wrapper::socket
use crate::noise_wrapper::stream::NoiseStream;
use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libra_config::config::NetworkPeerInfo;
use libra_crypto::{noise, x25519};
use libra_types::PeerId;
use netcore::transport::ConnectionOrigin;
use std::{
collections::HashMap,
io,
sync::{Arc, RwLock},
time,
};
/// In a mutually authenticated network, a client message is accompanied with a timestamp.
/// This is in order to prevent replay attacks, where the attacker does not know the client's static key,
/// but can still replay a handshake message in order to force a peer into performing a few Diffie-Hellman key exchange operations.
///
/// Thus, to prevent replay attacks a responder will always check if the timestamp is strictly increasing,
/// effectively considering it as a stateful counter.
///
/// If the client timestamp has been seen before, or is not strictly increasing,
/// we can abort the handshake early and avoid heavy Diffie-Hellman computations.
/// If the client timestamp is valid, we store it.
#[derive(Default)]
pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>);
impl AntiReplayTimestamps {
/// Returns true if the timestamp has already been observed for this peer
/// or if it's an old timestamp
pub fn is_replay(&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool {
if let Some(last_timestamp) = self.0.get(&pubkey) {
×tamp <= last_timestamp
} else {
false
}
}
/// Stores the timestamp
pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) {
self.0
.entry(pubkey)
.and_modify(|last_timestamp| *last_timestamp = timestamp)
.or_insert(timestamp);
}
}
/// The timestamp is sent as a payload, so that it is encrypted.
/// Note that a millisecond value is a 16-byte value in rust,
/// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes.
const PAYLOAD_SIZE: usize = 8;
// Noise Wrapper
// -------------
// Noise by default is not aware of the above or lower protocol layers,
// We thus need to build this wrapper around Noise to both:
//
// - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages)
// - understand how long noise messages we send and receive are,
// in order to pass them to the noise implementaiton
//
/// The Noise configuration to be used to perform a protocol upgrade on an underlying socket.
pub struct NoiseWrapper(noise::NoiseConfig);
impl NoiseWrapper {
/// Create a new NoiseConfig with the provided keypair
pub fn new(key: x25519::PrivateKey) -> Self {
Self(noise::NoiseConfig::new(key))
}
/// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX
/// handshake to establish a noise stream and exchange static public keys. Upon success,
/// returns the static public key of the remote as well as a NoiseStream.
// TODO(mimoo, philp9): this code could be inlined in transport.rs once the monolithic network is done
pub async fn upgrade_connection<TSocket>(
&self,
socket: TSocket,
origin: ConnectionOrigin,
anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>,
remote_public_key: Option<x25519::PublicKey>,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// perform the noise handshake
let socket = match origin {
ConnectionOrigin::Outbound => {
let remote_public_key = match remote_public_key {
Some(key) => key,
None if cfg!(any(test, feature = "fuzzing")) => unreachable!(),
None => {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"noise: SHOULD NOT HAPPEN: missing server's key when dialing",
));
}
};
self.dial(socket, anti_replay_timestamps.is_some(), remote_public_key)
.await?
}
ConnectionOrigin::Inbound => {
self.accept(socket, anti_replay_timestamps, trusted_peers)
.await?
}
};
// return remote public key with a socket including the noise stream
let remote_public_key = socket.get_remote_static();
Ok((remote_public_key, socket))
}
pub async fn dial<TSocket>(
&self,
mut socket: TSocket,
mutual_authentication: bool,
remote_public_key: x25519::PublicKey,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// in mutual authenticated networks, send a payload of the current timestamp (in milliseconds)
let payload = if mutual_authentication {
let now: u64 = time::SystemTime::now()
.duration_since(time::UNIX_EPOCH)
.expect("system clock should work")
.as_millis() as u64;
// e.g. [157, 126, 253, 97, 114, 1, 0, 0]
let now = now.to_le_bytes().to_vec();
Some(now)
} else {
None
};
// create first handshake message (-> e, es, s, ss)
let mut rng = rand::rngs::OsRng;
let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
let initiator_state = self
.0
.initiate_connection(
&mut rng,
&[],
remote_public_key,
payload.as_ref().map(|x| &x[..]),
&mut first_message,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// write the first handshake message
socket.write_all(&first_message).await?;
// flush
socket.flush().await?;
// receive the server's response (<- e, ee, se)
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
socket.read_exact(&mut server_response).await?;
// parse the server's response
// TODO: security logging here? (mimoo)
let (_, session) = self
.0
.finalize_connection(initiator_state, &server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
pub async fn accept<TSocket>(
&self,
mut socket: TSocket,
anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// receive the initiation message
let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
socket.read_exact(&mut client_init_message).await?;
// parse it
let (their_public_key, handshake_state, payload) = self
.0
.parse_client_init_message(&[], &client_init_message)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// make sure the public key is a validator before continuing (if we're in the validator network)
if let Some(trusted_peers) = trusted_peers {
let found = trusted_peers
.read()
.map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read trusted_peers lock",
)
})?
.iter()
.any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key);
if !found { | their_public_key
),
));
}
}
// if on a mutually authenticated network
if let Some(anti_replay_timestamps) = &anti_replay_timestamps {
// check that the payload received as the client timestamp (in seconds)
if payload.len() != PAYLOAD_SIZE {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"noise: client initiated connection without an 8-byte timestamp",
));
}
let mut client_timestamp = [0u8; PAYLOAD_SIZE];
client_timestamp.copy_from_slice(&payload);
let client_timestamp = u64::from_le_bytes(client_timestamp);
// check the timestamp is not a replay
let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read anti_replay_timestamps lock",
)
})?;
if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) {
// TODO: security logging the ip + blocking the ip? (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client initiated connection with a timestamp already seen before: {}",
client_timestamp
),
));
}
// store the timestamp
anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp);
}
// construct the response
let mut rng = rand::rngs::OsRng;
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
let session = self
.0
.respond_to_client(&mut rng, handshake_state, None, &mut server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// send the response
socket.write_all(&server_response).await?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
}
//
// Tests
// -----
//
#[cfg(test)]
mod test {
use super::*;
use futures::{executor::block_on, future::join};
use libra_crypto::test_utils::TEST_SEED;
use memsocket::MemorySocket;
use std::{
collections::HashMap,
io,
sync::{Arc, RwLock},
};
use libra_crypto::traits::Uniform as _;
use rand::SeedableRng as _;
/// helper to setup two testing peers
fn build_peers() -> (
(NoiseWrapper, x25519::PublicKey),
(NoiseWrapper, x25519::PublicKey),
) {
let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED);
let client_private = x25519::PrivateKey::generate(&mut rng);
let client_public = client_private.public_key();
let server_private = x25519::PrivateKey::generate(&mut rng);
let server_public = server_private.public_key();
let client = NoiseWrapper::new(client_private);
let server = NoiseWrapper::new(server_private);
((client, client_public), (server, server_public))
}
/// helper to perform a noise handshake with two peers
fn perform_handshake(
client: NoiseWrapper,
server_public_key: x25519::PublicKey,
server: NoiseWrapper,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<(NoiseStream<MemorySocket>, NoiseStream<MemorySocket>)> {
// create an in-memory socket for testing
let (dialer_socket, listener_socket) = MemorySocket::new_pair();
let anti_replay_timestamps = Arc::new(RwLock::new(AntiReplayTimestamps::default()));
// perform the handshake
let (client_session, server_session) = block_on(join(
client.dial(dialer_socket, true, server_public_key),
server.accept(listener_socket, Some(anti_replay_timestamps), trusted_peers),
));
//
Ok((client_session?, server_session?))
}
#[test]
fn test_handshake() {
// perform handshake with two testing peers
let ((client, client_public), (server, server_public)) = build_peers();
let (client, server) = perform_handshake(client, server_public, server, None).unwrap();
assert_eq!(client.get_remote_static(), server_public,);
assert_eq!(server.get_remote_static(), client_public,);
}
} | // TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client connecting to us with an unknown public key: {}", | random_line_split |
handshake.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The handshake module implements the handshake part of the protocol.
//! This module also implements additional anti-DoS mitigation,
//! by including a timestamp in each handshake initialization message.
//! Refer to the module's documentation for more information.
//! A successful handshake returns a `NoiseStream` which is defined in [socket] module.
//!
//! [socket]: network::noise_wrapper::socket
use crate::noise_wrapper::stream::NoiseStream;
use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libra_config::config::NetworkPeerInfo;
use libra_crypto::{noise, x25519};
use libra_types::PeerId;
use netcore::transport::ConnectionOrigin;
use std::{
collections::HashMap,
io,
sync::{Arc, RwLock},
time,
};
/// In a mutually authenticated network, a client message is accompanied with a timestamp.
/// This is in order to prevent replay attacks, where the attacker does not know the client's static key,
/// but can still replay a handshake message in order to force a peer into performing a few Diffie-Hellman key exchange operations.
///
/// Thus, to prevent replay attacks a responder will always check if the timestamp is strictly increasing,
/// effectively considering it as a stateful counter.
///
/// If the client timestamp has been seen before, or is not strictly increasing,
/// we can abort the handshake early and avoid heavy Diffie-Hellman computations.
/// If the client timestamp is valid, we store it.
#[derive(Default)]
pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>);
impl AntiReplayTimestamps {
/// Returns true if the timestamp has already been observed for this peer
/// or if it's an old timestamp
pub fn is_replay(&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool {
if let Some(last_timestamp) = self.0.get(&pubkey) {
×tamp <= last_timestamp
} else {
false
}
}
/// Stores the timestamp
pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) {
self.0
.entry(pubkey)
.and_modify(|last_timestamp| *last_timestamp = timestamp)
.or_insert(timestamp);
}
}
/// The timestamp is sent as a payload, so that it is encrypted.
/// Note that a millisecond value is a 16-byte value in rust,
/// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes.
const PAYLOAD_SIZE: usize = 8;
// Noise Wrapper
// -------------
// Noise by default is not aware of the above or lower protocol layers,
// We thus need to build this wrapper around Noise to both:
//
// - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages)
// - understand how long noise messages we send and receive are,
// in order to pass them to the noise implementaiton
//
/// The Noise configuration to be used to perform a protocol upgrade on an underlying socket.
pub struct NoiseWrapper(noise::NoiseConfig);
impl NoiseWrapper {
/// Create a new NoiseConfig with the provided keypair
pub fn new(key: x25519::PrivateKey) -> Self {
Self(noise::NoiseConfig::new(key))
}
/// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX
/// handshake to establish a noise stream and exchange static public keys. Upon success,
/// returns the static public key of the remote as well as a NoiseStream.
// TODO(mimoo, philp9): this code could be inlined in transport.rs once the monolithic network is done
pub async fn upgrade_connection<TSocket>(
&self,
socket: TSocket,
origin: ConnectionOrigin,
anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>,
remote_public_key: Option<x25519::PublicKey>,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// perform the noise handshake
let socket = match origin {
ConnectionOrigin::Outbound => {
let remote_public_key = match remote_public_key {
Some(key) => key,
None if cfg!(any(test, feature = "fuzzing")) => unreachable!(),
None => |
};
self.dial(socket, anti_replay_timestamps.is_some(), remote_public_key)
.await?
}
ConnectionOrigin::Inbound => {
self.accept(socket, anti_replay_timestamps, trusted_peers)
.await?
}
};
// return remote public key with a socket including the noise stream
let remote_public_key = socket.get_remote_static();
Ok((remote_public_key, socket))
}
pub async fn dial<TSocket>(
&self,
mut socket: TSocket,
mutual_authentication: bool,
remote_public_key: x25519::PublicKey,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// in mutual authenticated networks, send a payload of the current timestamp (in milliseconds)
let payload = if mutual_authentication {
let now: u64 = time::SystemTime::now()
.duration_since(time::UNIX_EPOCH)
.expect("system clock should work")
.as_millis() as u64;
// e.g. [157, 126, 253, 97, 114, 1, 0, 0]
let now = now.to_le_bytes().to_vec();
Some(now)
} else {
None
};
// create first handshake message (-> e, es, s, ss)
let mut rng = rand::rngs::OsRng;
let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
let initiator_state = self
.0
.initiate_connection(
&mut rng,
&[],
remote_public_key,
payload.as_ref().map(|x| &x[..]),
&mut first_message,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// write the first handshake message
socket.write_all(&first_message).await?;
// flush
socket.flush().await?;
// receive the server's response (<- e, ee, se)
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
socket.read_exact(&mut server_response).await?;
// parse the server's response
// TODO: security logging here? (mimoo)
let (_, session) = self
.0
.finalize_connection(initiator_state, &server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
pub async fn accept<TSocket>(
&self,
mut socket: TSocket,
anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// receive the initiation message
let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
socket.read_exact(&mut client_init_message).await?;
// parse it
let (their_public_key, handshake_state, payload) = self
.0
.parse_client_init_message(&[], &client_init_message)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// make sure the public key is a validator before continuing (if we're in the validator network)
if let Some(trusted_peers) = trusted_peers {
let found = trusted_peers
.read()
.map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read trusted_peers lock",
)
})?
.iter()
.any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key);
if !found {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client connecting to us with an unknown public key: {}",
their_public_key
),
));
}
}
// if on a mutually authenticated network
if let Some(anti_replay_timestamps) = &anti_replay_timestamps {
// check that the payload received as the client timestamp (in seconds)
if payload.len() != PAYLOAD_SIZE {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"noise: client initiated connection without an 8-byte timestamp",
));
}
let mut client_timestamp = [0u8; PAYLOAD_SIZE];
client_timestamp.copy_from_slice(&payload);
let client_timestamp = u64::from_le_bytes(client_timestamp);
// check the timestamp is not a replay
let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read anti_replay_timestamps lock",
)
})?;
if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) {
// TODO: security logging the ip + blocking the ip? (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client initiated connection with a timestamp already seen before: {}",
client_timestamp
),
));
}
// store the timestamp
anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp);
}
// construct the response
let mut rng = rand::rngs::OsRng;
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
let session = self
.0
.respond_to_client(&mut rng, handshake_state, None, &mut server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// send the response
socket.write_all(&server_response).await?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
}
//
// Tests
// -----
//
#[cfg(test)]
mod test {
use super::*;
use futures::{executor::block_on, future::join};
use libra_crypto::test_utils::TEST_SEED;
use memsocket::MemorySocket;
use std::{
collections::HashMap,
io,
sync::{Arc, RwLock},
};
use libra_crypto::traits::Uniform as _;
use rand::SeedableRng as _;
/// helper to setup two testing peers
fn build_peers() -> (
(NoiseWrapper, x25519::PublicKey),
(NoiseWrapper, x25519::PublicKey),
) {
let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED);
let client_private = x25519::PrivateKey::generate(&mut rng);
let client_public = client_private.public_key();
let server_private = x25519::PrivateKey::generate(&mut rng);
let server_public = server_private.public_key();
let client = NoiseWrapper::new(client_private);
let server = NoiseWrapper::new(server_private);
((client, client_public), (server, server_public))
}
/// helper to perform a noise handshake with two peers
fn perform_handshake(
client: NoiseWrapper,
server_public_key: x25519::PublicKey,
server: NoiseWrapper,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<(NoiseStream<MemorySocket>, NoiseStream<MemorySocket>)> {
// create an in-memory socket for testing
let (dialer_socket, listener_socket) = MemorySocket::new_pair();
let anti_replay_timestamps = Arc::new(RwLock::new(AntiReplayTimestamps::default()));
// perform the handshake
let (client_session, server_session) = block_on(join(
client.dial(dialer_socket, true, server_public_key),
server.accept(listener_socket, Some(anti_replay_timestamps), trusted_peers),
));
//
Ok((client_session?, server_session?))
}
#[test]
fn test_handshake() {
// perform handshake with two testing peers
let ((client, client_public), (server, server_public)) = build_peers();
let (client, server) = perform_handshake(client, server_public, server, None).unwrap();
assert_eq!(client.get_remote_static(), server_public,);
assert_eq!(server.get_remote_static(), client_public,);
}
}
| {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"noise: SHOULD NOT HAPPEN: missing server's key when dialing",
));
} | conditional_block |
handshake.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The handshake module implements the handshake part of the protocol.
//! This module also implements additional anti-DoS mitigation,
//! by including a timestamp in each handshake initialization message.
//! Refer to the module's documentation for more information.
//! A successful handshake returns a `NoiseStream` which is defined in [socket] module.
//!
//! [socket]: network::noise_wrapper::socket
use crate::noise_wrapper::stream::NoiseStream;
use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libra_config::config::NetworkPeerInfo;
use libra_crypto::{noise, x25519};
use libra_types::PeerId;
use netcore::transport::ConnectionOrigin;
use std::{
collections::HashMap,
io,
sync::{Arc, RwLock},
time,
};
/// In a mutually authenticated network, a client message is accompanied with a timestamp.
/// This is in order to prevent replay attacks, where the attacker does not know the client's static key,
/// but can still replay a handshake message in order to force a peer into performing a few Diffie-Hellman key exchange operations.
///
/// Thus, to prevent replay attacks a responder will always check if the timestamp is strictly increasing,
/// effectively considering it as a stateful counter.
///
/// If the client timestamp has been seen before, or is not strictly increasing,
/// we can abort the handshake early and avoid heavy Diffie-Hellman computations.
/// If the client timestamp is valid, we store it.
#[derive(Default)]
pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>);
impl AntiReplayTimestamps {
/// Returns true if the timestamp has already been observed for this peer
/// or if it's an old timestamp
pub fn | (&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool {
if let Some(last_timestamp) = self.0.get(&pubkey) {
×tamp <= last_timestamp
} else {
false
}
}
/// Stores the timestamp
pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) {
self.0
.entry(pubkey)
.and_modify(|last_timestamp| *last_timestamp = timestamp)
.or_insert(timestamp);
}
}
/// The timestamp is sent as a payload, so that it is encrypted.
/// Note that a millisecond value is a 16-byte value in rust,
/// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes.
const PAYLOAD_SIZE: usize = 8;
// Noise Wrapper
// -------------
// Noise by default is not aware of the above or lower protocol layers,
// We thus need to build this wrapper around Noise to both:
//
// - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages)
// - understand how long noise messages we send and receive are,
// in order to pass them to the noise implementaiton
//
/// The Noise configuration to be used to perform a protocol upgrade on an underlying socket.
pub struct NoiseWrapper(noise::NoiseConfig);
impl NoiseWrapper {
/// Create a new NoiseConfig with the provided keypair
pub fn new(key: x25519::PrivateKey) -> Self {
Self(noise::NoiseConfig::new(key))
}
/// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX
/// handshake to establish a noise stream and exchange static public keys. Upon success,
/// returns the static public key of the remote as well as a NoiseStream.
// TODO(mimoo, philp9): this code could be inlined in transport.rs once the monolithic network is done
pub async fn upgrade_connection<TSocket>(
&self,
socket: TSocket,
origin: ConnectionOrigin,
anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>,
remote_public_key: Option<x25519::PublicKey>,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// perform the noise handshake
let socket = match origin {
ConnectionOrigin::Outbound => {
let remote_public_key = match remote_public_key {
Some(key) => key,
None if cfg!(any(test, feature = "fuzzing")) => unreachable!(),
None => {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"noise: SHOULD NOT HAPPEN: missing server's key when dialing",
));
}
};
self.dial(socket, anti_replay_timestamps.is_some(), remote_public_key)
.await?
}
ConnectionOrigin::Inbound => {
self.accept(socket, anti_replay_timestamps, trusted_peers)
.await?
}
};
// return remote public key with a socket including the noise stream
let remote_public_key = socket.get_remote_static();
Ok((remote_public_key, socket))
}
pub async fn dial<TSocket>(
&self,
mut socket: TSocket,
mutual_authentication: bool,
remote_public_key: x25519::PublicKey,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// in mutual authenticated networks, send a payload of the current timestamp (in milliseconds)
let payload = if mutual_authentication {
let now: u64 = time::SystemTime::now()
.duration_since(time::UNIX_EPOCH)
.expect("system clock should work")
.as_millis() as u64;
// e.g. [157, 126, 253, 97, 114, 1, 0, 0]
let now = now.to_le_bytes().to_vec();
Some(now)
} else {
None
};
// create first handshake message (-> e, es, s, ss)
let mut rng = rand::rngs::OsRng;
let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
let initiator_state = self
.0
.initiate_connection(
&mut rng,
&[],
remote_public_key,
payload.as_ref().map(|x| &x[..]),
&mut first_message,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// write the first handshake message
socket.write_all(&first_message).await?;
// flush
socket.flush().await?;
// receive the server's response (<- e, ee, se)
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
socket.read_exact(&mut server_response).await?;
// parse the server's response
// TODO: security logging here? (mimoo)
let (_, session) = self
.0
.finalize_connection(initiator_state, &server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
pub async fn accept<TSocket>(
&self,
mut socket: TSocket,
anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// receive the initiation message
let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
socket.read_exact(&mut client_init_message).await?;
// parse it
let (their_public_key, handshake_state, payload) = self
.0
.parse_client_init_message(&[], &client_init_message)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// make sure the public key is a validator before continuing (if we're in the validator network)
if let Some(trusted_peers) = trusted_peers {
let found = trusted_peers
.read()
.map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read trusted_peers lock",
)
})?
.iter()
.any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key);
if !found {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client connecting to us with an unknown public key: {}",
their_public_key
),
));
}
}
// if on a mutually authenticated network
if let Some(anti_replay_timestamps) = &anti_replay_timestamps {
// check that the payload received as the client timestamp (in seconds)
if payload.len() != PAYLOAD_SIZE {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"noise: client initiated connection without an 8-byte timestamp",
));
}
let mut client_timestamp = [0u8; PAYLOAD_SIZE];
client_timestamp.copy_from_slice(&payload);
let client_timestamp = u64::from_le_bytes(client_timestamp);
// check the timestamp is not a replay
let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read anti_replay_timestamps lock",
)
})?;
if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) {
// TODO: security logging the ip + blocking the ip? (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client initiated connection with a timestamp already seen before: {}",
client_timestamp
),
));
}
// store the timestamp
anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp);
}
// construct the response
let mut rng = rand::rngs::OsRng;
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
let session = self
.0
.respond_to_client(&mut rng, handshake_state, None, &mut server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// send the response
socket.write_all(&server_response).await?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
}
//
// Tests
// -----
//
#[cfg(test)]
mod test {
use super::*;
use futures::{executor::block_on, future::join};
use libra_crypto::test_utils::TEST_SEED;
use memsocket::MemorySocket;
use std::{
collections::HashMap,
io,
sync::{Arc, RwLock},
};
use libra_crypto::traits::Uniform as _;
use rand::SeedableRng as _;
/// helper to setup two testing peers
fn build_peers() -> (
(NoiseWrapper, x25519::PublicKey),
(NoiseWrapper, x25519::PublicKey),
) {
let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED);
let client_private = x25519::PrivateKey::generate(&mut rng);
let client_public = client_private.public_key();
let server_private = x25519::PrivateKey::generate(&mut rng);
let server_public = server_private.public_key();
let client = NoiseWrapper::new(client_private);
let server = NoiseWrapper::new(server_private);
((client, client_public), (server, server_public))
}
/// helper to perform a noise handshake with two peers
fn perform_handshake(
client: NoiseWrapper,
server_public_key: x25519::PublicKey,
server: NoiseWrapper,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<(NoiseStream<MemorySocket>, NoiseStream<MemorySocket>)> {
// create an in-memory socket for testing
let (dialer_socket, listener_socket) = MemorySocket::new_pair();
let anti_replay_timestamps = Arc::new(RwLock::new(AntiReplayTimestamps::default()));
// perform the handshake
let (client_session, server_session) = block_on(join(
client.dial(dialer_socket, true, server_public_key),
server.accept(listener_socket, Some(anti_replay_timestamps), trusted_peers),
));
//
Ok((client_session?, server_session?))
}
#[test]
fn test_handshake() {
// perform handshake with two testing peers
let ((client, client_public), (server, server_public)) = build_peers();
let (client, server) = perform_handshake(client, server_public, server, None).unwrap();
assert_eq!(client.get_remote_static(), server_public,);
assert_eq!(server.get_remote_static(), client_public,);
}
}
| is_replay | identifier_name |
acpi.rs | //! This module provides access to ACPI.
use core::convert::TryInto;
use crate::utils;
use crate::{Error, Ptr};
/// Signature of the RSDP structure.
const ACPI_RSDP_SIGNATURE: &[u8] = b"RSD PTR ";
/// Size of the SDT header.
const ACPI_SDT_SIZE: usize = core::mem::size_of::<AcpiSdtHeader>();
/// Root System Description Pointer (RSDP) structure of the ACPI 2.0 and later
/// specifications.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiRsdp20 {
signature: [u8; 8],
checksum: u8,
oem_id: [u8; 6],
revision: u8,
rsdt_addr: u32,
length: u32,
xsdt_addr: u64,
ext_checksum: u8,
reserved: [u8; 3],
}
/// Represents the Root System Description Pointer (RSDP) of ACPI 2.0+.
#[derive(Debug)]
pub struct Rsdp20 {
rsdp20: AcpiRsdp20,
}
impl Rsdp20 {
/// Creates a new `Rsdp20` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// RSDP 2.0+ structure.
///
/// # Safety
///
/// The `Rsdp20` structure is created using a pointer. Thus, this function
/// is considered unsafe.
pub unsafe fn new(rsdp20_ptr: Ptr) -> Result<Self, Error> {
let rsdp20_ptr = rsdp20_ptr.0 as *const AcpiRsdp20;
let rsdp20 = core::ptr::read_unaligned(rsdp20_ptr);
// Check table's signature.
if rsdp20.signature != ACPI_RSDP_SIGNATURE {
return Err(Error::InvalidSignature);
}
// Check table's revision.
if rsdp20.revision < 2 {
return Err(Error::InvalidRevision);
}
// Check table's checksum.
let checksum = utils::add_bytes(
&rsdp20 as *const AcpiRsdp20 as *const u8,
rsdp20.length as usize,
);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(Rsdp20 { rsdp20 })
}
/// Returns the Extended System Description Table (XSDT).
pub fn xsdt(&self) -> Result<Xsdt, Error> {
// An `Rsdp20` is only created after checking its signature, checksum
// and revision. Thus, we assume that the pointer to the XSDT
// will be valid.
unsafe { Xsdt::new(self.rsdp20.xsdt_addr.try_into()?) }
}
}
/// System Description Table types.
enum SdtType {
Xsdt,
Madt,
}
impl SdtType {
/// Returns the signature of the SDT.
fn signature(&self) -> &[u8] {
match self {
SdtType::Xsdt => b"XSDT",
SdtType::Madt => b"APIC",
}
}
}
/// System Description Table header of the ACPI specification. It is common to
/// all System Description Tables.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiSdtHeader {
signature: [u8; 4],
length: u32,
revision: u8,
checksum: u8,
oem_id: [u8; 6],
oem_table_id: [u8; 8],
oem_revision: u32,
creator_id: u32,
creator_revision: u32,
}
impl AcpiSdtHeader {
/// Creates a new `AcpiSdtHeader` from a given pointer.
///
/// # Errors
///
/// This function returns error if the signature of the table does not
/// match the provided `SdtType` or the checksum is invalid.
unsafe fn new(sdt_ptr: Ptr, sdt_type: SdtType) -> Result<Self, Error> {
// Parse SDT header.
let sdt_ptr = sdt_ptr.0 as *const AcpiSdtHeader;
let hdr = core::ptr::read_unaligned(sdt_ptr);
// Check SDT header's signature.
if hdr.signature != sdt_type.signature() {
return Err(Error::InvalidSignature);
}
// Check SDT header's checksum.
let checksum =
utils::add_bytes(sdt_ptr as *const u8, hdr.length as usize);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(hdr)
}
}
/// Maximum number of entries in the XSDT.
const ACPI_XSDT_ENTRIES_LEN: usize = 32;
/// Represents the Extended System Description Table (XSDT).
#[derive(Debug)]
pub struct Xsdt {
entries: [u64; ACPI_XSDT_ENTRIES_LEN],
num_entries: usize,
}
impl Xsdt {
/// Creates a new `Xsdt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// XSDT.
///
/// # Safety
///
/// The `Xsdt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(xsdt_ptr: Ptr) -> Result<Self, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(xsdt_ptr, SdtType::Xsdt)?;
// Calculate number of entries.
let entries_length = hdr.length as usize - ACPI_SDT_SIZE;
if entries_length % 8 != 0 {
return Err(Error::InvalidAcpiData);
}
let num_entries = entries_length / 8;
// Check that there is enough room for the entries in the fixed size
// array.
if num_entries > ACPI_XSDT_ENTRIES_LEN {
return Err(Error::BufferTooSmall);
}
// Parse entries.
let mut entries = [0u64; ACPI_XSDT_ENTRIES_LEN];
for (i, it) in entries.iter_mut().take(num_entries).enumerate() {
let ptr = (xsdt_ptr.0 as *const u8).add(ACPI_SDT_SIZE + i * 8)
as *const u64;
*it = core::ptr::read_unaligned(ptr);
}
Ok(Xsdt {
entries,
num_entries,
})
}
/// Returns the Multiple APIC Description Table (MADT).
pub fn madt(&self) -> Result<Madt, Error> {
// An `Xsdt` is only created after checking its signature and checksum
// Thus, we assume that the pointer to the MADT will be valid.
for &entry in self.entries.iter().take(self.num_entries) {
// Look for a table with the correct signature.
let ptr = entry as *const [u8; 4];
let signature = unsafe { core::ptr::read_unaligned(ptr) };
if signature == SdtType::Madt.signature() {
return unsafe { Madt::new(entry.try_into()?) };
}
}
// If we reach this point, the table could not be found.
Err(Error::NotFound)
}
}
/// Size of the SDT header.
const ACPI_MADT_FIELDS_SIZE: usize = core::mem::size_of::<AcpiMadtFields>();
/// Maximum number of entries in the MADT.
const ACPI_MADT_ENTRIES_LEN: usize = 256;
/// Extra fields of the Multiple APIC Description Table (MADT) in the ACPI
/// specification.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiMadtFields {
lapic_addr: u32,
flags: u32,
}
/// Processor Local APIC Structure in the ACPI specification.
#[repr(C, packed)]
struct AcpiMadtLapic {
ty: u8,
length: u8,
proc_uid: u8,
apic_id: u8,
flags: u32,
}
/// Represents a Processor Local APIC Structure.
#[derive(Debug, Default, Clone, Copy)]
pub struct MadtLapic {
proc_uid: u8,
apic_id: u8,
flags: u32,
}
impl MadtLapic {
/// Processor's UID.
pub fn proc_uid(&self) -> u8 {
self.proc_uid
}
/// Processor's local APIC ID.
pub fn acpi_id(&self) -> u8 {
self.apic_id
}
/// Local APIC flags.
///
/// Bit offset | Bit length | Flag
/// ---------- | ---------- | ---------------
/// 0 | 1 | Enabled
/// 1 | 1 | Online Capable
/// 2 | 30 | Reserved (zero)
pub fn flags(&self) -> u32 {
self.flags
}
}
/// Represents the Multiple APIC Description Table (MADT).
#[derive(Debug)]
pub struct Madt {
fields: AcpiMadtFields,
lapic_entries: [MadtLapic; ACPI_MADT_ENTRIES_LEN],
num_lapic_entries: usize,
}
impl Madt {
/// Creates a new `Madt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// MADT.
///
/// # Safety
///
/// The `Madt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(madt_ptr: Ptr) -> Result<Madt, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(madt_ptr, SdtType::Madt)?;
// Parse fields.
let fields = core::ptr::read_unaligned(
(madt_ptr.0 as *const u8).add(ACPI_SDT_SIZE)
as *const AcpiMadtFields,
);
// Parse entries.
let mut num_lapic_entries = 0;
let mut lapic_entries = [MadtLapic::default(); ACPI_MADT_ENTRIES_LEN];
let mut ptr = (madt_ptr.0 as *const u8)
.add(ACPI_SDT_SIZE + ACPI_MADT_FIELDS_SIZE);
let end = (madt_ptr.0 as *const u8).add(hdr.length as usize);
while ptr < end {
let ty = core::ptr::read_unaligned(ptr);
let length = core::ptr::read_unaligned(ptr.add(1));
// LAPIC.
if ty == 0 {
if num_lapic_entries >= ACPI_MADT_ENTRIES_LEN {
return Err(Error::BufferTooSmall);
}
let lapic =
core::ptr::read_unaligned(ptr as *const AcpiMadtLapic);
lapic_entries[num_lapic_entries] = MadtLapic {
proc_uid: lapic.proc_uid,
apic_id: lapic.apic_id,
flags: lapic.flags,
};
num_lapic_entries += 1;
}
ptr = ptr.add(length as usize);
}
Ok(Madt {
fields,
lapic_entries,
num_lapic_entries,
})
}
/// Local Interrupt Controller Address. In other words, the 32-bit physical
/// address at which each processor can access its local interrupt
/// controller.
pub fn lapic_addr(&self) -> u32 {
self.fields.lapic_addr
}
/// Multiple ACPI flags.
///
/// Bit offset | Bit length | Flag
/// ---------- | ---------- | ---------------
/// 0 | 1 | PCAT_COMPAT
/// 1 | 31 | Reserved (zero)
pub fn | (&self) -> u32 {
self.fields.flags
}
/// Returns the detected local APIC structures.
pub fn lapic(&self) -> &[MadtLapic] {
&self.lapic_entries[..self.num_lapic_entries]
}
}
| flags | identifier_name |
acpi.rs | //! This module provides access to ACPI.
use core::convert::TryInto;
use crate::utils;
use crate::{Error, Ptr};
/// Signature of the RSDP structure.
const ACPI_RSDP_SIGNATURE: &[u8] = b"RSD PTR ";
/// Size of the SDT header.
const ACPI_SDT_SIZE: usize = core::mem::size_of::<AcpiSdtHeader>();
/// Root System Description Pointer (RSDP) structure of the ACPI 2.0 and later
/// specifications.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiRsdp20 {
signature: [u8; 8],
checksum: u8,
oem_id: [u8; 6],
revision: u8,
rsdt_addr: u32,
length: u32,
xsdt_addr: u64,
ext_checksum: u8,
reserved: [u8; 3],
}
/// Represents the Root System Description Pointer (RSDP) of ACPI 2.0+.
#[derive(Debug)]
pub struct Rsdp20 {
rsdp20: AcpiRsdp20,
}
impl Rsdp20 {
/// Creates a new `Rsdp20` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// RSDP 2.0+ structure.
///
/// # Safety
///
/// The `Rsdp20` structure is created using a pointer. Thus, this function
/// is considered unsafe.
pub unsafe fn new(rsdp20_ptr: Ptr) -> Result<Self, Error> {
let rsdp20_ptr = rsdp20_ptr.0 as *const AcpiRsdp20;
let rsdp20 = core::ptr::read_unaligned(rsdp20_ptr);
// Check table's signature.
if rsdp20.signature != ACPI_RSDP_SIGNATURE {
return Err(Error::InvalidSignature);
}
// Check table's revision.
if rsdp20.revision < 2 {
return Err(Error::InvalidRevision);
}
// Check table's checksum.
let checksum = utils::add_bytes(
&rsdp20 as *const AcpiRsdp20 as *const u8,
rsdp20.length as usize,
);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(Rsdp20 { rsdp20 })
}
/// Returns the Extended System Description Table (XSDT).
pub fn xsdt(&self) -> Result<Xsdt, Error> {
// An `Rsdp20` is only created after checking its signature, checksum
// and revision. Thus, we assume that the pointer to the XSDT
// will be valid.
unsafe { Xsdt::new(self.rsdp20.xsdt_addr.try_into()?) }
}
}
/// System Description Table types.
enum SdtType {
Xsdt,
Madt,
}
impl SdtType {
/// Returns the signature of the SDT.
fn signature(&self) -> &[u8] {
match self {
SdtType::Xsdt => b"XSDT",
SdtType::Madt => b"APIC",
}
}
}
/// System Description Table header of the ACPI specification. It is common to
/// all System Description Tables.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiSdtHeader {
signature: [u8; 4],
length: u32,
revision: u8,
checksum: u8,
oem_id: [u8; 6],
oem_table_id: [u8; 8],
oem_revision: u32,
creator_id: u32,
creator_revision: u32,
}
impl AcpiSdtHeader {
/// Creates a new `AcpiSdtHeader` from a given pointer.
///
/// # Errors
///
/// This function returns error if the signature of the table does not
/// match the provided `SdtType` or the checksum is invalid.
unsafe fn new(sdt_ptr: Ptr, sdt_type: SdtType) -> Result<Self, Error> {
// Parse SDT header.
let sdt_ptr = sdt_ptr.0 as *const AcpiSdtHeader;
let hdr = core::ptr::read_unaligned(sdt_ptr);
// Check SDT header's signature.
if hdr.signature != sdt_type.signature() {
return Err(Error::InvalidSignature);
}
// Check SDT header's checksum.
let checksum =
utils::add_bytes(sdt_ptr as *const u8, hdr.length as usize);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(hdr)
}
}
/// Maximum number of entries in the XSDT.
const ACPI_XSDT_ENTRIES_LEN: usize = 32;
/// Represents the Extended System Description Table (XSDT).
#[derive(Debug)]
pub struct Xsdt {
entries: [u64; ACPI_XSDT_ENTRIES_LEN],
num_entries: usize,
}
impl Xsdt {
/// Creates a new `Xsdt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// XSDT.
///
/// # Safety
///
/// The `Xsdt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(xsdt_ptr: Ptr) -> Result<Self, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(xsdt_ptr, SdtType::Xsdt)?;
// Calculate number of entries.
let entries_length = hdr.length as usize - ACPI_SDT_SIZE;
if entries_length % 8 != 0 {
return Err(Error::InvalidAcpiData);
}
let num_entries = entries_length / 8;
// Check that there is enough room for the entries in the fixed size
// array.
if num_entries > ACPI_XSDT_ENTRIES_LEN {
return Err(Error::BufferTooSmall);
}
// Parse entries.
let mut entries = [0u64; ACPI_XSDT_ENTRIES_LEN];
for (i, it) in entries.iter_mut().take(num_entries).enumerate() {
let ptr = (xsdt_ptr.0 as *const u8).add(ACPI_SDT_SIZE + i * 8)
as *const u64;
*it = core::ptr::read_unaligned(ptr);
}
Ok(Xsdt {
entries,
num_entries,
})
}
/// Returns the Multiple APIC Description Table (MADT).
pub fn madt(&self) -> Result<Madt, Error> {
// An `Xsdt` is only created after checking its signature and checksum
// Thus, we assume that the pointer to the MADT will be valid.
for &entry in self.entries.iter().take(self.num_entries) {
// Look for a table with the correct signature.
let ptr = entry as *const [u8; 4];
let signature = unsafe { core::ptr::read_unaligned(ptr) };
if signature == SdtType::Madt.signature() |
}
// If we reach this point, the table could not be found.
Err(Error::NotFound)
}
}
/// Size of the SDT header.
const ACPI_MADT_FIELDS_SIZE: usize = core::mem::size_of::<AcpiMadtFields>();
/// Maximum number of entries in the MADT.
const ACPI_MADT_ENTRIES_LEN: usize = 256;
/// Extra fields of the Multiple APIC Description Table (MADT) in the ACPI
/// specification.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiMadtFields {
lapic_addr: u32,
flags: u32,
}
/// Processor Local APIC Structure in the ACPI specification.
#[repr(C, packed)]
struct AcpiMadtLapic {
ty: u8,
length: u8,
proc_uid: u8,
apic_id: u8,
flags: u32,
}
/// Represents a Processor Local APIC Structure.
#[derive(Debug, Default, Clone, Copy)]
pub struct MadtLapic {
proc_uid: u8,
apic_id: u8,
flags: u32,
}
impl MadtLapic {
/// Processor's UID.
pub fn proc_uid(&self) -> u8 {
self.proc_uid
}
/// Processor's local APIC ID.
pub fn acpi_id(&self) -> u8 {
self.apic_id
}
/// Local APIC flags.
///
/// Bit offset | Bit length | Flag
/// ---------- | ---------- | ---------------
/// 0 | 1 | Enabled
/// 1 | 1 | Online Capable
/// 2 | 30 | Reserved (zero)
pub fn flags(&self) -> u32 {
self.flags
}
}
/// Represents the Multiple APIC Description Table (MADT).
#[derive(Debug)]
pub struct Madt {
fields: AcpiMadtFields,
lapic_entries: [MadtLapic; ACPI_MADT_ENTRIES_LEN],
num_lapic_entries: usize,
}
impl Madt {
/// Creates a new `Madt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// MADT.
///
/// # Safety
///
/// The `Madt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(madt_ptr: Ptr) -> Result<Madt, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(madt_ptr, SdtType::Madt)?;
// Parse fields.
let fields = core::ptr::read_unaligned(
(madt_ptr.0 as *const u8).add(ACPI_SDT_SIZE)
as *const AcpiMadtFields,
);
// Parse entries.
let mut num_lapic_entries = 0;
let mut lapic_entries = [MadtLapic::default(); ACPI_MADT_ENTRIES_LEN];
let mut ptr = (madt_ptr.0 as *const u8)
.add(ACPI_SDT_SIZE + ACPI_MADT_FIELDS_SIZE);
let end = (madt_ptr.0 as *const u8).add(hdr.length as usize);
while ptr < end {
let ty = core::ptr::read_unaligned(ptr);
let length = core::ptr::read_unaligned(ptr.add(1));
// LAPIC.
if ty == 0 {
if num_lapic_entries >= ACPI_MADT_ENTRIES_LEN {
return Err(Error::BufferTooSmall);
}
let lapic =
core::ptr::read_unaligned(ptr as *const AcpiMadtLapic);
lapic_entries[num_lapic_entries] = MadtLapic {
proc_uid: lapic.proc_uid,
apic_id: lapic.apic_id,
flags: lapic.flags,
};
num_lapic_entries += 1;
}
ptr = ptr.add(length as usize);
}
Ok(Madt {
fields,
lapic_entries,
num_lapic_entries,
})
}
/// Local Interrupt Controller Address. In other words, the 32-bit physical
/// address at which each processor can access its local interrupt
/// controller.
pub fn lapic_addr(&self) -> u32 {
self.fields.lapic_addr
}
/// Multiple ACPI flags.
///
/// Bit offset | Bit length | Flag
/// ---------- | ---------- | ---------------
/// 0 | 1 | PCAT_COMPAT
/// 1 | 31 | Reserved (zero)
pub fn flags(&self) -> u32 {
self.fields.flags
}
/// Returns the detected local APIC structures.
pub fn lapic(&self) -> &[MadtLapic] {
&self.lapic_entries[..self.num_lapic_entries]
}
}
| {
return unsafe { Madt::new(entry.try_into()?) };
} | conditional_block |
acpi.rs | //! This module provides access to ACPI.
use core::convert::TryInto;
use crate::utils;
use crate::{Error, Ptr};
/// Signature of the RSDP structure.
const ACPI_RSDP_SIGNATURE: &[u8] = b"RSD PTR ";
/// Size of the SDT header.
const ACPI_SDT_SIZE: usize = core::mem::size_of::<AcpiSdtHeader>();
/// Root System Description Pointer (RSDP) structure of the ACPI 2.0 and later
/// specifications.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiRsdp20 {
signature: [u8; 8],
checksum: u8,
oem_id: [u8; 6],
revision: u8,
rsdt_addr: u32,
length: u32,
xsdt_addr: u64,
ext_checksum: u8,
reserved: [u8; 3],
}
/// Represents the Root System Description Pointer (RSDP) of ACPI 2.0+.
#[derive(Debug)]
pub struct Rsdp20 {
rsdp20: AcpiRsdp20,
}
impl Rsdp20 {
/// Creates a new `Rsdp20` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// RSDP 2.0+ structure.
///
/// # Safety
///
/// The `Rsdp20` structure is created using a pointer. Thus, this function
/// is considered unsafe.
pub unsafe fn new(rsdp20_ptr: Ptr) -> Result<Self, Error> {
let rsdp20_ptr = rsdp20_ptr.0 as *const AcpiRsdp20;
let rsdp20 = core::ptr::read_unaligned(rsdp20_ptr);
// Check table's signature.
if rsdp20.signature != ACPI_RSDP_SIGNATURE {
return Err(Error::InvalidSignature);
}
// Check table's revision.
if rsdp20.revision < 2 {
return Err(Error::InvalidRevision);
}
// Check table's checksum.
let checksum = utils::add_bytes(
&rsdp20 as *const AcpiRsdp20 as *const u8,
rsdp20.length as usize,
);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(Rsdp20 { rsdp20 })
}
/// Returns the Extended System Description Table (XSDT).
pub fn xsdt(&self) -> Result<Xsdt, Error> {
// An `Rsdp20` is only created after checking its signature, checksum
// and revision. Thus, we assume that the pointer to the XSDT
// will be valid.
unsafe { Xsdt::new(self.rsdp20.xsdt_addr.try_into()?) }
}
}
/// System Description Table types.
enum SdtType {
Xsdt,
Madt,
}
impl SdtType {
/// Returns the signature of the SDT.
fn signature(&self) -> &[u8] {
match self {
SdtType::Xsdt => b"XSDT",
SdtType::Madt => b"APIC",
}
}
}
/// System Description Table header of the ACPI specification. It is common to
/// all System Description Tables.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiSdtHeader {
signature: [u8; 4],
length: u32,
revision: u8,
checksum: u8,
oem_id: [u8; 6],
oem_table_id: [u8; 8],
oem_revision: u32,
creator_id: u32,
creator_revision: u32,
}
impl AcpiSdtHeader {
/// Creates a new `AcpiSdtHeader` from a given pointer.
///
/// # Errors
///
/// This function returns error if the signature of the table does not
/// match the provided `SdtType` or the checksum is invalid.
unsafe fn new(sdt_ptr: Ptr, sdt_type: SdtType) -> Result<Self, Error> {
// Parse SDT header.
let sdt_ptr = sdt_ptr.0 as *const AcpiSdtHeader;
let hdr = core::ptr::read_unaligned(sdt_ptr);
// Check SDT header's signature.
if hdr.signature != sdt_type.signature() {
return Err(Error::InvalidSignature);
}
// Check SDT header's checksum.
let checksum =
utils::add_bytes(sdt_ptr as *const u8, hdr.length as usize);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(hdr)
}
}
/// Maximum number of entries in the XSDT.
const ACPI_XSDT_ENTRIES_LEN: usize = 32;
/// Represents the Extended System Description Table (XSDT).
#[derive(Debug)]
pub struct Xsdt {
entries: [u64; ACPI_XSDT_ENTRIES_LEN],
num_entries: usize,
}
impl Xsdt {
/// Creates a new `Xsdt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// XSDT.
///
/// # Safety
///
/// The `Xsdt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(xsdt_ptr: Ptr) -> Result<Self, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(xsdt_ptr, SdtType::Xsdt)?;
// Calculate number of entries.
let entries_length = hdr.length as usize - ACPI_SDT_SIZE;
if entries_length % 8 != 0 {
return Err(Error::InvalidAcpiData);
}
let num_entries = entries_length / 8;
// Check that there is enough room for the entries in the fixed size
// array.
if num_entries > ACPI_XSDT_ENTRIES_LEN {
return Err(Error::BufferTooSmall);
}
// Parse entries.
let mut entries = [0u64; ACPI_XSDT_ENTRIES_LEN];
for (i, it) in entries.iter_mut().take(num_entries).enumerate() {
let ptr = (xsdt_ptr.0 as *const u8).add(ACPI_SDT_SIZE + i * 8)
as *const u64;
*it = core::ptr::read_unaligned(ptr);
}
Ok(Xsdt {
entries,
num_entries,
})
}
/// Returns the Multiple APIC Description Table (MADT).
pub fn madt(&self) -> Result<Madt, Error> {
// An `Xsdt` is only created after checking its signature and checksum
// Thus, we assume that the pointer to the MADT will be valid.
for &entry in self.entries.iter().take(self.num_entries) {
// Look for a table with the correct signature.
let ptr = entry as *const [u8; 4];
let signature = unsafe { core::ptr::read_unaligned(ptr) };
if signature == SdtType::Madt.signature() {
return unsafe { Madt::new(entry.try_into()?) };
}
}
// If we reach this point, the table could not be found.
Err(Error::NotFound)
}
}
/// Size of the SDT header.
const ACPI_MADT_FIELDS_SIZE: usize = core::mem::size_of::<AcpiMadtFields>();
/// Maximum number of entries in the MADT.
const ACPI_MADT_ENTRIES_LEN: usize = 256;
/// Extra fields of the Multiple APIC Description Table (MADT) in the ACPI
/// specification.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiMadtFields {
lapic_addr: u32,
flags: u32,
}
/// Processor Local APIC Structure in the ACPI specification.
#[repr(C, packed)]
struct AcpiMadtLapic {
ty: u8,
length: u8,
proc_uid: u8,
apic_id: u8,
flags: u32,
}
/// Represents a Processor Local APIC Structure.
#[derive(Debug, Default, Clone, Copy)]
pub struct MadtLapic {
proc_uid: u8,
apic_id: u8,
flags: u32,
}
impl MadtLapic {
/// Processor's UID.
pub fn proc_uid(&self) -> u8 {
self.proc_uid
}
/// Processor's local APIC ID.
pub fn acpi_id(&self) -> u8 {
self.apic_id
}
/// Local APIC flags.
///
/// Bit offset | Bit length | Flag
/// ---------- | ---------- | ---------------
/// 0 | 1 | Enabled
/// 1 | 1 | Online Capable
/// 2 | 30 | Reserved (zero)
pub fn flags(&self) -> u32 {
self.flags
}
}
/// Represents the Multiple APIC Description Table (MADT).
#[derive(Debug)]
pub struct Madt {
fields: AcpiMadtFields,
lapic_entries: [MadtLapic; ACPI_MADT_ENTRIES_LEN],
num_lapic_entries: usize,
}
impl Madt {
/// Creates a new `Madt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// MADT.
///
/// # Safety
///
/// The `Madt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(madt_ptr: Ptr) -> Result<Madt, Error> |
/// Local Interrupt Controller Address. In other words, the 32-bit physical
/// address at which each processor can access its local interrupt
/// controller.
pub fn lapic_addr(&self) -> u32 {
self.fields.lapic_addr
}
/// Multiple ACPI flags.
///
/// Bit offset | Bit length | Flag
/// ---------- | ---------- | ---------------
/// 0 | 1 | PCAT_COMPAT
/// 1 | 31 | Reserved (zero)
pub fn flags(&self) -> u32 {
self.fields.flags
}
/// Returns the detected local APIC structures.
pub fn lapic(&self) -> &[MadtLapic] {
&self.lapic_entries[..self.num_lapic_entries]
}
}
| {
// Parse header.
let hdr = AcpiSdtHeader::new(madt_ptr, SdtType::Madt)?;
// Parse fields.
let fields = core::ptr::read_unaligned(
(madt_ptr.0 as *const u8).add(ACPI_SDT_SIZE)
as *const AcpiMadtFields,
);
// Parse entries.
let mut num_lapic_entries = 0;
let mut lapic_entries = [MadtLapic::default(); ACPI_MADT_ENTRIES_LEN];
let mut ptr = (madt_ptr.0 as *const u8)
.add(ACPI_SDT_SIZE + ACPI_MADT_FIELDS_SIZE);
let end = (madt_ptr.0 as *const u8).add(hdr.length as usize);
while ptr < end {
let ty = core::ptr::read_unaligned(ptr);
let length = core::ptr::read_unaligned(ptr.add(1));
// LAPIC.
if ty == 0 {
if num_lapic_entries >= ACPI_MADT_ENTRIES_LEN {
return Err(Error::BufferTooSmall);
}
let lapic =
core::ptr::read_unaligned(ptr as *const AcpiMadtLapic);
lapic_entries[num_lapic_entries] = MadtLapic {
proc_uid: lapic.proc_uid,
apic_id: lapic.apic_id,
flags: lapic.flags,
};
num_lapic_entries += 1;
}
ptr = ptr.add(length as usize);
}
Ok(Madt {
fields,
lapic_entries,
num_lapic_entries,
})
} | identifier_body |
acpi.rs | //! This module provides access to ACPI.
use core::convert::TryInto;
use crate::utils;
use crate::{Error, Ptr};
/// Signature of the RSDP structure.
const ACPI_RSDP_SIGNATURE: &[u8] = b"RSD PTR ";
/// Size of the SDT header.
const ACPI_SDT_SIZE: usize = core::mem::size_of::<AcpiSdtHeader>();
/// Root System Description Pointer (RSDP) structure of the ACPI 2.0 and later
/// specifications.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiRsdp20 {
signature: [u8; 8],
checksum: u8,
oem_id: [u8; 6],
revision: u8,
rsdt_addr: u32,
length: u32,
xsdt_addr: u64,
ext_checksum: u8,
reserved: [u8; 3],
}
/// Represents the Root System Description Pointer (RSDP) of ACPI 2.0+.
#[derive(Debug)]
pub struct Rsdp20 {
rsdp20: AcpiRsdp20,
}
impl Rsdp20 {
/// Creates a new `Rsdp20` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// RSDP 2.0+ structure.
///
/// # Safety
///
/// The `Rsdp20` structure is created using a pointer. Thus, this function
/// is considered unsafe.
pub unsafe fn new(rsdp20_ptr: Ptr) -> Result<Self, Error> {
let rsdp20_ptr = rsdp20_ptr.0 as *const AcpiRsdp20;
let rsdp20 = core::ptr::read_unaligned(rsdp20_ptr);
// Check table's signature.
if rsdp20.signature != ACPI_RSDP_SIGNATURE {
return Err(Error::InvalidSignature);
}
// Check table's revision.
if rsdp20.revision < 2 {
return Err(Error::InvalidRevision);
}
// Check table's checksum.
let checksum = utils::add_bytes(
&rsdp20 as *const AcpiRsdp20 as *const u8,
rsdp20.length as usize,
);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(Rsdp20 { rsdp20 })
}
/// Returns the Extended System Description Table (XSDT).
pub fn xsdt(&self) -> Result<Xsdt, Error> {
// An `Rsdp20` is only created after checking its signature, checksum
// and revision. Thus, we assume that the pointer to the XSDT
// will be valid.
unsafe { Xsdt::new(self.rsdp20.xsdt_addr.try_into()?) }
}
}
/// System Description Table types.
enum SdtType { | Xsdt,
Madt,
}
impl SdtType {
/// Returns the signature of the SDT.
fn signature(&self) -> &[u8] {
match self {
SdtType::Xsdt => b"XSDT",
SdtType::Madt => b"APIC",
}
}
}
/// System Description Table header of the ACPI specification. It is common to
/// all System Description Tables.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiSdtHeader {
signature: [u8; 4],
length: u32,
revision: u8,
checksum: u8,
oem_id: [u8; 6],
oem_table_id: [u8; 8],
oem_revision: u32,
creator_id: u32,
creator_revision: u32,
}
impl AcpiSdtHeader {
/// Creates a new `AcpiSdtHeader` from a given pointer.
///
/// # Errors
///
/// This function returns error if the signature of the table does not
/// match the provided `SdtType` or the checksum is invalid.
unsafe fn new(sdt_ptr: Ptr, sdt_type: SdtType) -> Result<Self, Error> {
// Parse SDT header.
let sdt_ptr = sdt_ptr.0 as *const AcpiSdtHeader;
let hdr = core::ptr::read_unaligned(sdt_ptr);
// Check SDT header's signature.
if hdr.signature != sdt_type.signature() {
return Err(Error::InvalidSignature);
}
// Check SDT header's checksum.
let checksum =
utils::add_bytes(sdt_ptr as *const u8, hdr.length as usize);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(hdr)
}
}
/// Maximum number of entries in the XSDT.
const ACPI_XSDT_ENTRIES_LEN: usize = 32;
/// Represents the Extended System Description Table (XSDT).
#[derive(Debug)]
pub struct Xsdt {
entries: [u64; ACPI_XSDT_ENTRIES_LEN],
num_entries: usize,
}
impl Xsdt {
/// Creates a new `Xsdt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// XSDT.
///
/// # Safety
///
/// The `Xsdt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(xsdt_ptr: Ptr) -> Result<Self, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(xsdt_ptr, SdtType::Xsdt)?;
// Calculate number of entries.
let entries_length = hdr.length as usize - ACPI_SDT_SIZE;
if entries_length % 8 != 0 {
return Err(Error::InvalidAcpiData);
}
let num_entries = entries_length / 8;
// Check that there is enough room for the entries in the fixed size
// array.
if num_entries > ACPI_XSDT_ENTRIES_LEN {
return Err(Error::BufferTooSmall);
}
// Parse entries.
let mut entries = [0u64; ACPI_XSDT_ENTRIES_LEN];
for (i, it) in entries.iter_mut().take(num_entries).enumerate() {
let ptr = (xsdt_ptr.0 as *const u8).add(ACPI_SDT_SIZE + i * 8)
as *const u64;
*it = core::ptr::read_unaligned(ptr);
}
Ok(Xsdt {
entries,
num_entries,
})
}
/// Returns the Multiple APIC Description Table (MADT).
pub fn madt(&self) -> Result<Madt, Error> {
// An `Xsdt` is only created after checking its signature and checksum
// Thus, we assume that the pointer to the MADT will be valid.
for &entry in self.entries.iter().take(self.num_entries) {
// Look for a table with the correct signature.
let ptr = entry as *const [u8; 4];
let signature = unsafe { core::ptr::read_unaligned(ptr) };
if signature == SdtType::Madt.signature() {
return unsafe { Madt::new(entry.try_into()?) };
}
}
// If we reach this point, the table could not be found.
Err(Error::NotFound)
}
}
/// Size of the SDT header.
const ACPI_MADT_FIELDS_SIZE: usize = core::mem::size_of::<AcpiMadtFields>();
/// Maximum number of entries in the MADT.
const ACPI_MADT_ENTRIES_LEN: usize = 256;
/// Extra fields of the Multiple APIC Description Table (MADT) in the ACPI
/// specification.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiMadtFields {
lapic_addr: u32,
flags: u32,
}
/// Processor Local APIC Structure in the ACPI specification.
#[repr(C, packed)]
struct AcpiMadtLapic {
ty: u8,
length: u8,
proc_uid: u8,
apic_id: u8,
flags: u32,
}
/// Represents a Processor Local APIC Structure.
#[derive(Debug, Default, Clone, Copy)]
pub struct MadtLapic {
proc_uid: u8,
apic_id: u8,
flags: u32,
}
impl MadtLapic {
/// Processor's UID.
pub fn proc_uid(&self) -> u8 {
self.proc_uid
}
/// Processor's local APIC ID.
pub fn acpi_id(&self) -> u8 {
self.apic_id
}
/// Local APIC flags.
///
/// Bit offset | Bit length | Flag
/// ---------- | ---------- | ---------------
/// 0 | 1 | Enabled
/// 1 | 1 | Online Capable
/// 2 | 30 | Reserved (zero)
pub fn flags(&self) -> u32 {
self.flags
}
}
/// Represents the Multiple APIC Description Table (MADT).
#[derive(Debug)]
pub struct Madt {
fields: AcpiMadtFields,
lapic_entries: [MadtLapic; ACPI_MADT_ENTRIES_LEN],
num_lapic_entries: usize,
}
impl Madt {
/// Creates a new `Madt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// MADT.
///
/// # Safety
///
/// The `Madt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(madt_ptr: Ptr) -> Result<Madt, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(madt_ptr, SdtType::Madt)?;
// Parse fields.
let fields = core::ptr::read_unaligned(
(madt_ptr.0 as *const u8).add(ACPI_SDT_SIZE)
as *const AcpiMadtFields,
);
// Parse entries.
let mut num_lapic_entries = 0;
let mut lapic_entries = [MadtLapic::default(); ACPI_MADT_ENTRIES_LEN];
let mut ptr = (madt_ptr.0 as *const u8)
.add(ACPI_SDT_SIZE + ACPI_MADT_FIELDS_SIZE);
let end = (madt_ptr.0 as *const u8).add(hdr.length as usize);
while ptr < end {
let ty = core::ptr::read_unaligned(ptr);
let length = core::ptr::read_unaligned(ptr.add(1));
// LAPIC.
if ty == 0 {
if num_lapic_entries >= ACPI_MADT_ENTRIES_LEN {
return Err(Error::BufferTooSmall);
}
let lapic =
core::ptr::read_unaligned(ptr as *const AcpiMadtLapic);
lapic_entries[num_lapic_entries] = MadtLapic {
proc_uid: lapic.proc_uid,
apic_id: lapic.apic_id,
flags: lapic.flags,
};
num_lapic_entries += 1;
}
ptr = ptr.add(length as usize);
}
Ok(Madt {
fields,
lapic_entries,
num_lapic_entries,
})
}
/// Local Interrupt Controller Address. In other words, the 32-bit physical
/// address at which each processor can access its local interrupt
/// controller.
pub fn lapic_addr(&self) -> u32 {
self.fields.lapic_addr
}
/// Multiple ACPI flags.
///
/// Bit offset | Bit length | Flag
/// ---------- | ---------- | ---------------
/// 0 | 1 | PCAT_COMPAT
/// 1 | 31 | Reserved (zero)
pub fn flags(&self) -> u32 {
self.fields.flags
}
/// Returns the detected local APIC structures.
pub fn lapic(&self) -> &[MadtLapic] {
&self.lapic_entries[..self.num_lapic_entries]
}
} | random_line_split | |
snapshot.rs | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#[cfg(target_family = "unix")]
use crate::disk_usage;
use crate::{
format_error,
image::{Block, Image},
};
use clap::ValueEnum;
use elf::{abi::PT_LOAD, endian::NativeEndian, segment::ProgramHeader};
#[cfg(not(target_family = "unix"))]
use std::env::consts::OS;
use std::{
fs::{metadata, OpenOptions},
num::NonZeroU64,
ops::Range,
path::{Path, PathBuf},
};
#[derive(thiserror::Error)]
pub enum Error {
#[error("unable to parse elf structures: {0}")]
Elf(elf::ParseError),
#[error("locked down /proc/kcore")]
LockedDownKcore,
#[error(
"estimated usage exceeds specified bounds: estimated size:{estimated} bytes. allowed:{allowed} bytes"
)]
DiskUsageEstimateExceeded { estimated: u64, allowed: u64 },
#[error("unable to create memory snapshot")]
UnableToCreateMemorySnapshot(#[from] crate::image::Error),
#[error("unable to create memory snapshot from source: {1}")]
UnableToCreateSnapshotFromSource(#[source] Box<Error>, Source),
#[error("unable to create memory snapshot: {0}")]
UnableToCreateSnapshot(String),
#[error("{0}: {1}")]
Other(&'static str, String),
#[error("disk error")]
Disk(#[source] std::io::Error),
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
format_error(self, f)
}
}
pub(crate) type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Clone, ValueEnum)]
pub enum Source {
/// Provides a read-only view of physical memory. Access to memory using
/// this device must be paged aligned and read one page at a time.
///
/// On RHEL based distributions, this device is frequently provided by
/// default. A loadable kernel module version is available as part of
/// the Linux utility `crash`:
/// <https://github.com/crash-utility/crash/tree/master/memory_driver>
#[value(name = "/dev/crash")]
DevCrash,
/// Provides a read-write view of physical memory, though AVML opens it in a
/// read-only fashion. Access to to memory using this device can be
/// disabled using the kernel configuration options `CONFIG_STRICT_DEVMEM`
/// or `CONFIG_IO_STRICT_DEVMEM`.
///
/// With `CONFIG_STRICT_DEVMEM`, only the first 1MB of memory can be
/// accessed.
#[value(name = "/dev/mem")]
DevMem,
/// Provides a virtual ELF coredump of kernel memory. This can be used to
/// access physical memory.
///
/// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but
/// is either inaccessible or doesn't allow access to all of the kernel
/// memory.
#[value(name = "/proc/kcore")]
ProcKcore,
/// User-specified path to a raw memory file
#[value(skip)]
Raw(PathBuf),
}
impl std::fmt::Display for Source {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::DevCrash => write!(f, "/dev/crash"),
Self::DevMem => write!(f, "/dev/mem"),
Self::ProcKcore => write!(f, "/proc/kcore"),
Self::Raw(path) => write!(f, "{}", path.display()),
}
}
}
#[must_use]
fn can_open(src: &Path) -> bool {
OpenOptions::new().read(true).open(src).is_ok()
}
// The file /proc/kcore is a pseudo-file in ELF core format that is 4KB+physical
// memory in size.
//
// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but is
// either inaccessible or doesn't allow access to all of the kernel memory.
//
// /dev/mem and /dev/crash, if available, are devices, rather than virtual
// files. As such, we don't check those for size.
#[must_use]
fn is_kcore_ok() -> bool {
metadata(Path::new("/proc/kcore"))
.map(|x| x.len() > 0x2000)
.unwrap_or(false)
&& can_open(Path::new("/proc/kcore"))
}
// try to perform an action, either returning on success, or having the result
// of the error in an indented string.
//
// This special cases `DiskUsageEstimateExceeded` errors, as we want this to
// fail fast and bail out of the `try_method` caller.
macro_rules! try_method {
($func:expr) => {{
match $func {
Ok(x) => return Ok(x),
Err(err) => {
if matches!(
err,
Error::UnableToCreateSnapshotFromSource(ref x, _) if matches!(x.as_ref(), Error::DiskUsageEstimateExceeded{..}),
) {
return Err(err);
}
crate::indent(format!("{:?}", err), 4)
}
}
}};
}
pub struct Snapshot<'a, 'b> {
source: Option<&'b Source>,
destination: &'a Path,
memory_ranges: Vec<Range<u64>>,
version: u32,
max_disk_usage: Option<NonZeroU64>,
max_disk_usage_percentage: Option<f64>,
}
impl<'a, 'b> Snapshot<'a, 'b> {
/// Create a new memory snapshot.
///
/// The default version implements the `LiME` format.
#[must_use]
pub fn new(destination: &'a Path, memory_ranges: Vec<Range<u64>>) -> Self {
Self {
source: None,
destination,
memory_ranges,
version: 1,
max_disk_usage: None,
max_disk_usage_percentage: None,
}
}
/// Specify the maximum disk usage to stay under as a percentage
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage_percentage(self, max_disk_usage_percentage: Option<f64>) -> Self {
Self {
max_disk_usage_percentage,
..self
}
}
/// Specify the maximum disk space in MB to use
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage(self, max_disk_usage: Option<NonZeroU64>) -> Self {
Self {
max_disk_usage,
..self
}
}
/// Specify the source for creating the snapshot
#[must_use]
pub fn source(self, source: Option<&'b Source>) -> Self {
Self { source, ..self }
}
/// Specify the version of the snapshot format
#[must_use]
pub fn version(self, version: u32) -> Self {
Self { version, ..self }
}
fn create_source(&self, src: &Source) -> Result<()> {
match src {
Source::ProcKcore => self.kcore(),
Source::DevCrash => self.phys(Path::new("/dev/crash")),
Source::DevMem => self.phys(Path::new("/dev/mem")),
Source::Raw(s) => self.phys(s),
}
.map_err(|e| Error::UnableToCreateSnapshotFromSource(Box::new(e), src.clone()))
}
/// Create a memory snapshot
pub fn create(&self) -> Result<()> {
if let Some(src) = self.source {
self.create_source(src)?;
} else if self.destination == Path::new("/dev/stdout") {
// If we're writing to stdout, we can't start over if reading from a
// source fails. As such, we need to do more work to pick a source
// rather than just trying all available options.
if is_kcore_ok() {
self.create_source(&Source::ProcKcore)?;
} else if can_open(Path::new("/dev/crash")) {
self.create_source(&Source::DevCrash)?;
} else if can_open(Path::new("/dev/mem")) {
self.create_source(&Source::DevMem)?;
} else {
return Err(Error::UnableToCreateSnapshot(
"no source available".to_string(),
));
}
} else {
let crash_err = try_method!(self.create_source(&Source::DevCrash));
let kcore_err = try_method!(self.create_source(&Source::ProcKcore));
let devmem_err = try_method!(self.create_source(&Source::DevMem));
let reason = [String::new(), crash_err, kcore_err, devmem_err].join("\n");
return Err(Error::UnableToCreateSnapshot(crate::indent(reason, 4)));
}
Ok(())
}
// given a set of ranges from iomem and a set of Blocks derived from the
// pseudo-elf phys section headers, derive a set of ranges that can be used
// to create a snapshot.
fn find_kcore_blocks(ranges: &[Range<u64>], headers: &[Block]) -> Vec<Block> {
let mut result = vec![];
'outer: for range in ranges {
let mut range = range.clone();
'inner: for header in headers {
match (
header.range.contains(&range.start),
// TODO: ranges is currently inclusive, but not a
// RangeInclusive. this should be adjusted.
header.range.contains(&(range.end - 1)),
) {
(true, true) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.clone(),
};
result.push(block);
continue 'outer;
}
(true, false) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.start..header.range.end,
};
result.push(block);
range.start = header.range.end;
}
_ => {
continue 'inner;
}
};
}
}
result
}
/// Check disk usage of the destination
///
/// NOTE: This requires `Image` because we want to ensure this is called
/// after the file is created.
#[cfg(target_family = "unix")]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
disk_usage::check(
self.destination,
&self.memory_ranges,
self.max_disk_usage,
self.max_disk_usage_percentage,
)
}
/// Check disk usage of the destination
///
/// On non-Unix platforms, this operation is a no-op.
#[cfg(not(target_family = "unix"))]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
if self.max_disk_usage.is_some() || self.max_disk_usage_percentage.is_some() {
return Err(Error::Other(
"unable to check disk usage on this platform",
format!("os:{OS}"),
));
}
Ok(())
}
fn kcore(&self) -> Result<()> {
if !is_kcore_ok() {
return Err(Error::LockedDownKcore);
}
let mut image = Image::new(self.version, Path::new("/proc/kcore"), self.destination)?;
self.check_disk_usage(&image)?;
let file =
elf::ElfStream::<NativeEndian, _>::open_stream(&mut image.src).map_err(Error::Elf)?;
let mut segments: Vec<&ProgramHeader> = file
.segments()
.iter()
.filter(|x| x.p_type == PT_LOAD)
.collect();
segments.sort_by(|a, b| a.p_vaddr.cmp(&b.p_vaddr));
let first_vaddr = segments
.get(0)
.ok_or_else(|| Error::UnableToCreateSnapshot("no initial addresses".to_string()))?
.p_vaddr;
let first_start = self
.memory_ranges
.get(0)
.ok_or_else(|| Error::UnableToCreateSnapshot("no initial memory range".to_string()))?
.start;
let start = first_vaddr - first_start;
let mut physical_ranges = vec![];
for phdr in segments {
let entry_start = phdr.p_vaddr - start;
let entry_end = entry_start + phdr.p_memsz;
physical_ranges.push(Block {
range: entry_start..entry_end,
offset: phdr.p_offset,
});
}
let blocks = Self::find_kcore_blocks(&self.memory_ranges, &physical_ranges);
image.write_blocks(&blocks)?;
Ok(())
}
fn phys(&self, mem: &Path) -> Result<()> {
let is_crash = mem == Path::new("/dev/crash");
let blocks = self
.memory_ranges
.iter()
.map(|x| Block {
offset: x.start,
range: if is_crash {
x.start..((x.end >> 12) << 12)
} else {
x.start..x.end
},
})
.collect::<Vec<_>>();
let mut image = Image::new(self.version, mem, self.destination)?;
self.check_disk_usage(&image)?;
image.write_blocks(&blocks)?;
Ok(())
}
}
#[cfg(test)] | let ranges = [10..20, 30..35, 45..55];
let core_ranges = [
Block {
range: 10..20,
offset: 0,
},
Block {
range: 25..35,
offset: 10,
},
Block {
range: 40..50,
offset: 20,
},
Block {
range: 50..55,
offset: 35,
},
];
let expected = vec![
Block {
offset: 0,
range: 10..20,
},
Block {
offset: 10 + 5,
range: 30..35,
},
Block {
offset: 25,
range: 45..50,
},
Block {
offset: 35,
range: 50..55,
},
];
let result = Snapshot::find_kcore_blocks(&ranges, &core_ranges);
assert_eq!(result, expected);
}
} | mod tests {
use super::*;
#[test]
fn translate_ranges() { | random_line_split |
snapshot.rs | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#[cfg(target_family = "unix")]
use crate::disk_usage;
use crate::{
format_error,
image::{Block, Image},
};
use clap::ValueEnum;
use elf::{abi::PT_LOAD, endian::NativeEndian, segment::ProgramHeader};
#[cfg(not(target_family = "unix"))]
use std::env::consts::OS;
use std::{
fs::{metadata, OpenOptions},
num::NonZeroU64,
ops::Range,
path::{Path, PathBuf},
};
#[derive(thiserror::Error)]
pub enum Error {
#[error("unable to parse elf structures: {0}")]
Elf(elf::ParseError),
#[error("locked down /proc/kcore")]
LockedDownKcore,
#[error(
"estimated usage exceeds specified bounds: estimated size:{estimated} bytes. allowed:{allowed} bytes"
)]
DiskUsageEstimateExceeded { estimated: u64, allowed: u64 },
#[error("unable to create memory snapshot")]
UnableToCreateMemorySnapshot(#[from] crate::image::Error),
#[error("unable to create memory snapshot from source: {1}")]
UnableToCreateSnapshotFromSource(#[source] Box<Error>, Source),
#[error("unable to create memory snapshot: {0}")]
UnableToCreateSnapshot(String),
#[error("{0}: {1}")]
Other(&'static str, String),
#[error("disk error")]
Disk(#[source] std::io::Error),
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
format_error(self, f)
}
}
pub(crate) type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Clone, ValueEnum)]
pub enum Source {
/// Provides a read-only view of physical memory. Access to memory using
/// this device must be paged aligned and read one page at a time.
///
/// On RHEL based distributions, this device is frequently provided by
/// default. A loadable kernel module version is available as part of
/// the Linux utility `crash`:
/// <https://github.com/crash-utility/crash/tree/master/memory_driver>
#[value(name = "/dev/crash")]
DevCrash,
/// Provides a read-write view of physical memory, though AVML opens it in a
/// read-only fashion. Access to to memory using this device can be
/// disabled using the kernel configuration options `CONFIG_STRICT_DEVMEM`
/// or `CONFIG_IO_STRICT_DEVMEM`.
///
/// With `CONFIG_STRICT_DEVMEM`, only the first 1MB of memory can be
/// accessed.
#[value(name = "/dev/mem")]
DevMem,
/// Provides a virtual ELF coredump of kernel memory. This can be used to
/// access physical memory.
///
/// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but
/// is either inaccessible or doesn't allow access to all of the kernel
/// memory.
#[value(name = "/proc/kcore")]
ProcKcore,
/// User-specified path to a raw memory file
#[value(skip)]
Raw(PathBuf),
}
impl std::fmt::Display for Source {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::DevCrash => write!(f, "/dev/crash"),
Self::DevMem => write!(f, "/dev/mem"),
Self::ProcKcore => write!(f, "/proc/kcore"),
Self::Raw(path) => write!(f, "{}", path.display()),
}
}
}
#[must_use]
fn can_open(src: &Path) -> bool {
OpenOptions::new().read(true).open(src).is_ok()
}
// The file /proc/kcore is a pseudo-file in ELF core format that is 4KB+physical
// memory in size.
//
// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but is
// either inaccessible or doesn't allow access to all of the kernel memory.
//
// /dev/mem and /dev/crash, if available, are devices, rather than virtual
// files. As such, we don't check those for size.
#[must_use]
fn is_kcore_ok() -> bool |
// try to perform an action, either returning on success, or having the result
// of the error in an indented string.
//
// This special cases `DiskUsageEstimateExceeded` errors, as we want this to
// fail fast and bail out of the `try_method` caller.
macro_rules! try_method {
($func:expr) => {{
match $func {
Ok(x) => return Ok(x),
Err(err) => {
if matches!(
err,
Error::UnableToCreateSnapshotFromSource(ref x, _) if matches!(x.as_ref(), Error::DiskUsageEstimateExceeded{..}),
) {
return Err(err);
}
crate::indent(format!("{:?}", err), 4)
}
}
}};
}
pub struct Snapshot<'a, 'b> {
source: Option<&'b Source>,
destination: &'a Path,
memory_ranges: Vec<Range<u64>>,
version: u32,
max_disk_usage: Option<NonZeroU64>,
max_disk_usage_percentage: Option<f64>,
}
impl<'a, 'b> Snapshot<'a, 'b> {
/// Create a new memory snapshot.
///
/// The default version implements the `LiME` format.
#[must_use]
pub fn new(destination: &'a Path, memory_ranges: Vec<Range<u64>>) -> Self {
Self {
source: None,
destination,
memory_ranges,
version: 1,
max_disk_usage: None,
max_disk_usage_percentage: None,
}
}
/// Specify the maximum disk usage to stay under as a percentage
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage_percentage(self, max_disk_usage_percentage: Option<f64>) -> Self {
Self {
max_disk_usage_percentage,
..self
}
}
/// Specify the maximum disk space in MB to use
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage(self, max_disk_usage: Option<NonZeroU64>) -> Self {
Self {
max_disk_usage,
..self
}
}
/// Specify the source for creating the snapshot
#[must_use]
pub fn source(self, source: Option<&'b Source>) -> Self {
Self { source, ..self }
}
/// Specify the version of the snapshot format
#[must_use]
pub fn version(self, version: u32) -> Self {
Self { version, ..self }
}
fn create_source(&self, src: &Source) -> Result<()> {
match src {
Source::ProcKcore => self.kcore(),
Source::DevCrash => self.phys(Path::new("/dev/crash")),
Source::DevMem => self.phys(Path::new("/dev/mem")),
Source::Raw(s) => self.phys(s),
}
.map_err(|e| Error::UnableToCreateSnapshotFromSource(Box::new(e), src.clone()))
}
/// Create a memory snapshot
pub fn create(&self) -> Result<()> {
if let Some(src) = self.source {
self.create_source(src)?;
} else if self.destination == Path::new("/dev/stdout") {
// If we're writing to stdout, we can't start over if reading from a
// source fails. As such, we need to do more work to pick a source
// rather than just trying all available options.
if is_kcore_ok() {
self.create_source(&Source::ProcKcore)?;
} else if can_open(Path::new("/dev/crash")) {
self.create_source(&Source::DevCrash)?;
} else if can_open(Path::new("/dev/mem")) {
self.create_source(&Source::DevMem)?;
} else {
return Err(Error::UnableToCreateSnapshot(
"no source available".to_string(),
));
}
} else {
let crash_err = try_method!(self.create_source(&Source::DevCrash));
let kcore_err = try_method!(self.create_source(&Source::ProcKcore));
let devmem_err = try_method!(self.create_source(&Source::DevMem));
let reason = [String::new(), crash_err, kcore_err, devmem_err].join("\n");
return Err(Error::UnableToCreateSnapshot(crate::indent(reason, 4)));
}
Ok(())
}
// given a set of ranges from iomem and a set of Blocks derived from the
// pseudo-elf phys section headers, derive a set of ranges that can be used
// to create a snapshot.
fn find_kcore_blocks(ranges: &[Range<u64>], headers: &[Block]) -> Vec<Block> {
let mut result = vec![];
'outer: for range in ranges {
let mut range = range.clone();
'inner: for header in headers {
match (
header.range.contains(&range.start),
// TODO: ranges is currently inclusive, but not a
// RangeInclusive. this should be adjusted.
header.range.contains(&(range.end - 1)),
) {
(true, true) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.clone(),
};
result.push(block);
continue 'outer;
}
(true, false) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.start..header.range.end,
};
result.push(block);
range.start = header.range.end;
}
_ => {
continue 'inner;
}
};
}
}
result
}
/// Check disk usage of the destination
///
/// NOTE: This requires `Image` because we want to ensure this is called
/// after the file is created.
#[cfg(target_family = "unix")]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
disk_usage::check(
self.destination,
&self.memory_ranges,
self.max_disk_usage,
self.max_disk_usage_percentage,
)
}
/// Check disk usage of the destination
///
/// On non-Unix platforms, this operation is a no-op.
#[cfg(not(target_family = "unix"))]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
if self.max_disk_usage.is_some() || self.max_disk_usage_percentage.is_some() {
return Err(Error::Other(
"unable to check disk usage on this platform",
format!("os:{OS}"),
));
}
Ok(())
}
fn kcore(&self) -> Result<()> {
if !is_kcore_ok() {
return Err(Error::LockedDownKcore);
}
let mut image = Image::new(self.version, Path::new("/proc/kcore"), self.destination)?;
self.check_disk_usage(&image)?;
let file =
elf::ElfStream::<NativeEndian, _>::open_stream(&mut image.src).map_err(Error::Elf)?;
let mut segments: Vec<&ProgramHeader> = file
.segments()
.iter()
.filter(|x| x.p_type == PT_LOAD)
.collect();
segments.sort_by(|a, b| a.p_vaddr.cmp(&b.p_vaddr));
let first_vaddr = segments
.get(0)
.ok_or_else(|| Error::UnableToCreateSnapshot("no initial addresses".to_string()))?
.p_vaddr;
let first_start = self
.memory_ranges
.get(0)
.ok_or_else(|| Error::UnableToCreateSnapshot("no initial memory range".to_string()))?
.start;
let start = first_vaddr - first_start;
let mut physical_ranges = vec![];
for phdr in segments {
let entry_start = phdr.p_vaddr - start;
let entry_end = entry_start + phdr.p_memsz;
physical_ranges.push(Block {
range: entry_start..entry_end,
offset: phdr.p_offset,
});
}
let blocks = Self::find_kcore_blocks(&self.memory_ranges, &physical_ranges);
image.write_blocks(&blocks)?;
Ok(())
}
fn phys(&self, mem: &Path) -> Result<()> {
let is_crash = mem == Path::new("/dev/crash");
let blocks = self
.memory_ranges
.iter()
.map(|x| Block {
offset: x.start,
range: if is_crash {
x.start..((x.end >> 12) << 12)
} else {
x.start..x.end
},
})
.collect::<Vec<_>>();
let mut image = Image::new(self.version, mem, self.destination)?;
self.check_disk_usage(&image)?;
image.write_blocks(&blocks)?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn translate_ranges() {
let ranges = [10..20, 30..35, 45..55];
let core_ranges = [
Block {
range: 10..20,
offset: 0,
},
Block {
range: 25..35,
offset: 10,
},
Block {
range: 40..50,
offset: 20,
},
Block {
range: 50..55,
offset: 35,
},
];
let expected = vec![
Block {
offset: 0,
range: 10..20,
},
Block {
offset: 10 + 5,
range: 30..35,
},
Block {
offset: 25,
range: 45..50,
},
Block {
offset: 35,
range: 50..55,
},
];
let result = Snapshot::find_kcore_blocks(&ranges, &core_ranges);
assert_eq!(result, expected);
}
}
| {
metadata(Path::new("/proc/kcore"))
.map(|x| x.len() > 0x2000)
.unwrap_or(false)
&& can_open(Path::new("/proc/kcore"))
} | identifier_body |
snapshot.rs | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#[cfg(target_family = "unix")]
use crate::disk_usage;
use crate::{
format_error,
image::{Block, Image},
};
use clap::ValueEnum;
use elf::{abi::PT_LOAD, endian::NativeEndian, segment::ProgramHeader};
#[cfg(not(target_family = "unix"))]
use std::env::consts::OS;
use std::{
fs::{metadata, OpenOptions},
num::NonZeroU64,
ops::Range,
path::{Path, PathBuf},
};
#[derive(thiserror::Error)]
pub enum Error {
#[error("unable to parse elf structures: {0}")]
Elf(elf::ParseError),
#[error("locked down /proc/kcore")]
LockedDownKcore,
#[error(
"estimated usage exceeds specified bounds: estimated size:{estimated} bytes. allowed:{allowed} bytes"
)]
DiskUsageEstimateExceeded { estimated: u64, allowed: u64 },
#[error("unable to create memory snapshot")]
UnableToCreateMemorySnapshot(#[from] crate::image::Error),
#[error("unable to create memory snapshot from source: {1}")]
UnableToCreateSnapshotFromSource(#[source] Box<Error>, Source),
#[error("unable to create memory snapshot: {0}")]
UnableToCreateSnapshot(String),
#[error("{0}: {1}")]
Other(&'static str, String),
#[error("disk error")]
Disk(#[source] std::io::Error),
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
format_error(self, f)
}
}
pub(crate) type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Clone, ValueEnum)]
pub enum Source {
/// Provides a read-only view of physical memory. Access to memory using
/// this device must be paged aligned and read one page at a time.
///
/// On RHEL based distributions, this device is frequently provided by
/// default. A loadable kernel module version is available as part of
/// the Linux utility `crash`:
/// <https://github.com/crash-utility/crash/tree/master/memory_driver>
#[value(name = "/dev/crash")]
DevCrash,
/// Provides a read-write view of physical memory, though AVML opens it in a
/// read-only fashion. Access to to memory using this device can be
/// disabled using the kernel configuration options `CONFIG_STRICT_DEVMEM`
/// or `CONFIG_IO_STRICT_DEVMEM`.
///
/// With `CONFIG_STRICT_DEVMEM`, only the first 1MB of memory can be
/// accessed.
#[value(name = "/dev/mem")]
DevMem,
/// Provides a virtual ELF coredump of kernel memory. This can be used to
/// access physical memory.
///
/// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but
/// is either inaccessible or doesn't allow access to all of the kernel
/// memory.
#[value(name = "/proc/kcore")]
ProcKcore,
/// User-specified path to a raw memory file
#[value(skip)]
Raw(PathBuf),
}
impl std::fmt::Display for Source {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::DevCrash => write!(f, "/dev/crash"),
Self::DevMem => write!(f, "/dev/mem"),
Self::ProcKcore => write!(f, "/proc/kcore"),
Self::Raw(path) => write!(f, "{}", path.display()),
}
}
}
#[must_use]
fn can_open(src: &Path) -> bool {
OpenOptions::new().read(true).open(src).is_ok()
}
// The file /proc/kcore is a pseudo-file in ELF core format that is 4KB+physical
// memory in size.
//
// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but is
// either inaccessible or doesn't allow access to all of the kernel memory.
//
// /dev/mem and /dev/crash, if available, are devices, rather than virtual
// files. As such, we don't check those for size.
#[must_use]
fn is_kcore_ok() -> bool {
metadata(Path::new("/proc/kcore"))
.map(|x| x.len() > 0x2000)
.unwrap_or(false)
&& can_open(Path::new("/proc/kcore"))
}
// try to perform an action, either returning on success, or having the result
// of the error in an indented string.
//
// This special cases `DiskUsageEstimateExceeded` errors, as we want this to
// fail fast and bail out of the `try_method` caller.
macro_rules! try_method {
($func:expr) => {{
match $func {
Ok(x) => return Ok(x),
Err(err) => {
if matches!(
err,
Error::UnableToCreateSnapshotFromSource(ref x, _) if matches!(x.as_ref(), Error::DiskUsageEstimateExceeded{..}),
) {
return Err(err);
}
crate::indent(format!("{:?}", err), 4)
}
}
}};
}
pub struct Snapshot<'a, 'b> {
source: Option<&'b Source>,
destination: &'a Path,
memory_ranges: Vec<Range<u64>>,
version: u32,
max_disk_usage: Option<NonZeroU64>,
max_disk_usage_percentage: Option<f64>,
}
impl<'a, 'b> Snapshot<'a, 'b> {
/// Create a new memory snapshot.
///
/// The default version implements the `LiME` format.
#[must_use]
pub fn new(destination: &'a Path, memory_ranges: Vec<Range<u64>>) -> Self {
Self {
source: None,
destination,
memory_ranges,
version: 1,
max_disk_usage: None,
max_disk_usage_percentage: None,
}
}
/// Specify the maximum disk usage to stay under as a percentage
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage_percentage(self, max_disk_usage_percentage: Option<f64>) -> Self {
Self {
max_disk_usage_percentage,
..self
}
}
/// Specify the maximum disk space in MB to use
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage(self, max_disk_usage: Option<NonZeroU64>) -> Self {
Self {
max_disk_usage,
..self
}
}
/// Specify the source for creating the snapshot
#[must_use]
pub fn source(self, source: Option<&'b Source>) -> Self {
Self { source, ..self }
}
/// Specify the version of the snapshot format
#[must_use]
pub fn version(self, version: u32) -> Self {
Self { version, ..self }
}
fn create_source(&self, src: &Source) -> Result<()> {
match src {
Source::ProcKcore => self.kcore(),
Source::DevCrash => self.phys(Path::new("/dev/crash")),
Source::DevMem => self.phys(Path::new("/dev/mem")),
Source::Raw(s) => self.phys(s),
}
.map_err(|e| Error::UnableToCreateSnapshotFromSource(Box::new(e), src.clone()))
}
/// Create a memory snapshot
pub fn create(&self) -> Result<()> {
if let Some(src) = self.source {
self.create_source(src)?;
} else if self.destination == Path::new("/dev/stdout") {
// If we're writing to stdout, we can't start over if reading from a
// source fails. As such, we need to do more work to pick a source
// rather than just trying all available options.
if is_kcore_ok() {
self.create_source(&Source::ProcKcore)?;
} else if can_open(Path::new("/dev/crash")) {
self.create_source(&Source::DevCrash)?;
} else if can_open(Path::new("/dev/mem")) {
self.create_source(&Source::DevMem)?;
} else {
return Err(Error::UnableToCreateSnapshot(
"no source available".to_string(),
));
}
} else {
let crash_err = try_method!(self.create_source(&Source::DevCrash));
let kcore_err = try_method!(self.create_source(&Source::ProcKcore));
let devmem_err = try_method!(self.create_source(&Source::DevMem));
let reason = [String::new(), crash_err, kcore_err, devmem_err].join("\n");
return Err(Error::UnableToCreateSnapshot(crate::indent(reason, 4)));
}
Ok(())
}
// given a set of ranges from iomem and a set of Blocks derived from the
// pseudo-elf phys section headers, derive a set of ranges that can be used
// to create a snapshot.
fn find_kcore_blocks(ranges: &[Range<u64>], headers: &[Block]) -> Vec<Block> {
let mut result = vec![];
'outer: for range in ranges {
let mut range = range.clone();
'inner: for header in headers {
match (
header.range.contains(&range.start),
// TODO: ranges is currently inclusive, but not a
// RangeInclusive. this should be adjusted.
header.range.contains(&(range.end - 1)),
) {
(true, true) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.clone(),
};
result.push(block);
continue 'outer;
}
(true, false) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.start..header.range.end,
};
result.push(block);
range.start = header.range.end;
}
_ => {
continue 'inner;
}
};
}
}
result
}
/// Check disk usage of the destination
///
/// NOTE: This requires `Image` because we want to ensure this is called
/// after the file is created.
#[cfg(target_family = "unix")]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
disk_usage::check(
self.destination,
&self.memory_ranges,
self.max_disk_usage,
self.max_disk_usage_percentage,
)
}
/// Check disk usage of the destination
///
/// On non-Unix platforms, this operation is a no-op.
#[cfg(not(target_family = "unix"))]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
if self.max_disk_usage.is_some() || self.max_disk_usage_percentage.is_some() {
return Err(Error::Other(
"unable to check disk usage on this platform",
format!("os:{OS}"),
));
}
Ok(())
}
fn kcore(&self) -> Result<()> {
if !is_kcore_ok() {
return Err(Error::LockedDownKcore);
}
let mut image = Image::new(self.version, Path::new("/proc/kcore"), self.destination)?;
self.check_disk_usage(&image)?;
let file =
elf::ElfStream::<NativeEndian, _>::open_stream(&mut image.src).map_err(Error::Elf)?;
let mut segments: Vec<&ProgramHeader> = file
.segments()
.iter()
.filter(|x| x.p_type == PT_LOAD)
.collect();
segments.sort_by(|a, b| a.p_vaddr.cmp(&b.p_vaddr));
let first_vaddr = segments
.get(0)
.ok_or_else(|| Error::UnableToCreateSnapshot("no initial addresses".to_string()))?
.p_vaddr;
let first_start = self
.memory_ranges
.get(0)
.ok_or_else(|| Error::UnableToCreateSnapshot("no initial memory range".to_string()))?
.start;
let start = first_vaddr - first_start;
let mut physical_ranges = vec![];
for phdr in segments {
let entry_start = phdr.p_vaddr - start;
let entry_end = entry_start + phdr.p_memsz;
physical_ranges.push(Block {
range: entry_start..entry_end,
offset: phdr.p_offset,
});
}
let blocks = Self::find_kcore_blocks(&self.memory_ranges, &physical_ranges);
image.write_blocks(&blocks)?;
Ok(())
}
fn phys(&self, mem: &Path) -> Result<()> {
let is_crash = mem == Path::new("/dev/crash");
let blocks = self
.memory_ranges
.iter()
.map(|x| Block {
offset: x.start,
range: if is_crash {
x.start..((x.end >> 12) << 12)
} else {
x.start..x.end
},
})
.collect::<Vec<_>>();
let mut image = Image::new(self.version, mem, self.destination)?;
self.check_disk_usage(&image)?;
image.write_blocks(&blocks)?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn | () {
let ranges = [10..20, 30..35, 45..55];
let core_ranges = [
Block {
range: 10..20,
offset: 0,
},
Block {
range: 25..35,
offset: 10,
},
Block {
range: 40..50,
offset: 20,
},
Block {
range: 50..55,
offset: 35,
},
];
let expected = vec![
Block {
offset: 0,
range: 10..20,
},
Block {
offset: 10 + 5,
range: 30..35,
},
Block {
offset: 25,
range: 45..50,
},
Block {
offset: 35,
range: 50..55,
},
];
let result = Snapshot::find_kcore_blocks(&ranges, &core_ranges);
assert_eq!(result, expected);
}
}
| translate_ranges | identifier_name |
snapshot.rs | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#[cfg(target_family = "unix")]
use crate::disk_usage;
use crate::{
format_error,
image::{Block, Image},
};
use clap::ValueEnum;
use elf::{abi::PT_LOAD, endian::NativeEndian, segment::ProgramHeader};
#[cfg(not(target_family = "unix"))]
use std::env::consts::OS;
use std::{
fs::{metadata, OpenOptions},
num::NonZeroU64,
ops::Range,
path::{Path, PathBuf},
};
#[derive(thiserror::Error)]
pub enum Error {
#[error("unable to parse elf structures: {0}")]
Elf(elf::ParseError),
#[error("locked down /proc/kcore")]
LockedDownKcore,
#[error(
"estimated usage exceeds specified bounds: estimated size:{estimated} bytes. allowed:{allowed} bytes"
)]
DiskUsageEstimateExceeded { estimated: u64, allowed: u64 },
#[error("unable to create memory snapshot")]
UnableToCreateMemorySnapshot(#[from] crate::image::Error),
#[error("unable to create memory snapshot from source: {1}")]
UnableToCreateSnapshotFromSource(#[source] Box<Error>, Source),
#[error("unable to create memory snapshot: {0}")]
UnableToCreateSnapshot(String),
#[error("{0}: {1}")]
Other(&'static str, String),
#[error("disk error")]
Disk(#[source] std::io::Error),
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
format_error(self, f)
}
}
pub(crate) type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Clone, ValueEnum)]
pub enum Source {
/// Provides a read-only view of physical memory. Access to memory using
/// this device must be paged aligned and read one page at a time.
///
/// On RHEL based distributions, this device is frequently provided by
/// default. A loadable kernel module version is available as part of
/// the Linux utility `crash`:
/// <https://github.com/crash-utility/crash/tree/master/memory_driver>
#[value(name = "/dev/crash")]
DevCrash,
/// Provides a read-write view of physical memory, though AVML opens it in a
/// read-only fashion. Access to to memory using this device can be
/// disabled using the kernel configuration options `CONFIG_STRICT_DEVMEM`
/// or `CONFIG_IO_STRICT_DEVMEM`.
///
/// With `CONFIG_STRICT_DEVMEM`, only the first 1MB of memory can be
/// accessed.
#[value(name = "/dev/mem")]
DevMem,
/// Provides a virtual ELF coredump of kernel memory. This can be used to
/// access physical memory.
///
/// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but
/// is either inaccessible or doesn't allow access to all of the kernel
/// memory.
#[value(name = "/proc/kcore")]
ProcKcore,
/// User-specified path to a raw memory file
#[value(skip)]
Raw(PathBuf),
}
impl std::fmt::Display for Source {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::DevCrash => write!(f, "/dev/crash"),
Self::DevMem => write!(f, "/dev/mem"),
Self::ProcKcore => write!(f, "/proc/kcore"),
Self::Raw(path) => write!(f, "{}", path.display()),
}
}
}
#[must_use]
fn can_open(src: &Path) -> bool {
OpenOptions::new().read(true).open(src).is_ok()
}
// The file /proc/kcore is a pseudo-file in ELF core format that is 4KB+physical
// memory in size.
//
// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but is
// either inaccessible or doesn't allow access to all of the kernel memory.
//
// /dev/mem and /dev/crash, if available, are devices, rather than virtual
// files. As such, we don't check those for size.
#[must_use]
fn is_kcore_ok() -> bool {
metadata(Path::new("/proc/kcore"))
.map(|x| x.len() > 0x2000)
.unwrap_or(false)
&& can_open(Path::new("/proc/kcore"))
}
// try to perform an action, either returning on success, or having the result
// of the error in an indented string.
//
// This special cases `DiskUsageEstimateExceeded` errors, as we want this to
// fail fast and bail out of the `try_method` caller.
macro_rules! try_method {
($func:expr) => {{
match $func {
Ok(x) => return Ok(x),
Err(err) => {
if matches!(
err,
Error::UnableToCreateSnapshotFromSource(ref x, _) if matches!(x.as_ref(), Error::DiskUsageEstimateExceeded{..}),
) {
return Err(err);
}
crate::indent(format!("{:?}", err), 4)
}
}
}};
}
pub struct Snapshot<'a, 'b> {
source: Option<&'b Source>,
destination: &'a Path,
memory_ranges: Vec<Range<u64>>,
version: u32,
max_disk_usage: Option<NonZeroU64>,
max_disk_usage_percentage: Option<f64>,
}
impl<'a, 'b> Snapshot<'a, 'b> {
/// Create a new memory snapshot.
///
/// The default version implements the `LiME` format.
#[must_use]
pub fn new(destination: &'a Path, memory_ranges: Vec<Range<u64>>) -> Self {
Self {
source: None,
destination,
memory_ranges,
version: 1,
max_disk_usage: None,
max_disk_usage_percentage: None,
}
}
/// Specify the maximum disk usage to stay under as a percentage
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage_percentage(self, max_disk_usage_percentage: Option<f64>) -> Self {
Self {
max_disk_usage_percentage,
..self
}
}
/// Specify the maximum disk space in MB to use
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage(self, max_disk_usage: Option<NonZeroU64>) -> Self {
Self {
max_disk_usage,
..self
}
}
/// Specify the source for creating the snapshot
#[must_use]
pub fn source(self, source: Option<&'b Source>) -> Self {
Self { source, ..self }
}
/// Specify the version of the snapshot format
#[must_use]
pub fn version(self, version: u32) -> Self {
Self { version, ..self }
}
fn create_source(&self, src: &Source) -> Result<()> {
match src {
Source::ProcKcore => self.kcore(),
Source::DevCrash => self.phys(Path::new("/dev/crash")),
Source::DevMem => self.phys(Path::new("/dev/mem")),
Source::Raw(s) => self.phys(s),
}
.map_err(|e| Error::UnableToCreateSnapshotFromSource(Box::new(e), src.clone()))
}
/// Create a memory snapshot
pub fn create(&self) -> Result<()> {
if let Some(src) = self.source {
self.create_source(src)?;
} else if self.destination == Path::new("/dev/stdout") {
// If we're writing to stdout, we can't start over if reading from a
// source fails. As such, we need to do more work to pick a source
// rather than just trying all available options.
if is_kcore_ok() | else if can_open(Path::new("/dev/crash")) {
self.create_source(&Source::DevCrash)?;
} else if can_open(Path::new("/dev/mem")) {
self.create_source(&Source::DevMem)?;
} else {
return Err(Error::UnableToCreateSnapshot(
"no source available".to_string(),
));
}
} else {
let crash_err = try_method!(self.create_source(&Source::DevCrash));
let kcore_err = try_method!(self.create_source(&Source::ProcKcore));
let devmem_err = try_method!(self.create_source(&Source::DevMem));
let reason = [String::new(), crash_err, kcore_err, devmem_err].join("\n");
return Err(Error::UnableToCreateSnapshot(crate::indent(reason, 4)));
}
Ok(())
}
// given a set of ranges from iomem and a set of Blocks derived from the
// pseudo-elf phys section headers, derive a set of ranges that can be used
// to create a snapshot.
fn find_kcore_blocks(ranges: &[Range<u64>], headers: &[Block]) -> Vec<Block> {
let mut result = vec![];
'outer: for range in ranges {
let mut range = range.clone();
'inner: for header in headers {
match (
header.range.contains(&range.start),
// TODO: ranges is currently inclusive, but not a
// RangeInclusive. this should be adjusted.
header.range.contains(&(range.end - 1)),
) {
(true, true) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.clone(),
};
result.push(block);
continue 'outer;
}
(true, false) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.start..header.range.end,
};
result.push(block);
range.start = header.range.end;
}
_ => {
continue 'inner;
}
};
}
}
result
}
/// Check disk usage of the destination
///
/// NOTE: This requires `Image` because we want to ensure this is called
/// after the file is created.
#[cfg(target_family = "unix")]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
disk_usage::check(
self.destination,
&self.memory_ranges,
self.max_disk_usage,
self.max_disk_usage_percentage,
)
}
/// Check disk usage of the destination
///
/// On non-Unix platforms, this operation is a no-op.
#[cfg(not(target_family = "unix"))]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
if self.max_disk_usage.is_some() || self.max_disk_usage_percentage.is_some() {
return Err(Error::Other(
"unable to check disk usage on this platform",
format!("os:{OS}"),
));
}
Ok(())
}
fn kcore(&self) -> Result<()> {
if !is_kcore_ok() {
return Err(Error::LockedDownKcore);
}
let mut image = Image::new(self.version, Path::new("/proc/kcore"), self.destination)?;
self.check_disk_usage(&image)?;
let file =
elf::ElfStream::<NativeEndian, _>::open_stream(&mut image.src).map_err(Error::Elf)?;
let mut segments: Vec<&ProgramHeader> = file
.segments()
.iter()
.filter(|x| x.p_type == PT_LOAD)
.collect();
segments.sort_by(|a, b| a.p_vaddr.cmp(&b.p_vaddr));
let first_vaddr = segments
.get(0)
.ok_or_else(|| Error::UnableToCreateSnapshot("no initial addresses".to_string()))?
.p_vaddr;
let first_start = self
.memory_ranges
.get(0)
.ok_or_else(|| Error::UnableToCreateSnapshot("no initial memory range".to_string()))?
.start;
let start = first_vaddr - first_start;
let mut physical_ranges = vec![];
for phdr in segments {
let entry_start = phdr.p_vaddr - start;
let entry_end = entry_start + phdr.p_memsz;
physical_ranges.push(Block {
range: entry_start..entry_end,
offset: phdr.p_offset,
});
}
let blocks = Self::find_kcore_blocks(&self.memory_ranges, &physical_ranges);
image.write_blocks(&blocks)?;
Ok(())
}
fn phys(&self, mem: &Path) -> Result<()> {
let is_crash = mem == Path::new("/dev/crash");
let blocks = self
.memory_ranges
.iter()
.map(|x| Block {
offset: x.start,
range: if is_crash {
x.start..((x.end >> 12) << 12)
} else {
x.start..x.end
},
})
.collect::<Vec<_>>();
let mut image = Image::new(self.version, mem, self.destination)?;
self.check_disk_usage(&image)?;
image.write_blocks(&blocks)?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn translate_ranges() {
let ranges = [10..20, 30..35, 45..55];
let core_ranges = [
Block {
range: 10..20,
offset: 0,
},
Block {
range: 25..35,
offset: 10,
},
Block {
range: 40..50,
offset: 20,
},
Block {
range: 50..55,
offset: 35,
},
];
let expected = vec![
Block {
offset: 0,
range: 10..20,
},
Block {
offset: 10 + 5,
range: 30..35,
},
Block {
offset: 25,
range: 45..50,
},
Block {
offset: 35,
range: 50..55,
},
];
let result = Snapshot::find_kcore_blocks(&ranges, &core_ranges);
assert_eq!(result, expected);
}
}
| {
self.create_source(&Source::ProcKcore)?;
} | conditional_block |
real_comparison.py | #!/usr/bin/env python
import sys
import datetime
import glob
import operator as op
import numpy as np
import matplotlib.pyplot as plt
from bunch import Bunch
import mygis
wrf_dir="/glade/u/home/gutmann/scratch/wrfoutput/4km/2007/"
DIM_2D_SHAPE=3
DIM_3D_SHAPE=4
def echo(fn):
def wrapped(*v, **k):
print(fn.__name__)
return fn(*v, **k)
return wrapped
def exner(th,p):
Rd=287.058
cp=1004.0
p0=100000
pii=(p/p0)**(Rd/cp)
return th * pii
class DataReader(object):
# only directly accessible public attributes
files=None
times_per_file=1
# curpos, last_rain, and last_rain_pos are accessible via @properties
_curpos=0
_pos_in_file=0
_curfile=0
_last_rain=None
_last_rain_pos=-1
_lr_pos_in_file=-1
_lr_curfile=0
_rainvar="RAINNC"
_testvar=None
# _var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],"RAINNC",[op.add,"T2",300],"U","V","W"]
_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],[op.add,"T2",300],"U","V"]
_short_names=dict(QVAPOR="qv",QCLOUD="qc",QICE="qc",RAINNC="rain",T="t",T2="t",U="u",V="v",W="w",QRAIN="rain",
qv="qv",qc="qc",rain="rain",qr="rain",th="t",u="u",v="v",w="w")
_collapse_functions=dict(QVAPOR=np.mean,QCLOUD=np.sum,T=np.mean,U=np.mean,V=np.mean,W=np.mean,
QICE=np.sum,QRAIN=np.sum,QSNOW=np.sum,
qv=np.mean,qc=np.sum,th=np.mean,u=np.mean,v=np.mean,w=np.mean,
qi=np.sum,qr=np.sum,qs=np.sum,p=np.mean)
_wrf_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],"T2","U","V"]#[op.add,"T",290],"U","V"]
_icar_var_names=["qv",[op.add,"qc","qi"],[op.add,"qr","qs"],[exner,"th","p"],"u","v"]
x=slice(0,None) #by default take all data in the file in x,y, and z
y=slice(0,None)
# z=slice(0,None)
z=slice(0,10)
# zslices=dict(qv=slice(0,10),qc=slice(0,10),t=slice(1),)
# yslices=dict()
# yslices.setdefault(y)
llh
def __init__(self, filenames,start_pos=0,datatype="WRF"):
super(DataReader,self).__init__()
self.files=filenames
self._datamodel=datatype
if datatype=="WRF":
self._var_names=self._wrf_var_names
test_var=mygis.read_nc(self.files[0],self._var_names[0],returnNCvar=True)
self.times_per_file=test_var.data.shape[0]
test_var.ncfile.close()
self.zaxis=0
self.DIM_2D_SHAPE=3
self.DIM_3D_SHAPE=4
if datatype=="ICAR":
self._var_names=self._icar_var_names
self.times_per_file=1
self._rainvar="rain"
tmp=self.y
self.y=self.z
self.z=tmp
self.zaxis=1
self.DIM_2D_SHAPE=2
self.DIM_3D_SHAPE=3
#note this calls the setter which will set pos_in_file and cur_file
self.curpos=start_pos
def _get_collapsing_func(self,varname):
"""docstring for get_collapsing_func"""
try:
myfunc=self._collapse_functions[varname]
except:
myfunc=np.mean
return myfunc
def collapse_z(self,data,varname):
if len(data.shape)==3:
myfunc=self._get_collapsing_func(varname)
return myfunc(data,axis=self.zaxis)
else:
return data
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def curpos(self):
return self._curpos
@curpos.setter
def curpos(self,pos):
self._curpos=pos
self._pos_in_file= int(self._curpos) % int(self.times_per_file)
self._curfile = int(self._curpos) / int(self.times_per_file)
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def last_rain_pos(self):
return self._last_rain_pos
@curpos.setter
def last_rain_pos(self,pos):
self._last_rain_pos=pos
self._lr_pos_in_file= int(self._last_rain_pos) % int(self.times_per_file)
self._lr_curfile = int(self._last_rain_pos) / int(self.times_per_file)
# Get/Set the last_rain variable
@property
def last_rain(self):
if self._last_rain==None:
self.last_rain_pos=self.curpos-1
if (self._pos_in_file>0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[self._last_rain_pos,self.y,self.x]
nc_data.ncfile.close()
elif (self._curfile==0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
nx=nc_data.data.shape[1]
ny=nc_data.data.shape[2]
self._last_rain=np.zeros((nx,ny))[self.x,self.y]
nc_data.ncfile.close()
else:
nc_data=mygis.read_nc(self.files[self._curfile-1],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[-1,self.x,self.y]
nc_data.ncfile.close()
# else: we already have a valid _last_rain, just return it this should be the case most of the time
return self._last_rain
@last_rain.setter
def last_rain(self,value):
if hasattr(value,__iter__):
self.last_rain_pos=value[0]
self._last_rain=value[1]
else:
self.last_rain_pos=value
self._last_rain=None # the getter will automagically generate last_rain
def load_data(self,varname, filename=None, curtime=None):
if type(varname)!=str:
return varname
if filename==None:
filename=self.files[self._curfile]
if curtime==None:
curtime=self._pos_in_file
data=mygis.read_nc(filename,varname,returnNCvar=True)
dimlen=len(data.data.shape)
# 2D vars e.g. RAINNC, rain
if dimlen==self.DIM_2D_SHAPE:
if dimlen==2:
outputdata=data.data[self.y,self.x]
else:
outputdata=data.data[curtime,self.y,self.x]
# 3D vars e.g. QVAPOR, qv
elif dimlen==self.DIM_3D_SHAPE:
if dimlen==3:
outputdata=self.collapse_z(data.data[self.z,self.y,self.x],varname)
else:
outputdata=self.collapse_z(data.data[curtime,self.z,self.y,self.x],varname)
else:
raise IndexError("Do not know how to process {} dimensions".format(len(data.data.shape)))
if varname==self._rainvar:
curent_rain=outputdata[:]
outputdata-=self.last_rain
self.last_rain=(self.curpos,curent_rain)
return outputdata
def get_current_date(self):
"""Assumes a hard coded filename (e.g. WRF output filenames wrfout_d01_2007-01-01_00:00:00)"""
if self._datamodel=="WRF":
datestring=self.files[self._curfile].split("_")[2]+"-"+str(self._pos_in_file)
return datetime.datetime.strptime(datestring,"%Y-%m-%d-%H")
else:
return datetime.datetime(2007,01,01,00)+datetime.timedelta(self.curpos/24.0)
def __len__(self):
return len(self.files)*self.times_per_file
def __iter__(self):
|
def __next__(self):
self.curpos+=1
output_data=Bunch()
filename=self.files[self._curfile]
for v in self._var_names:
if type(v)==str:
curdata=self.load_data(v)
curvarname=v
elif type(v)==list:
cur_operator=v[0]
for varname in v[1:]:
if type(varname)==str:
curvarname=v[1]
break
curdata=self.load_data(v[1])
for curv in v[2:]:
next_data=self.load_data(curv)
cur_operator(curdata,next_data)
output_data[self._short_names[curvarname]]=curdata
output_data.date=self.get_current_date()
return output_data
next=__next__
clims=dict( qv=(0,0.004),
qc=(0,0.0003),
t=(260,310),
u=(-15,15),
v=(-15,15),
rain=(0,0.000005))
def make_subplot(data,ny,nx,curplot,v,extra_title):
plt.subplot(ny,nx,curplot)
plt.imshow(data)
plt.clim(clims[v])
plt.colorbar()
plt.title(v+extra_title)
def make_plots(data1,data2,date,fig=None):
plt.close("all")
if fig==None:
fig=plt.figure(figsize=(24,14));
else:
fig.clear()
ny=3
nx=4
curplot=0
varnames=["qv","qc","u","v","t","rain"]
for v in varnames:
curplot+=1
make_subplot(data1[v],ny,nx,curplot,v," "+str(date)[:14])
curplot+=1
make_subplot(data2[v],ny,nx,curplot,v," "+str(date)[:14])
return fig
def main(icar_dir="output/",output_dir="./"):
output_filename=output_dir+"vis_{}.png"
wrf_files=glob.glob(wrf_dir+"wrfout*")
wrf_files.sort()
icar_files=glob.glob(icar_dir+"swim_out*")
icar_files.sort()
wrf_data=DataReader(wrf_files,datatype="WRF")
icar_data=DataReader(icar_files,datatype="ICAR")
fig=plt.figure(figsize=(24,14));
for i in range(len(wrf_data)):
wrf=wrf_data.next()
icar=icar_data.next()
print(str(wrf.date),str(icar.date))
sys.stdout.flush()
fig=make_plots(icar,wrf,wrf.date,fig=fig)
fig.savefig(output_filename.format(str(wrf.date).replace(" ","_")))
if __name__ == '__main__':
global wrf_dir
out_dir="./"
icar_dir="output/"
if len(sys.argv)>1:
if sys.argv[1][:2]=="-h":
print("Usage: real_comparison.py [icar_output_directory] [vis_output_directory] [wrf_dir]")
sys.exit()
icar_dir=sys.argv[1]
if len(sys.argv)>2:
out_dir=sys.argv[2]
if len(sys.argv)>3:
wrf_dir=sys.argv[3]
main(icar_dir,out_dir) | return self | identifier_body |
real_comparison.py | #!/usr/bin/env python
import sys
import datetime
import glob
import operator as op
import numpy as np
import matplotlib.pyplot as plt
from bunch import Bunch
import mygis
wrf_dir="/glade/u/home/gutmann/scratch/wrfoutput/4km/2007/"
DIM_2D_SHAPE=3
DIM_3D_SHAPE=4
def echo(fn):
def wrapped(*v, **k):
print(fn.__name__)
return fn(*v, **k)
return wrapped
def exner(th,p):
Rd=287.058
cp=1004.0
p0=100000
pii=(p/p0)**(Rd/cp)
return th * pii
class DataReader(object):
# only directly accessible public attributes
files=None
times_per_file=1
# curpos, last_rain, and last_rain_pos are accessible via @properties
_curpos=0
_pos_in_file=0
_curfile=0
_last_rain=None
_last_rain_pos=-1
_lr_pos_in_file=-1
_lr_curfile=0
_rainvar="RAINNC"
_testvar=None
# _var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],"RAINNC",[op.add,"T2",300],"U","V","W"]
_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],[op.add,"T2",300],"U","V"]
_short_names=dict(QVAPOR="qv",QCLOUD="qc",QICE="qc",RAINNC="rain",T="t",T2="t",U="u",V="v",W="w",QRAIN="rain",
qv="qv",qc="qc",rain="rain",qr="rain",th="t",u="u",v="v",w="w")
_collapse_functions=dict(QVAPOR=np.mean,QCLOUD=np.sum,T=np.mean,U=np.mean,V=np.mean,W=np.mean,
QICE=np.sum,QRAIN=np.sum,QSNOW=np.sum,
qv=np.mean,qc=np.sum,th=np.mean,u=np.mean,v=np.mean,w=np.mean,
qi=np.sum,qr=np.sum,qs=np.sum,p=np.mean)
_wrf_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],"T2","U","V"]#[op.add,"T",290],"U","V"]
_icar_var_names=["qv",[op.add,"qc","qi"],[op.add,"qr","qs"],[exner,"th","p"],"u","v"]
x=slice(0,None) #by default take all data in the file in x,y, and z
y=slice(0,None)
# z=slice(0,None)
z=slice(0,10)
# zslices=dict(qv=slice(0,10),qc=slice(0,10),t=slice(1),)
# yslices=dict()
# yslices.setdefault(y)
llh
def __init__(self, filenames,start_pos=0,datatype="WRF"):
super(DataReader,self).__init__()
self.files=filenames
self._datamodel=datatype
if datatype=="WRF":
self._var_names=self._wrf_var_names
test_var=mygis.read_nc(self.files[0],self._var_names[0],returnNCvar=True)
self.times_per_file=test_var.data.shape[0]
test_var.ncfile.close()
self.zaxis=0
self.DIM_2D_SHAPE=3
self.DIM_3D_SHAPE=4
if datatype=="ICAR":
self._var_names=self._icar_var_names
self.times_per_file=1
self._rainvar="rain"
tmp=self.y
self.y=self.z
self.z=tmp
self.zaxis=1
self.DIM_2D_SHAPE=2
self.DIM_3D_SHAPE=3
#note this calls the setter which will set pos_in_file and cur_file
self.curpos=start_pos
def _get_collapsing_func(self,varname):
"""docstring for get_collapsing_func"""
try:
myfunc=self._collapse_functions[varname]
except:
myfunc=np.mean
return myfunc
def collapse_z(self,data,varname):
if len(data.shape)==3:
myfunc=self._get_collapsing_func(varname)
return myfunc(data,axis=self.zaxis)
else:
return data
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def curpos(self):
return self._curpos
@curpos.setter
def curpos(self,pos):
self._curpos=pos
self._pos_in_file= int(self._curpos) % int(self.times_per_file)
self._curfile = int(self._curpos) / int(self.times_per_file)
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def last_rain_pos(self):
return self._last_rain_pos
@curpos.setter
def last_rain_pos(self,pos):
self._last_rain_pos=pos
self._lr_pos_in_file= int(self._last_rain_pos) % int(self.times_per_file)
self._lr_curfile = int(self._last_rain_pos) / int(self.times_per_file)
# Get/Set the last_rain variable
@property
def last_rain(self):
if self._last_rain==None:
self.last_rain_pos=self.curpos-1
if (self._pos_in_file>0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[self._last_rain_pos,self.y,self.x]
nc_data.ncfile.close()
elif (self._curfile==0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
nx=nc_data.data.shape[1]
ny=nc_data.data.shape[2]
self._last_rain=np.zeros((nx,ny))[self.x,self.y]
nc_data.ncfile.close()
else:
nc_data=mygis.read_nc(self.files[self._curfile-1],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[-1,self.x,self.y]
nc_data.ncfile.close()
# else: we already have a valid _last_rain, just return it this should be the case most of the time
return self._last_rain
@last_rain.setter
def last_rain(self,value):
if hasattr(value,__iter__):
self.last_rain_pos=value[0]
self._last_rain=value[1]
else:
self.last_rain_pos=value
self._last_rain=None # the getter will automagically generate last_rain
def load_data(self,varname, filename=None, curtime=None):
if type(varname)!=str:
return varname
if filename==None:
filename=self.files[self._curfile]
if curtime==None:
curtime=self._pos_in_file
data=mygis.read_nc(filename,varname,returnNCvar=True)
dimlen=len(data.data.shape)
# 2D vars e.g. RAINNC, rain
if dimlen==self.DIM_2D_SHAPE:
if dimlen==2:
outputdata=data.data[self.y,self.x]
else:
outputdata=data.data[curtime,self.y,self.x]
# 3D vars e.g. QVAPOR, qv
elif dimlen==self.DIM_3D_SHAPE:
if dimlen==3:
outputdata=self.collapse_z(data.data[self.z,self.y,self.x],varname)
else:
outputdata=self.collapse_z(data.data[curtime,self.z,self.y,self.x],varname)
else:
raise IndexError("Do not know how to process {} dimensions".format(len(data.data.shape)))
if varname==self._rainvar:
curent_rain=outputdata[:]
outputdata-=self.last_rain
self.last_rain=(self.curpos,curent_rain)
return outputdata
def get_current_date(self):
"""Assumes a hard coded filename (e.g. WRF output filenames wrfout_d01_2007-01-01_00:00:00)"""
if self._datamodel=="WRF":
datestring=self.files[self._curfile].split("_")[2]+"-"+str(self._pos_in_file)
return datetime.datetime.strptime(datestring,"%Y-%m-%d-%H")
else:
return datetime.datetime(2007,01,01,00)+datetime.timedelta(self.curpos/24.0)
def __len__(self):
return len(self.files)*self.times_per_file
def | (self):
return self
def __next__(self):
self.curpos+=1
output_data=Bunch()
filename=self.files[self._curfile]
for v in self._var_names:
if type(v)==str:
curdata=self.load_data(v)
curvarname=v
elif type(v)==list:
cur_operator=v[0]
for varname in v[1:]:
if type(varname)==str:
curvarname=v[1]
break
curdata=self.load_data(v[1])
for curv in v[2:]:
next_data=self.load_data(curv)
cur_operator(curdata,next_data)
output_data[self._short_names[curvarname]]=curdata
output_data.date=self.get_current_date()
return output_data
next=__next__
clims=dict( qv=(0,0.004),
qc=(0,0.0003),
t=(260,310),
u=(-15,15),
v=(-15,15),
rain=(0,0.000005))
def make_subplot(data,ny,nx,curplot,v,extra_title):
plt.subplot(ny,nx,curplot)
plt.imshow(data)
plt.clim(clims[v])
plt.colorbar()
plt.title(v+extra_title)
def make_plots(data1,data2,date,fig=None):
plt.close("all")
if fig==None:
fig=plt.figure(figsize=(24,14));
else:
fig.clear()
ny=3
nx=4
curplot=0
varnames=["qv","qc","u","v","t","rain"]
for v in varnames:
curplot+=1
make_subplot(data1[v],ny,nx,curplot,v," "+str(date)[:14])
curplot+=1
make_subplot(data2[v],ny,nx,curplot,v," "+str(date)[:14])
return fig
def main(icar_dir="output/",output_dir="./"):
output_filename=output_dir+"vis_{}.png"
wrf_files=glob.glob(wrf_dir+"wrfout*")
wrf_files.sort()
icar_files=glob.glob(icar_dir+"swim_out*")
icar_files.sort()
wrf_data=DataReader(wrf_files,datatype="WRF")
icar_data=DataReader(icar_files,datatype="ICAR")
fig=plt.figure(figsize=(24,14));
for i in range(len(wrf_data)):
wrf=wrf_data.next()
icar=icar_data.next()
print(str(wrf.date),str(icar.date))
sys.stdout.flush()
fig=make_plots(icar,wrf,wrf.date,fig=fig)
fig.savefig(output_filename.format(str(wrf.date).replace(" ","_")))
if __name__ == '__main__':
global wrf_dir
out_dir="./"
icar_dir="output/"
if len(sys.argv)>1:
if sys.argv[1][:2]=="-h":
print("Usage: real_comparison.py [icar_output_directory] [vis_output_directory] [wrf_dir]")
sys.exit()
icar_dir=sys.argv[1]
if len(sys.argv)>2:
out_dir=sys.argv[2]
if len(sys.argv)>3:
wrf_dir=sys.argv[3]
main(icar_dir,out_dir) | __iter__ | identifier_name |
real_comparison.py | #!/usr/bin/env python
import sys
import datetime
import glob
import operator as op
import numpy as np
import matplotlib.pyplot as plt
from bunch import Bunch
import mygis
wrf_dir="/glade/u/home/gutmann/scratch/wrfoutput/4km/2007/"
DIM_2D_SHAPE=3
DIM_3D_SHAPE=4
def echo(fn):
def wrapped(*v, **k):
print(fn.__name__)
return fn(*v, **k)
return wrapped
def exner(th,p):
Rd=287.058
cp=1004.0
p0=100000
pii=(p/p0)**(Rd/cp)
return th * pii
class DataReader(object):
# only directly accessible public attributes
files=None
times_per_file=1
# curpos, last_rain, and last_rain_pos are accessible via @properties
_curpos=0
_pos_in_file=0
_curfile=0
_last_rain=None
_last_rain_pos=-1
_lr_pos_in_file=-1
_lr_curfile=0
_rainvar="RAINNC"
_testvar=None
# _var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],"RAINNC",[op.add,"T2",300],"U","V","W"]
_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],[op.add,"T2",300],"U","V"]
_short_names=dict(QVAPOR="qv",QCLOUD="qc",QICE="qc",RAINNC="rain",T="t",T2="t",U="u",V="v",W="w",QRAIN="rain",
qv="qv",qc="qc",rain="rain",qr="rain",th="t",u="u",v="v",w="w")
_collapse_functions=dict(QVAPOR=np.mean,QCLOUD=np.sum,T=np.mean,U=np.mean,V=np.mean,W=np.mean,
QICE=np.sum,QRAIN=np.sum,QSNOW=np.sum,
qv=np.mean,qc=np.sum,th=np.mean,u=np.mean,v=np.mean,w=np.mean,
qi=np.sum,qr=np.sum,qs=np.sum,p=np.mean)
_wrf_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],"T2","U","V"]#[op.add,"T",290],"U","V"]
_icar_var_names=["qv",[op.add,"qc","qi"],[op.add,"qr","qs"],[exner,"th","p"],"u","v"]
x=slice(0,None) #by default take all data in the file in x,y, and z
y=slice(0,None)
# z=slice(0,None)
z=slice(0,10)
# zslices=dict(qv=slice(0,10),qc=slice(0,10),t=slice(1),)
# yslices=dict()
# yslices.setdefault(y)
llh
def __init__(self, filenames,start_pos=0,datatype="WRF"):
super(DataReader,self).__init__()
self.files=filenames
self._datamodel=datatype
if datatype=="WRF":
self._var_names=self._wrf_var_names
test_var=mygis.read_nc(self.files[0],self._var_names[0],returnNCvar=True)
self.times_per_file=test_var.data.shape[0]
test_var.ncfile.close()
self.zaxis=0
self.DIM_2D_SHAPE=3
self.DIM_3D_SHAPE=4
if datatype=="ICAR":
self._var_names=self._icar_var_names
self.times_per_file=1
self._rainvar="rain"
tmp=self.y
self.y=self.z
self.z=tmp
self.zaxis=1
self.DIM_2D_SHAPE=2
self.DIM_3D_SHAPE=3
#note this calls the setter which will set pos_in_file and cur_file
self.curpos=start_pos
def _get_collapsing_func(self,varname):
"""docstring for get_collapsing_func"""
try:
myfunc=self._collapse_functions[varname]
except:
myfunc=np.mean
return myfunc
def collapse_z(self,data,varname):
if len(data.shape)==3:
myfunc=self._get_collapsing_func(varname)
return myfunc(data,axis=self.zaxis)
else:
return data
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def curpos(self):
return self._curpos
@curpos.setter
def curpos(self,pos):
self._curpos=pos
self._pos_in_file= int(self._curpos) % int(self.times_per_file)
self._curfile = int(self._curpos) / int(self.times_per_file)
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def last_rain_pos(self):
return self._last_rain_pos
@curpos.setter
def last_rain_pos(self,pos):
self._last_rain_pos=pos
self._lr_pos_in_file= int(self._last_rain_pos) % int(self.times_per_file)
self._lr_curfile = int(self._last_rain_pos) / int(self.times_per_file)
# Get/Set the last_rain variable
@property
def last_rain(self):
if self._last_rain==None:
self.last_rain_pos=self.curpos-1
if (self._pos_in_file>0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[self._last_rain_pos,self.y,self.x]
nc_data.ncfile.close()
elif (self._curfile==0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
nx=nc_data.data.shape[1]
ny=nc_data.data.shape[2]
self._last_rain=np.zeros((nx,ny))[self.x,self.y]
nc_data.ncfile.close()
else:
nc_data=mygis.read_nc(self.files[self._curfile-1],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[-1,self.x,self.y]
nc_data.ncfile.close()
# else: we already have a valid _last_rain, just return it this should be the case most of the time
return self._last_rain
@last_rain.setter
def last_rain(self,value):
if hasattr(value,__iter__):
self.last_rain_pos=value[0]
self._last_rain=value[1]
else:
self.last_rain_pos=value
self._last_rain=None # the getter will automagically generate last_rain
def load_data(self,varname, filename=None, curtime=None):
if type(varname)!=str:
return varname
if filename==None:
filename=self.files[self._curfile]
if curtime==None:
curtime=self._pos_in_file
data=mygis.read_nc(filename,varname,returnNCvar=True)
dimlen=len(data.data.shape)
# 2D vars e.g. RAINNC, rain
if dimlen==self.DIM_2D_SHAPE:
if dimlen==2:
outputdata=data.data[self.y,self.x]
else:
outputdata=data.data[curtime,self.y,self.x]
# 3D vars e.g. QVAPOR, qv
elif dimlen==self.DIM_3D_SHAPE:
if dimlen==3:
outputdata=self.collapse_z(data.data[self.z,self.y,self.x],varname)
else:
outputdata=self.collapse_z(data.data[curtime,self.z,self.y,self.x],varname)
else:
raise IndexError("Do not know how to process {} dimensions".format(len(data.data.shape)))
if varname==self._rainvar:
curent_rain=outputdata[:]
outputdata-=self.last_rain
self.last_rain=(self.curpos,curent_rain)
return outputdata
def get_current_date(self):
"""Assumes a hard coded filename (e.g. WRF output filenames wrfout_d01_2007-01-01_00:00:00)"""
if self._datamodel=="WRF":
datestring=self.files[self._curfile].split("_")[2]+"-"+str(self._pos_in_file)
return datetime.datetime.strptime(datestring,"%Y-%m-%d-%H")
else:
return datetime.datetime(2007,01,01,00)+datetime.timedelta(self.curpos/24.0)
def __len__(self):
return len(self.files)*self.times_per_file
def __iter__(self):
return self
def __next__(self):
self.curpos+=1
output_data=Bunch()
filename=self.files[self._curfile]
for v in self._var_names:
if type(v)==str:
curdata=self.load_data(v)
curvarname=v
elif type(v)==list:
cur_operator=v[0]
for varname in v[1:]:
if type(varname)==str:
curvarname=v[1]
break
curdata=self.load_data(v[1])
for curv in v[2:]:
next_data=self.load_data(curv)
cur_operator(curdata,next_data)
output_data[self._short_names[curvarname]]=curdata
output_data.date=self.get_current_date()
return output_data
next=__next__
clims=dict( qv=(0,0.004),
qc=(0,0.0003),
t=(260,310),
u=(-15,15),
v=(-15,15),
rain=(0,0.000005))
def make_subplot(data,ny,nx,curplot,v,extra_title):
plt.subplot(ny,nx,curplot)
plt.imshow(data)
plt.clim(clims[v])
plt.colorbar()
plt.title(v+extra_title)
def make_plots(data1,data2,date,fig=None):
plt.close("all")
if fig==None:
fig=plt.figure(figsize=(24,14));
else:
fig.clear()
ny=3
nx=4
curplot=0
varnames=["qv","qc","u","v","t","rain"]
for v in varnames:
curplot+=1
make_subplot(data1[v],ny,nx,curplot,v," "+str(date)[:14])
curplot+=1
make_subplot(data2[v],ny,nx,curplot,v," "+str(date)[:14])
return fig
def main(icar_dir="output/",output_dir="./"):
output_filename=output_dir+"vis_{}.png"
wrf_files=glob.glob(wrf_dir+"wrfout*")
wrf_files.sort()
icar_files=glob.glob(icar_dir+"swim_out*")
icar_files.sort()
wrf_data=DataReader(wrf_files,datatype="WRF")
icar_data=DataReader(icar_files,datatype="ICAR")
fig=plt.figure(figsize=(24,14));
for i in range(len(wrf_data)):
wrf=wrf_data.next()
icar=icar_data.next()
print(str(wrf.date),str(icar.date))
sys.stdout.flush() |
fig=make_plots(icar,wrf,wrf.date,fig=fig)
fig.savefig(output_filename.format(str(wrf.date).replace(" ","_")))
if __name__ == '__main__':
global wrf_dir
out_dir="./"
icar_dir="output/"
if len(sys.argv)>1:
if sys.argv[1][:2]=="-h":
print("Usage: real_comparison.py [icar_output_directory] [vis_output_directory] [wrf_dir]")
sys.exit()
icar_dir=sys.argv[1]
if len(sys.argv)>2:
out_dir=sys.argv[2]
if len(sys.argv)>3:
wrf_dir=sys.argv[3]
main(icar_dir,out_dir) | random_line_split | |
real_comparison.py | #!/usr/bin/env python
import sys
import datetime
import glob
import operator as op
import numpy as np
import matplotlib.pyplot as plt
from bunch import Bunch
import mygis
wrf_dir="/glade/u/home/gutmann/scratch/wrfoutput/4km/2007/"
DIM_2D_SHAPE=3
DIM_3D_SHAPE=4
def echo(fn):
def wrapped(*v, **k):
print(fn.__name__)
return fn(*v, **k)
return wrapped
def exner(th,p):
Rd=287.058
cp=1004.0
p0=100000
pii=(p/p0)**(Rd/cp)
return th * pii
class DataReader(object):
# only directly accessible public attributes
files=None
times_per_file=1
# curpos, last_rain, and last_rain_pos are accessible via @properties
_curpos=0
_pos_in_file=0
_curfile=0
_last_rain=None
_last_rain_pos=-1
_lr_pos_in_file=-1
_lr_curfile=0
_rainvar="RAINNC"
_testvar=None
# _var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],"RAINNC",[op.add,"T2",300],"U","V","W"]
_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],[op.add,"T2",300],"U","V"]
_short_names=dict(QVAPOR="qv",QCLOUD="qc",QICE="qc",RAINNC="rain",T="t",T2="t",U="u",V="v",W="w",QRAIN="rain",
qv="qv",qc="qc",rain="rain",qr="rain",th="t",u="u",v="v",w="w")
_collapse_functions=dict(QVAPOR=np.mean,QCLOUD=np.sum,T=np.mean,U=np.mean,V=np.mean,W=np.mean,
QICE=np.sum,QRAIN=np.sum,QSNOW=np.sum,
qv=np.mean,qc=np.sum,th=np.mean,u=np.mean,v=np.mean,w=np.mean,
qi=np.sum,qr=np.sum,qs=np.sum,p=np.mean)
_wrf_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],"T2","U","V"]#[op.add,"T",290],"U","V"]
_icar_var_names=["qv",[op.add,"qc","qi"],[op.add,"qr","qs"],[exner,"th","p"],"u","v"]
x=slice(0,None) #by default take all data in the file in x,y, and z
y=slice(0,None)
# z=slice(0,None)
z=slice(0,10)
# zslices=dict(qv=slice(0,10),qc=slice(0,10),t=slice(1),)
# yslices=dict()
# yslices.setdefault(y)
llh
def __init__(self, filenames,start_pos=0,datatype="WRF"):
super(DataReader,self).__init__()
self.files=filenames
self._datamodel=datatype
if datatype=="WRF":
self._var_names=self._wrf_var_names
test_var=mygis.read_nc(self.files[0],self._var_names[0],returnNCvar=True)
self.times_per_file=test_var.data.shape[0]
test_var.ncfile.close()
self.zaxis=0
self.DIM_2D_SHAPE=3
self.DIM_3D_SHAPE=4
if datatype=="ICAR":
self._var_names=self._icar_var_names
self.times_per_file=1
self._rainvar="rain"
tmp=self.y
self.y=self.z
self.z=tmp
self.zaxis=1
self.DIM_2D_SHAPE=2
self.DIM_3D_SHAPE=3
#note this calls the setter which will set pos_in_file and cur_file
self.curpos=start_pos
def _get_collapsing_func(self,varname):
"""docstring for get_collapsing_func"""
try:
myfunc=self._collapse_functions[varname]
except:
myfunc=np.mean
return myfunc
def collapse_z(self,data,varname):
if len(data.shape)==3:
myfunc=self._get_collapsing_func(varname)
return myfunc(data,axis=self.zaxis)
else:
|
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def curpos(self):
return self._curpos
@curpos.setter
def curpos(self,pos):
self._curpos=pos
self._pos_in_file= int(self._curpos) % int(self.times_per_file)
self._curfile = int(self._curpos) / int(self.times_per_file)
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def last_rain_pos(self):
return self._last_rain_pos
@curpos.setter
def last_rain_pos(self,pos):
self._last_rain_pos=pos
self._lr_pos_in_file= int(self._last_rain_pos) % int(self.times_per_file)
self._lr_curfile = int(self._last_rain_pos) / int(self.times_per_file)
# Get/Set the last_rain variable
@property
def last_rain(self):
if self._last_rain==None:
self.last_rain_pos=self.curpos-1
if (self._pos_in_file>0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[self._last_rain_pos,self.y,self.x]
nc_data.ncfile.close()
elif (self._curfile==0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
nx=nc_data.data.shape[1]
ny=nc_data.data.shape[2]
self._last_rain=np.zeros((nx,ny))[self.x,self.y]
nc_data.ncfile.close()
else:
nc_data=mygis.read_nc(self.files[self._curfile-1],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[-1,self.x,self.y]
nc_data.ncfile.close()
# else: we already have a valid _last_rain, just return it this should be the case most of the time
return self._last_rain
@last_rain.setter
def last_rain(self,value):
if hasattr(value,__iter__):
self.last_rain_pos=value[0]
self._last_rain=value[1]
else:
self.last_rain_pos=value
self._last_rain=None # the getter will automagically generate last_rain
def load_data(self,varname, filename=None, curtime=None):
if type(varname)!=str:
return varname
if filename==None:
filename=self.files[self._curfile]
if curtime==None:
curtime=self._pos_in_file
data=mygis.read_nc(filename,varname,returnNCvar=True)
dimlen=len(data.data.shape)
# 2D vars e.g. RAINNC, rain
if dimlen==self.DIM_2D_SHAPE:
if dimlen==2:
outputdata=data.data[self.y,self.x]
else:
outputdata=data.data[curtime,self.y,self.x]
# 3D vars e.g. QVAPOR, qv
elif dimlen==self.DIM_3D_SHAPE:
if dimlen==3:
outputdata=self.collapse_z(data.data[self.z,self.y,self.x],varname)
else:
outputdata=self.collapse_z(data.data[curtime,self.z,self.y,self.x],varname)
else:
raise IndexError("Do not know how to process {} dimensions".format(len(data.data.shape)))
if varname==self._rainvar:
curent_rain=outputdata[:]
outputdata-=self.last_rain
self.last_rain=(self.curpos,curent_rain)
return outputdata
def get_current_date(self):
"""Assumes a hard coded filename (e.g. WRF output filenames wrfout_d01_2007-01-01_00:00:00)"""
if self._datamodel=="WRF":
datestring=self.files[self._curfile].split("_")[2]+"-"+str(self._pos_in_file)
return datetime.datetime.strptime(datestring,"%Y-%m-%d-%H")
else:
return datetime.datetime(2007,01,01,00)+datetime.timedelta(self.curpos/24.0)
def __len__(self):
return len(self.files)*self.times_per_file
def __iter__(self):
return self
def __next__(self):
self.curpos+=1
output_data=Bunch()
filename=self.files[self._curfile]
for v in self._var_names:
if type(v)==str:
curdata=self.load_data(v)
curvarname=v
elif type(v)==list:
cur_operator=v[0]
for varname in v[1:]:
if type(varname)==str:
curvarname=v[1]
break
curdata=self.load_data(v[1])
for curv in v[2:]:
next_data=self.load_data(curv)
cur_operator(curdata,next_data)
output_data[self._short_names[curvarname]]=curdata
output_data.date=self.get_current_date()
return output_data
next=__next__
clims=dict( qv=(0,0.004),
qc=(0,0.0003),
t=(260,310),
u=(-15,15),
v=(-15,15),
rain=(0,0.000005))
def make_subplot(data,ny,nx,curplot,v,extra_title):
plt.subplot(ny,nx,curplot)
plt.imshow(data)
plt.clim(clims[v])
plt.colorbar()
plt.title(v+extra_title)
def make_plots(data1,data2,date,fig=None):
plt.close("all")
if fig==None:
fig=plt.figure(figsize=(24,14));
else:
fig.clear()
ny=3
nx=4
curplot=0
varnames=["qv","qc","u","v","t","rain"]
for v in varnames:
curplot+=1
make_subplot(data1[v],ny,nx,curplot,v," "+str(date)[:14])
curplot+=1
make_subplot(data2[v],ny,nx,curplot,v," "+str(date)[:14])
return fig
def main(icar_dir="output/",output_dir="./"):
output_filename=output_dir+"vis_{}.png"
wrf_files=glob.glob(wrf_dir+"wrfout*")
wrf_files.sort()
icar_files=glob.glob(icar_dir+"swim_out*")
icar_files.sort()
wrf_data=DataReader(wrf_files,datatype="WRF")
icar_data=DataReader(icar_files,datatype="ICAR")
fig=plt.figure(figsize=(24,14));
for i in range(len(wrf_data)):
wrf=wrf_data.next()
icar=icar_data.next()
print(str(wrf.date),str(icar.date))
sys.stdout.flush()
fig=make_plots(icar,wrf,wrf.date,fig=fig)
fig.savefig(output_filename.format(str(wrf.date).replace(" ","_")))
if __name__ == '__main__':
global wrf_dir
out_dir="./"
icar_dir="output/"
if len(sys.argv)>1:
if sys.argv[1][:2]=="-h":
print("Usage: real_comparison.py [icar_output_directory] [vis_output_directory] [wrf_dir]")
sys.exit()
icar_dir=sys.argv[1]
if len(sys.argv)>2:
out_dir=sys.argv[2]
if len(sys.argv)>3:
wrf_dir=sys.argv[3]
main(icar_dir,out_dir) | return data | conditional_block |
PanoImageRenderer.js | import Component from "@egjs/component";
import {glMatrix, vec3, mat3, mat4, quat} from "gl-matrix";
import ImageLoader from "./ImageLoader";
import VideoLoader from "./VideoLoader";
import WebGLUtils from "./WebGLUtils";
import Renderer from "./renderer/Renderer";
import CubeRenderer from "./renderer/CubeRenderer";
import CubeStripRenderer from "./renderer/CubeStripRenderer";
import SphereRenderer from "./renderer/SphereRenderer";
import CylinderRenderer from "./renderer/CylinderRenderer";
import VRManager from "./vr/VRManager";
import XRManager from "./vr/XRManager";
import WebGLAnimator from "./WebGLAnimator";
import {util as mathUtil} from "../utils/math-util";
import {devicePixelRatio, WEBXR_SUPPORTED} from "../utils/browserFeature";
import {PROJECTION_TYPE, STEREO_FORMAT} from "../PanoViewer/consts";
import {IS_IOS} from "../utils/browser";
const ImageType = PROJECTION_TYPE;
let DEVICE_PIXEL_RATIO = devicePixelRatio || 1;
// DEVICE_PIXEL_RATIO 가 2를 초과하는 경우는 리소스 낭비이므로 2로 맞춘다.
if (DEVICE_PIXEL_RATIO > 2) {
DEVICE_PIXEL_RATIO = 2;
}
// define custom events name
/**
* TODO: how to manage events/errortype with PanoViewer
*
* I think renderer events should be seperated from viewer events although it has same name.
*/
const EVENTS = {
BIND_TEXTURE: "bindTexture",
IMAGE_LOADED: "imageLoaded",
ERROR: "error",
RENDERING_CONTEXT_LOST: "renderingContextLost",
RENDERING_CONTEXT_RESTORE: "renderingContextRestore",
};
const ERROR_TYPE = {
INVALID_DEVICE: 10,
NO_WEBGL: 11,
FAIL_IMAGE_LOAD: 12,
RENDERER_ERROR: 13
};
class PanoImageRenderer extends Component {
static EVENTS = EVENTS;
static ERROR_TYPE = ERROR_TYPE;
constructor(
image, width, height, isVideo, sphericalConfig, renderingContextAttributes
) {
// Super constructor
super();
this.sphericalConfig = sphericalConfig;
this.fieldOfView = sphericalConfig.fieldOfView;
this.width = width;
this.height = height;
this._lastQuaternion = null;
this._lastYaw = null;
this._lastPitch = null;
this._lastFieldOfView = null;
this.pMatrix = mat4.create();
this.mvMatrix = mat4.create();
// initialzie pMatrix
mat4.perspective(this.pMatrix, glMatrix.toRadian(this.fieldOfView), width / height, 0.1, 100);
this.textureCoordBuffer = null;
this.vertexBuffer = null;
this.indexBuffer = null;
this.canvas = this._initCanvas(width, height);
this._setDefaultCanvasStyle();
this._wrapper = null; // canvas wrapper
this._wrapperOrigStyle = null;
this._renderingContextAttributes = renderingContextAttributes;
this._image = null;
this._imageConfig = null;
this._imageIsReady = false;
this._shouldForceDraw = false;
this._keepUpdate = false; // Flag to specify 'continuous update' on video even when still.
this._onContentLoad = this._onContentLoad.bind(this);
this._onContentError = this._onContentError.bind(this);
this._animator = new WebGLAnimator();
// VR/XR manager
this._vr = null;
if (image) {
this.setImage({
image,
imageType: sphericalConfig.imageType,
isVideo,
cubemapConfig: sphericalConfig.cubemapConfig
});
}
}
// FIXME: Please refactor me to have more loose connection to yawpitchcontrol
setYawPitchControl(yawPitchControl) {
this._yawPitchControl = yawPitchControl;
}
getContent() {
return this._image;
}
setImage({image, imageType, isVideo = false, cubemapConfig}) {
this._imageIsReady = false;
this._isVideo = isVideo;
this._imageConfig = Object.assign(
{
/* RLUDBF is abnormal, we use it on CUBEMAP only */
order: (imageType === ImageType.CUBEMAP) ? "RLUDBF" : "RLUDFB",
tileConfig: {
flipHorizontal: false,
rotation: 0
}
},
cubemapConfig
);
this._setImageType(imageType);
if (this._contentLoader) {
this._contentLoader.destroy();
}
if (isVideo) {
this._contentLoader = new VideoLoader();
this._keepUpdate = true;
} else {
this._contentLoader = new ImageLoader();
this._keepUpdate = false;
}
// img element or img url
this._contentLoader.set(image);
// 이미지의 사이즈를 캐시한다.
// image is reference for content in contentLoader, so it may be not valid if contentLoader is destroyed.
this._image = this._contentLoader.getElement();
return this._contentLoader.get()
.then(this._onContentLoad, this._onContentError)
.catch(e => setTimeout(() => { throw e; }));// Prevent exceptions from being isolated in promise chain.
}
_setImageType(imageType) {
if (!imageType || this._imageType === imageType) {
return;
}
this._imageType = imageType;
this._isCubeMap = imageType === ImageType.CUBEMAP;
if (this._renderer) {
this._renderer.off();
}
switch (imageType) {
case ImageType.CUBEMAP:
this._renderer = new CubeRenderer();
break;
case ImageType.CUBESTRIP:
this._renderer = new CubeStripRenderer();
break;
case ImageType.PANORAMA:
this._renderer = new CylinderRenderer();
break;
case ImageType.STEREOSCOPIC_EQUI:
this._renderer = new SphereRenderer(this.sphericalConfig.stereoFormat);
break;
default:
this._renderer = new SphereRenderer(STEREO_FORMAT.NONE);
break;
}
this._renderer.on(Renderer.EVENTS.ERROR, e => {
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.RENDERER_ERROR,
message: e.message
});
});
this._initWebGL();
}
_initCanvas(width, height) {
const canvas = document.createElement("canvas");
canvas.width = width;
canvas.height = height;
this._onWebglcontextlost = this._onWebglcontextlost.bind(this);
this._onWebglcontextrestored = this._onWebglcontextrestored.bind(this);
canvas.addEventListener("webglcontextlost", this._onWebglcontextlost);
canvas.addEventListener("webglcontextrestored", this._onWebglcontextrestored);
return canvas;
}
_setDefaultCanvasStyle() {
const canvas = this.canvas;
canvas.style.bottom = 0;
canvas.style.left = 0;
canvas.style.right = 0;
canvas.style.top = 0;
canvas.style.margin = "auto";
canvas.style.maxHeight = "100%";
canvas.style.maxWidth = "100%";
canvas.style.outline = "none";
canvas.style.position = "absolute";
}
_onContentError(error) {
this._imageIsReady = false;
this._image = null;
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.FAIL_IMAGE_LOAD,
message: "failed to load image"
});
return false;
}
_triggerContentLoad() {
this.trigger(EVENTS.IMAGE_LOADED, {
content: this._image,
isVideo: this._isVideo,
projectionType: this._imageType
});
}
_onContentLoad(image) {
this._imageIsReady = true;
this._triggerContentLoad();
return true;
}
isImageLoaded() {
return !!this._image && this._imageIsReady &&
(!this._isVideo || this._image.readyState >= 2 /* HAVE_CURRENT_DATA */);
}
bindTexture() {
return new Promise((res, rej) => {
if (!this._contentLoader) {
rej("ImageLoader is not initialized");
return;
}
this._contentLoader.get()
.then(() => {
this._bindTexture();
}, rej)
.then(res);
});
}
// 부모 엘리먼트에 canvas 를 붙임
attachTo(parentElement) {
this.detach();
parentElement.appendChild(this.canvas);
this._wrapper = parentElement;
}
forceContextLoss() {
if (this.hasRenderingContext()) {
const loseContextExtension = this.context.getExtension("WEBGL_lose_context");
if (loseContextExtension) {
loseContextExtension.loseContext();
}
}
}
// 부모 엘리먼트에서 canvas 를 제거
detach() {
if (this.canvas.parentElement) {
this.canvas.parentElement.removeChild(this.canvas);
}
}
destroy() {
if (this._contentLoader) {
this._contentLoader.destroy();
}
this._animator.stop();
this.detach();
this.forceContextLoss();
this.off();
this.canvas.removeEventListener("webglcontextlost", this._onWebglcontextlost);
this.canvas.removeEventListener("webglcontextrestored", this._onWebglcontextrestored);
}
hasRenderingContext() {
if (!(this.context && !this.context.isContextLost())) {
return false;
} else if (
this.context &&
!this.context.getProgramParameter(this.shaderProgram, this.context.LINK_STATUS)) {
return false;
}
return true;
}
_initShaderProgram() {
const gl = this.context;
if (this.shaderProgram) {
gl.deleteProgram(this.shaderProgram);
this.shaderProgram = null;
}
const renderer = this._renderer;
const vsSource = renderer.getVertexShaderSource();
const fsSource = renderer.getFragmentShaderSource();
const vertexShader = WebGLUtils.createShader(gl, gl.VERTEX_SHADER, vsSource);
const fragmentShader = WebGLUtils.createShader(gl, gl.FRAGMENT_SHADER, fsSource);
const shaderProgram = WebGLUtils.createProgram(gl, vertexShader, fragmentShader);
if (!shaderProgram) {
throw new Error(`Failed to intialize shaders: ${WebGLUtils.getErrorNameFromWebGLErrorCode(gl.getError())}`);
}
gl.useProgram(shaderProgram);
shaderProgram.vertexPositionAttribute = gl.getAttribLocation(shaderProgram, "aVertexPosition");
gl.enableVertexAttribArray(shaderProgram.vertexPositionAttribute);
shaderProgram.pMatrixUniform = gl.getUniformLocation(shaderProgram, "uPMatrix");
shaderProgram.mvMatrixUniform = gl.getUniformLocation(shaderProgram, "uMVMatrix");
shaderProgram.samplerUniform = gl.getUniformLocation(shaderProgram, "uSampler");
shaderProgram.textureCoordAttribute = gl.getAttribLocation(shaderProgram, "aTextureCoord");
shaderProgram.uEye = gl.getUniformLocation(shaderProgram, "uEye");
gl.enableVertexAttribArray(shaderProgram.textureCoordAttribute);
// clear buffer
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT | gl.STENCIL_BUFFER_BIT);
// Use TEXTURE0
gl.uniform1i(shaderProgram.samplerUniform, 0);
this.shaderProgram = shaderProgram;
}
_onWebglcontextlost(e) {
e.preventDefault();
this.trigger(EVENTS.RENDERING_CONTEXT_LOST);
}
_onWebglcontextrestored(e) {
this._initWebGL();
this.trigger(EVENTS.RENDERING_CONTEXT_RESTORE);
}
updateFieldOfView(fieldOfView) {
this.fieldOfView = fieldOfView;
this._updateViewport();
}
updateViewportDimensions(width, height) {
let viewPortChanged = false;
this.width = width;
this.height = height;
const w = width * DEVICE_PIXEL_RATIO;
const h = height * DEVICE_PIXEL_RATIO;
if (w !== this.canvas.width) {
this.canvas.width = w;
viewPortChanged = true;
}
if (h !== this.canvas.height) {
this.canvas.height = h;
viewPortChanged = true;
}
if (!viewPortChanged) {
return;
}
this._updateViewport();
this._shouldForceDraw = true;
}
_updateViewport() {
mat4.perspective(
this.pMatrix,
glMatrix.toRadian(this.fieldOfView),
this.canvas.width / this.canvas.height,
0.1,
100);
this.context.viewport(0, 0, this.context.drawingBufferWidth, this.context.drawingBufferHeight);
}
_initWebGL() {
let gl;
// TODO: Following code does need to be executed only if width/height, cubicStrip property is changed.
try {
this._initRenderingContext();
gl = this.context;
this.updateViewportDimensions(this.width, this.height);
this._initShaderProgram();
} catch (e) {
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.NO_WEBGL,
message: "no webgl support"
});
this.destroy();
console.error(e); // eslint-disable-line no-console
return;
}
// 캔버스를 투명으로 채운다.
gl.clearColor(0, 0, 0, 0);
const textureTarget = this._isCubeMap ? gl.TEXTURE_CUBE_MAP : gl.TEXTURE_2D;
if (this.texture) {
gl.deleteTexture(this.texture);
}
this.texture = WebGLUtils.createTexture(gl, textureTarget);
if (this._imageType === ImageType.CUBESTRIP) {
// TODO: Apply following options on other projection type.
gl.enable(gl.CULL_FACE);
// gl.enable(gl.DEPTH_TEST);
}
}
_initRenderingContext() {
if (this.hasRenderingContext()) {
return;
}
if (!window.WebGLRenderingContext) {
throw new Error("WebGLRenderingContext not available.");
}
this.context = WebGLUtils.getWebglContext(this.canvas, this._renderingContextAttributes);
if (!this.context) {
throw new Error("Failed to acquire 3D rendering context");
}
}
_initBuffers() {
const vertexPositionData = this._renderer.getVertexPositionData();
const indexData = this._renderer.getIndexData();
const textureCoordData = this._renderer.getTextureCoordData(this._imageConfig);
const gl = this.context;
this.vertexBuffer = WebGLUtils.initBuffer(
gl, gl.ARRAY_BUFFER, new Float32Array(vertexPositionData), 3,
this.shaderProgram.vertexPositionAttribute);
this.indexBuffer = WebGLUtils.initBuffer(
gl, gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(indexData), 1);
this.textureCoordBuffer = WebGLUtils.initBuffer(
gl, gl.ARRAY_BUFFER, new Float32Array(textureCoordData), this._isCubeMap ? 3 : 2,
this.shaderProgram.textureCoordAttribute);
this._bindBuffers();
}
_bindTexture() {
// Detect if it is EAC Format while CUBESTRIP mode.
// We assume it is EAC if image is not 3/2 ratio.
if (this._imageType === ImageType.CUBESTRIP) {
const {width, height} = this._renderer.getDimension(this._image);
const isEAC = width && height && width / height !== 1.5;
this.context.uniform1f(this.context.getUniformLocation(this.shaderProgram, "uIsEAC"), isEAC);
} else if (this._imageType === ImageType.PANORAMA) {
const {width, height} = this._renderer.getDimension(this._image);
const imageAspectRatio = width && height && width / height;
this._renderer.updateShaderData({imageAspectRatio});
}
// intialize shader buffers after image is loaded.(by updateShaderData)
// because buffer may be differ by image size.(eg. CylinderRenderer)
this._initBuffers();
this._renderer.bindTexture(
this.context,
this.texture,
this._image,
this._imageConfig,
);
this._shouldForceDraw = true;
this.trigger(EVENTS.BIND_TEXTURE);
}
_updateTexture() {
this._renderer.updateTexture(
this.context,
this._image,
this._imageConfig,
);
}
keepUpdate(doUpdate) {
if (doUpdate && this.isImageLoaded() === false) {
// Force to draw a frame after image is loaded on render()
this._shouldForceDraw = true;
}
this._keepUpdate = doUpdate;
}
startRender() {
this._animator.setCallback(this._render.bind(this));
this._animator.start();
}
stopRender() {
this._animator.stop();
}
renderWithQuaternion(quaternion, fieldOfView) {
if (!this.isImageLoaded()) {
return;
}
if (this._keepUpdate === false &&
this._lastQuaternion && quat.exactEquals(this._lastQuaternion, quaternion | ldOfView && this.fieldOfView === fieldOfView &&
this._shouldForceDraw === false) {
return;
}
// updatefieldOfView only if fieldOfView is changed.
if (fieldOfView !== undefined && fieldOfView !== this.fieldOfView) {
this.updateFieldOfView(fieldOfView);
}
this.mvMatrix = mat4.fromQuat(mat4.create(), quaternion);
this._draw();
this._lastQuaternion = quat.clone(quaternion);
if (this._shouldForceDraw) {
this._shouldForceDraw = false;
}
}
renderWithYawPitch(yaw, pitch, fieldOfView) {
if (!this.isImageLoaded()) {
return;
}
if (this._keepUpdate === false &&
this._lastYaw !== null && this._lastYaw === yaw &&
this._lastPitch !== null && this._lastPitch === pitch &&
this.fieldOfView && this.fieldOfView === fieldOfView &&
this._shouldForceDraw === false) {
return;
}
// fieldOfView 가 존재하면서 기존의 값과 다를 경우에만 업데이트 호출
if (fieldOfView !== undefined && fieldOfView !== this.fieldOfView) {
this.updateFieldOfView(fieldOfView);
}
mat4.identity(this.mvMatrix);
mat4.rotateX(this.mvMatrix, this.mvMatrix, -glMatrix.toRadian(pitch));
mat4.rotateY(this.mvMatrix, this.mvMatrix, -glMatrix.toRadian(yaw));
this._draw();
this._lastYaw = yaw;
this._lastPitch = pitch;
if (this._shouldForceDraw) {
this._shouldForceDraw = false;
}
}
_render() {
const yawPitchControl = this._yawPitchControl;
const fov = yawPitchControl.getFov();
if (yawPitchControl.shouldRenderWithQuaternion()) {
const quaternion = yawPitchControl.getQuaternion();
this.renderWithQuaternion(quaternion, fov);
} else {
const yawPitch = yawPitchControl.getYawPitch();
this.renderWithYawPitch(yawPitch.yaw, yawPitch.pitch, fov);
}
}
_renderStereo = (time, frame) => {
const vr = this._vr;
const gl = this.context;
const eyeParams = vr.getEyeParams(gl, frame);
if (!eyeParams) return;
vr.beforeRender(gl, frame);
// Render both eyes
for (const eyeIndex of [0, 1]) {
const eyeParam = eyeParams[eyeIndex];
this.mvMatrix = eyeParam.mvMatrix;
this.pMatrix = eyeParam.pMatrix;
gl.viewport(...eyeParam.viewport);
gl.uniform1f(this.shaderProgram.uEye, eyeIndex);
this._bindBuffers();
this._draw();
}
vr.afterRender();
}
_bindBuffers() {
const gl = this.context;
const program = this.shaderProgram;
const vertexBuffer = this.vertexBuffer;
const textureCoordBuffer = this.textureCoordBuffer;
gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
gl.enableVertexAttribArray(program.vertexPositionAttribute);
gl.vertexAttribPointer(
program.vertexPositionAttribute, vertexBuffer.itemSize, gl.FLOAT, false, 0, 0
);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.indexBuffer);
gl.bindBuffer(gl.ARRAY_BUFFER, textureCoordBuffer);
gl.enableVertexAttribArray(program.textureCoordAttribute);
gl.vertexAttribPointer(
program.textureCoordAttribute, textureCoordBuffer.itemSize, gl.FLOAT, false, 0, 0
);
}
_draw() {
if (this._isVideo && this._keepUpdate) {
this._updateTexture();
}
this._renderer.render({
gl: this.context,
shaderProgram: this.shaderProgram,
indexBuffer: this.indexBuffer,
mvMatrix: this.mvMatrix,
pMatrix: this.pMatrix,
});
}
/**
* Returns projection renderer by each type
*/
getProjectionRenderer() {
return this._renderer;
}
/**
* @return Promise
*/
enterVR() {
const vr = this._vr;
if (!WEBXR_SUPPORTED && !navigator.getVRDisplays) {
return Promise.reject("VR is not available on this browser.");
}
if (vr && vr.isPresenting()) {
return Promise.resolve("VR already enabled.");
}
return this._requestPresent();
}
exitVR = () => {
const vr = this._vr;
const gl = this.context;
const animator = this._animator;
if (!vr) return;
vr.removeEndCallback(this.exitVR);
vr.destroy();
this._vr = null;
// Restore canvas & context on iOS
if (IS_IOS) {
this._restoreStyle();
}
this.updateViewportDimensions(this.width, this.height);
this._updateViewport();
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
this._bindBuffers();
this._shouldForceDraw = true;
animator.stop();
animator.setContext(window);
animator.setCallback(this._render.bind(this));
animator.start();
}
_requestPresent() {
const gl = this.context;
const canvas = this.canvas;
const animator = this._animator;
this._vr = WEBXR_SUPPORTED ?
new XRManager() :
new VRManager();
const vr = this._vr;
animator.stop();
return new Promise((resolve, reject) => {
vr.requestPresent(canvas, gl)
.then(() => {
vr.addEndCallback(this.exitVR);
animator.setContext(vr.context);
animator.setCallback(this._onFirstVRFrame);
if (IS_IOS) {
this._setWrapperFullscreen();
}
this._shouldForceDraw = true;
animator.start();
resolve("success");
})
.catch(e => {
vr.destroy();
this._vr = null;
animator.start();
reject(e);
});
});
}
_onFirstVRFrame = (time, frame) => {
const vr = this._vr;
const gl = this.context;
const animator = this._animator;
// If rendering is not ready, wait for next frame
if (!vr.canRender(frame)) return;
const minusZDir = vec3.fromValues(0, 0, -1);
const eyeParam = vr.getEyeParams(gl, frame)[0];
// Extract only rotation
const mvMatrix = mat3.fromMat4(mat3.create(), eyeParam.mvMatrix);
const pMatrix = mat3.fromMat4(mat3.create(), eyeParam.pMatrix);
const mvInv = mat3.invert(mat3.create(), mvMatrix);
const pInv = mat3.invert(mat3.create(), pMatrix);
const viewDir = vec3.transformMat3(vec3.create(), minusZDir, pInv);
vec3.transformMat3(viewDir, viewDir, mvInv);
const yawOffset = mathUtil.yawOffsetBetween(viewDir, vec3.fromValues(0, 0, 1));
if (yawOffset === 0) {
// If the yawOffset is exactly 0, then device sensor is not ready
// So read it again until it has any value in it
return;
}
vr.setYawOffset(yawOffset);
animator.setCallback(this._renderStereo);
}
_setWrapperFullscreen() {
const wrapper = this._wrapper;
if (!wrapper) return;
this._wrapperOrigStyle = wrapper.getAttribute("style");
const wrapperStyle = wrapper.style;
wrapperStyle.width = "100vw";
wrapperStyle.height = "100vh";
wrapperStyle.position = "fixed";
wrapperStyle.left = "0";
wrapperStyle.top = "0";
wrapperStyle.zIndex = "9999";
}
_restoreStyle() {
const wrapper = this._wrapper;
const canvas = this.canvas;
if (!wrapper) return;
if (this._wrapperOrigStyle) {
wrapper.setAttribute("style", this._wrapperOrigStyle);
} else {
wrapper.removeAttribute("style");
}
this._wrapperOrigStyle = null;
// Restore canvas style
canvas.removeAttribute("style");
this._setDefaultCanvasStyle();
}
}
export default PanoImageRenderer;
| ) &&
this.fie | conditional_block |
PanoImageRenderer.js | import Component from "@egjs/component";
import {glMatrix, vec3, mat3, mat4, quat} from "gl-matrix";
import ImageLoader from "./ImageLoader";
import VideoLoader from "./VideoLoader";
import WebGLUtils from "./WebGLUtils";
import Renderer from "./renderer/Renderer";
import CubeRenderer from "./renderer/CubeRenderer";
import CubeStripRenderer from "./renderer/CubeStripRenderer";
import SphereRenderer from "./renderer/SphereRenderer";
import CylinderRenderer from "./renderer/CylinderRenderer";
import VRManager from "./vr/VRManager";
import XRManager from "./vr/XRManager";
import WebGLAnimator from "./WebGLAnimator";
import {util as mathUtil} from "../utils/math-util";
import {devicePixelRatio, WEBXR_SUPPORTED} from "../utils/browserFeature";
import {PROJECTION_TYPE, STEREO_FORMAT} from "../PanoViewer/consts";
import {IS_IOS} from "../utils/browser";
const ImageType = PROJECTION_TYPE;
let DEVICE_PIXEL_RATIO = devicePixelRatio || 1;
// DEVICE_PIXEL_RATIO 가 2를 초과하는 경우는 리소스 낭비이므로 2로 맞춘다.
if (DEVICE_PIXEL_RATIO > 2) {
DEVICE_PIXEL_RATIO = 2;
}
// define custom events name
/**
* TODO: how to manage events/errortype with PanoViewer
*
* I think renderer events should be seperated from viewer events although it has same name.
*/
const EVENTS = {
BIND_TEXTURE: "bindTexture",
IMAGE_LOADED: "imageLoaded",
ERROR: "error",
RENDERING_CONTEXT_LOST: "renderingContextLost",
RENDERING_CONTEXT_RESTORE: "renderingContextRestore",
};
const ERROR_TYPE = {
INVALID_DEVICE: 10,
NO_WEBGL: 11,
FAIL_IMAGE_LOAD: 12,
RENDERER_ERROR: 13
};
class PanoImageRenderer extends Component {
static EVENTS = EVENTS;
static ERROR_TYPE = ERROR_TYPE;
constructor(
image, width, height, isVideo, sphericalConfig, renderingContextAttributes
) {
// Super constructor
super();
this.sphericalConfig = sphericalConfig;
this.fieldOfView = sphericalConfig.fieldOfView;
this.width = width;
this.height = height;
this._lastQuaternion = null;
this._lastYaw = null;
this._lastPitch = null;
this._lastFieldOfView = null;
this.pMatrix = mat4.create();
this.mvMatrix = mat4.create();
// initialzie pMatrix
mat4.perspective(this.pMatrix, glMatrix.toRadian(this.fieldOfView), width / height, 0.1, 100);
this.textureCoordBuffer = null;
this.vertexBuffer = null;
this.indexBuffer = null;
this.canvas = this._initCanvas(width, height);
this._setDefaultCanvasStyle();
this._wrapper = null; // canvas wrapper
this._wrapperOrigStyle = null;
this._renderingContextAttributes = renderingContextAttributes;
this._image = null;
this._imageConfig = null;
this._imageIsReady = false;
this._shouldForceDraw = false;
this._keepUpdate = false; // Flag to specify 'continuous update' on video even when still.
this._onContentLoad = this._onContentLoad.bind(this);
this._onContentError = this._onContentError.bind(this);
this._animator = new WebGLAnimator();
// VR/XR manager
this._vr = null;
if (image) {
this.setImage({
image,
imageType: sphericalConfig.imageType,
isVideo,
cubemapConfig: sphericalConfig.cubemapConfig
});
}
}
// FIXME: Please refactor me to have more loose connection to yawpitchcontrol
setYawPitchControl(yawPitchControl) {
this._yawPitchControl = yawPitchControl;
}
getContent() {
return this._image;
}
setImage({image, imageType, isVideo = false, cubemapConfig}) {
this._imageIsReady = false;
this._isVideo = isVideo;
this._imageConfig = Object.assign(
{
/* RLUDBF is abnormal, we use it on CUBEMAP only */
order: (imageType === ImageType.CUBEMAP) ? "RLUDBF" : "RLUDFB",
tileConfig: {
flipHorizontal: false,
rotation: 0
}
},
cubemapConfig
);
this._setImageType(imageType);
if (this._contentLoader) {
this._contentLoader.destroy();
}
if (isVideo) {
this._contentLoader = new VideoLoader();
this._keepUpdate = true;
} else {
this._contentLoader = new ImageLoader();
this._keepUpdate = false;
}
// img element or img url
this._contentLoader.set(image);
// 이미지의 사이즈를 캐시한다.
// image is reference for content in contentLoader, so it may be not valid if contentLoader is destroyed.
this._image = this._contentLoader.getElement();
return this._contentLoader.get()
.then(this._onContentLoad, this._onContentError)
.catch(e => setTimeout(() => { throw e; }));// Prevent exceptions from being isolated in promise chain.
}
_setImageType(imageType) {
if (!imageType || this._imageType === imageType) {
return;
}
this._imageType = imageType;
this._isCubeMap = imageType === ImageType.CUBEMAP;
if (this._renderer) {
this._renderer.off();
}
switch (imageType) {
case ImageType.CUBEMAP:
this._renderer = new CubeRenderer();
break;
case ImageType.CUBESTRIP:
this._renderer = new CubeStripRenderer();
break;
case ImageType.PANORAMA:
this._renderer = new CylinderRenderer();
break;
case ImageType.STEREOSCOPIC_EQUI:
this._renderer = new SphereRenderer(this.sphericalConfig.stereoFormat);
break;
default:
this._renderer = new SphereRenderer(STEREO_FORMAT.NONE);
break;
}
this._renderer.on(Renderer.EVENTS.ERROR, e => {
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.RENDERER_ERROR,
message: e.message
});
});
this._initWebGL();
}
_initCanvas(width, height) {
const canvas = document.createElement("canvas");
canvas.width = width;
canvas.height = height;
this._onWebglcontextlost = this._onWebglcontextlost.bind(this);
this._onWebglcontextrestored = this._onWebglcontextrestored.bind(this);
canvas.addEventListener("webglcontextlost", this._onWebglcontextlost);
canvas.addEventListener("webglcontextrestored", this._onWebglcontextrestored);
return canvas;
}
_setDefaultCanvasStyle() {
const canvas = this.canvas;
canvas.style.bottom = 0;
canvas.style.left = 0;
canvas.style.right = 0;
canvas.style.top = 0;
canvas.style.margin = "auto";
canvas.style.maxHeight = "100%";
canvas.style.maxWidth = "100%";
canvas.style.outline = "none";
canvas.style.position = "absolute";
}
_onContentError(error) {
this._imageIsReady = false;
this._image = null;
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.FAIL_IMAGE_LOAD,
message: "failed to load image"
});
return false;
}
_triggerContentLoad() {
this.trigger(EVENTS.IMAGE_LOADED, {
content: this._image,
isVideo: this._isVideo,
projectionType: this._imageType
});
}
_onContentLoad(image) {
this._imageIsReady = true;
this._triggerContentLoad();
return true;
}
isImageLoaded() {
return !!this._image && this._imageIsReady &&
(!this._isVideo || this._image.readyState >= 2 /* HAVE_CURRENT_DATA */);
}
bindTexture() {
return new Promise((res, rej) => {
if (!this._contentLoader) {
rej("ImageLoader is not initialized");
return;
}
this._contentLoader.get()
.then(() => {
this._bindTexture();
}, rej)
.then(res);
});
}
// 부모 엘리먼트에 canvas 를 붙임
attachTo(parentElement) {
this.detach();
parentElement.appendChild(this.canvas);
this._wrapper = parentElement;
}
forceContextLoss() {
if (this.hasRenderingContext()) {
const loseContextExtension = this.context.getExtension("WEBGL_lose_context");
if (loseContextExtension) {
loseContextExtension.loseContext();
}
}
}
// 부모 엘리먼트에서 canvas 를 제거
detach() {
if (this.canvas.parentElement) {
this.canvas.parentElement.removeChild(this.canvas);
}
}
destroy() {
if (this._contentLoader) {
this._contentLoader.destroy();
}
this._animator.stop();
this.detach();
this.forceContextLoss();
this.off();
this.canvas.removeEventListener("webglcontextlost", this._onWebglcontextlost);
this.canvas.removeEventListener("webglcontextrestored", this._onWebglcontextrestored);
}
hasRenderingContext() {
if (!(this.context && !this.context.isContextLost())) {
return false;
} else if (
this.context &&
!this.context.getProgramParameter(this.shaderProgram, this.context.LINK_STATUS)) {
return false;
}
return true;
}
_initShaderProgram() {
const gl = this.context;
if (this.shaderProgram) {
gl.deleteProgram(this.shaderProgram);
this.shaderProgram = null;
}
const renderer = this._renderer;
const vsSource = renderer.getVertexShaderSource();
const fsSource = renderer.getFragmentShaderSource();
const vertexShader = WebGLUtils.createShader(gl, gl.VERTEX_SHADER, vsSource);
const fragmentShader = WebGLUtils.createShader(gl, gl.FRAGMENT_SHADER, fsSource);
const shaderProgram = WebGLUtils.createProgram(gl, vertexShader, fragmentShader);
if (!shaderProgram) {
throw new Error(`Failed to intialize shaders: ${WebGLUtils.getErrorNameFromWebGLErrorCode(gl.getError())}`);
}
gl.useProgram(shaderProgram);
shaderProgram.vertexPositionAttribute = gl.getAttribLocation(shaderProgram, "aVertexPosition");
gl.enableVertexAttribArray(shaderProgram.vertexPositionAttribute);
shaderProgram.pMatrixUniform = gl.getUniformLocation(shaderProgram, "uPMatrix");
shaderProgram.mvMatrixUniform = gl.getUniformLocation(shaderProgram, "uMVMatrix");
shaderProgram.samplerUniform = gl.getUniformLocation(shaderProgram, "uSampler");
shaderProgram.textureCoordAttribute = gl.getAttribLocation(shaderProgram, "aTextureCoord");
shaderProgram.uEye = gl.getUniformLocation(shaderProgram, "uEye");
gl.enableVertexAttribArray(shaderProgram.textureCoordAttribute);
// clear buffer
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT | gl.STENCIL_BUFFER_BIT);
// Use TEXTURE0
gl.uniform1i(shaderProgram.samplerUniform, 0);
this.shaderProgram = shaderProgram;
}
_onWebglcontextlost(e) {
e.preventDefault();
this.trigger(EVENTS.RENDERING_CONTEXT_LOST);
}
_onWebglcontextrestored(e) {
this._initWebGL();
this.trigger(EVENTS.RENDERING_CONTEXT_RESTORE);
}
updateFieldOfView(fieldOfView) {
this.fieldOfView = fieldOfView;
this._updateViewport();
}
updateViewportDimensions(width, height) {
let viewPortChanged = false;
this.width = width;
this.height = height;
const w = width * DEVICE_PIXEL_RATIO;
const h = height * DEVICE_PIXEL_RATIO;
if (w !== this.canvas.width) {
this.canvas.width = w;
viewPortChanged = true;
}
if (h !== this.canvas.height) {
this.canvas.height = h;
viewPortChanged = true;
}
if (!viewPortChanged) {
return;
}
this._updateViewport();
this._shouldForceDraw = true;
}
_updateViewport() {
mat4.perspective(
this.pMatrix,
glMatrix.toRadian(this.fieldOfView),
this.canvas.width / this.canvas.height,
0.1,
100);
this.context.viewport(0, 0, this.context.drawingBufferWidth, this.context.drawingBufferHeight);
}
_initWebGL() {
let gl;
// TODO: Following code does need to be executed only if width/height, cubicStrip property is changed.
try {
this._initRenderingContext();
gl = this.context;
this.updateViewportDimensions(this.width, this.height);
this._initShaderProgram();
} catch (e) {
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.NO_WEBGL,
message: "no webgl support"
});
this.destroy();
console.error(e); // eslint-disable-line no-console
return;
}
// 캔버스를 투명으로 채운다.
gl.clearColor(0, 0, 0, 0);
const textureTarget = this._isCubeMap ? gl.TEXTURE_CUBE_MAP : gl.TEXTURE_2D;
if (this.texture) {
gl.deleteTexture(this.texture);
}
this.texture = WebGLUtils.createTexture(gl, textureTarget);
if (this._imageType === ImageType.CUBESTRIP) {
// TODO: Apply following options on other projection type.
gl.enable(gl.CULL_FACE);
// gl.enable(gl.DEPTH_TEST);
}
}
_initRenderingContext() {
if (this.hasRenderingContext()) {
return;
}
if (!window.WebGLRenderingContext) {
throw new Error("WebGLRenderingContext not available.");
}
this.context = WebGLUtils.getWebglContext(this.canvas, this._renderingContextAttributes);
if (!this.context) {
throw new Error("Failed to acquire 3D rendering context");
}
}
_initBuffers() {
const vertexPositionData = this._renderer.getVertexPositionData();
const indexData = this._renderer.getIndexData();
const textureCoordData = this._renderer.getTextureCoordData(this._imageConfig);
const gl = this.context;
this.vertexBuffer = WebGLUtils.initBuffer(
gl, gl.ARRAY_BUFFER, new Float32Array(vertexPositionData), 3,
this.shaderProgram.vertexPositionAttribute);
this.indexBuffer = WebGLUtils.initBuffer(
gl, gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(indexData), 1);
this.textureCoordBuffer = WebGLUtils.initBuffer(
gl, gl.ARRAY_BUFFER, new Float32Array(textureCoordData), this._isCubeMap ? 3 : 2,
this.shaderProgram.textureCoordAttribute);
this._bindBuffers();
}
_bindTexture() {
// Detect if it is EAC Format while CUBESTRIP mode.
// We assume it is EAC if image is not 3/2 ratio.
if (this._imageType === ImageType.CUBESTRIP) {
const {width, height} = this._renderer.getDimension(this._image);
const isEAC = width && height && width / height !== 1.5;
this.context.uniform1f(this.context.getUniformLocation(this.shaderProgram, "uIsEAC"), isEAC);
} else if (this._imageType === ImageType.PANORAMA) {
const {width, height} = this._renderer.getDimension(this._image);
const imageAspectRatio = width && height && width / height;
this._renderer.updateShaderData({imageAspectRatio});
}
// intialize shader buffers after image is loaded.(by updateShaderData)
// because buffer may be differ by image size.(eg. CylinderRenderer)
this._initBuffers();
this._renderer.bindTexture(
this.context,
this.texture,
this._image,
this._imageConfig,
);
this._shouldForceDraw = true;
this.trigger(EVENTS.BIND_TEXTURE);
}
_updateTexture() {
this._renderer.updateTexture(
this.context,
this._image,
this._imageConfig,
);
}
keepUpdate(doUpdate) {
if (doUpdate && this.isImageLoaded() === false) {
// Force to draw a frame after image is loaded on render()
this._shouldForceDraw = true;
}
this._keepUpdate = doUpdate;
}
startRender() {
this._animator.setCallback(this._render.bind(this));
this._animator.start();
}
stopRender() {
this._animator.stop();
}
renderWithQuaternion(quaternion, fieldOfView) {
if (!this.isImageLoaded()) {
return;
}
if (this._keepUpdate === false &&
this._lastQuaternion && quat.exactEquals(this._lastQuaternion, quaternion) &&
this.fieldOfView && this.fieldOfView === fieldOfView &&
this._shouldForceDraw === false) {
return;
}
// updatefieldOfView only if fieldOfView is changed.
if (fieldOfView !== undefined && fieldOfView !== this.fieldOfView) {
this.updateFieldOfView(fieldOfView);
}
this.mvMatrix = mat4.fromQuat(mat4.create(), quaternion);
this._draw();
this._lastQuaternion = quat.clone(quaternion);
if (this._shouldForceDraw) {
this._shouldForceDraw = false;
}
}
renderWithYawPitch(yaw, pitch, fieldOfView) {
if (!this.isImageLoaded()) {
return;
}
if (this._keepUpdate === false &&
this._lastYaw !== null && this._lastYaw === yaw &&
this._lastPitch !== null && this._lastPitch === pitch &&
this.fieldOfView && this.fieldOfView === fieldOfView &&
this._shouldForceDraw === false) {
return;
}
// fieldOfView 가 존재하면서 기존의 값과 다를 경우에만 업데이트 호출
if (fieldOfView !== undefined && fieldOfView !== this.fieldOfView) {
this.updateFieldOfView(fieldOfView);
}
mat4.identity(this.mvMatrix);
mat4.rotateX(this.mvMatrix, this.mvMatrix, -glMatrix.toRadian(pitch));
mat4.rotateY(this.mvMatrix, this.mvMatrix, -glMatrix.toRadian(yaw));
this._draw();
this._lastYaw = yaw;
this._lastPitch = pitch;
if (this._shouldForceDraw) {
this._shouldForceDraw = false;
}
}
_render() {
const yawPitchControl = this._yawPitchControl;
const fov = yawPitchControl.getFov();
if (yawPitchControl.shouldRenderWithQuaternion()) {
const quaternion = yawPitchControl.getQuaternion();
this.renderWithQuaternion(quaternion, fov);
} else {
const yawPitch = yawPitchControl.getYawPitch();
this.renderWithYawPitch(yawPitch.yaw, yawPitch.pitch, fov);
}
}
_renderStereo = (time, frame) => {
const vr = this._vr;
const gl = this.context;
const eyeParams = vr.getEyeParams(gl, frame);
if (!eyeParams) return;
vr.beforeRender(gl, frame);
// Render both eyes
for (const eyeIndex of [0, 1]) {
const eyeParam = eyeParams[eyeIndex];
this.mvMatrix = eyeParam.mvMatrix;
this.pMatrix = eyeParam.pMatrix;
gl.viewport(...eyeParam.viewport);
gl.uniform1f(this.shaderProgram.uEye, eyeIndex);
this._bindBuffers();
this._draw();
}
vr.afterRender();
}
_bindBuffers() {
const gl = this.context;
const program = this.shaderProgram;
const vertexBuffer = this.vertexBuffer;
const textureCoordBuffer = this.textureCoordBuffer;
gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
gl.enableVertexAttribArray(program.vertexPositionAttribute);
gl.vertexAttribPointer(
program.vertexPositionAttribute, vertexBuffer.itemSize, gl.FLOAT, false, 0, 0
);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.indexBuffer);
gl.bindBuffer(gl.ARRAY_BUFFER, textureCoordBuffer);
gl.enableVertexAttribArray(program.textureCoordAttribute);
gl.vertexAttribPointer(
program.textureCoordAttribute, textureCoordBuffer.itemSize, gl.FLOAT, false, 0, 0
);
}
_draw() {
if (this._isVideo && this._keepUpdate) {
this._updateTexture();
}
this._renderer.render({
gl: this.context,
shaderProgram: this.shaderProgram,
indexBuffer: this.indexBuffer,
mvMatrix: this.mvMatrix,
pMatrix: this.pMatrix,
});
}
/**
* Returns projection renderer by each type
*/
getProjectionRenderer() {
return this._renderer;
}
/**
* @return Promise
*/
enterVR() {
const vr = this._vr;
if (!WEBXR_SUPPORTED && !navigator.getVRDisplays) {
return Promise.reject("VR is not available on this browser.");
}
if (vr && vr.isPresenting()) {
return Promise.resolve("VR already enabled.");
}
return this._requestPresent();
} |
exitVR = () => {
const vr = this._vr;
const gl = this.context;
const animator = this._animator;
if (!vr) return;
vr.removeEndCallback(this.exitVR);
vr.destroy();
this._vr = null;
// Restore canvas & context on iOS
if (IS_IOS) {
this._restoreStyle();
}
this.updateViewportDimensions(this.width, this.height);
this._updateViewport();
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
this._bindBuffers();
this._shouldForceDraw = true;
animator.stop();
animator.setContext(window);
animator.setCallback(this._render.bind(this));
animator.start();
}
_requestPresent() {
const gl = this.context;
const canvas = this.canvas;
const animator = this._animator;
this._vr = WEBXR_SUPPORTED ?
new XRManager() :
new VRManager();
const vr = this._vr;
animator.stop();
return new Promise((resolve, reject) => {
vr.requestPresent(canvas, gl)
.then(() => {
vr.addEndCallback(this.exitVR);
animator.setContext(vr.context);
animator.setCallback(this._onFirstVRFrame);
if (IS_IOS) {
this._setWrapperFullscreen();
}
this._shouldForceDraw = true;
animator.start();
resolve("success");
})
.catch(e => {
vr.destroy();
this._vr = null;
animator.start();
reject(e);
});
});
}
_onFirstVRFrame = (time, frame) => {
const vr = this._vr;
const gl = this.context;
const animator = this._animator;
// If rendering is not ready, wait for next frame
if (!vr.canRender(frame)) return;
const minusZDir = vec3.fromValues(0, 0, -1);
const eyeParam = vr.getEyeParams(gl, frame)[0];
// Extract only rotation
const mvMatrix = mat3.fromMat4(mat3.create(), eyeParam.mvMatrix);
const pMatrix = mat3.fromMat4(mat3.create(), eyeParam.pMatrix);
const mvInv = mat3.invert(mat3.create(), mvMatrix);
const pInv = mat3.invert(mat3.create(), pMatrix);
const viewDir = vec3.transformMat3(vec3.create(), minusZDir, pInv);
vec3.transformMat3(viewDir, viewDir, mvInv);
const yawOffset = mathUtil.yawOffsetBetween(viewDir, vec3.fromValues(0, 0, 1));
if (yawOffset === 0) {
// If the yawOffset is exactly 0, then device sensor is not ready
// So read it again until it has any value in it
return;
}
vr.setYawOffset(yawOffset);
animator.setCallback(this._renderStereo);
}
_setWrapperFullscreen() {
const wrapper = this._wrapper;
if (!wrapper) return;
this._wrapperOrigStyle = wrapper.getAttribute("style");
const wrapperStyle = wrapper.style;
wrapperStyle.width = "100vw";
wrapperStyle.height = "100vh";
wrapperStyle.position = "fixed";
wrapperStyle.left = "0";
wrapperStyle.top = "0";
wrapperStyle.zIndex = "9999";
}
_restoreStyle() {
const wrapper = this._wrapper;
const canvas = this.canvas;
if (!wrapper) return;
if (this._wrapperOrigStyle) {
wrapper.setAttribute("style", this._wrapperOrigStyle);
} else {
wrapper.removeAttribute("style");
}
this._wrapperOrigStyle = null;
// Restore canvas style
canvas.removeAttribute("style");
this._setDefaultCanvasStyle();
}
}
export default PanoImageRenderer; | random_line_split | |
PanoImageRenderer.js | import Component from "@egjs/component";
import {glMatrix, vec3, mat3, mat4, quat} from "gl-matrix";
import ImageLoader from "./ImageLoader";
import VideoLoader from "./VideoLoader";
import WebGLUtils from "./WebGLUtils";
import Renderer from "./renderer/Renderer";
import CubeRenderer from "./renderer/CubeRenderer";
import CubeStripRenderer from "./renderer/CubeStripRenderer";
import SphereRenderer from "./renderer/SphereRenderer";
import CylinderRenderer from "./renderer/CylinderRenderer";
import VRManager from "./vr/VRManager";
import XRManager from "./vr/XRManager";
import WebGLAnimator from "./WebGLAnimator";
import {util as mathUtil} from "../utils/math-util";
import {devicePixelRatio, WEBXR_SUPPORTED} from "../utils/browserFeature";
import {PROJECTION_TYPE, STEREO_FORMAT} from "../PanoViewer/consts";
import {IS_IOS} from "../utils/browser";
const ImageType = PROJECTION_TYPE;
let DEVICE_PIXEL_RATIO = devicePixelRatio || 1;
// DEVICE_PIXEL_RATIO 가 2를 초과하는 경우는 리소스 낭비이므로 2로 맞춘다.
if (DEVICE_PIXEL_RATIO > 2) {
DEVICE_PIXEL_RATIO = 2;
}
// define custom events name
/**
* TODO: how to manage events/errortype with PanoViewer
*
* I think renderer events should be seperated from viewer events although it has same name.
*/
const EVENTS = {
BIND_TEXTURE: "bindTexture",
IMAGE_LOADED: "imageLoaded",
ERROR: "error",
RENDERING_CONTEXT_LOST: "renderingContextLost",
RENDERING_CONTEXT_RESTORE: "renderingContextRestore",
};
const ERROR_TYPE = {
INVALID_DEVICE: 10,
NO_WEBGL: 11,
FAIL_IMAGE_LOAD: 12,
RENDERER_ERROR: 13
};
class PanoImageRenderer extends Component {
static EVENTS = EVENTS;
static ERROR_TYPE = ERROR_TYPE;
constructor(
image, width, height, isVid | alConfig, renderingContextAttributes
) {
// Super constructor
super();
this.sphericalConfig = sphericalConfig;
this.fieldOfView = sphericalConfig.fieldOfView;
this.width = width;
this.height = height;
this._lastQuaternion = null;
this._lastYaw = null;
this._lastPitch = null;
this._lastFieldOfView = null;
this.pMatrix = mat4.create();
this.mvMatrix = mat4.create();
// initialzie pMatrix
mat4.perspective(this.pMatrix, glMatrix.toRadian(this.fieldOfView), width / height, 0.1, 100);
this.textureCoordBuffer = null;
this.vertexBuffer = null;
this.indexBuffer = null;
this.canvas = this._initCanvas(width, height);
this._setDefaultCanvasStyle();
this._wrapper = null; // canvas wrapper
this._wrapperOrigStyle = null;
this._renderingContextAttributes = renderingContextAttributes;
this._image = null;
this._imageConfig = null;
this._imageIsReady = false;
this._shouldForceDraw = false;
this._keepUpdate = false; // Flag to specify 'continuous update' on video even when still.
this._onContentLoad = this._onContentLoad.bind(this);
this._onContentError = this._onContentError.bind(this);
this._animator = new WebGLAnimator();
// VR/XR manager
this._vr = null;
if (image) {
this.setImage({
image,
imageType: sphericalConfig.imageType,
isVideo,
cubemapConfig: sphericalConfig.cubemapConfig
});
}
}
// FIXME: Please refactor me to have more loose connection to yawpitchcontrol
setYawPitchControl(yawPitchControl) {
this._yawPitchControl = yawPitchControl;
}
getContent() {
return this._image;
}
setImage({image, imageType, isVideo = false, cubemapConfig}) {
this._imageIsReady = false;
this._isVideo = isVideo;
this._imageConfig = Object.assign(
{
/* RLUDBF is abnormal, we use it on CUBEMAP only */
order: (imageType === ImageType.CUBEMAP) ? "RLUDBF" : "RLUDFB",
tileConfig: {
flipHorizontal: false,
rotation: 0
}
},
cubemapConfig
);
this._setImageType(imageType);
if (this._contentLoader) {
this._contentLoader.destroy();
}
if (isVideo) {
this._contentLoader = new VideoLoader();
this._keepUpdate = true;
} else {
this._contentLoader = new ImageLoader();
this._keepUpdate = false;
}
// img element or img url
this._contentLoader.set(image);
// 이미지의 사이즈를 캐시한다.
// image is reference for content in contentLoader, so it may be not valid if contentLoader is destroyed.
this._image = this._contentLoader.getElement();
return this._contentLoader.get()
.then(this._onContentLoad, this._onContentError)
.catch(e => setTimeout(() => { throw e; }));// Prevent exceptions from being isolated in promise chain.
}
_setImageType(imageType) {
if (!imageType || this._imageType === imageType) {
return;
}
this._imageType = imageType;
this._isCubeMap = imageType === ImageType.CUBEMAP;
if (this._renderer) {
this._renderer.off();
}
switch (imageType) {
case ImageType.CUBEMAP:
this._renderer = new CubeRenderer();
break;
case ImageType.CUBESTRIP:
this._renderer = new CubeStripRenderer();
break;
case ImageType.PANORAMA:
this._renderer = new CylinderRenderer();
break;
case ImageType.STEREOSCOPIC_EQUI:
this._renderer = new SphereRenderer(this.sphericalConfig.stereoFormat);
break;
default:
this._renderer = new SphereRenderer(STEREO_FORMAT.NONE);
break;
}
this._renderer.on(Renderer.EVENTS.ERROR, e => {
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.RENDERER_ERROR,
message: e.message
});
});
this._initWebGL();
}
_initCanvas(width, height) {
const canvas = document.createElement("canvas");
canvas.width = width;
canvas.height = height;
this._onWebglcontextlost = this._onWebglcontextlost.bind(this);
this._onWebglcontextrestored = this._onWebglcontextrestored.bind(this);
canvas.addEventListener("webglcontextlost", this._onWebglcontextlost);
canvas.addEventListener("webglcontextrestored", this._onWebglcontextrestored);
return canvas;
}
_setDefaultCanvasStyle() {
const canvas = this.canvas;
canvas.style.bottom = 0;
canvas.style.left = 0;
canvas.style.right = 0;
canvas.style.top = 0;
canvas.style.margin = "auto";
canvas.style.maxHeight = "100%";
canvas.style.maxWidth = "100%";
canvas.style.outline = "none";
canvas.style.position = "absolute";
}
_onContentError(error) {
this._imageIsReady = false;
this._image = null;
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.FAIL_IMAGE_LOAD,
message: "failed to load image"
});
return false;
}
_triggerContentLoad() {
this.trigger(EVENTS.IMAGE_LOADED, {
content: this._image,
isVideo: this._isVideo,
projectionType: this._imageType
});
}
_onContentLoad(image) {
this._imageIsReady = true;
this._triggerContentLoad();
return true;
}
isImageLoaded() {
return !!this._image && this._imageIsReady &&
(!this._isVideo || this._image.readyState >= 2 /* HAVE_CURRENT_DATA */);
}
bindTexture() {
return new Promise((res, rej) => {
if (!this._contentLoader) {
rej("ImageLoader is not initialized");
return;
}
this._contentLoader.get()
.then(() => {
this._bindTexture();
}, rej)
.then(res);
});
}
// 부모 엘리먼트에 canvas 를 붙임
attachTo(parentElement) {
this.detach();
parentElement.appendChild(this.canvas);
this._wrapper = parentElement;
}
forceContextLoss() {
if (this.hasRenderingContext()) {
const loseContextExtension = this.context.getExtension("WEBGL_lose_context");
if (loseContextExtension) {
loseContextExtension.loseContext();
}
}
}
// 부모 엘리먼트에서 canvas 를 제거
detach() {
if (this.canvas.parentElement) {
this.canvas.parentElement.removeChild(this.canvas);
}
}
destroy() {
if (this._contentLoader) {
this._contentLoader.destroy();
}
this._animator.stop();
this.detach();
this.forceContextLoss();
this.off();
this.canvas.removeEventListener("webglcontextlost", this._onWebglcontextlost);
this.canvas.removeEventListener("webglcontextrestored", this._onWebglcontextrestored);
}
hasRenderingContext() {
if (!(this.context && !this.context.isContextLost())) {
return false;
} else if (
this.context &&
!this.context.getProgramParameter(this.shaderProgram, this.context.LINK_STATUS)) {
return false;
}
return true;
}
_initShaderProgram() {
const gl = this.context;
if (this.shaderProgram) {
gl.deleteProgram(this.shaderProgram);
this.shaderProgram = null;
}
const renderer = this._renderer;
const vsSource = renderer.getVertexShaderSource();
const fsSource = renderer.getFragmentShaderSource();
const vertexShader = WebGLUtils.createShader(gl, gl.VERTEX_SHADER, vsSource);
const fragmentShader = WebGLUtils.createShader(gl, gl.FRAGMENT_SHADER, fsSource);
const shaderProgram = WebGLUtils.createProgram(gl, vertexShader, fragmentShader);
if (!shaderProgram) {
throw new Error(`Failed to intialize shaders: ${WebGLUtils.getErrorNameFromWebGLErrorCode(gl.getError())}`);
}
gl.useProgram(shaderProgram);
shaderProgram.vertexPositionAttribute = gl.getAttribLocation(shaderProgram, "aVertexPosition");
gl.enableVertexAttribArray(shaderProgram.vertexPositionAttribute);
shaderProgram.pMatrixUniform = gl.getUniformLocation(shaderProgram, "uPMatrix");
shaderProgram.mvMatrixUniform = gl.getUniformLocation(shaderProgram, "uMVMatrix");
shaderProgram.samplerUniform = gl.getUniformLocation(shaderProgram, "uSampler");
shaderProgram.textureCoordAttribute = gl.getAttribLocation(shaderProgram, "aTextureCoord");
shaderProgram.uEye = gl.getUniformLocation(shaderProgram, "uEye");
gl.enableVertexAttribArray(shaderProgram.textureCoordAttribute);
// clear buffer
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT | gl.STENCIL_BUFFER_BIT);
// Use TEXTURE0
gl.uniform1i(shaderProgram.samplerUniform, 0);
this.shaderProgram = shaderProgram;
}
_onWebglcontextlost(e) {
e.preventDefault();
this.trigger(EVENTS.RENDERING_CONTEXT_LOST);
}
_onWebglcontextrestored(e) {
this._initWebGL();
this.trigger(EVENTS.RENDERING_CONTEXT_RESTORE);
}
updateFieldOfView(fieldOfView) {
this.fieldOfView = fieldOfView;
this._updateViewport();
}
updateViewportDimensions(width, height) {
let viewPortChanged = false;
this.width = width;
this.height = height;
const w = width * DEVICE_PIXEL_RATIO;
const h = height * DEVICE_PIXEL_RATIO;
if (w !== this.canvas.width) {
this.canvas.width = w;
viewPortChanged = true;
}
if (h !== this.canvas.height) {
this.canvas.height = h;
viewPortChanged = true;
}
if (!viewPortChanged) {
return;
}
this._updateViewport();
this._shouldForceDraw = true;
}
_updateViewport() {
mat4.perspective(
this.pMatrix,
glMatrix.toRadian(this.fieldOfView),
this.canvas.width / this.canvas.height,
0.1,
100);
this.context.viewport(0, 0, this.context.drawingBufferWidth, this.context.drawingBufferHeight);
}
_initWebGL() {
let gl;
// TODO: Following code does need to be executed only if width/height, cubicStrip property is changed.
try {
this._initRenderingContext();
gl = this.context;
this.updateViewportDimensions(this.width, this.height);
this._initShaderProgram();
} catch (e) {
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.NO_WEBGL,
message: "no webgl support"
});
this.destroy();
console.error(e); // eslint-disable-line no-console
return;
}
// 캔버스를 투명으로 채운다.
gl.clearColor(0, 0, 0, 0);
const textureTarget = this._isCubeMap ? gl.TEXTURE_CUBE_MAP : gl.TEXTURE_2D;
if (this.texture) {
gl.deleteTexture(this.texture);
}
this.texture = WebGLUtils.createTexture(gl, textureTarget);
if (this._imageType === ImageType.CUBESTRIP) {
// TODO: Apply following options on other projection type.
gl.enable(gl.CULL_FACE);
// gl.enable(gl.DEPTH_TEST);
}
}
_initRenderingContext() {
if (this.hasRenderingContext()) {
return;
}
if (!window.WebGLRenderingContext) {
throw new Error("WebGLRenderingContext not available.");
}
this.context = WebGLUtils.getWebglContext(this.canvas, this._renderingContextAttributes);
if (!this.context) {
throw new Error("Failed to acquire 3D rendering context");
}
}
_initBuffers() {
const vertexPositionData = this._renderer.getVertexPositionData();
const indexData = this._renderer.getIndexData();
const textureCoordData = this._renderer.getTextureCoordData(this._imageConfig);
const gl = this.context;
this.vertexBuffer = WebGLUtils.initBuffer(
gl, gl.ARRAY_BUFFER, new Float32Array(vertexPositionData), 3,
this.shaderProgram.vertexPositionAttribute);
this.indexBuffer = WebGLUtils.initBuffer(
gl, gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(indexData), 1);
this.textureCoordBuffer = WebGLUtils.initBuffer(
gl, gl.ARRAY_BUFFER, new Float32Array(textureCoordData), this._isCubeMap ? 3 : 2,
this.shaderProgram.textureCoordAttribute);
this._bindBuffers();
}
_bindTexture() {
// Detect if it is EAC Format while CUBESTRIP mode.
// We assume it is EAC if image is not 3/2 ratio.
if (this._imageType === ImageType.CUBESTRIP) {
const {width, height} = this._renderer.getDimension(this._image);
const isEAC = width && height && width / height !== 1.5;
this.context.uniform1f(this.context.getUniformLocation(this.shaderProgram, "uIsEAC"), isEAC);
} else if (this._imageType === ImageType.PANORAMA) {
const {width, height} = this._renderer.getDimension(this._image);
const imageAspectRatio = width && height && width / height;
this._renderer.updateShaderData({imageAspectRatio});
}
// intialize shader buffers after image is loaded.(by updateShaderData)
// because buffer may be differ by image size.(eg. CylinderRenderer)
this._initBuffers();
this._renderer.bindTexture(
this.context,
this.texture,
this._image,
this._imageConfig,
);
this._shouldForceDraw = true;
this.trigger(EVENTS.BIND_TEXTURE);
}
_updateTexture() {
this._renderer.updateTexture(
this.context,
this._image,
this._imageConfig,
);
}
keepUpdate(doUpdate) {
if (doUpdate && this.isImageLoaded() === false) {
// Force to draw a frame after image is loaded on render()
this._shouldForceDraw = true;
}
this._keepUpdate = doUpdate;
}
startRender() {
this._animator.setCallback(this._render.bind(this));
this._animator.start();
}
stopRender() {
this._animator.stop();
}
renderWithQuaternion(quaternion, fieldOfView) {
if (!this.isImageLoaded()) {
return;
}
if (this._keepUpdate === false &&
this._lastQuaternion && quat.exactEquals(this._lastQuaternion, quaternion) &&
this.fieldOfView && this.fieldOfView === fieldOfView &&
this._shouldForceDraw === false) {
return;
}
// updatefieldOfView only if fieldOfView is changed.
if (fieldOfView !== undefined && fieldOfView !== this.fieldOfView) {
this.updateFieldOfView(fieldOfView);
}
this.mvMatrix = mat4.fromQuat(mat4.create(), quaternion);
this._draw();
this._lastQuaternion = quat.clone(quaternion);
if (this._shouldForceDraw) {
this._shouldForceDraw = false;
}
}
renderWithYawPitch(yaw, pitch, fieldOfView) {
if (!this.isImageLoaded()) {
return;
}
if (this._keepUpdate === false &&
this._lastYaw !== null && this._lastYaw === yaw &&
this._lastPitch !== null && this._lastPitch === pitch &&
this.fieldOfView && this.fieldOfView === fieldOfView &&
this._shouldForceDraw === false) {
return;
}
// fieldOfView 가 존재하면서 기존의 값과 다를 경우에만 업데이트 호출
if (fieldOfView !== undefined && fieldOfView !== this.fieldOfView) {
this.updateFieldOfView(fieldOfView);
}
mat4.identity(this.mvMatrix);
mat4.rotateX(this.mvMatrix, this.mvMatrix, -glMatrix.toRadian(pitch));
mat4.rotateY(this.mvMatrix, this.mvMatrix, -glMatrix.toRadian(yaw));
this._draw();
this._lastYaw = yaw;
this._lastPitch = pitch;
if (this._shouldForceDraw) {
this._shouldForceDraw = false;
}
}
_render() {
const yawPitchControl = this._yawPitchControl;
const fov = yawPitchControl.getFov();
if (yawPitchControl.shouldRenderWithQuaternion()) {
const quaternion = yawPitchControl.getQuaternion();
this.renderWithQuaternion(quaternion, fov);
} else {
const yawPitch = yawPitchControl.getYawPitch();
this.renderWithYawPitch(yawPitch.yaw, yawPitch.pitch, fov);
}
}
_renderStereo = (time, frame) => {
const vr = this._vr;
const gl = this.context;
const eyeParams = vr.getEyeParams(gl, frame);
if (!eyeParams) return;
vr.beforeRender(gl, frame);
// Render both eyes
for (const eyeIndex of [0, 1]) {
const eyeParam = eyeParams[eyeIndex];
this.mvMatrix = eyeParam.mvMatrix;
this.pMatrix = eyeParam.pMatrix;
gl.viewport(...eyeParam.viewport);
gl.uniform1f(this.shaderProgram.uEye, eyeIndex);
this._bindBuffers();
this._draw();
}
vr.afterRender();
}
_bindBuffers() {
const gl = this.context;
const program = this.shaderProgram;
const vertexBuffer = this.vertexBuffer;
const textureCoordBuffer = this.textureCoordBuffer;
gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
gl.enableVertexAttribArray(program.vertexPositionAttribute);
gl.vertexAttribPointer(
program.vertexPositionAttribute, vertexBuffer.itemSize, gl.FLOAT, false, 0, 0
);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.indexBuffer);
gl.bindBuffer(gl.ARRAY_BUFFER, textureCoordBuffer);
gl.enableVertexAttribArray(program.textureCoordAttribute);
gl.vertexAttribPointer(
program.textureCoordAttribute, textureCoordBuffer.itemSize, gl.FLOAT, false, 0, 0
);
}
_draw() {
if (this._isVideo && this._keepUpdate) {
this._updateTexture();
}
this._renderer.render({
gl: this.context,
shaderProgram: this.shaderProgram,
indexBuffer: this.indexBuffer,
mvMatrix: this.mvMatrix,
pMatrix: this.pMatrix,
});
}
/**
* Returns projection renderer by each type
*/
getProjectionRenderer() {
return this._renderer;
}
/**
* @return Promise
*/
enterVR() {
const vr = this._vr;
if (!WEBXR_SUPPORTED && !navigator.getVRDisplays) {
return Promise.reject("VR is not available on this browser.");
}
if (vr && vr.isPresenting()) {
return Promise.resolve("VR already enabled.");
}
return this._requestPresent();
}
exitVR = () => {
const vr = this._vr;
const gl = this.context;
const animator = this._animator;
if (!vr) return;
vr.removeEndCallback(this.exitVR);
vr.destroy();
this._vr = null;
// Restore canvas & context on iOS
if (IS_IOS) {
this._restoreStyle();
}
this.updateViewportDimensions(this.width, this.height);
this._updateViewport();
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
this._bindBuffers();
this._shouldForceDraw = true;
animator.stop();
animator.setContext(window);
animator.setCallback(this._render.bind(this));
animator.start();
}
_requestPresent() {
const gl = this.context;
const canvas = this.canvas;
const animator = this._animator;
this._vr = WEBXR_SUPPORTED ?
new XRManager() :
new VRManager();
const vr = this._vr;
animator.stop();
return new Promise((resolve, reject) => {
vr.requestPresent(canvas, gl)
.then(() => {
vr.addEndCallback(this.exitVR);
animator.setContext(vr.context);
animator.setCallback(this._onFirstVRFrame);
if (IS_IOS) {
this._setWrapperFullscreen();
}
this._shouldForceDraw = true;
animator.start();
resolve("success");
})
.catch(e => {
vr.destroy();
this._vr = null;
animator.start();
reject(e);
});
});
}
_onFirstVRFrame = (time, frame) => {
const vr = this._vr;
const gl = this.context;
const animator = this._animator;
// If rendering is not ready, wait for next frame
if (!vr.canRender(frame)) return;
const minusZDir = vec3.fromValues(0, 0, -1);
const eyeParam = vr.getEyeParams(gl, frame)[0];
// Extract only rotation
const mvMatrix = mat3.fromMat4(mat3.create(), eyeParam.mvMatrix);
const pMatrix = mat3.fromMat4(mat3.create(), eyeParam.pMatrix);
const mvInv = mat3.invert(mat3.create(), mvMatrix);
const pInv = mat3.invert(mat3.create(), pMatrix);
const viewDir = vec3.transformMat3(vec3.create(), minusZDir, pInv);
vec3.transformMat3(viewDir, viewDir, mvInv);
const yawOffset = mathUtil.yawOffsetBetween(viewDir, vec3.fromValues(0, 0, 1));
if (yawOffset === 0) {
// If the yawOffset is exactly 0, then device sensor is not ready
// So read it again until it has any value in it
return;
}
vr.setYawOffset(yawOffset);
animator.setCallback(this._renderStereo);
}
_setWrapperFullscreen() {
const wrapper = this._wrapper;
if (!wrapper) return;
this._wrapperOrigStyle = wrapper.getAttribute("style");
const wrapperStyle = wrapper.style;
wrapperStyle.width = "100vw";
wrapperStyle.height = "100vh";
wrapperStyle.position = "fixed";
wrapperStyle.left = "0";
wrapperStyle.top = "0";
wrapperStyle.zIndex = "9999";
}
_restoreStyle() {
const wrapper = this._wrapper;
const canvas = this.canvas;
if (!wrapper) return;
if (this._wrapperOrigStyle) {
wrapper.setAttribute("style", this._wrapperOrigStyle);
} else {
wrapper.removeAttribute("style");
}
this._wrapperOrigStyle = null;
// Restore canvas style
canvas.removeAttribute("style");
this._setDefaultCanvasStyle();
}
}
export default PanoImageRenderer;
| eo, spheric | identifier_name |
PanoImageRenderer.js | import Component from "@egjs/component";
import {glMatrix, vec3, mat3, mat4, quat} from "gl-matrix";
import ImageLoader from "./ImageLoader";
import VideoLoader from "./VideoLoader";
import WebGLUtils from "./WebGLUtils";
import Renderer from "./renderer/Renderer";
import CubeRenderer from "./renderer/CubeRenderer";
import CubeStripRenderer from "./renderer/CubeStripRenderer";
import SphereRenderer from "./renderer/SphereRenderer";
import CylinderRenderer from "./renderer/CylinderRenderer";
import VRManager from "./vr/VRManager";
import XRManager from "./vr/XRManager";
import WebGLAnimator from "./WebGLAnimator";
import {util as mathUtil} from "../utils/math-util";
import {devicePixelRatio, WEBXR_SUPPORTED} from "../utils/browserFeature";
import {PROJECTION_TYPE, STEREO_FORMAT} from "../PanoViewer/consts";
import {IS_IOS} from "../utils/browser";
const ImageType = PROJECTION_TYPE;
let DEVICE_PIXEL_RATIO = devicePixelRatio || 1;
// DEVICE_PIXEL_RATIO 가 2를 초과하는 경우는 리소스 낭비이므로 2로 맞춘다.
if (DEVICE_PIXEL_RATIO > 2) {
DEVICE_PIXEL_RATIO = 2;
}
// define custom events name
/**
* TODO: how to manage events/errortype with PanoViewer
*
* I think renderer events should be seperated from viewer events although it has same name.
*/
const EVENTS = {
BIND_TEXTURE: "bindTexture",
IMAGE_LOADED: "imageLoaded",
ERROR: "error",
RENDERING_CONTEXT_LOST: "renderingContextLost",
RENDERING_CONTEXT_RESTORE: "renderingContextRestore",
};
const ERROR_TYPE = {
INVALID_DEVICE: 10,
NO_WEBGL: 11,
FAIL_IMAGE_LOAD: 12,
RENDERER_ERROR: 13
};
class PanoImageRenderer extends Component {
static EVENTS = EVENTS;
static ERROR_TYPE = ERROR_TYPE;
constructor(
image, width, height, isVideo, sphericalConfig, renderingContextAttributes
) {
// Super constructor
super();
this.sphericalConfig = sphericalConfig;
this.fieldOfView = sphericalConfig.fieldOfView;
this.width = width;
this.height = height;
this._lastQuaternion = null;
this._lastYaw = null;
this._lastPitch = null;
this._lastFieldOfView = null;
this.pMatrix = mat4.create();
this.mvMatrix = mat4.create();
// initialzie pMatrix
mat4.perspective(this.pMatrix, glMatrix.toRadian(this.fieldOfView), width / height, 0.1, 100);
this.textureCoordBuffer = null;
this.vertexBuffer = null;
this.indexBuffer = null;
this.canvas = this._initCanvas(width, height);
this._setDefaultCanvasStyle();
this._wrapper = null; // canvas wrapper
this._wrapperOrigStyle = null;
this._renderingContextAttributes = renderingContextAttributes;
this._image = null;
this._imageConfig = null;
this._imageIsReady = false;
this._shouldForceDraw = false;
this._keepUpdate = false; // Flag to specify 'continuous update' on video even when still.
this._onContentLoad = this._onContentLoad.bind(this);
this._onContentError = this._onContentError.bind(this);
this._animator = new WebGLAnimator();
// VR/XR manager
this._vr = null;
if (image) {
this.setImage({
image,
imageType: sphericalConfig.imageType,
isVideo,
cubemapConfig: sphericalConfig.cubemapConfig
});
}
}
// FIXME: Please refactor me to have more loose connection to yawpitchcontrol
setYawPitchControl(yawPitchControl) {
this._yawPitchControl = yawPitchControl;
}
getContent() {
return this._image;
}
setImage({image, imageType, isVideo = false, cubemapConfig}) {
this._imageIsReady = false;
this._isVideo = isVideo;
this._imageConfig = Object.assign(
{
/* RLUDBF is abnormal, we use it on CUBEMAP only */
order: (imageType === ImageType.CUBEMAP) ? "RLUDBF" : "RLUDFB",
tileConfig: {
flipHorizontal: false,
rotation: 0
}
},
cubemapConfig
);
this._setImageType(imageType);
if (this._contentLoader) {
this._contentLoader.destroy();
}
if (isVideo) {
this._contentLoader = new VideoLoader();
this._keepUpdate = true;
} else {
this._contentLoader = new ImageLoader();
this._keepUpdate = false;
}
// img element or img url
this._contentLoader.set(image);
// 이미지의 사이즈를 캐시한다.
// image is reference for content in contentLoader, so it may be not valid if contentLoader is destroyed.
this._image = this._contentLoader.getElement();
return this._contentLoader.get()
.then(this._onContentLoad, this._onContentError)
.catch(e => setTimeout(() => { throw e; }));// Prevent exceptions from being isolated in promise chain.
}
_setImageType(imageType) {
if (!imageType || this._imageType === imageType) {
return;
| ement("canvas");
canvas.width = width;
canvas.height = height;
this._onWebglcontextlost = this._onWebglcontextlost.bind(this);
this._onWebglcontextrestored = this._onWebglcontextrestored.bind(this);
canvas.addEventListener("webglcontextlost", this._onWebglcontextlost);
canvas.addEventListener("webglcontextrestored", this._onWebglcontextrestored);
return canvas;
}
_setDefaultCanvasStyle() {
const canvas = this.canvas;
canvas.style.bottom = 0;
canvas.style.left = 0;
canvas.style.right = 0;
canvas.style.top = 0;
canvas.style.margin = "auto";
canvas.style.maxHeight = "100%";
canvas.style.maxWidth = "100%";
canvas.style.outline = "none";
canvas.style.position = "absolute";
}
_onContentError(error) {
this._imageIsReady = false;
this._image = null;
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.FAIL_IMAGE_LOAD,
message: "failed to load image"
});
return false;
}
_triggerContentLoad() {
this.trigger(EVENTS.IMAGE_LOADED, {
content: this._image,
isVideo: this._isVideo,
projectionType: this._imageType
});
}
_onContentLoad(image) {
this._imageIsReady = true;
this._triggerContentLoad();
return true;
}
isImageLoaded() {
return !!this._image && this._imageIsReady &&
(!this._isVideo || this._image.readyState >= 2 /* HAVE_CURRENT_DATA */);
}
bindTexture() {
return new Promise((res, rej) => {
if (!this._contentLoader) {
rej("ImageLoader is not initialized");
return;
}
this._contentLoader.get()
.then(() => {
this._bindTexture();
}, rej)
.then(res);
});
}
// 부모 엘리먼트에 canvas 를 붙임
attachTo(parentElement) {
this.detach();
parentElement.appendChild(this.canvas);
this._wrapper = parentElement;
}
forceContextLoss() {
if (this.hasRenderingContext()) {
const loseContextExtension = this.context.getExtension("WEBGL_lose_context");
if (loseContextExtension) {
loseContextExtension.loseContext();
}
}
}
// 부모 엘리먼트에서 canvas 를 제거
detach() {
if (this.canvas.parentElement) {
this.canvas.parentElement.removeChild(this.canvas);
}
}
destroy() {
if (this._contentLoader) {
this._contentLoader.destroy();
}
this._animator.stop();
this.detach();
this.forceContextLoss();
this.off();
this.canvas.removeEventListener("webglcontextlost", this._onWebglcontextlost);
this.canvas.removeEventListener("webglcontextrestored", this._onWebglcontextrestored);
}
hasRenderingContext() {
if (!(this.context && !this.context.isContextLost())) {
return false;
} else if (
this.context &&
!this.context.getProgramParameter(this.shaderProgram, this.context.LINK_STATUS)) {
return false;
}
return true;
}
_initShaderProgram() {
const gl = this.context;
if (this.shaderProgram) {
gl.deleteProgram(this.shaderProgram);
this.shaderProgram = null;
}
const renderer = this._renderer;
const vsSource = renderer.getVertexShaderSource();
const fsSource = renderer.getFragmentShaderSource();
const vertexShader = WebGLUtils.createShader(gl, gl.VERTEX_SHADER, vsSource);
const fragmentShader = WebGLUtils.createShader(gl, gl.FRAGMENT_SHADER, fsSource);
const shaderProgram = WebGLUtils.createProgram(gl, vertexShader, fragmentShader);
if (!shaderProgram) {
throw new Error(`Failed to intialize shaders: ${WebGLUtils.getErrorNameFromWebGLErrorCode(gl.getError())}`);
}
gl.useProgram(shaderProgram);
shaderProgram.vertexPositionAttribute = gl.getAttribLocation(shaderProgram, "aVertexPosition");
gl.enableVertexAttribArray(shaderProgram.vertexPositionAttribute);
shaderProgram.pMatrixUniform = gl.getUniformLocation(shaderProgram, "uPMatrix");
shaderProgram.mvMatrixUniform = gl.getUniformLocation(shaderProgram, "uMVMatrix");
shaderProgram.samplerUniform = gl.getUniformLocation(shaderProgram, "uSampler");
shaderProgram.textureCoordAttribute = gl.getAttribLocation(shaderProgram, "aTextureCoord");
shaderProgram.uEye = gl.getUniformLocation(shaderProgram, "uEye");
gl.enableVertexAttribArray(shaderProgram.textureCoordAttribute);
// clear buffer
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT | gl.STENCIL_BUFFER_BIT);
// Use TEXTURE0
gl.uniform1i(shaderProgram.samplerUniform, 0);
this.shaderProgram = shaderProgram;
}
_onWebglcontextlost(e) {
e.preventDefault();
this.trigger(EVENTS.RENDERING_CONTEXT_LOST);
}
_onWebglcontextrestored(e) {
this._initWebGL();
this.trigger(EVENTS.RENDERING_CONTEXT_RESTORE);
}
updateFieldOfView(fieldOfView) {
this.fieldOfView = fieldOfView;
this._updateViewport();
}
updateViewportDimensions(width, height) {
let viewPortChanged = false;
this.width = width;
this.height = height;
const w = width * DEVICE_PIXEL_RATIO;
const h = height * DEVICE_PIXEL_RATIO;
if (w !== this.canvas.width) {
this.canvas.width = w;
viewPortChanged = true;
}
if (h !== this.canvas.height) {
this.canvas.height = h;
viewPortChanged = true;
}
if (!viewPortChanged) {
return;
}
this._updateViewport();
this._shouldForceDraw = true;
}
_updateViewport() {
mat4.perspective(
this.pMatrix,
glMatrix.toRadian(this.fieldOfView),
this.canvas.width / this.canvas.height,
0.1,
100);
this.context.viewport(0, 0, this.context.drawingBufferWidth, this.context.drawingBufferHeight);
}
_initWebGL() {
let gl;
// TODO: Following code does need to be executed only if width/height, cubicStrip property is changed.
try {
this._initRenderingContext();
gl = this.context;
this.updateViewportDimensions(this.width, this.height);
this._initShaderProgram();
} catch (e) {
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.NO_WEBGL,
message: "no webgl support"
});
this.destroy();
console.error(e); // eslint-disable-line no-console
return;
}
// 캔버스를 투명으로 채운다.
gl.clearColor(0, 0, 0, 0);
const textureTarget = this._isCubeMap ? gl.TEXTURE_CUBE_MAP : gl.TEXTURE_2D;
if (this.texture) {
gl.deleteTexture(this.texture);
}
this.texture = WebGLUtils.createTexture(gl, textureTarget);
if (this._imageType === ImageType.CUBESTRIP) {
// TODO: Apply following options on other projection type.
gl.enable(gl.CULL_FACE);
// gl.enable(gl.DEPTH_TEST);
}
}
_initRenderingContext() {
if (this.hasRenderingContext()) {
return;
}
if (!window.WebGLRenderingContext) {
throw new Error("WebGLRenderingContext not available.");
}
this.context = WebGLUtils.getWebglContext(this.canvas, this._renderingContextAttributes);
if (!this.context) {
throw new Error("Failed to acquire 3D rendering context");
}
}
_initBuffers() {
const vertexPositionData = this._renderer.getVertexPositionData();
const indexData = this._renderer.getIndexData();
const textureCoordData = this._renderer.getTextureCoordData(this._imageConfig);
const gl = this.context;
this.vertexBuffer = WebGLUtils.initBuffer(
gl, gl.ARRAY_BUFFER, new Float32Array(vertexPositionData), 3,
this.shaderProgram.vertexPositionAttribute);
this.indexBuffer = WebGLUtils.initBuffer(
gl, gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(indexData), 1);
this.textureCoordBuffer = WebGLUtils.initBuffer(
gl, gl.ARRAY_BUFFER, new Float32Array(textureCoordData), this._isCubeMap ? 3 : 2,
this.shaderProgram.textureCoordAttribute);
this._bindBuffers();
}
_bindTexture() {
// Detect if it is EAC Format while CUBESTRIP mode.
// We assume it is EAC if image is not 3/2 ratio.
if (this._imageType === ImageType.CUBESTRIP) {
const {width, height} = this._renderer.getDimension(this._image);
const isEAC = width && height && width / height !== 1.5;
this.context.uniform1f(this.context.getUniformLocation(this.shaderProgram, "uIsEAC"), isEAC);
} else if (this._imageType === ImageType.PANORAMA) {
const {width, height} = this._renderer.getDimension(this._image);
const imageAspectRatio = width && height && width / height;
this._renderer.updateShaderData({imageAspectRatio});
}
// intialize shader buffers after image is loaded.(by updateShaderData)
// because buffer may be differ by image size.(eg. CylinderRenderer)
this._initBuffers();
this._renderer.bindTexture(
this.context,
this.texture,
this._image,
this._imageConfig,
);
this._shouldForceDraw = true;
this.trigger(EVENTS.BIND_TEXTURE);
}
_updateTexture() {
this._renderer.updateTexture(
this.context,
this._image,
this._imageConfig,
);
}
keepUpdate(doUpdate) {
if (doUpdate && this.isImageLoaded() === false) {
// Force to draw a frame after image is loaded on render()
this._shouldForceDraw = true;
}
this._keepUpdate = doUpdate;
}
startRender() {
this._animator.setCallback(this._render.bind(this));
this._animator.start();
}
stopRender() {
this._animator.stop();
}
renderWithQuaternion(quaternion, fieldOfView) {
if (!this.isImageLoaded()) {
return;
}
if (this._keepUpdate === false &&
this._lastQuaternion && quat.exactEquals(this._lastQuaternion, quaternion) &&
this.fieldOfView && this.fieldOfView === fieldOfView &&
this._shouldForceDraw === false) {
return;
}
// updatefieldOfView only if fieldOfView is changed.
if (fieldOfView !== undefined && fieldOfView !== this.fieldOfView) {
this.updateFieldOfView(fieldOfView);
}
this.mvMatrix = mat4.fromQuat(mat4.create(), quaternion);
this._draw();
this._lastQuaternion = quat.clone(quaternion);
if (this._shouldForceDraw) {
this._shouldForceDraw = false;
}
}
renderWithYawPitch(yaw, pitch, fieldOfView) {
if (!this.isImageLoaded()) {
return;
}
if (this._keepUpdate === false &&
this._lastYaw !== null && this._lastYaw === yaw &&
this._lastPitch !== null && this._lastPitch === pitch &&
this.fieldOfView && this.fieldOfView === fieldOfView &&
this._shouldForceDraw === false) {
return;
}
// fieldOfView 가 존재하면서 기존의 값과 다를 경우에만 업데이트 호출
if (fieldOfView !== undefined && fieldOfView !== this.fieldOfView) {
this.updateFieldOfView(fieldOfView);
}
mat4.identity(this.mvMatrix);
mat4.rotateX(this.mvMatrix, this.mvMatrix, -glMatrix.toRadian(pitch));
mat4.rotateY(this.mvMatrix, this.mvMatrix, -glMatrix.toRadian(yaw));
this._draw();
this._lastYaw = yaw;
this._lastPitch = pitch;
if (this._shouldForceDraw) {
this._shouldForceDraw = false;
}
}
_render() {
const yawPitchControl = this._yawPitchControl;
const fov = yawPitchControl.getFov();
if (yawPitchControl.shouldRenderWithQuaternion()) {
const quaternion = yawPitchControl.getQuaternion();
this.renderWithQuaternion(quaternion, fov);
} else {
const yawPitch = yawPitchControl.getYawPitch();
this.renderWithYawPitch(yawPitch.yaw, yawPitch.pitch, fov);
}
}
_renderStereo = (time, frame) => {
const vr = this._vr;
const gl = this.context;
const eyeParams = vr.getEyeParams(gl, frame);
if (!eyeParams) return;
vr.beforeRender(gl, frame);
// Render both eyes
for (const eyeIndex of [0, 1]) {
const eyeParam = eyeParams[eyeIndex];
this.mvMatrix = eyeParam.mvMatrix;
this.pMatrix = eyeParam.pMatrix;
gl.viewport(...eyeParam.viewport);
gl.uniform1f(this.shaderProgram.uEye, eyeIndex);
this._bindBuffers();
this._draw();
}
vr.afterRender();
}
_bindBuffers() {
const gl = this.context;
const program = this.shaderProgram;
const vertexBuffer = this.vertexBuffer;
const textureCoordBuffer = this.textureCoordBuffer;
gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
gl.enableVertexAttribArray(program.vertexPositionAttribute);
gl.vertexAttribPointer(
program.vertexPositionAttribute, vertexBuffer.itemSize, gl.FLOAT, false, 0, 0
);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.indexBuffer);
gl.bindBuffer(gl.ARRAY_BUFFER, textureCoordBuffer);
gl.enableVertexAttribArray(program.textureCoordAttribute);
gl.vertexAttribPointer(
program.textureCoordAttribute, textureCoordBuffer.itemSize, gl.FLOAT, false, 0, 0
);
}
_draw() {
if (this._isVideo && this._keepUpdate) {
this._updateTexture();
}
this._renderer.render({
gl: this.context,
shaderProgram: this.shaderProgram,
indexBuffer: this.indexBuffer,
mvMatrix: this.mvMatrix,
pMatrix: this.pMatrix,
});
}
/**
* Returns projection renderer by each type
*/
getProjectionRenderer() {
return this._renderer;
}
/**
* @return Promise
*/
enterVR() {
const vr = this._vr;
if (!WEBXR_SUPPORTED && !navigator.getVRDisplays) {
return Promise.reject("VR is not available on this browser.");
}
if (vr && vr.isPresenting()) {
return Promise.resolve("VR already enabled.");
}
return this._requestPresent();
}
exitVR = () => {
const vr = this._vr;
const gl = this.context;
const animator = this._animator;
if (!vr) return;
vr.removeEndCallback(this.exitVR);
vr.destroy();
this._vr = null;
// Restore canvas & context on iOS
if (IS_IOS) {
this._restoreStyle();
}
this.updateViewportDimensions(this.width, this.height);
this._updateViewport();
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
this._bindBuffers();
this._shouldForceDraw = true;
animator.stop();
animator.setContext(window);
animator.setCallback(this._render.bind(this));
animator.start();
}
_requestPresent() {
const gl = this.context;
const canvas = this.canvas;
const animator = this._animator;
this._vr = WEBXR_SUPPORTED ?
new XRManager() :
new VRManager();
const vr = this._vr;
animator.stop();
return new Promise((resolve, reject) => {
vr.requestPresent(canvas, gl)
.then(() => {
vr.addEndCallback(this.exitVR);
animator.setContext(vr.context);
animator.setCallback(this._onFirstVRFrame);
if (IS_IOS) {
this._setWrapperFullscreen();
}
this._shouldForceDraw = true;
animator.start();
resolve("success");
})
.catch(e => {
vr.destroy();
this._vr = null;
animator.start();
reject(e);
});
});
}
_onFirstVRFrame = (time, frame) => {
const vr = this._vr;
const gl = this.context;
const animator = this._animator;
// If rendering is not ready, wait for next frame
if (!vr.canRender(frame)) return;
const minusZDir = vec3.fromValues(0, 0, -1);
const eyeParam = vr.getEyeParams(gl, frame)[0];
// Extract only rotation
const mvMatrix = mat3.fromMat4(mat3.create(), eyeParam.mvMatrix);
const pMatrix = mat3.fromMat4(mat3.create(), eyeParam.pMatrix);
const mvInv = mat3.invert(mat3.create(), mvMatrix);
const pInv = mat3.invert(mat3.create(), pMatrix);
const viewDir = vec3.transformMat3(vec3.create(), minusZDir, pInv);
vec3.transformMat3(viewDir, viewDir, mvInv);
const yawOffset = mathUtil.yawOffsetBetween(viewDir, vec3.fromValues(0, 0, 1));
if (yawOffset === 0) {
// If the yawOffset is exactly 0, then device sensor is not ready
// So read it again until it has any value in it
return;
}
vr.setYawOffset(yawOffset);
animator.setCallback(this._renderStereo);
}
_setWrapperFullscreen() {
const wrapper = this._wrapper;
if (!wrapper) return;
this._wrapperOrigStyle = wrapper.getAttribute("style");
const wrapperStyle = wrapper.style;
wrapperStyle.width = "100vw";
wrapperStyle.height = "100vh";
wrapperStyle.position = "fixed";
wrapperStyle.left = "0";
wrapperStyle.top = "0";
wrapperStyle.zIndex = "9999";
}
_restoreStyle() {
const wrapper = this._wrapper;
const canvas = this.canvas;
if (!wrapper) return;
if (this._wrapperOrigStyle) {
wrapper.setAttribute("style", this._wrapperOrigStyle);
} else {
wrapper.removeAttribute("style");
}
this._wrapperOrigStyle = null;
// Restore canvas style
canvas.removeAttribute("style");
this._setDefaultCanvasStyle();
}
}
export default PanoImageRenderer;
| }
this._imageType = imageType;
this._isCubeMap = imageType === ImageType.CUBEMAP;
if (this._renderer) {
this._renderer.off();
}
switch (imageType) {
case ImageType.CUBEMAP:
this._renderer = new CubeRenderer();
break;
case ImageType.CUBESTRIP:
this._renderer = new CubeStripRenderer();
break;
case ImageType.PANORAMA:
this._renderer = new CylinderRenderer();
break;
case ImageType.STEREOSCOPIC_EQUI:
this._renderer = new SphereRenderer(this.sphericalConfig.stereoFormat);
break;
default:
this._renderer = new SphereRenderer(STEREO_FORMAT.NONE);
break;
}
this._renderer.on(Renderer.EVENTS.ERROR, e => {
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.RENDERER_ERROR,
message: e.message
});
});
this._initWebGL();
}
_initCanvas(width, height) {
const canvas = document.createEl | identifier_body |
row.rs | /*
* Copyright (C) 2020, 2021 Yury Vostrikov
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#![allow(dead_code)]
use std::{alloc::{alloc, dealloc, Layout}, env, fmt, mem, slice, io, io::Read, ops::{Deref, DerefMut}};
use once_cell::sync::Lazy;
use byteorder::{LittleEndian, ReadBytesExt};
use anyhow::{bail, Context, Result};
use super::*;
#[repr(C, packed)]
pub union RowAux {
pub remote_scn: [u8; 6],
pub run_crc: u32,
}
extern {
type RowData;
}
#[repr(C, packed)]
pub struct Row {
pub header_crc32c: u32,
pub lsn: i64,
pub scn: i64,
pub tag: u16,
shard_id: u16,
aux: RowAux,
tm: f64,
pub len: u32,
pub data_crc32c: u32,
_data: RowData,
}
const ROW_LAYOUT : Layout = unsafe { Layout::from_size_align_unchecked(46, 16) };
pub struct BoxRow {
ptr: *mut Row
}
impl Deref for BoxRow {
type Target = Row;
fn deref(&self) -> &Self::Target {
unsafe { &*self.ptr }
}
}
impl DerefMut for BoxRow {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.ptr }
}
}
impl Drop for BoxRow {
fn drop(&mut self) {
unsafe {
dealloc(self.ptr as *mut _, Row::layout(self.len));
}
}
}
impl Row {
fn layout(len: u32) -> Layout {
assert!(len < 2<<10);
let data = Layout::from_size_align(len as usize, 1).unwrap();
ROW_LAYOUT.extend_packed(data).unwrap()
}
fn alloc(len: u32) -> *mut Row {
unsafe {
let ptr = alloc(Self::layout(len)) as *mut Row;
(*ptr).len = len;
ptr
}
}
fn data_ptr(&self) -> *const u8 {
let ptr = self as *const _ as *const u8;
unsafe { ptr.add(ROW_LAYOUT.size()) }
}
pub fn data(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self.data_ptr(), self.len as usize)
}
}
pub fn data_mut(&mut self) -> &mut [u8] {
unsafe {
slice::from_raw_parts_mut(self.data_ptr() as *mut _, self.len as usize)
}
}
pub fn read(io: &mut dyn Read) -> Result<BoxRow> {
let mut header = [0; ROW_LAYOUT.size()];
io.read_exact(&mut header).context("reading header")?;
let header_crc32c = (&header[0..4]).read_u32::<LittleEndian>().unwrap();
let header_crc32c_calculated = crc32c(&header[4..]);
if header_crc32c_calculated != header_crc32c {
bail!("header crc32c mismatch: expected 0x{:08x}, calculated 0x{:08x}", | header_crc32c, header_crc32c_calculated);
}
let len = (&header[ROW_LAYOUT.size() - 8..]).read_u32::<LittleEndian>().unwrap();
let mut row = BoxRow { ptr: Self::alloc(len) };
row.as_bytes_mut().copy_from_slice(&header);
debug_assert!(row.len == len);
io.read_exact(row.data_mut()).context("reading body")?;
if crc32c(row.data()) != row.data_crc32c {
bail!("data crc32c mismatch: expected 0x{:08x}, calculated 0x{:08x}",
{row.data_crc32c}, crc32c(row.data()));
}
log::debug!("read row LSN:{}", {row.lsn});
Ok(row)
}
pub fn write(&self, io: &mut dyn io::Write) -> io::Result<usize> {
io.write_all(self.as_bytes())?; // FIXME: nasty and unportable
io.write_all(self.data())?;
Ok(ROW_LAYOUT.size() + self.data().len())
}
fn as_bytes(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self as *const _ as *const u8, ROW_LAYOUT.size())
}
}
fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe {
slice::from_raw_parts_mut(self as *mut _ as *mut u8, ROW_LAYOUT.size())
}
}
pub fn update_crc(&mut self) {
self.data_crc32c = crc32c(self.data());
self.header_crc32c = crc32c(&self.as_bytes()[4..])
}
fn tag(&self) -> Tag {
Tag::new(self.tag & TAG_MASK)
}
fn tag_type(&self) -> TagType {
TagType::new(self.tag & !TAG_MASK)
}
}
#[derive(Debug)]
#[derive(PartialEq, Eq)]
pub enum Tag {
SnapInitial,
SnapData,
WalData,
SnapFinal,
WalFinal,
RunCrc,
Nop,
RaftAppend,
RaftCommit,
RaftVote,
ShardCreate,
ShardAlter,
ShardFinal,
Tlv,
SysTag(u8),
UserTag(u8),
}
impl Tag {
fn new(repr: u16) -> Self {
match repr & TAG_MASK {
1 => Tag::SnapInitial,
2 => Tag::SnapData,
3 => Tag::WalData,
4 => Tag::SnapFinal,
5 => Tag::WalFinal,
6 => Tag::RunCrc,
7 => Tag::Nop,
8 => Tag::RaftAppend,
9 => Tag::RaftCommit,
10 => Tag::RaftVote,
11 => Tag::ShardCreate,
12 => Tag::ShardAlter,
13 => Tag::ShardFinal,
14 => Tag::Tlv,
t if t < 32 => Tag::SysTag(t as u8),
t => Tag::UserTag((t >> 5) as u8),
}
}
fn as_u16(&self) -> u16 {
match self {
Tag::SnapInitial => 1,
Tag::SnapData => 2,
Tag::WalData => 3,
Tag::SnapFinal => 4,
Tag::WalFinal => 5,
Tag::RunCrc => 6,
Tag::Nop => 7,
Tag::RaftAppend => 8,
Tag::RaftCommit => 9,
Tag::RaftVote => 10,
Tag::ShardCreate => 11,
Tag::ShardAlter => 12,
Tag::ShardFinal => 13,
Tag::Tlv => 14,
Tag::SysTag(t) => *t as u16,
Tag::UserTag(t) => *t as u16,
}
}
}
impl fmt::Display for Tag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Tag::SnapInitial => write!(f, "snap_initial"),
Tag::SnapData => write!(f, "snap_data"),
Tag::SnapFinal => write!(f, "snap_final"),
Tag::WalData => write!(f, "wal_data"),
Tag::WalFinal => write!(f, "wal_final"),
Tag::ShardCreate => write!(f, "shard_create"),
Tag::ShardAlter => write!(f, "shard_alter"),
Tag::ShardFinal => write!(f, "shard_final"),
Tag::RunCrc => write!(f, "run_crc"),
Tag::Nop => write!(f, "nop"),
Tag::RaftAppend => write!(f, "raft_append"),
Tag::RaftCommit => write!(f, "raft_commit"),
Tag::RaftVote => write!(f, "raft_vote"),
Tag::Tlv => write!(f, "tlv"),
Tag::SysTag(n) => write!(f, "sys{}", n),
Tag::UserTag(n) => write!(f, "usr{}", n)
}
}
}
/* two highest bit in tag encode tag type:
00 - invalid
01 - snap
10 - wal
11 - system wal */
pub const TAG_MASK: u16 = 0x3fff;
const TAG_SIZE: usize = 14;
enum TagType {
SNAP = 0x4000,
WAL = 0x8000,
SYS = 0xc000,
INVALID = 0,
}
impl TagType {
fn new(repr: u16) -> TagType {
match repr & !TAG_MASK {
0x4000 => TagType::SNAP,
0x8000 => TagType::WAL,
0xc000 => TagType::SYS,
_ => TagType::INVALID,
}
}
}
impl fmt::Display for TagType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TagType::SNAP => write!(f, "snap"),
TagType::WAL => write!(f, "wal"),
TagType::SYS => write!(f, "sys"),
TagType::INVALID => write!(f, "invalid"),
}
}
}
#[derive(PartialEq, Eq)]
enum ShardType {
POR,
RAFT,
PART
}
impl ShardType {
fn new(repr: u8) -> Result<Self> {
match repr {
0 => Ok(ShardType::POR),
1 => Ok(ShardType::RAFT),
2 => Ok(ShardType::PART),
_ => bail!("invalid shard type {}", repr)
}
}
}
// TODO: switch to byteordered?
struct LittleEndianReader<'a> (std::io::Cursor<&'a[u8]>);
impl<'a> LittleEndianReader<'a> {
fn new(buf: &'a[u8]) -> Self { Self(std::io::Cursor::new(buf)) }
fn read_u8(&mut self) -> u8 { self.0.read_u8().unwrap() }
fn read_u16(&mut self) -> u16 { self.0.read_u16::<LittleEndian>().unwrap() }
fn read_u32(&mut self) -> u32 { self.0.read_u32::<LittleEndian>().unwrap() }
fn read_i64(&mut self) -> i64 { self.0.read_i64::<LittleEndian>().unwrap() }
fn read_u64(&mut self) -> u64 { self.0.read_u64::<LittleEndian>().unwrap() }
fn read_str(&mut self, len: usize) -> &str {
let pos = self.0.position() as usize;
let raw = &self.0.get_ref()[pos..pos+len];
let (raw, _) = raw.split_at(raw.iter().position(|&x| x == 0).unwrap_or(len));
let str = std::str::from_utf8(raw).unwrap();
self.0.set_position((pos+len) as u64);
str
}
fn into_cursor(self) -> std::io::Cursor<&'a[u8]> { self.0 }
fn unparsed(&self) -> &[u8] {
&self.0.get_ref()[self.0.position() as usize..]
}
}
pub fn print_row<W: fmt::Write + fmt::Debug>(buf: &mut W, row: &Row,
handler: &dyn Fn(&mut W, u16, &[u8])) -> Result<()> {
fn int_flag(name: &str, default: bool) -> bool {
let flag = try {
let val = env::var(name).ok()?;
val.parse::<usize>().ok()?
};
Some(1) == flag || default
}
static PRINT_HEADER : Lazy<bool> = Lazy::new(|| { int_flag("OCTOPUS_CAT_ROW_HEADER", true) });
static PRINT_RUN_CRC : Lazy<bool> = Lazy::new(|| { int_flag("OCTOPUS_CAT_RUN_CRC", false) });
let tag = row.tag();
if *PRINT_HEADER || true {
write!(buf, "lsn:{}", {row.lsn})?;
if row.scn != -1 || tag == Tag::RaftVote || tag == Tag::SnapData {
write!(buf, " shard:{}", {row.shard_id})?;
if *PRINT_RUN_CRC {
write!(buf, " run_crc:0x{:08x}", unsafe {row.aux.run_crc})?;
}
}
write!(buf, " scn:{} tm:{:.3} t:{}/{} ", {row.scn}, {row.tm}, row.tag_type(), row.tag())?;
}
use mem::size_of;
let mut reader = LittleEndianReader::new(row.data());
match row.tag() {
Tag::SnapInitial => {
if row.data().len() == size_of::<u32>() * 3 {
let count = reader.read_u32();
let crc_log = reader.read_u32();
let crc_mod = reader.read_u32();
write!(buf, "count:{} run_crc_log:0x{:08x} run_crc_mod:0x{:08x}",
count, crc_log, crc_mod)?;
} else if row.scn == -1 {
let ver = reader.read_u8();
let count = reader.read_u32();
let flags = reader.read_u32();
write!(buf, "ver:{} count:{} flags:0x{:08x}", ver, count, flags)?;
} else {
write!(buf, "unknow format")?;
}
},
Tag::RunCrc => {
let mut scn = -1;
if row.data().len() == size_of::<i64>() + 2 * size_of::<u32>() {
scn = reader.read_i64();
}
let crc_log = reader.read_u32();
let _ = reader.read_u32(); /* ignore run_crc_mod */
write!(buf, "SCN:{} log:0x{:08x}", scn, crc_log)?;
},
Tag::SnapData | Tag::WalData | Tag::UserTag(_) | Tag::Tlv => {
handler(buf, row.tag, row.data());
return Ok(())
}
Tag::SnapFinal => {
let mut end = [0u8; 4];
for i in 0..3 {
end[i] = reader.read_u8()
}
if end != ['E' as u8, 'N' as u8, 'D' as u8, 0] {
write!(buf, " {:x?}", &end)?;
}
},
Tag::WalFinal => (),
Tag::Nop => {
if reader.unparsed().len() > 0 {
let dummy = reader.read_u16();
if dummy != 0 {
write!(buf, " {:02x}", dummy)?;
}
}
},
Tag::SysTag(_) => (),
Tag::RaftAppend | Tag::RaftCommit => {
let flags = reader.read_u16();
let term = reader.read_u64();
let inner_tag_raw = reader.read_u16();
let inner_tag = Tag::new(inner_tag_raw);
write!(buf, "term:{} flags:0x{:02x} it:{} ", flags, term, inner_tag)?;
match inner_tag {
Tag::RunCrc => {
let scn = reader.read_u64();
let log = reader.read_u32();
let _ = reader.read_u32(); /* ignore run_crc_mod */
write!(buf, "SCN:{} log:0x{:08x}", scn, log)?;
},
Tag::Nop => {
let dummy = reader.read_u16();
if dummy != 0 {
write!(buf, " {:02x}", dummy)?;
}
},
_ => {
handler(buf, inner_tag_raw, reader.into_cursor().into_inner());
return Ok(())
}
}
},
Tag::RaftVote => {
let flags = reader.read_u16();
let term = reader.read_u64();
let peer_id = reader.read_u8();
write!(buf, "term:{} flags:0x{:02x} peer:{}", term, flags, peer_id)?;
},
Tag::ShardCreate | Tag::ShardAlter => {
let ver = reader.read_u8();
if ver != 1 {
bail!("unknow version: {}", ver);
}
let shard_type = ShardType::new(reader.read_u8())?;
let estimated_row_count = reader.read_u32();
match row.tag() {
Tag::ShardCreate => write!(buf,"SHARD_CREATE")?,
Tag::ShardAlter => write!(buf, "SHARD_ALTER")?,
_ => unreachable!(),
}
write!(buf, " shard_id:{}", {row.shard_id})?;
match shard_type {
ShardType::RAFT => write!(buf, " RAFT")?,
ShardType::POR => write!(buf, " POR")?,
ShardType::PART => write!(buf, " PART")?,
}
let mod_name = reader.read_str(16);
write!(buf, " {}", mod_name)?;
write!(buf, " count:{} run_crc:0x{:08x}", estimated_row_count, unsafe { row.aux.run_crc })?;
write!(buf, " master:{}", reader.read_str(16))?;
for _ in 0..4 {
let peer_name = reader.read_str(16);
if peer_name.len() > 0 {
write!(buf, " repl:{}", peer_name)?;
}
}
let aux_len = reader.read_u16();
if aux_len > 0 {
write!(buf, " aux:")?;
for _ in 0..aux_len {
let b = reader.read_u8();
write!(buf, "{:02x} ", b)?;
}
}
},
Tag::ShardFinal => {
let dummy = reader.read_u16();
if dummy != 0 {
write!(buf, " {:02x}", dummy)?;
}
},
}
if reader.unparsed().len() > 0 {
write!(buf, " unparsed: {:x?} ", reader.unparsed())?;
}
Ok(())
}
#[test]
fn test_print_row() {
use std::{path::Path, fmt::Write};
println!("current dir {:?}", env::current_dir().unwrap());
let mut xlog = XLog::name(Path::new("testdata/00000000000000000002.xlog")).unwrap();
let mut buf = String::new();
env::set_var("OCTOPUS_CAT_ROW_HEADER", "1");
fn hexdump(buf: &mut String, _tag: u16, data: &[u8]) {
write!(buf, " {:?x}", data).unwrap();
}
if let IO::Read(reader) = &mut xlog.io {
loop {
match reader.read_row() {
Ok(Some(row)) => {
print_row(&mut buf, &row, &hexdump).unwrap();
println!("row {}", buf);
buf.clear();
},
Ok(None) => break,
Err(err) => {
println!("fail {:?}", err);
break;
}
}
}
}
}
mod ffi {
use super::*;
use crate::tbuf::TBuf;
#[no_mangle]
unsafe extern "C" fn print_row(out: *mut TBuf, row: *const Row,
handler: Option<extern fn(out: *mut TBuf, tag: u16, data: *const TBuf) -> ()>) {
let ret = super::print_row(&mut *out, &*row, &|buf: &mut TBuf, tag: u16, data: &[u8]| {
let buf = &mut*buf;
if let Some(f) = handler {
f(buf, tag, &TBuf::from_slice(data))
} else {
use std::fmt::Write;
write!(buf, " {:x?}", data).unwrap();
}
});
if let Err(err) = ret {
log::error!("{:?}", err);
}
}
} | random_line_split | |
row.rs | /*
* Copyright (C) 2020, 2021 Yury Vostrikov
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#![allow(dead_code)]
use std::{alloc::{alloc, dealloc, Layout}, env, fmt, mem, slice, io, io::Read, ops::{Deref, DerefMut}};
use once_cell::sync::Lazy;
use byteorder::{LittleEndian, ReadBytesExt};
use anyhow::{bail, Context, Result};
use super::*;
#[repr(C, packed)]
pub union RowAux {
pub remote_scn: [u8; 6],
pub run_crc: u32,
}
extern {
type RowData;
}
#[repr(C, packed)]
pub struct Row {
pub header_crc32c: u32,
pub lsn: i64,
pub scn: i64,
pub tag: u16,
shard_id: u16,
aux: RowAux,
tm: f64,
pub len: u32,
pub data_crc32c: u32,
_data: RowData,
}
const ROW_LAYOUT : Layout = unsafe { Layout::from_size_align_unchecked(46, 16) };
pub struct BoxRow {
ptr: *mut Row
}
impl Deref for BoxRow {
type Target = Row;
fn deref(&self) -> &Self::Target {
unsafe { &*self.ptr }
}
}
impl DerefMut for BoxRow {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.ptr }
}
}
impl Drop for BoxRow {
fn drop(&mut self) {
unsafe {
dealloc(self.ptr as *mut _, Row::layout(self.len));
}
}
}
impl Row {
fn layout(len: u32) -> Layout {
assert!(len < 2<<10);
let data = Layout::from_size_align(len as usize, 1).unwrap();
ROW_LAYOUT.extend_packed(data).unwrap()
}
fn alloc(len: u32) -> *mut Row {
unsafe {
let ptr = alloc(Self::layout(len)) as *mut Row;
(*ptr).len = len;
ptr
}
}
fn data_ptr(&self) -> *const u8 {
let ptr = self as *const _ as *const u8;
unsafe { ptr.add(ROW_LAYOUT.size()) }
}
pub fn data(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self.data_ptr(), self.len as usize)
}
}
pub fn data_mut(&mut self) -> &mut [u8] {
unsafe {
slice::from_raw_parts_mut(self.data_ptr() as *mut _, self.len as usize)
}
}
pub fn read(io: &mut dyn Read) -> Result<BoxRow> {
let mut header = [0; ROW_LAYOUT.size()];
io.read_exact(&mut header).context("reading header")?;
let header_crc32c = (&header[0..4]).read_u32::<LittleEndian>().unwrap();
let header_crc32c_calculated = crc32c(&header[4..]);
if header_crc32c_calculated != header_crc32c {
bail!("header crc32c mismatch: expected 0x{:08x}, calculated 0x{:08x}",
header_crc32c, header_crc32c_calculated);
}
let len = (&header[ROW_LAYOUT.size() - 8..]).read_u32::<LittleEndian>().unwrap();
let mut row = BoxRow { ptr: Self::alloc(len) };
row.as_bytes_mut().copy_from_slice(&header);
debug_assert!(row.len == len);
io.read_exact(row.data_mut()).context("reading body")?;
if crc32c(row.data()) != row.data_crc32c {
bail!("data crc32c mismatch: expected 0x{:08x}, calculated 0x{:08x}",
{row.data_crc32c}, crc32c(row.data()));
}
log::debug!("read row LSN:{}", {row.lsn});
Ok(row)
}
pub fn write(&self, io: &mut dyn io::Write) -> io::Result<usize> {
io.write_all(self.as_bytes())?; // FIXME: nasty and unportable
io.write_all(self.data())?;
Ok(ROW_LAYOUT.size() + self.data().len())
}
fn as_bytes(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self as *const _ as *const u8, ROW_LAYOUT.size())
}
}
fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe {
slice::from_raw_parts_mut(self as *mut _ as *mut u8, ROW_LAYOUT.size())
}
}
pub fn update_crc(&mut self) {
self.data_crc32c = crc32c(self.data());
self.header_crc32c = crc32c(&self.as_bytes()[4..])
}
fn tag(&self) -> Tag {
Tag::new(self.tag & TAG_MASK)
}
fn tag_type(&self) -> TagType {
TagType::new(self.tag & !TAG_MASK)
}
}
#[derive(Debug)]
#[derive(PartialEq, Eq)]
pub enum Tag {
SnapInitial,
SnapData,
WalData,
SnapFinal,
WalFinal,
RunCrc,
Nop,
RaftAppend,
RaftCommit,
RaftVote,
ShardCreate,
ShardAlter,
ShardFinal,
Tlv,
SysTag(u8),
UserTag(u8),
}
impl Tag {
fn new(repr: u16) -> Self {
match repr & TAG_MASK {
1 => Tag::SnapInitial,
2 => Tag::SnapData,
3 => Tag::WalData,
4 => Tag::SnapFinal,
5 => Tag::WalFinal,
6 => Tag::RunCrc,
7 => Tag::Nop,
8 => Tag::RaftAppend,
9 => Tag::RaftCommit,
10 => Tag::RaftVote,
11 => Tag::ShardCreate,
12 => Tag::ShardAlter,
13 => Tag::ShardFinal,
14 => Tag::Tlv,
t if t < 32 => Tag::SysTag(t as u8),
t => Tag::UserTag((t >> 5) as u8),
}
}
fn as_u16(&self) -> u16 {
match self {
Tag::SnapInitial => 1,
Tag::SnapData => 2,
Tag::WalData => 3,
Tag::SnapFinal => 4,
Tag::WalFinal => 5,
Tag::RunCrc => 6,
Tag::Nop => 7,
Tag::RaftAppend => 8,
Tag::RaftCommit => 9,
Tag::RaftVote => 10,
Tag::ShardCreate => 11,
Tag::ShardAlter => 12,
Tag::ShardFinal => 13,
Tag::Tlv => 14,
Tag::SysTag(t) => *t as u16,
Tag::UserTag(t) => *t as u16,
}
}
}
impl fmt::Display for Tag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Tag::SnapInitial => write!(f, "snap_initial"),
Tag::SnapData => write!(f, "snap_data"),
Tag::SnapFinal => write!(f, "snap_final"),
Tag::WalData => write!(f, "wal_data"),
Tag::WalFinal => write!(f, "wal_final"),
Tag::ShardCreate => write!(f, "shard_create"),
Tag::ShardAlter => write!(f, "shard_alter"),
Tag::ShardFinal => write!(f, "shard_final"),
Tag::RunCrc => write!(f, "run_crc"),
Tag::Nop => write!(f, "nop"),
Tag::RaftAppend => write!(f, "raft_append"),
Tag::RaftCommit => write!(f, "raft_commit"),
Tag::RaftVote => write!(f, "raft_vote"),
Tag::Tlv => write!(f, "tlv"),
Tag::SysTag(n) => write!(f, "sys{}", n),
Tag::UserTag(n) => write!(f, "usr{}", n)
}
}
}
/* two highest bit in tag encode tag type:
00 - invalid
01 - snap
10 - wal
11 - system wal */
pub const TAG_MASK: u16 = 0x3fff;
const TAG_SIZE: usize = 14;
enum TagType {
SNAP = 0x4000,
WAL = 0x8000,
SYS = 0xc000,
INVALID = 0,
}
impl TagType {
fn new(repr: u16) -> TagType {
match repr & !TAG_MASK {
0x4000 => TagType::SNAP,
0x8000 => TagType::WAL,
0xc000 => TagType::SYS,
_ => TagType::INVALID,
}
}
}
impl fmt::Display for TagType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TagType::SNAP => write!(f, "snap"),
TagType::WAL => write!(f, "wal"),
TagType::SYS => write!(f, "sys"),
TagType::INVALID => write!(f, "invalid"),
}
}
}
#[derive(PartialEq, Eq)]
enum ShardType {
POR,
RAFT,
PART
}
impl ShardType {
fn new(repr: u8) -> Result<Self> {
match repr {
0 => Ok(ShardType::POR),
1 => Ok(ShardType::RAFT),
2 => Ok(ShardType::PART),
_ => bail!("invalid shard type {}", repr)
}
}
}
// TODO: switch to byteordered?
struct LittleEndianReader<'a> (std::io::Cursor<&'a[u8]>);
impl<'a> LittleEndianReader<'a> {
fn new(buf: &'a[u8]) -> Self { Self(std::io::Cursor::new(buf)) }
fn read_u8(&mut self) -> u8 { self.0.read_u8().unwrap() }
fn read_u16(&mut self) -> u16 { self.0.read_u16::<LittleEndian>().unwrap() }
fn read_u32(&mut self) -> u32 { self.0.read_u32::<LittleEndian>().unwrap() }
fn read_i64(&mut self) -> i64 { self.0.read_i64::<LittleEndian>().unwrap() }
fn read_u64(&mut self) -> u64 { self.0.read_u64::<LittleEndian>().unwrap() }
fn read_str(&mut self, len: usize) -> &str {
let pos = self.0.position() as usize;
let raw = &self.0.get_ref()[pos..pos+len];
let (raw, _) = raw.split_at(raw.iter().position(|&x| x == 0).unwrap_or(len));
let str = std::str::from_utf8(raw).unwrap();
self.0.set_position((pos+len) as u64);
str
}
fn into_cursor(self) -> std::io::Cursor<&'a[u8]> { self.0 }
fn | (&self) -> &[u8] {
&self.0.get_ref()[self.0.position() as usize..]
}
}
pub fn print_row<W: fmt::Write + fmt::Debug>(buf: &mut W, row: &Row,
handler: &dyn Fn(&mut W, u16, &[u8])) -> Result<()> {
fn int_flag(name: &str, default: bool) -> bool {
let flag = try {
let val = env::var(name).ok()?;
val.parse::<usize>().ok()?
};
Some(1) == flag || default
}
static PRINT_HEADER : Lazy<bool> = Lazy::new(|| { int_flag("OCTOPUS_CAT_ROW_HEADER", true) });
static PRINT_RUN_CRC : Lazy<bool> = Lazy::new(|| { int_flag("OCTOPUS_CAT_RUN_CRC", false) });
let tag = row.tag();
if *PRINT_HEADER || true {
write!(buf, "lsn:{}", {row.lsn})?;
if row.scn != -1 || tag == Tag::RaftVote || tag == Tag::SnapData {
write!(buf, " shard:{}", {row.shard_id})?;
if *PRINT_RUN_CRC {
write!(buf, " run_crc:0x{:08x}", unsafe {row.aux.run_crc})?;
}
}
write!(buf, " scn:{} tm:{:.3} t:{}/{} ", {row.scn}, {row.tm}, row.tag_type(), row.tag())?;
}
use mem::size_of;
let mut reader = LittleEndianReader::new(row.data());
match row.tag() {
Tag::SnapInitial => {
if row.data().len() == size_of::<u32>() * 3 {
let count = reader.read_u32();
let crc_log = reader.read_u32();
let crc_mod = reader.read_u32();
write!(buf, "count:{} run_crc_log:0x{:08x} run_crc_mod:0x{:08x}",
count, crc_log, crc_mod)?;
} else if row.scn == -1 {
let ver = reader.read_u8();
let count = reader.read_u32();
let flags = reader.read_u32();
write!(buf, "ver:{} count:{} flags:0x{:08x}", ver, count, flags)?;
} else {
write!(buf, "unknow format")?;
}
},
Tag::RunCrc => {
let mut scn = -1;
if row.data().len() == size_of::<i64>() + 2 * size_of::<u32>() {
scn = reader.read_i64();
}
let crc_log = reader.read_u32();
let _ = reader.read_u32(); /* ignore run_crc_mod */
write!(buf, "SCN:{} log:0x{:08x}", scn, crc_log)?;
},
Tag::SnapData | Tag::WalData | Tag::UserTag(_) | Tag::Tlv => {
handler(buf, row.tag, row.data());
return Ok(())
}
Tag::SnapFinal => {
let mut end = [0u8; 4];
for i in 0..3 {
end[i] = reader.read_u8()
}
if end != ['E' as u8, 'N' as u8, 'D' as u8, 0] {
write!(buf, " {:x?}", &end)?;
}
},
Tag::WalFinal => (),
Tag::Nop => {
if reader.unparsed().len() > 0 {
let dummy = reader.read_u16();
if dummy != 0 {
write!(buf, " {:02x}", dummy)?;
}
}
},
Tag::SysTag(_) => (),
Tag::RaftAppend | Tag::RaftCommit => {
let flags = reader.read_u16();
let term = reader.read_u64();
let inner_tag_raw = reader.read_u16();
let inner_tag = Tag::new(inner_tag_raw);
write!(buf, "term:{} flags:0x{:02x} it:{} ", flags, term, inner_tag)?;
match inner_tag {
Tag::RunCrc => {
let scn = reader.read_u64();
let log = reader.read_u32();
let _ = reader.read_u32(); /* ignore run_crc_mod */
write!(buf, "SCN:{} log:0x{:08x}", scn, log)?;
},
Tag::Nop => {
let dummy = reader.read_u16();
if dummy != 0 {
write!(buf, " {:02x}", dummy)?;
}
},
_ => {
handler(buf, inner_tag_raw, reader.into_cursor().into_inner());
return Ok(())
}
}
},
Tag::RaftVote => {
let flags = reader.read_u16();
let term = reader.read_u64();
let peer_id = reader.read_u8();
write!(buf, "term:{} flags:0x{:02x} peer:{}", term, flags, peer_id)?;
},
Tag::ShardCreate | Tag::ShardAlter => {
let ver = reader.read_u8();
if ver != 1 {
bail!("unknow version: {}", ver);
}
let shard_type = ShardType::new(reader.read_u8())?;
let estimated_row_count = reader.read_u32();
match row.tag() {
Tag::ShardCreate => write!(buf,"SHARD_CREATE")?,
Tag::ShardAlter => write!(buf, "SHARD_ALTER")?,
_ => unreachable!(),
}
write!(buf, " shard_id:{}", {row.shard_id})?;
match shard_type {
ShardType::RAFT => write!(buf, " RAFT")?,
ShardType::POR => write!(buf, " POR")?,
ShardType::PART => write!(buf, " PART")?,
}
let mod_name = reader.read_str(16);
write!(buf, " {}", mod_name)?;
write!(buf, " count:{} run_crc:0x{:08x}", estimated_row_count, unsafe { row.aux.run_crc })?;
write!(buf, " master:{}", reader.read_str(16))?;
for _ in 0..4 {
let peer_name = reader.read_str(16);
if peer_name.len() > 0 {
write!(buf, " repl:{}", peer_name)?;
}
}
let aux_len = reader.read_u16();
if aux_len > 0 {
write!(buf, " aux:")?;
for _ in 0..aux_len {
let b = reader.read_u8();
write!(buf, "{:02x} ", b)?;
}
}
},
Tag::ShardFinal => {
let dummy = reader.read_u16();
if dummy != 0 {
write!(buf, " {:02x}", dummy)?;
}
},
}
if reader.unparsed().len() > 0 {
write!(buf, " unparsed: {:x?} ", reader.unparsed())?;
}
Ok(())
}
#[test]
fn test_print_row() {
use std::{path::Path, fmt::Write};
println!("current dir {:?}", env::current_dir().unwrap());
let mut xlog = XLog::name(Path::new("testdata/00000000000000000002.xlog")).unwrap();
let mut buf = String::new();
env::set_var("OCTOPUS_CAT_ROW_HEADER", "1");
fn hexdump(buf: &mut String, _tag: u16, data: &[u8]) {
write!(buf, " {:?x}", data).unwrap();
}
if let IO::Read(reader) = &mut xlog.io {
loop {
match reader.read_row() {
Ok(Some(row)) => {
print_row(&mut buf, &row, &hexdump).unwrap();
println!("row {}", buf);
buf.clear();
},
Ok(None) => break,
Err(err) => {
println!("fail {:?}", err);
break;
}
}
}
}
}
mod ffi {
use super::*;
use crate::tbuf::TBuf;
#[no_mangle]
unsafe extern "C" fn print_row(out: *mut TBuf, row: *const Row,
handler: Option<extern fn(out: *mut TBuf, tag: u16, data: *const TBuf) -> ()>) {
let ret = super::print_row(&mut *out, &*row, &|buf: &mut TBuf, tag: u16, data: &[u8]| {
let buf = &mut*buf;
if let Some(f) = handler {
f(buf, tag, &TBuf::from_slice(data))
} else {
use std::fmt::Write;
write!(buf, " {:x?}", data).unwrap();
}
});
if let Err(err) = ret {
log::error!("{:?}", err);
}
}
}
| unparsed | identifier_name |
main.go | package main
import (
"context"
"crypto/tls"
"flag"
"fmt"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/gorilla/handlers"
"github.com/kyma-project/kyma/components/apiserver-proxy/cmd/proxy/reload"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/monitoring"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/spdy"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/golang/glog"
"github.com/hkwi/h2c"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/authn"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/authz"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/proxy"
"github.com/spf13/pflag"
"golang.org/x/net/http2"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
certutil "k8s.io/client-go/util/cert"
cliflag "k8s.io/component-base/cli/flag"
)
const (
corsAllowOriginHeader = "Access-Control-Allow-Origin"
corsAllowMethodsHeader = "Access-Control-Allow-Methods"
corsAllowHeadersHeader = "Access-Control-Allow-Headers"
corsAllowCredentialsHeader = "Access-Control-Allow-Credentials"
)
var corsHeaders = []string{corsAllowOriginHeader, corsAllowMethodsHeader, corsAllowHeadersHeader, corsAllowCredentialsHeader}
type config struct {
insecureListenAddress string
secureListenAddress string
upstream string
upstreamForceH2C bool
auth proxy.Config
tls tlsConfig
kubeconfigLocation string
cors corsConfig
metricsListenAddress string
}
type tlsConfig struct {
certFile string
keyFile string
minVersion string
cipherSuites []string
}
type corsConfig struct {
allowHeaders []string
allowOrigin []string
allowMethods []string
}
var versions = map[string]uint16{
"VersionTLS10": tls.VersionTLS10,
"VersionTLS11": tls.VersionTLS11,
"VersionTLS12": tls.VersionTLS12,
}
func tlsVersion(versionName string) (uint16, error) {
if version, ok := versions[versionName]; ok {
return version, nil
}
return 0, fmt.Errorf("unknown tls version %q", versionName)
}
func main() {
cfg := config{
auth: proxy.Config{
Authentication: &authn.AuthnConfig{
X509: &authn.X509Config{},
Header: &authn.AuthnHeaderConfig{},
OIDC: &authn.OIDCConfig{},
},
Authorization: &authz.Config{},
},
cors: corsConfig{},
}
flagset := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
// Add glog flags
flagset.AddGoFlagSet(flag.CommandLine)
// kube-rbac-proxy flags
flagset.StringVar(&cfg.insecureListenAddress, "insecure-listen-address", "", "The address the kube-rbac-proxy HTTP server should listen on.")
flagset.StringVar(&cfg.secureListenAddress, "secure-listen-address", "", "The address the kube-rbac-proxy HTTPs server should listen on.")
flagset.StringVar(&cfg.upstream, "upstream", "", "The upstream URL to proxy to once requests have successfully been authenticated and authorized.")
flagset.BoolVar(&cfg.upstreamForceH2C, "upstream-force-h2c", false, "Force h2c to communiate with the upstream. This is required when the upstream speaks h2c(http/2 cleartext - insecure variant of http/2) only. For example, go-grpc server in the insecure mode, such as helm's tiller w/o TLS, speaks h2c only")
flagset.StringVar(&cfg.auth.Authorization.ResourceAttributesFile, "resource-attributes-file", "", "File spec of attributes-record to use for SubjectAccessReview. If unspecified, requests will attempted to be verified through non-resource-url attributes in the SubjectAccessReview.")
// TLS flags
flagset.StringVar(&cfg.tls.certFile, "tls-cert-file", "", "File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert)")
flagset.StringVar(&cfg.tls.keyFile, "tls-private-key-file", "", "File containing the default x509 private key matching --tls-cert-file.")
flagset.StringVar(&cfg.tls.minVersion, "tls-min-version", "VersionTLS12", "Minimum TLS version supported. Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants.")
flagset.StringSliceVar(&cfg.tls.cipherSuites, "tls-cipher-suites", nil, "Comma-separated list of cipher suites for the server. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). If omitted, the default Go cipher suites will be used")
// Auth flags
flagset.StringVar(&cfg.auth.Authentication.X509.ClientCAFile, "client-ca-file", "", "If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.")
flagset.BoolVar(&cfg.auth.Authentication.Header.Enabled, "auth-header-fields-enabled", false, "When set to true, kube-rbac-proxy adds auth-related fields to the headers of http requests sent to the upstream")
flagset.StringVar(&cfg.auth.Authentication.Header.UserFieldName, "auth-header-user-field-name", "x-remote-user", "The name of the field inside a http(2) request header to tell the upstream server about the user's name")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupsFieldName, "auth-header-groups-field-name", "x-remote-groups", "The name of the field inside a http(2) request header to tell the upstream server about the user's groups")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupSeparator, "auth-header-groups-field-separator", "|", "The separator string used for concatenating multiple group names in a groups header field's value")
//Authn OIDC flags
flagset.StringVar(&cfg.auth.Authentication.OIDC.IssuerURL, "oidc-issuer", "", "The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).")
flagset.StringVar(&cfg.auth.Authentication.OIDC.ClientID, "oidc-clientID", "", "The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsClaim, "oidc-groups-claim", "groups", "Identifier of groups in JWT claim, by default set to 'groups'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.UsernameClaim, "oidc-username-claim", "email", "Identifier of the user in JWT claim, by default set to 'email'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsPrefix, "oidc-groups-prefix", "", "If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.")
flagset.StringArrayVar(&cfg.auth.Authentication.OIDC.SupportedSigningAlgs, "oidc-sign-alg", []string{"RS256"}, "Supported signing algorithms, default RS256")
flagset.StringVar(&cfg.auth.Authentication.OIDC.CAFile, "oidc-ca-file", "", "If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.")
//Kubeconfig flag
flagset.StringVar(&cfg.kubeconfigLocation, "kubeconfig", "", "Path to a kubeconfig file, specifying how to connect to the API server. If unset, in-cluster configuration will be used")
// CORS flags
flagset.StringSliceVar(&cfg.cors.allowOrigin, "cors-allow-origin", []string{"*"}, "List of CORS allowed origins")
flagset.StringSliceVar(&cfg.cors.allowMethods, "cors-allow-methods", []string{"GET", "POST", "PUT", "DELETE"}, "List of CORS allowed methods")
flagset.StringSliceVar(&cfg.cors.allowHeaders, "cors-allow-headers", []string{"Authorization", "Content-Type"}, "List of CORS allowed headers")
// Prometheus
flagset.StringVar(&cfg.metricsListenAddress, "metrics-listen-address", "", "The address the metric endpoint binds to.")
flagset.Parse(os.Args[1:])
kcfg := initKubeConfig(cfg.kubeconfigLocation)
upstreamURL, err := url.Parse(cfg.upstream)
if err != nil {
glog.Fatalf("Failed to build parse upstream URL: %v", err)
}
spdyMetrics := monitoring.NewSPDYMetrics()
spdyProxy := spdy.New(kcfg, upstreamURL, spdyMetrics)
kubeClient, err := kubernetes.NewForConfig(kcfg)
if err != nil {
glog.Fatalf("Failed to instantiate Kubernetes client: %v", err)
}
var oidcAuthenticator authenticator.Request
fileWatcherCtx, fileWatcherCtxCancel := context.WithCancel(context.Background())
// If OIDC configuration provided, use oidc authenticator
if cfg.auth.Authentication.OIDC.IssuerURL != "" {
oidcAuthenticator, err = setupOIDCAuthReloader(fileWatcherCtx, cfg.auth.Authentication.OIDC)
if err != nil {
glog.Fatalf("Failed to instantiate OIDC authenticator: %v", err)
}
} else {
//Use Delegating authenticator
tokenClient := kubeClient.AuthenticationV1().TokenReviews()
oidcAuthenticator, err = authn.NewDelegatingAuthenticator(tokenClient, cfg.auth.Authentication)
if err != nil {
glog.Fatalf("Failed to instantiate delegating authenticator: %v", err)
}
}
metrics, err := monitoring.NewProxyMetrics()
if err != nil {
glog.Fatalf("Failed to create metrics: %v", err)
}
authProxy := proxy.New(cfg.auth, nil, oidcAuthenticator, metrics)
if err != nil {
glog.Fatalf("Failed to create rbac-proxy: %v", err)
}
proxyForApiserver := strings.Contains(cfg.upstream, proxy.KUBERNETES_SERVICE)
rp, err := newReverseProxy(upstreamURL, kcfg, proxyForApiserver)
if err != nil {
glog.Fatalf("Unable to create reverse proxy, %s", err)
}
//Prometheus
prometheusRegistry := prometheus.NewRegistry()
err = prometheusRegistry.Register(prometheus.NewGoCollector())
if err != nil {
glog.Fatalf("failed to register Go runtime metrics: %v", err)
}
err = prometheusRegistry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
if err != nil {
glog.Fatalf("failed to register process metrics: %v", err)
}
mux := http.NewServeMux()
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
ok := authProxy.Handle(w, req)
if !ok {
return
}
if spdyProxy.IsSpdyRequest(req) {
spdyProxy.ServeHTTP(w, req)
} else {
rp.ServeHTTP(w, req)
}
}))
if cfg.secureListenAddress != "" {
srv := &http.Server{Handler: getCORSHandler(mux, cfg.cors)}
if cfg.tls.certFile == "" && cfg.tls.keyFile == "" {
glog.Info("Generating self signed cert as no cert is provided")
certBytes, keyBytes, err := certutil.GenerateSelfSignedCertKey("", nil, nil)
if err != nil {
glog.Fatalf("Failed to generate self signed cert and key: %v", err)
}
cert, err := tls.X509KeyPair(certBytes, keyBytes)
if err != nil {
glog.Fatalf("Failed to load generated self signed cert and key: %v", err)
}
version, err := tlsVersion(cfg.tls.minVersion)
if err != nil {
glog.Fatalf("TLS version invalid: %v", err)
}
cipherSuiteIDs, err := cliflag.TLSCipherSuites(cfg.tls.cipherSuites)
if err != nil {
glog.Fatalf("Failed to convert TLS cipher suite name to ID: %v", err)
}
srv.TLSConfig = &tls.Config{
CipherSuites: cipherSuiteIDs,
Certificates: []tls.Certificate{cert},
MinVersion: version,
// To enable http/2
// See net/http.Server.shouldConfigureHTTP2ForServe for more context
NextProtos: []string{"h2"},
}
} else {
certReloader, err := setupTLSCertReloader(fileWatcherCtx, cfg.tls.certFile, cfg.tls.keyFile)
if err != nil {
glog.Fatalf("Failed to create ReloadableTLSCertProvider: %v", err)
}
//Configure srv with GetCertificate function
srv.TLSConfig = &tls.Config{
GetCertificate: certReloader.GetCertificateFunc,
}
}
l, err := net.Listen("tcp", cfg.secureListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on secure address: %v", err)
}
glog.Infof("Listening securely on %v", cfg.secureListenAddress)
go srv.ServeTLS(l, "", "")
}
if cfg.metricsListenAddress != "" {
srv := &http.Server{Handler: promhttp.Handler()}
l, err := net.Listen("tcp", cfg.metricsListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on insecure address: %v", err)
}
glog.Infof("Listening for metrics on %v", cfg.metricsListenAddress)
go srv.Serve(l)
}
if cfg.insecureListenAddress != "" {
if cfg.upstreamForceH2C && !proxyForApiserver {
// Force http/2 for connections to the upstream i.e. do not start with HTTP1.1 UPGRADE req to
// initialize http/2 session.
// See https://github.com/golang/go/issues/14141#issuecomment-219212895 for more context
rp.Transport = &http2.Transport{
// Allow http schema. This doesn't automatically disable TLS
AllowHTTP: true,
// Do disable TLS.
// In combination with the schema check above. We could enforce h2c against the upstream server
DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) {
return net.Dial(netw, addr)
},
}
}
// Background:
//
// golang's http2 server doesn't support h2c
// https://github.com/golang/go/issues/16696
//
//
// Action:
//
// Use hkwi/h2c so that you can properly handle HTTP Upgrade requests over plain TCP,
// which is one of consequences for a h2c support.
//
// See https://github.com/golang/go/issues/14141 for more context.
//
// Possible alternative:
//
// We could potentially use grpc-go server's HTTP handler support
// which would handle HTTP UPGRADE from http1.1 to http/2, especially in case
// what you wanted kube-rbac-proxy to authn/authz was gRPC over h2c calls.
//
// Note that golang's http server requires a client(including gRPC) to send HTTP Upgrade req to
// property start http/2.
//
// but it isn't straight-forward to understand.
// Also note that at time of writing this, grpc-go's server implementation still lacks
// a h2c support for communication against the upstream.
//
// See belows for more information:
// - https://github.com/grpc/grpc-go/pull/1406/files
// - https://github.com/grpc/grpc-go/issues/549#issuecomment-191458335
// - https://github.com/golang/go/issues/14141#issuecomment-176465220
h2cHandler := &h2c.Server{Handler: mux}
srv := &http.Server{Handler: h2cHandler}
l, err := net.Listen("tcp", cfg.insecureListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on insecure address: %v", err)
}
glog.Infof("Listening insecurely on %v", cfg.insecureListenAddress)
go srv.Serve(l)
}
term := make(chan os.Signal)
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
select {
case <-term:
glog.Info("Received SIGTERM, exiting gracefully...")
fileWatcherCtxCancel()
}
//Allow for file watchers to close gracefully
time.Sleep(1 * time.Second)
}
// Returns intiliazed config, allows local usage (outside cluster) based on provided kubeconfig or in-cluter
func initKubeConfig(kcLocation string) *rest.Config {
if kcLocation != "" {
kubeConfig, err := clientcmd.BuildConfigFromFlags("", kcLocation)
if err != nil {
glog.Fatalf("unable to build rest config based on provided path to kubeconfig file %s", err.Error())
}
return kubeConfig
}
kubeConfig, err := rest.InClusterConfig()
if err != nil {
glog.Fatal("cannot find Service Account in pod to build in-cluster rest config")
}
return kubeConfig
}
func | (target *url.URL, kcfg *rest.Config, proxyForApiserver bool) (*httputil.ReverseProxy, error) {
rp := httputil.NewSingleHostReverseProxy(target)
rp.ModifyResponse = deleteUpstreamCORSHeaders
if proxyForApiserver {
t, err := rest.TransportFor(kcfg)
if err != nil {
return nil, fmt.Errorf("unable to set HTTP Transport for the upstream. Details : %s", err.Error())
}
rp.Transport = t
}
return rp, nil
}
func getCORSHandler(handler http.Handler, corsCfg corsConfig) http.Handler {
return handlers.CORS(
handlers.AllowedOrigins(corsCfg.allowOrigin),
handlers.AllowedMethods(corsCfg.allowMethods),
handlers.AllowedHeaders(corsCfg.allowHeaders),
)(handler)
}
func deleteUpstreamCORSHeaders(r *http.Response) error {
for _, h := range corsHeaders {
r.Header.Del(h)
}
return nil
}
func setupOIDCAuthReloader(fileWatcherCtx context.Context, cfg *authn.OIDCConfig) (authenticator.Request, error) {
const eventBatchDelaySeconds = 10
filesToWatch := []string{cfg.CAFile}
cancelableAuthReqestConstructor := func() (authn.CancelableAuthRequest, error) {
glog.Infof("creating new cancelable instance of authenticator.Request...")
return authn.NewOIDCAuthenticator(cfg)
}
//Create reloader
result, err := reload.NewCancelableAuthReqestReloader(cancelableAuthReqestConstructor)
if err != nil {
return nil, err
}
//Setup file watcher
oidcCAFileWatcher := reload.NewWatcher("oidc-ca-dex-tls-cert", filesToWatch, eventBatchDelaySeconds, result.Reload)
go oidcCAFileWatcher.Run(fileWatcherCtx)
return result, nil
}
func setupTLSCertReloader(fileWatcherCtx context.Context, certFile, keyFile string) (*reload.TLSCertReloader, error) {
const eventBatchDelaySeconds = 10
tlsConstructor := func() (*tls.Certificate, error) {
glog.Infof("Creating new TLS Certificate from data files: %s, %s", certFile, keyFile)
res, err := tls.LoadX509KeyPair(certFile, keyFile)
return &res, err
}
//Create reloader
result, err := reload.NewTLSCertReloader(tlsConstructor)
if err != nil {
return nil, err
}
//Start file watcher for certificate files
tlsCertFileWatcher := reload.NewWatcher("main-tls-crt/key", []string{certFile, keyFile}, eventBatchDelaySeconds, result.Reload)
go tlsCertFileWatcher.Run(fileWatcherCtx)
return result, nil
}
| newReverseProxy | identifier_name |
main.go | package main
import (
"context"
"crypto/tls"
"flag"
"fmt"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/gorilla/handlers"
"github.com/kyma-project/kyma/components/apiserver-proxy/cmd/proxy/reload"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/monitoring"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/spdy"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/golang/glog"
"github.com/hkwi/h2c"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/authn"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/authz"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/proxy"
"github.com/spf13/pflag"
"golang.org/x/net/http2"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
certutil "k8s.io/client-go/util/cert"
cliflag "k8s.io/component-base/cli/flag"
)
const (
corsAllowOriginHeader = "Access-Control-Allow-Origin"
corsAllowMethodsHeader = "Access-Control-Allow-Methods"
corsAllowHeadersHeader = "Access-Control-Allow-Headers"
corsAllowCredentialsHeader = "Access-Control-Allow-Credentials"
)
var corsHeaders = []string{corsAllowOriginHeader, corsAllowMethodsHeader, corsAllowHeadersHeader, corsAllowCredentialsHeader}
type config struct {
insecureListenAddress string
secureListenAddress string
upstream string
upstreamForceH2C bool
auth proxy.Config
tls tlsConfig
kubeconfigLocation string
cors corsConfig
metricsListenAddress string
}
type tlsConfig struct {
certFile string
keyFile string
minVersion string
cipherSuites []string
}
type corsConfig struct {
allowHeaders []string
allowOrigin []string
allowMethods []string
}
var versions = map[string]uint16{
"VersionTLS10": tls.VersionTLS10,
"VersionTLS11": tls.VersionTLS11,
"VersionTLS12": tls.VersionTLS12,
}
func tlsVersion(versionName string) (uint16, error) |
func main() {
cfg := config{
auth: proxy.Config{
Authentication: &authn.AuthnConfig{
X509: &authn.X509Config{},
Header: &authn.AuthnHeaderConfig{},
OIDC: &authn.OIDCConfig{},
},
Authorization: &authz.Config{},
},
cors: corsConfig{},
}
flagset := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
// Add glog flags
flagset.AddGoFlagSet(flag.CommandLine)
// kube-rbac-proxy flags
flagset.StringVar(&cfg.insecureListenAddress, "insecure-listen-address", "", "The address the kube-rbac-proxy HTTP server should listen on.")
flagset.StringVar(&cfg.secureListenAddress, "secure-listen-address", "", "The address the kube-rbac-proxy HTTPs server should listen on.")
flagset.StringVar(&cfg.upstream, "upstream", "", "The upstream URL to proxy to once requests have successfully been authenticated and authorized.")
flagset.BoolVar(&cfg.upstreamForceH2C, "upstream-force-h2c", false, "Force h2c to communiate with the upstream. This is required when the upstream speaks h2c(http/2 cleartext - insecure variant of http/2) only. For example, go-grpc server in the insecure mode, such as helm's tiller w/o TLS, speaks h2c only")
flagset.StringVar(&cfg.auth.Authorization.ResourceAttributesFile, "resource-attributes-file", "", "File spec of attributes-record to use for SubjectAccessReview. If unspecified, requests will attempted to be verified through non-resource-url attributes in the SubjectAccessReview.")
// TLS flags
flagset.StringVar(&cfg.tls.certFile, "tls-cert-file", "", "File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert)")
flagset.StringVar(&cfg.tls.keyFile, "tls-private-key-file", "", "File containing the default x509 private key matching --tls-cert-file.")
flagset.StringVar(&cfg.tls.minVersion, "tls-min-version", "VersionTLS12", "Minimum TLS version supported. Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants.")
flagset.StringSliceVar(&cfg.tls.cipherSuites, "tls-cipher-suites", nil, "Comma-separated list of cipher suites for the server. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). If omitted, the default Go cipher suites will be used")
// Auth flags
flagset.StringVar(&cfg.auth.Authentication.X509.ClientCAFile, "client-ca-file", "", "If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.")
flagset.BoolVar(&cfg.auth.Authentication.Header.Enabled, "auth-header-fields-enabled", false, "When set to true, kube-rbac-proxy adds auth-related fields to the headers of http requests sent to the upstream")
flagset.StringVar(&cfg.auth.Authentication.Header.UserFieldName, "auth-header-user-field-name", "x-remote-user", "The name of the field inside a http(2) request header to tell the upstream server about the user's name")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupsFieldName, "auth-header-groups-field-name", "x-remote-groups", "The name of the field inside a http(2) request header to tell the upstream server about the user's groups")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupSeparator, "auth-header-groups-field-separator", "|", "The separator string used for concatenating multiple group names in a groups header field's value")
//Authn OIDC flags
flagset.StringVar(&cfg.auth.Authentication.OIDC.IssuerURL, "oidc-issuer", "", "The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).")
flagset.StringVar(&cfg.auth.Authentication.OIDC.ClientID, "oidc-clientID", "", "The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsClaim, "oidc-groups-claim", "groups", "Identifier of groups in JWT claim, by default set to 'groups'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.UsernameClaim, "oidc-username-claim", "email", "Identifier of the user in JWT claim, by default set to 'email'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsPrefix, "oidc-groups-prefix", "", "If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.")
flagset.StringArrayVar(&cfg.auth.Authentication.OIDC.SupportedSigningAlgs, "oidc-sign-alg", []string{"RS256"}, "Supported signing algorithms, default RS256")
flagset.StringVar(&cfg.auth.Authentication.OIDC.CAFile, "oidc-ca-file", "", "If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.")
//Kubeconfig flag
flagset.StringVar(&cfg.kubeconfigLocation, "kubeconfig", "", "Path to a kubeconfig file, specifying how to connect to the API server. If unset, in-cluster configuration will be used")
// CORS flags
flagset.StringSliceVar(&cfg.cors.allowOrigin, "cors-allow-origin", []string{"*"}, "List of CORS allowed origins")
flagset.StringSliceVar(&cfg.cors.allowMethods, "cors-allow-methods", []string{"GET", "POST", "PUT", "DELETE"}, "List of CORS allowed methods")
flagset.StringSliceVar(&cfg.cors.allowHeaders, "cors-allow-headers", []string{"Authorization", "Content-Type"}, "List of CORS allowed headers")
// Prometheus
flagset.StringVar(&cfg.metricsListenAddress, "metrics-listen-address", "", "The address the metric endpoint binds to.")
flagset.Parse(os.Args[1:])
kcfg := initKubeConfig(cfg.kubeconfigLocation)
upstreamURL, err := url.Parse(cfg.upstream)
if err != nil {
glog.Fatalf("Failed to build parse upstream URL: %v", err)
}
spdyMetrics := monitoring.NewSPDYMetrics()
spdyProxy := spdy.New(kcfg, upstreamURL, spdyMetrics)
kubeClient, err := kubernetes.NewForConfig(kcfg)
if err != nil {
glog.Fatalf("Failed to instantiate Kubernetes client: %v", err)
}
var oidcAuthenticator authenticator.Request
fileWatcherCtx, fileWatcherCtxCancel := context.WithCancel(context.Background())
// If OIDC configuration provided, use oidc authenticator
if cfg.auth.Authentication.OIDC.IssuerURL != "" {
oidcAuthenticator, err = setupOIDCAuthReloader(fileWatcherCtx, cfg.auth.Authentication.OIDC)
if err != nil {
glog.Fatalf("Failed to instantiate OIDC authenticator: %v", err)
}
} else {
//Use Delegating authenticator
tokenClient := kubeClient.AuthenticationV1().TokenReviews()
oidcAuthenticator, err = authn.NewDelegatingAuthenticator(tokenClient, cfg.auth.Authentication)
if err != nil {
glog.Fatalf("Failed to instantiate delegating authenticator: %v", err)
}
}
metrics, err := monitoring.NewProxyMetrics()
if err != nil {
glog.Fatalf("Failed to create metrics: %v", err)
}
authProxy := proxy.New(cfg.auth, nil, oidcAuthenticator, metrics)
if err != nil {
glog.Fatalf("Failed to create rbac-proxy: %v", err)
}
proxyForApiserver := strings.Contains(cfg.upstream, proxy.KUBERNETES_SERVICE)
rp, err := newReverseProxy(upstreamURL, kcfg, proxyForApiserver)
if err != nil {
glog.Fatalf("Unable to create reverse proxy, %s", err)
}
//Prometheus
prometheusRegistry := prometheus.NewRegistry()
err = prometheusRegistry.Register(prometheus.NewGoCollector())
if err != nil {
glog.Fatalf("failed to register Go runtime metrics: %v", err)
}
err = prometheusRegistry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
if err != nil {
glog.Fatalf("failed to register process metrics: %v", err)
}
mux := http.NewServeMux()
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
ok := authProxy.Handle(w, req)
if !ok {
return
}
if spdyProxy.IsSpdyRequest(req) {
spdyProxy.ServeHTTP(w, req)
} else {
rp.ServeHTTP(w, req)
}
}))
if cfg.secureListenAddress != "" {
srv := &http.Server{Handler: getCORSHandler(mux, cfg.cors)}
if cfg.tls.certFile == "" && cfg.tls.keyFile == "" {
glog.Info("Generating self signed cert as no cert is provided")
certBytes, keyBytes, err := certutil.GenerateSelfSignedCertKey("", nil, nil)
if err != nil {
glog.Fatalf("Failed to generate self signed cert and key: %v", err)
}
cert, err := tls.X509KeyPair(certBytes, keyBytes)
if err != nil {
glog.Fatalf("Failed to load generated self signed cert and key: %v", err)
}
version, err := tlsVersion(cfg.tls.minVersion)
if err != nil {
glog.Fatalf("TLS version invalid: %v", err)
}
cipherSuiteIDs, err := cliflag.TLSCipherSuites(cfg.tls.cipherSuites)
if err != nil {
glog.Fatalf("Failed to convert TLS cipher suite name to ID: %v", err)
}
srv.TLSConfig = &tls.Config{
CipherSuites: cipherSuiteIDs,
Certificates: []tls.Certificate{cert},
MinVersion: version,
// To enable http/2
// See net/http.Server.shouldConfigureHTTP2ForServe for more context
NextProtos: []string{"h2"},
}
} else {
certReloader, err := setupTLSCertReloader(fileWatcherCtx, cfg.tls.certFile, cfg.tls.keyFile)
if err != nil {
glog.Fatalf("Failed to create ReloadableTLSCertProvider: %v", err)
}
//Configure srv with GetCertificate function
srv.TLSConfig = &tls.Config{
GetCertificate: certReloader.GetCertificateFunc,
}
}
l, err := net.Listen("tcp", cfg.secureListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on secure address: %v", err)
}
glog.Infof("Listening securely on %v", cfg.secureListenAddress)
go srv.ServeTLS(l, "", "")
}
if cfg.metricsListenAddress != "" {
srv := &http.Server{Handler: promhttp.Handler()}
l, err := net.Listen("tcp", cfg.metricsListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on insecure address: %v", err)
}
glog.Infof("Listening for metrics on %v", cfg.metricsListenAddress)
go srv.Serve(l)
}
if cfg.insecureListenAddress != "" {
if cfg.upstreamForceH2C && !proxyForApiserver {
// Force http/2 for connections to the upstream i.e. do not start with HTTP1.1 UPGRADE req to
// initialize http/2 session.
// See https://github.com/golang/go/issues/14141#issuecomment-219212895 for more context
rp.Transport = &http2.Transport{
// Allow http schema. This doesn't automatically disable TLS
AllowHTTP: true,
// Do disable TLS.
// In combination with the schema check above. We could enforce h2c against the upstream server
DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) {
return net.Dial(netw, addr)
},
}
}
// Background:
//
// golang's http2 server doesn't support h2c
// https://github.com/golang/go/issues/16696
//
//
// Action:
//
// Use hkwi/h2c so that you can properly handle HTTP Upgrade requests over plain TCP,
// which is one of consequences for a h2c support.
//
// See https://github.com/golang/go/issues/14141 for more context.
//
// Possible alternative:
//
// We could potentially use grpc-go server's HTTP handler support
// which would handle HTTP UPGRADE from http1.1 to http/2, especially in case
// what you wanted kube-rbac-proxy to authn/authz was gRPC over h2c calls.
//
// Note that golang's http server requires a client(including gRPC) to send HTTP Upgrade req to
// property start http/2.
//
// but it isn't straight-forward to understand.
// Also note that at time of writing this, grpc-go's server implementation still lacks
// a h2c support for communication against the upstream.
//
// See belows for more information:
// - https://github.com/grpc/grpc-go/pull/1406/files
// - https://github.com/grpc/grpc-go/issues/549#issuecomment-191458335
// - https://github.com/golang/go/issues/14141#issuecomment-176465220
h2cHandler := &h2c.Server{Handler: mux}
srv := &http.Server{Handler: h2cHandler}
l, err := net.Listen("tcp", cfg.insecureListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on insecure address: %v", err)
}
glog.Infof("Listening insecurely on %v", cfg.insecureListenAddress)
go srv.Serve(l)
}
term := make(chan os.Signal)
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
select {
case <-term:
glog.Info("Received SIGTERM, exiting gracefully...")
fileWatcherCtxCancel()
}
//Allow for file watchers to close gracefully
time.Sleep(1 * time.Second)
}
// Returns intiliazed config, allows local usage (outside cluster) based on provided kubeconfig or in-cluter
func initKubeConfig(kcLocation string) *rest.Config {
if kcLocation != "" {
kubeConfig, err := clientcmd.BuildConfigFromFlags("", kcLocation)
if err != nil {
glog.Fatalf("unable to build rest config based on provided path to kubeconfig file %s", err.Error())
}
return kubeConfig
}
kubeConfig, err := rest.InClusterConfig()
if err != nil {
glog.Fatal("cannot find Service Account in pod to build in-cluster rest config")
}
return kubeConfig
}
func newReverseProxy(target *url.URL, kcfg *rest.Config, proxyForApiserver bool) (*httputil.ReverseProxy, error) {
rp := httputil.NewSingleHostReverseProxy(target)
rp.ModifyResponse = deleteUpstreamCORSHeaders
if proxyForApiserver {
t, err := rest.TransportFor(kcfg)
if err != nil {
return nil, fmt.Errorf("unable to set HTTP Transport for the upstream. Details : %s", err.Error())
}
rp.Transport = t
}
return rp, nil
}
func getCORSHandler(handler http.Handler, corsCfg corsConfig) http.Handler {
return handlers.CORS(
handlers.AllowedOrigins(corsCfg.allowOrigin),
handlers.AllowedMethods(corsCfg.allowMethods),
handlers.AllowedHeaders(corsCfg.allowHeaders),
)(handler)
}
func deleteUpstreamCORSHeaders(r *http.Response) error {
for _, h := range corsHeaders {
r.Header.Del(h)
}
return nil
}
func setupOIDCAuthReloader(fileWatcherCtx context.Context, cfg *authn.OIDCConfig) (authenticator.Request, error) {
const eventBatchDelaySeconds = 10
filesToWatch := []string{cfg.CAFile}
cancelableAuthReqestConstructor := func() (authn.CancelableAuthRequest, error) {
glog.Infof("creating new cancelable instance of authenticator.Request...")
return authn.NewOIDCAuthenticator(cfg)
}
//Create reloader
result, err := reload.NewCancelableAuthReqestReloader(cancelableAuthReqestConstructor)
if err != nil {
return nil, err
}
//Setup file watcher
oidcCAFileWatcher := reload.NewWatcher("oidc-ca-dex-tls-cert", filesToWatch, eventBatchDelaySeconds, result.Reload)
go oidcCAFileWatcher.Run(fileWatcherCtx)
return result, nil
}
func setupTLSCertReloader(fileWatcherCtx context.Context, certFile, keyFile string) (*reload.TLSCertReloader, error) {
const eventBatchDelaySeconds = 10
tlsConstructor := func() (*tls.Certificate, error) {
glog.Infof("Creating new TLS Certificate from data files: %s, %s", certFile, keyFile)
res, err := tls.LoadX509KeyPair(certFile, keyFile)
return &res, err
}
//Create reloader
result, err := reload.NewTLSCertReloader(tlsConstructor)
if err != nil {
return nil, err
}
//Start file watcher for certificate files
tlsCertFileWatcher := reload.NewWatcher("main-tls-crt/key", []string{certFile, keyFile}, eventBatchDelaySeconds, result.Reload)
go tlsCertFileWatcher.Run(fileWatcherCtx)
return result, nil
}
| {
if version, ok := versions[versionName]; ok {
return version, nil
}
return 0, fmt.Errorf("unknown tls version %q", versionName)
} | identifier_body |
main.go | package main
import (
"context"
"crypto/tls"
"flag"
"fmt"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/gorilla/handlers"
"github.com/kyma-project/kyma/components/apiserver-proxy/cmd/proxy/reload"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/monitoring"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/spdy"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/golang/glog"
"github.com/hkwi/h2c"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/authn"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/authz"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/proxy"
"github.com/spf13/pflag"
"golang.org/x/net/http2"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
certutil "k8s.io/client-go/util/cert"
cliflag "k8s.io/component-base/cli/flag"
)
const (
corsAllowOriginHeader = "Access-Control-Allow-Origin"
corsAllowMethodsHeader = "Access-Control-Allow-Methods"
corsAllowHeadersHeader = "Access-Control-Allow-Headers"
corsAllowCredentialsHeader = "Access-Control-Allow-Credentials"
)
var corsHeaders = []string{corsAllowOriginHeader, corsAllowMethodsHeader, corsAllowHeadersHeader, corsAllowCredentialsHeader}
type config struct {
insecureListenAddress string
secureListenAddress string
upstream string
upstreamForceH2C bool
auth proxy.Config
tls tlsConfig
kubeconfigLocation string
cors corsConfig
metricsListenAddress string
}
type tlsConfig struct {
certFile string
keyFile string
minVersion string
cipherSuites []string
}
type corsConfig struct {
allowHeaders []string
allowOrigin []string
allowMethods []string
}
var versions = map[string]uint16{
"VersionTLS10": tls.VersionTLS10,
"VersionTLS11": tls.VersionTLS11,
"VersionTLS12": tls.VersionTLS12,
}
func tlsVersion(versionName string) (uint16, error) {
if version, ok := versions[versionName]; ok {
return version, nil
}
return 0, fmt.Errorf("unknown tls version %q", versionName)
}
func main() {
cfg := config{
auth: proxy.Config{
Authentication: &authn.AuthnConfig{
X509: &authn.X509Config{},
Header: &authn.AuthnHeaderConfig{},
OIDC: &authn.OIDCConfig{},
},
Authorization: &authz.Config{},
},
cors: corsConfig{},
}
flagset := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
// Add glog flags
flagset.AddGoFlagSet(flag.CommandLine)
// kube-rbac-proxy flags
flagset.StringVar(&cfg.insecureListenAddress, "insecure-listen-address", "", "The address the kube-rbac-proxy HTTP server should listen on.")
flagset.StringVar(&cfg.secureListenAddress, "secure-listen-address", "", "The address the kube-rbac-proxy HTTPs server should listen on.")
flagset.StringVar(&cfg.upstream, "upstream", "", "The upstream URL to proxy to once requests have successfully been authenticated and authorized.")
flagset.BoolVar(&cfg.upstreamForceH2C, "upstream-force-h2c", false, "Force h2c to communiate with the upstream. This is required when the upstream speaks h2c(http/2 cleartext - insecure variant of http/2) only. For example, go-grpc server in the insecure mode, such as helm's tiller w/o TLS, speaks h2c only")
flagset.StringVar(&cfg.auth.Authorization.ResourceAttributesFile, "resource-attributes-file", "", "File spec of attributes-record to use for SubjectAccessReview. If unspecified, requests will attempted to be verified through non-resource-url attributes in the SubjectAccessReview.")
// TLS flags
flagset.StringVar(&cfg.tls.certFile, "tls-cert-file", "", "File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert)")
flagset.StringVar(&cfg.tls.keyFile, "tls-private-key-file", "", "File containing the default x509 private key matching --tls-cert-file.")
flagset.StringVar(&cfg.tls.minVersion, "tls-min-version", "VersionTLS12", "Minimum TLS version supported. Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants.")
flagset.StringSliceVar(&cfg.tls.cipherSuites, "tls-cipher-suites", nil, "Comma-separated list of cipher suites for the server. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). If omitted, the default Go cipher suites will be used")
// Auth flags
flagset.StringVar(&cfg.auth.Authentication.X509.ClientCAFile, "client-ca-file", "", "If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.")
flagset.BoolVar(&cfg.auth.Authentication.Header.Enabled, "auth-header-fields-enabled", false, "When set to true, kube-rbac-proxy adds auth-related fields to the headers of http requests sent to the upstream")
flagset.StringVar(&cfg.auth.Authentication.Header.UserFieldName, "auth-header-user-field-name", "x-remote-user", "The name of the field inside a http(2) request header to tell the upstream server about the user's name")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupsFieldName, "auth-header-groups-field-name", "x-remote-groups", "The name of the field inside a http(2) request header to tell the upstream server about the user's groups")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupSeparator, "auth-header-groups-field-separator", "|", "The separator string used for concatenating multiple group names in a groups header field's value")
//Authn OIDC flags
flagset.StringVar(&cfg.auth.Authentication.OIDC.IssuerURL, "oidc-issuer", "", "The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).")
flagset.StringVar(&cfg.auth.Authentication.OIDC.ClientID, "oidc-clientID", "", "The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsClaim, "oidc-groups-claim", "groups", "Identifier of groups in JWT claim, by default set to 'groups'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.UsernameClaim, "oidc-username-claim", "email", "Identifier of the user in JWT claim, by default set to 'email'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsPrefix, "oidc-groups-prefix", "", "If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.")
flagset.StringArrayVar(&cfg.auth.Authentication.OIDC.SupportedSigningAlgs, "oidc-sign-alg", []string{"RS256"}, "Supported signing algorithms, default RS256")
flagset.StringVar(&cfg.auth.Authentication.OIDC.CAFile, "oidc-ca-file", "", "If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.")
//Kubeconfig flag
flagset.StringVar(&cfg.kubeconfigLocation, "kubeconfig", "", "Path to a kubeconfig file, specifying how to connect to the API server. If unset, in-cluster configuration will be used")
// CORS flags
flagset.StringSliceVar(&cfg.cors.allowOrigin, "cors-allow-origin", []string{"*"}, "List of CORS allowed origins")
flagset.StringSliceVar(&cfg.cors.allowMethods, "cors-allow-methods", []string{"GET", "POST", "PUT", "DELETE"}, "List of CORS allowed methods")
flagset.StringSliceVar(&cfg.cors.allowHeaders, "cors-allow-headers", []string{"Authorization", "Content-Type"}, "List of CORS allowed headers")
// Prometheus
flagset.StringVar(&cfg.metricsListenAddress, "metrics-listen-address", "", "The address the metric endpoint binds to.")
flagset.Parse(os.Args[1:])
kcfg := initKubeConfig(cfg.kubeconfigLocation)
upstreamURL, err := url.Parse(cfg.upstream)
if err != nil {
glog.Fatalf("Failed to build parse upstream URL: %v", err)
}
spdyMetrics := monitoring.NewSPDYMetrics()
spdyProxy := spdy.New(kcfg, upstreamURL, spdyMetrics)
kubeClient, err := kubernetes.NewForConfig(kcfg)
if err != nil {
glog.Fatalf("Failed to instantiate Kubernetes client: %v", err)
}
var oidcAuthenticator authenticator.Request
fileWatcherCtx, fileWatcherCtxCancel := context.WithCancel(context.Background())
// If OIDC configuration provided, use oidc authenticator
if cfg.auth.Authentication.OIDC.IssuerURL != "" {
oidcAuthenticator, err = setupOIDCAuthReloader(fileWatcherCtx, cfg.auth.Authentication.OIDC)
if err != nil {
glog.Fatalf("Failed to instantiate OIDC authenticator: %v", err)
}
} else {
//Use Delegating authenticator
tokenClient := kubeClient.AuthenticationV1().TokenReviews()
oidcAuthenticator, err = authn.NewDelegatingAuthenticator(tokenClient, cfg.auth.Authentication)
if err != nil {
glog.Fatalf("Failed to instantiate delegating authenticator: %v", err)
}
}
metrics, err := monitoring.NewProxyMetrics()
if err != nil {
glog.Fatalf("Failed to create metrics: %v", err)
}
authProxy := proxy.New(cfg.auth, nil, oidcAuthenticator, metrics)
if err != nil {
glog.Fatalf("Failed to create rbac-proxy: %v", err)
}
proxyForApiserver := strings.Contains(cfg.upstream, proxy.KUBERNETES_SERVICE)
rp, err := newReverseProxy(upstreamURL, kcfg, proxyForApiserver)
if err != nil {
glog.Fatalf("Unable to create reverse proxy, %s", err)
}
//Prometheus
prometheusRegistry := prometheus.NewRegistry()
err = prometheusRegistry.Register(prometheus.NewGoCollector())
if err != nil {
glog.Fatalf("failed to register Go runtime metrics: %v", err)
}
err = prometheusRegistry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
if err != nil {
glog.Fatalf("failed to register process metrics: %v", err)
}
mux := http.NewServeMux()
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
ok := authProxy.Handle(w, req)
if !ok {
return
}
if spdyProxy.IsSpdyRequest(req) {
spdyProxy.ServeHTTP(w, req)
} else {
rp.ServeHTTP(w, req)
}
}))
if cfg.secureListenAddress != "" {
srv := &http.Server{Handler: getCORSHandler(mux, cfg.cors)}
if cfg.tls.certFile == "" && cfg.tls.keyFile == "" {
glog.Info("Generating self signed cert as no cert is provided")
certBytes, keyBytes, err := certutil.GenerateSelfSignedCertKey("", nil, nil)
if err != nil |
cert, err := tls.X509KeyPair(certBytes, keyBytes)
if err != nil {
glog.Fatalf("Failed to load generated self signed cert and key: %v", err)
}
version, err := tlsVersion(cfg.tls.minVersion)
if err != nil {
glog.Fatalf("TLS version invalid: %v", err)
}
cipherSuiteIDs, err := cliflag.TLSCipherSuites(cfg.tls.cipherSuites)
if err != nil {
glog.Fatalf("Failed to convert TLS cipher suite name to ID: %v", err)
}
srv.TLSConfig = &tls.Config{
CipherSuites: cipherSuiteIDs,
Certificates: []tls.Certificate{cert},
MinVersion: version,
// To enable http/2
// See net/http.Server.shouldConfigureHTTP2ForServe for more context
NextProtos: []string{"h2"},
}
} else {
certReloader, err := setupTLSCertReloader(fileWatcherCtx, cfg.tls.certFile, cfg.tls.keyFile)
if err != nil {
glog.Fatalf("Failed to create ReloadableTLSCertProvider: %v", err)
}
//Configure srv with GetCertificate function
srv.TLSConfig = &tls.Config{
GetCertificate: certReloader.GetCertificateFunc,
}
}
l, err := net.Listen("tcp", cfg.secureListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on secure address: %v", err)
}
glog.Infof("Listening securely on %v", cfg.secureListenAddress)
go srv.ServeTLS(l, "", "")
}
if cfg.metricsListenAddress != "" {
srv := &http.Server{Handler: promhttp.Handler()}
l, err := net.Listen("tcp", cfg.metricsListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on insecure address: %v", err)
}
glog.Infof("Listening for metrics on %v", cfg.metricsListenAddress)
go srv.Serve(l)
}
if cfg.insecureListenAddress != "" {
if cfg.upstreamForceH2C && !proxyForApiserver {
// Force http/2 for connections to the upstream i.e. do not start with HTTP1.1 UPGRADE req to
// initialize http/2 session.
// See https://github.com/golang/go/issues/14141#issuecomment-219212895 for more context
rp.Transport = &http2.Transport{
// Allow http schema. This doesn't automatically disable TLS
AllowHTTP: true,
// Do disable TLS.
// In combination with the schema check above. We could enforce h2c against the upstream server
DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) {
return net.Dial(netw, addr)
},
}
}
// Background:
//
// golang's http2 server doesn't support h2c
// https://github.com/golang/go/issues/16696
//
//
// Action:
//
// Use hkwi/h2c so that you can properly handle HTTP Upgrade requests over plain TCP,
// which is one of consequences for a h2c support.
//
// See https://github.com/golang/go/issues/14141 for more context.
//
// Possible alternative:
//
// We could potentially use grpc-go server's HTTP handler support
// which would handle HTTP UPGRADE from http1.1 to http/2, especially in case
// what you wanted kube-rbac-proxy to authn/authz was gRPC over h2c calls.
//
// Note that golang's http server requires a client(including gRPC) to send HTTP Upgrade req to
// property start http/2.
//
// but it isn't straight-forward to understand.
// Also note that at time of writing this, grpc-go's server implementation still lacks
// a h2c support for communication against the upstream.
//
// See belows for more information:
// - https://github.com/grpc/grpc-go/pull/1406/files
// - https://github.com/grpc/grpc-go/issues/549#issuecomment-191458335
// - https://github.com/golang/go/issues/14141#issuecomment-176465220
h2cHandler := &h2c.Server{Handler: mux}
srv := &http.Server{Handler: h2cHandler}
l, err := net.Listen("tcp", cfg.insecureListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on insecure address: %v", err)
}
glog.Infof("Listening insecurely on %v", cfg.insecureListenAddress)
go srv.Serve(l)
}
term := make(chan os.Signal)
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
select {
case <-term:
glog.Info("Received SIGTERM, exiting gracefully...")
fileWatcherCtxCancel()
}
//Allow for file watchers to close gracefully
time.Sleep(1 * time.Second)
}
// initKubeConfig returns an initialized Kubernetes REST config. A non-empty
// kcLocation (path to a kubeconfig file) enables local usage outside the
// cluster; otherwise the in-cluster service-account config is used.
// Any failure aborts the process.
func initKubeConfig(kcLocation string) *rest.Config {
	if kcLocation == "" {
		restConfig, err := rest.InClusterConfig()
		if err != nil {
			glog.Fatal("cannot find Service Account in pod to build in-cluster rest config")
		}
		return restConfig
	}
	restConfig, err := clientcmd.BuildConfigFromFlags("", kcLocation)
	if err != nil {
		glog.Fatalf("unable to build rest config based on provided path to kubeconfig file %s", err.Error())
	}
	return restConfig
}
// newReverseProxy builds a single-host reverse proxy for target. Upstream
// CORS headers are stripped from every response (deleteUpstreamCORSHeaders)
// so that this proxy's own CORS configuration is authoritative. When
// proxyForApiserver is true, the transport is derived from kcfg so requests
// to the apiserver carry its auth/TLS settings.
func newReverseProxy(target *url.URL, kcfg *rest.Config, proxyForApiserver bool) (*httputil.ReverseProxy, error) {
	reverseProxy := httputil.NewSingleHostReverseProxy(target)
	reverseProxy.ModifyResponse = deleteUpstreamCORSHeaders
	if !proxyForApiserver {
		return reverseProxy, nil
	}
	transport, err := rest.TransportFor(kcfg)
	if err != nil {
		return nil, fmt.Errorf("unable to set HTTP Transport for the upstream. Details : %s", err.Error())
	}
	reverseProxy.Transport = transport
	return reverseProxy, nil
}
// getCORSHandler wraps handler with gorilla/handlers CORS middleware
// configured from corsCfg (allowed origins, methods and headers).
func getCORSHandler(handler http.Handler, corsCfg corsConfig) http.Handler {
	opts := []handlers.CORSOption{
		handlers.AllowedOrigins(corsCfg.allowOrigin),
		handlers.AllowedMethods(corsCfg.allowMethods),
		handlers.AllowedHeaders(corsCfg.allowHeaders),
	}
	return handlers.CORS(opts...)(handler)
}
// deleteUpstreamCORSHeaders strips any CORS headers set by the upstream so
// that only this proxy's CORS middleware decides what the client sees.
// It always returns nil; it is installed as the reverse proxy's
// ModifyResponse hook.
func deleteUpstreamCORSHeaders(r *http.Response) error {
	for i := range corsHeaders {
		r.Header.Del(corsHeaders[i])
	}
	return nil
}
// setupOIDCAuthReloader creates an OIDC authenticator whose underlying
// instance is rebuilt whenever the configured CA file changes on disk.
// A watcher goroutine (stopped via fileWatcherCtx) batches file-change
// events and then triggers the reloader, which constructs a fresh
// authenticator via cfg.
func setupOIDCAuthReloader(fileWatcherCtx context.Context, cfg *authn.OIDCConfig) (authenticator.Request, error) {
	// Batch window (seconds) for coalescing bursts of file-change events.
	const eventBatchDelaySeconds = 10
	filesToWatch := []string{cfg.CAFile}
	// Constructor invoked on every reload to build a replacement
	// (cancelable) authenticator from the same OIDC config.
	cancelableAuthReqestConstructor := func() (authn.CancelableAuthRequest, error) {
		glog.Infof("creating new cancelable instance of authenticator.Request...")
		return authn.NewOIDCAuthenticator(cfg)
	}
	//Create reloader
	result, err := reload.NewCancelableAuthReqestReloader(cancelableAuthReqestConstructor)
	if err != nil {
		return nil, err
	}
	//Setup file watcher
	oidcCAFileWatcher := reload.NewWatcher("oidc-ca-dex-tls-cert", filesToWatch, eventBatchDelaySeconds, result.Reload)
	go oidcCAFileWatcher.Run(fileWatcherCtx)
	return result, nil
}
// setupTLSCertReloader returns a TLS certificate provider that reloads the
// key pair from certFile/keyFile whenever either file changes on disk.
// A watcher goroutine (stopped via fileWatcherCtx) batches change events
// and then triggers the reload.
func setupTLSCertReloader(fileWatcherCtx context.Context, certFile, keyFile string) (*reload.TLSCertReloader, error) {
	// Batch window (seconds) for coalescing bursts of file-change events.
	const eventBatchDelaySeconds = 10
	// Constructor invoked on every reload to re-read the key pair.
	tlsConstructor := func() (*tls.Certificate, error) {
		glog.Infof("Creating new TLS Certificate from data files: %s, %s", certFile, keyFile)
		res, err := tls.LoadX509KeyPair(certFile, keyFile)
		return &res, err
	}
	//Create reloader
	result, err := reload.NewTLSCertReloader(tlsConstructor)
	if err != nil {
		return nil, err
	}
	//Start file watcher for certificate files
	tlsCertFileWatcher := reload.NewWatcher("main-tls-crt/key", []string{certFile, keyFile}, eventBatchDelaySeconds, result.Reload)
	go tlsCertFileWatcher.Run(fileWatcherCtx)
	return result, nil
}
| {
glog.Fatalf("Failed to generate self signed cert and key: %v", err)
} | conditional_block |
main.go | package main
import (
"context"
"crypto/tls"
"flag"
"fmt"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/gorilla/handlers"
"github.com/kyma-project/kyma/components/apiserver-proxy/cmd/proxy/reload"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/monitoring"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/spdy"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/golang/glog"
"github.com/hkwi/h2c"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/authn"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/authz"
"github.com/kyma-project/kyma/components/apiserver-proxy/internal/proxy"
"github.com/spf13/pflag"
"golang.org/x/net/http2"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
certutil "k8s.io/client-go/util/cert"
cliflag "k8s.io/component-base/cli/flag"
)
const (
corsAllowOriginHeader = "Access-Control-Allow-Origin"
corsAllowMethodsHeader = "Access-Control-Allow-Methods"
corsAllowHeadersHeader = "Access-Control-Allow-Headers"
corsAllowCredentialsHeader = "Access-Control-Allow-Credentials"
)
var corsHeaders = []string{corsAllowOriginHeader, corsAllowMethodsHeader, corsAllowHeadersHeader, corsAllowCredentialsHeader}
type config struct {
insecureListenAddress string
secureListenAddress string
upstream string
upstreamForceH2C bool
auth proxy.Config
tls tlsConfig
kubeconfigLocation string
cors corsConfig
metricsListenAddress string
}
type tlsConfig struct {
certFile string
keyFile string
minVersion string
cipherSuites []string
}
type corsConfig struct {
allowHeaders []string
allowOrigin []string
allowMethods []string
}
// versions maps the supported --tls-min-version flag values to their
// crypto/tls protocol-version constants.
var versions = map[string]uint16{
	"VersionTLS10": tls.VersionTLS10,
	"VersionTLS11": tls.VersionTLS11,
	"VersionTLS12": tls.VersionTLS12,
}

// tlsVersion resolves a TLS version name (e.g. "VersionTLS12") to its
// crypto/tls constant, or returns an error for unrecognized names.
func tlsVersion(versionName string) (uint16, error) {
	version, known := versions[versionName]
	if !known {
		return 0, fmt.Errorf("unknown tls version %q", versionName)
	}
	return version, nil
}
func main() {
cfg := config{
auth: proxy.Config{
Authentication: &authn.AuthnConfig{
X509: &authn.X509Config{},
Header: &authn.AuthnHeaderConfig{},
OIDC: &authn.OIDCConfig{},
},
Authorization: &authz.Config{},
},
cors: corsConfig{},
}
flagset := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
// Add glog flags
flagset.AddGoFlagSet(flag.CommandLine)
// kube-rbac-proxy flags
flagset.StringVar(&cfg.insecureListenAddress, "insecure-listen-address", "", "The address the kube-rbac-proxy HTTP server should listen on.")
flagset.StringVar(&cfg.secureListenAddress, "secure-listen-address", "", "The address the kube-rbac-proxy HTTPs server should listen on.")
flagset.StringVar(&cfg.upstream, "upstream", "", "The upstream URL to proxy to once requests have successfully been authenticated and authorized.")
flagset.BoolVar(&cfg.upstreamForceH2C, "upstream-force-h2c", false, "Force h2c to communiate with the upstream. This is required when the upstream speaks h2c(http/2 cleartext - insecure variant of http/2) only. For example, go-grpc server in the insecure mode, such as helm's tiller w/o TLS, speaks h2c only")
flagset.StringVar(&cfg.auth.Authorization.ResourceAttributesFile, "resource-attributes-file", "", "File spec of attributes-record to use for SubjectAccessReview. If unspecified, requests will attempted to be verified through non-resource-url attributes in the SubjectAccessReview.")
// TLS flags
flagset.StringVar(&cfg.tls.certFile, "tls-cert-file", "", "File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert)")
flagset.StringVar(&cfg.tls.keyFile, "tls-private-key-file", "", "File containing the default x509 private key matching --tls-cert-file.")
flagset.StringVar(&cfg.tls.minVersion, "tls-min-version", "VersionTLS12", "Minimum TLS version supported. Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants.")
flagset.StringSliceVar(&cfg.tls.cipherSuites, "tls-cipher-suites", nil, "Comma-separated list of cipher suites for the server. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). If omitted, the default Go cipher suites will be used")
// Auth flags
flagset.StringVar(&cfg.auth.Authentication.X509.ClientCAFile, "client-ca-file", "", "If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.")
flagset.BoolVar(&cfg.auth.Authentication.Header.Enabled, "auth-header-fields-enabled", false, "When set to true, kube-rbac-proxy adds auth-related fields to the headers of http requests sent to the upstream")
flagset.StringVar(&cfg.auth.Authentication.Header.UserFieldName, "auth-header-user-field-name", "x-remote-user", "The name of the field inside a http(2) request header to tell the upstream server about the user's name")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupsFieldName, "auth-header-groups-field-name", "x-remote-groups", "The name of the field inside a http(2) request header to tell the upstream server about the user's groups")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupSeparator, "auth-header-groups-field-separator", "|", "The separator string used for concatenating multiple group names in a groups header field's value")
//Authn OIDC flags
flagset.StringVar(&cfg.auth.Authentication.OIDC.IssuerURL, "oidc-issuer", "", "The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).")
flagset.StringVar(&cfg.auth.Authentication.OIDC.ClientID, "oidc-clientID", "", "The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsClaim, "oidc-groups-claim", "groups", "Identifier of groups in JWT claim, by default set to 'groups'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.UsernameClaim, "oidc-username-claim", "email", "Identifier of the user in JWT claim, by default set to 'email'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsPrefix, "oidc-groups-prefix", "", "If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.")
flagset.StringArrayVar(&cfg.auth.Authentication.OIDC.SupportedSigningAlgs, "oidc-sign-alg", []string{"RS256"}, "Supported signing algorithms, default RS256")
flagset.StringVar(&cfg.auth.Authentication.OIDC.CAFile, "oidc-ca-file", "", "If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.")
//Kubeconfig flag
flagset.StringVar(&cfg.kubeconfigLocation, "kubeconfig", "", "Path to a kubeconfig file, specifying how to connect to the API server. If unset, in-cluster configuration will be used")
// CORS flags
flagset.StringSliceVar(&cfg.cors.allowOrigin, "cors-allow-origin", []string{"*"}, "List of CORS allowed origins")
flagset.StringSliceVar(&cfg.cors.allowMethods, "cors-allow-methods", []string{"GET", "POST", "PUT", "DELETE"}, "List of CORS allowed methods")
flagset.StringSliceVar(&cfg.cors.allowHeaders, "cors-allow-headers", []string{"Authorization", "Content-Type"}, "List of CORS allowed headers")
// Prometheus
flagset.StringVar(&cfg.metricsListenAddress, "metrics-listen-address", "", "The address the metric endpoint binds to.")
flagset.Parse(os.Args[1:])
kcfg := initKubeConfig(cfg.kubeconfigLocation)
upstreamURL, err := url.Parse(cfg.upstream) | glog.Fatalf("Failed to build parse upstream URL: %v", err)
}
spdyMetrics := monitoring.NewSPDYMetrics()
spdyProxy := spdy.New(kcfg, upstreamURL, spdyMetrics)
kubeClient, err := kubernetes.NewForConfig(kcfg)
if err != nil {
glog.Fatalf("Failed to instantiate Kubernetes client: %v", err)
}
var oidcAuthenticator authenticator.Request
fileWatcherCtx, fileWatcherCtxCancel := context.WithCancel(context.Background())
// If OIDC configuration provided, use oidc authenticator
if cfg.auth.Authentication.OIDC.IssuerURL != "" {
oidcAuthenticator, err = setupOIDCAuthReloader(fileWatcherCtx, cfg.auth.Authentication.OIDC)
if err != nil {
glog.Fatalf("Failed to instantiate OIDC authenticator: %v", err)
}
} else {
//Use Delegating authenticator
tokenClient := kubeClient.AuthenticationV1().TokenReviews()
oidcAuthenticator, err = authn.NewDelegatingAuthenticator(tokenClient, cfg.auth.Authentication)
if err != nil {
glog.Fatalf("Failed to instantiate delegating authenticator: %v", err)
}
}
metrics, err := monitoring.NewProxyMetrics()
if err != nil {
glog.Fatalf("Failed to create metrics: %v", err)
}
authProxy := proxy.New(cfg.auth, nil, oidcAuthenticator, metrics)
if err != nil {
glog.Fatalf("Failed to create rbac-proxy: %v", err)
}
proxyForApiserver := strings.Contains(cfg.upstream, proxy.KUBERNETES_SERVICE)
rp, err := newReverseProxy(upstreamURL, kcfg, proxyForApiserver)
if err != nil {
glog.Fatalf("Unable to create reverse proxy, %s", err)
}
//Prometheus
prometheusRegistry := prometheus.NewRegistry()
err = prometheusRegistry.Register(prometheus.NewGoCollector())
if err != nil {
glog.Fatalf("failed to register Go runtime metrics: %v", err)
}
err = prometheusRegistry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
if err != nil {
glog.Fatalf("failed to register process metrics: %v", err)
}
mux := http.NewServeMux()
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
ok := authProxy.Handle(w, req)
if !ok {
return
}
if spdyProxy.IsSpdyRequest(req) {
spdyProxy.ServeHTTP(w, req)
} else {
rp.ServeHTTP(w, req)
}
}))
if cfg.secureListenAddress != "" {
srv := &http.Server{Handler: getCORSHandler(mux, cfg.cors)}
if cfg.tls.certFile == "" && cfg.tls.keyFile == "" {
glog.Info("Generating self signed cert as no cert is provided")
certBytes, keyBytes, err := certutil.GenerateSelfSignedCertKey("", nil, nil)
if err != nil {
glog.Fatalf("Failed to generate self signed cert and key: %v", err)
}
cert, err := tls.X509KeyPair(certBytes, keyBytes)
if err != nil {
glog.Fatalf("Failed to load generated self signed cert and key: %v", err)
}
version, err := tlsVersion(cfg.tls.minVersion)
if err != nil {
glog.Fatalf("TLS version invalid: %v", err)
}
cipherSuiteIDs, err := cliflag.TLSCipherSuites(cfg.tls.cipherSuites)
if err != nil {
glog.Fatalf("Failed to convert TLS cipher suite name to ID: %v", err)
}
srv.TLSConfig = &tls.Config{
CipherSuites: cipherSuiteIDs,
Certificates: []tls.Certificate{cert},
MinVersion: version,
// To enable http/2
// See net/http.Server.shouldConfigureHTTP2ForServe for more context
NextProtos: []string{"h2"},
}
} else {
certReloader, err := setupTLSCertReloader(fileWatcherCtx, cfg.tls.certFile, cfg.tls.keyFile)
if err != nil {
glog.Fatalf("Failed to create ReloadableTLSCertProvider: %v", err)
}
//Configure srv with GetCertificate function
srv.TLSConfig = &tls.Config{
GetCertificate: certReloader.GetCertificateFunc,
}
}
l, err := net.Listen("tcp", cfg.secureListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on secure address: %v", err)
}
glog.Infof("Listening securely on %v", cfg.secureListenAddress)
go srv.ServeTLS(l, "", "")
}
if cfg.metricsListenAddress != "" {
srv := &http.Server{Handler: promhttp.Handler()}
l, err := net.Listen("tcp", cfg.metricsListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on insecure address: %v", err)
}
glog.Infof("Listening for metrics on %v", cfg.metricsListenAddress)
go srv.Serve(l)
}
if cfg.insecureListenAddress != "" {
if cfg.upstreamForceH2C && !proxyForApiserver {
// Force http/2 for connections to the upstream i.e. do not start with HTTP1.1 UPGRADE req to
// initialize http/2 session.
// See https://github.com/golang/go/issues/14141#issuecomment-219212895 for more context
rp.Transport = &http2.Transport{
// Allow http schema. This doesn't automatically disable TLS
AllowHTTP: true,
// Do disable TLS.
// In combination with the schema check above. We could enforce h2c against the upstream server
DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) {
return net.Dial(netw, addr)
},
}
}
// Background:
//
// golang's http2 server doesn't support h2c
// https://github.com/golang/go/issues/16696
//
//
// Action:
//
// Use hkwi/h2c so that you can properly handle HTTP Upgrade requests over plain TCP,
// which is one of consequences for a h2c support.
//
// See https://github.com/golang/go/issues/14141 for more context.
//
// Possible alternative:
//
// We could potentially use grpc-go server's HTTP handler support
// which would handle HTTP UPGRADE from http1.1 to http/2, especially in case
// what you wanted kube-rbac-proxy to authn/authz was gRPC over h2c calls.
//
// Note that golang's http server requires a client(including gRPC) to send HTTP Upgrade req to
// property start http/2.
//
// but it isn't straight-forward to understand.
// Also note that at time of writing this, grpc-go's server implementation still lacks
// a h2c support for communication against the upstream.
//
// See belows for more information:
// - https://github.com/grpc/grpc-go/pull/1406/files
// - https://github.com/grpc/grpc-go/issues/549#issuecomment-191458335
// - https://github.com/golang/go/issues/14141#issuecomment-176465220
h2cHandler := &h2c.Server{Handler: mux}
srv := &http.Server{Handler: h2cHandler}
l, err := net.Listen("tcp", cfg.insecureListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on insecure address: %v", err)
}
glog.Infof("Listening insecurely on %v", cfg.insecureListenAddress)
go srv.Serve(l)
}
term := make(chan os.Signal)
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
select {
case <-term:
glog.Info("Received SIGTERM, exiting gracefully...")
fileWatcherCtxCancel()
}
//Allow for file watchers to close gracefully
time.Sleep(1 * time.Second)
}
// initKubeConfig returns an initialized Kubernetes REST config. A non-empty
// kcLocation (path to a kubeconfig file) enables local usage outside the
// cluster; otherwise the in-cluster service-account configuration is used.
// Any failure aborts the process.
func initKubeConfig(kcLocation string) *rest.Config {
	if kcLocation != "" {
		kubeConfig, err := clientcmd.BuildConfigFromFlags("", kcLocation)
		if err != nil {
			glog.Fatalf("unable to build rest config based on provided path to kubeconfig file %s", err.Error())
		}
		return kubeConfig
	}
	kubeConfig, err := rest.InClusterConfig()
	if err != nil {
		glog.Fatal("cannot find Service Account in pod to build in-cluster rest config")
	}
	return kubeConfig
}
// newReverseProxy builds a single-host reverse proxy for target. Upstream
// CORS headers are stripped from every response (deleteUpstreamCORSHeaders)
// so that this proxy's own CORS configuration is authoritative. When
// proxyForApiserver is true, the transport is derived from kcfg so requests
// to the apiserver carry its auth/TLS settings.
func newReverseProxy(target *url.URL, kcfg *rest.Config, proxyForApiserver bool) (*httputil.ReverseProxy, error) {
	rp := httputil.NewSingleHostReverseProxy(target)
	rp.ModifyResponse = deleteUpstreamCORSHeaders
	if proxyForApiserver {
		t, err := rest.TransportFor(kcfg)
		if err != nil {
			return nil, fmt.Errorf("unable to set HTTP Transport for the upstream. Details : %s", err.Error())
		}
		rp.Transport = t
	}
	return rp, nil
}
// getCORSHandler wraps handler with gorilla/handlers CORS middleware
// configured from corsCfg (allowed origins, methods and headers).
func getCORSHandler(handler http.Handler, corsCfg corsConfig) http.Handler {
	return handlers.CORS(
		handlers.AllowedOrigins(corsCfg.allowOrigin),
		handlers.AllowedMethods(corsCfg.allowMethods),
		handlers.AllowedHeaders(corsCfg.allowHeaders),
	)(handler)
}
// deleteUpstreamCORSHeaders strips any CORS headers set by the upstream so
// that only this proxy's CORS middleware decides what the client sees.
// It always returns nil; it is installed as the reverse proxy's
// ModifyResponse hook.
func deleteUpstreamCORSHeaders(r *http.Response) error {
	for _, h := range corsHeaders {
		r.Header.Del(h)
	}
	return nil
}
// setupOIDCAuthReloader creates an OIDC authenticator whose underlying
// instance is rebuilt whenever the configured CA file changes on disk.
// A watcher goroutine (stopped via fileWatcherCtx) batches file-change
// events and then triggers the reloader, which constructs a fresh
// authenticator via cfg.
func setupOIDCAuthReloader(fileWatcherCtx context.Context, cfg *authn.OIDCConfig) (authenticator.Request, error) {
	// Batch window (seconds) for coalescing bursts of file-change events.
	const eventBatchDelaySeconds = 10
	filesToWatch := []string{cfg.CAFile}
	// Constructor invoked on every reload to build a replacement
	// (cancelable) authenticator from the same OIDC config.
	cancelableAuthReqestConstructor := func() (authn.CancelableAuthRequest, error) {
		glog.Infof("creating new cancelable instance of authenticator.Request...")
		return authn.NewOIDCAuthenticator(cfg)
	}
	//Create reloader
	result, err := reload.NewCancelableAuthReqestReloader(cancelableAuthReqestConstructor)
	if err != nil {
		return nil, err
	}
	//Setup file watcher
	oidcCAFileWatcher := reload.NewWatcher("oidc-ca-dex-tls-cert", filesToWatch, eventBatchDelaySeconds, result.Reload)
	go oidcCAFileWatcher.Run(fileWatcherCtx)
	return result, nil
}
// setupTLSCertReloader returns a TLS certificate provider that reloads the
// key pair from certFile/keyFile whenever either file changes on disk.
// A watcher goroutine (stopped via fileWatcherCtx) batches change events
// and then triggers the reload.
func setupTLSCertReloader(fileWatcherCtx context.Context, certFile, keyFile string) (*reload.TLSCertReloader, error) {
	// Batch window (seconds) for coalescing bursts of file-change events.
	const eventBatchDelaySeconds = 10
	// Constructor invoked on every reload to re-read the key pair.
	tlsConstructor := func() (*tls.Certificate, error) {
		glog.Infof("Creating new TLS Certificate from data files: %s, %s", certFile, keyFile)
		res, err := tls.LoadX509KeyPair(certFile, keyFile)
		return &res, err
	}
	//Create reloader
	result, err := reload.NewTLSCertReloader(tlsConstructor)
	if err != nil {
		return nil, err
	}
	//Start file watcher for certificate files
	tlsCertFileWatcher := reload.NewWatcher("main-tls-crt/key", []string{certFile, keyFile}, eventBatchDelaySeconds, result.Reload)
	go tlsCertFileWatcher.Run(fileWatcherCtx)
	return result, nil
	// NOTE(review): the trailing "| if err != nil { | random_line_split" on
	// the closing line below is an extraction/dataset artifact, not code.
} | if err != nil { | random_line_split
main.go | package main
import (
"code.google.com/p/go.net/websocket"
"code.google.com/p/goauth2/oauth"
"code.google.com/p/google-api-go-client/mirror/v1"
"code.google.com/p/google-api-go-client/oauth2/v2"
"encoding/json"
"fmt"
picarus "github.com/bwhite/picarus/go"
"github.com/gorilla/pat"
"github.com/ugorji/go-msgpack"
"io"
"io/ioutil"
"log"
"net/http"
"strconv"
"strings"
)
const revokeEndpointFmt = "https://accounts.google.com/o/oauth2/revoke?token=%s"
// StaticServer serves files from the local "static/" directory. The file
// name comes from the ":path" URL parameter (gorilla/pat routing). Names
// containing a path separator are rejected to prevent directory traversal
// outside of static/.
func StaticServer(w http.ResponseWriter, req *http.Request) {
	path := req.URL.Query().Get(":path")
	if strings.ContainsAny(path, "/\\") {
		// Previously this returned silently, producing an empty 200
		// response; report the rejection explicitly instead.
		http.Error(w, "invalid path", http.StatusBadRequest)
		return
	}
	http.ServeFile(w, req, "static/"+path)
}
// RootServer serves the application shell (static/app.html) for "/".
func RootServer(w http.ResponseWriter, req *http.Request) {
	fmt.Println("Got /")
	content, err := ioutil.ReadFile("static/app.html")
	if err != nil {
		// Previously the error was swallowed and an empty 200 returned.
		http.Error(w, "internal server error", http.StatusInternalServerError)
		return
	}
	io.WriteString(w, string(content))
}
// DebugServer prints the incoming request to stdout for debugging.
func DebugServer(w http.ResponseWriter, req *http.Request) {
	fmt.Println("Debug server")
	fmt.Printf("%v\n", req)
}
func setupUser(r *http.Request, client *http.Client, userId string) {
m, _ := mirror.New(client)
s := &mirror.Subscription{
Collection: "timeline",
UserToken: userId,
CallbackUrl: fullUrl + "/notify",
}
m.Subscriptions.Insert(s).Do()
c := &mirror.Contact{
Id: "Memento",
DisplayName: "Memento",
ImageUrls: []string{fullUrl + "/static/memento.jpg"},
}
m.Contacts.Insert(c).Do()
c = &mirror.Contact{
Id: "OpenGlass",
DisplayName: "OpenGlass",
ImageUrls: []string{fullUrl + "/static/oglogo.png"},
}
m.Contacts.Insert(c).Do()
menuItems := []*mirror.MenuItem{&mirror.MenuItem{Action: "REPLY"}, &mirror.MenuItem{Action: "TOGGLE_PINNED"}}
for _, eventName := range eventNames {
menuItems = append(menuItems, &mirror.MenuItem{Action: "CUSTOM", Id: eventName + " 1", Values: []*mirror.MenuValue{&mirror.MenuValue{DisplayName: eventName, IconUrl: fullUrl + "/static/icon_plus.png"}}})
menuItems = append(menuItems, &mirror.MenuItem{Action: "CUSTOM", Id: eventName + " 0", Values: []*mirror.MenuValue{&mirror.MenuValue{DisplayName: eventName, IconUrl: fullUrl + "/static/icon_minus.png"}}})
}
t := &mirror.TimelineItem{
Text: "OpenGlass",
Creator: c,
MenuItems: menuItems,
Notification: &mirror.NotificationConfig{Level: "DEFAULT"},
}
req, _ := m.Timeline.Insert(t).Do()
setUserAttribute(userId, "ogtid", req.Id)
}
// auth is the HTTP handler that redirects the user to authenticate
// with OAuth.
func authHandler(w http.ResponseWriter, r *http.Request) {
url := config(r.Host).AuthCodeURL(r.URL.RawQuery)
http.Redirect(w, r, url, http.StatusFound)
}
// oauth2callback is the handler to which Google's OAuth service redirects the
// user after they have granted the appropriate permissions.
func oauth2callbackHandler(w http.ResponseWriter, r *http.Request) {
// Create an oauth transport with a urlfetch.Transport embedded inside.
t := &oauth.Transport{Config: config(r.Host)}
// Exchange the code for access and refresh tokens.
tok, err := t.Exchange(r.FormValue("code"))
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: exchange")
return
}
o, err := oauth2.New(t.Client())
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: oauth get")
return
}
u, err := o.Userinfo.Get().Do()
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: userinfo get")
return
}
userId := fmt.Sprintf("%s_%s", strings.Split(clientId, ".")[0], u.Id)
if err = storeUserID(w, r, userId); err != nil {
w.WriteHeader(500)
LogPrintf("oauth: store userid")
return
}
userSer, err := json.Marshal(u)
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: json marshal")
return
}
storeCredential(userId, tok, string(userSer))
http.Redirect(w, r, fullUrl, http.StatusFound)
}
func SetupHandler(w http.ResponseWriter, r *http.Request) {
userId, err := userID(r)
if err != nil || userId == "" {
w.WriteHeader(400)
LogPrintf("setup: userid")
return
}
t := authTransport(userId)
if t == nil {
w.WriteHeader(401)
LogPrintf("setup: auth")
return
}
setupUser(r, t.Client(), userId)
}
// signout Revokes access for the user and removes the associated credentials from the datastore.
func signoutHandler(w http.ResponseWriter, r *http.Request) {
userId, err := userID(r)
if err != nil || userId == "" {
w.WriteHeader(400)
LogPrintf("signout: userid")
return
}
t := authTransport(userId)
if t == nil {
w.WriteHeader(500)
LogPrintf("signout: auth")
return
}
req, err := http.NewRequest("GET", fmt.Sprintf(revokeEndpointFmt, t.Token.RefreshToken), nil)
response, err := http.DefaultClient.Do(req)
if err != nil {
w.WriteHeader(500)
LogPrintf("signout: revoke")
return
}
defer response.Body.Close()
storeUserID(w, r, "")
deleteCredential(userId)
http.Redirect(w, r, fullUrl, http.StatusFound)
}
func sendImageCard(image string, text string, svc *mirror.Service) {
nt := &mirror.TimelineItem{
SpeakableText: text,
MenuItems: []*mirror.MenuItem{&mirror.MenuItem{Action: "READ_ALOUD"}, &mirror.MenuItem{Action: "DELETE"}},
Html: "<img src=\"attachment:0\" width=\"100%\" height=\"100%\">",
Notification: &mirror.NotificationConfig{Level: "DEFAULT"},
}
req := svc.Timeline.Insert(nt)
req.Media(strings.NewReader(image))
_, err := req.Do()
if err != nil {
LogPrintf("sendimage: insert")
return
}
}
func getImageAttachment(conn *picarus.Conn, svc *mirror.Service, trans *oauth.Transport, t *mirror.TimelineItem) ([]byte, error) |
func notifyOpenGlass(conn *picarus.Conn, svc *mirror.Service, trans *oauth.Transport, t *mirror.TimelineItem, userId string) {
if !hasFlagSingle(userId, "flags", "user_openglass") {
LogPrintf("openglass: flag user_openglass")
return
}
var err error
flags, err := getUserFlags(userId, "uflags")
if err != nil {
LogPrintf("openglass: uflags")
return
}
if t.Attachments != nil && len(t.Attachments) > 0 {
imageData, err := getImageAttachment(conn, svc, trans, t)
if err != nil {
LogPrintf("openglass: attachment")
return
}
imageRow, err := PicarusApiImageUpload(conn, imageData)
if err != nil {
LogPrintf("openglass: picarus upload")
return
}
pushUserListTrim(userId, "images", imageRow, maxImages)
PicarusApiRowThumb(conn, imageRow)
if hasFlag(flags, "match_memento") {
mementoMatches, _, err := matchMementoImage(conn, imageRow, userId)
if err != nil {
LogPrintf("openglass: memento match")
} else {
for row, note := range mementoMatches {
m, err := conn.GetRow("images", row, []string{picarus.B64Dec(glassImageModel)})
if err != nil {
LogPrintf("openglass: memento get thumb")
continue
}
sendImageCard(m[picarus.B64Dec(glassImageModel)], note, svc)
}
}
}
if hasFlag(flags, "location") && hasFlag(flags, "location:streetview") {
//searchData, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(locationModel))
}
if err != nil {
LogPrintf("openglass: image search")
}
// Warped image example
var imageWarped string
if hasFlag(flags, "warp") {
imageWarped, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(homographyModel))
if err != nil {
LogPrintf("openglass: image warp")
imageWarped = ""
} else {
sendImageCard(imageWarped, "", svc)
}
}
// If there is a caption, send it to the annotation task
if len(t.Text) > 0 {
if hasFlag(flags, "crowdqa") {
imageType := "full"
if strings.HasPrefix(t.Text, "augmented ") {
if len(imageWarped) > 0 {
imageWarpedData := []byte(imageWarped)
imageRowWarped, err := PicarusApiImageUpload(conn, imageWarpedData)
PicarusApiRowThumb(conn, imageRowWarped)
if err != nil {
LogPrintf("openglass: warp image upload")
} else {
imageRow = imageRowWarped
imageData = imageWarpedData
imageType = "augmented"
}
}
t.Text = t.Text[10:] // Remove "augmented "
}
_, err = conn.PatchRow("images", imageRow, map[string]string{"meta:question": t.Text, "meta:openglass_user": userId,
"meta:openglass_image_type": imageType}, map[string][]byte{})
if err != nil {
LogPrintf("openglass: patch image")
return
}
// TODO: Here is where we would resize the image, we can do that later
_, err = conn.PostRow("jobs", annotationTask, map[string]string{"action": "io/annotation/sync"})
if err != nil {
LogPrintf("openglass: sync annotations")
return
}
}
} else {
if hasFlag(flags, "predict") {
confHTML := "<article><section><ul class=\"text-x-small\">"
menuItems := []*mirror.MenuItem{}
for modelName, modelRow := range predictionModels {
confMsgpack, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(modelRow))
if err != nil {
LogPrintf("openglass: predict")
return
}
var value float64
err = msgpack.Unmarshal([]byte(confMsgpack), &value, nil)
if err != nil {
LogPrintf("openglass: predict msgpack")
return
}
confHTML = confHTML + fmt.Sprintf("<li>%s: %f</li>", modelName, value)
menuItems = append(menuItems, &mirror.MenuItem{Action: "CUSTOM", Id: modelName + " 1", Values: []*mirror.MenuValue{&mirror.MenuValue{DisplayName: modelName, IconUrl: fullUrl + "/static/icon_plus.png"}}})
menuItems = append(menuItems, &mirror.MenuItem{Action: "CUSTOM", Id: modelName + " 0", Values: []*mirror.MenuValue{&mirror.MenuValue{DisplayName: modelName, IconUrl: fullUrl + "/static/icon_minus.png"}}})
}
menuItems = append(menuItems, &mirror.MenuItem{Action: "DELETE"})
confHTML = confHTML + "</ul></section><footer><p>Image Attributes</p></footer></article>"
nt := &mirror.TimelineItem{
Html: confHTML,
Notification: &mirror.NotificationConfig{Level: "DEFAULT"},
HtmlPages: []string{"<img src=\"attachment:0\" width=\"100%\" height=\"100%\">"},
MenuItems: menuItems,
}
imageThumbData, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(glassImageModel))
if err != nil {
LogPrintf("openglass: thumb")
return
}
req := svc.Timeline.Insert(nt)
req.Media(strings.NewReader(string(imageThumbData)))
tiConf, err := req.Do()
if err != nil {
LogPrintf("openglass: predictinsert")
return
}
setUserAttribute(userId, "tid_to_row:"+tiConf.Id, imageRow)
}
}
} else {
if len(t.Text) > 0 {
if strings.HasPrefix(t.Text, "where") && hasFlag(flags, "location") {
loc, _ := svc.Locations.Get("latest").Do()
_, err = conn.PostTable("images", map[string]string{"meta:question": t.Text, "meta:openglass_user": userId, "meta:latitude": strconv.FormatFloat(loc.Latitude, 'f', 16, 64),
"meta:longitude": strconv.FormatFloat(loc.Longitude, 'f', 16, 64)}, map[string][]byte{}, []picarus.Slice{})
} else {
_, err = conn.PostTable("images", map[string]string{"meta:question": t.Text, "meta:openglass_user": userId}, map[string][]byte{}, []picarus.Slice{})
}
if err != nil {
LogPrintf("openglass: qa post text-only")
return
}
_, err = conn.PostRow("jobs", annotationTask, map[string]string{"action": "io/annotation/sync"})
if err != nil {
LogPrintf("openglass: qa post text-only sync")
return
}
}
}
}
func notifyHandler(w http.ResponseWriter, r *http.Request) {
conn := picarus.Conn{Email: picarusEmail, ApiKey: picarusApiKey, Server: "https://api.picar.us"}
not := new(mirror.Notification)
if err := json.NewDecoder(r.Body).Decode(not); err != nil {
LogPrintf("notify: decode")
return
}
userId := not.UserToken
itemId := not.ItemId
fmt.Println(not)
if not.Operation == "UPDATE" {
ogTid, err := getUserAttribute(userId, "ogtid")
if err != nil {
LogPrintf("notify: ogtid")
return
}
// Annotation set by user in OpenGlass
fmt.Println(not.ItemId)
fmt.Println(ogTid)
if not.ItemId == ogTid {
for _, v := range not.UserActions {
vs := strings.Split(v.Payload, " ")
if len(vs) != 2 || len(vs[1]) != 1 {
LogPrintf("notify: payload")
continue
}
annotationJS, err := json.Marshal(WSAnnotation{Timestamp: CurTime(), Name: vs[0], Polarity: vs[1] == "1"})
if err != nil {
LogPrintf("notify: annotationJS")
return
}
err = pushUserListTrim(userId, "annotations", string(annotationJS), 100)
if err != nil {
LogPrintf("notify: push list")
return
}
}
return
}
imageRow, err := getUserAttribute(userId, "tid_to_row:"+not.ItemId)
if err != nil {
LogPrintf("notify: tid_to_row")
return
}
for _, v := range not.UserActions {
vs := strings.Split(v.Payload, " ")
if len(vs) != 2 || predictionModels[vs[0]] == "" || len(vs[1]) != 1 {
LogPrintf("notify: payload")
continue
}
_, err = conn.PatchRow("images", imageRow, map[string]string{"meta:userannot-" + vs[0]: vs[1]}, map[string][]byte{})
if err != nil {
LogPrintf("notify: patch annotation")
continue
}
}
return
}
if not.Operation != "INSERT" {
return
}
trans := authTransport(userId)
if trans == nil {
LogPrintf("notify: auth")
return
}
svc, err := mirror.New(trans.Client())
if err != nil {
LogPrintf("notify: mirror")
return
}
t, err := svc.Timeline.Get(itemId).Do()
if err != nil {
LogPrintf("notify: timeline item")
return
}
notifyOG := true
for _, r := range t.Recipients {
if r.Id == "Memento" {
notifyOG = false
}
}
if notifyOG {
go notifyOpenGlass(&conn, svc, trans, t, userId)
} else {
go notifyMemento(&conn, svc, trans, t, userId)
}
}
func main() {
PupilCalibrate("219250584360_109113122718379096525")
m := pat.New()
//m.Post("/", http.HandlerFunc(DebugServer))
m.Get("/map", http.HandlerFunc(MapServer))
m.Get("/search", http.HandlerFunc(MementoSearchServer))
m.Get("/static/{path}", http.HandlerFunc(StaticServer))
m.Post("/raven/{key}", http.HandlerFunc(RavenServer))
m.Post("/notify/{key}", http.HandlerFunc(NotifyServer))
m.Post("/pupil/{key}", http.HandlerFunc(PupilServer))
m.Post("/control/{action}", http.HandlerFunc(ControlServer))
m.Post("/location", http.HandlerFunc(LocationHandler))
m.Post("/setup", http.HandlerFunc(SetupHandler))
m.Post("/user/key/{type}", http.HandlerFunc(SecretKeySetupHandler))
// /auth -> google -> /oauth2callback
m.Get("/auth", http.HandlerFunc(authHandler))
m.Get("/oauth2callback", http.HandlerFunc(oauth2callbackHandler))
m.Post("/notify", http.HandlerFunc(notifyHandler))
m.Post("/signout", http.HandlerFunc(signoutHandler))
m.Post("/flags", http.HandlerFunc(FlagsHandler))
m.Get("/flags", http.HandlerFunc(FlagsHandler))
m.Delete("/flags", http.HandlerFunc(FlagsHandler))
m.Get("/", http.HandlerFunc(RootServer))
go pollAnnotations()
http.Handle("/ws/glass/", websocket.Handler(WSGlassHandler))
http.Handle("/ws/web", websocket.Handler(WSWebHandler))
http.Handle("/ws/web/", websocket.Handler(WSWebHandler))
http.Handle("/", m)
err := http.ListenAndServe(":16001", nil)
if err != nil {
log.Fatal("ListenAndServe: ", err)
}
}
| {
a, err := svc.Timeline.Attachments.Get(t.Id, t.Attachments[0].Id).Do()
if err != nil {
LogPrintf("getattachment: metadata")
return nil, err
}
req, err := http.NewRequest("GET", a.ContentUrl, nil)
if err != nil {
LogPrintf("getattachment: http")
return nil, err
}
resp, err := trans.RoundTrip(req)
if err != nil {
LogPrintf("getattachment: content")
return nil, err
}
defer resp.Body.Close()
imageData, err := ioutil.ReadAll(resp.Body)
if err != nil {
LogPrintf("getattachment: body")
return nil, err
}
return imageData, nil
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.