file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
run_utils.py |
def set_seed(seed):
print('seed = {0}'.format(seed))
os.environ['RECSYS_SEED'] = str(seed)
np.random.seed(seed)
def get_seed():
env = os.getenv('RECSYS_SEED')
if env:
return int(env)
return -1
def build_urm():
urm_data = load_csv(DataFiles.TRAIN)
urm_data = [[int(row[i]) if i <= 1 else int(float(row[i])) for i in range(len(row))] for row in urm_data]
users, items, ratings = map(np.array, zip(*urm_data))
return sps.csr_matrix((ratings, (users, items)))
def clusterize():
data = load_csv(DataFiles.CLUSTERS)
data = [[int(row[i]) for i in range(len(row))] for row in data]
_, user_ids, cluster_ids = map(list, zip(*data))
assert len(user_ids) == len(cluster_ids)
data_len = len(user_ids)
clusters = dict()
for n in range(max(cluster_ids) + 1):
clusters[n] = list()
for i in range(data_len):
user_id = user_ids[i]
cluster_id = cluster_ids[i]
clusters[cluster_id].append(user_id)
return clusters
def get_cold_users(urm_train, return_warm=False):
profile_lengths = np.ediff1d(urm_train.indptr)
cold_users = np.where(profile_lengths == 0)[0]
if return_warm:
warm_users = np.where(profile_lengths > 0)[0]
return cold_users, warm_users
return cold_users
def | (n_items):
price_icm_items, _, price_icm_values = __load_icm_csv(DataFiles.ICM_PRICE, third_type=float)
price_icm_values = __encode_values(price_icm_values)
n_features = max(price_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(price_icm_values))
price_icm = sps.csr_matrix((ones, (price_icm_items, price_icm_values)), shape=shape, dtype=int)
return price_icm
def build_asset_icm(n_items):
asset_icm_items, _, asset_icm_values = __load_icm_csv(DataFiles.ICM_ASSET, third_type=float)
asset_icm_values += 1
asset_icm_values = __encode_values(asset_icm_values)
n_features = max(asset_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(asset_icm_values))
asset_icm = sps.csr_matrix((ones, (asset_icm_items, asset_icm_values)), shape=shape, dtype=int)
return asset_icm
def build_subclass_icm(n_items):
subclass_icm_items, subclass_icm_features, subclass_icm_values = __load_icm_csv(DataFiles.ICM_SUBCLASS, third_type=float)
n_features = max(subclass_icm_features) + 1
shape = (n_items, n_features)
subclass_icm = sps.csr_matrix((subclass_icm_values, (subclass_icm_items, subclass_icm_features)), shape=shape, dtype=int)
return subclass_icm
def build_icm(n_items):
price_icm = build_price_icm(n_items)
asset_icm = build_asset_icm(n_items)
subclass_icm = build_subclass_icm(n_items)
return sps.hstack((price_icm, asset_icm, subclass_icm)).tocsr()
def build_age_ucm(n_users):
age_ucm_users, age_ucm_features, age_ucm_values = __load_icm_csv(DataFiles.UCM_AGE, third_type=float)
n_features = max(age_ucm_features) + 1
shape = (n_users, n_features)
age_ucm = sps.csr_matrix((age_ucm_values, (age_ucm_users, age_ucm_features)), shape=shape, dtype=int)
return age_ucm
def build_region_ucm(n_users):
region_ucm_users, region_ucm_features, region_ucm_values = __load_icm_csv(DataFiles.UCM_REGION, third_type=float)
n_features = max(region_ucm_features) + 1
shape = (n_users, n_features)
region_ucm = sps.csr_matrix((region_ucm_values, (region_ucm_users, region_ucm_features)), shape=shape, dtype=int)
return region_ucm
def build_ucm(n_users):
age_ucm = build_age_ucm(n_users)
region_ucm = build_region_ucm(n_users)
return sps.hstack((age_ucm, region_ucm))
def build_target_users():
target_users = load_csv(DataFiles.TARGET_USERS_TEST)
return [int(x[0]) for x in target_users]
def build_all_matrices():
urm = build_urm()
n_users, n_items = urm.shape
icm = build_icm(n_items)
ucm = build_ucm(n_users)
target_users = build_target_users()
return urm, icm, ucm, target_users
def train_test_split(urm, split_type=SplitType.PROBABILISTIC, split=0.8):
if split_type == SplitType.PROBABILISTIC:
return __train_test_split(urm, split)
elif split_type == SplitType.LOO:
return __train_test_loo_split(urm)
elif split_type == SplitType.LOO_CYTHON:
return __train_test_loo_split_cython(urm)
def evaluate(recommender, urm_test, excluded_users=[], cython=False, verbose=True):
from evaluation import evaluate_algorithm
if cython:
if verbose:
print('Ignoring argument excluded_users')
from cython_modules.evaluation import evaluate_cython
if verbose:
print('Using Cython evaluation')
return evaluate_cython(recommender, urm_test, verbose=verbose)
else:
return evaluate_algorithm(recommender, urm_test, excluded_users=excluded_users, verbose=verbose)
def evaluate_mp(recommender, urm_tests, excluded_users=[], cython=False, verbose=True, n_processes=0):
assert type(urm_tests) == list
assert len(urm_tests) >= 1
assert type(n_processes) == int
if n_processes == 0:
n_processes = len(urm_tests)
with Pool(processes=n_processes) as pool:
args = [(recommender, urm_test, excluded_users, cython, verbose) for urm_test in urm_tests]
maps = pool.starmap(evaluate, args, chunksize=1)
maps = [x['MAP'] for x in maps]
return np.mean(maps)
def export(target_users, recommender):
print('Exporting recommendations...')
data = list()
for u_id in tqdm(target_users, desc='Export'):
data.append((u_id, recommender.recommend(u_id, at=10)))
export_csv(('user_id', 'item_list'), data)
print('OK')
def __train_test_split(urm, split=0.8):
print('Using probabilistic splitting ({0:.2f}/{1:.2f})'.format(split, 1-split))
urm = urm.tocoo()
num_interactions = urm.nnz
shape = urm.shape
train_mask = np.random.choice([True, False], num_interactions, p=[split, 1-split])
urm_train = sps.coo_matrix((urm.data[train_mask], (urm.row[train_mask], urm.col[train_mask])), shape=shape)
urm_train = urm_train.tocsr()
test_mask = np.logical_not(train_mask)
urm_test = sps.coo_matrix((urm.data[test_mask], (urm.row[test_mask], urm.col[test_mask])), shape=shape)
urm_test = urm_test.tocsr()
return urm_train, urm_test
def __train_test_loo_split(urm):
print('Using LeaveOneOut')
urm = urm.tocsr()
num_users = urm.shape[0]
num_items = urm.shape[1]
urm_train = urm.copy()
urm_test = sps.lil_matrix((num_users, num_items), dtype=int)
for user_id in trange(num_users, desc='LeaveOneOut'):
start_pos = urm_train.indptr[user_id]
end_pos = urm_train.indptr[user_id + 1]
user_profile = urm_train.indices[start_pos:end_pos]
if user_profile.size > 0:
item_id = np.random.choice(user_profile, 1)
urm_train[user_id, item_id] = 0
urm_test[user_id, item_id] = 1
urm_test = sps.csr_matrix(urm_test, dtype=int, shape=urm.shape)
urm_train.eliminate_zeros()
urm_test.eliminate_zeros()
return urm_train, urm_test
def __load_icm_csv(filename, third_type):
data = load_csv(filename)
data = [[int(row[i]) if i <= 1 else third_type(row[i]) for i in range(len(row))] for row in data]
items, features, values = map(np.array, | build_price_icm | identifier_name |
run_utils.py | .seed(seed)
def get_seed():
env = os.getenv('RECSYS_SEED')
if env:
return int(env)
return -1
def build_urm():
urm_data = load_csv(DataFiles.TRAIN)
urm_data = [[int(row[i]) if i <= 1 else int(float(row[i])) for i in range(len(row))] for row in urm_data]
users, items, ratings = map(np.array, zip(*urm_data))
return sps.csr_matrix((ratings, (users, items)))
def clusterize():
data = load_csv(DataFiles.CLUSTERS)
data = [[int(row[i]) for i in range(len(row))] for row in data]
_, user_ids, cluster_ids = map(list, zip(*data))
assert len(user_ids) == len(cluster_ids)
data_len = len(user_ids)
clusters = dict()
for n in range(max(cluster_ids) + 1):
clusters[n] = list()
for i in range(data_len):
user_id = user_ids[i]
cluster_id = cluster_ids[i]
clusters[cluster_id].append(user_id)
return clusters
def get_cold_users(urm_train, return_warm=False):
profile_lengths = np.ediff1d(urm_train.indptr)
cold_users = np.where(profile_lengths == 0)[0]
if return_warm:
warm_users = np.where(profile_lengths > 0)[0]
return cold_users, warm_users
return cold_users
def build_price_icm(n_items):
price_icm_items, _, price_icm_values = __load_icm_csv(DataFiles.ICM_PRICE, third_type=float)
price_icm_values = __encode_values(price_icm_values)
n_features = max(price_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(price_icm_values))
price_icm = sps.csr_matrix((ones, (price_icm_items, price_icm_values)), shape=shape, dtype=int)
return price_icm
def build_asset_icm(n_items):
asset_icm_items, _, asset_icm_values = __load_icm_csv(DataFiles.ICM_ASSET, third_type=float)
asset_icm_values += 1
asset_icm_values = __encode_values(asset_icm_values)
n_features = max(asset_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(asset_icm_values))
asset_icm = sps.csr_matrix((ones, (asset_icm_items, asset_icm_values)), shape=shape, dtype=int)
return asset_icm
def build_subclass_icm(n_items):
subclass_icm_items, subclass_icm_features, subclass_icm_values = __load_icm_csv(DataFiles.ICM_SUBCLASS, third_type=float)
n_features = max(subclass_icm_features) + 1
shape = (n_items, n_features)
subclass_icm = sps.csr_matrix((subclass_icm_values, (subclass_icm_items, subclass_icm_features)), shape=shape, dtype=int)
return subclass_icm
def build_icm(n_items):
price_icm = build_price_icm(n_items)
asset_icm = build_asset_icm(n_items)
subclass_icm = build_subclass_icm(n_items)
return sps.hstack((price_icm, asset_icm, subclass_icm)).tocsr()
def build_age_ucm(n_users):
age_ucm_users, age_ucm_features, age_ucm_values = __load_icm_csv(DataFiles.UCM_AGE, third_type=float)
n_features = max(age_ucm_features) + 1
shape = (n_users, n_features)
age_ucm = sps.csr_matrix((age_ucm_values, (age_ucm_users, age_ucm_features)), shape=shape, dtype=int)
return age_ucm
def build_region_ucm(n_users):
region_ucm_users, region_ucm_features, region_ucm_values = __load_icm_csv(DataFiles.UCM_REGION, third_type=float)
n_features = max(region_ucm_features) + 1
shape = (n_users, n_features)
region_ucm = sps.csr_matrix((region_ucm_values, (region_ucm_users, region_ucm_features)), shape=shape, dtype=int)
return region_ucm
def build_ucm(n_users):
age_ucm = build_age_ucm(n_users)
region_ucm = build_region_ucm(n_users)
return sps.hstack((age_ucm, region_ucm))
def build_target_users():
target_users = load_csv(DataFiles.TARGET_USERS_TEST)
return [int(x[0]) for x in target_users]
def build_all_matrices():
urm = build_urm()
n_users, n_items = urm.shape
icm = build_icm(n_items)
ucm = build_ucm(n_users)
target_users = build_target_users()
return urm, icm, ucm, target_users
def train_test_split(urm, split_type=SplitType.PROBABILISTIC, split=0.8):
if split_type == SplitType.PROBABILISTIC:
return __train_test_split(urm, split)
elif split_type == SplitType.LOO:
return __train_test_loo_split(urm)
elif split_type == SplitType.LOO_CYTHON:
return __train_test_loo_split_cython(urm)
def evaluate(recommender, urm_test, excluded_users=[], cython=False, verbose=True):
from evaluation import evaluate_algorithm
if cython:
if verbose:
print('Ignoring argument excluded_users')
from cython_modules.evaluation import evaluate_cython
if verbose:
print('Using Cython evaluation')
return evaluate_cython(recommender, urm_test, verbose=verbose)
else:
return evaluate_algorithm(recommender, urm_test, excluded_users=excluded_users, verbose=verbose)
def evaluate_mp(recommender, urm_tests, excluded_users=[], cython=False, verbose=True, n_processes=0):
assert type(urm_tests) == list
assert len(urm_tests) >= 1
assert type(n_processes) == int
if n_processes == 0:
n_processes = len(urm_tests)
with Pool(processes=n_processes) as pool:
args = [(recommender, urm_test, excluded_users, cython, verbose) for urm_test in urm_tests]
maps = pool.starmap(evaluate, args, chunksize=1)
maps = [x['MAP'] for x in maps]
return np.mean(maps)
def export(target_users, recommender):
print('Exporting recommendations...')
data = list()
for u_id in tqdm(target_users, desc='Export'):
data.append((u_id, recommender.recommend(u_id, at=10)))
export_csv(('user_id', 'item_list'), data)
print('OK')
def __train_test_split(urm, split=0.8):
print('Using probabilistic splitting ({0:.2f}/{1:.2f})'.format(split, 1-split))
urm = urm.tocoo()
num_interactions = urm.nnz
shape = urm.shape
train_mask = np.random.choice([True, False], num_interactions, p=[split, 1-split])
urm_train = sps.coo_matrix((urm.data[train_mask], (urm.row[train_mask], urm.col[train_mask])), shape=shape)
urm_train = urm_train.tocsr()
test_mask = np.logical_not(train_mask)
urm_test = sps.coo_matrix((urm.data[test_mask], (urm.row[test_mask], urm.col[test_mask])), shape=shape)
urm_test = urm_test.tocsr()
return urm_train, urm_test
def __train_test_loo_split(urm):
print('Using LeaveOneOut')
urm = urm.tocsr()
num_users = urm.shape[0]
num_items = urm.shape[1]
urm_train = urm.copy()
urm_test = sps.lil_matrix((num_users, num_items), dtype=int)
for user_id in trange(num_users, desc='LeaveOneOut'):
start_pos = urm_train.indptr[user_id]
end_pos = urm_train.indptr[user_id + 1]
user_profile = urm_train.indices[start_pos:end_pos]
if user_profile.size > 0:
item_id = np.random.choice(user_profile, 1)
urm_train[user_id, item_id] = 0
urm_test[user_id, item_id] = 1
urm_test = sps.csr_matrix(urm_test, dtype=int, shape=urm.shape)
urm_train.eliminate_zeros()
urm_test.eliminate_zeros()
return urm_train, urm_test
def __load_icm_csv(filename, third_type):
data = load_csv(filename)
data = [[int(row[i]) if i <= 1 else third_type(row[i]) for i in range(len(row))] for row in data]
items, features, values = map(np.array, zip(*data))
return items, features, values
def __encode_values(values):
| le = LabelEncoder()
le.fit(values)
return le.transform(values) | identifier_body | |
run_utils.py |
def set_seed(seed):
print('seed = {0}'.format(seed))
os.environ['RECSYS_SEED'] = str(seed)
np.random.seed(seed)
def get_seed():
env = os.getenv('RECSYS_SEED')
if env:
return int(env)
return -1
def build_urm():
urm_data = load_csv(DataFiles.TRAIN)
urm_data = [[int(row[i]) if i <= 1 else int(float(row[i])) for i in range(len(row))] for row in urm_data]
users, items, ratings = map(np.array, zip(*urm_data))
return sps.csr_matrix((ratings, (users, items)))
def clusterize():
data = load_csv(DataFiles.CLUSTERS)
data = [[int(row[i]) for i in range(len(row))] for row in data]
_, user_ids, cluster_ids = map(list, zip(*data))
assert len(user_ids) == len(cluster_ids)
data_len = len(user_ids)
clusters = dict()
for n in range(max(cluster_ids) + 1):
clusters[n] = list()
for i in range(data_len):
user_id = user_ids[i]
cluster_id = cluster_ids[i]
clusters[cluster_id].append(user_id)
return clusters
def get_cold_users(urm_train, return_warm=False):
profile_lengths = np.ediff1d(urm_train.indptr)
cold_users = np.where(profile_lengths == 0)[0]
if return_warm:
warm_users = np.where(profile_lengths > 0)[0]
return cold_users, warm_users
return cold_users
def build_price_icm(n_items):
price_icm_items, _, price_icm_values = __load_icm_csv(DataFiles.ICM_PRICE, third_type=float)
price_icm_values = __encode_values(price_icm_values)
n_features = max(price_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(price_icm_values))
price_icm = sps.csr_matrix((ones, (price_icm_items, price_icm_values)), shape=shape, dtype=int)
return price_icm
def build_asset_icm(n_items):
asset_icm_items, _, asset_icm_values = __load_icm_csv(DataFiles.ICM_ASSET, third_type=float)
asset_icm_values += 1
asset_icm_values = __encode_values(asset_icm_values)
n_features = max(asset_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(asset_icm_values))
asset_icm = sps.csr_matrix((ones, (asset_icm_items, asset_icm_values)), shape=shape, dtype=int)
return asset_icm
def build_subclass_icm(n_items):
subclass_icm_items, subclass_icm_features, subclass_icm_values = __load_icm_csv(DataFiles.ICM_SUBCLASS, third_type=float)
n_features = max(subclass_icm_features) + 1
shape = (n_items, n_features)
subclass_icm = sps.csr_matrix((subclass_icm_values, (subclass_icm_items, subclass_icm_features)), shape=shape, dtype=int)
return subclass_icm
def build_icm(n_items):
price_icm = build_price_icm(n_items)
asset_icm = build_asset_icm(n_items)
subclass_icm = build_subclass_icm(n_items)
return sps.hstack((price_icm, asset_icm, subclass_icm)).tocsr()
def build_age_ucm(n_users):
age_ucm_users, age_ucm_features, age_ucm_values = __load_icm_csv(DataFiles.UCM_AGE, third_type=float)
n_features = max(age_ucm_features) + 1
shape = (n_users, n_features)
age_ucm = sps.csr_matrix((age_ucm_values, (age_ucm_users, age_ucm_features)), shape=shape, dtype=int)
return age_ucm
def build_region_ucm(n_users):
region_ucm_users, region_ucm_features, region_ucm_values = __load_icm_csv(DataFiles.UCM_REGION, third_type=float)
n_features = max(region_ucm_features) + 1
shape = (n_users, n_features)
region_ucm = sps.csr_matrix((region_ucm_values, (region_ucm_users, region_ucm_features)), shape=shape, dtype=int)
return region_ucm
def build_ucm(n_users):
age_ucm = build_age_ucm(n_users)
region_ucm = build_region_ucm(n_users)
return sps.hstack((age_ucm, region_ucm))
def build_target_users():
target_users = load_csv(DataFiles.TARGET_USERS_TEST)
return [int(x[0]) for x in target_users]
def build_all_matrices():
urm = build_urm()
n_users, n_items = urm.shape
icm = build_icm(n_items)
ucm = build_ucm(n_users)
target_users = build_target_users()
return urm, icm, ucm, target_users
def train_test_split(urm, split_type=SplitType.PROBABILISTIC, split=0.8):
if split_type == SplitType.PROBABILISTIC:
return __train_test_split(urm, split)
elif split_type == SplitType.LOO:
return __train_test_loo_split(urm)
elif split_type == SplitType.LOO_CYTHON:
return __train_test_loo_split_cython(urm)
def evaluate(recommender, urm_test, excluded_users=[], cython=False, verbose=True):
from evaluation import evaluate_algorithm
if cython:
|
else:
return evaluate_algorithm(recommender, urm_test, excluded_users=excluded_users, verbose=verbose)
def evaluate_mp(recommender, urm_tests, excluded_users=[], cython=False, verbose=True, n_processes=0):
assert type(urm_tests) == list
assert len(urm_tests) >= 1
assert type(n_processes) == int
if n_processes == 0:
n_processes = len(urm_tests)
with Pool(processes=n_processes) as pool:
args = [(recommender, urm_test, excluded_users, cython, verbose) for urm_test in urm_tests]
maps = pool.starmap(evaluate, args, chunksize=1)
maps = [x['MAP'] for x in maps]
return np.mean(maps)
def export(target_users, recommender):
print('Exporting recommendations...')
data = list()
for u_id in tqdm(target_users, desc='Export'):
data.append((u_id, recommender.recommend(u_id, at=10)))
export_csv(('user_id', 'item_list'), data)
print('OK')
def __train_test_split(urm, split=0.8):
print('Using probabilistic splitting ({0:.2f}/{1:.2f})'.format(split, 1-split))
urm = urm.tocoo()
num_interactions = urm.nnz
shape = urm.shape
train_mask = np.random.choice([True, False], num_interactions, p=[split, 1-split])
urm_train = sps.coo_matrix((urm.data[train_mask], (urm.row[train_mask], urm.col[train_mask])), shape=shape)
urm_train = urm_train.tocsr()
test_mask = np.logical_not(train_mask)
urm_test = sps.coo_matrix((urm.data[test_mask], (urm.row[test_mask], urm.col[test_mask])), shape=shape)
urm_test = urm_test.tocsr()
return urm_train, urm_test
def __train_test_loo_split(urm):
print('Using LeaveOneOut')
urm = urm.tocsr()
num_users = urm.shape[0]
num_items = urm.shape[1]
urm_train = urm.copy()
urm_test = sps.lil_matrix((num_users, num_items), dtype=int)
for user_id in trange(num_users, desc='LeaveOneOut'):
start_pos = urm_train.indptr[user_id]
end_pos = urm_train.indptr[user_id + 1]
user_profile = urm_train.indices[start_pos:end_pos]
if user_profile.size > 0:
item_id = np.random.choice(user_profile, 1)
urm_train[user_id, item_id] = 0
urm_test[user_id, item_id] = 1
urm_test = sps.csr_matrix(urm_test, dtype=int, shape=urm.shape)
urm_train.eliminate_zeros()
urm_test.eliminate_zeros()
return urm_train, urm_test
def __load_icm_csv(filename, third_type):
data = load_csv(filename)
data = [[int(row[i]) if i <= 1 else third_type(row[i]) for i in range(len(row))] for row in data]
items, features, values = map(np.array, | if verbose:
print('Ignoring argument excluded_users')
from cython_modules.evaluation import evaluate_cython
if verbose:
print('Using Cython evaluation')
return evaluate_cython(recommender, urm_test, verbose=verbose) | conditional_block |
run_utils.py | 3
def set_seed(seed):
print('seed = {0}'.format(seed))
os.environ['RECSYS_SEED'] = str(seed)
np.random.seed(seed)
def get_seed():
env = os.getenv('RECSYS_SEED')
if env:
return int(env)
return -1
def build_urm():
urm_data = load_csv(DataFiles.TRAIN)
urm_data = [[int(row[i]) if i <= 1 else int(float(row[i])) for i in range(len(row))] for row in urm_data]
users, items, ratings = map(np.array, zip(*urm_data))
return sps.csr_matrix((ratings, (users, items)))
def clusterize():
data = load_csv(DataFiles.CLUSTERS)
data = [[int(row[i]) for i in range(len(row))] for row in data]
_, user_ids, cluster_ids = map(list, zip(*data))
assert len(user_ids) == len(cluster_ids)
data_len = len(user_ids)
clusters = dict()
for n in range(max(cluster_ids) + 1):
clusters[n] = list()
for i in range(data_len):
user_id = user_ids[i]
cluster_id = cluster_ids[i]
clusters[cluster_id].append(user_id)
return clusters
def get_cold_users(urm_train, return_warm=False):
profile_lengths = np.ediff1d(urm_train.indptr)
cold_users = np.where(profile_lengths == 0)[0]
if return_warm:
warm_users = np.where(profile_lengths > 0)[0]
return cold_users, warm_users
return cold_users
def build_price_icm(n_items):
price_icm_items, _, price_icm_values = __load_icm_csv(DataFiles.ICM_PRICE, third_type=float)
price_icm_values = __encode_values(price_icm_values)
n_features = max(price_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(price_icm_values))
price_icm = sps.csr_matrix((ones, (price_icm_items, price_icm_values)), shape=shape, dtype=int)
return price_icm
def build_asset_icm(n_items):
asset_icm_items, _, asset_icm_values = __load_icm_csv(DataFiles.ICM_ASSET, third_type=float)
asset_icm_values += 1
asset_icm_values = __encode_values(asset_icm_values)
n_features = max(asset_icm_values) + 1
shape = (n_items, n_features)
ones = np.ones(len(asset_icm_values))
asset_icm = sps.csr_matrix((ones, (asset_icm_items, asset_icm_values)), shape=shape, dtype=int)
return asset_icm
def build_subclass_icm(n_items):
subclass_icm_items, subclass_icm_features, subclass_icm_values = __load_icm_csv(DataFiles.ICM_SUBCLASS, third_type=float)
n_features = max(subclass_icm_features) + 1
shape = (n_items, n_features)
subclass_icm = sps.csr_matrix((subclass_icm_values, (subclass_icm_items, subclass_icm_features)), shape=shape, dtype=int)
return subclass_icm
def build_icm(n_items):
price_icm = build_price_icm(n_items)
asset_icm = build_asset_icm(n_items)
subclass_icm = build_subclass_icm(n_items)
return sps.hstack((price_icm, asset_icm, subclass_icm)).tocsr()
def build_age_ucm(n_users): | age_ucm = sps.csr_matrix((age_ucm_values, (age_ucm_users, age_ucm_features)), shape=shape, dtype=int)
return age_ucm
def build_region_ucm(n_users):
region_ucm_users, region_ucm_features, region_ucm_values = __load_icm_csv(DataFiles.UCM_REGION, third_type=float)
n_features = max(region_ucm_features) + 1
shape = (n_users, n_features)
region_ucm = sps.csr_matrix((region_ucm_values, (region_ucm_users, region_ucm_features)), shape=shape, dtype=int)
return region_ucm
def build_ucm(n_users):
age_ucm = build_age_ucm(n_users)
region_ucm = build_region_ucm(n_users)
return sps.hstack((age_ucm, region_ucm))
def build_target_users():
target_users = load_csv(DataFiles.TARGET_USERS_TEST)
return [int(x[0]) for x in target_users]
def build_all_matrices():
urm = build_urm()
n_users, n_items = urm.shape
icm = build_icm(n_items)
ucm = build_ucm(n_users)
target_users = build_target_users()
return urm, icm, ucm, target_users
def train_test_split(urm, split_type=SplitType.PROBABILISTIC, split=0.8):
if split_type == SplitType.PROBABILISTIC:
return __train_test_split(urm, split)
elif split_type == SplitType.LOO:
return __train_test_loo_split(urm)
elif split_type == SplitType.LOO_CYTHON:
return __train_test_loo_split_cython(urm)
def evaluate(recommender, urm_test, excluded_users=[], cython=False, verbose=True):
from evaluation import evaluate_algorithm
if cython:
if verbose:
print('Ignoring argument excluded_users')
from cython_modules.evaluation import evaluate_cython
if verbose:
print('Using Cython evaluation')
return evaluate_cython(recommender, urm_test, verbose=verbose)
else:
return evaluate_algorithm(recommender, urm_test, excluded_users=excluded_users, verbose=verbose)
def evaluate_mp(recommender, urm_tests, excluded_users=[], cython=False, verbose=True, n_processes=0):
assert type(urm_tests) == list
assert len(urm_tests) >= 1
assert type(n_processes) == int
if n_processes == 0:
n_processes = len(urm_tests)
with Pool(processes=n_processes) as pool:
args = [(recommender, urm_test, excluded_users, cython, verbose) for urm_test in urm_tests]
maps = pool.starmap(evaluate, args, chunksize=1)
maps = [x['MAP'] for x in maps]
return np.mean(maps)
def export(target_users, recommender):
print('Exporting recommendations...')
data = list()
for u_id in tqdm(target_users, desc='Export'):
data.append((u_id, recommender.recommend(u_id, at=10)))
export_csv(('user_id', 'item_list'), data)
print('OK')
def __train_test_split(urm, split=0.8):
print('Using probabilistic splitting ({0:.2f}/{1:.2f})'.format(split, 1-split))
urm = urm.tocoo()
num_interactions = urm.nnz
shape = urm.shape
train_mask = np.random.choice([True, False], num_interactions, p=[split, 1-split])
urm_train = sps.coo_matrix((urm.data[train_mask], (urm.row[train_mask], urm.col[train_mask])), shape=shape)
urm_train = urm_train.tocsr()
test_mask = np.logical_not(train_mask)
urm_test = sps.coo_matrix((urm.data[test_mask], (urm.row[test_mask], urm.col[test_mask])), shape=shape)
urm_test = urm_test.tocsr()
return urm_train, urm_test
def __train_test_loo_split(urm):
print('Using LeaveOneOut')
urm = urm.tocsr()
num_users = urm.shape[0]
num_items = urm.shape[1]
urm_train = urm.copy()
urm_test = sps.lil_matrix((num_users, num_items), dtype=int)
for user_id in trange(num_users, desc='LeaveOneOut'):
start_pos = urm_train.indptr[user_id]
end_pos = urm_train.indptr[user_id + 1]
user_profile = urm_train.indices[start_pos:end_pos]
if user_profile.size > 0:
item_id = np.random.choice(user_profile, 1)
urm_train[user_id, item_id] = 0
urm_test[user_id, item_id] = 1
urm_test = sps.csr_matrix(urm_test, dtype=int, shape=urm.shape)
urm_train.eliminate_zeros()
urm_test.eliminate_zeros()
return urm_train, urm_test
def __load_icm_csv(filename, third_type):
data = load_csv(filename)
data = [[int(row[i]) if i <= 1 else third_type(row[i]) for i in range(len(row))] for row in data]
items, features, values = map(np.array, | age_ucm_users, age_ucm_features, age_ucm_values = __load_icm_csv(DataFiles.UCM_AGE, third_type=float)
n_features = max(age_ucm_features) + 1
shape = (n_users, n_features) | random_line_split |
Home.js | ] = useState([]);
const [database, setDatabase] = useState([])
const [isModal, setIsModal] = useState(false);
const [isChecked, setIsChecked] = useState({
filters:[
{type: "meat", checked: false, text: "Kjøtt"},
{type: "fish", checked: false, text: "Fisk"},
{type: "veg", checked: false, text: "Vegetar"},
{type: "glutenFree", checked: false, text: "Glutenfri"},
{type: "lactoseFree", checked: false, text: "Laktosefri"},
{type: "mon", checked: false, text: "Mandag", index: 0},
{type: "tue", checked: false, text: "Tirsdag", index: 1},
{type: "wed", checked: false, text: "Onsdag", index: 2},
{type: "thu", checked: false, text: "Torsdag", index: 3},
{type: "fri", checked: false, text: "Fredag", index: 4},
{type: "sat", checked: false, text: "Lørdag", index: 5},
{type: "sun", checked: false, text: "Søndag", index:6},
]}
);
const storage = useStorageContext();
//----------------------------------------------------------------useEffects
//Get data from Firebase
useEffect(() => {
setIsLoading(true);
async function readCollection(text) {
| setError(error);
}
}
readCollection("dinners")
}, [])
//set filteredData to "database"
useEffect(() => {
setFilteredData([...database])
}, [database]);
//Apply filter when a filter is added/removed
useEffect(() => {
applyFilter();
}, [isChecked]);
//Organize courses in weekday,friday and sunday-lists
useEffect(() => {
let tempArr = [...filteredData];
if (database !== null) {
const course = item => item.time === 1 || item.time === 2 && item.friday === false && item.sunday === false
tempArr = filter(course, filteredData)
setWeekday(tempArr)
const friday = item => item.friday;
tempArr = filter(friday, filteredData)
setFriday(tempArr);
const sunday = item => item.sunday;
tempArr = filter(sunday, filteredData)
setSunday(tempArr);
const fastFood = item => item.time === 1;
tempArr = filter(fastFood, filteredData);
setFastFood(tempArr);
}
}, [filteredData])
//re-fill dinnerList every time the weekday-array changes
useEffect(() => {
if (weekday.length > 0) {
fillDinnerList();
}
}, [weekday]);
//---------------------------------------------------------------------------Fill the dinnerList
//Sets the 7 days dinner list based on the weekday, friday and sunday arrays
const fillDinnerList = () => {
let list = [];
let tempWeek = [...weekday];
let tempFri = [...friday];
let tempSun = [...sunday];
//Push weekday dinners
for (let i=0; i <= 4; i++) {
let index = randomIndex(tempWeek);
let dinner = tempWeek[index];
list.push(dinner)
tempWeek.splice(index, 1);
}
//Push friday dinner
let f = randomIndex(tempFri);
let fDinner = tempFri[f];
list.splice(4, 0, fDinner);
tempFri.splice(f, 1);
//Push sunday dinner
let s = randomIndex(sunday);
let sDinner = tempSun[s];
list.splice(6, 0, sDinner);
tempSun.splice(s, 1)
list = applyBusyDaysFilter(list);
setDinnerList(list);
}
//----------------------------------------------------------------------------Filter courses
const applyFilter = () => {
let tempArr = database !== null ? [...database] : null;
let meatArr = [];
let fishArr = [];
let vegArr = [];
let glutArr = [];
let lactoseArr = [];
//filters meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "meat") {
let meat = item => item.type === "meat";
meatArr = filter(meat, tempArr)
}
if (param.type === "fish") {
let fish = item => item.type === "fish";
fishArr = filter(fish, tempArr);
}
if (param.type === "veg") {
let veg = item => item.type === "veg";
vegArr = filter(veg, tempArr);
}
}
})
if (fishArr.length > 0 || vegArr.length > 0 || meatArr.length > 0) {
tempArr = [...fishArr, ...vegArr, ...meatArr];
}
//filters glutenFree and lactoseFree - based on the tempArr that's already filtered by meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "glutenFree") {
let glut = item => item.glutenFree;
glutArr = filter(glut, tempArr);
}
else if (param.type === "lactoseFree") {
let lac = item => item.lactoseFree;
lactoseArr = filter(lac, tempArr);
}
}
})
if (glutArr.length > 0 || lactoseArr.length > 0) {
tempArr = [...glutArr, ...lactoseArr];
}
setFilteredData(tempArr);
}
//Changes to a course with time===1 on the days where the user is extra busy
const applyBusyDaysFilter = (array) => {
let newDinnerList = array;
const busyDays = userIsBusy(isChecked.filters);
let newCourse;
busyDays.forEach(dayIndex => {
newCourse = getNewCourse(dinnerList, fastFood);
newDinnerList.splice(dayIndex, 1, newCourse);
})
return newDinnerList;
}
//Toggles filter parameters based on which buttons the user has pressed
const toggleFilters = ({type}) => {
let tempArr = isChecked.filters.map(item => {
if (item.type === type) {
return {...item, checked: !item.checked};
}
return item
})
setIsChecked({filters: tempArr});
}
//shows/hides the modal with filter options
const toggleModal = () => {
setIsModal(!isModal);
}
//---------------------------------------------------------------------------------Change courses
//Changes the course of a certain index in the dinnerList
const changeCourse = ({index}) => {
let newArr = [...dinnerList];
if (index < 4 || index === 5) {
newArr[index] = getNewCourse(dinnerList, weekday)
} else if (index === 4) {
newArr[index] = getNewCourse(dinnerList, friday)
} else {
newArr[index] = getNewCourse(dinnerList, sunday)
}
newArr = applyBusyDaysFilter(newArr);
setDinnerList(newArr);
}
if (error) {
return (
<View style={styles.container}>
<Text h3>{error.message}</Text>
<Button title="Prøv igjen" onPress={() => DevSettings.reload()} />
</View>
)
}
if (isLoading) {
return(
<View style={styles.container}>
<ActivityIndicator/>
<Text h3>Siden lastes inn.</Text>
</View>
)
}
return (
<View style={styles.container}>
<Header
placement="right"
containerStyle={{
backgroundColor: "#f9f9f8"
}}
leftComponent={
<Image
accessibility={true}
accessibilityLabel="Logo"
source={require("../assets/logo_purple.png")}
style={{width: 140, height: 50}}
PlaceholderContent={<ActivityIndicator/>}/>
}
centerComponent={
<Button
accessibilityLabel="Lagre listen"
icon={
<Icon name="save" size={35} color="#a96dd8"/>
}
raised={true}
type="outline"
containerStyle={{height: 50,}}
onPress={() => {storage.saveInStorage(dinnerList)}}/>
}
rightComponent={
<Button
accessibilityLabel="Åpne filter"
icon={
<Icon name="filter" size={40} color="#a96 | try{
const collection = await firebaseInstance.firestore().collection(text)
const readCollection = await collection.get()
let returnArray = [];
readCollection.forEach(item => {
const itemData = item.data() || {};
returnArray.push({
id: item.id,
...itemData
})
})
setDatabase(returnArray);
setIsLoading(false)
}
catch(error) {
setIsLoading(false); | identifier_body |
Home.js | ] = useState([]);
const [database, setDatabase] = useState([])
const [isModal, setIsModal] = useState(false);
const [isChecked, setIsChecked] = useState({
filters:[
{type: "meat", checked: false, text: "Kjøtt"},
{type: "fish", checked: false, text: "Fisk"},
{type: "veg", checked: false, text: "Vegetar"},
{type: "glutenFree", checked: false, text: "Glutenfri"},
{type: "lactoseFree", checked: false, text: "Laktosefri"},
{type: "mon", checked: false, text: "Mandag", index: 0},
{type: "tue", checked: false, text: "Tirsdag", index: 1},
{type: "wed", checked: false, text: "Onsdag", index: 2},
{type: "thu", checked: false, text: "Torsdag", index: 3},
{type: "fri", checked: false, text: "Fredag", index: 4},
{type: "sat", checked: false, text: "Lørdag", index: 5},
{type: "sun", checked: false, text: "Søndag", index:6},
]}
);
const storage = useStorageContext();
//----------------------------------------------------------------useEffects
//Get data from Firebase
useEffect(() => {
setIsLoading(true);
async function readCollection(text) {
try{
const collection = await firebaseInstance.firestore().collection(text)
const readCollection = await collection.get()
let returnArray = [];
readCollection.forEach(item => {
const itemData = item.data() || {};
returnArray.push({
id: item.id,
...itemData
})
})
setDatabase(returnArray);
setIsLoading(false)
}
catch(error) {
setIsLoading(false);
setError(error);
}
}
readCollection("dinners")
}, [])
//set filteredData to "database"
useEffect(() => {
setFilteredData([...database])
}, [database]);
//Apply filter when a filter is added/removed
useEffect(() => {
applyFilter();
}, [isChecked]);
//Organize courses in weekday,friday and sunday-lists
useEffect(() => {
let tempArr = [...filteredData];
if (database !== null) {
const course = item => item.time === 1 || item.time === 2 && item.friday === false && item.sunday === false
tempArr = filter(course, filteredData)
setWeekday(tempArr)
const friday = item => item.friday;
tempArr = filter(friday, filteredData)
setFriday(tempArr);
const sunday = item => item.sunday;
tempArr = filter(sunday, filteredData)
setSunday(tempArr);
const fastFood = item => item.time === 1;
tempArr = filter(fastFood, filteredData);
setFastFood(tempArr);
}
}, [filteredData])
//re-fill dinnerList every time the weekday-array changes
useEffect(() => {
if (weekday.length > 0) {
fillDinnerList();
}
}, [weekday]);
//---------------------------------------------------------------------------Fill the dinnerList
//Sets the 7 days dinner list based on the weekday, friday and sunday arrays
const fillDinnerList = () => {
let list = [];
let tempWeek = [...weekday];
let tempFri = [...friday];
let tempSun = [...sunday];
//Push weekday dinners
for (let i=0; i <= 4; i++) {
let index = randomIndex(tempWeek);
let dinner = tempWeek[index];
list.push(dinner)
tempWeek.splice(index, 1);
}
//Push friday dinner
let f = randomIndex(tempFri);
let fDinner = tempFri[f];
list.splice(4, 0, fDinner);
tempFri.splice(f, 1);
//Push sunday dinner
let s = randomIndex(sunday);
let sDinner = tempSun[s];
list.splice(6, 0, sDinner);
tempSun.splice(s, 1)
list = applyBusyDaysFilter(list);
setDinnerList(list);
}
//----------------------------------------------------------------------------Filter courses
const applyFilter = () => {
let tempArr = database !== null ? [...database] : null;
let meatArr = [];
let fishArr = [];
let vegArr = [];
let glutArr = [];
let lactoseArr = [];
//filters meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "meat") {
let meat = item => item.type === "meat";
meatArr = filter(meat, tempArr)
}
if (param.type === "fish") {
let fish = item => item.type === "fish";
fishArr = filter(fish, tempArr);
}
if (param.type === "veg") {
| }
})
if (fishArr.length > 0 || vegArr.length > 0 || meatArr.length > 0) {
tempArr = [...fishArr, ...vegArr, ...meatArr];
}
//filters glutenFree and lactoseFree - based on the tempArr that's already filtered by meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "glutenFree") {
let glut = item => item.glutenFree;
glutArr = filter(glut, tempArr);
}
else if (param.type === "lactoseFree") {
let lac = item => item.lactoseFree;
lactoseArr = filter(lac, tempArr);
}
}
})
if (glutArr.length > 0 || lactoseArr.length > 0) {
tempArr = [...glutArr, ...lactoseArr];
}
setFilteredData(tempArr);
}
//Changes to a course with time===1 on the days where the user is extra busy
const applyBusyDaysFilter = (array) => {
let newDinnerList = array;
const busyDays = userIsBusy(isChecked.filters);
let newCourse;
busyDays.forEach(dayIndex => {
newCourse = getNewCourse(dinnerList, fastFood);
newDinnerList.splice(dayIndex, 1, newCourse);
})
return newDinnerList;
}
//Toggles filter parameters based on which buttons the user has pressed
const toggleFilters = ({type}) => {
let tempArr = isChecked.filters.map(item => {
if (item.type === type) {
return {...item, checked: !item.checked};
}
return item
})
setIsChecked({filters: tempArr});
}
//shows/hides the modal with filter options
const toggleModal = () => {
setIsModal(!isModal);
}
//---------------------------------------------------------------------------------Change courses
//Changes the course of a certain index in the dinnerList
const changeCourse = ({index}) => {
let newArr = [...dinnerList];
if (index < 4 || index === 5) {
newArr[index] = getNewCourse(dinnerList, weekday)
} else if (index === 4) {
newArr[index] = getNewCourse(dinnerList, friday)
} else {
newArr[index] = getNewCourse(dinnerList, sunday)
}
newArr = applyBusyDaysFilter(newArr);
setDinnerList(newArr);
}
if (error) {
return (
<View style={styles.container}>
<Text h3>{error.message}</Text>
<Button title="Prøv igjen" onPress={() => DevSettings.reload()} />
</View>
)
}
if (isLoading) {
return(
<View style={styles.container}>
<ActivityIndicator/>
<Text h3>Siden lastes inn.</Text>
</View>
)
}
return (
<View style={styles.container}>
<Header
placement="right"
containerStyle={{
backgroundColor: "#f9f9f8"
}}
leftComponent={
<Image
accessibility={true}
accessibilityLabel="Logo"
source={require("../assets/logo_purple.png")}
style={{width: 140, height: 50}}
PlaceholderContent={<ActivityIndicator/>}/>
}
centerComponent={
<Button
accessibilityLabel="Lagre listen"
icon={
<Icon name="save" size={35} color="#a96dd8"/>
}
raised={true}
type="outline"
containerStyle={{height: 50,}}
onPress={() => {storage.saveInStorage(dinnerList)}}/>
}
rightComponent={
<Button
accessibilityLabel="Åpne filter"
icon={
<Icon name="filter" size={40} color="#a96 | let veg = item => item.type === "veg";
vegArr = filter(veg, tempArr);
}
| conditional_block |
Home.js | () {
const [error, setError] = useState(null);
const [isLoading, setIsLoading] = useState(false);
const [weekday, setWeekday] = useState([]);
const [friday, setFriday] = useState([]);
const [sunday, setSunday] = useState([]);
const [fastFood, setFastFood] = useState([]);
const [dinnerList, setDinnerList] = useState(null);
const [filteredData, setFilteredData] = useState([]);
const [database, setDatabase] = useState([])
const [isModal, setIsModal] = useState(false);
const [isChecked, setIsChecked] = useState({
filters:[
{type: "meat", checked: false, text: "Kjøtt"},
{type: "fish", checked: false, text: "Fisk"},
{type: "veg", checked: false, text: "Vegetar"},
{type: "glutenFree", checked: false, text: "Glutenfri"},
{type: "lactoseFree", checked: false, text: "Laktosefri"},
{type: "mon", checked: false, text: "Mandag", index: 0},
{type: "tue", checked: false, text: "Tirsdag", index: 1},
{type: "wed", checked: false, text: "Onsdag", index: 2},
{type: "thu", checked: false, text: "Torsdag", index: 3},
{type: "fri", checked: false, text: "Fredag", index: 4},
{type: "sat", checked: false, text: "Lørdag", index: 5},
{type: "sun", checked: false, text: "Søndag", index:6},
]}
);
const storage = useStorageContext();
//----------------------------------------------------------------useEffects
//Get data from Firebase
useEffect(() => {
setIsLoading(true);
async function readCollection(text) {
try{
const collection = await firebaseInstance.firestore().collection(text)
const readCollection = await collection.get()
let returnArray = [];
readCollection.forEach(item => {
const itemData = item.data() || {};
returnArray.push({
id: item.id,
...itemData
})
})
setDatabase(returnArray);
setIsLoading(false)
}
catch(error) {
setIsLoading(false);
setError(error);
}
}
readCollection("dinners")
}, [])
//set filteredData to "database"
useEffect(() => {
setFilteredData([...database])
}, [database]);
//Apply filter when a filter is added/removed
useEffect(() => {
applyFilter();
}, [isChecked]);
//Organize courses in weekday,friday and sunday-lists
useEffect(() => {
let tempArr = [...filteredData];
if (database !== null) {
const course = item => item.time === 1 || item.time === 2 && item.friday === false && item.sunday === false
tempArr = filter(course, filteredData)
setWeekday(tempArr)
const friday = item => item.friday;
tempArr = filter(friday, filteredData)
setFriday(tempArr);
const sunday = item => item.sunday;
tempArr = filter(sunday, filteredData)
setSunday(tempArr);
const fastFood = item => item.time === 1;
tempArr = filter(fastFood, filteredData);
setFastFood(tempArr);
}
}, [filteredData])
//re-fill dinnerList every time the weekday-array changes
useEffect(() => {
if (weekday.length > 0) {
fillDinnerList();
}
}, [weekday]);
//---------------------------------------------------------------------------Fill the dinnerList
//Sets the 7 days dinner list based on the weekday, friday and sunday arrays
const fillDinnerList = () => {
let list = [];
let tempWeek = [...weekday];
let tempFri = [...friday];
let tempSun = [...sunday];
//Push weekday dinners
for (let i=0; i <= 4; i++) {
let index = randomIndex(tempWeek);
let dinner = tempWeek[index];
list.push(dinner)
tempWeek.splice(index, 1);
}
//Push friday dinner
let f = randomIndex(tempFri);
let fDinner = tempFri[f];
list.splice(4, 0, fDinner);
tempFri.splice(f, 1);
//Push sunday dinner
let s = randomIndex(sunday);
let sDinner = tempSun[s];
list.splice(6, 0, sDinner);
tempSun.splice(s, 1)
list = applyBusyDaysFilter(list);
setDinnerList(list);
}
//----------------------------------------------------------------------------Filter courses
const applyFilter = () => {
let tempArr = database !== null ? [...database] : null;
let meatArr = [];
let fishArr = [];
let vegArr = [];
let glutArr = [];
let lactoseArr = [];
//filters meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "meat") {
let meat = item => item.type === "meat";
meatArr = filter(meat, tempArr)
}
if (param.type === "fish") {
let fish = item => item.type === "fish";
fishArr = filter(fish, tempArr);
}
if (param.type === "veg") {
let veg = item => item.type === "veg";
vegArr = filter(veg, tempArr);
}
}
})
if (fishArr.length > 0 || vegArr.length > 0 || meatArr.length > 0) {
tempArr = [...fishArr, ...vegArr, ...meatArr];
}
//filters glutenFree and lactoseFree - based on the tempArr that's already filtered by meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "glutenFree") {
let glut = item => item.glutenFree;
glutArr = filter(glut, tempArr);
}
else if (param.type === "lactoseFree") {
let lac = item => item.lactoseFree;
lactoseArr = filter(lac, tempArr);
}
}
})
if (glutArr.length > 0 || lactoseArr.length > 0) {
tempArr = [...glutArr, ...lactoseArr];
}
setFilteredData(tempArr);
}
//Changes to a course with time===1 on the days where the user is extra busy
const applyBusyDaysFilter = (array) => {
let newDinnerList = array;
const busyDays = userIsBusy(isChecked.filters);
let newCourse;
busyDays.forEach(dayIndex => {
newCourse = getNewCourse(dinnerList, fastFood);
newDinnerList.splice(dayIndex, 1, newCourse);
})
return newDinnerList;
}
//Toggles filter parameters based on which buttons the user has pressed
const toggleFilters = ({type}) => {
let tempArr = isChecked.filters.map(item => {
if (item.type === type) {
return {...item, checked: !item.checked};
}
return item
})
setIsChecked({filters: tempArr});
}
//shows/hides the modal with filter options
const toggleModal = () => {
setIsModal(!isModal);
}
//---------------------------------------------------------------------------------Change courses
//Changes the course of a certain index in the dinnerList
const changeCourse = ({index}) => {
let newArr = [...dinnerList];
if (index < 4 || index === 5) {
newArr[index] = getNewCourse(dinnerList, weekday)
} else if (index === 4) {
newArr[index] = getNewCourse(dinnerList, friday)
} else {
newArr[index] = getNewCourse(dinnerList, sunday)
}
newArr = applyBusyDaysFilter(newArr);
setDinnerList(newArr);
}
if (error) {
return (
<View style={styles.container}>
<Text h3>{error.message}</Text>
<Button title="Prøv igjen" onPress={() => DevSettings.reload()} />
</View>
)
}
if (isLoading) {
return(
<View style={styles.container}>
<ActivityIndicator/>
<Text h3>Siden lastes inn.</Text>
</View>
)
}
return (
<View style={styles.container}>
<Header
placement="right"
containerStyle={{
backgroundColor: "#f9f9f8"
}}
leftComponent={
<Image
accessibility={true}
accessibilityLabel="Logo"
source={require("../assets/logo_purple.png")}
style={{width: 140, height: 50}}
PlaceholderContent={<ActivityIndicator/>}/>
}
centerComponent={
<Button
accessibilityLabel="Lagre listen"
icon={
< | Home | identifier_name | |
Home.js | ] = useState([]);
const [database, setDatabase] = useState([])
const [isModal, setIsModal] = useState(false);
const [isChecked, setIsChecked] = useState({
filters:[
{type: "meat", checked: false, text: "Kjøtt"},
{type: "fish", checked: false, text: "Fisk"},
{type: "veg", checked: false, text: "Vegetar"},
{type: "glutenFree", checked: false, text: "Glutenfri"},
{type: "lactoseFree", checked: false, text: "Laktosefri"},
{type: "mon", checked: false, text: "Mandag", index: 0},
{type: "tue", checked: false, text: "Tirsdag", index: 1},
{type: "wed", checked: false, text: "Onsdag", index: 2},
{type: "thu", checked: false, text: "Torsdag", index: 3}, | {type: "fri", checked: false, text: "Fredag", index: 4},
{type: "sat", checked: false, text: "Lørdag", index: 5},
{type: "sun", checked: false, text: "Søndag", index:6},
]}
);
const storage = useStorageContext();
//----------------------------------------------------------------useEffects
//Get data from Firebase
useEffect(() => {
setIsLoading(true);
async function readCollection(text) {
try{
const collection = await firebaseInstance.firestore().collection(text)
const readCollection = await collection.get()
let returnArray = [];
readCollection.forEach(item => {
const itemData = item.data() || {};
returnArray.push({
id: item.id,
...itemData
})
})
setDatabase(returnArray);
setIsLoading(false)
}
catch(error) {
setIsLoading(false);
setError(error);
}
}
readCollection("dinners")
}, [])
//set filteredData to "database"
useEffect(() => {
setFilteredData([...database])
}, [database]);
//Apply filter when a filter is added/removed
useEffect(() => {
applyFilter();
}, [isChecked]);
//Organize courses in weekday,friday and sunday-lists
useEffect(() => {
let tempArr = [...filteredData];
if (database !== null) {
const course = item => item.time === 1 || item.time === 2 && item.friday === false && item.sunday === false
tempArr = filter(course, filteredData)
setWeekday(tempArr)
const friday = item => item.friday;
tempArr = filter(friday, filteredData)
setFriday(tempArr);
const sunday = item => item.sunday;
tempArr = filter(sunday, filteredData)
setSunday(tempArr);
const fastFood = item => item.time === 1;
tempArr = filter(fastFood, filteredData);
setFastFood(tempArr);
}
}, [filteredData])
//re-fill dinnerList every time the weekday-array changes
useEffect(() => {
if (weekday.length > 0) {
fillDinnerList();
}
}, [weekday]);
//---------------------------------------------------------------------------Fill the dinnerList
//Sets the 7 days dinner list based on the weekday, friday and sunday arrays
const fillDinnerList = () => {
let list = [];
let tempWeek = [...weekday];
let tempFri = [...friday];
let tempSun = [...sunday];
//Push weekday dinners
for (let i=0; i <= 4; i++) {
let index = randomIndex(tempWeek);
let dinner = tempWeek[index];
list.push(dinner)
tempWeek.splice(index, 1);
}
//Push friday dinner
let f = randomIndex(tempFri);
let fDinner = tempFri[f];
list.splice(4, 0, fDinner);
tempFri.splice(f, 1);
//Push sunday dinner
let s = randomIndex(sunday);
let sDinner = tempSun[s];
list.splice(6, 0, sDinner);
tempSun.splice(s, 1)
list = applyBusyDaysFilter(list);
setDinnerList(list);
}
//----------------------------------------------------------------------------Filter courses
const applyFilter = () => {
let tempArr = database !== null ? [...database] : null;
let meatArr = [];
let fishArr = [];
let vegArr = [];
let glutArr = [];
let lactoseArr = [];
//filters meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "meat") {
let meat = item => item.type === "meat";
meatArr = filter(meat, tempArr)
}
if (param.type === "fish") {
let fish = item => item.type === "fish";
fishArr = filter(fish, tempArr);
}
if (param.type === "veg") {
let veg = item => item.type === "veg";
vegArr = filter(veg, tempArr);
}
}
})
if (fishArr.length > 0 || vegArr.length > 0 || meatArr.length > 0) {
tempArr = [...fishArr, ...vegArr, ...meatArr];
}
//filters glutenFree and lactoseFree - based on the tempArr that's already filtered by meat, fish and vegetarian courses
isChecked.filters.forEach(param => {
if (param.checked) {
if (param.type === "glutenFree") {
let glut = item => item.glutenFree;
glutArr = filter(glut, tempArr);
}
else if (param.type === "lactoseFree") {
let lac = item => item.lactoseFree;
lactoseArr = filter(lac, tempArr);
}
}
})
if (glutArr.length > 0 || lactoseArr.length > 0) {
tempArr = [...glutArr, ...lactoseArr];
}
setFilteredData(tempArr);
}
//Changes to a course with time===1 on the days where the user is extra busy
const applyBusyDaysFilter = (array) => {
let newDinnerList = array;
const busyDays = userIsBusy(isChecked.filters);
let newCourse;
busyDays.forEach(dayIndex => {
newCourse = getNewCourse(dinnerList, fastFood);
newDinnerList.splice(dayIndex, 1, newCourse);
})
return newDinnerList;
}
//Toggles filter parameters based on which buttons the user has pressed
const toggleFilters = ({type}) => {
let tempArr = isChecked.filters.map(item => {
if (item.type === type) {
return {...item, checked: !item.checked};
}
return item
})
setIsChecked({filters: tempArr});
}
//shows/hides the modal with filter options
const toggleModal = () => {
setIsModal(!isModal);
}
//---------------------------------------------------------------------------------Change courses
//Changes the course of a certain index in the dinnerList
const changeCourse = ({index}) => {
let newArr = [...dinnerList];
if (index < 4 || index === 5) {
newArr[index] = getNewCourse(dinnerList, weekday)
} else if (index === 4) {
newArr[index] = getNewCourse(dinnerList, friday)
} else {
newArr[index] = getNewCourse(dinnerList, sunday)
}
newArr = applyBusyDaysFilter(newArr);
setDinnerList(newArr);
}
if (error) {
return (
<View style={styles.container}>
<Text h3>{error.message}</Text>
<Button title="Prøv igjen" onPress={() => DevSettings.reload()} />
</View>
)
}
if (isLoading) {
return(
<View style={styles.container}>
<ActivityIndicator/>
<Text h3>Siden lastes inn.</Text>
</View>
)
}
return (
<View style={styles.container}>
<Header
placement="right"
containerStyle={{
backgroundColor: "#f9f9f8"
}}
leftComponent={
<Image
accessibility={true}
accessibilityLabel="Logo"
source={require("../assets/logo_purple.png")}
style={{width: 140, height: 50}}
PlaceholderContent={<ActivityIndicator/>}/>
}
centerComponent={
<Button
accessibilityLabel="Lagre listen"
icon={
<Icon name="save" size={35} color="#a96dd8"/>
}
raised={true}
type="outline"
containerStyle={{height: 50,}}
onPress={() => {storage.saveInStorage(dinnerList)}}/>
}
rightComponent={
<Button
accessibilityLabel="Åpne filter"
icon={
<Icon name="filter" size={40} color="#a96dd8 | random_line_split | |
proxy.rs | (String),
}
pub trait Authenticate {
fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String>;
}
pub enum AuthzResult {
Allow,
Disallow,
}
pub trait Authorize {
fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String>;
}
pub trait SiteAuthorize {
fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String>;
}
#[derive(Clone)]
pub struct AuthConfig<U, S, A>
where
U: Authenticate + Clone,
S: SiteAuthorize + Clone,
A: Authorize + Clone,
{
authenticate: U,
site: S,
authorize: A,
}
fn handle_tls_raw<C: Connect + 'static>(
req_uuid: uuid::Uuid,
_client: &Client<C>,
upstream_addr: std::net::SocketAddr,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let (resp_tx, resp_rx) = oneshot::channel();
// connect, then on_upgrade()
// this needs to be reworked
// there is a panic in upgrade none
let cpair = TcpStream::connect(&upstream_addr)
.map(|upstream| {
println!("Connection established");
let _ = resp_tx.send(()).unwrap();
upstream
})
.map_err(|err| eprintln!("connect: {}", err));
let upgraded = req.into_body().on_upgrade();
let upg2 = upgraded
.map_err(|err| eprintln!("upgrade: {}", err))
.join(cpair)
.and_then(|(upstream, downstream)| {
println!("In up/down");
let (u2dr, u2dw) = upstream.split();
let (d2ur, d2uw) = downstream.split();
let u2df = copy(u2dr, d2uw);
let d2uf = copy(d2ur, u2dw);
d2uf.join(u2df).map_err(|err| eprintln!("connect: {}", err))
})
.map(|_| ())
.map_err(|e| println!("Error {:?}", e));
hyper::rt::spawn(upg2);
Box::new(
resp_rx
.map(|_| 200)
.or_else(|_| Ok(502))
.and_then(|i| result(i)),
)
// result(200)
}
fn is_mitm(r: &Request<Body>, mitm_enabled: bool) -> bool {
true
}
trait RequestFilter {
type Future: Future<Item = Request<Body>>;
fn filter(&self, req: Request<Body>) -> Self::Future;
}
trait ResponseFilter {
type Future: Future<Item = Response<Body>>;
fn filter(&self, req: Response<Body>) -> Self::Future;
}
#[derive(Clone)]
struct AdWareBlock;
impl SiteAuthorize for AdWareBlock {
fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String> {
if url.starts_with("adservice.google.com") {
return Ok(AuthzResult::Disallow);
}
Ok(AuthzResult::Allow)
}
}
#[derive(Clone)]
struct AllowAll;
impl Authorize for AllowAll {
fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String> {
Ok(AuthzResult::Allow)
}
}
#[derive(Clone)]
struct NoAuth;
impl Authenticate for NoAuth {
fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String> |
}
pub enum Trace {
TraceId(String),
TraceSecurity(String, openssl::x509::X509),
TraceRequest(String, Request<Body>),
TraceResponse(String, Request<Body>),
}
fn make_absolute(req: &mut Request<Body>) {
/* RFC 7312 5.4
When a proxy receives a request with an absolute-form of
request-target, the proxy MUST ignore the received Host header field
(if any) and instead replace it with the host information of the
request-target. A proxy that forwards such a request MUST generate a
new Host field-value based on the received request-target rather than
forward the received Host field-value.
*/
match req.method() {
&Method::CONNECT => {}
_ => {
let nhost: Option<String> = { req.uri().authority_part().map(|a| a.as_str().into()) };
if let Some(n) = nhost {
req.headers_mut()
.insert(http::header::HOST, n.parse().unwrap());
return;
}
let nuri = req.headers().get(http::header::HOST).map(|host| {
let autht: Authority = host.to_str().unwrap().parse().unwrap();
let mut builder = hyper::Uri::builder();
builder.authority(autht);
//TODO(matt) do as map[
if let Some(p) = req.uri().path_and_query() {
builder.path_and_query(p.as_str());
}
if let Some(p) = req.uri().scheme_part() {
builder.scheme(p.as_str());
} else {
// Ok so this kind of sketchy, but since this is fixing up a client connection
// we'll never see an https one. Why? https is via CONNECT at the proxy
builder.scheme("http");
}
builder.build().unwrap()
});
match nuri {
Some(n) => *req.uri_mut() = n,
None => {}
}
}
}
}
#[derive(Clone)]
struct Proxy<U, S, A>
where
U: Authenticate + Sync + Send + Clone + 'static,
S: SiteAuthorize + Sync + Send + Clone + 'static,
A: Authorize + Sync + Send + Clone + 'static,
{
//TODO(matt) - trace filter
tracer: Option<mpsc::Sender<Trace>>,
ca: Arc<ca::CertAuthority>,
auth_config: AuthConfig<U, S, A>,
upstream_ssl_pool: Arc<pool::Pool<tokio_openssl::SslStream<tokio_tcp::TcpStream>>>,
}
impl<U, S, A> Proxy<U, S, A>
where
U: Authenticate + Sync + Send + Clone,
S: SiteAuthorize + Sync + Send + Clone,
A: Authorize + Sync + Send + Clone,
{
// Rework this instead of duping proxy do somehting else
fn dup(&self) -> Proxy<U, S, A> {
Proxy {
tracer: self.tracer.iter().map(|t| t.clone()).next(),
ca: self.ca.clone(),
auth_config: self.auth_config.clone(),
upstream_ssl_pool: pool::Pool::empty(100),
}
}
fn handle<C: Connect + 'static>(
&self,
client: &Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let req_uuid = uuid::Uuid::new_v4();
let hostname = normalize_authority(req.uri());
// TODO this is slow and not async, and crappy
let upstream_addr = match hostname.to_socket_addrs() {
Ok(mut addrs) => match addrs.next() {
Some(addr) => addr,
None => return result(502),
},
Err(e) => {
eprintln!("Upstream resolution: ({}): {}", hostname, e);
return Box::new(futures::future::ok(result_502_resolve_failed(&hostname)));
}
};
let uid = self.auth_config.authenticate.authenticate(&req);
let x = uid
.and_then(|u| {
self.auth_config
.site
.authorize(&u, &hostname)
.map(|r| (u, r))
})
.and_then(|(u, site_result)| {
self.auth_config
.authorize
.authorize(&u, &req)
.map(|ar| (u, site_result, ar))
});
let _user = match x {
Ok((u, AuthzResult::Allow, AuthzResult::Allow)) => u,
Err(_) => return result(401),
_ => return result(403),
};
self.handle_inner(req_uuid, upstream_addr, client, req)
}
fn handle_inner<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
upstream_addr: std::net::SocketAddr,
client: &Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
crappy_log(&req);
let mitm_enabled = true;
match req.method() {
&Method::CONNECT => match is_mitm(&req, mitm_enabled) {
true => self.handle_mitm(req_uuid, client.clone(), upstream_addr, req),
false => handle_tls_raw(req_uuid, client, upstream_addr, req),
},
_ => self.handle_http(req_uuid, client, req),
}
}
fn handle_http_forward<C: Connect + | {
Ok(Identity::Anonymous)
} | identifier_body |
proxy.rs | addrs.next() {
Some(addr) => addr,
None => return result(502),
},
Err(e) => {
eprintln!("Upstream resolution: ({}): {}", hostname, e);
return Box::new(futures::future::ok(result_502_resolve_failed(&hostname)));
}
};
let uid = self.auth_config.authenticate.authenticate(&req);
let x = uid
.and_then(|u| {
self.auth_config
.site
.authorize(&u, &hostname)
.map(|r| (u, r))
})
.and_then(|(u, site_result)| {
self.auth_config
.authorize
.authorize(&u, &req)
.map(|ar| (u, site_result, ar))
});
let _user = match x {
Ok((u, AuthzResult::Allow, AuthzResult::Allow)) => u,
Err(_) => return result(401),
_ => return result(403),
};
self.handle_inner(req_uuid, upstream_addr, client, req)
}
fn handle_inner<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
upstream_addr: std::net::SocketAddr,
client: &Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
crappy_log(&req);
let mitm_enabled = true;
match req.method() {
&Method::CONNECT => match is_mitm(&req, mitm_enabled) {
true => self.handle_mitm(req_uuid, client.clone(), upstream_addr, req),
false => handle_tls_raw(req_uuid, client, upstream_addr, req),
},
_ => self.handle_http(req_uuid, client, req),
}
}
fn handle_http_forward<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
mut client: Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let client = client.request(req);
match self.tracer.clone() {
Some(tx) => {
let f = tx
.send(Trace::TraceId(format!("{}", req_uuid)))
.map_err(|e| {
println!("Error in trace: {:?}", e);
io::Error::from(io::ErrorKind::Other)
});
Box::new(
f.join(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
}))
.map(|(_, b)| b),
)
}
None => Box::new(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
})),
}
}
fn handle_http<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
client: &Client<C>,
mut req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
make_absolute(&mut req);
let client = client.clone().request(req);
match self.tracer.clone() {
Some(tx) => {
let f = tx
.send(Trace::TraceId(format!("{}", req_uuid)))
.map_err(|e| {
println!("Error in trace: {:?}", e);
io::Error::from(io::ErrorKind::Other)
});
Box::new(
f.join(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
}))
.map(|(_, b)| b),
)
}
None => Box::new(client.map(|resp| resp).map_err(|e| {
println!("Error in upstream: {:?}", e);
io::Error::from(io::ErrorKind::Other)
})),
}
}
fn handle_mitm<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
client: Client<C>,
upstream_addr: std::net::SocketAddr,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let (resp_tx, resp_rx) = oneshot::channel();
// connect, then on_upgrade()
// this needs to be reworked
// there is a panic in upgrade none
let authority = req.uri().authority_part().unwrap().clone();
let cpair = TcpStream::connect(&upstream_addr)
.map_err(|err| eprintln!("mitm tcp connect: {}", err))
.and_then(move |upstream| {
let cx = SslConnector::builder(SslMethod::tls()).unwrap().build();
cx.connect_async(authority.host(), upstream)
.map(|ssl_conn| {
let _ = resp_tx.send(()).unwrap();
println!("MITM Connection established");
let peer_cert =
{ ssl_conn.get_ref().ssl().peer_certificate().unwrap().clone() };
(ssl_conn, peer_cert)
})
.map_err(|e| println!("tls error: {:}", e))
});
let upgraded = req.into_body().on_upgrade();
let ca = self.ca.clone();
let np = self.clone();
let req_uuid = req_uuid.clone();
let upg2 = upgraded
.map_err(|err| eprintln!("upgrade: {}", err))
.join(cpair)
.and_then(move |tuple| {
let (downstream, (upstream, peer_cert)) = tuple;
let ca = ca;
let req_uuid = req_uuid;
let peer_cert_signed = ca.sign_cert_from_cert(&peer_cert).unwrap();
let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
acceptor.set_private_key(ca.child_key.as_ref()).unwrap();
acceptor.set_certificate(peer_cert_signed.as_ref()).unwrap();
acceptor.check_private_key().unwrap();
let acceptor = acceptor.build();
acceptor
.accept_async(downstream)
.map_err(|e| eprintln!("accept: {}", e))
.and_then(move |tls_downstream| {
// This should cause the pool to have a single entry
// and then magic
let upstream_pool = {
let local_pool = pool::Pool::empty(1);
let pooled_upstream = pool::PoolItem::new(upstream);
pool::PoolItem::attach(pooled_upstream, local_pool.clone());
local_pool
};
Http::new()
.serve_connection(
tls_downstream,
service_fn(move |req: Request<Body>| {
let upstream_pool = upstream_pool.clone();
let uc = Client::builder()
.keep_alive(false)
.build(AlreadyConnected(upstream_pool));
// println!("In inner client handler: {} {:?}", req_uuid, req);
np.handle_http(req_uuid, &uc, req)
}),
)
.map_err(|err| {
eprintln!("Error in inner http: {}", err);
()
})
// This is proxy without analysis, just forward
// serve_connection
// let (u2dr, u2dw) = upstream_conn.split();
// let (d2ur, d2uw) = tls_downstream.split();
// let u2df = copy(u2dr, d2uw);
// let d2uf = copy(d2ur, u2dw);
// d2uf.join(u2df)
// .map_err(|err| eprintln!("mitm forward: {}", err));
})
})
.map(|_| ())
.map_err(|e| println!("Error {:?}", e));
hyper::rt::spawn(upg2);
Box::new(
resp_rx
.map(|_| 200)
.or_else(|_| Ok(502))
.and_then(|i| result(i)),
)
}
}
struct AlreadyConnected<T: Send + 'static + AsyncRead + AsyncWrite + 'static + Sync>(
Arc<pool::Pool<T>>,
);
impl<T: Send + 'static + AsyncRead + AsyncWrite + 'static + Sync> Connect for AlreadyConnected<T> {
type Transport = pool::PoolItem<T>;
/// An error occured when trying to connect.
type Error = io::Error;
/// A Future that will resolve to the connected Transport.
type Future = Box<Future<Item = (Self::Transport, Connected), Error = Self::Error> + Send>;
/// Connect to a destination.
fn connect(&self, _: hyper::client::connect::Destination) -> Self::Future {
let o = pool::Pool::checkout(self.0.clone()).unwrap();
Box::new(futures::future::ok((
o,
hyper::client::connect::Connected::new(),
)))
}
}
fn trace_handler(mut rx: mpsc::Receiver<Trace>) {
let _t = std::thread::spawn(move || {
let done = rx.for_each(|tx| {
match tx {
Trace::TraceId(uuid) => {
println!("Begin Tracing {}", uuid);
}
_ => {} | }
println!("Trace recv");
Ok(())
}); | random_line_split | |
proxy.rs | (String),
}
pub trait Authenticate {
fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String>;
}
pub enum AuthzResult {
Allow,
Disallow,
}
pub trait Authorize {
fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String>;
}
pub trait SiteAuthorize {
fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String>;
}
#[derive(Clone)]
pub struct AuthConfig<U, S, A>
where
U: Authenticate + Clone,
S: SiteAuthorize + Clone,
A: Authorize + Clone,
{
authenticate: U,
site: S,
authorize: A,
}
fn handle_tls_raw<C: Connect + 'static>(
req_uuid: uuid::Uuid,
_client: &Client<C>,
upstream_addr: std::net::SocketAddr,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let (resp_tx, resp_rx) = oneshot::channel();
// connect, then on_upgrade()
// this needs to be reworked
// there is a panic in upgrade none
let cpair = TcpStream::connect(&upstream_addr)
.map(|upstream| {
println!("Connection established");
let _ = resp_tx.send(()).unwrap();
upstream
})
.map_err(|err| eprintln!("connect: {}", err));
let upgraded = req.into_body().on_upgrade();
let upg2 = upgraded
.map_err(|err| eprintln!("upgrade: {}", err))
.join(cpair)
.and_then(|(upstream, downstream)| {
println!("In up/down");
let (u2dr, u2dw) = upstream.split();
let (d2ur, d2uw) = downstream.split();
let u2df = copy(u2dr, d2uw);
let d2uf = copy(d2ur, u2dw);
d2uf.join(u2df).map_err(|err| eprintln!("connect: {}", err))
})
.map(|_| ())
.map_err(|e| println!("Error {:?}", e));
hyper::rt::spawn(upg2);
Box::new(
resp_rx
.map(|_| 200)
.or_else(|_| Ok(502))
.and_then(|i| result(i)),
)
// result(200)
}
fn is_mitm(r: &Request<Body>, mitm_enabled: bool) -> bool {
true
}
trait RequestFilter {
type Future: Future<Item = Request<Body>>;
fn filter(&self, req: Request<Body>) -> Self::Future;
}
trait ResponseFilter {
type Future: Future<Item = Response<Body>>;
fn filter(&self, req: Response<Body>) -> Self::Future;
}
#[derive(Clone)]
struct AdWareBlock;
impl SiteAuthorize for AdWareBlock {
fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String> {
if url.starts_with("adservice.google.com") {
return Ok(AuthzResult::Disallow);
}
Ok(AuthzResult::Allow)
}
}
#[derive(Clone)]
struct AllowAll;
impl Authorize for AllowAll {
fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String> {
Ok(AuthzResult::Allow)
}
}
#[derive(Clone)]
struct NoAuth;
impl Authenticate for NoAuth {
fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String> {
Ok(Identity::Anonymous)
}
}
pub enum Trace {
TraceId(String),
TraceSecurity(String, openssl::x509::X509),
TraceRequest(String, Request<Body>),
TraceResponse(String, Request<Body>),
}
fn make_absolute(req: &mut Request<Body>) {
/* RFC 7312 5.4
When a proxy receives a request with an absolute-form of
request-target, the proxy MUST ignore the received Host header field
(if any) and instead replace it with the host information of the
request-target. A proxy that forwards such a request MUST generate a
new Host field-value based on the received request-target rather than
forward the received Host field-value.
*/
match req.method() {
&Method::CONNECT => {}
_ => {
let nhost: Option<String> = { req.uri().authority_part().map(|a| a.as_str().into()) };
if let Some(n) = nhost {
req.headers_mut()
.insert(http::header::HOST, n.parse().unwrap());
return;
}
let nuri = req.headers().get(http::header::HOST).map(|host| {
let autht: Authority = host.to_str().unwrap().parse().unwrap();
let mut builder = hyper::Uri::builder();
builder.authority(autht);
//TODO(matt) do as map[
if let Some(p) = req.uri().path_and_query() {
builder.path_and_query(p.as_str());
}
if let Some(p) = req.uri().scheme_part() {
builder.scheme(p.as_str());
} else {
// Ok so this kind of sketchy, but since this is fixing up a client connection
// we'll never see an https one. Why? https is via CONNECT at the proxy
builder.scheme("http");
}
builder.build().unwrap()
});
match nuri {
Some(n) => *req.uri_mut() = n,
None => {}
}
}
}
}
#[derive(Clone)]
struct Proxy<U, S, A>
where
U: Authenticate + Sync + Send + Clone + 'static,
S: SiteAuthorize + Sync + Send + Clone + 'static,
A: Authorize + Sync + Send + Clone + 'static,
{
//TODO(matt) - trace filter
tracer: Option<mpsc::Sender<Trace>>,
ca: Arc<ca::CertAuthority>,
auth_config: AuthConfig<U, S, A>,
upstream_ssl_pool: Arc<pool::Pool<tokio_openssl::SslStream<tokio_tcp::TcpStream>>>,
}
impl<U, S, A> Proxy<U, S, A>
where
U: Authenticate + Sync + Send + Clone,
S: SiteAuthorize + Sync + Send + Clone,
A: Authorize + Sync + Send + Clone,
{
// Rework this instead of duping proxy do somehting else
fn | (&self) -> Proxy<U, S, A> {
Proxy {
tracer: self.tracer.iter().map(|t| t.clone()).next(),
ca: self.ca.clone(),
auth_config: self.auth_config.clone(),
upstream_ssl_pool: pool::Pool::empty(100),
}
}
fn handle<C: Connect + 'static>(
&self,
client: &Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
let req_uuid = uuid::Uuid::new_v4();
let hostname = normalize_authority(req.uri());
// TODO this is slow and not async, and crappy
let upstream_addr = match hostname.to_socket_addrs() {
Ok(mut addrs) => match addrs.next() {
Some(addr) => addr,
None => return result(502),
},
Err(e) => {
eprintln!("Upstream resolution: ({}): {}", hostname, e);
return Box::new(futures::future::ok(result_502_resolve_failed(&hostname)));
}
};
let uid = self.auth_config.authenticate.authenticate(&req);
let x = uid
.and_then(|u| {
self.auth_config
.site
.authorize(&u, &hostname)
.map(|r| (u, r))
})
.and_then(|(u, site_result)| {
self.auth_config
.authorize
.authorize(&u, &req)
.map(|ar| (u, site_result, ar))
});
let _user = match x {
Ok((u, AuthzResult::Allow, AuthzResult::Allow)) => u,
Err(_) => return result(401),
_ => return result(403),
};
self.handle_inner(req_uuid, upstream_addr, client, req)
}
fn handle_inner<C: Connect + 'static>(
&self,
req_uuid: uuid::Uuid,
upstream_addr: std::net::SocketAddr,
client: &Client<C>,
req: Request<Body>,
) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> {
crappy_log(&req);
let mitm_enabled = true;
match req.method() {
&Method::CONNECT => match is_mitm(&req, mitm_enabled) {
true => self.handle_mitm(req_uuid, client.clone(), upstream_addr, req),
false => handle_tls_raw(req_uuid, client, upstream_addr, req),
},
_ => self.handle_http(req_uuid, client, req),
}
}
fn handle_http_forward<C: Connect + ' | dup | identifier_name |
partdisk.go | .frandi = basedir+"/"+randstring(rstl)
}
//Creates a random string made of lower case letters only
func randstring(size int) string {
rval := make([]byte,size)
rand.Seed(time.Now().UnixNano())
for i:=0; i<size; i++ {
rval[i] = byte(rand.Intn(26) + 97)
}
return string(rval)
}
//Creates a random list of bytes with printable ASCII characters
func randbytes(size uint64) []byte {
const blength int = 1024
rval := make([]byte,size)
var base [blength]byte
var counter, index uint64
//Fill up the base array with random printable characters
rand.Seed(time.Now().UnixNano())
for x:=0; x<len(base); x++ {
base[x]=byte(rand.Intn(95) + 32) //ASCII 32 to 126
}
//Fill the rval slice with pseudorandom characters picked from the base array
for i:=uint64(0); i<size; i++ {
//This psuedo random algorith is explained in the documentation
counter += i + uint64(base[i%uint64(blength)])
index = counter%uint64(len(base))
rval[i]=base[index]
if i%uint64(blength) == 0 {
counter = uint64(rand.Intn(blength))
}
}
return rval
}
//Get the total number of bytes used up by the files already created
func (fc FileCollection) totalFileSize() (uint64,error) {
var tfsize uint64
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("totalSizeFiles(): Error listing directory: %s\n%s",directory,err.Error())
return 0,err
}
for _,v := range fileList {
tfsize += uint64(v.Size())
}
}
return tfsize,nil
}
//Compute the number of files of each size required for the size requested
//tsize contains the number of bytes to allocate
//hlimit is the maximum size that can be requested
func DefineFiles(tsize uint64, hilimit uint64, flS *FileCollection) error {
var nfiles, remain uint64
tfs, err := flS.totalFileSize()
if err != nil {
log.Printf("DefineFiles(): Error computing total file size: %s", err.Error())
return err
}
if tsize > tfs && tsize > hilimit { //Trying to add files and the total size exceeds the limit
return fmt.Errorf("Size requested is over the limit: requested %d bytes, limit: %d bytes.", tsize, hilimit)
}
for index, fsize := range flS.fileSizes {
nfiles = tsize / fsize
remain = tsize % fsize
if nfiles > limitFiles { //Use all files of this size, keep adding more files of higher capacities
tsize -= limitFiles * fsize
flS.fileAmmount[index] = limitFiles
} else if nfiles == 0 {
flS.fileAmmount[index] = 0
} else {
tsize -= nfiles * fsize
flS.fileAmmount[index] = nfiles
}
}
if tsize > flS.fileSizes[len(flS.fileSizes)-1] { //The remaining size to allocate is bigger than the biggest file sezie, Add more parts of the maximum size
nfiles = tsize / flS.fileSizes[len(flS.fileSizes)-1]
remain = tsize % flS.fileSizes[len(flS.fileSizes)-1]
flS.fileAmmount[len(flS.fileAmmount)-1] += nfiles
}
if remain > 0 { //The remain must be smaller than the bigger file size.
for index, fsize := range flS.fileSizes {
if remain <= 3*fsize {
signRemain := int(remain)
for signRemain > 0 {
flS.fileAmmount[index]++
signRemain -= int(fsize)
}
break
}
}
}
return nil
}
//Prints the number of _file_ elements defined | for index, value := range fS.fileSizes {
semiTotal += value * fS.fileAmmount[index]
rst += fmt.Sprintf("Files of size: %d, count: %d, total size: %d\n", value, fS.fileAmmount[index], value*fS.fileAmmount[index])
}
rst += fmt.Sprintf("Total size reserved: %d bytes.\n", semiTotal)
return rst
}
//Generate a message with information about the actual ammount and size of the existing files
func (fc FileCollection) GetActFiles() string {
var mensj string
var totalSize int64
mensj += fmt.Sprintf("Last request ID: %d\n",fc.flid)
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("GetActFiles(): Error listing directory: %s\n%s",directory,err.Error())
return "Error getting files information\n"
}
mensj += fmt.Sprintf("Files of size: %d, Count: %d\n", fsize,len(fileList))
for _,fl := range fileList{
totalSize += fl.Size()
}
}
mensj += fmt.Sprintf("Total size: %d bytes.\n",totalSize)
return mensj
}
//Create or remove files to reach the requested number of files of each size
func CreateFiles(fS *FileCollection, ts int64, filelock chan int64) {
var lt time.Time
var err error
select {
case <- time.After(5 * time.Second):
//If 5 seconds pass without getting the proper lock, abort
log.Printf("partdisk.CreateFiles(): timeout waiting for lock\n")
return
case chts := <- filelock:
if chts == ts { //Got the lock and it matches the timestamp received
//Proceed
fS.flid = ts
defer func(){
filelock <- 0 //Release lock
}()
lt = time.Now() //Start counting how long does the parts creation take
log.Printf("CreateFiles(): lock obtained, timestamps match: %d\n",ts)
} else {
log.Printf("CreateFiles(): lock obtained, but timestamps missmatch: %d - %d\n", ts,chts)
filelock <- chts
return
}
}
//Lock obtained proper, create/delete the files
err = adrefiles(fS)
if err != nil {
log.Printf("CreateFiles(): Error creating file: %s\n",err.Error())
return
}
log.Printf("CreateFiles(): Request %d completed in %d seconds\n",ts,int64(time.Since(lt).Seconds()))
}
//Add or remove files match the files definition in the FileCollection struct
func adrefiles(fS *FileCollection) error {
for index,value := range fS.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fS.frandi,value)
//Create a list of files in directory
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("adrefiles(): Error listing directory: %s",directory)
return err
}
//Sort the list of files
sort.Slice(fileList, func(i,j int) bool {
s1 := strings.TrimLeft(fileList[i].Name(),"f-")
s2 := strings.TrimLeft(fileList[j].Name(),"f-")
n1,_ := strconv.ParseInt(s1,10,32)
n2,_ := strconv.ParseInt(s2,10,32)
return n1 < n2
})
//Get the number of the last file created, 0 if none has been
var lastfnum uint64
if len(fileList) > 0 {
lastfnum,_ = strconv.ParseUint(strings.TrimLeft(fileList[len(fileList)-1].Name(),"f-"),10,32)
} else {
lastfnum = 0
}
log.Printf("Last file number: %d",lastfnum)
//Get the total size in bytes consumed by the files
var tfsize,rqsize,deltasize,fdelta uint64
for _,v := range fileList {
tfsize += uint64(v.Size())
//log.Printf("File: %s - Size: %d",v.Name(),v.Size())
}
log.Printf("Total file size in dir %s: %d",directory,tfsize)
rqsize = fS.fileAmmount[index]* | func GetDefFiles(fS *FileCollection) string {
var semiTotal uint64
var rst string | random_line_split |
partdisk.go | randi = basedir+"/"+randstring(rstl)
}
//Creates a random string made of lower case letters only
func randstring(size int) string {
rval := make([]byte,size)
rand.Seed(time.Now().UnixNano())
for i:=0; i<size; i++ {
rval[i] = byte(rand.Intn(26) + 97)
}
return string(rval)
}
//Creates a random list of bytes with printable ASCII characters
func randbytes(size uint64) []byte {
const blength int = 1024
rval := make([]byte,size)
var base [blength]byte
var counter, index uint64
//Fill up the base array with random printable characters
rand.Seed(time.Now().UnixNano())
for x:=0; x<len(base); x++ {
base[x]=byte(rand.Intn(95) + 32) //ASCII 32 to 126
}
//Fill the rval slice with pseudorandom characters picked from the base array
for i:=uint64(0); i<size; i++ {
//This psuedo random algorith is explained in the documentation
counter += i + uint64(base[i%uint64(blength)])
index = counter%uint64(len(base))
rval[i]=base[index]
if i%uint64(blength) == 0 {
counter = uint64(rand.Intn(blength))
}
}
return rval
}
//Get the total number of bytes used up by the files already created
func (fc FileCollection) totalFileSize() (uint64,error) {
var tfsize uint64
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("totalSizeFiles(): Error listing directory: %s\n%s",directory,err.Error())
return 0,err
}
for _,v := range fileList {
tfsize += uint64(v.Size())
}
}
return tfsize,nil
}
//Compute the number of files of each size required for the size requested
//tsize contains the number of bytes to allocate
//hlimit is the maximum size that can be requested
func DefineFiles(tsize uint64, hilimit uint64, flS *FileCollection) error {
var nfiles, remain uint64
tfs, err := flS.totalFileSize()
if err != nil {
log.Printf("DefineFiles(): Error computing total file size: %s", err.Error())
return err
}
if tsize > tfs && tsize > hilimit { //Trying to add files and the total size exceeds the limit
return fmt.Errorf("Size requested is over the limit: requested %d bytes, limit: %d bytes.", tsize, hilimit)
}
for index, fsize := range flS.fileSizes {
nfiles = tsize / fsize
remain = tsize % fsize
if nfiles > limitFiles { //Use all files of this size, keep adding more files of higher capacities
tsize -= limitFiles * fsize
flS.fileAmmount[index] = limitFiles
} else if nfiles == 0 {
flS.fileAmmount[index] = 0
} else {
tsize -= nfiles * fsize
flS.fileAmmount[index] = nfiles
}
}
if tsize > flS.fileSizes[len(flS.fileSizes)-1] { //The remaining size to allocate is bigger than the biggest file sezie, Add more parts of the maximum size
nfiles = tsize / flS.fileSizes[len(flS.fileSizes)-1]
remain = tsize % flS.fileSizes[len(flS.fileSizes)-1]
flS.fileAmmount[len(flS.fileAmmount)-1] += nfiles
}
if remain > 0 { //The remain must be smaller than the bigger file size.
for index, fsize := range flS.fileSizes {
if remain <= 3*fsize {
signRemain := int(remain)
for signRemain > 0 {
flS.fileAmmount[index]++
signRemain -= int(fsize)
}
break
}
}
}
return nil
}
//Prints the number of _file_ elements defined
func GetDefFiles(fS *FileCollection) string {
var semiTotal uint64
var rst string
for index, value := range fS.fileSizes {
semiTotal += value * fS.fileAmmount[index]
rst += fmt.Sprintf("Files of size: %d, count: %d, total size: %d\n", value, fS.fileAmmount[index], value*fS.fileAmmount[index])
}
rst += fmt.Sprintf("Total size reserved: %d bytes.\n", semiTotal)
return rst
}
//Generate a message with information about the actual ammount and size of the existing files
func (fc FileCollection) GetActFiles() string {
var mensj string
var totalSize int64
mensj += fmt.Sprintf("Last request ID: %d\n",fc.flid)
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("GetActFiles(): Error listing directory: %s\n%s",directory,err.Error())
return "Error getting files information\n"
}
mensj += fmt.Sprintf("Files of size: %d, Count: %d\n", fsize,len(fileList))
for _,fl := range fileList{
totalSize += fl.Size()
}
}
mensj += fmt.Sprintf("Total size: %d bytes.\n",totalSize)
return mensj
}
//Create or remove files to reach the requested number of files of each size
func | (fS *FileCollection, ts int64, filelock chan int64) {
var lt time.Time
var err error
select {
case <- time.After(5 * time.Second):
//If 5 seconds pass without getting the proper lock, abort
log.Printf("partdisk.CreateFiles(): timeout waiting for lock\n")
return
case chts := <- filelock:
if chts == ts { //Got the lock and it matches the timestamp received
//Proceed
fS.flid = ts
defer func(){
filelock <- 0 //Release lock
}()
lt = time.Now() //Start counting how long does the parts creation take
log.Printf("CreateFiles(): lock obtained, timestamps match: %d\n",ts)
} else {
log.Printf("CreateFiles(): lock obtained, but timestamps missmatch: %d - %d\n", ts,chts)
filelock <- chts
return
}
}
//Lock obtained proper, create/delete the files
err = adrefiles(fS)
if err != nil {
log.Printf("CreateFiles(): Error creating file: %s\n",err.Error())
return
}
log.Printf("CreateFiles(): Request %d completed in %d seconds\n",ts,int64(time.Since(lt).Seconds()))
}
//Add or remove files match the files definition in the FileCollection struct
func adrefiles(fS *FileCollection) error {
for index,value := range fS.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fS.frandi,value)
//Create a list of files in directory
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("adrefiles(): Error listing directory: %s",directory)
return err
}
//Sort the list of files
sort.Slice(fileList, func(i,j int) bool {
s1 := strings.TrimLeft(fileList[i].Name(),"f-")
s2 := strings.TrimLeft(fileList[j].Name(),"f-")
n1,_ := strconv.ParseInt(s1,10,32)
n2,_ := strconv.ParseInt(s2,10,32)
return n1 < n2
})
//Get the number of the last file created, 0 if none has been
var lastfnum uint64
if len(fileList) > 0 {
lastfnum,_ = strconv.ParseUint(strings.TrimLeft(fileList[len(fileList)-1].Name(),"f-"),10,32)
} else {
lastfnum = 0
}
log.Printf("Last file number: %d",lastfnum)
//Get the total size in bytes consumed by the files
var tfsize,rqsize,deltasize,fdelta uint64
for _,v := range fileList {
tfsize += uint64(v.Size())
//log.Printf("File: %s - Size: %d",v.Name(),v.Size())
}
log.Printf("Total file size in dir %s: %d",directory,tfsize)
rqsize = fS.fileAmmount | CreateFiles | identifier_name |
partdisk.go | randi = basedir+"/"+randstring(rstl)
}
//Creates a random string made of lower case letters only
func randstring(size int) string {
rval := make([]byte,size)
rand.Seed(time.Now().UnixNano())
for i:=0; i<size; i++ {
rval[i] = byte(rand.Intn(26) + 97)
}
return string(rval)
}
//Creates a random list of bytes with printable ASCII characters
func randbytes(size uint64) []byte {
const blength int = 1024
rval := make([]byte,size)
var base [blength]byte
var counter, index uint64
//Fill up the base array with random printable characters
rand.Seed(time.Now().UnixNano())
for x:=0; x<len(base); x++ {
base[x]=byte(rand.Intn(95) + 32) //ASCII 32 to 126
}
//Fill the rval slice with pseudorandom characters picked from the base array
for i:=uint64(0); i<size; i++ {
//This psuedo random algorith is explained in the documentation
counter += i + uint64(base[i%uint64(blength)])
index = counter%uint64(len(base))
rval[i]=base[index]
if i%uint64(blength) == 0 {
counter = uint64(rand.Intn(blength))
}
}
return rval
}
//Get the total number of bytes used up by the files already created
func (fc FileCollection) totalFileSize() (uint64,error) {
var tfsize uint64
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("totalSizeFiles(): Error listing directory: %s\n%s",directory,err.Error())
return 0,err
}
for _,v := range fileList {
tfsize += uint64(v.Size())
}
}
return tfsize,nil
}
//Compute the number of files of each size required for the size requested
//tsize contains the number of bytes to allocate
//hlimit is the maximum size that can be requested
func DefineFiles(tsize uint64, hilimit uint64, flS *FileCollection) error {
var nfiles, remain uint64
tfs, err := flS.totalFileSize()
if err != nil {
log.Printf("DefineFiles(): Error computing total file size: %s", err.Error())
return err
}
if tsize > tfs && tsize > hilimit { //Trying to add files and the total size exceeds the limit
return fmt.Errorf("Size requested is over the limit: requested %d bytes, limit: %d bytes.", tsize, hilimit)
}
for index, fsize := range flS.fileSizes {
nfiles = tsize / fsize
remain = tsize % fsize
if nfiles > limitFiles { //Use all files of this size, keep adding more files of higher capacities
tsize -= limitFiles * fsize
flS.fileAmmount[index] = limitFiles
} else if nfiles == 0 {
flS.fileAmmount[index] = 0
} else {
tsize -= nfiles * fsize
flS.fileAmmount[index] = nfiles
}
}
if tsize > flS.fileSizes[len(flS.fileSizes)-1] { //The remaining size to allocate is bigger than the biggest file sezie, Add more parts of the maximum size
nfiles = tsize / flS.fileSizes[len(flS.fileSizes)-1]
remain = tsize % flS.fileSizes[len(flS.fileSizes)-1]
flS.fileAmmount[len(flS.fileAmmount)-1] += nfiles
}
if remain > 0 { //The remain must be smaller than the bigger file size.
for index, fsize := range flS.fileSizes {
if remain <= 3*fsize {
signRemain := int(remain)
for signRemain > 0 {
flS.fileAmmount[index]++
signRemain -= int(fsize)
}
break
}
}
}
return nil
}
//Prints the number of _file_ elements defined
func GetDefFiles(fS *FileCollection) string {
var semiTotal uint64
var rst string
for index, value := range fS.fileSizes {
semiTotal += value * fS.fileAmmount[index]
rst += fmt.Sprintf("Files of size: %d, count: %d, total size: %d\n", value, fS.fileAmmount[index], value*fS.fileAmmount[index])
}
rst += fmt.Sprintf("Total size reserved: %d bytes.\n", semiTotal)
return rst
}
//Generate a message with information about the actual ammount and size of the existing files
func (fc FileCollection) GetActFiles() string {
var mensj string
var totalSize int64
mensj += fmt.Sprintf("Last request ID: %d\n",fc.flid)
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("GetActFiles(): Error listing directory: %s\n%s",directory,err.Error())
return "Error getting files information\n"
}
mensj += fmt.Sprintf("Files of size: %d, Count: %d\n", fsize,len(fileList))
for _,fl := range fileList{
totalSize += fl.Size()
}
}
mensj += fmt.Sprintf("Total size: %d bytes.\n",totalSize)
return mensj
}
//Create or remove files to reach the requested number of files of each size
func CreateFiles(fS *FileCollection, ts int64, filelock chan int64) | filelock <- chts
return
}
}
//Lock obtained proper, create/delete the files
err = adrefiles(fS)
if err != nil {
log.Printf("CreateFiles(): Error creating file: %s\n",err.Error())
return
}
log.Printf("CreateFiles(): Request %d completed in %d seconds\n",ts,int64(time.Since(lt).Seconds()))
}
//Add or remove files match the files definition in the FileCollection struct
func adrefiles(fS *FileCollection) error {
for index,value := range fS.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fS.frandi,value)
//Create a list of files in directory
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("adrefiles(): Error listing directory: %s",directory)
return err
}
//Sort the list of files
sort.Slice(fileList, func(i,j int) bool {
s1 := strings.TrimLeft(fileList[i].Name(),"f-")
s2 := strings.TrimLeft(fileList[j].Name(),"f-")
n1,_ := strconv.ParseInt(s1,10,32)
n2,_ := strconv.ParseInt(s2,10,32)
return n1 < n2
})
//Get the number of the last file created, 0 if none has been
var lastfnum uint64
if len(fileList) > 0 {
lastfnum,_ = strconv.ParseUint(strings.TrimLeft(fileList[len(fileList)-1].Name(),"f-"),10,32)
} else {
lastfnum = 0
}
log.Printf("Last file number: %d",lastfnum)
//Get the total size in bytes consumed by the files
var tfsize,rqsize,deltasize,fdelta uint64
for _,v := range fileList {
tfsize += uint64(v.Size())
//log.Printf("File: %s - Size: %d",v.Name(),v.Size())
}
log.Printf("Total file size in dir %s: %d",directory,tfsize)
rqsize = fS.fileAmmount | {
var lt time.Time
var err error
select {
case <- time.After(5 * time.Second):
//If 5 seconds pass without getting the proper lock, abort
log.Printf("partdisk.CreateFiles(): timeout waiting for lock\n")
return
case chts := <- filelock:
if chts == ts { //Got the lock and it matches the timestamp received
//Proceed
fS.flid = ts
defer func(){
filelock <- 0 //Release lock
}()
lt = time.Now() //Start counting how long does the parts creation take
log.Printf("CreateFiles(): lock obtained, timestamps match: %d\n",ts)
} else {
log.Printf("CreateFiles(): lock obtained, but timestamps missmatch: %d - %d\n", ts,chts) | identifier_body |
partdisk.go | }
}
return tfsize,nil
}
//Compute the number of files of each size required for the size requested
//tsize contains the number of bytes to allocate
//hlimit is the maximum size that can be requested
func DefineFiles(tsize uint64, hilimit uint64, flS *FileCollection) error {
var nfiles, remain uint64
tfs, err := flS.totalFileSize()
if err != nil {
log.Printf("DefineFiles(): Error computing total file size: %s", err.Error())
return err
}
if tsize > tfs && tsize > hilimit { //Trying to add files and the total size exceeds the limit
return fmt.Errorf("Size requested is over the limit: requested %d bytes, limit: %d bytes.", tsize, hilimit)
}
for index, fsize := range flS.fileSizes {
nfiles = tsize / fsize
remain = tsize % fsize
if nfiles > limitFiles { //Use all files of this size, keep adding more files of higher capacities
tsize -= limitFiles * fsize
flS.fileAmmount[index] = limitFiles
} else if nfiles == 0 {
flS.fileAmmount[index] = 0
} else {
tsize -= nfiles * fsize
flS.fileAmmount[index] = nfiles
}
}
if tsize > flS.fileSizes[len(flS.fileSizes)-1] { //The remaining size to allocate is bigger than the biggest file sezie, Add more parts of the maximum size
nfiles = tsize / flS.fileSizes[len(flS.fileSizes)-1]
remain = tsize % flS.fileSizes[len(flS.fileSizes)-1]
flS.fileAmmount[len(flS.fileAmmount)-1] += nfiles
}
if remain > 0 { //The remain must be smaller than the bigger file size.
for index, fsize := range flS.fileSizes {
if remain <= 3*fsize {
signRemain := int(remain)
for signRemain > 0 {
flS.fileAmmount[index]++
signRemain -= int(fsize)
}
break
}
}
}
return nil
}
//Prints the number of _file_ elements defined
func GetDefFiles(fS *FileCollection) string {
var semiTotal uint64
var rst string
for index, value := range fS.fileSizes {
semiTotal += value * fS.fileAmmount[index]
rst += fmt.Sprintf("Files of size: %d, count: %d, total size: %d\n", value, fS.fileAmmount[index], value*fS.fileAmmount[index])
}
rst += fmt.Sprintf("Total size reserved: %d bytes.\n", semiTotal)
return rst
}
//Generate a message with information about the actual ammount and size of the existing files
func (fc FileCollection) GetActFiles() string {
var mensj string
var totalSize int64
mensj += fmt.Sprintf("Last request ID: %d\n",fc.flid)
for _,fsize := range fc.fileSizes {
directory := fmt.Sprintf("%s/d-%d",fc.frandi,fsize)
fileList,err := getFilesInDir(directory)
if err != nil {
log.Printf("GetActFiles(): Error listing directory: %s\n%s",directory,err.Error())
return "Error getting files information\n"
}
mensj += fmt.Sprintf("Files of size: %d, Count: %d\n", fsize,len(fileList))
for _,fl := range fileList{
totalSize += fl.Size()
}
}
mensj += fmt.Sprintf("Total size: %d bytes.\n",totalSize)
return mensj
}
//Create or remove files to reach the requested number of files of each size.
//CreateFiles serialises disk changes through the filelock channel: only the
//caller whose previously published timestamp matches ts may proceed; a 5s
//timeout or a foreign timestamp aborts without touching the disk.
func CreateFiles(fS *FileCollection, ts int64, filelock chan int64) {
	var lt time.Time
	var err error
	select {
	case <- time.After(5 * time.Second):
		//If 5 seconds pass without getting the proper lock, abort
		log.Printf("partdisk.CreateFiles(): timeout waiting for lock\n")
		return
	case chts := <- filelock:
		if chts == ts { //Got the lock and it matches the timestamp received
			//Proceed
			fS.flid = ts
			defer func(){
				filelock <- 0 //Release lock
			}()
			lt = time.Now() //Start counting how long does the parts creation take
			log.Printf("CreateFiles(): lock obtained, timestamps match: %d\n",ts)
		} else {
			//Another request owns the lock: put the value back untouched and bail out
			log.Printf("CreateFiles(): lock obtained, but timestamps missmatch: %d - %d\n", ts,chts)
			filelock <- chts
			return
		}
	}
	//Lock obtained proper, create/delete the files
	err = adrefiles(fS)
	if err != nil {
		log.Printf("CreateFiles(): Error creating file: %s\n",err.Error())
		return
	}
	log.Printf("CreateFiles(): Request %d completed in %d seconds\n",ts,int64(time.Since(lt).Seconds()))
}
// adrefiles reconciles the on-disk files with the definition in fS: for every
// configured file size it compares the bytes currently present in that size's
// directory with the requested amount, then deletes files from the top of the
// numbering or creates new ones after it until the two match.
// Returns the first error encountered, nil on success.
func adrefiles(fS *FileCollection) error {
	for index, value := range fS.fileSizes {
		directory := fmt.Sprintf("%s/d-%d", fS.frandi, value)
		//Create a list of files in directory
		fileList, err := getFilesInDir(directory)
		if err != nil {
			log.Printf("adrefiles(): Error listing directory: %s", directory)
			return err
		}
		//Sort the list numerically by the number after the "f-" prefix.
		//strings.TrimPrefix (not TrimLeft) is used on purpose: TrimLeft treats
		//"f-" as a character set and would strip any leading run of 'f' and '-',
		//not just the exact prefix.
		sort.Slice(fileList, func(i, j int) bool {
			n1, _ := strconv.ParseInt(strings.TrimPrefix(fileList[i].Name(), "f-"), 10, 32)
			n2, _ := strconv.ParseInt(strings.TrimPrefix(fileList[j].Name(), "f-"), 10, 32)
			return n1 < n2
		})
		//Get the number of the last file created, 0 if none has been
		//(lastfnum is zero-valued by declaration, so no explicit else is needed)
		var lastfnum uint64
		if len(fileList) > 0 {
			lastfnum, _ = strconv.ParseUint(strings.TrimPrefix(fileList[len(fileList)-1].Name(), "f-"), 10, 32)
		}
		log.Printf("Last file number: %d", lastfnum)
		//Get the total size in bytes consumed by the files
		var tfsize, rqsize, deltasize, fdelta uint64
		for _, v := range fileList {
			tfsize += uint64(v.Size())
		}
		log.Printf("Total file size in dir %s: %d", directory, tfsize)
		rqsize = fS.fileAmmount[index] * value
		log.Printf("Requested size: %d", rqsize)
		if tfsize > rqsize { //Need to remove files
			deltasize = tfsize - rqsize
			fdelta = deltasize / value //integer division: only whole files are removed
			log.Printf("- Need to remove %d bytes, %d files of size %d", deltasize, fdelta, value)
			//Delete from the highest-numbered file downwards.
			//NOTE(review): assumes numbering is contiguous up to lastfnum — confirm.
			for n := 0; n < int(fdelta); n++ {
				filename := fmt.Sprintf("%s/d-%d/f-%d", fS.frandi, value, int(lastfnum)-n)
				err = os.Remove(filename)
				if err != nil {
					log.Printf("adrefiles(): error deleting file %s:", filename)
					return err
				}
			}
		} else if tfsize < rqsize { //Need to create files
			deltasize = rqsize - tfsize
			fdelta = deltasize / value
			log.Printf("+ Need to add %d bytes, %d files of size %d", deltasize, fdelta, value)
			for n := 1; n <= int(fdelta); n++ {
				filename := fmt.Sprintf("%s/d-%d/f-%d", fS.frandi, value, n+int(lastfnum))
				err = newFile(filename, value)
				if err != nil {
					log.Printf("adrefiles(): error creating file %s:", filename)
					return err
				}
			}
		} else { //No need to add or remove anything
			log.Printf("= No need to add or remove any files")
		}
	}
	return nil
}
//Creates a single file of the indicated size
func newFile(filename string, size uint64) error {
const blength int = 1024
burval := make([]byte,blength)
var base [blength]byte
var counter, index uint64
//Fill up the base array with random printable characters
rand.Seed(time.Now().UnixNano())
for x:=0; x<len(base); x++ | {
base[x]=byte(rand.Intn(95) + 32) //ASCII 32 to 126
} | conditional_block | |
main.py | (counter))
frame = cv2.resize(frame, None, fx=0.4, fy=0.4)
height, width, channels = frame.shape
startingtime = time.time()
frame_id = 0
# Detecting objects
blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
l = ['person', 'car', 'truck', 'bus', 'bike']
m = dict({'person': 1, 'car': 15, 'truck': 20, 'bus': 20, 'bike': 5})
# Showing information on the screen
class_ids = []
confidences = []
boxes = [] # coordinate of bounding box
for out in outs:
for detection in out:
scores = detection[5:] # getting all 80 scores
class_id = np.argmax(scores) # finding the max score
confidence = scores[class_id]
# find out strong predictions greater then. 5
if confidence > 0.5:
# Object detected
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
# Rectangle coordinates
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
count_label = []
count = []
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
if label not in count_label:
if label in l:
count_label.append(label)
count.append(int(1))
else:
tmp = 0
for k in count_label:
if k == label:
count[tmp] = count[tmp] + 1
tmp = tmp + 1
color = colors[class_ids[i]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
cv2.putText(frame, label, (x, y + 30), font, 3, color, 3)
x = 0
for k in range(len(count_label)):
x = x + m[count_label[k]]
elapsed_time = time.time() - startingtime
fps = frame_id / elapsed_time
cv2.putText(frame, "FPS:" + str(fps), (10, 30), font, 3, (0, 0, 0), 1)
cv2.imshow("Image", frame)
key = cv2.waitKey(1) # 0 keeps on hold 1 waits for a millisecond
return x
# define the shape of the environment (i.e., its states)before importing map let's do it in 11 by 11 area
environment_rows = 6
environment_columns = 6
# Create a 3D numpy array to hold the current Q-values for each state and action pair: Q(s, a)
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# as well as a third "action" dimension.
# The "action" dimension consists of 4 layers that will allow us to keep track of
# the Q-values for each possible action in
# each state (see next cell for a description of possible actions).
# The value of each (state, action) pair is initialized to 0.
q_values = np.zeros((environment_rows, environment_columns, 4))
# define actions
# numeric action codes: 0 = up, 1 = right, 2 = down, 3 = left
actions = ['up', 'right', 'down', 'left']
# Create a 2D numpy array to hold the rewards for each state.
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# and each value is initialized to -999999.
rewards = np.full((environment_rows, environment_columns), -999999.)
k = 0
print("Pick the destination Location from the list")
print("Locations :")
for i in range(len(df)):
k = int(capturing(df[i]))
if k == 0:
k = 1
rewards[df1[i]-23, df2[i]-23] = k*(-1)
print(df3[i])
# taking the value of destination
goalone = -1
goaltwo = -1
goallo=input("Enter Destination Location : ")
for i in range(len(df)):
if df3[i] == goallo:
goalone = df1[i]-23
goaltwo = df2[i]-23
if goalone == -1 or goaltwo == -1:
print("Location not found please check for typos and case if you think u entered correct location")
exit()
# set the reward for reaching goal (i.e., the goal) to 999999
rewards[goalone,goaltwo] = 999999.0
# A state is terminal when its cell carries the goal payoff or the crash penalty.
def is_terminal_state(current_row_index, current_column_index):
    """Return True when (row, column) ends an episode; road cells return False."""
    cell_reward = rewards[current_row_index, current_column_index]
    return cell_reward == 999999.0 or cell_reward == -999999.0
# define a function that will choose a random, non-terminal starting location
def | ():
# get a random row and column index
current_row_index = np.random.randint(environment_rows)
current_column_index = np.random.randint(environment_columns)
# continue choosing random row and column indexes until a non-terminal state is identified
# (i.e., until the chosen state is a 'path which we can travel').
while is_terminal_state(current_row_index, current_column_index):
current_row_index = np.random.randint(environment_rows)
current_column_index = np.random.randint(environment_columns)
return current_row_index, current_column_index
# Epsilon-greedy action selection over the global Q-table.
def get_next_action(current_row_index, current_column_index, epsilon):
    """Exploit the best Q-value with probability *epsilon*, else explore randomly."""
    if np.random.random() >= epsilon:
        # explore: uniform over the four action codes
        return np.random.randint(4)
    # exploit: index of the highest Q-value for this state
    return np.argmax(q_values[current_row_index, current_column_index])
# Translate an action index into the next grid cell, clamped to the grid edges.
def get_next_location(current_row_index, current_column_index, action_index):
    """Return the (row, column) reached by taking *action_index*; moves that
    would leave the grid keep the current position."""
    row, column = current_row_index, current_column_index
    action = actions[action_index]
    if action == 'up':
        if row > 0:
            row -= 1
    elif action == 'right':
        if column < environment_columns - 1:
            column += 1
    elif action == 'down':
        if row < environment_rows - 1:
            row += 1
    elif action == 'left':
        if column > 0:
            column -= 1
    return row, column
# Walk the learned greedy policy from a start cell until a terminal cell is hit.
def get_shortest_path(start_row_index, start_column_index):
    """Return the list of [row, column] cells visited from the start cell to a
    terminal cell; an empty list when the start cell itself is terminal."""
    if is_terminal_state(start_row_index, start_column_index):
        print("You are not on road please get to the road first")
        return []
    row, column = start_row_index, start_column_index
    path = [[row, column]]
    while not is_terminal_state(row, column):
        # epsilon=1.0 makes the choice purely greedy over the Q-table
        best_action = get_next_action(row, column, 1.)
        row, column = get_next_location(row, column, best_action)
        path.append([row, column])
    return path
# define training parameters
epsilon = 0.9 # the percentage of time when we should take the best action (instead | get_starting_location | identifier_name |
main.py | output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3)) # to get list of colors for each possible class
# Loading image
with open("{}.jpeg".format(counter), "wb") as f:
f.write(p)
frame = cv2.imread("{}.jpeg".format(counter))
frame = cv2.resize(frame, None, fx=0.4, fy=0.4)
height, width, channels = frame.shape
startingtime = time.time()
frame_id = 0
# Detecting objects
blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
l = ['person', 'car', 'truck', 'bus', 'bike']
m = dict({'person': 1, 'car': 15, 'truck': 20, 'bus': 20, 'bike': 5})
# Showing information on the screen
class_ids = []
confidences = []
boxes = [] # coordinate of bounding box
for out in outs:
for detection in out:
scores = detection[5:] # getting all 80 scores
class_id = np.argmax(scores) # finding the max score
confidence = scores[class_id]
# find out strong predictions greater then. 5
if confidence > 0.5:
# Object detected
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
# Rectangle coordinates
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
count_label = []
count = []
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
if label not in count_label:
if label in l:
count_label.append(label)
count.append(int(1))
else:
tmp = 0
for k in count_label:
if k == label:
count[tmp] = count[tmp] + 1
tmp = tmp + 1
color = colors[class_ids[i]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
cv2.putText(frame, label, (x, y + 30), font, 3, color, 3)
x = 0
for k in range(len(count_label)):
x = x + m[count_label[k]]
elapsed_time = time.time() - startingtime
fps = frame_id / elapsed_time
cv2.putText(frame, "FPS:" + str(fps), (10, 30), font, 3, (0, 0, 0), 1)
cv2.imshow("Image", frame)
key = cv2.waitKey(1) # 0 keeps on hold 1 waits for a millisecond
return x
# define the shape of the environment (i.e., its states)before importing map let's do it in 11 by 11 area
environment_rows = 6
environment_columns = 6
# Create a 3D numpy array to hold the current Q-values for each state and action pair: Q(s, a)
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# as well as a third "action" dimension.
# The "action" dimension consists of 4 layers that will allow us to keep track of
# the Q-values for each possible action in
# each state (see next cell for a description of possible actions).
# The value of each (state, action) pair is initialized to 0.
q_values = np.zeros((environment_rows, environment_columns, 4))
# define actions
# numeric action codes: 0 = up, 1 = right, 2 = down, 3 = left
actions = ['up', 'right', 'down', 'left']
# Create a 2D numpy array to hold the rewards for each state.
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# and each value is initialized to -999999.
rewards = np.full((environment_rows, environment_columns), -999999.)
k = 0
print("Pick the destination Location from the list")
print("Locations :")
for i in range(len(df)):
k = int(capturing(df[i]))
if k == 0:
k = 1
rewards[df1[i]-23, df2[i]-23] = k*(-1)
print(df3[i])
# taking the value of destination
goalone = -1
goaltwo = -1
goallo=input("Enter Destination Location : ")
for i in range(len(df)):
if df3[i] == goallo:
goalone = df1[i]-23
goaltwo = df2[i]-23
if goalone == -1 or goaltwo == -1:
print("Location not found please check for typos and case if you think u entered correct location")
exit()
# set the reward for reaching goal (i.e., the goal) to 999999
rewards[goalone,goaltwo] = 999999.0
# A state is terminal when its cell carries the goal payoff or the crash penalty.
def is_terminal_state(current_row_index, current_column_index):
    """Return True when (row, column) ends an episode; road cells return False."""
    cell_reward = rewards[current_row_index, current_column_index]
    return cell_reward == 999999.0 or cell_reward == -999999.0
# Starting cells are sampled uniformly until a non-terminal (road) cell is found.
def get_starting_location():
    """Return a random (row, column) pair that lies on a drivable road cell."""
    while True:
        row = np.random.randint(environment_rows)
        column = np.random.randint(environment_columns)
        if not is_terminal_state(row, column):
            return row, column
# Epsilon-greedy action selection over the global Q-table.
def get_next_action(current_row_index, current_column_index, epsilon):
    """Exploit the best Q-value with probability *epsilon*, else explore randomly."""
    if np.random.random() >= epsilon:
        # explore: uniform over the four action codes
        return np.random.randint(4)
    # exploit: index of the highest Q-value for this state
    return np.argmax(q_values[current_row_index, current_column_index])
# Translate an action index into the next grid cell, clamped to the grid edges.
def get_next_location(current_row_index, current_column_index, action_index):
    """Return the (row, column) reached by taking *action_index*; moves that
    would leave the grid keep the current position."""
    row, column = current_row_index, current_column_index
    action = actions[action_index]
    if action == 'up':
        if row > 0:
            row -= 1
    elif action == 'right':
        if column < environment_columns - 1:
            column += 1
    elif action == 'down':
        if row < environment_rows - 1:
            row += 1
    elif action == 'left':
        if column > 0:
            column -= 1
    return row, column
# Define a function that will get the shortest path between any location within the source that
# the car is allowed to travel and the goal.
def get_shortest_path(start_row_index, start_column_index):
# return immediately if this is an invalid starting location
if is_terminal_state(start_row_index, start_column_index):
print("You are not on road please get to the road first")
return []
else: # if this is a 'legal' starting location
current_row_index, current_column_index = start_row_index, start_column_index | net = cv2.dnn.readNet("yolo/yolov3.weights", "yolo/yolov3.cfg")
counter = 1
with open("yolo/coco.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
| random_line_split | |
main.py | (counter))
frame = cv2.resize(frame, None, fx=0.4, fy=0.4)
height, width, channels = frame.shape
startingtime = time.time()
frame_id = 0
# Detecting objects
blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
l = ['person', 'car', 'truck', 'bus', 'bike']
m = dict({'person': 1, 'car': 15, 'truck': 20, 'bus': 20, 'bike': 5})
# Showing information on the screen
class_ids = []
confidences = []
boxes = [] # coordinate of bounding box
for out in outs:
for detection in out:
scores = detection[5:] # getting all 80 scores
class_id = np.argmax(scores) # finding the max score
confidence = scores[class_id]
# find out strong predictions greater then. 5
if confidence > 0.5:
# Object detected
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
# Rectangle coordinates
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
count_label = []
count = []
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
if label not in count_label:
if label in l:
count_label.append(label)
count.append(int(1))
else:
tmp = 0
for k in count_label:
if k == label:
count[tmp] = count[tmp] + 1
tmp = tmp + 1
color = colors[class_ids[i]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
cv2.putText(frame, label, (x, y + 30), font, 3, color, 3)
x = 0
for k in range(len(count_label)):
x = x + m[count_label[k]]
elapsed_time = time.time() - startingtime
fps = frame_id / elapsed_time
cv2.putText(frame, "FPS:" + str(fps), (10, 30), font, 3, (0, 0, 0), 1)
cv2.imshow("Image", frame)
key = cv2.waitKey(1) # 0 keeps on hold 1 waits for a millisecond
return x
# define the shape of the environment (i.e., its states)before importing map let's do it in 11 by 11 area
environment_rows = 6
environment_columns = 6
# Create a 3D numpy array to hold the current Q-values for each state and action pair: Q(s, a)
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# as well as a third "action" dimension.
# The "action" dimension consists of 4 layers that will allow us to keep track of
# the Q-values for each possible action in
# each state (see next cell for a description of possible actions).
# The value of each (state, action) pair is initialized to 0.
q_values = np.zeros((environment_rows, environment_columns, 4))
# define actions
# numeric action codes: 0 = up, 1 = right, 2 = down, 3 = left
actions = ['up', 'right', 'down', 'left']
# Create a 2D numpy array to hold the rewards for each state.
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# and each value is initialized to -999999.
rewards = np.full((environment_rows, environment_columns), -999999.)
k = 0
print("Pick the destination Location from the list")
print("Locations :")
for i in range(len(df)):
k = int(capturing(df[i]))
if k == 0:
k = 1
rewards[df1[i]-23, df2[i]-23] = k*(-1)
print(df3[i])
# taking the value of destination
goalone = -1
goaltwo = -1
goallo=input("Enter Destination Location : ")
for i in range(len(df)):
if df3[i] == goallo:
goalone = df1[i]-23
goaltwo = df2[i]-23
if goalone == -1 or goaltwo == -1:
print("Location not found please check for typos and case if you think u entered correct location")
exit()
# set the reward for reaching goal (i.e., the goal) to 999999
rewards[goalone,goaltwo] = 999999.0
# A state is terminal when its cell carries the goal payoff or the crash penalty.
def is_terminal_state(current_row_index, current_column_index):
    """Return True when (row, column) ends an episode; road cells return False."""
    cell_reward = rewards[current_row_index, current_column_index]
    return cell_reward == 999999.0 or cell_reward == -999999.0
# Starting cells are sampled uniformly until a non-terminal (road) cell is found.
def get_starting_location():
    """Return a random (row, column) pair that lies on a drivable road cell."""
    while True:
        row = np.random.randint(environment_rows)
        column = np.random.randint(environment_columns)
        if not is_terminal_state(row, column):
            return row, column
# Epsilon-greedy action selection over the global Q-table.
def get_next_action(current_row_index, current_column_index, epsilon):
    """Exploit the best Q-value with probability *epsilon*, else explore randomly."""
    if np.random.random() >= epsilon:
        # explore: uniform over the four action codes
        return np.random.randint(4)
    # exploit: index of the highest Q-value for this state
    return np.argmax(q_values[current_row_index, current_column_index])
# define a function that will get the next location based on the chosen action
def get_next_location(current_row_index, current_column_index, action_index):
|
# Walk the learned greedy policy from a start cell until a terminal cell is hit.
def get_shortest_path(start_row_index, start_column_index):
    """Return the list of [row, column] cells visited from the start cell to a
    terminal cell; an empty list when the start cell itself is terminal."""
    if is_terminal_state(start_row_index, start_column_index):
        print("You are not on road please get to the road first")
        return []
    row, column = start_row_index, start_column_index
    path = [[row, column]]
    while not is_terminal_state(row, column):
        # epsilon=1.0 makes the choice purely greedy over the Q-table
        best_action = get_next_action(row, column, 1.)
        row, column = get_next_location(row, column, best_action)
        path.append([row, column])
    return path
# define training parameters
epsilon = 0.9 # the percentage of time when we should take the best action (instead of | new_row_index = current_row_index
new_column_index = current_column_index
if actions[action_index] == 'up' and current_row_index > 0:
new_row_index -= 1
elif actions[action_index] == 'right' and current_column_index < environment_columns - 1:
new_column_index += 1
elif actions[action_index] == 'down' and current_row_index < environment_rows - 1:
new_row_index += 1
elif actions[action_index] == 'left' and current_column_index > 0:
new_column_index -= 1
return new_row_index, new_column_index | identifier_body |
main.py | int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
count_label = []
count = []
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
if label not in count_label:
if label in l:
count_label.append(label)
count.append(int(1))
else:
tmp = 0
for k in count_label:
if k == label:
count[tmp] = count[tmp] + 1
tmp = tmp + 1
color = colors[class_ids[i]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
cv2.putText(frame, label, (x, y + 30), font, 3, color, 3)
x = 0
for k in range(len(count_label)):
x = x + m[count_label[k]]
elapsed_time = time.time() - startingtime
fps = frame_id / elapsed_time
cv2.putText(frame, "FPS:" + str(fps), (10, 30), font, 3, (0, 0, 0), 1)
cv2.imshow("Image", frame)
key = cv2.waitKey(1) # 0 keeps on hold 1 waits for a millisecond
return x
# define the shape of the environment (i.e., its states)before importing map let's do it in 11 by 11 area
environment_rows = 6
environment_columns = 6
# Create a 3D numpy array to hold the current Q-values for each state and action pair: Q(s, a)
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# as well as a third "action" dimension.
# The "action" dimension consists of 4 layers that will allow us to keep track of
# the Q-values for each possible action in
# each state (see next cell for a description of possible actions).
# The value of each (state, action) pair is initialized to 0.
q_values = np.zeros((environment_rows, environment_columns, 4))
# define actions
# numeric action codes: 0 = up, 1 = right, 2 = down, 3 = left
actions = ['up', 'right', 'down', 'left']
# Create a 2D numpy array to hold the rewards for each state.
# The array contains 11 rows and 11 columns (to match the shape of the environment),
# and each value is initialized to -999999.
rewards = np.full((environment_rows, environment_columns), -999999.)
k = 0
print("Pick the destination Location from the list")
print("Locations :")
for i in range(len(df)):
k = int(capturing(df[i]))
if k == 0:
k = 1
rewards[df1[i]-23, df2[i]-23] = k*(-1)
print(df3[i])
# taking the value of destination
goalone = -1
goaltwo = -1
goallo=input("Enter Destination Location : ")
for i in range(len(df)):
if df3[i] == goallo:
goalone = df1[i]-23
goaltwo = df2[i]-23
if goalone == -1 or goaltwo == -1:
print("Location not found please check for typos and case if you think u entered correct location")
exit()
# set the reward for reaching goal (i.e., the goal) to 999999
rewards[goalone,goaltwo] = 999999.0
# A state is terminal when its cell carries the goal payoff or the crash penalty.
def is_terminal_state(current_row_index, current_column_index):
    """Return True when (row, column) ends an episode; road cells return False."""
    cell_reward = rewards[current_row_index, current_column_index]
    return cell_reward == 999999.0 or cell_reward == -999999.0
# Starting cells are sampled uniformly until a non-terminal (road) cell is found.
def get_starting_location():
    """Return a random (row, column) pair that lies on a drivable road cell."""
    while True:
        row = np.random.randint(environment_rows)
        column = np.random.randint(environment_columns)
        if not is_terminal_state(row, column):
            return row, column
# Epsilon-greedy action selection over the global Q-table.
def get_next_action(current_row_index, current_column_index, epsilon):
    """Exploit the best Q-value with probability *epsilon*, else explore randomly."""
    if np.random.random() >= epsilon:
        # explore: uniform over the four action codes
        return np.random.randint(4)
    # exploit: index of the highest Q-value for this state
    return np.argmax(q_values[current_row_index, current_column_index])
# Translate an action index into the next grid cell, clamped to the grid edges.
def get_next_location(current_row_index, current_column_index, action_index):
    """Return the (row, column) reached by taking *action_index*; moves that
    would leave the grid keep the current position."""
    row, column = current_row_index, current_column_index
    action = actions[action_index]
    if action == 'up':
        if row > 0:
            row -= 1
    elif action == 'right':
        if column < environment_columns - 1:
            column += 1
    elif action == 'down':
        if row < environment_rows - 1:
            row += 1
    elif action == 'left':
        if column > 0:
            column -= 1
    return row, column
# Walk the learned greedy policy from a start cell until a terminal cell is hit.
def get_shortest_path(start_row_index, start_column_index):
    """Return the list of [row, column] cells visited from the start cell to a
    terminal cell; an empty list when the start cell itself is terminal."""
    if is_terminal_state(start_row_index, start_column_index):
        print("You are not on road please get to the road first")
        return []
    row, column = start_row_index, start_column_index
    path = [[row, column]]
    while not is_terminal_state(row, column):
        # epsilon=1.0 makes the choice purely greedy over the Q-table
        best_action = get_next_action(row, column, 1.)
        row, column = get_next_location(row, column, best_action)
        path.append([row, column])
    return path
# define training parameters
epsilon = 0.9 # the percentage of time when we should take the best action (instead of a random action)
discount_factor = 0.9 # discount factor for future rewards
learning_rate = 0.9 # the rate at which the AI agent should learn
# run through 1000 training episodes
for episode in range(1000):
# get the starting location for this episode
row_index, column_index = get_starting_location()
# continue taking actions (i.e., moving) until we reach a terminal state
# (i.e., until we reach goal or crash )
while not is_terminal_state(row_index, column_index):
# choose which action to take (i.e., where to move next)
| action_index = get_next_action(row_index, column_index, epsilon)
# perform the chosen action, and transition to the next state (i.e., move to the next location)
old_row_index, old_column_index = row_index, column_index # store the old row and column indexes
row_index, column_index = get_next_location(row_index, column_index, action_index)
# receive the reward for moving to the new state, and calculate the temporal difference
reward = rewards[row_index, column_index]
old_q_value = q_values[old_row_index, old_column_index, action_index]
temporal_difference = reward + (discount_factor * np.max(q_values[row_index, column_index])) - old_q_value
# update the Q-value for the previous state and action pair
new_q_value = old_q_value + (learning_rate * temporal_difference)
q_values[old_row_index, old_column_index, action_index] = new_q_value | conditional_block | |
smart_contract_service_impl.go | was occured during getting repository list"))
return
}
for _, repo := range repoList {
localReposPath := scs.SmartContractDirPath + "/" +
strings.Replace(repo.FullName, "/", "_", -1)
err = os.MkdirAll(localReposPath, 0755)
if err != nil {
errorHandler(errors.New("An error was occured during making repository path"))
return
}
commits, err := domain.GetReposCommits(repo.FullName)
if err != nil {
errorHandler(errors.New("An error was occured during getting commit logs"))
return
}
for _, commit := range commits {
if commit.Author.Login == authenticatedGit {
err := domain.CloneReposWithName(repo.FullName, localReposPath, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during cloning with name"))
return
}
err = domain.ResetWithSHA(localReposPath + "/" + commit.Sha, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during resetting with SHA"))
return
}
}
}
}
completionHandler()
return
}()
}
func (scs *SmartContractServiceImpl) Deploy(ReposPath string) (string, error) {
origin_repos_name := strings.Split(ReposPath, "/")[1]
new_repos_name := strings.Replace(ReposPath, "/", "_", -1)
_, ok := scs.keyByValue(ReposPath)
if ok {
// 버전 업데이트 기능 추가 필요
return "", errors.New("Already exist smart contract ID")
}
repos, err := domain.GetRepos(ReposPath)
if err != nil {
return "", errors.New("An error occured while getting repos!")
}
if repos.Message == "Bad credentials" {
return "", errors.New("Not Exist Repos!")
}
err = os.MkdirAll(scs.SmartContractDirPath + "/" + new_repos_name, 0755)
if err != nil {
return "", errors.New("An error occured while make repository's directory!")
}
//todo gitpath이미 존재하는지 확인
err = domain.CloneRepos(ReposPath, scs.SmartContractDirPath + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
common.Log.Println(viper.GetString("smartContract.githubID"))
_, err = domain.CreateRepos(new_repos_name, viper.GetString("smartContract.githubAccessToken"))
if err != nil {
return "", errors.New(err.Error())//"An error occured while creating repos!")
}
err = domain.ChangeRemote(scs.GithubID + "/" + new_repos_name, scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
// 버전 관리를 위한 파일 추가
now := time.Now().Format("2006-01-02 15:04:05");
file, err := os.OpenFile(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name + "/version", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil {
return "", errors.New("An error occured while creating or opening file!")
}
_, err = file.WriteString("Deployed at " + now + "\n")
if err != nil {
return "", errors.New("An error occured while writing file!")
}
err = file.Close()
if err != nil {
return "", errors.New("An error occured while closing file!")
}
err = domain.CommitAndPush(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, "It-Chain Smart Contract \"" + new_repos_name + "\" Deploy")
if err != nil {
return "", errors.New(err.Error())
//return "", errors.New("An error occured while committing and pushing!")
}
githubResponseCommits, err := domain.GetReposCommits(scs.GithubID + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while getting commit log!")
}
reposDirPath := scs.SmartContractDirPath + "/" + new_repos_name + "/" + githubResponseCommits[0].Sha
err = os.Rename(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, reposDirPath)
if err != nil {
return "", errors.New("An error occured while renaming directory!")
}
scs.SmartContractMap[githubResponseCommits[0].Sha] = SmartContract{new_repos_name, ReposPath, ""}
return githubResponseCommits[0].Sha, nil
}
/***************************************************
* 1. smartcontract 검사
* 2. smartcontract -> sc.tar : 애초에 풀 받을 때 압축해 둘 수 있음
* 3. go 버전에 맞는 docker image를 Create
* 4. sc.tar를 docker container로 복사
* 5. docker container Start
* 6. docker에서 smartcontract 실행
****************************************************/
func (scs *SmartContractServiceImpl) Query(transaction domain.Transaction) (error) {
/*** Set Transaction Arg ***/
logger_s.Errorln("query start")
tx_bytes, err := json.Marshal(transaction)
if err != nil {
return errors.New("Tx Marshal Error")
}
sc, ok := scs.SmartContractMap[transaction.TxData.ContractID];
if !ok {
logger_s.Errorln("Not exist contract ID")
return errors.New("Not exist contract ID")
}
_, err = os.Stat(sc.SmartContractPath)
if os.IsNotExist(err) {
logger_s.Errorln("File or Directory Not Exist")
return errors.New("File or Directory Not Exist")
}
/*** smartcontract build ***/
logger_s.Errorln("build start")
cmd := exec.Command("env", "GOOS=linux", "go", "build", "-o", TMP_DIR + "/" + sc.Name, "./" + sc.Name + ".go")
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("SmartContract build error")
return err
}
cmd = exec.Command("chmod", "777", TMP_DIR + "/" + sc.Name)
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("Chmod Error")
return err
}
logger_s.Errorln("make tar")
err = domain.MakeTar(TMP_DIR + "/" + sc.Name, TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving smartcontract file!")
return err
}
err = domain.MakeTar("$GOPATH/src/it-chain/smartcontract/worldstatedb", TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving worldstateDB file!")
return err
}
logger_s.Errorln("exec cmd")
// tar config file
cmd = exec.Command("tar", "-cf", TMP_DIR + "/config.tar", "./it-chain/config.yaml")
cmd.Dir = "../../"
err = cmd.Run()
if err != nil {
logger_s.Errorln("An error occured while archiving config file!")
return err
}
logger_s.Errorln("Pulling image")
// Docker Code
imageName := "docker.io/library/golang:1.9.2-alpine3.6"
tarPath := TMP_DIR + "/" + sc.Name + ".tar"
tarPath_wsdb := TMP_DIR + "/worldstatedb.tar"
tarPath_config := TMP_DIR + "/config.tar"
ctx := context.Background()
cli, err := docker.NewEnvClient()
if err != nil {
logger_s.Errorln("An error occured while creating new Docker Client!")
return err
}
out, err := cli.ImagePull(ctx, imageName, types.ImagePullOptions{})
if err != nil {
logger_s.Errorln("An error oeccured while pulling docker image!")
return err
}
io.Copy(os.Stdout, out)
imageName_splited := strings.Split(imageName, "/")
image := imageName_splited[len(imageName_splited)-1]
resp, err := cli.ContainerCreate(ctx, &container.Config{
Image: image,
Cmd: []string{"/go/src/" + sc.Name, string(tx_bytes)},
Tty: true,
AttachStdout: true,
AttachStderr: true,
}, nil, nil, "")
if err != nil {
logger_s.Errorln("An error occured while creating docker container!")
return err
}
/*** read tar file ***/
file, err := ioutil.ReadFile(tarPath)
if err != nil {
logger_s.Errorln("An error occured while reading smartcontract tar file!")
return err
}
wsdb, err := ioutil.ReadFile(tarPath_wsdb)
if err != nil {
logger_s.Errorln("An error occured while reading worldstateDB tar file!")
return err
}
config, err := ioutil.ReadFil | e(tarPath_config)
if err != nil {
logger_s.Errorln("An error occured while reading config | conditional_block | |
smart_contract_service_impl.go | () {
}
func NewSmartContractService(githubID string,smartContractDirPath string) SmartContractService{
return &SmartContractServiceImpl{
GithubID:githubID,
SmartContractDirPath:smartContractDirPath,
SmartContractMap: make(map[string]SmartContract),
}
}
func (scs *SmartContractServiceImpl) PullAllSmartContracts(authenticatedGit string, errorHandler func(error),
completionHandler func()) {
go func() {
repoList, err := domain.GetRepositoryList(authenticatedGit)
if err != nil {
errorHandler(errors.New("An error was occured during getting repository list"))
return
}
for _, repo := range repoList {
localReposPath := scs.SmartContractDirPath + "/" +
strings.Replace(repo.FullName, "/", "_", -1)
err = os.MkdirAll(localReposPath, 0755)
if err != nil {
errorHandler(errors.New("An error was occured during making repository path"))
return
}
commits, err := domain.GetReposCommits(repo.FullName)
if err != nil {
errorHandler(errors.New("An error was occured during getting commit logs"))
return
}
for _, commit := range commits {
if commit.Author.Login == authenticatedGit {
err := domain.CloneReposWithName(repo.FullName, localReposPath, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during cloning with name"))
return
}
err = domain.ResetWithSHA(localReposPath + "/" + commit.Sha, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during resetting with SHA"))
return
}
}
}
}
completionHandler()
return
}()
}
func (scs *SmartContractServiceImpl) Deploy(ReposPath string) (string, error) {
origin_repos_name := strings.Split(ReposPath, "/")[1]
new_repos_name := strings.Replace(ReposPath, "/", "_", -1)
_, ok := scs.keyByValue(ReposPath)
if ok {
// 버전 업데이트 기능 추가 필요
return "", errors.New("Already exist smart contract ID")
}
repos, err := domain.GetRepos(ReposPath)
if err != nil {
return "", errors.New("An error occured while getting repos!")
}
if repos.Message == "Bad credentials" {
return "", errors.New("Not Exist Repos!")
}
err = os.MkdirAll(scs.SmartContractDirPath + "/" + new_repos_name, 0755)
if err != nil {
return "", errors.New("An error occured while make repository's directory!")
}
//todo gitpath이미 존재하는지 확인
err = domain.CloneRepos(ReposPath, scs.SmartContractDirPath + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
common.Log.Println(viper.GetString("smartContract.githubID"))
_, err = domain.CreateRepos(new_repos_name, viper.GetString("smartContract.githubAccessToken"))
if err != nil {
return "", errors.New(err.Error())//"An error occured while creating repos!")
}
err = domain.ChangeRemote(scs.GithubID + "/" + new_repos_name, scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
// 버전 관리를 위한 파일 추가
now := time.Now().Format("2006-01-02 15:04:05");
file, err := os.OpenFile(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name + "/version", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil {
return "", errors.New("An error occured while creating or opening file!")
}
_, err = file.WriteString("Deployed at " + now + "\n")
if err != nil {
return "", errors.New("An error occured while writing file!")
}
err = file.Close()
if err != nil {
return "", errors.New("An error occured while closing file!")
}
err = domain.CommitAndPush(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, "It-Chain Smart Contract \"" + new_repos_name + "\" Deploy")
if err != nil {
return "", errors.New(err.Error())
//return "", errors.New("An error occured while committing and pushing!")
}
githubResponseCommits, err := domain.GetReposCommits(scs.GithubID + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while getting commit log!")
}
reposDirPath := scs.SmartContractDirPath + "/" + new_repos_name + "/" + githubResponseCommits[0].Sha
err = os.Rename(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, reposDirPath)
if err != nil {
return "", errors.New("An error occured while renaming directory!")
}
scs.SmartContractMap[githubResponseCommits[0].Sha] = SmartContract{new_repos_name, ReposPath, ""}
return githubResponseCommits[0].Sha, nil
}
/***************************************************
* 1. smartcontract 검사
* 2. smartcontract -> sc.tar : 애초에 풀 받을 때 압축해 둘 수 있음
* 3. go 버전에 맞는 docker image를 Create
* 4. sc.tar를 docker container로 복사
* 5. docker container Start
* 6. docker에서 smartcontract 실행
****************************************************/
func (scs *SmartContractServiceImpl) Query(transaction domain.Transaction) (error) {
/*** Set Transaction Arg ***/
logger_s.Errorln("query start")
tx_bytes, err := json.Marshal(transaction)
if err != nil {
return errors.New("Tx Marshal Error")
}
sc, ok := scs.SmartContractMap[transaction.TxData.ContractID];
if !ok {
logger_s.Errorln("Not exist contract ID")
return errors.New("Not exist contract ID")
}
_, err = os.Stat(sc.SmartContractPath)
if os.IsNotExist(err) {
logger_s.Errorln("File or Directory Not Exist")
return errors.New("File or Directory Not Exist")
}
/*** smartcontract build ***/
logger_s.Errorln("build start")
cmd := exec.Command("env", "GOOS=linux", "go", "build", "-o", TMP_DIR + "/" + sc.Name, "./" + sc.Name + ".go")
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("SmartContract build error")
return err
}
cmd = exec.Command("chmod", "777", TMP_DIR + "/" + sc.Name)
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("Chmod Error")
return err
}
logger_s.Errorln("make tar")
err = domain.MakeTar(TMP_DIR + "/" + sc.Name, TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving smartcontract file!")
return err
}
err = domain.MakeTar("$GOPATH/src/it-chain/smartcontract/worldstatedb", TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving worldstateDB file!")
return err
}
logger_s.Errorln("exec cmd")
// tar config file
cmd = exec.Command("tar", "-cf", TMP_DIR + "/config.tar", "./it-chain/config.yaml")
cmd.Dir = "../../"
err = cmd.Run()
if err != nil {
logger_s.Errorln("An error occured while archiving config file!")
return err
}
logger_s.Errorln("Pulling image")
// Docker Code
imageName := "docker.io/library/golang:1.9.2-alpine3.6"
tarPath := TMP_DIR + "/" + sc.Name + ".tar"
tarPath_wsdb := TMP_DIR + "/worldstatedb.tar"
tarPath_config := TMP_DIR + "/config.tar"
ctx := context.Background()
cli, err := docker.NewEnvClient()
if err != nil {
logger_s.Errorln("An error occured while creating new Docker Client!")
return err
}
out, err := cli.ImagePull(ctx, imageName, types.ImagePullOptions{})
if err != nil {
logger_s.Errorln("An error oeccured while pulling docker image!")
return err
}
io.Copy(os.Stdout, out)
imageName_splited := strings.Split(imageName, "/")
image := imageName_splited[len(imageName_splited)-1]
resp, err := cli.ContainerCreate(ctx, &container.Config{
Image: image,
Cmd: []string{"/go/src/" + sc.Name, string(tx_bytes)},
Tty: true,
AttachStdout: true,
AttachStderr: true,
}, nil, nil, "")
if err != nil {
logger_s.Errorln("An error occured | Init | identifier_name | |
smart_contract_service_impl.go | {
repoList, err := domain.GetRepositoryList(authenticatedGit)
if err != nil {
errorHandler(errors.New("An error was occured during getting repository list"))
return
}
for _, repo := range repoList {
localReposPath := scs.SmartContractDirPath + "/" +
strings.Replace(repo.FullName, "/", "_", -1)
err = os.MkdirAll(localReposPath, 0755)
if err != nil {
errorHandler(errors.New("An error was occured during making repository path"))
return
}
commits, err := domain.GetReposCommits(repo.FullName)
if err != nil {
errorHandler(errors.New("An error was occured during getting commit logs"))
return
}
for _, commit := range commits {
if commit.Author.Login == authenticatedGit {
err := domain.CloneReposWithName(repo.FullName, localReposPath, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during cloning with name"))
return
}
err = domain.ResetWithSHA(localReposPath + "/" + commit.Sha, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during resetting with SHA"))
return
}
}
}
}
completionHandler()
return
}()
}
func (scs *SmartContractServiceImpl) Deploy(ReposPath string) (string, error) {
origin_repos_name := strings.Split(ReposPath, "/")[1]
new_repos_name := strings.Replace(ReposPath, "/", "_", -1)
_, ok := scs.keyByValue(ReposPath)
if ok {
// 버전 업데이트 기능 추가 필요
return "", errors.New("Already exist smart contract ID")
}
repos, err := domain.GetRepos(ReposPath)
if err != nil {
return "", errors.New("An error occured while getting repos!")
}
if repos.Message == "Bad credentials" {
return "", errors.New("Not Exist Repos!")
}
err = os.MkdirAll(scs.SmartContractDirPath + "/" + new_repos_name, 0755)
if err != nil {
return "", errors.New("An error occured while make repository's directory!")
}
//todo gitpath이미 존재하는지 확인
err = domain.CloneRepos(ReposPath, scs.SmartContractDirPath + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
common.Log.Println(viper.GetString("smartContract.githubID"))
_, err = domain.CreateRepos(new_repos_name, viper.GetString("smartContract.githubAccessToken"))
if err != nil {
return "", errors.New(err.Error())//"An error occured while creating repos!")
}
err = domain.ChangeRemote(scs.GithubID + "/" + new_repos_name, scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
// 버전 관리를 위한 파일 추가
now := time.Now().Format("2006-01-02 15:04:05");
file, err := os.OpenFile(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name + "/version", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil {
return "", errors.New("An error occured while creating or opening file!")
}
_, err = file.WriteString("Deployed at " + now + "\n")
if err != nil {
return "", errors.New("An error occured while writing file!")
}
err = file.Close()
if err != nil {
return "", errors.New("An error occured while closing file!")
}
err = domain.CommitAndPush(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, "It-Chain Smart Contract \"" + new_repos_name + "\" Deploy")
if err != nil {
return "", errors.New(err.Error())
//return "", errors.New("An error occured while committing and pushing!")
}
githubResponseCommits, err := domain.GetReposCommits(scs.GithubID + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while getting commit log!")
}
reposDirPath := scs.SmartContractDirPath + "/" + new_repos_name + "/" + githubResponseCommits[0].Sha
err = os.Rename(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, reposDirPath)
if err != nil {
return "", errors.New("An error occured while renaming directory!")
}
scs.SmartContractMap[githubResponseCommits[0].Sha] = SmartContract{new_repos_name, ReposPath, ""}
return githubResponseCommits[0].Sha, nil
}
/***************************************************
* 1. smartcontract 검사
* 2. smartcontract -> sc.tar : 애초에 풀 받을 때 압축해 둘 수 있음
* 3. go 버전에 맞는 docker image를 Create
* 4. sc.tar를 docker container로 복사
* 5. docker container Start
* 6. docker에서 smartcontract 실행
****************************************************/
func (scs *SmartContractServiceImpl) Query(transaction domain.Transaction) (error) {
/*** Set Transaction Arg ***/
logger_s.Errorln("query start")
tx_bytes, err := json.Marshal(transaction)
if err != nil |
err = cmd.Run()
if err != nil {
logger_s.Errorln("SmartContract build error")
return err
}
cmd = exec.Command("chmod", "777", TMP_DIR + "/" + sc.Name)
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("Chmod Error")
return err
}
logger_s.Errorln("make tar")
err = domain.MakeTar(TMP_DIR + "/" + sc.Name, TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving smartcontract file!")
return err
}
err = domain.MakeTar("$GOPATH/src/it-chain/smartcontract/worldstatedb", TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving worldstateDB file!")
return err
}
logger_s.Errorln("exec cmd")
// tar config file
cmd = exec.Command("tar", "-cf", TMP_DIR + "/config.tar", "./it-chain/config.yaml")
cmd.Dir = "../../"
err = cmd.Run()
if err != nil {
logger_s.Errorln("An error occured while archiving config file!")
return err
}
logger_s.Errorln("Pulling image")
// Docker Code
imageName := "docker.io/library/golang:1.9.2-alpine3.6"
tarPath := TMP_DIR + "/" + sc.Name + ".tar"
tarPath_wsdb := TMP_DIR + "/worldstatedb.tar"
tarPath_config := TMP_DIR + "/config.tar"
ctx := context.Background()
cli, err := docker.NewEnvClient()
if err != nil {
logger_s.Errorln("An error occured while creating new Docker Client!")
return err
}
out, err := cli.ImagePull(ctx, imageName, types.ImagePullOptions{})
if err != nil {
logger_s.Errorln("An error oeccured while pulling docker image!")
return err
}
io.Copy(os.Stdout, out)
imageName_splited := strings.Split(imageName, "/")
image := imageName_splited[len(imageName_splited)-1]
resp, err := cli.ContainerCreate(ctx, &container.Config{
Image: image,
Cmd: []string{"/go/src/" + sc.Name, string(tx_bytes)},
Tty: true,
AttachStdout: true,
AttachStderr: true,
}, nil, nil, "")
if err != nil {
logger_s.Errorln("An error occured while creating docker container!")
return err
}
/*** read tar file ***/
file, err := ioutil.ReadFile(tarPath)
if err != nil {
logger_s.Errorln("An error occured while reading smartcontract tar file!")
return err
}
wsdb, err := ioutil.ReadFile(tarPath_wsdb)
if err != nil {
logger_s.Errorln("An error occured while reading worldstateDB tar file!")
return err
}
config | {
return errors.New("Tx Marshal Error")
}
sc, ok := scs.SmartContractMap[transaction.TxData.ContractID];
if !ok {
logger_s.Errorln("Not exist contract ID")
return errors.New("Not exist contract ID")
}
_, err = os.Stat(sc.SmartContractPath)
if os.IsNotExist(err) {
logger_s.Errorln("File or Directory Not Exist")
return errors.New("File or Directory Not Exist")
}
/*** smartcontract build ***/
logger_s.Errorln("build start")
cmd := exec.Command("env", "GOOS=linux", "go", "build", "-o", TMP_DIR + "/" + sc.Name, "./" + sc.Name + ".go")
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID | identifier_body |
smart_contract_service_impl.go | () {
repoList, err := domain.GetRepositoryList(authenticatedGit)
if err != nil {
errorHandler(errors.New("An error was occured during getting repository list"))
return
}
for _, repo := range repoList {
localReposPath := scs.SmartContractDirPath + "/" +
strings.Replace(repo.FullName, "/", "_", -1)
err = os.MkdirAll(localReposPath, 0755)
if err != nil {
errorHandler(errors.New("An error was occured during making repository path"))
return
}
commits, err := domain.GetReposCommits(repo.FullName)
if err != nil {
errorHandler(errors.New("An error was occured during getting commit logs"))
return
}
for _, commit := range commits {
if commit.Author.Login == authenticatedGit {
err := domain.CloneReposWithName(repo.FullName, localReposPath, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during cloning with name"))
return
}
err = domain.ResetWithSHA(localReposPath + "/" + commit.Sha, commit.Sha)
if err != nil {
errorHandler(errors.New("An error was occured during resetting with SHA"))
return
}
}
}
}
completionHandler()
return
}()
}
func (scs *SmartContractServiceImpl) Deploy(ReposPath string) (string, error) {
origin_repos_name := strings.Split(ReposPath, "/")[1]
new_repos_name := strings.Replace(ReposPath, "/", "_", -1)
_, ok := scs.keyByValue(ReposPath)
if ok {
// 버전 업데이트 기능 추가 필요
return "", errors.New("Already exist smart contract ID")
}
repos, err := domain.GetRepos(ReposPath)
if err != nil {
return "", errors.New("An error occured while getting repos!")
}
if repos.Message == "Bad credentials" {
return "", errors.New("Not Exist Repos!")
}
err = os.MkdirAll(scs.SmartContractDirPath + "/" + new_repos_name, 0755)
if err != nil {
return "", errors.New("An error occured while make repository's directory!")
}
//todo gitpath이미 존재하는지 확인
err = domain.CloneRepos(ReposPath, scs.SmartContractDirPath + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
common.Log.Println(viper.GetString("smartContract.githubID"))
_, err = domain.CreateRepos(new_repos_name, viper.GetString("smartContract.githubAccessToken"))
if err != nil {
return "", errors.New(err.Error())//"An error occured while creating repos!")
}
err = domain.ChangeRemote(scs.GithubID + "/" + new_repos_name, scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name)
if err != nil {
return "", errors.New("An error occured while cloning repos!")
}
// 버전 관리를 위한 파일 추가
now := time.Now().Format("2006-01-02 15:04:05");
file, err := os.OpenFile(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name + "/version", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil { | }
_, err = file.WriteString("Deployed at " + now + "\n")
if err != nil {
return "", errors.New("An error occured while writing file!")
}
err = file.Close()
if err != nil {
return "", errors.New("An error occured while closing file!")
}
err = domain.CommitAndPush(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, "It-Chain Smart Contract \"" + new_repos_name + "\" Deploy")
if err != nil {
return "", errors.New(err.Error())
//return "", errors.New("An error occured while committing and pushing!")
}
githubResponseCommits, err := domain.GetReposCommits(scs.GithubID + "/" + new_repos_name)
if err != nil {
return "", errors.New("An error occured while getting commit log!")
}
reposDirPath := scs.SmartContractDirPath + "/" + new_repos_name + "/" + githubResponseCommits[0].Sha
err = os.Rename(scs.SmartContractDirPath + "/" + new_repos_name + "/" + origin_repos_name, reposDirPath)
if err != nil {
return "", errors.New("An error occured while renaming directory!")
}
scs.SmartContractMap[githubResponseCommits[0].Sha] = SmartContract{new_repos_name, ReposPath, ""}
return githubResponseCommits[0].Sha, nil
}
/***************************************************
* 1. smartcontract 검사
* 2. smartcontract -> sc.tar : 애초에 풀 받을 때 압축해 둘 수 있음
* 3. go 버전에 맞는 docker image를 Create
* 4. sc.tar를 docker container로 복사
* 5. docker container Start
* 6. docker에서 smartcontract 실행
****************************************************/
func (scs *SmartContractServiceImpl) Query(transaction domain.Transaction) (error) {
/*** Set Transaction Arg ***/
logger_s.Errorln("query start")
tx_bytes, err := json.Marshal(transaction)
if err != nil {
return errors.New("Tx Marshal Error")
}
sc, ok := scs.SmartContractMap[transaction.TxData.ContractID];
if !ok {
logger_s.Errorln("Not exist contract ID")
return errors.New("Not exist contract ID")
}
_, err = os.Stat(sc.SmartContractPath)
if os.IsNotExist(err) {
logger_s.Errorln("File or Directory Not Exist")
return errors.New("File or Directory Not Exist")
}
/*** smartcontract build ***/
logger_s.Errorln("build start")
cmd := exec.Command("env", "GOOS=linux", "go", "build", "-o", TMP_DIR + "/" + sc.Name, "./" + sc.Name + ".go")
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("SmartContract build error")
return err
}
cmd = exec.Command("chmod", "777", TMP_DIR + "/" + sc.Name)
cmd.Dir = sc.SmartContractPath + "/" + transaction.TxData.ContractID
err = cmd.Run()
if err != nil {
logger_s.Errorln("Chmod Error")
return err
}
logger_s.Errorln("make tar")
err = domain.MakeTar(TMP_DIR + "/" + sc.Name, TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving smartcontract file!")
return err
}
err = domain.MakeTar("$GOPATH/src/it-chain/smartcontract/worldstatedb", TMP_DIR)
if err != nil {
logger_s.Errorln("An error occured while archiving worldstateDB file!")
return err
}
logger_s.Errorln("exec cmd")
// tar config file
cmd = exec.Command("tar", "-cf", TMP_DIR + "/config.tar", "./it-chain/config.yaml")
cmd.Dir = "../../"
err = cmd.Run()
if err != nil {
logger_s.Errorln("An error occured while archiving config file!")
return err
}
logger_s.Errorln("Pulling image")
// Docker Code
imageName := "docker.io/library/golang:1.9.2-alpine3.6"
tarPath := TMP_DIR + "/" + sc.Name + ".tar"
tarPath_wsdb := TMP_DIR + "/worldstatedb.tar"
tarPath_config := TMP_DIR + "/config.tar"
ctx := context.Background()
cli, err := docker.NewEnvClient()
if err != nil {
logger_s.Errorln("An error occured while creating new Docker Client!")
return err
}
out, err := cli.ImagePull(ctx, imageName, types.ImagePullOptions{})
if err != nil {
logger_s.Errorln("An error oeccured while pulling docker image!")
return err
}
io.Copy(os.Stdout, out)
imageName_splited := strings.Split(imageName, "/")
image := imageName_splited[len(imageName_splited)-1]
resp, err := cli.ContainerCreate(ctx, &container.Config{
Image: image,
Cmd: []string{"/go/src/" + sc.Name, string(tx_bytes)},
Tty: true,
AttachStdout: true,
AttachStderr: true,
}, nil, nil, "")
if err != nil {
logger_s.Errorln("An error occured while creating docker container!")
return err
}
/*** read tar file ***/
file, err := ioutil.ReadFile(tarPath)
if err != nil {
logger_s.Errorln("An error occured while reading smartcontract tar file!")
return err
}
wsdb, err := ioutil.ReadFile(tarPath_wsdb)
if err != nil {
logger_s.Errorln("An error occured while reading worldstateDB tar file!")
return err
}
| return "", errors.New("An error occured while creating or opening file!") | random_line_split |
main.rs | }
}
// Simple `split_once` "polyfill" since it's currently unstable.
fn split_once(text: &str, pat: char) -> Option<(&str, &str)> {
let mut iter = text.splitn(2, pat);
Some((iter.next()?, iter.next()?))
}
async fn parse_name_and_discriminator(
args: &mut Args,
) -> Option<Result<(String, u16), &'static str>> {
let mut name = String::new();
while let Ok(arg) = args.single::<String>() {
let mut fragment = arg.as_str();
if name.is_empty() {
match fragment.strip_prefix('@') {
Some(trimmed) => fragment = trimmed,
None => {
args.rewind();
return None;
}
}
}
match split_once(fragment, '#') {
Some((name_tail, discriminator_str)) => {
name.push_str(name_tail);
match discriminator_str.parse() {
Ok(discriminator) if (1..=9999).contains(&discriminator) => {
return Some(Ok((name, discriminator)))
}
_ => return Some(Err("invalid discriminator")),
}
}
None => name.push_str(fragment),
}
}
Some(Err(
"invalid format; mention should be in the form `@username#discriminator`",
))
}
#[group("relay")]
#[commands(forward)]
struct Relay;
#[command("forward")]
async fn forward(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
let config = {
let data = ctx.data.read().await;
data.get::<ConfigContainer>().unwrap().clone()
};
let delegate_member = if let Ok(member) = ctx
.http
.get_member(config.guild_id(), msg.author.id.into())
.await
{
member
} else {
msg.channel_id
.say(ctx, "Umm... have I made your acquaintance?")
.await?;
return Ok(());
};
if !delegate_member
.roles
.contains(&config.delegate_role_id().into())
{
msg.channel_id
.say(ctx, format!("This command is only available to delegates."))
.await?;
return Ok(());
}
let committee = if let Some(committee) = config
.committees()
.iter()
.find(|&committee| delegate_member.roles.contains(&committee.role_id().into()))
{
committee
} else {
msg.channel_id
.say(ctx, "Sorry, but I'm not sure which committee you're on.")
.await?;
return Ok(());
};
let committee_channel = ctx
.cache
.guild_channel(committee.channel_id())
.await
.expect("failed to find committee channel");
let recipient_id = match parse_name_and_discriminator(&mut args).await {
Some(res) => match res {
Ok((name, discriminator)) => {
let members = delegate_member.guild_id.members(ctx, None, None).await?;
match members
.iter()
.map(|member| &member.user)
.find(|&user| user.name == name && user.discriminator == discriminator)
.map(|user| user.id)
{
Some(id) => Some(id),
None => {
msg.channel_id
.say(ctx, "Sorry, I couldn't find that user.")
.await?;
return Ok(());
}
}
}
Err(err) => {
msg.channel_id
.say(
ctx,
format!(
"Sorry, I couldn't understand your mention. Problem: `{}`",
err
),
)
.await?;
return Ok(());
}
},
None => None,
};
let is_external = recipient_id.is_some(); | let committee_msg = committee_channel
.say(
ctx,
&MessageBuilder::new()
.push("Received request from ")
.mention(&msg.author)
.push(if is_external {
format!(
" to forward message to {}",
&recipient_id.unwrap().mention()
)
} else {
String::new()
})
.push_line(":")
.push_quote_line(cleaned_content.clone())
.push_line("")
.push(if is_external {
"Use the reactions below to approve or deny this request. "
} else {
""
})
.push(format!(
"Reply to this message within the next {} minutes{}to send a response.",
REACTION_TIMEOUT.as_secs() / 60, if is_external { " after voting " } else { " " }
))
.build(),
)
.await?;
if is_external {
committee_msg.react(ctx, POSITIVE_REACTION).await?;
committee_msg.react(ctx, NEGATIVE_REACTION).await?;
}
msg.reply(
ctx,
&MessageBuilder::new()
.push("Your message has been forwarded to ")
.push_bold_safe(committee.name())
.push(if is_external { " for approval" } else { "" })
.push(".")
.build(),
)
.await?;
typing.stop();
if is_external {
let approved = if let Some(reaction) = committee_msg
.await_reaction(ctx)
.timeout(REACTION_TIMEOUT)
.await
{
match reaction
.as_inner_ref()
.emoji
.as_data()
.chars()
.next()
.unwrap()
{
POSITIVE_REACTION => {
committee_msg
.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold("approved")
.push(".")
.build(),
)
.await?;
true
}
NEGATIVE_REACTION => {
committee_msg
.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold("rejected")
.push(".")
.build(),
)
.await?;
false
}
_ => {
committee_msg
.reply(ctx, "Invalid reaction; rejecting request.")
.await?;
false
}
}
} else {
committee_msg.delete_reactions(ctx).await?;
committee_msg
.reply(
ctx,
"No consensus reached; rejecting request.",
)
.await?;
false
};
msg.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold(if approved { "approved" } else { "rejected" })
.push(".")
.build(),
)
.await?;
if approved {
recipient_id
.unwrap()
.create_dm_channel(ctx)
.await?
.say(
ctx,
&MessageBuilder::new()
.push("Received message from ")
.mention(&msg.author)
.push_line(":")
.push_quote_line(cleaned_content.clone()),
)
.await?;
}
}
let committee_msg_id = committee_msg.id;
let mut replies = committee_channel
.id
.await_replies(ctx)
.timeout(REACTION_TIMEOUT)
.filter(move |msg| match msg.message_reference {
Some(ref msg_ref) => match msg_ref.message_id {
Some(m) => m == committee_msg_id,
None => false,
},
None => false,
})
.await;
while let Some(reply_msg) = replies.next().await {
let cleaned_content = content_safe(
&ctx.cache,
&reply_msg.content,
&ContentSafeOptions::default(),
)
.await;
msg.channel_id
.say(
ctx,
&MessageBuilder::new()
.push("Received reply from ")
.mention(&reply_msg.author)
.push_line(":")
.push_quote_line(cleaned_content.clone()),
)
.await?;
reply_msg.react(ctx, SENT_REACTION).await?;
}
Ok(())
}
#[group("role")]
#[commands(join)]
struct Role;
#[command("join")]
async fn join(ctx: &Context, msg: &Message, args: Args) -> CommandResult {
let config = {
let data = ctx.data.read().await;
data.get::<ConfigContainer>().unwrap().clone()
};
let in_valid_guild = match msg.guild_id {
Some(id) => id.as_u64() == &config.guild_id(),
None => false,
};
if !in_valid_guild {
msg.channel_id
.say(ctx, "I'm not configured to work here.")
.await?;
return Ok(());
}
let guild = msg.guild(ctx).await.unwrap();
let query = args.rest().to_lowercase();
let committee = if let Some(committee) = config.committees().iter().find(|&committee| {
query == guild.roles[&committee.role_id().into()].name.to_lowercase()
|| query == committee.name()
}) {
committee
} else {
msg.reply(ctx, "Sorry, I couldn't find a committee by that name.")
.await?;
return Ok(());
};
let mut member = msg.member(ctx).await?;
let committee_role_ids: HashSet<RoleId> = config
.committees()
.iter()
|
let cleaned_content = content_safe(ctx, args.rest(), &ContentSafeOptions::default()).await;
let typing = msg.channel_id.start_typing(&ctx.http)?;
| random_line_split |
main.rs | }
}
// Simple `split_once` "polyfill" since it's currently unstable.
fn split_once(text: &str, pat: char) -> Option<(&str, &str)> {
let mut iter = text.splitn(2, pat);
Some((iter.next()?, iter.next()?))
}
async fn parse_name_and_discriminator(
args: &mut Args,
) -> Option<Result<(String, u16), &'static str>> {
let mut name = String::new();
while let Ok(arg) = args.single::<String>() {
let mut fragment = arg.as_str();
if name.is_empty() {
match fragment.strip_prefix('@') {
Some(trimmed) => fragment = trimmed,
None => {
args.rewind();
return None;
}
}
}
match split_once(fragment, '#') {
Some((name_tail, discriminator_str)) => {
name.push_str(name_tail);
match discriminator_str.parse() {
Ok(discriminator) if (1..=9999).contains(&discriminator) => {
return Some(Ok((name, discriminator)))
}
_ => return Some(Err("invalid discriminator")),
}
}
None => name.push_str(fragment),
}
}
Some(Err(
"invalid format; mention should be in the form `@username#discriminator`",
))
}
#[group("relay")]
#[commands(forward)]
struct Relay;
| mmand("forward")]
async fn forward(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
let config = {
let data = ctx.data.read().await;
data.get::<ConfigContainer>().unwrap().clone()
};
let delegate_member = if let Ok(member) = ctx
.http
.get_member(config.guild_id(), msg.author.id.into())
.await
{
member
} else {
msg.channel_id
.say(ctx, "Umm... have I made your acquaintance?")
.await?;
return Ok(());
};
if !delegate_member
.roles
.contains(&config.delegate_role_id().into())
{
msg.channel_id
.say(ctx, format!("This command is only available to delegates."))
.await?;
return Ok(());
}
let committee = if let Some(committee) = config
.committees()
.iter()
.find(|&committee| delegate_member.roles.contains(&committee.role_id().into()))
{
committee
} else {
msg.channel_id
.say(ctx, "Sorry, but I'm not sure which committee you're on.")
.await?;
return Ok(());
};
let committee_channel = ctx
.cache
.guild_channel(committee.channel_id())
.await
.expect("failed to find committee channel");
let recipient_id = match parse_name_and_discriminator(&mut args).await {
Some(res) => match res {
Ok((name, discriminator)) => {
let members = delegate_member.guild_id.members(ctx, None, None).await?;
match members
.iter()
.map(|member| &member.user)
.find(|&user| user.name == name && user.discriminator == discriminator)
.map(|user| user.id)
{
Some(id) => Some(id),
None => {
msg.channel_id
.say(ctx, "Sorry, I couldn't find that user.")
.await?;
return Ok(());
}
}
}
Err(err) => {
msg.channel_id
.say(
ctx,
format!(
"Sorry, I couldn't understand your mention. Problem: `{}`",
err
),
)
.await?;
return Ok(());
}
},
None => None,
};
let is_external = recipient_id.is_some();
let cleaned_content = content_safe(ctx, args.rest(), &ContentSafeOptions::default()).await;
let typing = msg.channel_id.start_typing(&ctx.http)?;
let committee_msg = committee_channel
.say(
ctx,
&MessageBuilder::new()
.push("Received request from ")
.mention(&msg.author)
.push(if is_external {
format!(
" to forward message to {}",
&recipient_id.unwrap().mention()
)
} else {
String::new()
})
.push_line(":")
.push_quote_line(cleaned_content.clone())
.push_line("")
.push(if is_external {
"Use the reactions below to approve or deny this request. "
} else {
""
})
.push(format!(
"Reply to this message within the next {} minutes{}to send a response.",
REACTION_TIMEOUT.as_secs() / 60, if is_external { " after voting " } else { " " }
))
.build(),
)
.await?;
if is_external {
committee_msg.react(ctx, POSITIVE_REACTION).await?;
committee_msg.react(ctx, NEGATIVE_REACTION).await?;
}
msg.reply(
ctx,
&MessageBuilder::new()
.push("Your message has been forwarded to ")
.push_bold_safe(committee.name())
.push(if is_external { " for approval" } else { "" })
.push(".")
.build(),
)
.await?;
typing.stop();
if is_external {
let approved = if let Some(reaction) = committee_msg
.await_reaction(ctx)
.timeout(REACTION_TIMEOUT)
.await
{
match reaction
.as_inner_ref()
.emoji
.as_data()
.chars()
.next()
.unwrap()
{
POSITIVE_REACTION => {
committee_msg
.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold("approved")
.push(".")
.build(),
)
.await?;
true
}
NEGATIVE_REACTION => {
committee_msg
.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold("rejected")
.push(".")
.build(),
)
.await?;
false
}
_ => {
committee_msg
.reply(ctx, "Invalid reaction; rejecting request.")
.await?;
false
}
}
} else {
committee_msg.delete_reactions(ctx).await?;
committee_msg
.reply(
ctx,
"No consensus reached; rejecting request.",
)
.await?;
false
};
msg.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold(if approved { "approved" } else { "rejected" })
.push(".")
.build(),
)
.await?;
if approved {
recipient_id
.unwrap()
.create_dm_channel(ctx)
.await?
.say(
ctx,
&MessageBuilder::new()
.push("Received message from ")
.mention(&msg.author)
.push_line(":")
.push_quote_line(cleaned_content.clone()),
)
.await?;
}
}
let committee_msg_id = committee_msg.id;
let mut replies = committee_channel
.id
.await_replies(ctx)
.timeout(REACTION_TIMEOUT)
.filter(move |msg| match msg.message_reference {
Some(ref msg_ref) => match msg_ref.message_id {
Some(m) => m == committee_msg_id,
None => false,
},
None => false,
})
.await;
while let Some(reply_msg) = replies.next().await {
let cleaned_content = content_safe(
&ctx.cache,
&reply_msg.content,
&ContentSafeOptions::default(),
)
.await;
msg.channel_id
.say(
ctx,
&MessageBuilder::new()
.push("Received reply from ")
.mention(&reply_msg.author)
.push_line(":")
.push_quote_line(cleaned_content.clone()),
)
.await?;
reply_msg.react(ctx, SENT_REACTION).await?;
}
Ok(())
}
#[group("role")]
#[commands(join)]
struct Role;
#[command("join")]
async fn join(ctx: &Context, msg: &Message, args: Args) -> CommandResult {
let config = {
let data = ctx.data.read().await;
data.get::<ConfigContainer>().unwrap().clone()
};
let in_valid_guild = match msg.guild_id {
Some(id) => id.as_u64() == &config.guild_id(),
None => false,
};
if !in_valid_guild {
msg.channel_id
.say(ctx, "I'm not configured to work here.")
.await?;
return Ok(());
}
let guild = msg.guild(ctx).await.unwrap();
let query = args.rest().to_lowercase();
let committee = if let Some(committee) = config.committees().iter().find(|&committee| {
query == guild.roles[&committee.role_id().into()].name.to_lowercase()
|| query == committee.name()
}) {
committee
} else {
msg.reply(ctx, "Sorry, I couldn't find a committee by that name.")
.await?;
return Ok(());
};
let mut member = msg.member(ctx).await?;
let committee_role_ids: HashSet<RoleId> = config
.committees()
. |
#[co | identifier_name |
main.rs | }
}
// Simple `split_once` "polyfill" since it's currently unstable.
fn split_once(text: &str, pat: char) -> Option<(&str, &str)> {
l | fn parse_name_and_discriminator(
args: &mut Args,
) -> Option<Result<(String, u16), &'static str>> {
let mut name = String::new();
while let Ok(arg) = args.single::<String>() {
let mut fragment = arg.as_str();
if name.is_empty() {
match fragment.strip_prefix('@') {
Some(trimmed) => fragment = trimmed,
None => {
args.rewind();
return None;
}
}
}
match split_once(fragment, '#') {
Some((name_tail, discriminator_str)) => {
name.push_str(name_tail);
match discriminator_str.parse() {
Ok(discriminator) if (1..=9999).contains(&discriminator) => {
return Some(Ok((name, discriminator)))
}
_ => return Some(Err("invalid discriminator")),
}
}
None => name.push_str(fragment),
}
}
Some(Err(
"invalid format; mention should be in the form `@username#discriminator`",
))
}
#[group("relay")]
#[commands(forward)]
struct Relay;
#[command("forward")]
async fn forward(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
let config = {
let data = ctx.data.read().await;
data.get::<ConfigContainer>().unwrap().clone()
};
let delegate_member = if let Ok(member) = ctx
.http
.get_member(config.guild_id(), msg.author.id.into())
.await
{
member
} else {
msg.channel_id
.say(ctx, "Umm... have I made your acquaintance?")
.await?;
return Ok(());
};
if !delegate_member
.roles
.contains(&config.delegate_role_id().into())
{
msg.channel_id
.say(ctx, format!("This command is only available to delegates."))
.await?;
return Ok(());
}
let committee = if let Some(committee) = config
.committees()
.iter()
.find(|&committee| delegate_member.roles.contains(&committee.role_id().into()))
{
committee
} else {
msg.channel_id
.say(ctx, "Sorry, but I'm not sure which committee you're on.")
.await?;
return Ok(());
};
let committee_channel = ctx
.cache
.guild_channel(committee.channel_id())
.await
.expect("failed to find committee channel");
let recipient_id = match parse_name_and_discriminator(&mut args).await {
Some(res) => match res {
Ok((name, discriminator)) => {
let members = delegate_member.guild_id.members(ctx, None, None).await?;
match members
.iter()
.map(|member| &member.user)
.find(|&user| user.name == name && user.discriminator == discriminator)
.map(|user| user.id)
{
Some(id) => Some(id),
None => {
msg.channel_id
.say(ctx, "Sorry, I couldn't find that user.")
.await?;
return Ok(());
}
}
}
Err(err) => {
msg.channel_id
.say(
ctx,
format!(
"Sorry, I couldn't understand your mention. Problem: `{}`",
err
),
)
.await?;
return Ok(());
}
},
None => None,
};
let is_external = recipient_id.is_some();
let cleaned_content = content_safe(ctx, args.rest(), &ContentSafeOptions::default()).await;
let typing = msg.channel_id.start_typing(&ctx.http)?;
let committee_msg = committee_channel
.say(
ctx,
&MessageBuilder::new()
.push("Received request from ")
.mention(&msg.author)
.push(if is_external {
format!(
" to forward message to {}",
&recipient_id.unwrap().mention()
)
} else {
String::new()
})
.push_line(":")
.push_quote_line(cleaned_content.clone())
.push_line("")
.push(if is_external {
"Use the reactions below to approve or deny this request. "
} else {
""
})
.push(format!(
"Reply to this message within the next {} minutes{}to send a response.",
REACTION_TIMEOUT.as_secs() / 60, if is_external { " after voting " } else { " " }
))
.build(),
)
.await?;
if is_external {
committee_msg.react(ctx, POSITIVE_REACTION).await?;
committee_msg.react(ctx, NEGATIVE_REACTION).await?;
}
msg.reply(
ctx,
&MessageBuilder::new()
.push("Your message has been forwarded to ")
.push_bold_safe(committee.name())
.push(if is_external { " for approval" } else { "" })
.push(".")
.build(),
)
.await?;
typing.stop();
if is_external {
let approved = if let Some(reaction) = committee_msg
.await_reaction(ctx)
.timeout(REACTION_TIMEOUT)
.await
{
match reaction
.as_inner_ref()
.emoji
.as_data()
.chars()
.next()
.unwrap()
{
POSITIVE_REACTION => {
committee_msg
.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold("approved")
.push(".")
.build(),
)
.await?;
true
}
NEGATIVE_REACTION => {
committee_msg
.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold("rejected")
.push(".")
.build(),
)
.await?;
false
}
_ => {
committee_msg
.reply(ctx, "Invalid reaction; rejecting request.")
.await?;
false
}
}
} else {
committee_msg.delete_reactions(ctx).await?;
committee_msg
.reply(
ctx,
"No consensus reached; rejecting request.",
)
.await?;
false
};
msg.reply(
ctx,
&MessageBuilder::new()
.push("This request has been ")
.push_bold(if approved { "approved" } else { "rejected" })
.push(".")
.build(),
)
.await?;
if approved {
recipient_id
.unwrap()
.create_dm_channel(ctx)
.await?
.say(
ctx,
&MessageBuilder::new()
.push("Received message from ")
.mention(&msg.author)
.push_line(":")
.push_quote_line(cleaned_content.clone()),
)
.await?;
}
}
let committee_msg_id = committee_msg.id;
let mut replies = committee_channel
.id
.await_replies(ctx)
.timeout(REACTION_TIMEOUT)
.filter(move |msg| match msg.message_reference {
Some(ref msg_ref) => match msg_ref.message_id {
Some(m) => m == committee_msg_id,
None => false,
},
None => false,
})
.await;
while let Some(reply_msg) = replies.next().await {
let cleaned_content = content_safe(
&ctx.cache,
&reply_msg.content,
&ContentSafeOptions::default(),
)
.await;
msg.channel_id
.say(
ctx,
&MessageBuilder::new()
.push("Received reply from ")
.mention(&reply_msg.author)
.push_line(":")
.push_quote_line(cleaned_content.clone()),
)
.await?;
reply_msg.react(ctx, SENT_REACTION).await?;
}
Ok(())
}
#[group("role")]
#[commands(join)]
struct Role;
#[command("join")]
async fn join(ctx: &Context, msg: &Message, args: Args) -> CommandResult {
let config = {
let data = ctx.data.read().await;
data.get::<ConfigContainer>().unwrap().clone()
};
let in_valid_guild = match msg.guild_id {
Some(id) => id.as_u64() == &config.guild_id(),
None => false,
};
if !in_valid_guild {
msg.channel_id
.say(ctx, "I'm not configured to work here.")
.await?;
return Ok(());
}
let guild = msg.guild(ctx).await.unwrap();
let query = args.rest().to_lowercase();
let committee = if let Some(committee) = config.committees().iter().find(|&committee| {
query == guild.roles[&committee.role_id().into()].name.to_lowercase()
|| query == committee.name()
}) {
committee
} else {
msg.reply(ctx, "Sorry, I couldn't find a committee by that name.")
.await?;
return Ok(());
};
let mut member = msg.member(ctx).await?;
let committee_role_ids: HashSet<RoleId> = config
.committees()
.iter()
| et mut iter = text.splitn(2, pat);
Some((iter.next()?, iter.next()?))
}
async | identifier_body |
NeatDatePicker.js | winY = Dimensions.get('window').height
const NeatDatePicker = ({
isVisible,
initialDate, mode,
onCancel, onConfirm,
minDate, maxDate,
startDate, endDate,
onBackButtonPress, onBackdropPress,
chinese, colorOptions,
}) => {
const [showChangeYearModal, setShowChangeYearModal] = useState(false);
const sevenDays = chinese
? ['日', '一', '二', '三', '四', '五', '六']
: ['S', 'M', 'T', 'W', 'T', 'F', 'S']
// displayTime defines which month is going to be shown onto the screen
// For 'single' mode, displayTime is also the initial selected date when opening DatePicker at the first time.
const [displayTime, setDisplayTime] = useState(initialDate || new Date());
const year = displayTime.getFullYear()
const month = displayTime.getMonth()// 0-base
const date = displayTime.getDate()
const TODAY = new Date(year, month, date)
// output decides which date should be active.
const [output, setOutput] = useState(
mode === 'single'
? { date: TODAY, startDate: null, endDate: null }
: { date: null, startDate: startDate || null, endDate: endDate || null }
);
// If user presses cancel, reset 'output' state to this 'originalOutput'
const [originalOutput, setOriginalOutput] = useState(output);
const minTime = minDate?.getTime()
const maxTime = maxDate?.getTime()
// useDaysOfMonth returns an array that having several objects,
// representing all the days that are going to be rendered on screen.
// Each object contains five properties, 'year', 'month', 'date', 'isCurrentMonth' and 'disabled'.
const daysArray = useDaysOfMonth(year, month, minTime, maxTime)
const onCancelPress = () => {
onCancel()
setTimeout(() => {
// reset output to originalOutput
setOutput(originalOutput)
// originalOutput.startDate will be null only when the user hasn't picked any date using RANGE DatePicker.
// If that's the case, don't reset displayTime to originalOutput but initialDate/new Date()
if (mode === 'range' & !originalOutput.startDate) return setDisplayTime(initialDate || new Date())
// reset displayTime
return (mode === 'single')
? setDisplayTime(originalOutput.date)
: setDisplayTime(originalOutput.startDate)
}, 300);
}
const autoCompleteEndDate = () => {
// set endDate to startDate
output.endDate = output.startDate
// After successfully passing arguments in onConfirm, in next life cycle set endDate to null.
// Therefore, next time when user opens DatePicker he can start from selecting endDate.
setOutput({ ...output, endDate: null })
}
const onConfirmPress = () => {
if (mode === 'single') onConfirm(output.date)
else {
// If have not selected any date, just to onCancel
if (mode === 'range' & !output.startDate) return onCancel()
// If have not selected endDate, set it same as startDate
if (!output.endDate) autoCompleteEndDate()
onConfirm(output.startDate, output.endDate)
}
// Because the selected dates are confirmed, originalOutput should be updated.
setOriginalOutput({ ...output })
// reset displayTime
setTimeout(() => {
return (mode === 'single')
? setDisplayTime(output.date)
: setDisplayTime(output.startDate)
}, 300);
}
const [btnDisabled, setBtnDisabled] = useState(false);
// move to previous month
const onPrev = () => {
setBtnDisabled(true)
setDisplayTime(new Date(year, month - 1, date))
}
// move to next month
const onNext = () => {
setBtnDisabled(true)
setDisplayTime(new Date(year, month + 1, date))
}
// Disable Prev & Next buttons for a while after pressing them.
// Otherwise if the user presses the button rapidly in a short time
// the switching delay of the calendar is not neglectable
useEffect(() => {
setTimeout(setBtnDisabled, 300, false)
}, [btnDisabled])
// destructure colorOptions
const {
backgroundColor,
headerColor,
headerTextColor,
changeYearModalColor,
weekDaysColor,
dateTextColor,
selectedDateTextColor,
selectedDateBackgroundColor,
confirmButtonColor,
} = { ...defaultColorOptions, ...colorOptions }
// const [isFontsLoaded] = useFonts({
// Roboto_100Thin,
// Roboto_300Light,
// Roboto_400Regular,
// Roboto_500Medium,
// Roboto_700Bold,
// })
// if (!isFontsLoaded) return null
return (
<Modal
isVisible={isVisible}
animationIn={'zoomIn'}
animationOut={'zoomOut'}
useNativeDriver
hideModalContentWhileAnimating
onBackButtonPress={onBackButtonPress || onCancelPress}
onBackdropPress={onBackdropPress || onCancelPress}
style={styles.modal}
>
<View style={[styles.container, { backgroundColor: backgroundColor, }]}>
<View style={[styles.header, { backgroundColor: headerColor }]}>
{/* last month */}
<TouchableOpacity style={styles.changeMonthTO} onPress={onPrev} disabled={btnDisabled} >
<MDicon name={'keyboard-arrow-left'} size={32} color={headerTextColor} />
</TouchableOpacity>
{/* displayed year and month */}
<TouchableOpacity onPress={() => { setShowChangeYearModal(true) }}>
<Text style={[styles.header__title, { color: headerTextColor }]}>
{daysArray.length !== 0 && daysArray[10].year + ' '}
{daysArray.length !== 0 && (chinese ? getMonthInChinese(daysArray[10].month) : getMonthInEnglish(daysArray[10].month))}
</Text>
</TouchableOpacity>
{/* next month */}
<TouchableOpacity style={styles.changeMonthTO} onPress={onNext} disabled={btnDisabled} >
<MDicon name={'keyboard-arrow-right'} size={32} color={headerTextColor} />
</TouchableOpacity>
</View>
| {sevenDays.map((weekDay, index) => (
<View style={styles.keys} key={index.toString()}>
<Text style={[styles.weekDays, { color: weekDaysColor }]}>
{weekDay}
</Text>
</View>
))}
{/* every days */}
{daysArray.map((Day, i) => (
<Key key={Day.year.toString() + Day.month.toString() + i.toString()}
Day={Day}
mode={mode}
output={output}
setOutput={setOutput}
colorOptions={{
dateTextColor,
backgroundColor,
selectedDateTextColor,
selectedDateBackgroundColor
}}
/>
))}
</View>
<View style={styles.footer}>
<View style={styles.btn_box}>
<TouchableOpacity style={styles.btn} onPress={onCancelPress}>
<Text style={styles.btn_text}>
{chinese ? '取消' : 'Cancel'}
</Text>
</TouchableOpacity>
<TouchableOpacity style={styles.btn} onPress={onConfirmPress}>
<Text style={[styles.btn_text, { color: confirmButtonColor }]}>
{chinese ? '確定' : 'OK'}
</Text>
</TouchableOpacity>
</View>
</View>
<ChangeYearModal
isVisible={showChangeYearModal}
dismiss={() => { setShowChangeYearModal(false) }}
displayTime={displayTime}
setDisplayTime={setDisplayTime}
colorOptions={{
primary: changeYearModalColor,
backgroundColor
}}
/>
</View>
</Modal>
)
}
NeatDatePicker.proptype = {
isVisible: PropTypes.bool.isRequired,
mode: PropTypes.string.isRequired,
onConfirm: PropTypes.func,
minDate: PropTypes.object,
maxDate: PropTypes.object,
}
NeatDatePicker.defaultProps = {
}
// Notice: only six-digit HEX values are allowed.
const defaultColorOptions = {
backgroundColor: '#ffffff',
headerColor: '#4682E9',
headerTextColor: '#ffffff',
changeYearModalColor: '#4682E9',
weekDaysColor: '#4682E9',
dateTextColor: '#000000',
selectedDateTextColor: '#ffffff',
selectedDateBackgroundColor: '#4682E9',
confirmButtonColor: '#4682E9',
}
export default NeatDatePicker
const styles = StyleSheet.create({
modal: {
flex: 0,
height: winY,
alignItems: 'center',
padding: 0,
margin: 0,
},
container: {
width: 328,
justifyContent: 'center',
alignItems: 'center',
borderRadius: 12,
overflow: 'hidden'
},
header: {
// borderWidth: 1,
flexDirection: 'row',
width: '100%',
height: 68,
paddingHorizontal: 24,
justifyContent: 'space | <View style={styles.keys_container}>
{/* week days */} | random_line_split |
country-card.component.ts | .route.params.subscribe(params => {
this.selectedCountryId = params['id'];
this.getCountryData();
this.getShapeFile();
this.getCountriesList();
});
}
ngOnDestroy() {
this.sub.unsubscribe();
}
getShapeFile() {
this.countryCardService
.getShapeFile()
.then(responseObj => {
this.geoJsonData = responseObj;
this.loadmap();
this.loadlayer();
});
}
getCountriesList() {
this.countryCardService
.getCountryList()
.then(responseObj => {
this.countriesList = this.countryCardService.getSortedData(responseObj.features); // first sort the response object
});
}
// country data
getCountryData() {
this.countryCardService
.getCountryCardData().subscribe(
data => {
let yearContiner = [];
this.countryCardData = data[0];
this.countryCardMetaData = data[1];
data[0].filter(e => { // get unique year
if (yearContiner.indexOf(e.year) == -1) {
yearContiner.push(e.year);
}
});
yearContiner.sort(function (a, b) { return b - a });
this.countryCardDisplayData = [[], [], []];
this.countryCardData.filter(i => {
if ((i.year == yearContiner[0] && i.countryISO == this.selectedCountryId) || (i.year == yearContiner[yearContiner.length - 1] && i.countryISO == this.selectedCountryId)) {
if ((i.year == yearContiner[yearContiner.length - 1])) {
this.countryCardDisplayData[1].push(i);
}
if ((i.year == yearContiner[0])) {
this.countryCardDisplayData[0].push(i);
}
}
});
this.countryCardMetaData.filter(k => {
if (k.countryISO == this.selectedCountryId) {
this.countryCardDisplayData[2].push(k);
}
});
this.loadSpiderChart();
}
);
}
// init the leaflet object
loadmap() {
if (this.mapObj != null) return false;
this.mapObj = new L.Map('map-container', {
center: new L.LatLng(this.lat, this.lng),
zoom: this.zoom,
minZoom: this.minZoom,
maxZoom: this.maxZoom,
doubleClickZoom: false
});
}
// load the geojson layer on leaflet
loadlayer() {
this.geoJsonLayer = L.geoJson(this.geoJsonData, {
style: (layer) => {
return {
color: '#eee',
weight: 1,
opacity: 1,
fillColor: layer.properties.ISO_3_CODE == this.selectedCountryId ? '#00a3e0' : '#ffffff',
fillOpacity: 1,
className: ''
};
},
onEachFeature: (layer: any, feature: any) => {
feature.bindTooltip(layer.properties.CNTRY_TERR, { // bind tooptip for on each layer (now leaflet core supported)
direction: 'auto',
sticky: true,
opacity: 0.9
});
feature.on({
mouseover: (e: any) => { // mouse over highlight style
e.target.setStyle({
weight: 2,
color: 'white',
dashArray: '',
fillOpacity: 0.7
});
},
mouseout: (e: any) => { // mouse out reset layer style
this.geoJsonLayer.resetStyle(e.target);
},
click: () => { // click on layer
}
});
}
});
this.mapObj.addLayer(this.geoJsonLayer);
// Zoom selected country
this.geoJsonData.features.filter((layer) => {
if (layer.properties.ISO_3_CODE == this.selectedCountryId) |
});
}
resetStyle(e: any) {
this.geoJsonLayer(e.target);
}
removeGeoLayer = function () {
if (this.geoJsonLayer != undefined) {
this.mapObj.removeLayer(this.geoJsonLayer);
}
}
// chart
loadSpiderChart() {
Highcharts.chart('spider-chart-container', {
chart: {
polar: true,
type: 'line',
spacingLeft: 10,
marginRight: 100
},
credits: {
enabled: false
},
exporting: {
enabled: false
},
title: {
text: '',//this.selectedCountryName,
x: 0,
y: 3
},
pane: {
size: '80%'
},
xAxis: {
categories: ['Regulatory authority', 'Regulatory mandate', 'Regulatory regime', 'Competition framework'],
tickmarkPlacement: 'on',
lineWidth: 2,
labels: {
// distance: 15,
step: 1,
style: {
fontSize: '13px',
fontFamily: 'Verdana, sans-serif',
width: 150,
}
}
},
tooltip: {
shared: true,
crosshairs: true
},
yAxis: {
//gridLineInterpolation: 'polygon',
lineWidth: 2,
"tickInterval": 1,
"min": 0,
"max": 30,
endOnTick: true,
showLastLabel: false
},
legend: {
align: 'left',
verticalAlign: 'top',
y: 3,
layout: 'vertical'
},
plotOptions: {
/* line: {
marker: {
enabled: true
}
} */
series: {
states: {
hover: {
enabled: true,
halo: {
size: 0
}
}
}
}
},
series: [{
name: '2007',
data: [Number(this.countryCardDisplayData[1][0].cluster1RA), Number(this.countryCardDisplayData[1][0].cluster2RM), Number(this.countryCardDisplayData[1][0].cluster3RR), Number(this.countryCardDisplayData[1][0].cluster4CF)],
pointPlacement: 'on',
color: '#318dde',
marker: {
symbol: 'circle',
fillColor: '#318dde',
lineWidth: 1,
lineColor: null // inherit from series
}
}, {
name: '2015',
data: [Number(this.countryCardDisplayData[0][0].cluster1RA), Number(this.countryCardDisplayData[0][0].cluster2RM), Number(this.countryCardDisplayData[0][0].cluster3RR), Number(this.countryCardDisplayData[0][0].cluster4CF)],
pointPlacement: 'on',
color: '#b33226',
marker: {
symbol: 'circle',
fillColor: '#b33226',
lineWidth: 2,
lineColor: null // inherit from series
}
}]
});
}
// to set by country in dropdown
isSelected(country: any) {
if (country.properties.iso_a3 == this.selectedCountryId) {
this.selectedCountryName = country.properties.name;
this.selectedCountryFlagId = country.properties.iso_a2.toLowerCase();
return true;
}
}
// change the country for country card event hadler
onSelect(selection: any) {
this.selectedCountryName = selection.properties.name;
this.selectedCountryId = selection.properties.iso_a3;
this.selectedCountryFlagId = selection.properties.iso_a2.toLowerCase();
this.router.navigate(['/country-card', selection.properties.iso_a3]);
}
prepareFormatForDownloadData() {
return [
{" ":"Country Name", "_": this.countryCardDisplayData[0][0].countryName},
{" ":"Mobile-cellular telephone subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].mc_subs},
{" ":"Fixed broadband subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].fb_subs},
{" ":"GNI per capita (in USD)", "_": this.countryCardDisplayData[2][0].gni},
{" ":"Region", "_": this.countryCardDisplayData[0][0].regionName},
{" ":"Tracker 2015 Rank", "_": this.countryCardDisplayData[0][0].rank},
{" ":"Tracker 2015 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Tracker 2007 Rank", "_": this.countryCardDisplayData[1][0].rank},
{" ":"Tracker 2007 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Cluster 1: REGULATORY AUTHORITY (Max Category | {
var currentBounds = L.geoJson(layer).getBounds();
this.mapObj.fitBounds(currentBounds);
setTimeout(() => {
let zoomDiff = this.mapObj.getZoom()
if (this.mapObj.getZoom() > 4) {
zoomDiff = 4;
}
this.mapObj.setView(this.mapObj.getCenter(), zoomDiff);
}, 800);
} | conditional_block |
country-card.component.ts | .route.params.subscribe(params => {
this.selectedCountryId = params['id'];
this.getCountryData();
this.getShapeFile();
this.getCountriesList();
});
}
ngOnDestroy() {
this.sub.unsubscribe();
}
getShapeFile() |
getCountriesList() {
this.countryCardService
.getCountryList()
.then(responseObj => {
this.countriesList = this.countryCardService.getSortedData(responseObj.features); // first sort the response object
});
}
// country data
getCountryData() {
this.countryCardService
.getCountryCardData().subscribe(
data => {
let yearContiner = [];
this.countryCardData = data[0];
this.countryCardMetaData = data[1];
data[0].filter(e => { // get unique year
if (yearContiner.indexOf(e.year) == -1) {
yearContiner.push(e.year);
}
});
yearContiner.sort(function (a, b) { return b - a });
this.countryCardDisplayData = [[], [], []];
this.countryCardData.filter(i => {
if ((i.year == yearContiner[0] && i.countryISO == this.selectedCountryId) || (i.year == yearContiner[yearContiner.length - 1] && i.countryISO == this.selectedCountryId)) {
if ((i.year == yearContiner[yearContiner.length - 1])) {
this.countryCardDisplayData[1].push(i);
}
if ((i.year == yearContiner[0])) {
this.countryCardDisplayData[0].push(i);
}
}
});
this.countryCardMetaData.filter(k => {
if (k.countryISO == this.selectedCountryId) {
this.countryCardDisplayData[2].push(k);
}
});
this.loadSpiderChart();
}
);
}
// init the leaflet object
loadmap() {
if (this.mapObj != null) return false;
this.mapObj = new L.Map('map-container', {
center: new L.LatLng(this.lat, this.lng),
zoom: this.zoom,
minZoom: this.minZoom,
maxZoom: this.maxZoom,
doubleClickZoom: false
});
}
// load the geojson layer on leaflet
loadlayer() {
this.geoJsonLayer = L.geoJson(this.geoJsonData, {
style: (layer) => {
return {
color: '#eee',
weight: 1,
opacity: 1,
fillColor: layer.properties.ISO_3_CODE == this.selectedCountryId ? '#00a3e0' : '#ffffff',
fillOpacity: 1,
className: ''
};
},
onEachFeature: (layer: any, feature: any) => {
feature.bindTooltip(layer.properties.CNTRY_TERR, { // bind tooptip for on each layer (now leaflet core supported)
direction: 'auto',
sticky: true,
opacity: 0.9
});
feature.on({
mouseover: (e: any) => { // mouse over highlight style
e.target.setStyle({
weight: 2,
color: 'white',
dashArray: '',
fillOpacity: 0.7
});
},
mouseout: (e: any) => { // mouse out reset layer style
this.geoJsonLayer.resetStyle(e.target);
},
click: () => { // click on layer
}
});
}
});
this.mapObj.addLayer(this.geoJsonLayer);
// Zoom selected country
this.geoJsonData.features.filter((layer) => {
if (layer.properties.ISO_3_CODE == this.selectedCountryId) {
var currentBounds = L.geoJson(layer).getBounds();
this.mapObj.fitBounds(currentBounds);
setTimeout(() => {
let zoomDiff = this.mapObj.getZoom()
if (this.mapObj.getZoom() > 4) {
zoomDiff = 4;
}
this.mapObj.setView(this.mapObj.getCenter(), zoomDiff);
}, 800);
}
});
}
resetStyle(e: any) {
this.geoJsonLayer(e.target);
}
removeGeoLayer = function () {
if (this.geoJsonLayer != undefined) {
this.mapObj.removeLayer(this.geoJsonLayer);
}
}
// chart
loadSpiderChart() {
Highcharts.chart('spider-chart-container', {
chart: {
polar: true,
type: 'line',
spacingLeft: 10,
marginRight: 100
},
credits: {
enabled: false
},
exporting: {
enabled: false
},
title: {
text: '',//this.selectedCountryName,
x: 0,
y: 3
},
pane: {
size: '80%'
},
xAxis: {
categories: ['Regulatory authority', 'Regulatory mandate', 'Regulatory regime', 'Competition framework'],
tickmarkPlacement: 'on',
lineWidth: 2,
labels: {
// distance: 15,
step: 1,
style: {
fontSize: '13px',
fontFamily: 'Verdana, sans-serif',
width: 150,
}
}
},
tooltip: {
shared: true,
crosshairs: true
},
yAxis: {
//gridLineInterpolation: 'polygon',
lineWidth: 2,
"tickInterval": 1,
"min": 0,
"max": 30,
endOnTick: true,
showLastLabel: false
},
legend: {
align: 'left',
verticalAlign: 'top',
y: 3,
layout: 'vertical'
},
plotOptions: {
/* line: {
marker: {
enabled: true
}
} */
series: {
states: {
hover: {
enabled: true,
halo: {
size: 0
}
}
}
}
},
series: [{
name: '2007',
data: [Number(this.countryCardDisplayData[1][0].cluster1RA), Number(this.countryCardDisplayData[1][0].cluster2RM), Number(this.countryCardDisplayData[1][0].cluster3RR), Number(this.countryCardDisplayData[1][0].cluster4CF)],
pointPlacement: 'on',
color: '#318dde',
marker: {
symbol: 'circle',
fillColor: '#318dde',
lineWidth: 1,
lineColor: null // inherit from series
}
}, {
name: '2015',
data: [Number(this.countryCardDisplayData[0][0].cluster1RA), Number(this.countryCardDisplayData[0][0].cluster2RM), Number(this.countryCardDisplayData[0][0].cluster3RR), Number(this.countryCardDisplayData[0][0].cluster4CF)],
pointPlacement: 'on',
color: '#b33226',
marker: {
symbol: 'circle',
fillColor: '#b33226',
lineWidth: 2,
lineColor: null // inherit from series
}
}]
});
}
// to set by country in dropdown
isSelected(country: any) {
if (country.properties.iso_a3 == this.selectedCountryId) {
this.selectedCountryName = country.properties.name;
this.selectedCountryFlagId = country.properties.iso_a2.toLowerCase();
return true;
}
}
// change the country for country card event hadler
onSelect(selection: any) {
this.selectedCountryName = selection.properties.name;
this.selectedCountryId = selection.properties.iso_a3;
this.selectedCountryFlagId = selection.properties.iso_a2.toLowerCase();
this.router.navigate(['/country-card', selection.properties.iso_a3]);
}
prepareFormatForDownloadData() {
return [
{" ":"Country Name", "_": this.countryCardDisplayData[0][0].countryName},
{" ":"Mobile-cellular telephone subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].mc_subs},
{" ":"Fixed broadband subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].fb_subs},
{" ":"GNI per capita (in USD)", "_": this.countryCardDisplayData[2][0].gni},
{" ":"Region", "_": this.countryCardDisplayData[0][0].regionName},
{" ":"Tracker 2015 Rank", "_": this.countryCardDisplayData[0][0].rank},
{" ":"Tracker 2015 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Tracker 2007 Rank", "_": this.countryCardDisplayData[1][0].rank},
{" ":"Tracker 2007 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Cluster 1: REGULATORY AUTHORITY (Max Category | {
this.countryCardService
.getShapeFile()
.then(responseObj => {
this.geoJsonData = responseObj;
this.loadmap();
this.loadlayer();
});
} | identifier_body |
country-card.component.ts | .route.params.subscribe(params => {
this.selectedCountryId = params['id'];
this.getCountryData();
this.getShapeFile();
this.getCountriesList();
});
}
ngOnDestroy() {
this.sub.unsubscribe();
}
getShapeFile() {
this.countryCardService
.getShapeFile()
.then(responseObj => {
this.geoJsonData = responseObj;
this.loadmap();
this.loadlayer();
});
}
getCountriesList() {
this.countryCardService
.getCountryList()
.then(responseObj => {
this.countriesList = this.countryCardService.getSortedData(responseObj.features); // first sort the response object
});
}
// country data
getCountryData() {
this.countryCardService
.getCountryCardData().subscribe(
data => {
let yearContiner = [];
this.countryCardData = data[0];
this.countryCardMetaData = data[1];
data[0].filter(e => { // get unique year
if (yearContiner.indexOf(e.year) == -1) {
yearContiner.push(e.year);
}
});
yearContiner.sort(function (a, b) { return b - a });
this.countryCardDisplayData = [[], [], []];
this.countryCardData.filter(i => {
if ((i.year == yearContiner[0] && i.countryISO == this.selectedCountryId) || (i.year == yearContiner[yearContiner.length - 1] && i.countryISO == this.selectedCountryId)) {
if ((i.year == yearContiner[yearContiner.length - 1])) {
this.countryCardDisplayData[1].push(i);
}
if ((i.year == yearContiner[0])) {
this.countryCardDisplayData[0].push(i);
}
}
});
this.countryCardMetaData.filter(k => {
if (k.countryISO == this.selectedCountryId) {
this.countryCardDisplayData[2].push(k);
}
});
this.loadSpiderChart();
}
);
}
// init the leaflet object
loadmap() {
if (this.mapObj != null) return false;
this.mapObj = new L.Map('map-container', {
center: new L.LatLng(this.lat, this.lng),
zoom: this.zoom,
minZoom: this.minZoom,
maxZoom: this.maxZoom,
doubleClickZoom: false
});
}
// load the geojson layer on leaflet
loadlayer() {
this.geoJsonLayer = L.geoJson(this.geoJsonData, {
style: (layer) => {
return {
color: '#eee',
weight: 1,
opacity: 1,
fillColor: layer.properties.ISO_3_CODE == this.selectedCountryId ? '#00a3e0' : '#ffffff',
fillOpacity: 1,
className: ''
};
},
onEachFeature: (layer: any, feature: any) => {
feature.bindTooltip(layer.properties.CNTRY_TERR, { // bind tooptip for on each layer (now leaflet core supported)
direction: 'auto',
sticky: true,
opacity: 0.9
});
feature.on({
mouseover: (e: any) => { // mouse over highlight style
e.target.setStyle({
weight: 2,
color: 'white',
dashArray: '',
fillOpacity: 0.7
});
},
mouseout: (e: any) => { // mouse out reset layer style
this.geoJsonLayer.resetStyle(e.target);
},
click: () => { // click on layer
}
});
}
});
this.mapObj.addLayer(this.geoJsonLayer);
// Zoom selected country
this.geoJsonData.features.filter((layer) => {
if (layer.properties.ISO_3_CODE == this.selectedCountryId) {
var currentBounds = L.geoJson(layer).getBounds();
this.mapObj.fitBounds(currentBounds);
setTimeout(() => {
let zoomDiff = this.mapObj.getZoom()
if (this.mapObj.getZoom() > 4) {
zoomDiff = 4;
}
this.mapObj.setView(this.mapObj.getCenter(), zoomDiff);
}, 800);
}
});
}
resetStyle(e: any) {
this.geoJsonLayer(e.target);
}
removeGeoLayer = function () {
if (this.geoJsonLayer != undefined) {
this.mapObj.removeLayer(this.geoJsonLayer);
}
}
// chart
loadSpiderChart() {
Highcharts.chart('spider-chart-container', {
chart: {
polar: true,
type: 'line',
spacingLeft: 10,
marginRight: 100
},
credits: {
enabled: false
},
exporting: {
enabled: false
},
title: {
text: '',//this.selectedCountryName,
x: 0,
y: 3
},
pane: {
size: '80%'
},
xAxis: {
categories: ['Regulatory authority', 'Regulatory mandate', 'Regulatory regime', 'Competition framework'],
tickmarkPlacement: 'on',
lineWidth: 2,
labels: {
// distance: 15,
step: 1,
style: {
fontSize: '13px',
fontFamily: 'Verdana, sans-serif',
width: 150,
}
}
},
tooltip: {
shared: true,
crosshairs: true
},
yAxis: {
//gridLineInterpolation: 'polygon',
lineWidth: 2,
"tickInterval": 1,
"min": 0,
"max": 30,
endOnTick: true,
showLastLabel: false
},
legend: {
align: 'left',
verticalAlign: 'top',
y: 3,
layout: 'vertical'
},
plotOptions: {
/* line: {
marker: {
enabled: true
}
} */
series: {
states: {
hover: {
enabled: true,
halo: {
size: 0
}
}
}
}
},
series: [{
name: '2007',
data: [Number(this.countryCardDisplayData[1][0].cluster1RA), Number(this.countryCardDisplayData[1][0].cluster2RM), Number(this.countryCardDisplayData[1][0].cluster3RR), Number(this.countryCardDisplayData[1][0].cluster4CF)],
pointPlacement: 'on',
color: '#318dde',
marker: {
symbol: 'circle',
fillColor: '#318dde',
lineWidth: 1,
lineColor: null // inherit from series
}
}, {
name: '2015',
data: [Number(this.countryCardDisplayData[0][0].cluster1RA), Number(this.countryCardDisplayData[0][0].cluster2RM), Number(this.countryCardDisplayData[0][0].cluster3RR), Number(this.countryCardDisplayData[0][0].cluster4CF)],
pointPlacement: 'on',
color: '#b33226',
marker: {
symbol: 'circle',
fillColor: '#b33226',
lineWidth: 2,
lineColor: null // inherit from series
}
}]
});
}
// to set by country in dropdown
isSelected(country: any) {
if (country.properties.iso_a3 == this.selectedCountryId) {
this.selectedCountryName = country.properties.name;
this.selectedCountryFlagId = country.properties.iso_a2.toLowerCase();
return true;
}
} |
// change the country for country card event hadler
onSelect(selection: any) {
this.selectedCountryName = selection.properties.name;
this.selectedCountryId = selection.properties.iso_a3;
this.selectedCountryFlagId = selection.properties.iso_a2.toLowerCase();
this.router.navigate(['/country-card', selection.properties.iso_a3]);
}
prepareFormatForDownloadData() {
return [
{" ":"Country Name", "_": this.countryCardDisplayData[0][0].countryName},
{" ":"Mobile-cellular telephone subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].mc_subs},
{" ":"Fixed broadband subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].fb_subs},
{" ":"GNI per capita (in USD)", "_": this.countryCardDisplayData[2][0].gni},
{" ":"Region", "_": this.countryCardDisplayData[0][0].regionName},
{" ":"Tracker 2015 Rank", "_": this.countryCardDisplayData[0][0].rank},
{" ":"Tracker 2015 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Tracker 2007 Rank", "_": this.countryCardDisplayData[1][0].rank},
{" ":"Tracker 2007 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Cluster 1: REGULATORY AUTHORITY (Max Category Score | random_line_split | |
country-card.component.ts | this.route.params.subscribe(params => {
this.selectedCountryId = params['id'];
this.getCountryData();
this.getShapeFile();
this.getCountriesList();
});
}
ngOnDestroy() {
this.sub.unsubscribe();
}
getShapeFile() {
this.countryCardService
.getShapeFile()
.then(responseObj => {
this.geoJsonData = responseObj;
this.loadmap();
this.loadlayer();
});
}
getCountriesList() {
this.countryCardService
.getCountryList()
.then(responseObj => {
this.countriesList = this.countryCardService.getSortedData(responseObj.features); // first sort the response object
});
}
// country data
getCountryData() {
this.countryCardService
.getCountryCardData().subscribe(
data => {
let yearContiner = [];
this.countryCardData = data[0];
this.countryCardMetaData = data[1];
data[0].filter(e => { // get unique year
if (yearContiner.indexOf(e.year) == -1) {
yearContiner.push(e.year);
}
});
yearContiner.sort(function (a, b) { return b - a });
this.countryCardDisplayData = [[], [], []];
this.countryCardData.filter(i => {
if ((i.year == yearContiner[0] && i.countryISO == this.selectedCountryId) || (i.year == yearContiner[yearContiner.length - 1] && i.countryISO == this.selectedCountryId)) {
if ((i.year == yearContiner[yearContiner.length - 1])) {
this.countryCardDisplayData[1].push(i);
}
if ((i.year == yearContiner[0])) {
this.countryCardDisplayData[0].push(i);
}
}
});
this.countryCardMetaData.filter(k => {
if (k.countryISO == this.selectedCountryId) {
this.countryCardDisplayData[2].push(k);
}
});
this.loadSpiderChart();
}
);
}
// init the leaflet object
loadmap() {
if (this.mapObj != null) return false;
this.mapObj = new L.Map('map-container', {
center: new L.LatLng(this.lat, this.lng),
zoom: this.zoom,
minZoom: this.minZoom,
maxZoom: this.maxZoom,
doubleClickZoom: false
});
}
// load the geojson layer on leaflet
loadlayer() {
this.geoJsonLayer = L.geoJson(this.geoJsonData, {
style: (layer) => {
return {
color: '#eee',
weight: 1,
opacity: 1,
fillColor: layer.properties.ISO_3_CODE == this.selectedCountryId ? '#00a3e0' : '#ffffff',
fillOpacity: 1,
className: ''
};
},
onEachFeature: (layer: any, feature: any) => {
feature.bindTooltip(layer.properties.CNTRY_TERR, { // bind tooptip for on each layer (now leaflet core supported)
direction: 'auto',
sticky: true,
opacity: 0.9
});
feature.on({
mouseover: (e: any) => { // mouse over highlight style
e.target.setStyle({
weight: 2,
color: 'white',
dashArray: '',
fillOpacity: 0.7
});
},
mouseout: (e: any) => { // mouse out reset layer style
this.geoJsonLayer.resetStyle(e.target);
},
click: () => { // click on layer
}
});
}
});
this.mapObj.addLayer(this.geoJsonLayer);
// Zoom selected country
this.geoJsonData.features.filter((layer) => {
if (layer.properties.ISO_3_CODE == this.selectedCountryId) {
var currentBounds = L.geoJson(layer).getBounds();
this.mapObj.fitBounds(currentBounds);
setTimeout(() => {
let zoomDiff = this.mapObj.getZoom()
if (this.mapObj.getZoom() > 4) {
zoomDiff = 4;
}
this.mapObj.setView(this.mapObj.getCenter(), zoomDiff);
}, 800);
}
});
}
| (e: any) {
this.geoJsonLayer(e.target);
}
removeGeoLayer = function () {
if (this.geoJsonLayer != undefined) {
this.mapObj.removeLayer(this.geoJsonLayer);
}
}
// chart
loadSpiderChart() {
Highcharts.chart('spider-chart-container', {
chart: {
polar: true,
type: 'line',
spacingLeft: 10,
marginRight: 100
},
credits: {
enabled: false
},
exporting: {
enabled: false
},
title: {
text: '',//this.selectedCountryName,
x: 0,
y: 3
},
pane: {
size: '80%'
},
xAxis: {
categories: ['Regulatory authority', 'Regulatory mandate', 'Regulatory regime', 'Competition framework'],
tickmarkPlacement: 'on',
lineWidth: 2,
labels: {
// distance: 15,
step: 1,
style: {
fontSize: '13px',
fontFamily: 'Verdana, sans-serif',
width: 150,
}
}
},
tooltip: {
shared: true,
crosshairs: true
},
yAxis: {
//gridLineInterpolation: 'polygon',
lineWidth: 2,
"tickInterval": 1,
"min": 0,
"max": 30,
endOnTick: true,
showLastLabel: false
},
legend: {
align: 'left',
verticalAlign: 'top',
y: 3,
layout: 'vertical'
},
plotOptions: {
/* line: {
marker: {
enabled: true
}
} */
series: {
states: {
hover: {
enabled: true,
halo: {
size: 0
}
}
}
}
},
series: [{
name: '2007',
data: [Number(this.countryCardDisplayData[1][0].cluster1RA), Number(this.countryCardDisplayData[1][0].cluster2RM), Number(this.countryCardDisplayData[1][0].cluster3RR), Number(this.countryCardDisplayData[1][0].cluster4CF)],
pointPlacement: 'on',
color: '#318dde',
marker: {
symbol: 'circle',
fillColor: '#318dde',
lineWidth: 1,
lineColor: null // inherit from series
}
}, {
name: '2015',
data: [Number(this.countryCardDisplayData[0][0].cluster1RA), Number(this.countryCardDisplayData[0][0].cluster2RM), Number(this.countryCardDisplayData[0][0].cluster3RR), Number(this.countryCardDisplayData[0][0].cluster4CF)],
pointPlacement: 'on',
color: '#b33226',
marker: {
symbol: 'circle',
fillColor: '#b33226',
lineWidth: 2,
lineColor: null // inherit from series
}
}]
});
}
// to set by country in dropdown
isSelected(country: any) {
if (country.properties.iso_a3 == this.selectedCountryId) {
this.selectedCountryName = country.properties.name;
this.selectedCountryFlagId = country.properties.iso_a2.toLowerCase();
return true;
}
}
// change the country for country card event hadler
onSelect(selection: any) {
this.selectedCountryName = selection.properties.name;
this.selectedCountryId = selection.properties.iso_a3;
this.selectedCountryFlagId = selection.properties.iso_a2.toLowerCase();
this.router.navigate(['/country-card', selection.properties.iso_a3]);
}
prepareFormatForDownloadData() {
return [
{" ":"Country Name", "_": this.countryCardDisplayData[0][0].countryName},
{" ":"Mobile-cellular telephone subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].mc_subs},
{" ":"Fixed broadband subscriptions per 100 inhabitants, 2015", "_": this.countryCardDisplayData[2][0].fb_subs},
{" ":"GNI per capita (in USD)", "_": this.countryCardDisplayData[2][0].gni},
{" ":"Region", "_": this.countryCardDisplayData[0][0].regionName},
{" ":"Tracker 2015 Rank", "_": this.countryCardDisplayData[0][0].rank},
{" ":"Tracker 2015 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Tracker 2007 Rank", "_": this.countryCardDisplayData[1][0].rank},
{" ":"Tracker 2007 Score", "_": this.countryCardDisplayData[0][0].overall},
{" ":"Cluster 1: REGULATORY AUTHORITY (Max Category Score | resetStyle | identifier_name |
pars_upload.rs | impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[header::AUTHORIZATION])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars" / String)
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(update_pars_handler)
.with(cors)
}
pub fn handler(
state_manager: StateManager,
pars_origin: &str,
) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[
header::AUTHORIZATION,
header::HeaderName::from_bytes(b"username").unwrap(),
])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars")
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(upload_pars_handler)
.with(cors)
}
async fn add_file_to_temporary_blob_storage(
_job_id: Uuid,
file_data: &[u8],
licence_number: &str,
) -> Result<StorageFile, SubmissionError> {
let storage_client = AzureBlobStorage::temporary();
let storage_file = storage_client
.add_file(file_data, licence_number, HashMap::new())
.await
.map_err(|e| SubmissionError::BlobStorageError {
message: format!("Problem talking to temporary blob storage: {:?}", e),
})?;
Ok(storage_file) | }
fn document_from_form_data(storage_file: StorageFile, metadata: BlobMetadata) -> Document {
Document {
id: metadata.file_name.to_string(),
name: metadata.title.to_string(),
document_type: DocumentType::Par,
author: metadata.author.to_string(),
products: metadata.product_names.to_vec_string(),
keywords: match metadata.keywords {
Some(a) => Some(a.to_vec_string()),
None => None,
},
pl_number: metadata.pl_number,
territory: metadata.territory,
active_substances: metadata.active_substances.to_vec_string(),
file_source: FileSource::TemporaryAzureBlobStorage,
file_path: storage_file.name,
}
}
async fn queue_pars_upload(
form_data: FormData,
uploader_email: String,
state_manager: impl JobStatusClient,
) -> Result<Vec<Uuid>, Rejection> {
let (metadatas, file_data) = read_pars_upload(form_data).await.map_err(|e| {
tracing::debug!("Error reading PARS upload: {:?}", e);
warp::reject::custom(e)
})?;
let mut job_ids = Vec::with_capacity(metadatas.len());
for metadata in metadatas {
let job_id = accept_job(&state_manager).await?.id;
job_ids.push(job_id);
let storage_file =
add_file_to_temporary_blob_storage(job_id, &file_data, &metadata.pl_number)
.await
.map_err(warp::reject::custom)?;
let document = document_from_form_data(storage_file, metadata);
check_in_document_handler(document, &state_manager, Some(uploader_email.clone())).await?;
}
Ok(job_ids)
}
async fn update_pars_handler(
existing_par_identifier: String,
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let delete = delete_document_handler(
UniqueDocumentIdentifier::MetadataStorageName(existing_par_identifier),
&state_manager,
Some(username.clone()),
)
.await?;
let upload = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UpdateResponse { delete, upload }))
}
async fn upload_pars_handler(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let job_ids = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UploadResponse { job_ids }))
}
async fn queue_upload_pars_job(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<Vec<Uuid>, Rejection> {
let request_id = Uuid::new_v4();
let span = tracing::info_span!("PARS upload", request_id = request_id.to_string().as_str());
let _enter = span.enter();
tracing::debug!("Received PARS submission");
tracing::info!("Uploader email: {}", username);
Ok(queue_pars_upload(form_data, username, state_manager).await?)
}
#[derive(Debug, Serialize)]
struct UploadResponse {
job_ids: Vec<Uuid>,
}
#[derive(Debug, Serialize)]
struct UpdateResponse {
delete: JobStatusResponse,
upload: Vec<Uuid>,
}
async fn read_pars_upload(
form_data: FormData,
) -> Result<(Vec<BlobMetadata>, Vec<u8>), SubmissionError> {
let fields = collect_fields(form_data)
.await
.map_err(|error| SubmissionError::UploadError { error })?;
let GroupedFields {
products,
file_name,
file_data,
} = groups_fields_by_product(fields)?;
let metadatas = products
.into_iter()
.map(|fields| product_form_data_to_blob_metadata(file_name.clone(), fields))
.collect::<Result<_, _>>()?;
Ok((metadatas, file_data))
}
#[derive(Debug)]
struct GroupedFields {
products: Vec<Vec<Field>>,
file_name: String,
file_data: Vec<u8>,
}
fn groups_fields_by_product(fields: Vec<Field>) -> Result<GroupedFields, SubmissionError> {
let mut products = Vec::new();
let mut file_field = None;
for field in fields {
if field.name == "file" {
file_field = Some(field.value);
continue;
}
if field.name == "product_name" {
products.push(vec![]);
}
match products.last_mut() {
Some(group) => {
group.push(field);
}
None => {
let group = vec![field];
products.push(group);
}
}
}
let file_name = file_field
.as_ref()
.and_then(|field| field.file_name())
.ok_or(SubmissionError::MissingField { name: "file" })?
.to_string();
let file_data = file_field
.and_then(|field| field.into_file_data())
.ok_or(SubmissionError::MissingField { name: "file" })?;
Ok(GroupedFields {
products,
file_name,
file_data,
})
}
fn product_form_data_to_blob_metadata(
file_name: String,
fields: Vec<Field>,
) -> Result<BlobMetadata, SubmissionError> {
let product_name = get_field_as_uppercase_string(&fields, "product_name")?;
let product_names = vec![product_name];
let title = get_field_as_uppercase_string(&fields, "title")?;
let pl_number = get_field_as_uppercase_string(&fields, "licence_number")?;
let active_substances = fields
.iter()
.filter(|field| field.name == "active_substance")
.filter_map(|field| field.value.value())
.map(|s| s.to_uppercase())
.collect::<Vec<String>>();
let territory = fields
.iter()
.find(|field| field.name == "territory")
.and_then(|field| field.value.value())
.map(|s| TerritoryType::from_str(s))
.transpose()?;
let author = "".to_string();
Ok(BlobMetadata::new(
file_name,
DocumentType::Par,
title,
pl_number,
territory,
product_names,
active_substances,
author,
None,
))
}
fn get_field_as_uppercase_string(
fields: &[Field],
field_name: &'static str,
) -> Result<String, SubmissionError> {
fields
.iter()
.find(|field| field.name == field_name)
.and_then(|field| field.value.value())
.ok_or(SubmissionError::MissingField { name: field_name })
.map(|s| s.to_uppercase())
}
#[derive(Debug)]
enum SubmissionError {
UploadError {
error: anyhow::Error,
},
BlobStorageError {
message: String, // should maybe be StorageClientError but that is not
// Send + Sync so then we can't implement warp::reject::Reject
},
MissingField {
name: &'static str,
},
UnknownTerritoryType {
error: TerritoryTypeParseError,
},
}
impl From<TerritoryTypeParseError> for SubmissionError {
fn from(error: TerritoryTypeParseError) -> Self {
SubmissionError::UnknownTerritoryType { error }
}
}
impl | random_line_split | |
pars_upload.rs | impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[header::AUTHORIZATION])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars" / String)
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(update_pars_handler)
.with(cors)
}
pub fn handler(
state_manager: StateManager,
pars_origin: &str,
) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[
header::AUTHORIZATION,
header::HeaderName::from_bytes(b"username").unwrap(),
])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars")
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(upload_pars_handler)
.with(cors)
}
async fn add_file_to_temporary_blob_storage(
_job_id: Uuid,
file_data: &[u8],
licence_number: &str,
) -> Result<StorageFile, SubmissionError> {
let storage_client = AzureBlobStorage::temporary();
let storage_file = storage_client
.add_file(file_data, licence_number, HashMap::new())
.await
.map_err(|e| SubmissionError::BlobStorageError {
message: format!("Problem talking to temporary blob storage: {:?}", e),
})?;
Ok(storage_file)
}
fn document_from_form_data(storage_file: StorageFile, metadata: BlobMetadata) -> Document {
Document {
id: metadata.file_name.to_string(),
name: metadata.title.to_string(),
document_type: DocumentType::Par,
author: metadata.author.to_string(),
products: metadata.product_names.to_vec_string(),
keywords: match metadata.keywords {
Some(a) => Some(a.to_vec_string()),
None => None,
},
pl_number: metadata.pl_number,
territory: metadata.territory,
active_substances: metadata.active_substances.to_vec_string(),
file_source: FileSource::TemporaryAzureBlobStorage,
file_path: storage_file.name,
}
}
async fn queue_pars_upload(
form_data: FormData,
uploader_email: String,
state_manager: impl JobStatusClient,
) -> Result<Vec<Uuid>, Rejection> {
let (metadatas, file_data) = read_pars_upload(form_data).await.map_err(|e| {
tracing::debug!("Error reading PARS upload: {:?}", e);
warp::reject::custom(e)
})?;
let mut job_ids = Vec::with_capacity(metadatas.len());
for metadata in metadatas {
let job_id = accept_job(&state_manager).await?.id;
job_ids.push(job_id);
let storage_file =
add_file_to_temporary_blob_storage(job_id, &file_data, &metadata.pl_number)
.await
.map_err(warp::reject::custom)?;
let document = document_from_form_data(storage_file, metadata);
check_in_document_handler(document, &state_manager, Some(uploader_email.clone())).await?;
}
Ok(job_ids)
}
async fn update_pars_handler(
existing_par_identifier: String,
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let delete = delete_document_handler(
UniqueDocumentIdentifier::MetadataStorageName(existing_par_identifier),
&state_manager,
Some(username.clone()),
)
.await?;
let upload = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UpdateResponse { delete, upload }))
}
async fn upload_pars_handler(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let job_ids = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UploadResponse { job_ids }))
}
async fn queue_upload_pars_job(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<Vec<Uuid>, Rejection> {
let request_id = Uuid::new_v4();
let span = tracing::info_span!("PARS upload", request_id = request_id.to_string().as_str());
let _enter = span.enter();
tracing::debug!("Received PARS submission");
tracing::info!("Uploader email: {}", username);
Ok(queue_pars_upload(form_data, username, state_manager).await?)
}
#[derive(Debug, Serialize)]
struct UploadResponse {
job_ids: Vec<Uuid>,
}
#[derive(Debug, Serialize)]
struct UpdateResponse {
delete: JobStatusResponse,
upload: Vec<Uuid>,
}
async fn read_pars_upload(
form_data: FormData,
) -> Result<(Vec<BlobMetadata>, Vec<u8>), SubmissionError> {
let fields = collect_fields(form_data)
.await
.map_err(|error| SubmissionError::UploadError { error })?;
let GroupedFields {
products,
file_name,
file_data,
} = groups_fields_by_product(fields)?;
let metadatas = products
.into_iter()
.map(|fields| product_form_data_to_blob_metadata(file_name.clone(), fields))
.collect::<Result<_, _>>()?;
Ok((metadatas, file_data))
}
#[derive(Debug)]
struct GroupedFields {
products: Vec<Vec<Field>>,
file_name: String,
file_data: Vec<u8>,
}
fn groups_fields_by_product(fields: Vec<Field>) -> Result<GroupedFields, SubmissionError> | products.push(group);
}
}
}
let file_name = file_field
.as_ref()
.and_then(|field| field.file_name())
.ok_or(SubmissionError::MissingField { name: "file" })?
.to_string();
let file_data = file_field
.and_then(|field| field.into_file_data())
.ok_or(SubmissionError::MissingField { name: "file" })?;
Ok(GroupedFields {
products,
file_name,
file_data,
})
}
fn product_form_data_to_blob_metadata(
file_name: String,
fields: Vec<Field>,
) -> Result<BlobMetadata, SubmissionError> {
let product_name = get_field_as_uppercase_string(&fields, "product_name")?;
let product_names = vec![product_name];
let title = get_field_as_uppercase_string(&fields, "title")?;
let pl_number = get_field_as_uppercase_string(&fields, "licence_number")?;
let active_substances = fields
.iter()
.filter(|field| field.name == "active_substance")
.filter_map(|field| field.value.value())
.map(|s| s.to_uppercase())
.collect::<Vec<String>>();
let territory = fields
.iter()
.find(|field| field.name == "territory")
.and_then(|field| field.value.value())
.map(|s| TerritoryType::from_str(s))
.transpose()?;
let author = "".to_string();
Ok(BlobMetadata::new(
file_name,
DocumentType::Par,
title,
pl_number,
territory,
product_names,
active_substances,
author,
None,
))
}
fn get_field_as_uppercase_string(
fields: &[Field],
field_name: &'static str,
) -> Result<String, SubmissionError> {
fields
.iter()
.find(|field| field.name == field_name)
.and_then(|field| field.value.value())
.ok_or(SubmissionError::MissingField { name: field_name })
.map(|s| s.to_uppercase())
}
#[derive(Debug)]
enum SubmissionError {
UploadError {
error: anyhow::Error,
},
BlobStorageError {
message: String, // should maybe be StorageClientError but that is not
// Send + Sync so then we can't implement warp::reject::Reject
},
MissingField {
name: &'static str,
},
UnknownTerritoryType {
error: TerritoryTypeParseError,
},
}
impl From<TerritoryTypeParseError> for SubmissionError {
fn from(error: TerritoryTypeParseError) -> Self {
SubmissionError::UnknownTerritoryType { error }
}
}
| {
let mut products = Vec::new();
let mut file_field = None;
for field in fields {
if field.name == "file" {
file_field = Some(field.value);
continue;
}
if field.name == "product_name" {
products.push(vec![]);
}
match products.last_mut() {
Some(group) => {
group.push(field);
}
None => {
let group = vec![field]; | identifier_body |
pars_upload.rs | impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[header::AUTHORIZATION])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars" / String)
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(update_pars_handler)
.with(cors)
}
pub fn handler(
state_manager: StateManager,
pars_origin: &str,
) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[
header::AUTHORIZATION,
header::HeaderName::from_bytes(b"username").unwrap(),
])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars")
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(upload_pars_handler)
.with(cors)
}
async fn add_file_to_temporary_blob_storage(
_job_id: Uuid,
file_data: &[u8],
licence_number: &str,
) -> Result<StorageFile, SubmissionError> {
let storage_client = AzureBlobStorage::temporary();
let storage_file = storage_client
.add_file(file_data, licence_number, HashMap::new())
.await
.map_err(|e| SubmissionError::BlobStorageError {
message: format!("Problem talking to temporary blob storage: {:?}", e),
})?;
Ok(storage_file)
}
fn document_from_form_data(storage_file: StorageFile, metadata: BlobMetadata) -> Document {
Document {
id: metadata.file_name.to_string(),
name: metadata.title.to_string(),
document_type: DocumentType::Par,
author: metadata.author.to_string(),
products: metadata.product_names.to_vec_string(),
keywords: match metadata.keywords {
Some(a) => Some(a.to_vec_string()),
None => None,
},
pl_number: metadata.pl_number,
territory: metadata.territory,
active_substances: metadata.active_substances.to_vec_string(),
file_source: FileSource::TemporaryAzureBlobStorage,
file_path: storage_file.name,
}
}
async fn queue_pars_upload(
form_data: FormData,
uploader_email: String,
state_manager: impl JobStatusClient,
) -> Result<Vec<Uuid>, Rejection> {
let (metadatas, file_data) = read_pars_upload(form_data).await.map_err(|e| {
tracing::debug!("Error reading PARS upload: {:?}", e);
warp::reject::custom(e)
})?;
let mut job_ids = Vec::with_capacity(metadatas.len());
for metadata in metadatas {
let job_id = accept_job(&state_manager).await?.id;
job_ids.push(job_id);
let storage_file =
add_file_to_temporary_blob_storage(job_id, &file_data, &metadata.pl_number)
.await
.map_err(warp::reject::custom)?;
let document = document_from_form_data(storage_file, metadata);
check_in_document_handler(document, &state_manager, Some(uploader_email.clone())).await?;
}
Ok(job_ids)
}
async fn update_pars_handler(
existing_par_identifier: String,
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let delete = delete_document_handler(
UniqueDocumentIdentifier::MetadataStorageName(existing_par_identifier),
&state_manager,
Some(username.clone()),
)
.await?;
let upload = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UpdateResponse { delete, upload }))
}
async fn upload_pars_handler(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let job_ids = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UploadResponse { job_ids }))
}
async fn queue_upload_pars_job(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<Vec<Uuid>, Rejection> {
let request_id = Uuid::new_v4();
let span = tracing::info_span!("PARS upload", request_id = request_id.to_string().as_str());
let _enter = span.enter();
tracing::debug!("Received PARS submission");
tracing::info!("Uploader email: {}", username);
Ok(queue_pars_upload(form_data, username, state_manager).await?)
}
#[derive(Debug, Serialize)]
struct UploadResponse {
job_ids: Vec<Uuid>,
}
#[derive(Debug, Serialize)]
struct UpdateResponse {
delete: JobStatusResponse,
upload: Vec<Uuid>,
}
async fn read_pars_upload(
form_data: FormData,
) -> Result<(Vec<BlobMetadata>, Vec<u8>), SubmissionError> {
let fields = collect_fields(form_data)
.await
.map_err(|error| SubmissionError::UploadError { error })?;
let GroupedFields {
products,
file_name,
file_data,
} = groups_fields_by_product(fields)?;
let metadatas = products
.into_iter()
.map(|fields| product_form_data_to_blob_metadata(file_name.clone(), fields))
.collect::<Result<_, _>>()?;
Ok((metadatas, file_data))
}
#[derive(Debug)]
struct GroupedFields {
products: Vec<Vec<Field>>,
file_name: String,
file_data: Vec<u8>,
}
fn groups_fields_by_product(fields: Vec<Field>) -> Result<GroupedFields, SubmissionError> {
let mut products = Vec::new();
let mut file_field = None;
for field in fields {
if field.name == "file" {
file_field = Some(field.value);
continue;
}
if field.name == "product_name" {
products.push(vec![]);
}
match products.last_mut() {
Some(group) => |
None => {
let group = vec![field];
products.push(group);
}
}
}
let file_name = file_field
.as_ref()
.and_then(|field| field.file_name())
.ok_or(SubmissionError::MissingField { name: "file" })?
.to_string();
let file_data = file_field
.and_then(|field| field.into_file_data())
.ok_or(SubmissionError::MissingField { name: "file" })?;
Ok(GroupedFields {
products,
file_name,
file_data,
})
}
fn product_form_data_to_blob_metadata(
file_name: String,
fields: Vec<Field>,
) -> Result<BlobMetadata, SubmissionError> {
let product_name = get_field_as_uppercase_string(&fields, "product_name")?;
let product_names = vec![product_name];
let title = get_field_as_uppercase_string(&fields, "title")?;
let pl_number = get_field_as_uppercase_string(&fields, "licence_number")?;
let active_substances = fields
.iter()
.filter(|field| field.name == "active_substance")
.filter_map(|field| field.value.value())
.map(|s| s.to_uppercase())
.collect::<Vec<String>>();
let territory = fields
.iter()
.find(|field| field.name == "territory")
.and_then(|field| field.value.value())
.map(|s| TerritoryType::from_str(s))
.transpose()?;
let author = "".to_string();
Ok(BlobMetadata::new(
file_name,
DocumentType::Par,
title,
pl_number,
territory,
product_names,
active_substances,
author,
None,
))
}
fn get_field_as_uppercase_string(
fields: &[Field],
field_name: &'static str,
) -> Result<String, SubmissionError> {
fields
.iter()
.find(|field| field.name == field_name)
.and_then(|field| field.value.value())
.ok_or(SubmissionError::MissingField { name: field_name })
.map(|s| s.to_uppercase())
}
#[derive(Debug)]
enum SubmissionError {
UploadError {
error: anyhow::Error,
},
BlobStorageError {
message: String, // should maybe be StorageClientError but that is not
// Send + Sync so then we can't implement warp::reject::Reject
},
MissingField {
name: &'static str,
},
UnknownTerritoryType {
error: TerritoryTypeParseError,
},
}
impl From<TerritoryTypeParseError> for SubmissionError {
fn from(error: TerritoryTypeParseError) -> Self {
SubmissionError::UnknownTerritoryType { error }
}
| {
group.push(field);
} | conditional_block |
pars_upload.rs | impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[header::AUTHORIZATION])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars" / String)
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(update_pars_handler)
.with(cors)
}
pub fn handler(
state_manager: StateManager,
pars_origin: &str,
) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
let cors = warp::cors()
.allow_origin(pars_origin)
.allow_headers(&[
header::AUTHORIZATION,
header::HeaderName::from_bytes(b"username").unwrap(),
])
.allow_methods(&[Method::POST])
.build();
warp::path!("pars")
.and(warp::post())
// Max upload size is set to a very high limit here as the actual limit should be managed using istio
.and(warp::multipart::form().max_length(1000 * 1024 * 1024))
.and(with_state(state_manager))
.and(warp::header("username"))
.and_then(upload_pars_handler)
.with(cors)
}
async fn add_file_to_temporary_blob_storage(
_job_id: Uuid,
file_data: &[u8],
licence_number: &str,
) -> Result<StorageFile, SubmissionError> {
let storage_client = AzureBlobStorage::temporary();
let storage_file = storage_client
.add_file(file_data, licence_number, HashMap::new())
.await
.map_err(|e| SubmissionError::BlobStorageError {
message: format!("Problem talking to temporary blob storage: {:?}", e),
})?;
Ok(storage_file)
}
fn document_from_form_data(storage_file: StorageFile, metadata: BlobMetadata) -> Document {
Document {
id: metadata.file_name.to_string(),
name: metadata.title.to_string(),
document_type: DocumentType::Par,
author: metadata.author.to_string(),
products: metadata.product_names.to_vec_string(),
keywords: match metadata.keywords {
Some(a) => Some(a.to_vec_string()),
None => None,
},
pl_number: metadata.pl_number,
territory: metadata.territory,
active_substances: metadata.active_substances.to_vec_string(),
file_source: FileSource::TemporaryAzureBlobStorage,
file_path: storage_file.name,
}
}
async fn queue_pars_upload(
form_data: FormData,
uploader_email: String,
state_manager: impl JobStatusClient,
) -> Result<Vec<Uuid>, Rejection> {
let (metadatas, file_data) = read_pars_upload(form_data).await.map_err(|e| {
tracing::debug!("Error reading PARS upload: {:?}", e);
warp::reject::custom(e)
})?;
let mut job_ids = Vec::with_capacity(metadatas.len());
for metadata in metadatas {
let job_id = accept_job(&state_manager).await?.id;
job_ids.push(job_id);
let storage_file =
add_file_to_temporary_blob_storage(job_id, &file_data, &metadata.pl_number)
.await
.map_err(warp::reject::custom)?;
let document = document_from_form_data(storage_file, metadata);
check_in_document_handler(document, &state_manager, Some(uploader_email.clone())).await?;
}
Ok(job_ids)
}
async fn update_pars_handler(
existing_par_identifier: String,
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let delete = delete_document_handler(
UniqueDocumentIdentifier::MetadataStorageName(existing_par_identifier),
&state_manager,
Some(username.clone()),
)
.await?;
let upload = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UpdateResponse { delete, upload }))
}
async fn upload_pars_handler(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<impl Reply, Rejection> {
let job_ids = queue_upload_pars_job(form_data, state_manager, username).await?;
Ok(warp::reply::json(&UploadResponse { job_ids }))
}
async fn queue_upload_pars_job(
form_data: FormData,
state_manager: StateManager,
username: String,
) -> Result<Vec<Uuid>, Rejection> {
let request_id = Uuid::new_v4();
let span = tracing::info_span!("PARS upload", request_id = request_id.to_string().as_str());
let _enter = span.enter();
tracing::debug!("Received PARS submission");
tracing::info!("Uploader email: {}", username);
Ok(queue_pars_upload(form_data, username, state_manager).await?)
}
#[derive(Debug, Serialize)]
struct | {
job_ids: Vec<Uuid>,
}
#[derive(Debug, Serialize)]
struct UpdateResponse {
delete: JobStatusResponse,
upload: Vec<Uuid>,
}
async fn read_pars_upload(
form_data: FormData,
) -> Result<(Vec<BlobMetadata>, Vec<u8>), SubmissionError> {
let fields = collect_fields(form_data)
.await
.map_err(|error| SubmissionError::UploadError { error })?;
let GroupedFields {
products,
file_name,
file_data,
} = groups_fields_by_product(fields)?;
let metadatas = products
.into_iter()
.map(|fields| product_form_data_to_blob_metadata(file_name.clone(), fields))
.collect::<Result<_, _>>()?;
Ok((metadatas, file_data))
}
#[derive(Debug)]
struct GroupedFields {
products: Vec<Vec<Field>>,
file_name: String,
file_data: Vec<u8>,
}
fn groups_fields_by_product(fields: Vec<Field>) -> Result<GroupedFields, SubmissionError> {
let mut products = Vec::new();
let mut file_field = None;
for field in fields {
if field.name == "file" {
file_field = Some(field.value);
continue;
}
if field.name == "product_name" {
products.push(vec![]);
}
match products.last_mut() {
Some(group) => {
group.push(field);
}
None => {
let group = vec![field];
products.push(group);
}
}
}
let file_name = file_field
.as_ref()
.and_then(|field| field.file_name())
.ok_or(SubmissionError::MissingField { name: "file" })?
.to_string();
let file_data = file_field
.and_then(|field| field.into_file_data())
.ok_or(SubmissionError::MissingField { name: "file" })?;
Ok(GroupedFields {
products,
file_name,
file_data,
})
}
fn product_form_data_to_blob_metadata(
file_name: String,
fields: Vec<Field>,
) -> Result<BlobMetadata, SubmissionError> {
let product_name = get_field_as_uppercase_string(&fields, "product_name")?;
let product_names = vec![product_name];
let title = get_field_as_uppercase_string(&fields, "title")?;
let pl_number = get_field_as_uppercase_string(&fields, "licence_number")?;
let active_substances = fields
.iter()
.filter(|field| field.name == "active_substance")
.filter_map(|field| field.value.value())
.map(|s| s.to_uppercase())
.collect::<Vec<String>>();
let territory = fields
.iter()
.find(|field| field.name == "territory")
.and_then(|field| field.value.value())
.map(|s| TerritoryType::from_str(s))
.transpose()?;
let author = "".to_string();
Ok(BlobMetadata::new(
file_name,
DocumentType::Par,
title,
pl_number,
territory,
product_names,
active_substances,
author,
None,
))
}
fn get_field_as_uppercase_string(
fields: &[Field],
field_name: &'static str,
) -> Result<String, SubmissionError> {
fields
.iter()
.find(|field| field.name == field_name)
.and_then(|field| field.value.value())
.ok_or(SubmissionError::MissingField { name: field_name })
.map(|s| s.to_uppercase())
}
#[derive(Debug)]
enum SubmissionError {
UploadError {
error: anyhow::Error,
},
BlobStorageError {
message: String, // should maybe be StorageClientError but that is not
// Send + Sync so then we can't implement warp::reject::Reject
},
MissingField {
name: &'static str,
},
UnknownTerritoryType {
error: TerritoryTypeParseError,
},
}
impl From<TerritoryTypeParseError> for SubmissionError {
fn from(error: TerritoryTypeParseError) -> Self {
SubmissionError::UnknownTerritoryType { error }
}
}
| UploadResponse | identifier_name |
calc_CORL2017_Tables.py | </style>
<title> Self-Driving Car Research </title>
<p>By Mojtaba Valipour @ Shiraz University - 2018 </p>
<p><a href="http://vpcom.ir/">vpcom.ir</a></p>
</head>
<body><p>MODEL: %s</a></p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p></body>
</html>
"""
# Tested by latexbase.com
latexWrapper = """
\\documentclass{article}
\\usepackage{graphicx}
\\begin{document}
\\title{Self-Driving Car Research}
\\author{Mojtaba Valipour}
\\maketitle
\\section{Model : %s}
\\subsection{Tables}
\\subsubsection{Percentage of Success}
\\begin{center}
%s
Success rate for the agent (mean and standard deviation shown).
\\end{center}
\\subsubsection{Infractions : Straight}
\\begin{center}
%s
Average number of kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Infractions : One Turn}
\\begin{center}
%s
Average number of kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Infractions : Navigation}
\\begin{center}
%s
Average number of kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Infractions : Navigation With Dynamic Obstacles}
\\begin{center}
%s
Average number of kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Num Infractions : Straight}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\subsubsection{Num Infractions : One Turn}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\subsubsection{Num Infractions : Navigation}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\subsection{Num Infractions : Navigation With Dynamic Obstacles}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\end{document}
"""
if (__name__ == '__main__'):
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-v', '--verbose',
action='store_true',
dest='debug',
help='print debug information')
argparser.add_argument(
'-n', '--model_name',
metavar='T',
default='CoRL2017-Paper',
help='The name of the model for writing in the reports'
)
argparser.add_argument(
'-p', '--path',
metavar='P',
default='test',
help='Path to all log files'
)
args = argparser.parse_args()
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
logging.info('sarting the calculations %s', "0") #TODO: add time instead on zero
experiment_suite = CoRL2017("Town01")
metrics_object = Metrics(experiment_suite.metrics_parameters,
experiment_suite.dynamic_tasks)
# Improve readability by adding a weather dictionary
weather_name_dict = {1: 'Clear Noon', 3: 'After Rain Noon',
6: 'Heavy Rain Noon', 8: 'Clear Sunset',
4: 'Cloudy After Rain', 14: 'Soft Rain Sunset'}
# names for all the test logs
pathNames = {0:'_Test01_CoRL2017_Town01',
1:'_Test02_CoRL2017_Town01',
2:'_Test03_CoRL2017_Town01',
3:'_Test01_CoRL2017_Town02',
4:'_Test02_CoRL2017_Town02',
5:'_Test03_CoRL2017_Town02'}
tasksSuccessRate = {0: 'Straight', 1: 'One Turn', 2: 'Navigation', 3: 'Nav. Dynamic'} # number_of_episodes = len(list(metrics_summary['episodes_fully_completed'].items())[0][1])
tasksInfractions = {0: 'Opposite Lane', 1: 'Sidewalk', 2: 'Collision-static', 3: 'Collision-car', 4:'Collision-pedestrian'} #
states = {0: 'Training Conditions', 1: 'New Town', 2: 'New Weather', 3: 'New Town & Weather'}
statesSettings = {0: {'Path':[pathNames[0],pathNames[1],pathNames[2]], 'Weathers':experiment_suite.train_weathers},
1: {'Path':[pathNames[3],pathNames[4],pathNames[5]], 'Weathers':experiment_suite.train_weathers},
2: {'Path':[pathNames[0],pathNames[1],pathNames[2]], 'Weathers':experiment_suite.test_weathers},
3: {'Path':[pathNames[3],pathNames[4],pathNames[5]], 'Weathers':experiment_suite.train_weathers+experiment_suite.test_weathers}}
# In CoRL-2017 paper, infraction are only computed on the fourth task - "Navigation with dynamic obstacles".
dataSuccessRate = np.zeros((len(tasksSuccessRate),len(states))) # hold the whole table 1 data
dataInfractions = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 2 data
dataNumInfractions = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 3 data
dataSuccessRateSTD = np.zeros((len(tasksSuccessRate),len(states))) # hold the whole table 1 std data
dataInfractionsSTD = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 2 std data
dataNumInfractionsSTD = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 3 std data
# TABLE 1 - CoRL2017 Paper
metrics_to_average = [
'episodes_fully_completed',
'episodes_completion'
]
infraction_metrics = [
'collision_pedestrians',
'collision_vehicles',
'collision_other',
'intersection_offroad',
'intersection_otherlane'
]
# Configuration
table1Flag = True
table2Flag = True
table3Flag = True
# extract the start name of the folders
#TODO: Automatic this extraction process better and smartly
if args.path[-1]=='/':
addSlashFlag = False
allDir = glob.glob(args.path+'*')
else:
addSlashFlag = True
allDir = glob.glob(args.path+'/*')
extractedPath = allDir[0].split('/')[-1].replace(statesSettings[0]['Path'][0],'')
logging.info('Please make sure all the subdirectory of %s start with %s', args.path, extractedPath)
for sIdx, state in enumerate(states):
logging.debug('State: %s', state)
weathers = statesSettings[state]['Weathers']
allPath = statesSettings[state]['Path']
# This will make life easier for calculating std
dataListTable1 = [[] for i in range(len(tasksSuccessRate))]
dataListTable2 = [[[] for i in range(len(tasksSuccessRate))] for i in range(len(tasksInfractions))]
dataListTable3 = [[[] for i in range(len(tasksSuccessRate))] for i in range(len(tasksInfractions))]
#logging.debug("Data list table 2 init: %s",dataListTable2)
# calculate metrics : episodes_fully_completed
for p in allPath:
if addSlashFlag == True:
path = args.path + '/' + extractedPath + p
else:
path = args.path + extractedPath + p
metrics_summary = metrics_object.compute(path)
number_of_tasks = len(list(metrics_summary[metrics_to_average[0]].items())[0][1])
values = metrics_summary[metrics_to_average[0]] # episodes_fully_completed
if(table1Flag):
logging.debug("Working on table 1 ...")
metric_sum_values = np.zeros(number_of_tasks)
for w, tasks in values.items():
if w in set(weathers):
count = 0
for tIdx, t in enumerate(tasks):
#print(weathers[tIdx]) #float(sum(t)) / float(len(t)))
metric_sum_values[count] += (float(sum(t)) / float(len(t))) * 1.0 / float(len(weathers))
count += 1
# array's elements displacement, this is because of std/avg calculation
for j in range(number_of_tasks):
dataListTable1[j].append(metric_sum_values[j])
# table 2
| background-color: #dddddd;
} | random_line_split | |
calc_CORL2017_Tables.py | kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Infractions : Navigation With Dynamic Obstacles}
\\begin{center}
%s
Average number of kilometers travelled before an infraction.
\\end{center}
\\subsubsection{Num Infractions : Straight}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\subsubsection{Num Infractions : One Turn}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\subsubsection{Num Infractions : Navigation}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\subsection{Num Infractions : Navigation With Dynamic Obstacles}
\\begin{center}
%s
Number of infractions occured in the whole path
\\end{center}
\\end{document}
"""
if (__name__ == '__main__'):
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-v', '--verbose',
action='store_true',
dest='debug',
help='print debug information')
argparser.add_argument(
'-n', '--model_name',
metavar='T',
default='CoRL2017-Paper',
help='The name of the model for writing in the reports'
)
argparser.add_argument(
'-p', '--path',
metavar='P',
default='test',
help='Path to all log files'
)
args = argparser.parse_args()
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
logging.info('sarting the calculations %s', "0") #TODO: add time instead on zero
experiment_suite = CoRL2017("Town01")
metrics_object = Metrics(experiment_suite.metrics_parameters,
experiment_suite.dynamic_tasks)
# Improve readability by adding a weather dictionary
weather_name_dict = {1: 'Clear Noon', 3: 'After Rain Noon',
6: 'Heavy Rain Noon', 8: 'Clear Sunset',
4: 'Cloudy After Rain', 14: 'Soft Rain Sunset'}
# names for all the test logs
pathNames = {0:'_Test01_CoRL2017_Town01',
1:'_Test02_CoRL2017_Town01',
2:'_Test03_CoRL2017_Town01',
3:'_Test01_CoRL2017_Town02',
4:'_Test02_CoRL2017_Town02',
5:'_Test03_CoRL2017_Town02'}
tasksSuccessRate = {0: 'Straight', 1: 'One Turn', 2: 'Navigation', 3: 'Nav. Dynamic'} # number_of_episodes = len(list(metrics_summary['episodes_fully_completed'].items())[0][1])
tasksInfractions = {0: 'Opposite Lane', 1: 'Sidewalk', 2: 'Collision-static', 3: 'Collision-car', 4:'Collision-pedestrian'} #
states = {0: 'Training Conditions', 1: 'New Town', 2: 'New Weather', 3: 'New Town & Weather'}
statesSettings = {0: {'Path':[pathNames[0],pathNames[1],pathNames[2]], 'Weathers':experiment_suite.train_weathers},
1: {'Path':[pathNames[3],pathNames[4],pathNames[5]], 'Weathers':experiment_suite.train_weathers},
2: {'Path':[pathNames[0],pathNames[1],pathNames[2]], 'Weathers':experiment_suite.test_weathers},
3: {'Path':[pathNames[3],pathNames[4],pathNames[5]], 'Weathers':experiment_suite.train_weathers+experiment_suite.test_weathers}}
# In CoRL-2017 paper, infraction are only computed on the fourth task - "Navigation with dynamic obstacles".
dataSuccessRate = np.zeros((len(tasksSuccessRate),len(states))) # hold the whole table 1 data
dataInfractions = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 2 data
dataNumInfractions = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 3 data
dataSuccessRateSTD = np.zeros((len(tasksSuccessRate),len(states))) # hold the whole table 1 std data
dataInfractionsSTD = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 2 std data
dataNumInfractionsSTD = np.zeros((len(tasksSuccessRate),len(tasksInfractions),len(states))) # hold the whole table 3 std data
# TABLE 1 - CoRL2017 Paper
metrics_to_average = [
'episodes_fully_completed',
'episodes_completion'
]
infraction_metrics = [
'collision_pedestrians',
'collision_vehicles',
'collision_other',
'intersection_offroad',
'intersection_otherlane'
]
# Configuration
table1Flag = True
table2Flag = True
table3Flag = True
# extract the start name of the folders
#TODO: Automatic this extraction process better and smartly
if args.path[-1]=='/':
addSlashFlag = False
allDir = glob.glob(args.path+'*')
else:
addSlashFlag = True
allDir = glob.glob(args.path+'/*')
extractedPath = allDir[0].split('/')[-1].replace(statesSettings[0]['Path'][0],'')
logging.info('Please make sure all the subdirectory of %s start with %s', args.path, extractedPath)
for sIdx, state in enumerate(states):
logging.debug('State: %s', state)
weathers = statesSettings[state]['Weathers']
allPath = statesSettings[state]['Path']
# This will make life easier for calculating std
dataListTable1 = [[] for i in range(len(tasksSuccessRate))]
dataListTable2 = [[[] for i in range(len(tasksSuccessRate))] for i in range(len(tasksInfractions))]
dataListTable3 = [[[] for i in range(len(tasksSuccessRate))] for i in range(len(tasksInfractions))]
#logging.debug("Data list table 2 init: %s",dataListTable2)
# calculate metrics : episodes_fully_completed
for p in allPath:
if addSlashFlag == True:
path = args.path + '/' + extractedPath + p
else:
path = args.path + extractedPath + p
metrics_summary = metrics_object.compute(path)
number_of_tasks = len(list(metrics_summary[metrics_to_average[0]].items())[0][1])
values = metrics_summary[metrics_to_average[0]] # episodes_fully_completed
if(table1Flag):
logging.debug("Working on table 1 ...")
metric_sum_values = np.zeros(number_of_tasks)
for w, tasks in values.items():
if w in set(weathers):
count = 0
for tIdx, t in enumerate(tasks):
#print(weathers[tIdx]) #float(sum(t)) / float(len(t)))
metric_sum_values[count] += (float(sum(t)) / float(len(t))) * 1.0 / float(len(weathers))
count += 1
# array's elements displacement, this is because of std/avg calculation
for j in range(number_of_tasks):
dataListTable1[j].append(metric_sum_values[j])
# table 2
if(table2Flag):
logging.debug("Working on table 2 and 3 ...")
for metricIdx, metric in enumerate(infraction_metrics):
values_driven = metrics_summary['driven_kilometers']
values = metrics_summary[metric]
metric_sum_values = np.zeros(number_of_tasks)
summed_driven_kilometers = np.zeros(number_of_tasks)
for items_metric, items_driven in zip(values.items(), values_driven.items()):
w = items_metric[0] # weather
tasks = items_metric[1]
tasks_driven = items_driven[1]
if w in set(weathers):
count = 0
for t, t_driven in zip(tasks, tasks_driven):
#logging.debug("t_driven: %s \n t: %s \n tSum: %f", t_driven, t, float(sum(t)))
metric_sum_values[count] += float(sum(t))
summed_driven_kilometers[count] += t_driven
count += 1
# array's elements displacement, this is because of std/avg calculation
for i in range(number_of_tasks):
dataListTable3[metricIdx][i].append(metric_sum_values[i])
if metric_sum_values[i] == 0:
dataListTable2[metricIdx][i].append(summed_driven_kilometers[i])
else:
| dataListTable2[metricIdx][i].append(summed_driven_kilometers[i] / metric_sum_values[i]) | conditional_block | |
sketch.js | of game
fill(0, 100);
rect(0,185, WIDTH, 150);
fill(255, 200, 0, 200);
textAlign(LEFT);
textSize(20);
text("Instructions: You're a bunny", WIDTH/8, HEIGHT/2 - 20);
image(bunnyFront, 4*WIDTH/6+5, HEIGHT/2 - 45, 40, 40);
text("trying to collect carrots!", WIDTH/8, HEIGHT/2 + 20);
image(carrotimg, 3*WIDTH/5+5, HEIGHT/2, 30, 30);
text("Try to avoid the tomatoes, they hurt!", WIDTH/8, HEIGHT/2 + 60);
image(tomatoimg, 4*WIDTH/5+15, HEIGHT/2 + 40, 30, 30);
//instructions/technical rules
textAlign(CENTER);
fill(255, 170, 0, 200);
text("Use arrow keys to move around.", WIDTH/2, 3*HEIGHT/4);
textSize(25);
text("Press Space to Start!", WIDTH/2, 3*HEIGHT/4 + 30);
//this is to set up the platforms 3 x 3
//maybe I shouldn't have done this with an array, because
//every three they change their placement
for (let i = 0; i < 9; i++) {
let x = (platformSize + 10) * (i%3) + WIDTH/2 - WIDTH/5;
if(i < 3) {
let y = HEIGHT/2 - HEIGHT/5;
platforms[i] = new platform(x, y, platformSize);
}
else if (6 <= i) {
let y = HEIGHT/2 - HEIGHT/5 + (platformSize + 10);
platforms[i] = new platform(x, y, platformSize);
}
else if (3 <= i < 6) {
let y = HEIGHT/2 - HEIGHT/5 + (platformSize + 10) * 2;
platforms[i] = new platform(x, y, platformSize);
}
}
//an array of spots/tomatoes! altered some of class code to fit obstacles
spots = [];
for (let i = 0; i < 3; i++){ // Make a for() loop to create the desired number of Spots
// Add an index [i] to create multiple Spots
spots[i] = new Spot(WIDTH,
0, 0.1 * i + 1, 0);
}
//set up a bunny!
bunnyvar = new Bunny(WIDTH/2, HEIGHT/2, bunnyFront);
//set up a carrot!
carrotvar = new Carrot(floor(random(0,3)) * movement + WIDTH/2 - movement,
floor(random(0,3)) * movement + WIDTH/2 - movement);
//make sure the score is really 0!
score = 0;
}
function update(){
imageMode(CORNER);
//background update
background(grassimg);
noStroke();
//set up for background/information
for (let i = 0; i < platforms.length; i++){
platforms[i].display();
}
textFont(font);
textSize(40);
fill(255, 200, 0, 200);
textAlign(CENTER);
text("Score: " + score, WIDTH/2, 80);
textSize(20);
text("High Score: " + hiscore, WIDTH/2, 110);
//displaying bunny & carrot
bunnyvar.display();
carrotvar.display();
//checks consistently if the carrot is hit by the bunny
carrotvar.hit(bunnyvar);
//displaying and moving spots
for (let i = 0; i < spots.length; i++){ // Make a for() loop to loop through each Spot
spots[i].move(); // Move each object
spots[i].display(); // Display each object
spots[i].check(bunnyvar); // Check for mouse overlap
}
//to make the game harder, add a spot for every 10 points
if(spots.length < 3 + floor(score/10)) {
spots.push(new Spot(WIDTH,
0, 0.1 * spots.length + 1, 0));
}
}
//game over
function gameOver(){
imageMode(CORNER);
//graphics
background(grassimg);
textFont(font);
textSize(50);
fill(255, 200, 0, 200);
textAlign(CENTER);
text("Game Over!", WIDTH/2, HEIGHT/4);
textSize(20);
text("Final Score: " + score, WIDTH/2, HEIGHT/4 + 30);
textSize(25);
text("Press Space to Continue", WIDTH/2, 3*HEIGHT/4);
imageMode(CENTER);
image(bunnyEnd, WIDTH/2, HEIGHT/2, 150, 150);
//to update the high score if necessary
if(score > hiscore) {
// textSize(15);
// text("New High Score!", WIDTH/2, HEIGHT/2 + 55);
hiscore = score;
}
}
// p5.js key handler: space drives the game-state machine, arrows steer the bunny.
function keyPressed() {
  if (key == ' '){
    if (gameState == 0){ // start screen -> playing
      gameState = 1;
      return;
    }
    if (gameState == 2){ // game over -> back to start screen
      gameState = 0;
      return;
    }
  }
  // Any arrow key moves the bunny one grid step in that direction.
  switch (keyCode) {
    case LEFT_ARROW:
    case RIGHT_ARROW:
    case UP_ARROW:
    case DOWN_ARROW:
      bunnyvar.moveTo(keyCode);
      break;
  }
}
//altered a tiny bit of game template's code/spot class
//included a direction so I could alter how the spots move
//Also, direction is a int. It was easier to randomize rather than strings
class Spot {
// _direction encodes the heading as an int (0=east, 1=west, 2=south,
// 3=north — see move()); ints were easier to randomize than strings.
constructor(_x, _y, _speed, _direction) {
    this.x = _x;                 // current position on the canvas
    this.y = _y;
    this.speed = _speed;         // pixels moved per frame
    this.direction = _direction; // current heading
}
/// Advances the spot one step along its heading; once it leaves the
/// canvas it is respawned at a random edge via place().
move() {
    //going east: step right, respawn past the right edge
    if(this.direction === 0) {
        this.x += this.speed;
        if (this.x > (WIDTH)){
            this.place()
        } //going west: step left, respawn past the left edge
    } else if(this.direction === 1) {
        this.x -= this.speed;
        if (this.x < 0) {
            this.place()
        }
    //going south: step down, respawn past the bottom edge
    } else if(this.direction === 2) {
        this.y += this.speed;
        if (this.y > (HEIGHT)) {
            this.place()
        }
    //going north: step up, respawn past the top edge
    }else if(this.direction === 3) {
        this.y -= this.speed;
        if (this.y < 0) {
            this.place()
        }
    }
}
// Respawns the spot on a random edge of the canvas, heading back across
// the play field. (Kept separate from move() because it is a bit wordy.)
place() {
    // Pick one of the four headings at random: 0=east 1=west 2=south 3=north.
    var randomDirection = floor(random(0,4));
    // Put the spot on the matching edge; the perpendicular coordinate is
    // snapped to one of three random lanes aligned with the platform grid.
    if(randomDirection === 0) { //east: enter from the left edge
        this.x = 0;
        this.y = (floor(random(0,3)) * (platformSize + 10) + HEIGHT/2 - movement);
        this.direction = 0;
    } else if(randomDirection === 1) { //west: enter from the right edge
        this.x = WIDTH;
        this.y = (floor(random(0,3)) * (platformSize + 10) + HEIGHT/2 - movement);
        this.direction = 1;
    } else if(randomDirection === 2) { //south: enter from the top edge
        this.x = (floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement);
        this.y = 0;
        this.direction = 2;
    } else if(randomDirection === 3) { //north: enter from the bottom edge
        this.x = (floor(random(0,3)) * (platformSize + 10) + WIDTH/2 - movement);
        this.y = HEIGHT;
        this.direction = 3;
    }
}
// Draws the tomato sprite centered on the spot's position.
display() {
    // Earlier placeholder graphic, kept for reference:
    // fill(255, 0 ,0);
    // ellipse(this.x, this.y, 25, 25);
    imageMode(CENTER);
    image(tomatoimg, this.x, this.y, 30, 30);
}
| check | identifier_name | |
sketch.js | ///set it up!
// p5.js one-time setup: cap the draw loop and mount the canvas.
function setup(){
  // Bug fix: p5.js sets the draw-loop speed with the frameRate() function;
  // the original `framerate = 20;` only created an unused global and had
  // no effect on the sketch.
  frameRate(20);
  var myCanvas = createCanvas(WIDTH, HEIGHT);
  // Attach the canvas to the page element with id "js-game".
  myCanvas.parent("js-game");
  // (Removed an unused `randomNumber` local that was never read.)
}
///Credit to class game template
// Per-frame dispatch: 0 = start screen, 1 = playing, 2 = game over.
function draw(){
  const screens = { 0: startScreen, 1: update, 2: gameOver };
  const screen = screens[gameState];
  if (screen) {
    screen();
  }
}
function startScreen(){
///Starting screen graphics & rules///
//background
imageMode(CORNER);
background(grassimg);
//title
textFont(font);
textSize(50);
fill(255, 170, 0, 200);
textAlign(CENTER);
text("Carrot Collector", WIDTH/2, HEIGHT/4);
//high score text:
fill(255, 200, 0, 200);
textSize(20);
text("High Score: " + hiscore, WIDTH/2, HEIGHT/4 + 30);
//overall description of game
fill(0, 100);
rect(0,185, WIDTH, 150);
fill(255, 200, 0, 200);
textAlign(LEFT);
textSize(20);
text("Instructions: You're a bunny", WIDTH/8, HEIGHT/2 - 20);
image(bunnyFront, 4*WIDTH/6+5, HEIGHT/2 - 45, 40, 40);
text("trying to collect carrots!", WIDTH/8, HEIGHT/2 + 20);
image(carrotimg, 3*WIDTH/5+5, HEIGHT/2, 30, 30);
text("Try to avoid the tomatoes, they hurt!", WIDTH/8, HEIGHT/2 + 60);
image(tomatoimg, 4*WIDTH/5+15, HEIGHT/2 + 40, 30, 30);
//instructions/technical rules
textAlign(CENTER);
fill(255, 170, 0, 200);
text("Use arrow keys to move around.", WIDTH/2, 3*HEIGHT/4);
textSize(25);
text("Press Space to Start!", WIDTH/2, 3*HEIGHT/4 + 30);
//this is to set up the platforms 3 x 3
//maybe I shouldn't have done this with an array, because
//every three they change their placement
for (let i = 0; i < 9; i++) {
let x = (platformSize + 10) * (i%3) + WIDTH/2 - WIDTH/5;
if(i < 3) |
else if (6 <= i) {
let y = HEIGHT/2 - HEIGHT/5 + (platformSize + 10);
platforms[i] = new platform(x, y, platformSize);
}
else if (3 <= i < 6) {
let y = HEIGHT/2 - HEIGHT/5 + (platformSize + 10) * 2;
platforms[i] = new platform(x, y, platformSize);
}
}
//an array of spots/tomatoes! altered some of class code to fit obstacles
spots = [];
for (let i = 0; i < 3; i++){ // Make a for() loop to create the desired number of Spots
// Add an index [i] to create multiple Spots
spots[i] = new Spot(WIDTH,
0, 0.1 * i + 1, 0);
}
//set up a bunny!
bunnyvar = new Bunny(WIDTH/2, HEIGHT/2, bunnyFront);
//set up a carrot!
carrotvar = new Carrot(floor(random(0,3)) * movement + WIDTH/2 - movement,
floor(random(0,3)) * movement + WIDTH/2 - movement);
//make sure the score is really 0!
score = 0;
}
function update(){
imageMode(CORNER);
//background update
background(grassimg);
noStroke();
//set up for background/information
for (let i = 0; i < platforms.length; i++){
platforms[i].display();
}
textFont(font);
textSize(40);
fill(255, 200, 0, 200);
textAlign(CENTER);
text("Score: " + score, WIDTH/2, 80);
textSize(20);
text("High Score: " + hiscore, WIDTH/2, 110);
//displaying bunny & carrot
bunnyvar.display();
carrotvar.display();
//checks consistently if the carrot is hit by the bunny
carrotvar.hit(bunnyvar);
//displaying and moving spots
for (let i = 0; i < spots.length; i++){ // Make a for() loop to loop through each Spot
spots[i].move(); // Move each object
spots[i].display(); // Display each object
spots[i].check(bunnyvar); // Check for mouse overlap
}
//to make the game harder, add a spot for every 10 points
if(spots.length < 3 + floor(score/10)) {
spots.push(new Spot(WIDTH,
0, 0.1 * spots.length + 1, 0));
}
}
//game over
function gameOver(){
imageMode(CORNER);
//graphics
background(grassimg);
textFont(font);
textSize(50);
fill(255, 200, 0, 200);
textAlign(CENTER);
text("Game Over!", WIDTH/2, HEIGHT/4);
textSize(20);
text("Final Score: " + score, WIDTH/2, HEIGHT/4 + 30);
textSize(25);
text("Press Space to Continue", WIDTH/2, 3*HEIGHT/4);
imageMode(CENTER);
image(bunnyEnd, WIDTH/2, HEIGHT/2, 150, 150);
//to update the high score if necessary
if(score > hiscore) {
// textSize(15);
// text("New High Score!", WIDTH/2, HEIGHT/2 + 55);
hiscore = score;
}
}
//key pressed function
function keyPressed() {
//key press for game states
if (gameState == 0 && key == ' '){ //start to playing
gameState = 1;
} else if (gameState == 2 && key == ' '){ //game over to restart to start again
gameState = 0;
//other key presses are for movement of bunny
} else if (keyCode === LEFT_ARROW) {
bunnyvar.moveTo(LEFT_ARROW);
} else if (keyCode === RIGHT_ARROW) {
bunnyvar.moveTo(RIGHT_ARROW);
}
else if (keyCode === UP_ARROW) {
bunnyvar.moveTo(UP_ARROW);
}
else if (keyCode === DOWN_ARROW) {
bunnyvar.moveTo(DOWN_ARROW);
}
}
//altered a tiny bit of game template's code/spot class
//included a direction so I could alter how the spots move
//Also, direction is a int. It was easier to randomize rather than strings
class Spot {
constructor(_x, _y, _speed, _direction) {
this.x = _x;
this.y = _y;
this.speed = _speed;
this.direction = _direction;
}
///moves the spot from one end to another
move() {
//if I'm going east & I hit the edge...
if(this.direction === 0) {
this.x += this.speed;
if (this.x > (WIDTH)){
this.place()
} //if I'm going west & I hit the edge...
} else if(this.direction === 1) {
this.x -= this.speed;
if (this.x < 0) {
this.place()
}
//if I'm going south & I hit the edge...
} else if(this.direction === 2) {
this.y += this.speed;
if (this.y > (HEIGHT)) {
this.place()
}
//if I'm going north & I hit the edge...
}else if(this.direction === 3) {
this.y -= this.speed;
if (this.y < 0) {
this.place()
}
}
}
//repspawns the spot in a new place
//this could've been placed in the move class
//but also it's a bit wordy
place() {
//decides the random direction of the spot
var randomDirection = floor(random(0,4));
//alter this placement & direction based on the direction
if(randomDirection === 0) { //east
this.x = 0;
this.y = (floor(random(0,3)) * (platformSize + 10) + HEIGHT/2 - movement);
this.direction = 0;
| {
let y = HEIGHT/2 - HEIGHT/5;
platforms[i] = new platform(x, y, platformSize);
} | conditional_block |
sketch.js | }
///set it up!
// p5.js one-time setup: cap the draw loop and mount the canvas.
function setup(){
  // Bug fix: p5.js sets the draw-loop speed with the frameRate() function;
  // the original `framerate = 20;` only created an unused global and had
  // no effect on the sketch.
  frameRate(20);
  var myCanvas = createCanvas(WIDTH, HEIGHT);
  // Attach the canvas to the page element with id "js-game".
  myCanvas.parent("js-game");
  // (Removed an unused `randomNumber` local that was never read.)
}
function draw(){
///Credit to class game template
if (gameState == 0){
startScreen();
} else if (gameState == 1){
update();
} else if (gameState == 2){
gameOver();
}
}
// Start screen: draws the title, rules, and high score, and (re)initializes
// the game world — platform grid, tomato spots, bunny, carrot, and score —
// so that pressing Space always begins a fresh run.
function startScreen(){
  ///Starting screen graphics & rules///
  //background
  imageMode(CORNER);
  background(grassimg);
  //title
  textFont(font);
  textSize(50);
  fill(255, 170, 0, 200);
  textAlign(CENTER);
  text("Carrot Collector", WIDTH/2, HEIGHT/4);
  //high score text:
  fill(255, 200, 0, 200);
  textSize(20);
  text("High Score: " + hiscore, WIDTH/2, HEIGHT/4 + 30);
  //overall description of game
  fill(0, 100);
  rect(0,185, WIDTH, 150);
  fill(255, 200, 0, 200);
  textAlign(LEFT);
  textSize(20);
  text("Instructions: You're a bunny", WIDTH/8, HEIGHT/2 - 20);
  image(bunnyFront, 4*WIDTH/6+5, HEIGHT/2 - 45, 40, 40);
  text("trying to collect carrots!", WIDTH/8, HEIGHT/2 + 20);
  image(carrotimg, 3*WIDTH/5+5, HEIGHT/2, 30, 30);
  text("Try to avoid the tomatoes, they hurt!", WIDTH/8, HEIGHT/2 + 60);
  image(tomatoimg, 4*WIDTH/5+15, HEIGHT/2 + 40, 30, 30);
  //instructions/technical rules
  textAlign(CENTER);
  fill(255, 170, 0, 200);
  text("Use arrow keys to move around.", WIDTH/2, 3*HEIGHT/4);
  textSize(25);
  text("Press Space to Start!", WIDTH/2, 3*HEIGHT/4 + 30);
  // Build the 3x3 platform grid.
  // Bug fix: the original branch `else if (3 <= i < 6)` is a chained
  // comparison — JavaScript evaluates it as `(3 <= i) < 6`, which is always
  // truthy; it only worked by accident because the earlier branches had
  // already consumed i < 3 and i >= 6. The row is now computed directly.
  // The original also gave rows 6-8 the y-offset of 1 and rows 3-5 the
  // offset of 2; since platforms are only ever displayed, normalizing the
  // offsets permutes array order without changing what is drawn.
  for (let i = 0; i < 9; i++) {
    let col = i % 3;
    let row = floor(i / 3);
    let x = (platformSize + 10) * col + WIDTH/2 - WIDTH/5;
    let y = HEIGHT/2 - HEIGHT/5 + (platformSize + 10) * row;
    platforms[i] = new platform(x, y, platformSize);
  }
  // Spawn the initial wave of tomato spots, each slightly faster than the last.
  spots = [];
  for (let i = 0; i < 3; i++){
    spots[i] = new Spot(WIDTH, 0, 0.1 * i + 1, 0);
  }
  //set up a bunny!
  bunnyvar = new Bunny(WIDTH/2, HEIGHT/2, bunnyFront);
  //set up a carrot!
  // NOTE(review): the y term below uses WIDTH/2 — if the canvas is not
  // square this looks like it should be HEIGHT/2; confirm intent before
  // changing, so the original placement is preserved here.
  carrotvar = new Carrot(floor(random(0,3)) * movement + WIDTH/2 - movement,
                         floor(random(0,3)) * movement + WIDTH/2 - movement);
  //make sure the score is really 0!
  score = 0;
}
function update(){
imageMode(CORNER);
//background update
background(grassimg);
noStroke();
//set up for background/information
for (let i = 0; i < platforms.length; i++){
platforms[i].display();
}
textFont(font);
textSize(40);
fill(255, 200, 0, 200);
textAlign(CENTER);
text("Score: " + score, WIDTH/2, 80);
textSize(20);
text("High Score: " + hiscore, WIDTH/2, 110);
//displaying bunny & carrot
bunnyvar.display();
carrotvar.display();
//checks consistently if the carrot is hit by the bunny
carrotvar.hit(bunnyvar);
//displaying and moving spots
for (let i = 0; i < spots.length; i++){ // Make a for() loop to loop through each Spot
spots[i].move(); // Move each object
spots[i].display(); // Display each object
spots[i].check(bunnyvar); // Check for mouse overlap
}
//to make the game harder, add a spot for every 10 points
if(spots.length < 3 + floor(score/10)) {
spots.push(new Spot(WIDTH,
0, 0.1 * spots.length + 1, 0));
}
}
| function gameOver(){
imageMode(CORNER);
//graphics
background(grassimg);
textFont(font);
textSize(50);
fill(255, 200, 0, 200);
textAlign(CENTER);
text("Game Over!", WIDTH/2, HEIGHT/4);
textSize(20);
text("Final Score: " + score, WIDTH/2, HEIGHT/4 + 30);
textSize(25);
text("Press Space to Continue", WIDTH/2, 3*HEIGHT/4);
imageMode(CENTER);
image(bunnyEnd, WIDTH/2, HEIGHT/2, 150, 150);
//to update the high score if necessary
if(score > hiscore) {
// textSize(15);
// text("New High Score!", WIDTH/2, HEIGHT/2 + 55);
hiscore = score;
}
}
//key pressed function
function keyPressed() {
//key press for game states
if (gameState == 0 && key == ' '){ //start to playing
gameState = 1;
} else if (gameState == 2 && key == ' '){ //game over to restart to start again
gameState = 0;
//other key presses are for movement of bunny
} else if (keyCode === LEFT_ARROW) {
bunnyvar.moveTo(LEFT_ARROW);
} else if (keyCode === RIGHT_ARROW) {
bunnyvar.moveTo(RIGHT_ARROW);
}
else if (keyCode === UP_ARROW) {
bunnyvar.moveTo(UP_ARROW);
}
else if (keyCode === DOWN_ARROW) {
bunnyvar.moveTo(DOWN_ARROW);
}
}
//altered a tiny bit of game template's code/spot class
//included a direction so I could alter how the spots move
//Also, direction is a int. It was easier to randomize rather than strings
class Spot {
constructor(_x, _y, _speed, _direction) {
this.x = _x;
this.y = _y;
this.speed = _speed;
this.direction = _direction;
}
///moves the spot from one end to another
move() {
//if I'm going east & I hit the edge...
if(this.direction === 0) {
this.x += this.speed;
if (this.x > (WIDTH)){
this.place()
} //if I'm going west & I hit the edge...
} else if(this.direction === 1) {
this.x -= this.speed;
if (this.x < 0) {
this.place()
}
//if I'm going south & I hit the edge...
} else if(this.direction === 2) {
this.y += this.speed;
if (this.y > (HEIGHT)) {
this.place()
}
//if I'm going north & I hit the edge...
}else if(this.direction === 3) {
this.y -= this.speed;
if (this.y < 0) {
this.place()
}
}
}
//repspawns the spot in a new place
//this could've been placed in the move class
//but also it's a bit wordy
place() {
//decides the random direction of the spot
var randomDirection = floor(random(0,4));
//alter this placement & direction based on the direction
if(randomDirection === 0) { //east
this.x = 0;
this.y = (floor(random(0,3)) * (platformSize + 10) + HEIGHT/2 - movement);
this.direction = 0;
} | //game over | random_line_split |
rbtree.go | () bool {
return n.Left == nil || n.Left.Color == Black
}
// LeftRed reports whether the left child exists and is red.
// (The previous comment said "black" twice; the code tests for red.)
func (n *Node) LeftRed() bool {
	return n.Left != nil && n.Left.Color == Red
}
// RightBlack reports whether the right child is black; a nil child counts
// as black (nil leaves are black in a red-black tree).
func (n *Node) RightBlack() bool {
	return n.Right == nil || n.Right.Color == Black
}
// RightRed reports whether the right child exists and is red.
// (The previous comment said "black" twice; the code tests for red.)
func (n *Node) RightRed() bool {
	return n.Right != nil && n.Right.Color == Red
}
// RBTree is a red-black tree guarded by a single RWMutex, making it safe
// for concurrent use by multiple goroutines.
type RBTree struct {
	Node  *Node        // root of the tree; nil when the tree is empty
	lock  sync.RWMutex // guards Node and stack
	stack *stack       // reusable traversal stack for Add/Delete
}
// New returns an empty, ready-to-use red-black tree.
func New() *RBTree {
	// The zero values already suffice for the lock (an unlocked RWMutex)
	// and the root (nil); only the reusable traversal stack needs building.
	return &RBTree{stack: newStack(nil)}
}
// LeftRotate rotates n to the left and returns the subtree's new root
// (n's former right child). With no right child the rotation is a no-op
// and n itself is returned.
func LeftRotate(n *Node) *Node {
	if n.Right == nil {
		return n
	}
	pivot := n.Right
	// RHS is evaluated before assignment: n adopts pivot's old left
	// subtree, then pivot takes n as its left child.
	n.Right, pivot.Left = pivot.Left, n
	return pivot
}
// RightRotate rotates n to the right and returns the subtree's new root
// (n's former left child). With no left child the rotation is a no-op
// and n itself is returned.
func RightRotate(n *Node) *Node {
	if n.Left == nil {
		return n
	}
	pivot := n.Left
	// RHS is evaluated before assignment: n adopts pivot's old right
	// subtree, then pivot takes n as its right child.
	n.Left, pivot.Right = pivot.Right, n
	return pivot
}
// Add inserts item into the tree, replacing any existing node with an
// equal key. It takes the write lock, so it is safe for concurrent use.
func (t *RBTree) Add(item compare.Lesser) {
	t.lock.Lock()
	defer t.lock.Unlock()
	t.Node = addTreeNode(t.stack, t.Node, item)
}
// Find returns the stored item equal to key, or nil if no such node
// exists. It takes only the read lock, so concurrent reads can proceed.
func (t *RBTree) Find(key compare.Lesser) interface{} {
	t.lock.RLock()
	defer t.lock.RUnlock()
	return Find(t.Node, key)
}
// Delete removes the node equal to key and returns its item, or nil if
// absent. It takes the write lock and reuses the tree's traversal stack.
func (t *RBTree) Delete(key compare.Lesser) (ret interface{}) {
	t.lock.Lock()
	defer t.lock.Unlock()
	t.stack.init(t.Node)
	t.Node, ret = deleteTreeNode(t.stack, t.Node, key)
	t.stack.reset()
	return ret
}
// addTreeNode inserts item into the subtree rooted at node, recording the
// descent path on stack so the tree can be rebalanced bottom-up. It
// returns the (possibly new) root. If an equal item already exists, its
// value is replaced in place and no rebalancing is required.
func addTreeNode(stack *stack, node *Node, item compare.Lesser) *Node {
	stack.init(node)
	defer stack.reset()
	if node == nil {
		// case 1: empty tree — the new node becomes a black root.
		return &Node{
			Item:  item,
			Color: Black,
		}
	}
	// Walk down to the insertion point, pushing each step onto the stack.
	for node != nil {
		switch {
		case item.Less(node.Item):
			stack.push(node, Left)
			node = node.Left
		case node.Item.Less(item):
			stack.push(node, Right)
			node = node.Right
		default:
			// Equal key found: replace the value; tree shape (and
			// therefore balance) is unchanged.
			node.Item = item
			return stack.root()
		}
	}
	// Attach the new red leaf under the last node on the stack, then fix
	// any red-red violation introduced by the insertion.
	stack.bindChild(&Node{
		Item:  item,
		Color: Red,
	})
	addTreeNodeBalance(stack)
	// Invariant: the root is always black.
	root := stack.root()
	root.Color = Black
	return root
}
// addTreeNodeBalance restores the red-black invariants after an insertion.
// Precondition: the child just attached below the node on top of the stack
// is red. Terminology: N = the red node being fixed, P = its parent (stack
// top), PP = grandparent, S = P's sibling (N's uncle).
func addTreeNodeBalance(stack *stack) {
	for stack.index > 0 {
		p := stack.node()
		// case 2: P is black — a red child under a black parent breaks
		// nothing, balance finished.
		if p.Color == Black {
			return
		}
		// P is red
		pp := stack.parent()
		// case 1: P is the root (the caller recolors the root black).
		if pp == nil {
			return
		}
		s := stack.sibling()
		// case 3: P is red, S is red, PP is black
		// execute: set P,S to black, PP to red
		// result: black count through PP is unchanged; continue balancing
		// from PP's parent (PP may now conflict with its own parent).
		if s != nil && s.Color == Red {
			p.Color = Black
			s.Color = Black
			pp.Color = Red
			stack.pop().pop()
			continue
		}
		// case 4: P is red, S is black, PP is black, and N's position under
		// P differs from P's position under PP ("inner" grandchild).
		// execute: rotate the red child up so N and P line up.
		// result: reduces to case 5.
		pos, ppos := stack.position(), stack.parentPosition()
		if pos != ppos {
			if pos == Left {
				p = RightRotate(p)
				pp.Right = p
			} else {
				p = LeftRotate(p)
				pp.Left = p
			}
		}
		// case 5: P is red, S is black, PP is black, and N and P are on the
		// same side ("outer" grandchild).
		// execute: recolor P black and PP red, then rotate P up over PP.
		// result: black counts through P are unchanged; balance finished.
		p.Color = Black
		pp.Color = Red
		var ppn *Node
		if ppos == Left {
			ppn = RightRotate(pp)
		} else {
			ppn = LeftRotate(pp)
		}
		stack.pop().pop().bindChild(ppn)
		return
	}
}
// AddNode inserts item into the tree rooted at root and returns the new
// root. This is the recursive counterpart of addTreeNode: it balances on
// the way back up the call stack instead of using an explicit stack/loop.
func AddNode(root *Node, item compare.Lesser) *Node {
	return AddNewNode(root, &Node{
		Item: item,
	})
}
// AddNewNode inserts a pre-built node into the tree rooted at root and
// returns the new root.
func AddNewNode(root *Node, node *Node) *Node {
	// Insert as red so black counts along the path are preserved; any
	// red-red conflict is fixed while unwinding inside addOneNode.
	node.Color = Red
	// The top-level `pos` is arbitrary — the root has no parent to
	// rotate against.
	root = addOneNode(root, Left, node)
	// Invariant: the root is always black.
	root.Color = Black
	return root
}
// addOneNode recursively descends to a leaf, inserts `one` there, and
// rebuilds/rebalances the tree while unwinding from leaf back to root.
// The main purpose is to eliminate two linked red nodes while keeping the
// black count balanced. `pos` is the position of `node` within its own
// parent; it selects the rotation direction in case 3.
//
// code comments use the following terms:
// - N as the balance node
// - L as the left child of N
// - R as the right child of N
// - P as the parent of N
// - LL as the left child of left child of N
// - RR as the right child of right child of N
func addOneNode(node *Node, pos Position, one *Node) *Node {
	// case 1: reached an empty slot — the new node goes here.
	if node == nil {
		return one
	}
	if one.Item.Less(node.Item) {
		node.Left = addOneNode(node.Left, Left, one)
		// case 2: L is black means it's already balanced.
		if node.Left.Color == Black {
			return node
		}
		if node.Color == Red {
			// case 3: L is red, N is red, N is right child of P
			// execute: right rotate up the L
			// result: the black count through L,N will not change, but it
			// now matches case 4
			if pos == Right {
				node = RightRotate(node)
			}
			// case 4: L is red, N is red, N is left child of P
			// execute: nothing
			// result: it's the case 5 of PP (handled one level up)
			return node
		}
		if node.Left.Left != nil && node.Left.Left.Color == Red {
			// case 5: N is black, L is red, LL is red
			// execute: right rotate N, and make LL black
			// result: black count through N is unchanged, while that
			// through LL increases by 1 — the subtree is now balanced.
			node = RightRotate(node)
			node.Left.Color = Black
		}
		return node
	}
	if node.Item.Less(one.Item) {
		// Mirror image of the left-insertion cases above.
		node.Right = addOneNode(node.Right, Right, one)
		// case 2: R is black means it's already balanced
		if node.Right.Color == Black {
			return node
		}
		if node.Color == Red {
			if pos == Left {
				// case 3: R is red, N is red, N is left child of P
				// execute: left rotate up the R
				// result: the black count through R,N will not change, but
				// it now matches case 4
				node = LeftRotate(node)
			}
			// case 4: R is red, N is red, N is right child of P
			// execute: nothing
			// result: it's the case 5 of PP (handled one level up)
			return node
		}
		// case 5: N is black, R is red, RR is red
		// execute: left rotate N, and make RR black
		// result: black count through N is unchanged, while that through
		// RR increases by 1 — the subtree is now balanced.
		if node.Right.Right != nil && node.Right.Right.Color == Red {
			node = LeftRotate(node)
			node.Right.Color = Black
		}
		return node
	}
	// case 6: found an existing node with an equal key — replace the
	// value; tree shape (and therefore balance) is unchanged.
	node.Item = one.Item
	return node
}
// Find find the value of a key.
func Find(node *Node, item compare.Lesser) compare.Lesser {
for node != nil {
switch {
case item.Less(node.Item):
node = node.Left
case node.Item | LeftBlack | identifier_name | |
rbtree.go | add one key/value node in the tree, replace that if exist
func (t *RBTree) Add(item compare.Lesser) {
t.lock.Lock()
defer t.lock.Unlock()
t.Node = addTreeNode(t.stack, t.Node, item)
}
// Find node
func (t *RBTree) Find(key compare.Lesser) interface{} {
t.lock.RLock()
defer t.lock.RUnlock()
return Find(t.Node, key)
}
// Delete delete node, return the value of deleted node
func (t *RBTree) Delete(key compare.Lesser) (ret interface{}) {
t.lock.Lock()
defer t.lock.Unlock()
t.stack.init(t.Node)
t.Node, ret = deleteTreeNode(t.stack, t.Node, key)
t.stack.reset()
return ret
}
// addTreeNode add a tree node
func addTreeNode(stack *stack, node *Node, item compare.Lesser) *Node {
stack.init(node)
defer stack.reset()
if node == nil {
// case 1: new root
return &Node{
Item: item,
Color: Black,
}
}
for node != nil {
switch {
case item.Less(node.Item):
stack.push(node, Left)
node = node.Left
case node.Item.Less(item):
stack.push(node, Right)
node = node.Right
default:
node.Item = item
return stack.root()
}
}
stack.bindChild(&Node{
Item: item,
Color: Red,
})
addTreeNodeBalance(stack)
root := stack.root()
root.Color = Black
return root
}
// addTreeNodeBalance balance the tree after adding a node
// the pre condition is the child of current stack is red
func addTreeNodeBalance(stack *stack) {
for stack.index > 0 {
p := stack.node()
// case 2: P is black, balance finish
if p.Color == Black {
return
}
// P is red
pp := stack.parent()
// case 1: reach the root
if pp == nil |
s := stack.sibling()
// case 3: P is red, S is red, PP is black
// execute: set P,S to black, PP to red
// result: black count through PP is not change, continue balance on parent of PP
if s != nil && s.Color == Red {
p.Color = Black
s.Color = Black
pp.Color = Red
stack.pop().pop()
continue
}
// case 4: P is red, S is black, PP is black, the position of N and P are diff.
// execute: rotate up the red child
// result: let match the case 5.
pos, ppos := stack.position(), stack.parentPosition()
if pos != ppos {
if pos == Left {
p = RightRotate(p)
pp.Right = p
} else {
p = LeftRotate(p)
pp.Left = p
}
}
// case 5: P is red, S is black, PP is black, the position of N and P are the same.
// execute: set P to black, PP to red, and rotate P up
// result: black count through P will not change, balance finish.
p.Color = Black
pp.Color = Red
var ppn *Node
if ppos == Left {
ppn = RightRotate(pp)
} else {
ppn = LeftRotate(pp)
}
stack.pop().pop().bindChild(ppn)
return
}
}
// AddNode add new key/value, return the new root node.
// this method add node and balance the tree recursively, not using loop logic.
func AddNode(root *Node, item compare.Lesser) *Node {
return AddNewNode(root, &Node{
Item: item,
})
}
// AddNewNode add new node, return the new root node.
func AddNewNode(root *Node, node *Node) *Node {
// set the new node to red
node.Color = Red
root = addOneNode(root, Left, node)
// reset root color
root.Color = Black
return root
}
// addOneNode recursively down to leaf, and add the new node to the leaf,
// then rebuild the tree from the leaf to root.
// the main purpose is reduce two linked red nodes and keep the black count balance.
//
// code comment use the following terms:
// - N as the balance node
// - L as the left child of N
// - R as the right child of N
// - P as the parent of N
// - LL as the left child of left child of N
// - RR as the right child of right child of N
func addOneNode(node *Node, pos Position, one *Node) *Node {
// case 1: first node
if node == nil {
return one
}
if one.Item.Less(node.Item) {
node.Left = addOneNode(node.Left, Left, one)
// case 2: L is black means it's already balance.
if node.Left.Color == Black {
return node
}
if node.Color == Red {
// case 3: L is red, N is red, N is right child of P
// execute: right rotate up the L
// result: the black count through L,N will not change, but let it match the case 4
if pos == Right {
node = RightRotate(node)
}
// case 4: L is red, N is red, N is left child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
if node.Left.Left != nil && node.Left.Left.Color == Red {
// case 5: N is black, L is red, LL is red
// execute: right rotate N, and make LL to black
// result: black count through N is not change, while that through LL increase 1, tree is now balance.
node = RightRotate(node)
node.Left.Color = Black
}
return node
}
if node.Item.Less(one.Item) {
node.Right = addOneNode(node.Right, Right, one)
// case 2: R is black means it's already balance
if node.Right.Color == Black {
return node
}
if node.Color == Red {
if pos == Left {
// case 3: R is red, N is red, N is left child of P
// execute: left rotate up the R
// result: the black count through R,N will not change, but let it match the case 4
node = LeftRotate(node)
}
// case 4: R is red, N is red, N is right child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
// case 5: N is black, R is red, RR is red
// execute: left rotate N, and make RR to black
// result: black count through N is not change, while that through RR increase 1, tree is now balance.
if node.Right.Right != nil && node.Right.Right.Color == Red {
node = LeftRotate(node)
node.Right.Color = Black
}
return node
}
// case 6: find the exists node, just replace the old value with the new
node.Item = one.Item
return node
}
// Find find the value of a key.
func Find(node *Node, item compare.Lesser) compare.Lesser {
for node != nil {
switch {
case item.Less(node.Item):
node = node.Left
case node.Item.Less(item):
node = node.Right
default:
return node.Item
}
}
return nil
}
// Delete delete a node.
// return the new root node, and the value of the deleted node.
// the new root node will be nil if no node exists in the tree after deleted.
// the deleted node value will be nil if not found.
func Delete(node *Node, item compare.Lesser) (n *Node, ret interface{}) {
if node == nil {
return nil, nil
}
return deleteTreeNode(newStack(node), node, item)
}
// deleteTreeNode delete a node.
// return the new root node, and the value of the deleted node.
// the new root node will be nil if no node exists in the tree after deleted.
// the deleted node value will be nil if not found.
func deleteTreeNode(stack *stack, node *Node, item compare.Lesser) (*Node, interface{}) {
root := node
var ret interface{}
// find the node
FOR:
for node != nil {
switch {
case item.Less(node.Item):
stack.push(node, Left)
node = node.Left
case node.Item.Less(item):
stack.push(node, Right)
node = node.Right
default:
ret = node.Item
break FOR
}
}
// not find
if node == nil {
return root, nil
}
var inorderSuccessor *Node
// find the inorder successor
if node.Right != nil {
stack.push | {
return
} | conditional_block |
rbtree.go | Add add one key/value node in the tree, replace that if exist
func (t *RBTree) Add(item compare.Lesser) {
t.lock.Lock()
defer t.lock.Unlock()
t.Node = addTreeNode(t.stack, t.Node, item)
}
// Find node
func (t *RBTree) Find(key compare.Lesser) interface{} {
t.lock.RLock()
defer t.lock.RUnlock()
return Find(t.Node, key)
}
// Delete delete node, return the value of deleted node
func (t *RBTree) Delete(key compare.Lesser) (ret interface{}) {
t.lock.Lock()
defer t.lock.Unlock()
t.stack.init(t.Node)
t.Node, ret = deleteTreeNode(t.stack, t.Node, key)
t.stack.reset()
return ret
}
// addTreeNode add a tree node
func addTreeNode(stack *stack, node *Node, item compare.Lesser) *Node {
stack.init(node)
defer stack.reset()
if node == nil {
// case 1: new root
return &Node{
Item: item,
Color: Black,
}
}
for node != nil {
switch {
case item.Less(node.Item):
stack.push(node, Left)
node = node.Left
case node.Item.Less(item):
stack.push(node, Right)
node = node.Right
default:
node.Item = item
return stack.root()
}
}
stack.bindChild(&Node{
Item: item,
Color: Red,
})
addTreeNodeBalance(stack)
root := stack.root()
root.Color = Black
return root
}
// addTreeNodeBalance balance the tree after adding a node
// the pre condition is the child of current stack is red
func addTreeNodeBalance(stack *stack) {
for stack.index > 0 {
p := stack.node()
// case 2: P is black, balance finish
if p.Color == Black {
return
}
// P is red
pp := stack.parent()
// case 1: reach the root
if pp == nil {
return
}
s := stack.sibling()
// case 3: P is red, S is red, PP is black
// execute: set P,S to black, PP to red
// result: black count through PP is not change, continue balance on parent of PP
if s != nil && s.Color == Red {
p.Color = Black
s.Color = Black
pp.Color = Red
stack.pop().pop()
continue
}
// case 4: P is red, S is black, PP is black, the position of N and P are diff.
// execute: rotate up the red child
// result: let match the case 5.
pos, ppos := stack.position(), stack.parentPosition()
if pos != ppos {
if pos == Left {
p = RightRotate(p)
pp.Right = p
} else {
p = LeftRotate(p)
pp.Left = p
}
}
// case 5: P is red, S is black, PP is black, the position of N and P are the same.
// execute: set P to black, PP to red, and rotate P up
// result: black count through P will not change, balance finish.
p.Color = Black
pp.Color = Red
var ppn *Node
if ppos == Left {
ppn = RightRotate(pp)
} else {
ppn = LeftRotate(pp)
}
stack.pop().pop().bindChild(ppn)
return
}
}
// AddNode add new key/value, return the new root node.
// this method add node and balance the tree recursively, not using loop logic.
func AddNode(root *Node, item compare.Lesser) *Node {
return AddNewNode(root, &Node{
Item: item,
})
}
// AddNewNode add new node, return the new root node.
func AddNewNode(root *Node, node *Node) *Node {
// set the new node to red
node.Color = Red
root = addOneNode(root, Left, node)
// reset root color
root.Color = Black
return root
}
// addOneNode recursively down to leaf, and add the new node to the leaf,
// then rebuild the tree from the leaf to root.
// the main purpose is reduce two linked red nodes and keep the black count balance.
//
// code comment use the following terms:
// - N as the balance node
// - L as the left child of N
// - R as the right child of N
// - P as the parent of N
// - LL as the left child of left child of N
// - RR as the right child of right child of N
func addOneNode(node *Node, pos Position, one *Node) *Node {
// case 1: first node
if node == nil {
return one
}
if one.Item.Less(node.Item) {
node.Left = addOneNode(node.Left, Left, one)
// case 2: L is black means it's already balance.
if node.Left.Color == Black {
return node
}
if node.Color == Red {
// case 3: L is red, N is red, N is right child of P
// execute: right rotate up the L
// result: the black count through L,N will not change, but let it match the case 4
if pos == Right {
node = RightRotate(node)
}
// case 4: L is red, N is red, N is left child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
if node.Left.Left != nil && node.Left.Left.Color == Red {
// case 5: N is black, L is red, LL is red
// execute: right rotate N, and make LL to black
// result: black count through N is not change, while that through LL increase 1, tree is now balance.
node = RightRotate(node)
node.Left.Color = Black
}
return node
}
if node.Item.Less(one.Item) {
node.Right = addOneNode(node.Right, Right, one)
// case 2: R is black means it's already balance
if node.Right.Color == Black {
return node
}
if node.Color == Red {
if pos == Left {
// case 3: R is red, N is red, N is left child of P
// execute: left rotate up the R
// result: the black count through R,N will not change, but let it match the case 4
node = LeftRotate(node)
}
// case 4: R is red, N is red, N is right child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
// case 5: N is black, R is red, RR is red
// execute: left rotate N, and make RR to black
// result: black count through N is not change, while that through RR increase 1, tree is now balance.
if node.Right.Right != nil && node.Right.Right.Color == Red {
node = LeftRotate(node)
node.Right.Color = Black
}
return node
}
// case 6: find the exists node, just replace the old value with the new
node.Item = one.Item
return node
}
// Find find the value of a key.
func Find(node *Node, item compare.Lesser) compare.Lesser {
for node != nil {
switch {
case item.Less(node.Item):
node = node.Left
case node.Item.Less(item):
node = node.Right
default:
return node.Item
}
}
return nil
}
// Delete delete a node.
// return the new root node, and the value of the deleted node.
// the new root node will be nil if no node exists in the tree after deleted.
// the deleted node value will be nil if not found.
func Delete(node *Node, item compare.Lesser) (n *Node, ret interface{}) {
if node == nil {
return nil, nil
}
return deleteTreeNode(newStack(node), node, item)
}
// deleteTreeNode delete a node.
// return the new root node, and the value of the deleted node.
// the new root node will be nil if no node exists in the tree after deleted.
// the deleted node value will be nil if not found.
func deleteTreeNode(stack *stack, node *Node, item compare.Lesser) (*Node, interface{}) {
root := node
var ret interface{}
// find the node | node = node.Left
case node.Item.Less(item):
stack.push(node, Right)
node = node.Right
default:
ret = node.Item
break FOR
}
}
// not find
if node == nil {
return root, nil
}
var inorderSuccessor *Node
// find the inorder successor
if node.Right != nil {
stack.push(node, | FOR:
for node != nil {
switch {
case item.Less(node.Item):
stack.push(node, Left) | random_line_split |
rbtree.go |
// RightRed the right child of a node is black if not nil and its color is black.
func (n *Node) RightRed() bool {
return n.Right != nil && n.Right.Color == Red
}
// RBTree red-black tree
type RBTree struct {
Node *Node
lock sync.RWMutex
stack *stack
}
// New create a new red-black tree
func New() *RBTree {
return &RBTree{
lock: sync.RWMutex{},
Node: nil,
stack: newStack(nil),
}
}
// LeftRotate left rotate a node.
func LeftRotate(n *Node) *Node {
r := n.Right
if r == nil {
return n
}
n.Right = r.Left
r.Left = n
return r
}
// RightRotate right rotate a node.
func RightRotate(n *Node) *Node {
l := n.Left
if l == nil {
return n
}
n.Left = l.Right
l.Right = n
return l
}
// Add add one key/value node in the tree, replace that if exist
func (t *RBTree) Add(item compare.Lesser) {
t.lock.Lock()
defer t.lock.Unlock()
t.Node = addTreeNode(t.stack, t.Node, item)
}
// Find node
func (t *RBTree) Find(key compare.Lesser) interface{} {
t.lock.RLock()
defer t.lock.RUnlock()
return Find(t.Node, key)
}
// Delete delete node, return the value of deleted node
func (t *RBTree) Delete(key compare.Lesser) (ret interface{}) {
t.lock.Lock()
defer t.lock.Unlock()
t.stack.init(t.Node)
t.Node, ret = deleteTreeNode(t.stack, t.Node, key)
t.stack.reset()
return ret
}
// addTreeNode add a tree node
func addTreeNode(stack *stack, node *Node, item compare.Lesser) *Node {
stack.init(node)
defer stack.reset()
if node == nil {
// case 1: new root
return &Node{
Item: item,
Color: Black,
}
}
for node != nil {
switch {
case item.Less(node.Item):
stack.push(node, Left)
node = node.Left
case node.Item.Less(item):
stack.push(node, Right)
node = node.Right
default:
node.Item = item
return stack.root()
}
}
stack.bindChild(&Node{
Item: item,
Color: Red,
})
addTreeNodeBalance(stack)
root := stack.root()
root.Color = Black
return root
}
// addTreeNodeBalance balance the tree after adding a node
// the pre condition is the child of current stack is red
func addTreeNodeBalance(stack *stack) {
for stack.index > 0 {
p := stack.node()
// case 2: P is black, balance finish
if p.Color == Black {
return
}
// P is red
pp := stack.parent()
// case 1: reach the root
if pp == nil {
return
}
s := stack.sibling()
// case 3: P is red, S is red, PP is black
// execute: set P,S to black, PP to red
// result: black count through PP is not change, continue balance on parent of PP
if s != nil && s.Color == Red {
p.Color = Black
s.Color = Black
pp.Color = Red
stack.pop().pop()
continue
}
// case 4: P is red, S is black, PP is black, the position of N and P are diff.
// execute: rotate up the red child
// result: let match the case 5.
pos, ppos := stack.position(), stack.parentPosition()
if pos != ppos {
if pos == Left {
p = RightRotate(p)
pp.Right = p
} else {
p = LeftRotate(p)
pp.Left = p
}
}
// case 5: P is red, S is black, PP is black, the position of N and P are the same.
// execute: set P to black, PP to red, and rotate P up
// result: black count through P will not change, balance finish.
p.Color = Black
pp.Color = Red
var ppn *Node
if ppos == Left {
ppn = RightRotate(pp)
} else {
ppn = LeftRotate(pp)
}
stack.pop().pop().bindChild(ppn)
return
}
}
// AddNode add new key/value, return the new root node.
// this method add node and balance the tree recursively, not using loop logic.
func AddNode(root *Node, item compare.Lesser) *Node {
return AddNewNode(root, &Node{
Item: item,
})
}
// AddNewNode add new node, return the new root node.
func AddNewNode(root *Node, node *Node) *Node {
// set the new node to red
node.Color = Red
root = addOneNode(root, Left, node)
// reset root color
root.Color = Black
return root
}
// addOneNode recursively down to leaf, and add the new node to the leaf,
// then rebuild the tree from the leaf to root.
// the main purpose is reduce two linked red nodes and keep the black count balance.
//
// code comment use the following terms:
// - N as the balance node
// - L as the left child of N
// - R as the right child of N
// - P as the parent of N
// - LL as the left child of left child of N
// - RR as the right child of right child of N
func addOneNode(node *Node, pos Position, one *Node) *Node {
// case 1: first node
if node == nil {
return one
}
if one.Item.Less(node.Item) {
node.Left = addOneNode(node.Left, Left, one)
// case 2: L is black means it's already balance.
if node.Left.Color == Black {
return node
}
if node.Color == Red {
// case 3: L is red, N is red, N is right child of P
// execute: right rotate up the L
// result: the black count through L,N will not change, but let it match the case 4
if pos == Right {
node = RightRotate(node)
}
// case 4: L is red, N is red, N is left child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
if node.Left.Left != nil && node.Left.Left.Color == Red {
// case 5: N is black, L is red, LL is red
// execute: right rotate N, and make LL to black
// result: black count through N is not change, while that through LL increase 1, tree is now balance.
node = RightRotate(node)
node.Left.Color = Black
}
return node
}
if node.Item.Less(one.Item) {
node.Right = addOneNode(node.Right, Right, one)
// case 2: R is black means it's already balance
if node.Right.Color == Black {
return node
}
if node.Color == Red {
if pos == Left {
// case 3: R is red, N is red, N is left child of P
// execute: left rotate up the R
// result: the black count through R,N will not change, but let it match the case 4
node = LeftRotate(node)
}
// case 4: R is red, N is red, N is right child of P
// execute: nothing
// result: it's the case 5 of PP
return node
}
// case 5: N is black, R is red, RR is red
// execute: left rotate N, and make RR to black
// result: black count through N is not change, while that through RR increase 1, tree is now balance.
if node.Right.Right != nil && node.Right.Right.Color == Red {
node = LeftRotate(node)
node.Right.Color = Black
}
return node
}
// case 6: find the exists node, just replace the old value with the new
node.Item = one.Item
return node
}
// Find find the value of a key.
func Find(node *Node, item compare.Lesser) compare.Lesser {
for node != nil {
switch {
case item.Less(node.Item):
node = node.Left
case node.Item.Less(item):
node = node.Right
default:
return node.Item
}
}
return nil
}
// Delete delete a node.
// return the new root node, and the value of the deleted node.
// the new root node will be nil if no node exists in the tree after deleted.
// the deleted node value will be nil if not found.
func Delete(node *Node, item compare.Lesser) ( | {
return n.Right == nil || n.Right.Color == Black
} | identifier_body | |
Estimate_Hazard.py | if info[7]=='1': #if they are male
hd[House_Name][0][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][0][2][2].append(info)
else:
print 'b',info
else:
print 'bb',info
print info[8]
elif info[3]=='': #if they are alive
if info[8]=='1':#if they are noble
if info[7]=='1': #if they are male
hd[House_Name][1][1][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][1][2].append(info)
else:
print 'c',info
elif info[8]=='0': #if they are smallfolk
if info[7]=='1': #if they are male
hd[House_Name][1][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][2][2].append(info)
else:
print 'd',info
else:
print 'e',info
for info in data:
for key in houselist:
house_list(key,info)
def ages(alive,dead):
got=72
cok=69
sos=81
ffc=45
dwd=72
bd={'got':got,'cok':cok,'sos':sos,'ffc':ffc,'dwd':dwd}
bnd={'got':0,'cok':1,'sos':2,'ffc':3,'dwd':4}
introductions=[]
lifetimes=[]
for pers in dead:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[13]=='1':
end='dwd'
elif pers[12]=='1':
end='ffc'
elif pers[11]=='1':
end='sos'
elif pers[10]=='1':
end='cok'
elif pers[9]=='1':
end='got'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
if pers[4]=='':
death=bnd[end]+1
else:
death=((float(pers[4])+1)/bd[end])+bnd[end]
life=death-birth
lifetimes.append(life)
for pers in alive:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
introductions.append(5-birth)
return introductions,lifetimes
def SurvivalHaz(introductions,lifetimes):
haz=survival.EstimateHazardFunction(lifetimes, introductions)
sf=haz.MakeSurvival()
# thinkplot.plot(sf,color='Grey')
# plt.xlabel("Age (books)")
# plt.ylabel("Probability of Surviving")
# plt.title('Survial Function')
# thinkplot.show()
# thinkplot.plot(haz,color='Grey')
# plt.title('Hazard Function')
# plt.xlabel("Age (books)")
# plt.ylabel("Percent of Lives That End")
# thinkplot.show()
return sf,haz
class GOT(thinkbayes2.Suite, thinkbayes2.Joint):
def Likelihood(self, data, hypo):
age, alive = data
k, lam = hypo
if alive:
prob = 1-exponweib.cdf(age, k, lam)
else:
prob = exponweib.pdf(age, k, lam)
return prob
def | (k, lam, age, alive):
joint = thinkbayes2.MakeJoint(k, lam)
suite = GOT(joint)
suite.Update((age, alive))
k, lam = suite.Marginal(0, label=k.label), suite.Marginal(1, label=lam.label)
return k, lam
def makePMF(k,lam):
k.label = 'K'
lam.label = 'Lam'
print("Updating deaths")
numDead = len(dead)
ticks = math.ceil(numDead/100)
i = 0
for age in lifetimes:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
# age = float(pers[-1])
k, lam = Update(k, lam, age, False)
print('Updating alives')
numAlive = len(alive)
ticks = math.ceil(numAlive/100)
i = 0
for age in introductions:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
k, lam = Update(k, lam, age, True)
return k,lam
def WriteFile(k,lam,House):
intervalk = k.Percentile(5), k.Percentile(95)
intervallam = lam.Percentile(5), lam.Percentile(95)
good = raw_input('Good? y/n')
if good=='y':
file = open("klam.txt", "a")
Words=[House,'\n','K\n',str(k),'\n','\n','lam\n',str(lam),'\n','\n','K-90per cred\n',str(intervalk),'\n','\n','Lam-90per cred\n',str(intervallam),'\n','\n',]
file.writelines(Words)
file.close()
def cred_params(house):
file = open('house_all_alivef.txt', 'r')
i=-1
cred_param=[['Stark'],['Baratheon'],['None'],['Lannister'],['Tully'],['Arryn'],['Targaryen'],['Greyjoy'],['Wildling'],['Night\'s Watch'],['Tyrell'],['Martell']]
linelist=[]
for line in file:
if line[0] =='(':
linelist.append(line)
j=0
for i in range(len(linelist)):
if i%2==0:
kl=float(linelist[i][1:19])
kh=float(linelist[i][21:38])
cred_param[j].append(kl)
cred_param[j].append(kh)
j+=1
j=0
for i in range(len(linelist)):
if i%2!=0:
ll=float(linelist[i][1:19])
lh=float(linelist[i][22:38])
cred_param[j].append(ll)
cred_param[j].append(lh)
j+=1
for i in range(len(cred_param)):
if cred_param[i][0]==house:
return cred_param[i][1],cred_param[i][2],cred_param[i][3],cred_param[i][4]
def CredIntPlt(sf,kl,kh,ll,lh,house,mk,ml,Title):
listcol=colordict[house]
Dark=listcol[0]
Mid=listcol[1]
Light=listcol[2]
arr=np.linspace(0,7,num=100)
weibSurv2 = exponweib.cdf(arr, kl, lh)
weibSurv4 = exponweib.cdf(arr, kh, ll)
weibSurv1 = exponweib.cdf(arr, mk, ml)
# p4,=plt.plot(arr, 1-weibSurv2,color=Dark,linewidth=3)
p1,=plt.plot(arr, 1-weibSurv2,color=Light,linewidth=4)
# p2,=plt.plot(arr, 1-weibSurv1,color=Mid,linewidth=3,linestyle='--')
p3,=plt.plot(arr, 1-weibSurv4,color=Light,linewidth=4)
plt.fill_between(arr,1-weibSurv2,1-weibSurv4, facecolor=Light, alpha=.3)
# thinkplot.plot(sf,color=Dark)
plt.xlabel('Age in Books')
plt.ylabel('Probability of Survival')
plt.ylim([.0,1])
plt.text(6.3,0.95,'Theon',color='Khaki')
plt.text(5.3,0.4,'Lord Walder Frey',color='DarkSeaGreen')
# plt.legend([p1,p2,p4],['90 Percent Credible Interval','Best Estimate','Data'])
plt.title(Title)
def char_lists(house,Gender,Class):
cur_house=hd[house]
| Update | identifier_name |
Estimate_Hazard.py | if info[7]=='1': #if they are male
hd[House_Name][0][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][0][2][2].append(info)
else:
print 'b',info
else:
print 'bb',info
print info[8]
elif info[3]=='': #if they are alive
if info[8]=='1':#if they are noble
if info[7]=='1': #if they are male
hd[House_Name][1][1][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][1][2].append(info)
else:
print 'c',info
elif info[8]=='0': #if they are smallfolk
if info[7]=='1': #if they are male
hd[House_Name][1][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][2][2].append(info)
else:
print 'd',info
else:
|
for info in data:
for key in houselist:
house_list(key,info)
def ages(alive,dead):
got=72
cok=69
sos=81
ffc=45
dwd=72
bd={'got':got,'cok':cok,'sos':sos,'ffc':ffc,'dwd':dwd}
bnd={'got':0,'cok':1,'sos':2,'ffc':3,'dwd':4}
introductions=[]
lifetimes=[]
for pers in dead:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[13]=='1':
end='dwd'
elif pers[12]=='1':
end='ffc'
elif pers[11]=='1':
end='sos'
elif pers[10]=='1':
end='cok'
elif pers[9]=='1':
end='got'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
if pers[4]=='':
death=bnd[end]+1
else:
death=((float(pers[4])+1)/bd[end])+bnd[end]
life=death-birth
lifetimes.append(life)
for pers in alive:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
introductions.append(5-birth)
return introductions,lifetimes
def SurvivalHaz(introductions,lifetimes):
haz=survival.EstimateHazardFunction(lifetimes, introductions)
sf=haz.MakeSurvival()
# thinkplot.plot(sf,color='Grey')
# plt.xlabel("Age (books)")
# plt.ylabel("Probability of Surviving")
# plt.title('Survial Function')
# thinkplot.show()
# thinkplot.plot(haz,color='Grey')
# plt.title('Hazard Function')
# plt.xlabel("Age (books)")
# plt.ylabel("Percent of Lives That End")
# thinkplot.show()
return sf,haz
class GOT(thinkbayes2.Suite, thinkbayes2.Joint):
def Likelihood(self, data, hypo):
age, alive = data
k, lam = hypo
if alive:
prob = 1-exponweib.cdf(age, k, lam)
else:
prob = exponweib.pdf(age, k, lam)
return prob
def Update(k, lam, age, alive):
joint = thinkbayes2.MakeJoint(k, lam)
suite = GOT(joint)
suite.Update((age, alive))
k, lam = suite.Marginal(0, label=k.label), suite.Marginal(1, label=lam.label)
return k, lam
def makePMF(k,lam):
k.label = 'K'
lam.label = 'Lam'
print("Updating deaths")
numDead = len(dead)
ticks = math.ceil(numDead/100)
i = 0
for age in lifetimes:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
# age = float(pers[-1])
k, lam = Update(k, lam, age, False)
print('Updating alives')
numAlive = len(alive)
ticks = math.ceil(numAlive/100)
i = 0
for age in introductions:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
k, lam = Update(k, lam, age, True)
return k,lam
def WriteFile(k,lam,House):
intervalk = k.Percentile(5), k.Percentile(95)
intervallam = lam.Percentile(5), lam.Percentile(95)
good = raw_input('Good? y/n')
if good=='y':
file = open("klam.txt", "a")
Words=[House,'\n','K\n',str(k),'\n','\n','lam\n',str(lam),'\n','\n','K-90per cred\n',str(intervalk),'\n','\n','Lam-90per cred\n',str(intervallam),'\n','\n',]
file.writelines(Words)
file.close()
def cred_params(house):
file = open('house_all_alivef.txt', 'r')
i=-1
cred_param=[['Stark'],['Baratheon'],['None'],['Lannister'],['Tully'],['Arryn'],['Targaryen'],['Greyjoy'],['Wildling'],['Night\'s Watch'],['Tyrell'],['Martell']]
linelist=[]
for line in file:
if line[0] =='(':
linelist.append(line)
j=0
for i in range(len(linelist)):
if i%2==0:
kl=float(linelist[i][1:19])
kh=float(linelist[i][21:38])
cred_param[j].append(kl)
cred_param[j].append(kh)
j+=1
j=0
for i in range(len(linelist)):
if i%2!=0:
ll=float(linelist[i][1:19])
lh=float(linelist[i][22:38])
cred_param[j].append(ll)
cred_param[j].append(lh)
j+=1
for i in range(len(cred_param)):
if cred_param[i][0]==house:
return cred_param[i][1],cred_param[i][2],cred_param[i][3],cred_param[i][4]
def CredIntPlt(sf,kl,kh,ll,lh,house,mk,ml,Title):
listcol=colordict[house]
Dark=listcol[0]
Mid=listcol[1]
Light=listcol[2]
arr=np.linspace(0,7,num=100)
weibSurv2 = exponweib.cdf(arr, kl, lh)
weibSurv4 = exponweib.cdf(arr, kh, ll)
weibSurv1 = exponweib.cdf(arr, mk, ml)
# p4,=plt.plot(arr, 1-weibSurv2,color=Dark,linewidth=3)
p1,=plt.plot(arr, 1-weibSurv2,color=Light,linewidth=4)
# p2,=plt.plot(arr, 1-weibSurv1,color=Mid,linewidth=3,linestyle='--')
p3,=plt.plot(arr, 1-weibSurv4,color=Light,linewidth=4)
plt.fill_between(arr,1-weibSurv2,1-weibSurv4, facecolor=Light, alpha=.3)
# thinkplot.plot(sf,color=Dark)
plt.xlabel('Age in Books')
plt.ylabel('Probability of Survival')
plt.ylim([.0,1])
plt.text(6.3,0.95,'Theon',color='Khaki')
plt.text(5.3,0.4,'Lord Walder Frey',color='DarkSeaGreen')
# plt.legend([p1,p2,p4],['90 Percent Credible Interval','Best Estimate','Data'])
plt.title(Title)
def char_lists(house,Gender,Class):
cur_house=hd[house]
| print 'e',info | conditional_block |
Estimate_Hazard.py | if info[7]=='1': #if they are male
hd[House_Name][0][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][0][2][2].append(info)
else:
print 'b',info
else:
print 'bb',info
print info[8]
elif info[3]=='': #if they are alive
if info[8]=='1':#if they are noble
if info[7]=='1': #if they are male
hd[House_Name][1][1][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][1][2].append(info)
else:
print 'c',info
elif info[8]=='0': #if they are smallfolk
if info[7]=='1': #if they are male
hd[House_Name][1][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][2][2].append(info)
else:
print 'd',info
else: | for key in houselist:
house_list(key,info)
def ages(alive,dead):
got=72
cok=69
sos=81
ffc=45
dwd=72
bd={'got':got,'cok':cok,'sos':sos,'ffc':ffc,'dwd':dwd}
bnd={'got':0,'cok':1,'sos':2,'ffc':3,'dwd':4}
introductions=[]
lifetimes=[]
for pers in dead:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[13]=='1':
end='dwd'
elif pers[12]=='1':
end='ffc'
elif pers[11]=='1':
end='sos'
elif pers[10]=='1':
end='cok'
elif pers[9]=='1':
end='got'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
if pers[4]=='':
death=bnd[end]+1
else:
death=((float(pers[4])+1)/bd[end])+bnd[end]
life=death-birth
lifetimes.append(life)
for pers in alive:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
introductions.append(5-birth)
return introductions,lifetimes
def SurvivalHaz(introductions,lifetimes):
haz=survival.EstimateHazardFunction(lifetimes, introductions)
sf=haz.MakeSurvival()
# thinkplot.plot(sf,color='Grey')
# plt.xlabel("Age (books)")
# plt.ylabel("Probability of Surviving")
# plt.title('Survial Function')
# thinkplot.show()
# thinkplot.plot(haz,color='Grey')
# plt.title('Hazard Function')
# plt.xlabel("Age (books)")
# plt.ylabel("Percent of Lives That End")
# thinkplot.show()
return sf,haz
class GOT(thinkbayes2.Suite, thinkbayes2.Joint):
def Likelihood(self, data, hypo):
age, alive = data
k, lam = hypo
if alive:
prob = 1-exponweib.cdf(age, k, lam)
else:
prob = exponweib.pdf(age, k, lam)
return prob
def Update(k, lam, age, alive):
joint = thinkbayes2.MakeJoint(k, lam)
suite = GOT(joint)
suite.Update((age, alive))
k, lam = suite.Marginal(0, label=k.label), suite.Marginal(1, label=lam.label)
return k, lam
def makePMF(k,lam):
k.label = 'K'
lam.label = 'Lam'
print("Updating deaths")
numDead = len(dead)
ticks = math.ceil(numDead/100)
i = 0
for age in lifetimes:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
# age = float(pers[-1])
k, lam = Update(k, lam, age, False)
print('Updating alives')
numAlive = len(alive)
ticks = math.ceil(numAlive/100)
i = 0
for age in introductions:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
k, lam = Update(k, lam, age, True)
return k,lam
def WriteFile(k,lam,House):
intervalk = k.Percentile(5), k.Percentile(95)
intervallam = lam.Percentile(5), lam.Percentile(95)
good = raw_input('Good? y/n')
if good=='y':
file = open("klam.txt", "a")
Words=[House,'\n','K\n',str(k),'\n','\n','lam\n',str(lam),'\n','\n','K-90per cred\n',str(intervalk),'\n','\n','Lam-90per cred\n',str(intervallam),'\n','\n',]
file.writelines(Words)
file.close()
def cred_params(house):
file = open('house_all_alivef.txt', 'r')
i=-1
cred_param=[['Stark'],['Baratheon'],['None'],['Lannister'],['Tully'],['Arryn'],['Targaryen'],['Greyjoy'],['Wildling'],['Night\'s Watch'],['Tyrell'],['Martell']]
linelist=[]
for line in file:
if line[0] =='(':
linelist.append(line)
j=0
for i in range(len(linelist)):
if i%2==0:
kl=float(linelist[i][1:19])
kh=float(linelist[i][21:38])
cred_param[j].append(kl)
cred_param[j].append(kh)
j+=1
j=0
for i in range(len(linelist)):
if i%2!=0:
ll=float(linelist[i][1:19])
lh=float(linelist[i][22:38])
cred_param[j].append(ll)
cred_param[j].append(lh)
j+=1
for i in range(len(cred_param)):
if cred_param[i][0]==house:
return cred_param[i][1],cred_param[i][2],cred_param[i][3],cred_param[i][4]
def CredIntPlt(sf,kl,kh,ll,lh,house,mk,ml,Title):
listcol=colordict[house]
Dark=listcol[0]
Mid=listcol[1]
Light=listcol[2]
arr=np.linspace(0,7,num=100)
weibSurv2 = exponweib.cdf(arr, kl, lh)
weibSurv4 = exponweib.cdf(arr, kh, ll)
weibSurv1 = exponweib.cdf(arr, mk, ml)
# p4,=plt.plot(arr, 1-weibSurv2,color=Dark,linewidth=3)
p1,=plt.plot(arr, 1-weibSurv2,color=Light,linewidth=4)
# p2,=plt.plot(arr, 1-weibSurv1,color=Mid,linewidth=3,linestyle='--')
p3,=plt.plot(arr, 1-weibSurv4,color=Light,linewidth=4)
plt.fill_between(arr,1-weibSurv2,1-weibSurv4, facecolor=Light, alpha=.3)
# thinkplot.plot(sf,color=Dark)
plt.xlabel('Age in Books')
plt.ylabel('Probability of Survival')
plt.ylim([.0,1])
plt.text(6.3,0.95,'Theon',color='Khaki')
plt.text(5.3,0.4,'Lord Walder Frey',color='DarkSeaGreen')
# plt.legend([p1,p2,p4],['90 Percent Credible Interval','Best Estimate','Data'])
plt.title(Title)
def char_lists(house,Gender,Class):
cur_house=hd[house]
| print 'e',info
for info in data: | random_line_split |
Estimate_Hazard.py | ': #if they are smallfolk
if info[7]=='1': #if they are male
hd[House_Name][1][2][1].append(info)
elif info[7]=='0': #if they are female
hd[House_Name][1][2][2].append(info)
else:
print 'd',info
else:
print 'e',info
for info in data:
for key in houselist:
house_list(key,info)
def ages(alive,dead):
got=72
cok=69
sos=81
ffc=45
dwd=72
bd={'got':got,'cok':cok,'sos':sos,'ffc':ffc,'dwd':dwd}
bnd={'got':0,'cok':1,'sos':2,'ffc':3,'dwd':4}
introductions=[]
lifetimes=[]
for pers in dead:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[13]=='1':
end='dwd'
elif pers[12]=='1':
end='ffc'
elif pers[11]=='1':
end='sos'
elif pers[10]=='1':
end='cok'
elif pers[9]=='1':
end='got'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
if pers[4]=='':
death=bnd[end]+1
else:
death=((float(pers[4])+1)/bd[end])+bnd[end]
life=death-birth
lifetimes.append(life)
for pers in alive:
if pers[9]=='1':
start='got'
elif pers[10]=='1':
start='cok'
elif pers[11]=='1':
start='sos'
elif pers[12]=='1':
start='ffc'
elif pers[13]=='1':
start='dwd'
if pers[5]=='':
birth=bnd[start]
else:
birth=(float(pers[5])/bd[start])+bnd[start]
introductions.append(5-birth)
return introductions,lifetimes
def SurvivalHaz(introductions,lifetimes):
haz=survival.EstimateHazardFunction(lifetimes, introductions)
sf=haz.MakeSurvival()
# thinkplot.plot(sf,color='Grey')
# plt.xlabel("Age (books)")
# plt.ylabel("Probability of Surviving")
# plt.title('Survial Function')
# thinkplot.show()
# thinkplot.plot(haz,color='Grey')
# plt.title('Hazard Function')
# plt.xlabel("Age (books)")
# plt.ylabel("Percent of Lives That End")
# thinkplot.show()
return sf,haz
class GOT(thinkbayes2.Suite, thinkbayes2.Joint):
def Likelihood(self, data, hypo):
age, alive = data
k, lam = hypo
if alive:
prob = 1-exponweib.cdf(age, k, lam)
else:
prob = exponweib.pdf(age, k, lam)
return prob
def Update(k, lam, age, alive):
joint = thinkbayes2.MakeJoint(k, lam)
suite = GOT(joint)
suite.Update((age, alive))
k, lam = suite.Marginal(0, label=k.label), suite.Marginal(1, label=lam.label)
return k, lam
def makePMF(k,lam):
k.label = 'K'
lam.label = 'Lam'
print("Updating deaths")
numDead = len(dead)
ticks = math.ceil(numDead/100)
i = 0
for age in lifetimes:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
# age = float(pers[-1])
k, lam = Update(k, lam, age, False)
print('Updating alives')
numAlive = len(alive)
ticks = math.ceil(numAlive/100)
i = 0
for age in introductions:
# if not i%ticks:
# print('.', end='', flush=True)
i += 1
k, lam = Update(k, lam, age, True)
return k,lam
def WriteFile(k,lam,House):
intervalk = k.Percentile(5), k.Percentile(95)
intervallam = lam.Percentile(5), lam.Percentile(95)
good = raw_input('Good? y/n')
if good=='y':
file = open("klam.txt", "a")
Words=[House,'\n','K\n',str(k),'\n','\n','lam\n',str(lam),'\n','\n','K-90per cred\n',str(intervalk),'\n','\n','Lam-90per cred\n',str(intervallam),'\n','\n',]
file.writelines(Words)
file.close()
def cred_params(house):
file = open('house_all_alivef.txt', 'r')
i=-1
cred_param=[['Stark'],['Baratheon'],['None'],['Lannister'],['Tully'],['Arryn'],['Targaryen'],['Greyjoy'],['Wildling'],['Night\'s Watch'],['Tyrell'],['Martell']]
linelist=[]
for line in file:
if line[0] =='(':
linelist.append(line)
j=0
for i in range(len(linelist)):
if i%2==0:
kl=float(linelist[i][1:19])
kh=float(linelist[i][21:38])
cred_param[j].append(kl)
cred_param[j].append(kh)
j+=1
j=0
for i in range(len(linelist)):
if i%2!=0:
ll=float(linelist[i][1:19])
lh=float(linelist[i][22:38])
cred_param[j].append(ll)
cred_param[j].append(lh)
j+=1
for i in range(len(cred_param)):
if cred_param[i][0]==house:
return cred_param[i][1],cred_param[i][2],cred_param[i][3],cred_param[i][4]
def CredIntPlt(sf,kl,kh,ll,lh,house,mk,ml,Title):
listcol=colordict[house]
Dark=listcol[0]
Mid=listcol[1]
Light=listcol[2]
arr=np.linspace(0,7,num=100)
weibSurv2 = exponweib.cdf(arr, kl, lh)
weibSurv4 = exponweib.cdf(arr, kh, ll)
weibSurv1 = exponweib.cdf(arr, mk, ml)
# p4,=plt.plot(arr, 1-weibSurv2,color=Dark,linewidth=3)
p1,=plt.plot(arr, 1-weibSurv2,color=Light,linewidth=4)
# p2,=plt.plot(arr, 1-weibSurv1,color=Mid,linewidth=3,linestyle='--')
p3,=plt.plot(arr, 1-weibSurv4,color=Light,linewidth=4)
plt.fill_between(arr,1-weibSurv2,1-weibSurv4, facecolor=Light, alpha=.3)
# thinkplot.plot(sf,color=Dark)
plt.xlabel('Age in Books')
plt.ylabel('Probability of Survival')
plt.ylim([.0,1])
plt.text(6.3,0.95,'Theon',color='Khaki')
plt.text(5.3,0.4,'Lord Walder Frey',color='DarkSeaGreen')
# plt.legend([p1,p2,p4],['90 Percent Credible Interval','Best Estimate','Data'])
plt.title(Title)
def char_lists(house,Gender,Class):
| cur_house=hd[house]
alive1=cur_house[1][1][1] #Noble Men
alive2=cur_house[1][1][2] #Noble Women
alive3=cur_house[1][2][1] #Small Men
alive4=cur_house[1][2][2] #Small Women
alive1.pop(0)
alive2.pop(0)
alive3.pop(0)
alive4.pop(0)
dead1=cur_house[0][1][1]
dead2=cur_house[0][1][2]
dead3=cur_house[0][2][1]
dead4=cur_house[0][2][2]
dead1.pop(0)
dead2.pop(0)
dead3.pop(0)
dead4.pop(0)
if Gender=='M' and Class=='Noble': | identifier_body | |
main.rs | , table, tag};
use options_clap::get_options_clap;
use options::{OptimMethod, TerminatingOutput};
use fatigue::dadn::DaDn;
use fatigue::COMMENT;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use log::error;
use std::io::Write;
mod list;
mod optimise;
mod sweep;
mod factors;
mod options;
mod options_clap;
mod nelder;
mod numbers;
mod vector;
#[cfg(feature = "GSL")]
mod optimise_gsl;
fn main() {
env_logger::init();
// get all the data
let materials = material::get_all_dadns();
let mut options = options::get_default_options();
get_options_clap("", &mut options);
println!("{}easiGrow: version {}", COMMENT, crate_version!());
println!("{}", COMMENT);
if options.verbosity == options::Verbosity::Verbose {
println!("{}Options: ", COMMENT);
println!("{}", options);
}
options::read_all_files(&mut options);
// process all the modifications to the sequence
options.sequence = cycle::process_seq_mods(&options.sequence, &options.seq_mods);
// Get the cycles from either the external sequence file, command line or the cycle file.
if options.cycle_infile != "" && options.seq_infile != "" {
error!("Error: you have specified a sequence file '{}' as well as a cycle file '{}'. Specify only one.",
options.seq_infile, options.cycle_infile);
std::process::exit(2)
}
let unclosed = if options.cycle_infile == "" {
let (cycles, left) = cycle::cycles_from_sequence(&options.sequence, &options.cycle_method);
options.cycles = cycles;
left
} else {
Vec::new()
};
// process all the modifications to the cycles
options.cycles = cycle::process_cycle_mods(&options.cycles, &options.cycle_mods);
// Only keep those cycles that remain after filtering the cycles
// and mark the turning points associated with those cycles. This
// section is only for writing out the modified sequence, since
// the filtered cycles are all that is used for crack growth.
if options.seq_mods.cycles {
let mut keep = vec![false; options.sequence.len()];
for cycle in &options.cycles {
keep[cycle.max.index] = true;
keep[cycle.min.index] = true;
}
options.sequence.retain(|s| keep[s.index])
}
// Any request for file or info output will result in program
// termination. This policy is to reduce the complexity for the
// user as to what the program does.
// Write out the sequence file.
if let Some(outfile) = options.seq_mods.outfile {
io::write_sequence(&outfile, &options.sequence);
std::process::exit(0);
}
// Write out the cycles file.
if let Some(outfile) = options.cycle_mods.outfile {
io::write_cycles(&outfile, &options.cycles);
std::process::exit(0);
}
// write out the beta by converting to a beta table. This can be
// then read back in using the file: option for beta selection.
if options.beta_outfile != "" {
let beta = beta::get_beta_fn(&options.beta, &options.component);
let table_beta = beta.as_table();
// need to write to file
let path = Path::new(&options.beta_outfile);
let display = path.display();
let mut file = match File::create(&path) {
// The `description` method of `io::Error` returns a string that
// describes the error
Err(why) => {
error!(
"Error: could not create the file '{}': {}.",
display,
Error::description(&why)
);
std::process::exit(1)
}
Ok(file) => file,
};
let _ = write!(file, "{}", table_beta);
std::process::exit(0);
}
// write out summary information of the sequence
match options.output {
TerminatingOutput::Summary => {
let seq_source = if options.seq_infile != "" {
options.seq_infile
} else {
// This is a little vague as the sequence could be either
// the default sequence or overwritten with a supplied sequence.
String::from("(Used specified sequence)")
};
cycle::summarise_sequence(&seq_source, &options.sequence, &options.seq_mods);
let cycle_source = if options.cycle_infile != "" {
options.cycle_infile
} else {
format!(
"(Obtained from sequence using '{:?}' method)",
options.cycle_method
)
};
cycle::summarise_cycles(
&cycle_source,
&options.cycles,
&unclosed,
&options.cycle_mods,
);
std::process::exit(0)
}
// write out extended list of options and methods
TerminatingOutput::List => {
list::print_list();
std::process::exit(0);
}
_ => (),
}
// get the correct material parameters for the dadn equation or
// from the command line. If the params are not given, then get the
// dadn material constants from the internal database.
let mut params = options.params.clone();
if params.is_empty() {
// extract out the appropriate material parameters from a file
params = if options.dadn.starts_with("file:") {
let filename = options.dadn.trim_start_matches("file:");
println!(
"{}No parameters given, using the dk values in the dadn file {}",
COMMENT, filename
);
let table = table::Table::read_file(filename, true);
// collapse down the dks and use these as the parameters for optimising
table.variables()
// or from the internal database.
} else {
println!(
"{}No parameters given, obtaining from material library for {}",
COMMENT, options.dadn
);
match materials.iter().find(|m| options.dadn.starts_with(m.name)) {
Some(m) => m.eqn.variables(),
None => {
error!("Error: Unknown dadn model {}", options.dadn);
process::exit(1);
}
}
}
};
// Optimise the parameters to match the predicted crack growth
// rates with the associated measured crack growth rates.
if options.optimise.file != "" {
// optimisation scaling factors
options.params = params.clone();
println!(
"{}Now starting the optimisation with params {:?} ...",
COMMENT, options.params
);
let mut factors = vec![1.0; params.len()]; // non-dimensionalised factors used for optimisation
optimise_error(&options, &mut factors);
println!("{}...finished the optimisation. ", COMMENT);
println!("{}The normalised factors are {:?}", COMMENT, factors);
// Rescale the parameters to include the optimised factors
params = options
.params
.iter()
.zip(factors)
.map(|(p, f)| p * f)
.collect::<Vec<f64>>();
println!("{}The scaled optimised factors are: {:?}", COMMENT, params);
if options.scale == 0.0 {
std::process::exit(0); // not an error if we have performed an optimisation
}
}
// Grow the crack
let history_all = generate_crack_history(&options, ¶ms);
// Lastly, now that we've grown the crack, check if we need to
// generate and write out a pseudo image.
if options.image.file != "" {
println!("Making a pseudo image...");
if options.image.file.ends_with(".svg") {
fracto::write_svg_pseudo_image(&history_all, &options.image);
println!("Image written to file '{}'", options.image.file);
} else {
error!("Error: Currently easigo can only generate svg. Please use a '.svg' suffix");
}
}
}
#[cfg(not(feature = "GSL"))]
fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors)
}
};
}
#[cfg(feature = "GSL")]
fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) | {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(&options, &mut factors),
OptimMethod::Levenberg => optimise_gsl::gsl_match_crack(&options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors);
optimise_gsl::gsl_match_crack(options, &mut factors)
}
};
} | identifier_body | |
main.rs | AC_PI_2;
use std::process;
use std::collections::BTreeSet;
use fatigue::{beta, cycle, dadn, fracto, grow, io, material, table, tag};
use options_clap::get_options_clap;
use options::{OptimMethod, TerminatingOutput};
use fatigue::dadn::DaDn;
use fatigue::COMMENT;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use log::error;
use std::io::Write;
mod list;
mod optimise;
mod sweep;
mod factors;
mod options;
mod options_clap;
mod nelder;
mod numbers;
mod vector;
#[cfg(feature = "GSL")]
mod optimise_gsl;
fn main() {
env_logger::init();
// get all the data
let materials = material::get_all_dadns();
let mut options = options::get_default_options();
get_options_clap("", &mut options);
println!("{}easiGrow: version {}", COMMENT, crate_version!());
println!("{}", COMMENT);
if options.verbosity == options::Verbosity::Verbose {
println!("{}Options: ", COMMENT);
println!("{}", options);
}
options::read_all_files(&mut options);
// process all the modifications to the sequence
options.sequence = cycle::process_seq_mods(&options.sequence, &options.seq_mods);
// Get the cycles from either the external sequence file, command line or the cycle file.
if options.cycle_infile != "" && options.seq_infile != "" {
error!("Error: you have specified a sequence file '{}' as well as a cycle file '{}'. Specify only one.",
options.seq_infile, options.cycle_infile);
std::process::exit(2)
}
let unclosed = if options.cycle_infile == "" {
let (cycles, left) = cycle::cycles_from_sequence(&options.sequence, &options.cycle_method);
options.cycles = cycles;
left
} else {
Vec::new()
};
// process all the modifications to the cycles
options.cycles = cycle::process_cycle_mods(&options.cycles, &options.cycle_mods);
// Only keep those cycles that remain after filtering the cycles
// and mark the turning points associated with those cycles. This
// section is only for writing out the modified sequence, since
// the filtered cycles are all that is used for crack growth.
if options.seq_mods.cycles {
let mut keep = vec![false; options.sequence.len()];
for cycle in &options.cycles {
keep[cycle.max.index] = true;
keep[cycle.min.index] = true;
}
options.sequence.retain(|s| keep[s.index])
}
// Any request for file or info output will result in program
// termination. This policy is to reduce the complexity for the
// user as to what the program does.
// Write out the sequence file.
if let Some(outfile) = options.seq_mods.outfile {
io::write_sequence(&outfile, &options.sequence);
std::process::exit(0);
}
// Write out the cycles file.
if let Some(outfile) = options.cycle_mods.outfile {
io::write_cycles(&outfile, &options.cycles);
std::process::exit(0);
}
// write out the beta by converting to a beta table. This can be
// then read back in using the file: option for beta selection.
if options.beta_outfile != "" {
let beta = beta::get_beta_fn(&options.beta, &options.component);
let table_beta = beta.as_table();
// need to write to file
let path = Path::new(&options.beta_outfile);
let display = path.display();
let mut file = match File::create(&path) {
// The `description` method of `io::Error` returns a string that
// describes the error
Err(why) => {
error!(
"Error: could not create the file '{}': {}.",
display,
Error::description(&why)
);
std::process::exit(1)
}
Ok(file) => file,
};
let _ = write!(file, "{}", table_beta);
std::process::exit(0);
}
// write out summary information of the sequence
match options.output {
TerminatingOutput::Summary => {
let seq_source = if options.seq_infile != "" {
options.seq_infile
} else {
// This is a little vague as the sequence could be either
// the default sequence or overwritten with a supplied sequence.
String::from("(Used specified sequence)")
};
cycle::summarise_sequence(&seq_source, &options.sequence, &options.seq_mods);
let cycle_source = if options.cycle_infile != "" {
options.cycle_infile
} else {
format!(
"(Obtained from sequence using '{:?}' method)",
options.cycle_method
)
};
cycle::summarise_cycles(
&cycle_source,
&options.cycles,
&unclosed,
&options.cycle_mods,
);
std::process::exit(0)
}
// write out extended list of options and methods
TerminatingOutput::List => {
list::print_list();
std::process::exit(0);
}
_ => (),
}
// get the correct material parameters for the dadn equation or
// from the command line. If the params are not given, then get the
// dadn material constants from the internal database.
let mut params = options.params.clone();
if params.is_empty() {
// extract out the appropriate material parameters from a file
params = if options.dadn.starts_with("file:") {
let filename = options.dadn.trim_start_matches("file:");
println!(
"{}No parameters given, using the dk values in the dadn file {}",
COMMENT, filename
);
let table = table::Table::read_file(filename, true);
// collapse down the dks and use these as the parameters for optimising
table.variables()
// or from the internal database.
} else {
println!(
"{}No parameters given, obtaining from material library for {}",
COMMENT, options.dadn
);
match materials.iter().find(|m| options.dadn.starts_with(m.name)) {
Some(m) => m.eqn.variables(),
None => {
error!("Error: Unknown dadn model {}", options.dadn);
process::exit(1);
}
}
}
};
// Optimise the parameters to match the predicted crack growth
// rates with the associated measured crack growth rates.
if options.optimise.file != "" {
// optimisation scaling factors
options.params = params.clone();
println!(
"{}Now starting the optimisation with params {:?} ...",
COMMENT, options.params
);
let mut factors = vec![1.0; params.len()]; // non-dimensionalised factors used for optimisation
optimise_error(&options, &mut factors);
println!("{}...finished the optimisation. ", COMMENT);
println!("{}The normalised factors are {:?}", COMMENT, factors);
// Rescale the parameters to include the optimised factors
params = options
.params
.iter()
.zip(factors)
.map(|(p, f)| p * f)
.collect::<Vec<f64>>();
println!("{}The scaled optimised factors are: {:?}", COMMENT, params);
if options.scale == 0.0 {
std::process::exit(0); // not an error if we have performed an optimisation
}
}
// Grow the crack
let history_all = generate_crack_history(&options, ¶ms);
// Lastly, now that we've grown the crack, check if we need to
// generate and write out a pseudo image.
if options.image.file != "" {
println!("Making a pseudo image...");
if options.image.file.ends_with(".svg") {
fracto::write_svg_pseudo_image(&history_all, &options.image);
println!("Image written to file '{}'", options.image.file);
} else {
error!("Error: Currently easigo can only generate svg. Please use a '.svg' suffix");
}
}
}
#[cfg(not(feature = "GSL"))]
fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors)
}
};
}
#[cfg(feature = "GSL")] | OptimMethod::Nelder => optimise::nelder_match_crack(&options, &mut factors),
OptimMethod::Levenberg => optimise_gsl::gsl_match_crack(&options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
| fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors), | random_line_split |
main.rs | io, material, table, tag};
use options_clap::get_options_clap;
use options::{OptimMethod, TerminatingOutput};
use fatigue::dadn::DaDn;
use fatigue::COMMENT;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use log::error;
use std::io::Write;
mod list;
mod optimise;
mod sweep;
mod factors;
mod options;
mod options_clap;
mod nelder;
mod numbers;
mod vector;
#[cfg(feature = "GSL")]
mod optimise_gsl;
fn main() {
env_logger::init();
// get all the data
let materials = material::get_all_dadns();
let mut options = options::get_default_options();
get_options_clap("", &mut options);
println!("{}easiGrow: version {}", COMMENT, crate_version!());
println!("{}", COMMENT);
if options.verbosity == options::Verbosity::Verbose {
println!("{}Options: ", COMMENT);
println!("{}", options);
}
options::read_all_files(&mut options);
// process all the modifications to the sequence
options.sequence = cycle::process_seq_mods(&options.sequence, &options.seq_mods);
// Get the cycles from either the external sequence file, command line or the cycle file.
if options.cycle_infile != "" && options.seq_infile != "" {
error!("Error: you have specified a sequence file '{}' as well as a cycle file '{}'. Specify only one.",
options.seq_infile, options.cycle_infile);
std::process::exit(2)
}
let unclosed = if options.cycle_infile == "" {
let (cycles, left) = cycle::cycles_from_sequence(&options.sequence, &options.cycle_method);
options.cycles = cycles;
left
} else {
Vec::new()
};
// process all the modifications to the cycles
options.cycles = cycle::process_cycle_mods(&options.cycles, &options.cycle_mods);
// Only keep those cycles that remain after filtering the cycles
// and mark the turning points associated with those cycles. This
// section is only for writing out the modified sequence, since
// the filtered cycles are all that is used for crack growth.
if options.seq_mods.cycles {
let mut keep = vec![false; options.sequence.len()];
for cycle in &options.cycles {
keep[cycle.max.index] = true;
keep[cycle.min.index] = true;
}
options.sequence.retain(|s| keep[s.index])
}
// Any request for file or info output will result in program
// termination. This policy is to reduce the complexity for the
// user as to what the program does.
// Write out the sequence file.
if let Some(outfile) = options.seq_mods.outfile {
io::write_sequence(&outfile, &options.sequence);
std::process::exit(0);
}
// Write out the cycles file.
if let Some(outfile) = options.cycle_mods.outfile {
io::write_cycles(&outfile, &options.cycles);
std::process::exit(0);
}
// write out the beta by converting to a beta table. This can be
// then read back in using the file: option for beta selection.
if options.beta_outfile != "" {
let beta = beta::get_beta_fn(&options.beta, &options.component);
let table_beta = beta.as_table();
// need to write to file
let path = Path::new(&options.beta_outfile);
let display = path.display();
let mut file = match File::create(&path) {
// The `description` method of `io::Error` returns a string that
// describes the error
Err(why) => {
error!(
"Error: could not create the file '{}': {}.",
display,
Error::description(&why)
);
std::process::exit(1)
}
Ok(file) => file,
};
let _ = write!(file, "{}", table_beta);
std::process::exit(0);
}
// write out summary information of the sequence
match options.output {
TerminatingOutput::Summary => {
let seq_source = if options.seq_infile != "" {
options.seq_infile
} else {
// This is a little vague as the sequence could be either
// the default sequence or overwritten with a supplied sequence.
String::from("(Used specified sequence)")
};
cycle::summarise_sequence(&seq_source, &options.sequence, &options.seq_mods);
let cycle_source = if options.cycle_infile != "" {
options.cycle_infile
} else {
format!(
"(Obtained from sequence using '{:?}' method)",
options.cycle_method
)
};
cycle::summarise_cycles(
&cycle_source,
&options.cycles,
&unclosed,
&options.cycle_mods,
);
std::process::exit(0)
}
// write out extended list of options and methods
TerminatingOutput::List => {
list::print_list();
std::process::exit(0);
}
_ => (),
}
// get the correct material parameters for the dadn equation or
// from the command line. If the params are not given, then get the
// dadn material constants from the internal database.
let mut params = options.params.clone();
if params.is_empty() {
// extract out the appropriate material parameters from a file
params = if options.dadn.starts_with("file:") {
let filename = options.dadn.trim_start_matches("file:");
println!(
"{}No parameters given, using the dk values in the dadn file {}",
COMMENT, filename
);
let table = table::Table::read_file(filename, true);
// collapse down the dks and use these as the parameters for optimising
table.variables()
// or from the internal database.
} else {
println!(
"{}No parameters given, obtaining from material library for {}",
COMMENT, options.dadn
);
match materials.iter().find(|m| options.dadn.starts_with(m.name)) {
Some(m) => m.eqn.variables(),
None => {
error!("Error: Unknown dadn model {}", options.dadn);
process::exit(1);
}
}
}
};
// Optimise the parameters to match the predicted crack growth
// rates with the associated measured crack growth rates.
if options.optimise.file != "" {
// optimisation scaling factors
options.params = params.clone();
println!(
"{}Now starting the optimisation with params {:?} ...",
COMMENT, options.params
);
let mut factors = vec![1.0; params.len()]; // non-dimensionalised factors used for optimisation
optimise_error(&options, &mut factors);
println!("{}...finished the optimisation. ", COMMENT);
println!("{}The normalised factors are {:?}", COMMENT, factors);
// Rescale the parameters to include the optimised factors
params = options
.params
.iter()
.zip(factors)
.map(|(p, f)| p * f)
.collect::<Vec<f64>>();
println!("{}The scaled optimised factors are: {:?}", COMMENT, params);
if options.scale == 0.0 {
std::process::exit(0); // not an error if we have performed an optimisation
}
}
// Grow the crack
let history_all = generate_crack_history(&options, ¶ms);
// Lastly, now that we've grown the crack, check if we need to
// generate and write out a pseudo image.
if options.image.file != "" {
println!("Making a pseudo image...");
if options.image.file.ends_with(".svg") {
fracto::write_svg_pseudo_image(&history_all, &options.image);
println!("Image written to file '{}'", options.image.file);
} else {
error!("Error: Currently easigo can only generate svg. Please use a '.svg' suffix");
}
}
}
#[cfg(not(feature = "GSL"))]
fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors)
}
};
}
#[cfg(feature = "GSL")]
fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(&options, &mut factors),
OptimMethod::Levenberg => optimise_gsl::gsl_match_crack(&options, &mut factors),
OptimMethod::All => | {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors);
optimise_gsl::gsl_match_crack(options, &mut factors)
} | conditional_block | |
main.rs | _PI_2;
use std::process;
use std::collections::BTreeSet;
use fatigue::{beta, cycle, dadn, fracto, grow, io, material, table, tag};
use options_clap::get_options_clap;
use options::{OptimMethod, TerminatingOutput};
use fatigue::dadn::DaDn;
use fatigue::COMMENT;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use log::error;
use std::io::Write;
mod list;
mod optimise;
mod sweep;
mod factors;
mod options;
mod options_clap;
mod nelder;
mod numbers;
mod vector;
#[cfg(feature = "GSL")]
mod optimise_gsl;
fn main() {
env_logger::init();
// get all the data
let materials = material::get_all_dadns();
let mut options = options::get_default_options();
get_options_clap("", &mut options);
println!("{}easiGrow: version {}", COMMENT, crate_version!());
println!("{}", COMMENT);
if options.verbosity == options::Verbosity::Verbose {
println!("{}Options: ", COMMENT);
println!("{}", options);
}
options::read_all_files(&mut options);
// process all the modifications to the sequence
options.sequence = cycle::process_seq_mods(&options.sequence, &options.seq_mods);
// Get the cycles from either the external sequence file, command line or the cycle file.
if options.cycle_infile != "" && options.seq_infile != "" {
error!("Error: you have specified a sequence file '{}' as well as a cycle file '{}'. Specify only one.",
options.seq_infile, options.cycle_infile);
std::process::exit(2)
}
let unclosed = if options.cycle_infile == "" {
let (cycles, left) = cycle::cycles_from_sequence(&options.sequence, &options.cycle_method);
options.cycles = cycles;
left
} else {
Vec::new()
};
// process all the modifications to the cycles
options.cycles = cycle::process_cycle_mods(&options.cycles, &options.cycle_mods);
// Only keep those cycles that remain after filtering the cycles
// and mark the turning points associated with those cycles. This
// section is only for writing out the modified sequence, since
// the filtered cycles are all that is used for crack growth.
if options.seq_mods.cycles {
let mut keep = vec![false; options.sequence.len()];
for cycle in &options.cycles {
keep[cycle.max.index] = true;
keep[cycle.min.index] = true;
}
options.sequence.retain(|s| keep[s.index])
}
// Any request for file or info output will result in program
// termination. This policy is to reduce the complexity for the
// user as to what the program does.
// Write out the sequence file.
if let Some(outfile) = options.seq_mods.outfile {
io::write_sequence(&outfile, &options.sequence);
std::process::exit(0);
}
// Write out the cycles file.
if let Some(outfile) = options.cycle_mods.outfile {
io::write_cycles(&outfile, &options.cycles);
std::process::exit(0);
}
// write out the beta by converting to a beta table. This can be
// then read back in using the file: option for beta selection.
if options.beta_outfile != "" {
let beta = beta::get_beta_fn(&options.beta, &options.component);
let table_beta = beta.as_table();
// need to write to file
let path = Path::new(&options.beta_outfile);
let display = path.display();
let mut file = match File::create(&path) {
// The `description` method of `io::Error` returns a string that
// describes the error
Err(why) => {
error!(
"Error: could not create the file '{}': {}.",
display,
Error::description(&why)
);
std::process::exit(1)
}
Ok(file) => file,
};
let _ = write!(file, "{}", table_beta);
std::process::exit(0);
}
// write out summary information of the sequence
match options.output {
TerminatingOutput::Summary => {
let seq_source = if options.seq_infile != "" {
options.seq_infile
} else {
// This is a little vague as the sequence could be either
// the default sequence or overwritten with a supplied sequence.
String::from("(Used specified sequence)")
};
cycle::summarise_sequence(&seq_source, &options.sequence, &options.seq_mods);
let cycle_source = if options.cycle_infile != "" {
options.cycle_infile
} else {
format!(
"(Obtained from sequence using '{:?}' method)",
options.cycle_method
)
};
cycle::summarise_cycles(
&cycle_source,
&options.cycles,
&unclosed,
&options.cycle_mods,
);
std::process::exit(0)
}
// write out extended list of options and methods
TerminatingOutput::List => {
list::print_list();
std::process::exit(0);
}
_ => (),
}
// get the correct material parameters for the dadn equation or
// from the command line. If the params are not given, then get the
// dadn material constants from the internal database.
let mut params = options.params.clone();
if params.is_empty() {
// extract out the appropriate material parameters from a file
params = if options.dadn.starts_with("file:") {
let filename = options.dadn.trim_start_matches("file:");
println!(
"{}No parameters given, using the dk values in the dadn file {}",
COMMENT, filename
);
let table = table::Table::read_file(filename, true);
// collapse down the dks and use these as the parameters for optimising
table.variables()
// or from the internal database.
} else {
println!(
"{}No parameters given, obtaining from material library for {}",
COMMENT, options.dadn
);
match materials.iter().find(|m| options.dadn.starts_with(m.name)) {
Some(m) => m.eqn.variables(),
None => {
error!("Error: Unknown dadn model {}", options.dadn);
process::exit(1);
}
}
}
};
// Optimise the parameters to match the predicted crack growth
// rates with the associated measured crack growth rates.
if options.optimise.file != "" {
// optimisation scaling factors
options.params = params.clone();
println!(
"{}Now starting the optimisation with params {:?} ...",
COMMENT, options.params
);
let mut factors = vec![1.0; params.len()]; // non-dimensionalised factors used for optimisation
optimise_error(&options, &mut factors);
println!("{}...finished the optimisation. ", COMMENT);
println!("{}The normalised factors are {:?}", COMMENT, factors);
// Rescale the parameters to include the optimised factors
params = options
.params
.iter()
.zip(factors)
.map(|(p, f)| p * f)
.collect::<Vec<f64>>();
println!("{}The scaled optimised factors are: {:?}", COMMENT, params);
if options.scale == 0.0 {
std::process::exit(0); // not an error if we have performed an optimisation
}
}
// Grow the crack
let history_all = generate_crack_history(&options, ¶ms);
// Lastly, now that we've grown the crack, check if we need to
// generate and write out a pseudo image.
if options.image.file != "" {
println!("Making a pseudo image...");
if options.image.file.ends_with(".svg") {
fracto::write_svg_pseudo_image(&history_all, &options.image);
println!("Image written to file '{}'", options.image.file);
} else {
error!("Error: Currently easigo can only generate svg. Please use a '.svg' suffix");
}
}
}
#[cfg(not(feature = "GSL"))]
fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
optimise::nelder_match_crack(options, &mut factors)
}
};
}
#[cfg(feature = "GSL")]
fn | (options: &options::EasiOptions, mut factors: &mut [f64]) {
match options.optimise.method {
OptimMethod::Sweep => sweep::sweep(options, &mut factors),
OptimMethod::Nelder => optimise::nelder_match_crack(&options, &mut factors),
OptimMethod::Levenberg => optimise_gsl::gsl_match_crack(&options, &mut factors),
OptimMethod::All => {
sweep::sweep(options, &mut factors);
| optimise_error | identifier_name |
player.go | rematchOffer"`
AcceptRematch bool `json:"acceptRematch"`
FinishRoom bool `json:"finishRoom"`
userId string
}
// readPump pumps messages from the websocket connection to the room's hub.
//
// The application runs readPump in a per-connection goroutine. The application
// ensures that there is at most one reader on a connection by executing all
// reads from this goroutine.
func (p *player) readPump() {
defer func() {
if p.room != nil {
p.room.disconnect<- p
}
p.sendMove = nil
p.conn.Close()
}()
p.conn.SetReadLimit(maxMessageSize)
p.conn.SetReadDeadline(time.Now().Add(pongWait))
p.conn.SetPongHandler(func(string) error { p.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })
for {
_, msg, err := p.conn.ReadMessage()
if err != nil {
if websocket.IsUnexpectedCloseError(err,
websocket.CloseGoingAway,
websocket.CloseAbnormalClosure,
websocket.CloseNormalClosure,
) {
log.Printf("%v player connection is gone with error: %v", p.color, err)
}
break
}
// Unmarshal message just to get the color.
m := message{}
if err = json.Unmarshal(msg, &m); err != nil {
log.Println("Could not unmarshal msg:", err)
break
}
switch {
case m.Move.Color != "":
// It's a move
m.Move.move = msg
p.room.broadcastMove<- m.Move
case m.Text != "":
// It's a chat message
text := strings.TrimSpace(strings.Replace(m.Text, newline, space, -1))
p.room.broadcastChat<- message{
Text: text,
Username: p.username,
userId: p.userId,
}
case m.Resign:
p.room.broadcastResign<- p.color
case m.DrawOffer:
p.room.broadcastDrawOffer<- p.color
case m.AcceptDraw:
p.room.broadcastAcceptDraw<- p.color
case m.GameOver:
p.room.stopClocks<- true
case m.RematchOffer:
p.room.broadcastRematchOffer<- p.color
case m.AcceptRematch:
p.room.broadcastAcceptRematch<- p.color
case m.FinishRoom:
return
default:
log.Println("Unexpected message", m)
}
}
}
// writePump pumps messages from the room's hub to the websocket connection.
//
// A goroutine running writePump is started for each connection. The
// application ensures that there is at most one writer to a connection by
// executing all writes from this goroutine.
func (p *player) writePump() {
ticker := time.NewTicker(pingPeriod)
defer func() {
ticker.Stop()
p.conn.Close()
}()
for {
select {
case <-p.disconnect:
// Finish this goroutine to not to send messages anymore
return
case move, ok := <-p.sendMove: // Opponent moved a piece
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
payload := websocket.FormatCloseMessage(1001, "")
p.conn.WriteMessage(websocket.CloseMessage, payload)
return
}
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
return
}
w.Write(move)
if err := w.Close(); err != nil {
return
}
case msg, ok := <-p.sendChat: // Chat msg
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
p.conn.WriteMessage(websocket.CloseMessage, []byte{})
return
}
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
log.Println("Could not make next writer:", err)
return
}
w.Write(msgB)
// Add queued chat messages to the current websocket message.
n := len(p.sendChat)
for i := 0; i < n; i++ {
msg = <-p.sendChat
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w.Write([]byte(newline))
w.Write(msgB)
}
if err := w.Close(); err != nil {
log.Println("Could not close writer:", err)
return
}
case <-ticker.C: // ping
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := p.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
log.Println("Could not ping:", err)
return
}
case <-p.clock.C: // Player ran out ouf time
// Inform the opponent about this
p.room.broadcastNoTime<- p.color
data := map[string]string{
"OOT": "MY_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppRanOut: // Opponent ran out ouf time
data := map[string]string{
"OOT": "OPP_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.drawOffer: // Opponent offered draw
data := map[string]string{
"drawOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedDraw: // opponent accepted draw
data := map[string]string{
"oppAcceptedDraw": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppResigned: // opponent resigned
data := map[string]string{
"oppResigned": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.rematchOffer: // Opponent offered rematch
data := map[string]string{
"rematchOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedRematch: // opponent accepted rematch
data := map[string]string{
"oppAcceptedRematch": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReady: // opponent ready
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppDisconnected: // opponent disconnected
data := map[string]string{
"waitingOpp": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReconnected: // opponent reconnected
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppGone: // opponent is gone
data := map[string]string{
"oppGone": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
}
}
}
// JSON-marshal and send message to the connection.
func sendTextMsg(data map[string]string, conn *websocket.Conn) error {
dataB, err := json.Marshal(data)
if err != nil {
return err
}
conn.SetWriteDeadline(time.Now().Add(writeWait))
w, err := conn.NextWriter(websocket.TextMessage)
if err != nil {
return err
}
w.Write(dataB)
return w.Close()
}
// serveGame handles websocket requests from the peer.
func (rout *router) | serveGame | identifier_name | |
player.go | chan bool
oppAcceptedDraw chan bool
oppResigned chan bool
rematchOffer chan bool
oppAcceptedRematch chan bool
oppReady chan bool
oppDisconnected chan bool
oppGone chan bool
oppReconnected chan bool
cleanup func()
switchColors func()
color string
gameId string
timeLeft time.Duration
clock *time.Timer
lastMove time.Time
username string
userId string
}
type move struct {
Color string `json:"color"`
Pgn string `json:"pgn"`
move []byte
}
// Chat message
type message struct {
Move move `json:"move,omitempty"`
Text string `json:"chat"`
Username string `json:"from"`
Resign bool `json:"resign"`
DrawOffer bool `json:"drawOffer"`
AcceptDraw bool `json:"acceptDraw"`
GameOver bool `json:"gameOver"`
RematchOffer bool `json:"rematchOffer"`
AcceptRematch bool `json:"acceptRematch"`
FinishRoom bool `json:"finishRoom"`
userId string
}
// readPump pumps messages from the websocket connection to the room's hub.
//
// The application runs readPump in a per-connection goroutine. The application
// ensures that there is at most one reader on a connection by executing all
// reads from this goroutine.
func (p *player) readPump() {
defer func() {
if p.room != nil {
p.room.disconnect<- p
}
p.sendMove = nil
p.conn.Close()
}()
p.conn.SetReadLimit(maxMessageSize)
p.conn.SetReadDeadline(time.Now().Add(pongWait))
p.conn.SetPongHandler(func(string) error { p.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })
for {
_, msg, err := p.conn.ReadMessage()
if err != nil {
if websocket.IsUnexpectedCloseError(err,
websocket.CloseGoingAway,
websocket.CloseAbnormalClosure,
websocket.CloseNormalClosure,
) {
log.Printf("%v player connection is gone with error: %v", p.color, err)
}
break
}
// Unmarshal message just to get the color.
m := message{}
if err = json.Unmarshal(msg, &m); err != nil {
log.Println("Could not unmarshal msg:", err)
break
}
switch {
case m.Move.Color != "":
// It's a move
m.Move.move = msg
p.room.broadcastMove<- m.Move
case m.Text != "":
// It's a chat message
text := strings.TrimSpace(strings.Replace(m.Text, newline, space, -1))
p.room.broadcastChat<- message{
Text: text,
Username: p.username,
userId: p.userId,
}
case m.Resign:
p.room.broadcastResign<- p.color
case m.DrawOffer:
p.room.broadcastDrawOffer<- p.color
case m.AcceptDraw:
p.room.broadcastAcceptDraw<- p.color
case m.GameOver:
p.room.stopClocks<- true
case m.RematchOffer:
p.room.broadcastRematchOffer<- p.color
case m.AcceptRematch:
p.room.broadcastAcceptRematch<- p.color
case m.FinishRoom:
return
default:
log.Println("Unexpected message", m)
}
}
}
// writePump pumps messages from the room's hub to the websocket connection.
//
// A goroutine running writePump is started for each connection. The
// application ensures that there is at most one writer to a connection by
// executing all writes from this goroutine.
func (p *player) writePump() | w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
return
}
w.Write(move)
if err := w.Close(); err != nil {
return
}
case msg, ok := <-p.sendChat: // Chat msg
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
p.conn.WriteMessage(websocket.CloseMessage, []byte{})
return
}
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
log.Println("Could not make next writer:", err)
return
}
w.Write(msgB)
// Add queued chat messages to the current websocket message.
n := len(p.sendChat)
for i := 0; i < n; i++ {
msg = <-p.sendChat
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w.Write([]byte(newline))
w.Write(msgB)
}
if err := w.Close(); err != nil {
log.Println("Could not close writer:", err)
return
}
case <-ticker.C: // ping
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := p.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
log.Println("Could not ping:", err)
return
}
case <-p.clock.C: // Player ran out ouf time
// Inform the opponent about this
p.room.broadcastNoTime<- p.color
data := map[string]string{
"OOT": "MY_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppRanOut: // Opponent ran out ouf time
data := map[string]string{
"OOT": "OPP_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.drawOffer: // Opponent offered draw
data := map[string]string{
"drawOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedDraw: // opponent accepted draw
data := map[string]string{
"oppAcceptedDraw": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppResigned: // opponent resigned
data := map[string]string{
"oppResigned": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.rematchOffer: // Opponent offered rematch
data := map[string]string{
"rematchOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedRematch: // opponent accepted rematch
data := map[string]string{
"oppAcceptedRematch": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReady: // opponent ready
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppDisconnected: // opponent disconnected
data := map[string]string{
"waitingOpp": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReconnected: // opponent reconnected
data := map[string]string{
| {
ticker := time.NewTicker(pingPeriod)
defer func() {
ticker.Stop()
p.conn.Close()
}()
for {
select {
case <-p.disconnect:
// Finish this goroutine to not to send messages anymore
return
case move, ok := <-p.sendMove: // Opponent moved a piece
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
payload := websocket.FormatCloseMessage(1001, "")
p.conn.WriteMessage(websocket.CloseMessage, payload)
return
}
| identifier_body |
player.go | chan bool
oppAcceptedDraw chan bool
oppResigned chan bool
rematchOffer chan bool
oppAcceptedRematch chan bool
oppReady chan bool
oppDisconnected chan bool
oppGone chan bool
oppReconnected chan bool
cleanup func()
switchColors func()
color string
gameId string
timeLeft time.Duration
clock *time.Timer
lastMove time.Time
username string
userId string
}
type move struct {
Color string `json:"color"`
Pgn string `json:"pgn"`
move []byte
}
// Chat message
type message struct {
Move move `json:"move,omitempty"`
Text string `json:"chat"`
Username string `json:"from"`
Resign bool `json:"resign"`
DrawOffer bool `json:"drawOffer"`
AcceptDraw bool `json:"acceptDraw"`
GameOver bool `json:"gameOver"`
RematchOffer bool `json:"rematchOffer"`
AcceptRematch bool `json:"acceptRematch"`
FinishRoom bool `json:"finishRoom"`
userId string
}
// readPump pumps messages from the websocket connection to the room's hub.
//
// The application runs readPump in a per-connection goroutine. The application
// ensures that there is at most one reader on a connection by executing all
// reads from this goroutine.
func (p *player) readPump() {
defer func() {
if p.room != nil {
p.room.disconnect<- p
}
p.sendMove = nil
p.conn.Close()
}()
p.conn.SetReadLimit(maxMessageSize)
p.conn.SetReadDeadline(time.Now().Add(pongWait))
p.conn.SetPongHandler(func(string) error { p.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })
for {
_, msg, err := p.conn.ReadMessage()
if err != nil {
if websocket.IsUnexpectedCloseError(err,
websocket.CloseGoingAway,
websocket.CloseAbnormalClosure,
websocket.CloseNormalClosure,
) {
log.Printf("%v player connection is gone with error: %v", p.color, err)
}
break
}
// Unmarshal message just to get the color.
m := message{}
if err = json.Unmarshal(msg, &m); err != nil {
log.Println("Could not unmarshal msg:", err)
break
}
switch {
case m.Move.Color != "":
// It's a move
m.Move.move = msg
p.room.broadcastMove<- m.Move
case m.Text != "":
// It's a chat message
text := strings.TrimSpace(strings.Replace(m.Text, newline, space, -1))
p.room.broadcastChat<- message{
Text: text,
Username: p.username,
userId: p.userId,
}
case m.Resign:
p.room.broadcastResign<- p.color
case m.DrawOffer:
p.room.broadcastDrawOffer<- p.color
case m.AcceptDraw:
p.room.broadcastAcceptDraw<- p.color
case m.GameOver:
p.room.stopClocks<- true
case m.RematchOffer:
p.room.broadcastRematchOffer<- p.color
case m.AcceptRematch:
p.room.broadcastAcceptRematch<- p.color
case m.FinishRoom:
return
default:
log.Println("Unexpected message", m)
}
}
}
// writePump pumps messages from the room's hub to the websocket connection.
//
// A goroutine running writePump is started for each connection. The
// application ensures that there is at most one writer to a connection by
// executing all writes from this goroutine.
func (p *player) writePump() {
ticker := time.NewTicker(pingPeriod)
defer func() {
ticker.Stop()
p.conn.Close()
}()
for {
select {
case <-p.disconnect:
// Finish this goroutine to not to send messages anymore
return
case move, ok := <-p.sendMove: // Opponent moved a piece
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
payload := websocket.FormatCloseMessage(1001, "")
p.conn.WriteMessage(websocket.CloseMessage, payload)
return
}
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
return
}
w.Write(move)
if err := w.Close(); err != nil {
return
}
case msg, ok := <-p.sendChat: // Chat msg
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
p.conn.WriteMessage(websocket.CloseMessage, []byte{})
return
}
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil |
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
log.Println("Could not make next writer:", err)
return
}
w.Write(msgB)
// Add queued chat messages to the current websocket message.
n := len(p.sendChat)
for i := 0; i < n; i++ {
msg = <-p.sendChat
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w.Write([]byte(newline))
w.Write(msgB)
}
if err := w.Close(); err != nil {
log.Println("Could not close writer:", err)
return
}
case <-ticker.C: // ping
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := p.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
log.Println("Could not ping:", err)
return
}
case <-p.clock.C: // Player ran out ouf time
// Inform the opponent about this
p.room.broadcastNoTime<- p.color
data := map[string]string{
"OOT": "MY_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppRanOut: // Opponent ran out ouf time
data := map[string]string{
"OOT": "OPP_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.drawOffer: // Opponent offered draw
data := map[string]string{
"drawOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedDraw: // opponent accepted draw
data := map[string]string{
"oppAcceptedDraw": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppResigned: // opponent resigned
data := map[string]string{
"oppResigned": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.rematchOffer: // Opponent offered rematch
data := map[string]string{
"rematchOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedRematch: // opponent accepted rematch
data := map[string]string{
"oppAcceptedRematch": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReady: // opponent ready
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppDisconnected: // opponent disconnected
data := map[string]string{
"waitingOpp": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReconnected: // opponent reconnected
data := map[string]string | {
log.Println("Could not marshal data:", err)
break
} | conditional_block |
player.go | {
defer func() {
if p.room != nil {
p.room.disconnect<- p
}
p.sendMove = nil
p.conn.Close()
}()
p.conn.SetReadLimit(maxMessageSize)
p.conn.SetReadDeadline(time.Now().Add(pongWait))
p.conn.SetPongHandler(func(string) error { p.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })
for {
_, msg, err := p.conn.ReadMessage()
if err != nil {
if websocket.IsUnexpectedCloseError(err,
websocket.CloseGoingAway,
websocket.CloseAbnormalClosure,
websocket.CloseNormalClosure,
) {
log.Printf("%v player connection is gone with error: %v", p.color, err)
}
break
}
// Unmarshal message just to get the color.
m := message{}
if err = json.Unmarshal(msg, &m); err != nil {
log.Println("Could not unmarshal msg:", err)
break
}
switch {
case m.Move.Color != "":
// It's a move
m.Move.move = msg
p.room.broadcastMove<- m.Move
case m.Text != "":
// It's a chat message
text := strings.TrimSpace(strings.Replace(m.Text, newline, space, -1))
p.room.broadcastChat<- message{
Text: text,
Username: p.username,
userId: p.userId,
}
case m.Resign:
p.room.broadcastResign<- p.color
case m.DrawOffer:
p.room.broadcastDrawOffer<- p.color
case m.AcceptDraw:
p.room.broadcastAcceptDraw<- p.color
case m.GameOver:
p.room.stopClocks<- true
case m.RematchOffer:
p.room.broadcastRematchOffer<- p.color
case m.AcceptRematch:
p.room.broadcastAcceptRematch<- p.color
case m.FinishRoom:
return
default:
log.Println("Unexpected message", m)
}
}
}
// writePump pumps messages from the room's hub to the websocket connection.
//
// A goroutine running writePump is started for each connection. The
// application ensures that there is at most one writer to a connection by
// executing all writes from this goroutine.
func (p *player) writePump() {
ticker := time.NewTicker(pingPeriod)
defer func() {
ticker.Stop()
p.conn.Close()
}()
for {
select {
case <-p.disconnect:
// Finish this goroutine to not to send messages anymore
return
case move, ok := <-p.sendMove: // Opponent moved a piece
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
payload := websocket.FormatCloseMessage(1001, "")
p.conn.WriteMessage(websocket.CloseMessage, payload)
return
}
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
return
}
w.Write(move)
if err := w.Close(); err != nil {
return
}
case msg, ok := <-p.sendChat: // Chat msg
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if !ok {
// The hub closed the channel.
p.conn.WriteMessage(websocket.CloseMessage, []byte{})
return
}
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w, err := p.conn.NextWriter(websocket.TextMessage)
if err != nil {
log.Println("Could not make next writer:", err)
return
}
w.Write(msgB)
// Add queued chat messages to the current websocket message.
n := len(p.sendChat)
for i := 0; i < n; i++ {
msg = <-p.sendChat
if (msg.userId == p.userId) && (msg.Username == DEFAULT_USERNAME) {
msg.Username = "you"
}
msgB, err := json.Marshal(msg)
if err != nil {
log.Println("Could not marshal data:", err)
break
}
w.Write([]byte(newline))
w.Write(msgB)
}
if err := w.Close(); err != nil {
log.Println("Could not close writer:", err)
return
}
case <-ticker.C: // ping
p.conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := p.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
log.Println("Could not ping:", err)
return
}
case <-p.clock.C: // Player ran out ouf time
// Inform the opponent about this
p.room.broadcastNoTime<- p.color
data := map[string]string{
"OOT": "MY_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppRanOut: // Opponent ran out ouf time
data := map[string]string{
"OOT": "OPP_CLOCK",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.drawOffer: // Opponent offered draw
data := map[string]string{
"drawOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedDraw: // opponent accepted draw
data := map[string]string{
"oppAcceptedDraw": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppResigned: // opponent resigned
data := map[string]string{
"oppResigned": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.rematchOffer: // Opponent offered rematch
data := map[string]string{
"rematchOffer": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppAcceptedRematch: // opponent accepted rematch
data := map[string]string{
"oppAcceptedRematch": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReady: // opponent ready
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppDisconnected: // opponent disconnected
data := map[string]string{
"waitingOpp": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppReconnected: // opponent reconnected
data := map[string]string{
"oppReady": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
case <-p.oppGone: // opponent is gone
data := map[string]string{
"oppGone": "true",
}
if err := sendTextMsg(data, p.conn); err != nil {
log.Println("Could not send text msg:", err)
return
}
}
}
}
// JSON-marshal and send message to the connection.
func sendTextMsg(data map[string]string, conn *websocket.Conn) error {
dataB, err := json.Marshal(data)
if err != nil {
return err
}
conn.SetWriteDeadline(time.Now().Add(writeWait))
w, err := conn.NextWriter(websocket.TextMessage)
if err != nil {
return err
}
w.Write(dataB)
return w.Close()
}
// serveGame handles websocket requests from the peer.
func (rout *router) serveGame(w http.ResponseWriter, r *http.Request,
gameId, color string, minutes int, cleanup, switchColors func(),
username, userId string) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Println(err)
http.Error(w, "Could not upgrade conn", http.StatusInternalServerError)
return
}
playerClock := time.NewTimer(time.Duration(minutes) * time.Minute)
playerClock.Stop()
p := &player{ | cleanup: cleanup, | random_line_split | |
plugin.go | 1beta1.AddToScheme(scheme)
kubeletconfigv1.AddToScheme(scheme)
}
// RegisterCredentialProviderPlugins is called from kubelet to register external credential provider
// plugins according to the CredentialProviderConfig config file.
func RegisterCredentialProviderPlugins(pluginConfigFile, pluginBinDir string) error {
if _, err := os.Stat(pluginBinDir); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary directory %s did not exist", pluginBinDir)
}
return fmt.Errorf("error inspecting binary directory %s: %w", pluginBinDir, err)
}
credentialProviderConfig, err := readCredentialProviderConfigFile(pluginConfigFile)
if err != nil {
return err
}
errs := validateCredentialProviderConfig(credentialProviderConfig)
if len(errs) > 0 {
return fmt.Errorf("failed to validate credential provider config: %v", errs.ToAggregate())
}
// Register metrics for credential providers
registerMetrics()
for _, provider := range credentialProviderConfig.Providers {
pluginBin := filepath.Join(pluginBinDir, provider.Name)
if _, err := os.Stat(pluginBin); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary executable %s did not exist", pluginBin)
}
return fmt.Errorf("error inspecting binary executable %s: %w", pluginBin, err)
}
plugin, err := newPluginProvider(pluginBinDir, provider)
if err != nil {
return fmt.Errorf("error initializing plugin provider %s: %w", provider.Name, err)
}
credentialprovider.RegisterCredentialProvider(provider.Name, plugin)
}
return nil
}
// newPluginProvider returns a new pluginProvider based on the credential provider config.
func newPluginProvider(pluginBinDir string, provider kubeletconfig.CredentialProvider) (*pluginProvider, error) {
mediaType := "application/json"
info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
if !ok {
return nil, fmt.Errorf("unsupported media type %q", mediaType)
}
gv, ok := apiVersions[provider.APIVersion]
if !ok {
return nil, fmt.Errorf("invalid apiVersion: %q", provider.APIVersion)
}
clock := clock.RealClock{}
return &pluginProvider{
clock: clock,
matchImages: provider.MatchImages,
cache: cache.NewExpirationStore(cacheKeyFunc, &cacheExpirationPolicy{clock: clock}),
defaultCacheDuration: provider.DefaultCacheDuration.Duration,
lastCachePurge: clock.Now(),
plugin: &execPlugin{
name: provider.Name,
apiVersion: provider.APIVersion,
encoder: codecs.EncoderForVersion(info.Serializer, gv),
pluginBinDir: pluginBinDir,
args: provider.Args,
envVars: provider.Env,
environ: os.Environ,
},
}, nil
}
// pluginProvider is the plugin-based implementation of the DockerConfigProvider interface.
type pluginProvider struct {
clock clock.Clock
sync.Mutex
group singleflight.Group
// matchImages defines the matching image URLs this plugin should operate against.
// The plugin provider will not return any credentials for images that do not match
// against this list of match URLs.
matchImages []string
// cache stores DockerConfig entries with an expiration time based on the cache duration
// returned from the credential provider plugin.
cache cache.Store
// defaultCacheDuration is the default duration credentials are cached in-memory if the auth plugin
// response did not provide a cache duration for credentials.
defaultCacheDuration time.Duration
// plugin is the exec implementation of the credential providing plugin.
plugin Plugin
// lastCachePurge is the last time cache is cleaned for expired entries.
lastCachePurge time.Time
}
// cacheEntry is the cache object that will be stored in cache.Store.
type cacheEntry struct {
key string
credentials credentialprovider.DockerConfig
expiresAt time.Time
}
// cacheKeyFunc extracts AuthEntry.MatchKey as the cache key function for the plugin provider.
func cacheKeyFunc(obj interface{}) (string, error) {
key := obj.(*cacheEntry).key
return key, nil
}
// cacheExpirationPolicy defines implements cache.ExpirationPolicy, determining expiration based on the expiresAt timestamp.
type cacheExpirationPolicy struct {
clock clock.Clock
}
// IsExpired returns true if the current time is after cacheEntry.expiresAt, which is determined by the
// cache duration returned from the credential provider plugin response.
func (c *cacheExpirationPolicy) IsExpired(entry *cache.TimestampedEntry) bool {
return c.clock.Now().After(entry.Obj.(*cacheEntry).expiresAt)
}
// Provide returns a credentialprovider.DockerConfig based on the credentials returned
// from cache or the exec plugin.
func (p *pluginProvider) Provide(image string) credentialprovider.DockerConfig {
if !p.isImageAllowed(image) {
return credentialprovider.DockerConfig{}
}
cachedConfig, found, err := p.getCachedCredentials(image)
if err != nil {
klog.Errorf("Failed to get cached docker config: %v", err)
return credentialprovider.DockerConfig{}
}
if found {
return cachedConfig
}
// ExecPlugin is wrapped in single flight to exec plugin once for concurrent same image request.
// The caveat here is we don't know cacheKeyType yet, so if cacheKeyType is registry/global and credentials saved in cache
// on per registry/global basis then exec will be called for all requests if requests are made concurrently.
// foo.bar.registry
// foo.bar.registry/image1
// foo.bar.registry/image2
res, err, _ := p.group.Do(image, func() (interface{}, error) {
return p.plugin.ExecPlugin(context.Background(), image)
})
if err != nil {
klog.Errorf("Failed getting credential from external registry credential provider: %v", err)
return credentialprovider.DockerConfig{}
}
response, ok := res.(*credentialproviderapi.CredentialProviderResponse)
if !ok {
klog.Errorf("Invalid response type returned by external credential provider")
return credentialprovider.DockerConfig{}
}
var cacheKey string
switch cacheKeyType := response.CacheKeyType; cacheKeyType {
case credentialproviderapi.ImagePluginCacheKeyType:
cacheKey = image
case credentialproviderapi.RegistryPluginCacheKeyType:
registry := parseRegistry(image)
cacheKey = registry
case credentialproviderapi.GlobalPluginCacheKeyType:
cacheKey = globalCacheKey
default:
klog.Errorf("credential provider plugin did not return a valid cacheKeyType: %q", cacheKeyType)
return credentialprovider.DockerConfig{}
}
dockerConfig := make(credentialprovider.DockerConfig, len(response.Auth))
for matchImage, authConfig := range response.Auth {
dockerConfig[matchImage] = credentialprovider.DockerConfigEntry{
Username: authConfig.Username,
Password: authConfig.Password,
}
}
// cache duration was explicitly 0 so don't cache this response at all.
if response.CacheDuration != nil && response.CacheDuration.Duration == 0 {
return dockerConfig
}
var expiresAt time.Time
// nil cache duration means use the default cache duration
if response.CacheDuration == nil {
if p.defaultCacheDuration == 0 {
return dockerConfig
}
expiresAt = p.clock.Now().Add(p.defaultCacheDuration)
} else {
expiresAt = p.clock.Now().Add(response.CacheDuration.Duration)
}
cachedEntry := &cacheEntry{
key: cacheKey,
credentials: dockerConfig,
expiresAt: expiresAt,
}
if err := p.cache.Add(cachedEntry); err != nil {
klog.Errorf("Error adding auth entry to cache: %v", err)
}
return dockerConfig
}
// Enabled always returns true since registration of the plugin via kubelet implies it should be enabled.
func (p *pluginProvider) Enabled() bool |
// isImageAllowed returns true if the image matches against the list of allowed matches by the plugin.
func (p *pluginProvider) isImageAllowed(image string) bool {
for _, matchImage := range p.matchImages {
if matched, _ := credentialprovider.URLsMatchStr(matchImage, image); matched {
return true
}
}
return false
}
// getCachedCredentials returns a credentialprovider.DockerConfig if cached from the plugin.
func (p *pluginProvider) getCachedCredentials(image string) (credentialprovider.DockerConfig, bool, error) {
p.Lock()
if p.clock.Now().After(p.lastCachePurge.Add(cachePurgeInterval)) {
// NewExpirationCache purges expired entries when List() is called
// The expired entry in the cache is removed only when Get or List called on it.
// List() is called on some interval to remove those expired entries on which Get is never called.
_ = p.cache.List()
p.lastCachePurge = p.clock.Now()
}
p.Unlock()
obj, found, err := p.cache.GetByKey(image)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
registry := parseRegistry(image)
obj, found, err = p.cache.GetByKey(registry)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
obj, found, err = p.cache.Get | {
return true
} | identifier_body |
plugin.go | v1beta1.AddToScheme(scheme)
kubeletconfigv1.AddToScheme(scheme)
}
// RegisterCredentialProviderPlugins is called from kubelet to register external credential provider
// plugins according to the CredentialProviderConfig config file.
func RegisterCredentialProviderPlugins(pluginConfigFile, pluginBinDir string) error {
if _, err := os.Stat(pluginBinDir); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary directory %s did not exist", pluginBinDir)
}
return fmt.Errorf("error inspecting binary directory %s: %w", pluginBinDir, err)
}
credentialProviderConfig, err := readCredentialProviderConfigFile(pluginConfigFile)
if err != nil {
return err
}
errs := validateCredentialProviderConfig(credentialProviderConfig)
if len(errs) > 0 {
return fmt.Errorf("failed to validate credential provider config: %v", errs.ToAggregate())
}
// Register metrics for credential providers
registerMetrics()
for _, provider := range credentialProviderConfig.Providers {
pluginBin := filepath.Join(pluginBinDir, provider.Name)
if _, err := os.Stat(pluginBin); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary executable %s did not exist", pluginBin)
}
return fmt.Errorf("error inspecting binary executable %s: %w", pluginBin, err)
}
plugin, err := newPluginProvider(pluginBinDir, provider)
if err != nil {
return fmt.Errorf("error initializing plugin provider %s: %w", provider.Name, err)
}
credentialprovider.RegisterCredentialProvider(provider.Name, plugin)
}
return nil
}
// newPluginProvider returns a new pluginProvider based on the credential provider config.
func newPluginProvider(pluginBinDir string, provider kubeletconfig.CredentialProvider) (*pluginProvider, error) {
mediaType := "application/json"
info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
if !ok {
return nil, fmt.Errorf("unsupported media type %q", mediaType)
}
gv, ok := apiVersions[provider.APIVersion]
if !ok {
return nil, fmt.Errorf("invalid apiVersion: %q", provider.APIVersion)
}
clock := clock.RealClock{}
return &pluginProvider{
clock: clock,
matchImages: provider.MatchImages,
cache: cache.NewExpirationStore(cacheKeyFunc, &cacheExpirationPolicy{clock: clock}),
defaultCacheDuration: provider.DefaultCacheDuration.Duration,
lastCachePurge: clock.Now(),
plugin: &execPlugin{
name: provider.Name,
apiVersion: provider.APIVersion,
encoder: codecs.EncoderForVersion(info.Serializer, gv),
pluginBinDir: pluginBinDir,
args: provider.Args,
envVars: provider.Env,
environ: os.Environ,
},
}, nil
}
// pluginProvider is the plugin-based implementation of the DockerConfigProvider interface.
type pluginProvider struct {
clock clock.Clock
sync.Mutex
group singleflight.Group
// matchImages defines the matching image URLs this plugin should operate against.
// The plugin provider will not return any credentials for images that do not match
// against this list of match URLs.
matchImages []string
// cache stores DockerConfig entries with an expiration time based on the cache duration
// returned from the credential provider plugin.
cache cache.Store
// defaultCacheDuration is the default duration credentials are cached in-memory if the auth plugin
// response did not provide a cache duration for credentials.
defaultCacheDuration time.Duration
// plugin is the exec implementation of the credential providing plugin.
plugin Plugin
// lastCachePurge is the last time cache is cleaned for expired entries.
lastCachePurge time.Time
}
// cacheEntry is the cache object that will be stored in cache.Store.
type cacheEntry struct {
key string
credentials credentialprovider.DockerConfig
expiresAt time.Time
}
// cacheKeyFunc extracts AuthEntry.MatchKey as the cache key function for the plugin provider.
func cacheKeyFunc(obj interface{}) (string, error) {
key := obj.(*cacheEntry).key
return key, nil
}
// cacheExpirationPolicy defines implements cache.ExpirationPolicy, determining expiration based on the expiresAt timestamp.
type cacheExpirationPolicy struct {
clock clock.Clock
}
// IsExpired returns true if the current time is after cacheEntry.expiresAt, which is determined by the
// cache duration returned from the credential provider plugin response.
func (c *cacheExpirationPolicy) IsExpired(entry *cache.TimestampedEntry) bool {
return c.clock.Now().After(entry.Obj.(*cacheEntry).expiresAt)
}
// Provide returns a credentialprovider.DockerConfig based on the credentials returned
// from cache or the exec plugin.
func (p *pluginProvider) Provide(image string) credentialprovider.DockerConfig {
if !p.isImageAllowed(image) {
return credentialprovider.DockerConfig{}
}
cachedConfig, found, err := p.getCachedCredentials(image)
if err != nil {
klog.Errorf("Failed to get cached docker config: %v", err)
return credentialprovider.DockerConfig{}
}
if found {
return cachedConfig
}
// ExecPlugin is wrapped in single flight to exec plugin once for concurrent same image request.
// The caveat here is we don't know cacheKeyType yet, so if cacheKeyType is registry/global and credentials saved in cache
// on per registry/global basis then exec will be called for all requests if requests are made concurrently.
// foo.bar.registry
// foo.bar.registry/image1
// foo.bar.registry/image2
res, err, _ := p.group.Do(image, func() (interface{}, error) {
return p.plugin.ExecPlugin(context.Background(), image)
})
if err != nil { | if !ok {
klog.Errorf("Invalid response type returned by external credential provider")
return credentialprovider.DockerConfig{}
}
var cacheKey string
switch cacheKeyType := response.CacheKeyType; cacheKeyType {
case credentialproviderapi.ImagePluginCacheKeyType:
cacheKey = image
case credentialproviderapi.RegistryPluginCacheKeyType:
registry := parseRegistry(image)
cacheKey = registry
case credentialproviderapi.GlobalPluginCacheKeyType:
cacheKey = globalCacheKey
default:
klog.Errorf("credential provider plugin did not return a valid cacheKeyType: %q", cacheKeyType)
return credentialprovider.DockerConfig{}
}
dockerConfig := make(credentialprovider.DockerConfig, len(response.Auth))
for matchImage, authConfig := range response.Auth {
dockerConfig[matchImage] = credentialprovider.DockerConfigEntry{
Username: authConfig.Username,
Password: authConfig.Password,
}
}
// cache duration was explicitly 0 so don't cache this response at all.
if response.CacheDuration != nil && response.CacheDuration.Duration == 0 {
return dockerConfig
}
var expiresAt time.Time
// nil cache duration means use the default cache duration
if response.CacheDuration == nil {
if p.defaultCacheDuration == 0 {
return dockerConfig
}
expiresAt = p.clock.Now().Add(p.defaultCacheDuration)
} else {
expiresAt = p.clock.Now().Add(response.CacheDuration.Duration)
}
cachedEntry := &cacheEntry{
key: cacheKey,
credentials: dockerConfig,
expiresAt: expiresAt,
}
if err := p.cache.Add(cachedEntry); err != nil {
klog.Errorf("Error adding auth entry to cache: %v", err)
}
return dockerConfig
}
// Enabled always returns true since registration of the plugin via kubelet implies it should be enabled.
func (p *pluginProvider) Enabled() bool {
return true
}
// isImageAllowed returns true if the image matches against the list of allowed matches by the plugin.
func (p *pluginProvider) isImageAllowed(image string) bool {
for _, matchImage := range p.matchImages {
if matched, _ := credentialprovider.URLsMatchStr(matchImage, image); matched {
return true
}
}
return false
}
// getCachedCredentials returns a credentialprovider.DockerConfig if cached from the plugin.
func (p *pluginProvider) getCachedCredentials(image string) (credentialprovider.DockerConfig, bool, error) {
p.Lock()
if p.clock.Now().After(p.lastCachePurge.Add(cachePurgeInterval)) {
// NewExpirationCache purges expired entries when List() is called
// The expired entry in the cache is removed only when Get or List called on it.
// List() is called on some interval to remove those expired entries on which Get is never called.
_ = p.cache.List()
p.lastCachePurge = p.clock.Now()
}
p.Unlock()
obj, found, err := p.cache.GetByKey(image)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
registry := parseRegistry(image)
obj, found, err = p.cache.GetByKey(registry)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
obj, found, err = p.cache.GetByKey | klog.Errorf("Failed getting credential from external registry credential provider: %v", err)
return credentialprovider.DockerConfig{}
}
response, ok := res.(*credentialproviderapi.CredentialProviderResponse) | random_line_split |
plugin.go | v1beta1.AddToScheme(scheme)
kubeletconfigv1.AddToScheme(scheme)
}
// RegisterCredentialProviderPlugins is called from kubelet to register external credential provider
// plugins according to the CredentialProviderConfig config file.
func RegisterCredentialProviderPlugins(pluginConfigFile, pluginBinDir string) error {
if _, err := os.Stat(pluginBinDir); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary directory %s did not exist", pluginBinDir)
}
return fmt.Errorf("error inspecting binary directory %s: %w", pluginBinDir, err)
}
credentialProviderConfig, err := readCredentialProviderConfigFile(pluginConfigFile)
if err != nil {
return err
}
errs := validateCredentialProviderConfig(credentialProviderConfig)
if len(errs) > 0 {
return fmt.Errorf("failed to validate credential provider config: %v", errs.ToAggregate())
}
// Register metrics for credential providers
registerMetrics()
for _, provider := range credentialProviderConfig.Providers {
pluginBin := filepath.Join(pluginBinDir, provider.Name)
if _, err := os.Stat(pluginBin); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary executable %s did not exist", pluginBin)
}
return fmt.Errorf("error inspecting binary executable %s: %w", pluginBin, err)
}
plugin, err := newPluginProvider(pluginBinDir, provider)
if err != nil {
return fmt.Errorf("error initializing plugin provider %s: %w", provider.Name, err)
}
credentialprovider.RegisterCredentialProvider(provider.Name, plugin)
}
return nil
}
// newPluginProvider returns a new pluginProvider based on the credential provider config.
func | (pluginBinDir string, provider kubeletconfig.CredentialProvider) (*pluginProvider, error) {
mediaType := "application/json"
info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
if !ok {
return nil, fmt.Errorf("unsupported media type %q", mediaType)
}
gv, ok := apiVersions[provider.APIVersion]
if !ok {
return nil, fmt.Errorf("invalid apiVersion: %q", provider.APIVersion)
}
clock := clock.RealClock{}
return &pluginProvider{
clock: clock,
matchImages: provider.MatchImages,
cache: cache.NewExpirationStore(cacheKeyFunc, &cacheExpirationPolicy{clock: clock}),
defaultCacheDuration: provider.DefaultCacheDuration.Duration,
lastCachePurge: clock.Now(),
plugin: &execPlugin{
name: provider.Name,
apiVersion: provider.APIVersion,
encoder: codecs.EncoderForVersion(info.Serializer, gv),
pluginBinDir: pluginBinDir,
args: provider.Args,
envVars: provider.Env,
environ: os.Environ,
},
}, nil
}
// pluginProvider is the plugin-based implementation of the DockerConfigProvider interface.
type pluginProvider struct {
clock clock.Clock
sync.Mutex
group singleflight.Group
// matchImages defines the matching image URLs this plugin should operate against.
// The plugin provider will not return any credentials for images that do not match
// against this list of match URLs.
matchImages []string
// cache stores DockerConfig entries with an expiration time based on the cache duration
// returned from the credential provider plugin.
cache cache.Store
// defaultCacheDuration is the default duration credentials are cached in-memory if the auth plugin
// response did not provide a cache duration for credentials.
defaultCacheDuration time.Duration
// plugin is the exec implementation of the credential providing plugin.
plugin Plugin
// lastCachePurge is the last time cache is cleaned for expired entries.
lastCachePurge time.Time
}
// cacheEntry is the cache object that will be stored in cache.Store.
type cacheEntry struct {
key string
credentials credentialprovider.DockerConfig
expiresAt time.Time
}
// cacheKeyFunc extracts AuthEntry.MatchKey as the cache key function for the plugin provider.
func cacheKeyFunc(obj interface{}) (string, error) {
key := obj.(*cacheEntry).key
return key, nil
}
// cacheExpirationPolicy defines implements cache.ExpirationPolicy, determining expiration based on the expiresAt timestamp.
type cacheExpirationPolicy struct {
clock clock.Clock
}
// IsExpired returns true if the current time is after cacheEntry.expiresAt, which is determined by the
// cache duration returned from the credential provider plugin response.
func (c *cacheExpirationPolicy) IsExpired(entry *cache.TimestampedEntry) bool {
return c.clock.Now().After(entry.Obj.(*cacheEntry).expiresAt)
}
// Provide returns a credentialprovider.DockerConfig based on the credentials returned
// from cache or the exec plugin.
func (p *pluginProvider) Provide(image string) credentialprovider.DockerConfig {
if !p.isImageAllowed(image) {
return credentialprovider.DockerConfig{}
}
cachedConfig, found, err := p.getCachedCredentials(image)
if err != nil {
klog.Errorf("Failed to get cached docker config: %v", err)
return credentialprovider.DockerConfig{}
}
if found {
return cachedConfig
}
// ExecPlugin is wrapped in single flight to exec plugin once for concurrent same image request.
// The caveat here is we don't know cacheKeyType yet, so if cacheKeyType is registry/global and credentials saved in cache
// on per registry/global basis then exec will be called for all requests if requests are made concurrently.
// foo.bar.registry
// foo.bar.registry/image1
// foo.bar.registry/image2
res, err, _ := p.group.Do(image, func() (interface{}, error) {
return p.plugin.ExecPlugin(context.Background(), image)
})
if err != nil {
klog.Errorf("Failed getting credential from external registry credential provider: %v", err)
return credentialprovider.DockerConfig{}
}
response, ok := res.(*credentialproviderapi.CredentialProviderResponse)
if !ok {
klog.Errorf("Invalid response type returned by external credential provider")
return credentialprovider.DockerConfig{}
}
var cacheKey string
switch cacheKeyType := response.CacheKeyType; cacheKeyType {
case credentialproviderapi.ImagePluginCacheKeyType:
cacheKey = image
case credentialproviderapi.RegistryPluginCacheKeyType:
registry := parseRegistry(image)
cacheKey = registry
case credentialproviderapi.GlobalPluginCacheKeyType:
cacheKey = globalCacheKey
default:
klog.Errorf("credential provider plugin did not return a valid cacheKeyType: %q", cacheKeyType)
return credentialprovider.DockerConfig{}
}
dockerConfig := make(credentialprovider.DockerConfig, len(response.Auth))
for matchImage, authConfig := range response.Auth {
dockerConfig[matchImage] = credentialprovider.DockerConfigEntry{
Username: authConfig.Username,
Password: authConfig.Password,
}
}
// cache duration was explicitly 0 so don't cache this response at all.
if response.CacheDuration != nil && response.CacheDuration.Duration == 0 {
return dockerConfig
}
var expiresAt time.Time
// nil cache duration means use the default cache duration
if response.CacheDuration == nil {
if p.defaultCacheDuration == 0 {
return dockerConfig
}
expiresAt = p.clock.Now().Add(p.defaultCacheDuration)
} else {
expiresAt = p.clock.Now().Add(response.CacheDuration.Duration)
}
cachedEntry := &cacheEntry{
key: cacheKey,
credentials: dockerConfig,
expiresAt: expiresAt,
}
if err := p.cache.Add(cachedEntry); err != nil {
klog.Errorf("Error adding auth entry to cache: %v", err)
}
return dockerConfig
}
// Enabled always returns true since registration of the plugin via kubelet implies it should be enabled.
func (p *pluginProvider) Enabled() bool {
return true
}
// isImageAllowed returns true if the image matches against the list of allowed matches by the plugin.
func (p *pluginProvider) isImageAllowed(image string) bool {
for _, matchImage := range p.matchImages {
if matched, _ := credentialprovider.URLsMatchStr(matchImage, image); matched {
return true
}
}
return false
}
// getCachedCredentials returns a credentialprovider.DockerConfig if cached from the plugin.
func (p *pluginProvider) getCachedCredentials(image string) (credentialprovider.DockerConfig, bool, error) {
p.Lock()
if p.clock.Now().After(p.lastCachePurge.Add(cachePurgeInterval)) {
// NewExpirationCache purges expired entries when List() is called
// The expired entry in the cache is removed only when Get or List called on it.
// List() is called on some interval to remove those expired entries on which Get is never called.
_ = p.cache.List()
p.lastCachePurge = p.clock.Now()
}
p.Unlock()
obj, found, err := p.cache.GetByKey(image)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
registry := parseRegistry(image)
obj, found, err = p.cache.GetByKey(registry)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
obj, found, err = p.cache.Get | newPluginProvider | identifier_name |
plugin.go | 1beta1.AddToScheme(scheme)
kubeletconfigv1.AddToScheme(scheme)
}
// RegisterCredentialProviderPlugins is called from kubelet to register external credential provider
// plugins according to the CredentialProviderConfig config file.
func RegisterCredentialProviderPlugins(pluginConfigFile, pluginBinDir string) error {
if _, err := os.Stat(pluginBinDir); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary directory %s did not exist", pluginBinDir)
}
return fmt.Errorf("error inspecting binary directory %s: %w", pluginBinDir, err)
}
credentialProviderConfig, err := readCredentialProviderConfigFile(pluginConfigFile)
if err != nil {
return err
}
errs := validateCredentialProviderConfig(credentialProviderConfig)
if len(errs) > 0 {
return fmt.Errorf("failed to validate credential provider config: %v", errs.ToAggregate())
}
// Register metrics for credential providers
registerMetrics()
for _, provider := range credentialProviderConfig.Providers {
pluginBin := filepath.Join(pluginBinDir, provider.Name)
if _, err := os.Stat(pluginBin); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary executable %s did not exist", pluginBin)
}
return fmt.Errorf("error inspecting binary executable %s: %w", pluginBin, err)
}
plugin, err := newPluginProvider(pluginBinDir, provider)
if err != nil {
return fmt.Errorf("error initializing plugin provider %s: %w", provider.Name, err)
}
credentialprovider.RegisterCredentialProvider(provider.Name, plugin)
}
return nil
}
// newPluginProvider returns a new pluginProvider based on the credential provider config.
func newPluginProvider(pluginBinDir string, provider kubeletconfig.CredentialProvider) (*pluginProvider, error) {
mediaType := "application/json"
info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
if !ok |
gv, ok := apiVersions[provider.APIVersion]
if !ok {
return nil, fmt.Errorf("invalid apiVersion: %q", provider.APIVersion)
}
clock := clock.RealClock{}
return &pluginProvider{
clock: clock,
matchImages: provider.MatchImages,
cache: cache.NewExpirationStore(cacheKeyFunc, &cacheExpirationPolicy{clock: clock}),
defaultCacheDuration: provider.DefaultCacheDuration.Duration,
lastCachePurge: clock.Now(),
plugin: &execPlugin{
name: provider.Name,
apiVersion: provider.APIVersion,
encoder: codecs.EncoderForVersion(info.Serializer, gv),
pluginBinDir: pluginBinDir,
args: provider.Args,
envVars: provider.Env,
environ: os.Environ,
},
}, nil
}
// pluginProvider is the plugin-based implementation of the DockerConfigProvider interface.
type pluginProvider struct {
clock clock.Clock
sync.Mutex
group singleflight.Group
// matchImages defines the matching image URLs this plugin should operate against.
// The plugin provider will not return any credentials for images that do not match
// against this list of match URLs.
matchImages []string
// cache stores DockerConfig entries with an expiration time based on the cache duration
// returned from the credential provider plugin.
cache cache.Store
// defaultCacheDuration is the default duration credentials are cached in-memory if the auth plugin
// response did not provide a cache duration for credentials.
defaultCacheDuration time.Duration
// plugin is the exec implementation of the credential providing plugin.
plugin Plugin
// lastCachePurge is the last time cache is cleaned for expired entries.
lastCachePurge time.Time
}
// cacheEntry is the cache object that will be stored in cache.Store.
type cacheEntry struct {
key string
credentials credentialprovider.DockerConfig
expiresAt time.Time
}
// cacheKeyFunc extracts AuthEntry.MatchKey as the cache key function for the plugin provider.
func cacheKeyFunc(obj interface{}) (string, error) {
key := obj.(*cacheEntry).key
return key, nil
}
// cacheExpirationPolicy defines implements cache.ExpirationPolicy, determining expiration based on the expiresAt timestamp.
type cacheExpirationPolicy struct {
clock clock.Clock
}
// IsExpired returns true if the current time is after cacheEntry.expiresAt, which is determined by the
// cache duration returned from the credential provider plugin response.
func (c *cacheExpirationPolicy) IsExpired(entry *cache.TimestampedEntry) bool {
return c.clock.Now().After(entry.Obj.(*cacheEntry).expiresAt)
}
// Provide returns a credentialprovider.DockerConfig based on the credentials returned
// from cache or the exec plugin.
func (p *pluginProvider) Provide(image string) credentialprovider.DockerConfig {
if !p.isImageAllowed(image) {
return credentialprovider.DockerConfig{}
}
cachedConfig, found, err := p.getCachedCredentials(image)
if err != nil {
klog.Errorf("Failed to get cached docker config: %v", err)
return credentialprovider.DockerConfig{}
}
if found {
return cachedConfig
}
// ExecPlugin is wrapped in single flight to exec plugin once for concurrent same image request.
// The caveat here is we don't know cacheKeyType yet, so if cacheKeyType is registry/global and credentials saved in cache
// on per registry/global basis then exec will be called for all requests if requests are made concurrently.
// foo.bar.registry
// foo.bar.registry/image1
// foo.bar.registry/image2
res, err, _ := p.group.Do(image, func() (interface{}, error) {
return p.plugin.ExecPlugin(context.Background(), image)
})
if err != nil {
klog.Errorf("Failed getting credential from external registry credential provider: %v", err)
return credentialprovider.DockerConfig{}
}
response, ok := res.(*credentialproviderapi.CredentialProviderResponse)
if !ok {
klog.Errorf("Invalid response type returned by external credential provider")
return credentialprovider.DockerConfig{}
}
var cacheKey string
switch cacheKeyType := response.CacheKeyType; cacheKeyType {
case credentialproviderapi.ImagePluginCacheKeyType:
cacheKey = image
case credentialproviderapi.RegistryPluginCacheKeyType:
registry := parseRegistry(image)
cacheKey = registry
case credentialproviderapi.GlobalPluginCacheKeyType:
cacheKey = globalCacheKey
default:
klog.Errorf("credential provider plugin did not return a valid cacheKeyType: %q", cacheKeyType)
return credentialprovider.DockerConfig{}
}
dockerConfig := make(credentialprovider.DockerConfig, len(response.Auth))
for matchImage, authConfig := range response.Auth {
dockerConfig[matchImage] = credentialprovider.DockerConfigEntry{
Username: authConfig.Username,
Password: authConfig.Password,
}
}
// cache duration was explicitly 0 so don't cache this response at all.
if response.CacheDuration != nil && response.CacheDuration.Duration == 0 {
return dockerConfig
}
var expiresAt time.Time
// nil cache duration means use the default cache duration
if response.CacheDuration == nil {
if p.defaultCacheDuration == 0 {
return dockerConfig
}
expiresAt = p.clock.Now().Add(p.defaultCacheDuration)
} else {
expiresAt = p.clock.Now().Add(response.CacheDuration.Duration)
}
cachedEntry := &cacheEntry{
key: cacheKey,
credentials: dockerConfig,
expiresAt: expiresAt,
}
if err := p.cache.Add(cachedEntry); err != nil {
klog.Errorf("Error adding auth entry to cache: %v", err)
}
return dockerConfig
}
// Enabled always returns true since registration of the plugin via kubelet implies it should be enabled.
func (p *pluginProvider) Enabled() bool {
return true
}
// isImageAllowed returns true if the image matches against the list of allowed matches by the plugin.
func (p *pluginProvider) isImageAllowed(image string) bool {
for _, matchImage := range p.matchImages {
if matched, _ := credentialprovider.URLsMatchStr(matchImage, image); matched {
return true
}
}
return false
}
// getCachedCredentials returns a credentialprovider.DockerConfig if cached from the plugin.
func (p *pluginProvider) getCachedCredentials(image string) (credentialprovider.DockerConfig, bool, error) {
p.Lock()
if p.clock.Now().After(p.lastCachePurge.Add(cachePurgeInterval)) {
// NewExpirationCache purges expired entries when List() is called
// The expired entry in the cache is removed only when Get or List called on it.
// List() is called on some interval to remove those expired entries on which Get is never called.
_ = p.cache.List()
p.lastCachePurge = p.clock.Now()
}
p.Unlock()
obj, found, err := p.cache.GetByKey(image)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
registry := parseRegistry(image)
obj, found, err = p.cache.GetByKey(registry)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
obj, found, err = p.cache.Get | {
return nil, fmt.Errorf("unsupported media type %q", mediaType)
} | conditional_block |
Article.js | () {
var [ claps, setClaps] = React.useState(0);
const follows = [
{
image:
"https://upload.wikimedia.org/wikipedia/commons/a/a7/20180602_FIFA_Friendly_Match_Austria_vs._Germany_Mesut_%C3%96zil_850_0704.jpg",
nama: "Amin Subagiyo",
comment:
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
},
{
image:
"https://cdn.i-scmp.com/sites/default/files/styles/768x768/public/d8/images/methode/2019/12/13/6b06cb22-1ca7-11ea-8971-922fdc94075f_image_hires_132744.jpg?itok=XditGQBc&v=1576214873",
nama: "King Salman",
comment:
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
}
];
const [spacing, setSpacing] = React.useState(2);
const classes = useStyles();
const handleChange = event => {
setSpacing(Number(event.target.value));
};
return (
<div className="post">
<HomeBar />
<div>
<br></br>
<br></br>
<br></br>
<img src="https://miro.medium.com/max/2000/1*-T8oo_JoKkMxfnPKLt_Ciw.jpeg" alt="-" />
<h1>If You Only Read A Few Books In 2018, Read These</h1>
<p id="main">
If you’d liked to be jerked around less, provoked less, and more
productive and inwardly focused, where should you start? To me, the
answer is obvious: by turning to wisdom. Below is a list of 21 books
that will help lead you to a better, stronger 2018. Deep Work: Rules
for Focused Success in a Distracted World by Cal Newport Media
consumption went way up in 2017. For most of us, that meant happiness
and productivity went way down. The world is becoming noisier and will
become more so every day. If you can’t cultivate the ability to have
quiet, insightful, deeply focused periods of productive work, you’re
going to get screwed. This is a book that explains how to cultivate
and protect that skill — the ability to do deep work. I strongly urge
you to begin this practice in 2018— if you want to get anything done
or perform your best. The Subtle Art of Not Giving a F*ck: A
Counterintuitive Approach to Living a Good Life by Mark Manson To me,
practical philosophy has always been the art knowing what to — and
what not to — give a fuck about. That’s what Mark’s book is about.
It’s not about apathy. It’s about cultivating indifference to things
that don’t matter. Be careful, as Marcus Aurelius warns, not to give
the little things more time and thought they deserved. Maybe looking
back at this year reveals how much effort you’ve frittered away
worrying about the trivial. If so, let 2018 be a year that you only
devote energy to things that truly matter — get the important things
right by ignoring the insignificant. The Way to Love: The Last
Meditations of Anthony de Mello by Anthony de Mello Coach Shaka Smart
recommended this little book (and it’s a little book, probably the
smallest I’ve ever read. It fits in your palm). But it’s an incredibly
wise and helpful read. Written by a Catholic Priest who’d lived in
India, the book has this unusual convergence of eastern and western
thought. One of my favorite lines: “The question to ask is not ‘What’s
wrong with this person?’ but ‘What does this irritation tell me about
myself?’ I plan on regularly revisiting it throughout 2018. But What
If We’re Wrong by Chuck Klosterman It’s always good to remind
ourselves that almost everything we’re certain about will probably be
eventually proven wrong. Klosterman’s subtitle — Thinking About the
Present As If It Were the Past — is a brilliant exercise for getting
some perspective in 2018. Whether you think it’s going to be a year of
radical change for the better or a horrible year of excesses of
dangerous precedent, you’re probably wrong. You’re probably not even
in the ballpark. This book shows you why, not with lectures about
politics, but with a bunch of awesome thought experiments about music,
books, movies and science. Rules for Radicals: A Practical Primer for
Realistic Radicals by Saul Alinsky If Hillary Clinton had remembered
the lessons of Saul Alinsky (who she wrote her college thesis on), the
election may have turned out differently. Why? A notorious strategist
and community organizer, Alinsky was a die hard pragmatist, but he
also knew how to tell a story and create a collective cause. He could
work within the system but knew how to shake it up and generate
attention. This book is a classic and woefully underrated. Whatever
you set out to do in 2018, this book can provide you with strategic
guidance and insight. The Filter Bubble by Eli Pariser / Trust Me I’m
Lying by Ryan Holiday / The Brass Check by Upton Sinclair I strongly
recommend that you take the time in 2018 to read these books. In light
of this year, you owe it to yourself to study and better understand
how our media system works. In The Filter Bubble, Eli Pariser warns of
the danger of living in bubbles of personalization that reinforce and
insulate our worldview. Though Sinclair’s The Brass Check has been
almost entirely forgotten by history, it’s not only fascinating but a
timeless perspective. Sinclair deeply understood the economic
incentives of early 20th century journalism and thus could predict and
analyze the manipulative effect it had on The Truth. I used that book
as a model for my expose of the media system, Trust Me, I’m Lying.
Today, the incentives and pressures are different but they warp our
information in a similar way. In almost every substantial charge Upton
leveled against the yellow press, you could, today, sub in blogs and
the cable news cycle and be even more correct. 48 Laws of Power / 33
Strategies of War by Robert Greene Robert Greene is a master of human
psychology and human dynamics — he has a profound ability to explain
timeless truths through story and example. You can read the classics
and not always understand the lessons. But if you read Robert’s books,
I promise you will leave not just with actionable lessons but an
indelible sense of what to do in many trying and confusing situations.
I wrote earlier this year that strategic wisdom is not something we
are born with — but the lessons are there for us to pick up. Pick
these two up before the year ends and operate the next with a
strategic mindset and clarity. Conspiracy: Peter Thiel, Hulk Hogan,
Gawker, and the Anatomy of Intrigue by Ryan Holiday — If you want to
immerse yourself in the above topics of media and strategy, and are
looking for one book to teach you lessons in both, my book on the
nearly decade-long conspiracy that billionaire Peter Thiel waged
against Gawker will do this for you. This is a stunning story about
how power works in the modern age, and is a masterclass in strategy
and how to accomplish wildly ambitious aims. The Road To Character by
David Brooks When General Stanley McChrystal was asked on the Tim
Ferriss podcastwhat was a recent purchase that had most positively
impacted his life, he pointed to this book. I agree. It can be a bit
stilted and dense at times, but it should be assigned reading to any
young person today (a little challenge is a good thing). Illustrating
with examples and stories from great men and women, Brooks admonishes
the reader to undertake their own journey of character perfection. In
my own book, I explore the | Article | identifier_name | |
Article.js | 1576214873",
nama: "King Salman",
comment:
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
}
];
const [spacing, setSpacing] = React.useState(2);
const classes = useStyles();
const handleChange = event => {
setSpacing(Number(event.target.value));
};
return (
<div className="post">
<HomeBar />
<div>
<br></br>
<br></br>
<br></br>
<img src="https://miro.medium.com/max/2000/1*-T8oo_JoKkMxfnPKLt_Ciw.jpeg" alt="-" />
<h1>If You Only Read A Few Books In 2018, Read These</h1>
<p id="main">
If you’d liked to be jerked around less, provoked less, and more
productive and inwardly focused, where should you start? To me, the
answer is obvious: by turning to wisdom. Below is a list of 21 books
that will help lead you to a better, stronger 2018. Deep Work: Rules
for Focused Success in a Distracted World by Cal Newport Media
consumption went way up in 2017. For most of us, that meant happiness
and productivity went way down. The world is becoming noisier and will
become more so every day. If you can’t cultivate the ability to have
quiet, insightful, deeply focused periods of productive work, you’re
going to get screwed. This is a book that explains how to cultivate
and protect that skill — the ability to do deep work. I strongly urge
you to begin this practice in 2018— if you want to get anything done
or perform your best. The Subtle Art of Not Giving a F*ck: A
Counterintuitive Approach to Living a Good Life by Mark Manson To me,
practical philosophy has always been the art knowing what to — and
what not to — give a fuck about. That’s what Mark’s book is about.
It’s not about apathy. It’s about cultivating indifference to things
that don’t matter. Be careful, as Marcus Aurelius warns, not to give
the little things more time and thought they deserved. Maybe looking
back at this year reveals how much effort you’ve frittered away
worrying about the trivial. If so, let 2018 be a year that you only
devote energy to things that truly matter — get the important things
right by ignoring the insignificant. The Way to Love: The Last
Meditations of Anthony de Mello by Anthony de Mello Coach Shaka Smart
recommended this little book (and it’s a little book, probably the
smallest I’ve ever read. It fits in your palm). But it’s an incredibly
wise and helpful read. Written by a Catholic Priest who’d lived in
India, the book has this unusual convergence of eastern and western
thought. One of my favorite lines: “The question to ask is not ‘What’s
wrong with this person?’ but ‘What does this irritation tell me about
myself?’ I plan on regularly revisiting it throughout 2018. But What
If We’re Wrong by Chuck Klosterman It’s always good to remind
ourselves that almost everything we’re certain about will probably be
eventually proven wrong. Klosterman’s subtitle — Thinking About the
Present As If It Were the Past — is a brilliant exercise for getting
some perspective in 2018. Whether you think it’s going to be a year of
radical change for the better or a horrible year of excesses of
dangerous precedent, you’re probably wrong. You’re probably not even
in the ballpark. This book shows you why, not with lectures about
politics, but with a bunch of awesome thought experiments about music,
books, movies and science. Rules for Radicals: A Practical Primer for
Realistic Radicals by Saul Alinsky If Hillary Clinton had remembered
the lessons of Saul Alinsky (who she wrote her college thesis on), the
election may have turned out differently. Why? A notorious strategist
and community organizer, Alinsky was a die hard pragmatist, but he | Lying by Ryan Holiday / The Brass Check by Upton Sinclair I strongly
recommend that you take the time in 2018 to read these books. In light
of this year, you owe it to yourself to study and better understand
how our media system works. In The Filter Bubble, Eli Pariser warns of
the danger of living in bubbles of personalization that reinforce and
insulate our worldview. Though Sinclair’s The Brass Check has been
almost entirely forgotten by history, it’s not only fascinating but a
timeless perspective. Sinclair deeply understood the economic
incentives of early 20th century journalism and thus could predict and
analyze the manipulative effect it had on The Truth. I used that book
as a model for my expose of the media system, Trust Me, I’m Lying.
Today, the incentives and pressures are different but they warp our
information in a similar way. In almost every substantial charge Upton
leveled against the yellow press, you could, today, sub in blogs and
the cable news cycle and be even more correct. 48 Laws of Power / 33
Strategies of War by Robert Greene Robert Greene is a master of human
psychology and human dynamics — he has a profound ability to explain
timeless truths through story and example. You can read the classics
and not always understand the lessons. But if you read Robert’s books,
I promise you will leave not just with actionable lessons but an
indelible sense of what to do in many trying and confusing situations.
I wrote earlier this year that strategic wisdom is not something we
are born with — but the lessons are there for us to pick up. Pick
these two up before the year ends and operate the next with a
strategic mindset and clarity. Conspiracy: Peter Thiel, Hulk Hogan,
Gawker, and the Anatomy of Intrigue by Ryan Holiday — If you want to
immerse yourself in the above topics of media and strategy, and are
looking for one book to teach you lessons in both, my book on the
nearly decade-long conspiracy that billionaire Peter Thiel waged
against Gawker will do this for you. This is a stunning story about
how power works in the modern age, and is a masterclass in strategy
and how to accomplish wildly ambitious aims. The Road To Character by
David Brooks When General Stanley McChrystal was asked on the Tim
Ferriss podcastwhat was a recent purchase that had most positively
impacted his life, he pointed to this book. I agree. It can be a bit
stilted and dense at times, but it should be assigned reading to any
young person today (a little challenge is a good thing). Illustrating
with examples and stories from great men and women, Brooks admonishes
the reader to undertake their own journey of character perfection. In
my own book, I explore the same topic (humility) from a different
angle using similar stories — I’m attacking ego, he’s building up
character. Both will be important for the next year. The Dip by Seth
Godin This book is a short 70 pages and it looks like something
someone would give as a joke gift, but it’s anything but. Godin talks
frankly about quitting and pushing through — and when to do each. Quit
when you’ll be mediocre, when the returns aren’t worth the investment,
when you no longer think you’ll enjoy the ends. Stick when the dip is
the obstacle that creates scarcity, when you’re simply bridging the
gap between beginner’s luck and mastery. I promise, next year you are
guaranteed to find yourself in moments when you don’t know what is the
right answer. This book will help you find it. Hillbilly Elegy: A
Memoir of a Family and Culture in Crisis by J. D. Vance / Strangers in
Their Own Land: Anger and Mourning on the American Right by Arlie
Russell Hochschild You might describe Hillbilly Elegy as a Ta-Nehisi
Coates style memoir about a community that — at least in progressive
circles — gets a lot less attention: disaffected, impoverished whites
(particularly in the mid-east and South | also knew how to tell a story and create a collective cause. He could
work within the system but knew how to shake it up and generate
attention. This book is a classic and woefully underrated. Whatever
you set out to do in 2018, this book can provide you with strategic
guidance and insight. The Filter Bubble by Eli Pariser / Trust Me I’m | random_line_split |
Article.js |
const [spacing, setSpacing] = React.useState(2);
const classes = useStyles();
const handleChange = event => {
setSpacing(Number(event.target.value));
};
return (
<div className="post">
<HomeBar />
<div>
<br></br>
<br></br>
<br></br>
<img src="https://miro.medium.com/max/2000/1*-T8oo_JoKkMxfnPKLt_Ciw.jpeg" alt="-" />
<h1>If You Only Read A Few Books In 2018, Read These</h1>
<p id="main">
If you’d liked to be jerked around less, provoked less, and more
productive and inwardly focused, where should you start? To me, the
answer is obvious: by turning to wisdom. Below is a list of 21 books
that will help lead you to a better, stronger 2018. Deep Work: Rules
for Focused Success in a Distracted World by Cal Newport Media
consumption went way up in 2017. For most of us, that meant happiness
and productivity went way down. The world is becoming noisier and will
become more so every day. If you can’t cultivate the ability to have
quiet, insightful, deeply focused periods of productive work, you’re
going to get screwed. This is a book that explains how to cultivate
and protect that skill — the ability to do deep work. I strongly urge
you to begin this practice in 2018— if you want to get anything done
or perform your best. The Subtle Art of Not Giving a F*ck: A
Counterintuitive Approach to Living a Good Life by Mark Manson To me,
practical philosophy has always been the art knowing what to — and
what not to — give a fuck about. That’s what Mark’s book is about.
It’s not about apathy. It’s about cultivating indifference to things
that don’t matter. Be careful, as Marcus Aurelius warns, not to give
the little things more time and thought they deserved. Maybe looking
back at this year reveals how much effort you’ve frittered away
worrying about the trivial. If so, let 2018 be a year that you only
devote energy to things that truly matter — get the important things
right by ignoring the insignificant. The Way to Love: The Last
Meditations of Anthony de Mello by Anthony de Mello Coach Shaka Smart
recommended this little book (and it’s a little book, probably the
smallest I’ve ever read. It fits in your palm). But it’s an incredibly
wise and helpful read. Written by a Catholic Priest who’d lived in
India, the book has this unusual convergence of eastern and western
thought. One of my favorite lines: “The question to ask is not ‘What’s
wrong with this person?’ but ‘What does this irritation tell me about
myself?’ I plan on regularly revisiting it throughout 2018. But What
If We’re Wrong by Chuck Klosterman It’s always good to remind
ourselves that almost everything we’re certain about will probably be
eventually proven wrong. Klosterman’s subtitle — Thinking About the
Present As If It Were the Past — is a brilliant exercise for getting
some perspective in 2018. Whether you think it’s going to be a year of
radical change for the better or a horrible year of excesses of
dangerous precedent, you’re probably wrong. You’re probably not even
in the ballpark. This book shows you why, not with lectures about
politics, but with a bunch of awesome thought experiments about music,
books, movies and science. Rules for Radicals: A Practical Primer for
Realistic Radicals by Saul Alinsky If Hillary Clinton had remembered
the lessons of Saul Alinsky (who she wrote her college thesis on), the
election may have turned out differently. Why? A notorious strategist
and community organizer, Alinsky was a die hard pragmatist, but he
also knew how to tell a story and create a collective cause. He could
work within the system but knew how to shake it up and generate
attention. This book is a classic and woefully underrated. Whatever
you set out to do in 2018, this book can provide you with strategic
guidance and insight. The Filter Bubble by Eli Pariser / Trust Me I’m
Lying by Ryan Holiday / The Brass Check by Upton Sinclair I strongly
recommend that you take the time in 2018 to read these books. In light
of this year, you owe it to yourself to study and better understand
how our media system works. In The Filter Bubble, Eli Pariser warns of
the danger of living in bubbles of personalization that reinforce and
insulate our worldview. Though Sinclair’s The Brass Check has been
almost entirely forgotten by history, it’s not only fascinating but a
timeless perspective. Sinclair deeply understood the economic
incentives of early 20th century journalism and thus could predict and
analyze the manipulative effect it had on The Truth. I used that book
as a model for my expose of the media system, Trust Me, I’m Lying.
Today, the incentives and pressures are different but they warp our
information in a similar way. In almost every substantial charge Upton
leveled against the yellow press, you could, today, sub in blogs and
the cable news cycle and be even more correct. 48 Laws of Power / 33
Strategies of War by Robert Greene Robert Greene is a master of human
psychology and human dynamics — he has a profound ability to explain
timeless truths through story and example. You can read the classics
and not always understand the lessons. But if you read Robert’s books,
I promise you will leave not just with actionable lessons but an
indelible sense of what to do in many trying and confusing situations.
I wrote earlier this year that strategic wisdom is not something we
are born with — but the lessons are there for us to pick up. Pick
these two up before the year ends and operate the next with a
strategic mindset and clarity. Conspiracy: Peter Thiel, Hulk Hogan,
Gawker, and the Anatomy of Intrigue by Ryan Holiday — If you want to
immerse yourself in the above topics of media and strategy, and are
looking for one book to teach you lessons in both, my book on the
nearly decade-long conspiracy that billionaire Peter Thiel waged
against Gawker will do this for you. This is a stunning story about
how power works in the modern age, and is a masterclass in strategy
and how to accomplish wildly ambitious aims. The Road To Character by
David Brooks When General Stanley McChrystal was asked on the Tim
Ferriss podcastwhat was a recent purchase that had most positively
impacted his life, he pointed to this book. I agree. It can be a bit
stilted and dense at times, but it should be assigned reading to any
young person today (a little challenge is a good thing). Illustrating
with examples and stories from great men and women, Brooks admonishes
the reader to undertake their own journey of character perfection. In
my own book, I explore the same | {
var [ claps, setClaps] = React.useState(0);
const follows = [
{
image:
"https://upload.wikimedia.org/wikipedia/commons/a/a7/20180602_FIFA_Friendly_Match_Austria_vs._Germany_Mesut_%C3%96zil_850_0704.jpg",
nama: "Amin Subagiyo",
comment:
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
},
{
image:
"https://cdn.i-scmp.com/sites/default/files/styles/768x768/public/d8/images/methode/2019/12/13/6b06cb22-1ca7-11ea-8971-922fdc94075f_image_hires_132744.jpg?itok=XditGQBc&v=1576214873",
nama: "King Salman",
comment:
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
}
]; | identifier_body | |
__init__.py | ,
'_setCustomVar': dict((i, None) for i in range(1, 6)),
'_setDomainName': False,
'_setAllowLinker': False,
'_addTrans': [],
'_addItem': [],
'_trackTrans': False,
'_trackEvent': [],
}
def setAccount(self, account_id):
"""This should really never be called, best to setup during __init__, where it is required"""
self.data_struct['_setAccount'] = account_id
def setAccountAdditional_add(self, account_id):
"""add an additional account id to send the data to. please note - this is only tested to work with the async method.
"""
self.data_struct['__setAccountAdditional'].add(account_id)
def setAccountAdditional_del(self, account_id):
try:
self.data_struct['__setAccountAdditional'].remove(account_id)
except KeyError:
pass
def setSinglePush(self, bool_value):
"""GA supports a single 'push' event. """
self.data_struct['__singlePush'] = bool_value
def trackEvent(self, track_dict):
"""'Constructs and sends the event tracking call to the Google Analytics Tracking Code. Use this to track visitor behavior on your website that is not related to a web page visit, such as interaction with a Flash video movie control or any user event that does not trigger a page request. For more information on Event Tracking, see the Event Tracking Guide.
You can use any of the following optional parameters: opt_label, opt_value or opt_noninteraction. If you want to provide a value only for the second or 3rd optional parameter, you need to pass in undefined for the preceding optional parameter.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEventTracking.html#_gat.GA_EventTracker_._trackEvent
"""
clean = []
for i in ['category', 'actions', 'opt_label', 'opt_value', 'opt_noninteraction']:
if i in track_dict:
clean.append("'%s'" % track_dict[i])
else:
clean.append('undefined')
self.data_struct['_trackEvent'].append("""['_trackEvent',%s]""" % ','.join(clean))
def setCustomVar(self, index, name, value, opt_scope=None):
"""_setCustomVar(index, name, value, opt_scope)
'Sets a custom variable with the supplied name, value, and scope for the variable. There is a 64-byte character limit for the name and value combined.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiBasicConfiguration.html#_gat.GA_Tracker_._setCustomVar
"""
self.data_struct['_setCustomVar'][index] = (escape_text(name), escape_text(value), opt_scope)
def setDomainName(self, domain_name):
"""_setDomainName(newDomainName)
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setDomainName
"""
self.data_struct['_setDomainName'] = domain_name
def setAllowLinker(self, bool_allow):
"""_setAllowLinker(bool)
http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setAllowLinker
"""
self.data_struct['_setAllowLinker'] = bool_allow
def addTrans(self, track_dict):
"""'Creates a transaction object with the given values. As with _addItem(), this method handles only transaction tracking and provides no additional ecommerce functionality. Therefore, if the transaction is a duplicate of an existing transaction for that session, the old transaction values are over-written with the new transaction values. Arguments for this method are matched by position, so be sure to supply all parameters, even if some of them have an empty value.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addTrans
"""
for i in ['order_id', 'total']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['opt_affiliation', 'opt_tax', 'opt_shipping', 'opt_city', 'opt_state', 'opt_country']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addTrans'].append("""['_addTrans',%(order_id)s,'%(opt_affiliation)s','%(total)s','%(opt_tax)s','%(opt_shipping)s','%(opt_city)s','%(opt_state)s','%(opt_country)s']""" % track_dict)
def addItem(self, track_dict):
"""'Use this method to track items purchased by visitors to your ecommerce site. This method tracks individual items by their SKU. This means that the sku parameter is required. This method then associates the item to the parent transaction object via the orderId argument'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addItem
"""
for i in ['order_id', 'sku', 'name', 'price', 'quantity']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['category']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addItem'].append("""['_addItem',%(order_id)s,'%(sku)s','%(name)s','%(category)s','%(price)s','%(quantity)s']""" % track_dict)
def trackTrans(self):
"""gaq_trackTrans(request=None)- You merely have to call this to enable it. I decided to require this, instead of automatically calling it if a transaction exists, because this must be explicitly called in the ga.js API and its safer to reinforce this behavior.
'Sends both the transaction and item data to the Google Analytics server. This method should be called after _trackPageview(), and used in conjunction with the _addItem() and addTrans() methods. It should be called after items and transaction elements have been set up.'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._trackTrans
"""
self.data_struct['_trackTrans'] = True
def _inner_render(self, single_push, single_pushes, script, account_id, is_secondary_account=False):
# start the single push if we elected
if single_push:
script.append(u"""_gaq.push(""")
# according to GA docs, the order to submit via javascript is:
# # _setAccount
# # _setDomainName
# # _setAllowLinker
# #
# # cross domain tracking reference
# # http://code.google.com/apis/analytics/docs/tracking/gaTrackingSite.html
# _setAccount
if single_push:
single_pushes.append(u"""['_setAccount', '%s']""" % account_id)
else:
script.append(u"""_gaq.push(['_setAccount', '%s']);""" % account_id)
# _setDomainName
if self.data_struct['_setDomainName']:
if single_push:
single_pushes.append(u"""['_setDomainName', '%s']""" % self.data_struct['_setDomainName'])
else:
script.append(u"""_gaq.push(['_setDomainName', '%s']);""" % self.data_struct['_setDomainName'])
# _setAllowLinker
if self.data_struct['_setAllowLinker']:
if single_push:
single_pushes.append(u"""['_setAllowLinker', %s]""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
else:
script.append(u"""_gaq.push(['_setAllowLinker', %s]);""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
# _setCustomVar is next
for index in self.data_struct['_setCustomVar'].keys():
_payload = self.data_struct['_setCustomVar'][index]
if not _payload: continue
_payload = (index, ) + _payload
if _payload[3]:
formatted = u"""['_setCustomVar',%s,'%s','%s',%s]""" % _payload
else:
formatted = u"""['_setCustomVar',%s,'%s','%s']""" % _payload[:3]
if single_push:
single_pushes.append(formatted)
else:
script.append(u"""_gaq.push(%s);""" % formatted)
if single_push:
single_pushes.append(u"""['_trackPageview']""")
else:
script.append(u"""_gaq.push(['_trackPageview']);""")
# according to GA docs, the order to submit via javascript is:
# # _trackPageview
# # _addTrans
# # _addItem
# # _trackTrans
for category in ['_addTrans', '_addItem']: |
if self.data_struct['_track | for i in self.data_struct[category]:
if single_push:
single_pushes.append(i)
else:
script.append(u"""_gaq.push(%s);""" % i) | random_line_split |
__init__.py | ,
'_setCustomVar': dict((i, None) for i in range(1, 6)),
'_setDomainName': False,
'_setAllowLinker': False,
'_addTrans': [],
'_addItem': [],
'_trackTrans': False,
'_trackEvent': [],
}
def setAccount(self, account_id):
"""This should really never be called, best to setup during __init__, where it is required"""
self.data_struct['_setAccount'] = account_id
def setAccountAdditional_add(self, account_id):
"""add an additional account id to send the data to. please note - this is only tested to work with the async method.
"""
self.data_struct['__setAccountAdditional'].add(account_id)
def setAccountAdditional_del(self, account_id):
try:
self.data_struct['__setAccountAdditional'].remove(account_id)
except KeyError:
pass
def setSinglePush(self, bool_value):
"""GA supports a single 'push' event. """
self.data_struct['__singlePush'] = bool_value
def trackEvent(self, track_dict):
"""'Constructs and sends the event tracking call to the Google Analytics Tracking Code. Use this to track visitor behavior on your website that is not related to a web page visit, such as interaction with a Flash video movie control or any user event that does not trigger a page request. For more information on Event Tracking, see the Event Tracking Guide.
You can use any of the following optional parameters: opt_label, opt_value or opt_noninteraction. If you want to provide a value only for the second or 3rd optional parameter, you need to pass in undefined for the preceding optional parameter.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEventTracking.html#_gat.GA_EventTracker_._trackEvent
"""
clean = []
for i in ['category', 'actions', 'opt_label', 'opt_value', 'opt_noninteraction']:
if i in track_dict:
clean.append("'%s'" % track_dict[i])
else:
clean.append('undefined')
self.data_struct['_trackEvent'].append("""['_trackEvent',%s]""" % ','.join(clean))
def setCustomVar(self, index, name, value, opt_scope=None):
"""_setCustomVar(index, name, value, opt_scope)
'Sets a custom variable with the supplied name, value, and scope for the variable. There is a 64-byte character limit for the name and value combined.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiBasicConfiguration.html#_gat.GA_Tracker_._setCustomVar
"""
self.data_struct['_setCustomVar'][index] = (escape_text(name), escape_text(value), opt_scope)
def setDomainName(self, domain_name):
"""_setDomainName(newDomainName)
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setDomainName
"""
self.data_struct['_setDomainName'] = domain_name
def setAllowLinker(self, bool_allow):
"""_setAllowLinker(bool)
http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setAllowLinker
"""
self.data_struct['_setAllowLinker'] = bool_allow
def addTrans(self, track_dict):
"""'Creates a transaction object with the given values. As with _addItem(), this method handles only transaction tracking and provides no additional ecommerce functionality. Therefore, if the transaction is a duplicate of an existing transaction for that session, the old transaction values are over-written with the new transaction values. Arguments for this method are matched by position, so be sure to supply all parameters, even if some of them have an empty value.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addTrans
"""
for i in ['order_id', 'total']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['opt_affiliation', 'opt_tax', 'opt_shipping', 'opt_city', 'opt_state', 'opt_country']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addTrans'].append("""['_addTrans',%(order_id)s,'%(opt_affiliation)s','%(total)s','%(opt_tax)s','%(opt_shipping)s','%(opt_city)s','%(opt_state)s','%(opt_country)s']""" % track_dict)
def addItem(self, track_dict):
"""'Use this method to track items purchased by visitors to your ecommerce site. This method tracks individual items by their SKU. This means that the sku parameter is required. This method then associates the item to the parent transaction object via the orderId argument'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addItem
"""
for i in ['order_id', 'sku', 'name', 'price', 'quantity']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['category']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addItem'].append("""['_addItem',%(order_id)s,'%(sku)s','%(name)s','%(category)s','%(price)s','%(quantity)s']""" % track_dict)
def trackTrans(self):
"""gaq_trackTrans(request=None)- You merely have to call this to enable it. I decided to require this, instead of automatically calling it if a transaction exists, because this must be explicitly called in the ga.js API and its safer to reinforce this behavior.
'Sends both the transaction and item data to the Google Analytics server. This method should be called after _trackPageview(), and used in conjunction with the _addItem() and addTrans() methods. It should be called after items and transaction elements have been set up.'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._trackTrans
"""
self.data_struct['_trackTrans'] = True
def _inner_render(self, single_push, single_pushes, script, account_id, is_secondary_account=False):
# start the single push if we elected
| single_pushes.append(u"""['_setDomainName', '%s']""" % self.data_struct['_setDomainName'])
else:
script.append(u"""_gaq.push(['_setDomainName', '%s']);""" % self.data_struct['_setDomainName'])
# _setAllowLinker
if self.data_struct['_setAllowLinker']:
if single_push:
single_pushes.append(u"""['_setAllowLinker', %s]""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
else:
script.append(u"""_gaq.push(['_setAllowLinker', %s]);""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
# _setCustomVar is next
for index in self.data_struct['_setCustomVar'].keys():
_payload = self.data_struct['_setCustomVar'][index]
if not _payload: continue
_payload = (index, ) + _payload
if _payload[3]:
formatted = u"""['_setCustomVar',%s,'%s','%s',%s]""" % _payload
else:
formatted = u"""['_setCustomVar',%s,'%s','%s']""" % _payload[:3]
if single_push:
single_pushes.append(formatted)
else:
script.append(u"""_gaq.push(%s);""" % formatted)
if single_push:
single_pushes.append(u"""['_trackPageview']""")
else:
script.append(u"""_gaq.push(['_trackPageview']);""")
# according to GA docs, the order to submit via javascript is:
# # _trackPageview
# # _addTrans
# # _addItem
# # _trackTrans
for category in ['_addTrans', '_addItem']:
for i in self.data_struct[category]:
if single_push:
single_pushes.append(i)
else:
script.append(u"""_gaq.push(%s);""" % i)
if self.data_struct['_trackTrans | if single_push:
script.append(u"""_gaq.push(""")
# according to GA docs, the order to submit via javascript is:
# # _setAccount
# # _setDomainName
# # _setAllowLinker
# #
# # cross domain tracking reference
# # http://code.google.com/apis/analytics/docs/tracking/gaTrackingSite.html
# _setAccount
if single_push:
single_pushes.append(u"""['_setAccount', '%s']""" % account_id)
else:
script.append(u"""_gaq.push(['_setAccount', '%s']);""" % account_id)
# _setDomainName
if self.data_struct['_setDomainName']:
if single_push: | identifier_body |
__init__.py | , 6)),
'_setDomainName': False,
'_setAllowLinker': False,
'_addTrans': [],
'_addItem': [],
'_trackTrans': False,
'_trackEvent': [],
}
def setAccount(self, account_id):
"""This should really never be called, best to setup during __init__, where it is required"""
self.data_struct['_setAccount'] = account_id
def setAccountAdditional_add(self, account_id):
"""add an additional account id to send the data to. please note - this is only tested to work with the async method.
"""
self.data_struct['__setAccountAdditional'].add(account_id)
def setAccountAdditional_del(self, account_id):
try:
self.data_struct['__setAccountAdditional'].remove(account_id)
except KeyError:
pass
def setSinglePush(self, bool_value):
"""GA supports a single 'push' event. """
self.data_struct['__singlePush'] = bool_value
def trackEvent(self, track_dict):
"""'Constructs and sends the event tracking call to the Google Analytics Tracking Code. Use this to track visitor behavior on your website that is not related to a web page visit, such as interaction with a Flash video movie control or any user event that does not trigger a page request. For more information on Event Tracking, see the Event Tracking Guide.
You can use any of the following optional parameters: opt_label, opt_value or opt_noninteraction. If you want to provide a value only for the second or 3rd optional parameter, you need to pass in undefined for the preceding optional parameter.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEventTracking.html#_gat.GA_EventTracker_._trackEvent
"""
clean = []
for i in ['category', 'actions', 'opt_label', 'opt_value', 'opt_noninteraction']:
if i in track_dict:
clean.append("'%s'" % track_dict[i])
else:
clean.append('undefined')
self.data_struct['_trackEvent'].append("""['_trackEvent',%s]""" % ','.join(clean))
def setCustomVar(self, index, name, value, opt_scope=None):
"""_setCustomVar(index, name, value, opt_scope)
'Sets a custom variable with the supplied name, value, and scope for the variable. There is a 64-byte character limit for the name and value combined.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiBasicConfiguration.html#_gat.GA_Tracker_._setCustomVar
"""
self.data_struct['_setCustomVar'][index] = (escape_text(name), escape_text(value), opt_scope)
def setDomainName(self, domain_name):
"""_setDomainName(newDomainName)
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setDomainName
"""
self.data_struct['_setDomainName'] = domain_name
def setAllowLinker(self, bool_allow):
"""_setAllowLinker(bool)
http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setAllowLinker
"""
self.data_struct['_setAllowLinker'] = bool_allow
def addTrans(self, track_dict):
"""'Creates a transaction object with the given values. As with _addItem(), this method handles only transaction tracking and provides no additional ecommerce functionality. Therefore, if the transaction is a duplicate of an existing transaction for that session, the old transaction values are over-written with the new transaction values. Arguments for this method are matched by position, so be sure to supply all parameters, even if some of them have an empty value.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addTrans
"""
for i in ['order_id', 'total']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['opt_affiliation', 'opt_tax', 'opt_shipping', 'opt_city', 'opt_state', 'opt_country']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addTrans'].append("""['_addTrans',%(order_id)s,'%(opt_affiliation)s','%(total)s','%(opt_tax)s','%(opt_shipping)s','%(opt_city)s','%(opt_state)s','%(opt_country)s']""" % track_dict)
def addItem(self, track_dict):
"""'Use this method to track items purchased by visitors to your ecommerce site. This method tracks individual items by their SKU. This means that the sku parameter is required. This method then associates the item to the parent transaction object via the orderId argument'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addItem
"""
for i in ['order_id', 'sku', 'name', 'price', 'quantity']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['category']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addItem'].append("""['_addItem',%(order_id)s,'%(sku)s','%(name)s','%(category)s','%(price)s','%(quantity)s']""" % track_dict)
def trackTrans(self):
"""gaq_trackTrans(request=None)- You merely have to call this to enable it. I decided to require this, instead of automatically calling it if a transaction exists, because this must be explicitly called in the ga.js API and its safer to reinforce this behavior.
'Sends both the transaction and item data to the Google Analytics server. This method should be called after _trackPageview(), and used in conjunction with the _addItem() and addTrans() methods. It should be called after items and transaction elements have been set up.'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._trackTrans
"""
self.data_struct['_trackTrans'] = True
def _inner_render(self, single_push, single_pushes, script, account_id, is_secondary_account=False):
# start the single push if we elected
if single_push:
script.append(u"""_gaq.push(""")
# according to GA docs, the order to submit via javascript is:
# # _setAccount
# # _setDomainName
# # _setAllowLinker
# #
# # cross domain tracking reference
# # http://code.google.com/apis/analytics/docs/tracking/gaTrackingSite.html
# _setAccount
if single_push:
single_pushes.append(u"""['_setAccount', '%s']""" % account_id)
else:
script.append(u"""_gaq.push(['_setAccount', '%s']);""" % account_id)
# _setDomainName
if self.data_struct['_setDomainName']:
if single_push:
single_pushes.append(u"""['_setDomainName', '%s']""" % self.data_struct['_setDomainName'])
else:
script.append(u"""_gaq.push(['_setDomainName', '%s']);""" % self.data_struct['_setDomainName'])
# _setAllowLinker
if self.data_struct['_setAllowLinker']:
if single_push:
single_pushes.append(u"""['_setAllowLinker', %s]""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
else:
script.append(u"""_gaq.push(['_setAllowLinker', %s]);""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
# _setCustomVar is next
for index in self.data_struct['_setCustomVar'].keys():
_payload = self.data_struct['_setCustomVar'][index]
if not _payload: continue
_payload = (index, ) + _payload
if _payload[3]:
formatted = u"""['_setCustomVar',%s,'%s','%s',%s]""" % _payload
else:
formatted = u"""['_setCustomVar',%s,'%s','%s']""" % _payload[:3]
if single_push:
single_pushes.append(formatted)
else:
script.append(u"""_gaq.push(%s);""" % formatted)
if single_push:
single_pushes.append(u"""['_trackPageview']""")
else:
script.append(u"""_gaq.push(['_trackPageview']);""")
# according to GA docs, the order to submit via javascript is:
# # _trackPageview
# # _addTrans
# # _addItem
# # _trackTrans
for category in ['_addTrans', '_addItem']:
for i in self.data_struct[category]:
if single_push:
single_pushes.append(i)
else:
script.append(u"""_gaq.push(%s);""" % i)
if self.data_struct['_trackTrans']:
if single_push:
| single_pushes.append(u"""['_trackTrans']""") | conditional_block | |
__init__.py | (text=''):
text = str(text)
return text.replace("\'", "\\'")
class GaqHub(object):
data_struct = None
def __init__(self, account_id, single_push=False):
"""Sets up self.data_struct dict which we use for storage.
You'd probably have something like this in your base controller:
class Handler(object):
def __init__(self, request):
self.request = request
h.gaq_setup(self.request, 'AccountId')
All of the other commands in the module accept an optional 'request' kwarg.
If no 'request' is submitted, it will call pyramid.threadlocal.get_current_request()
This should allow you to easily and cleanly call this within templates, and not just handler methods.
"""
self.data_struct = {
'__singlePush': single_push,
'__setAccountAdditional': set({}),
'_setAccount': account_id,
'_setCustomVar': dict((i, None) for i in range(1, 6)),
'_setDomainName': False,
'_setAllowLinker': False,
'_addTrans': [],
'_addItem': [],
'_trackTrans': False,
'_trackEvent': [],
}
def setAccount(self, account_id):
"""This should really never be called, best to setup during __init__, where it is required"""
self.data_struct['_setAccount'] = account_id
def setAccountAdditional_add(self, account_id):
"""add an additional account id to send the data to. please note - this is only tested to work with the async method.
"""
self.data_struct['__setAccountAdditional'].add(account_id)
def setAccountAdditional_del(self, account_id):
try:
self.data_struct['__setAccountAdditional'].remove(account_id)
except KeyError:
pass
def setSinglePush(self, bool_value):
"""GA supports a single 'push' event. """
self.data_struct['__singlePush'] = bool_value
def trackEvent(self, track_dict):
"""'Constructs and sends the event tracking call to the Google Analytics Tracking Code. Use this to track visitor behavior on your website that is not related to a web page visit, such as interaction with a Flash video movie control or any user event that does not trigger a page request. For more information on Event Tracking, see the Event Tracking Guide.
You can use any of the following optional parameters: opt_label, opt_value or opt_noninteraction. If you want to provide a value only for the second or 3rd optional parameter, you need to pass in undefined for the preceding optional parameter.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEventTracking.html#_gat.GA_EventTracker_._trackEvent
"""
clean = []
for i in ['category', 'actions', 'opt_label', 'opt_value', 'opt_noninteraction']:
if i in track_dict:
clean.append("'%s'" % track_dict[i])
else:
clean.append('undefined')
self.data_struct['_trackEvent'].append("""['_trackEvent',%s]""" % ','.join(clean))
def setCustomVar(self, index, name, value, opt_scope=None):
"""_setCustomVar(index, name, value, opt_scope)
'Sets a custom variable with the supplied name, value, and scope for the variable. There is a 64-byte character limit for the name and value combined.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiBasicConfiguration.html#_gat.GA_Tracker_._setCustomVar
"""
self.data_struct['_setCustomVar'][index] = (escape_text(name), escape_text(value), opt_scope)
def setDomainName(self, domain_name):
"""_setDomainName(newDomainName)
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setDomainName
"""
self.data_struct['_setDomainName'] = domain_name
def setAllowLinker(self, bool_allow):
"""_setAllowLinker(bool)
http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setAllowLinker
"""
self.data_struct['_setAllowLinker'] = bool_allow
def addTrans(self, track_dict):
"""'Creates a transaction object with the given values. As with _addItem(), this method handles only transaction tracking and provides no additional ecommerce functionality. Therefore, if the transaction is a duplicate of an existing transaction for that session, the old transaction values are over-written with the new transaction values. Arguments for this method are matched by position, so be sure to supply all parameters, even if some of them have an empty value.'
-- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addTrans
"""
for i in ['order_id', 'total']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['opt_affiliation', 'opt_tax', 'opt_shipping', 'opt_city', 'opt_state', 'opt_country']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addTrans'].append("""['_addTrans',%(order_id)s,'%(opt_affiliation)s','%(total)s','%(opt_tax)s','%(opt_shipping)s','%(opt_city)s','%(opt_state)s','%(opt_country)s']""" % track_dict)
def addItem(self, track_dict):
"""'Use this method to track items purchased by visitors to your ecommerce site. This method tracks individual items by their SKU. This means that the sku parameter is required. This method then associates the item to the parent transaction object via the orderId argument'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addItem
"""
for i in ['order_id', 'sku', 'name', 'price', 'quantity']: # fix required ; let javascript show errors if null
if i not in track_dict:
track_dict[i] = ''
for i in ['category']: # fix optionals for positioning
if i not in track_dict:
track_dict[i] = ''
self.data_struct['_addItem'].append("""['_addItem',%(order_id)s,'%(sku)s','%(name)s','%(category)s','%(price)s','%(quantity)s']""" % track_dict)
def trackTrans(self):
"""gaq_trackTrans(request=None)- You merely have to call this to enable it. I decided to require this, instead of automatically calling it if a transaction exists, because this must be explicitly called in the ga.js API and its safer to reinforce this behavior.
'Sends both the transaction and item data to the Google Analytics server. This method should be called after _trackPageview(), and used in conjunction with the _addItem() and addTrans() methods. It should be called after items and transaction elements have been set up.'
--from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._trackTrans
"""
self.data_struct['_trackTrans'] = True
def _inner_render(self, single_push, single_pushes, script, account_id, is_secondary_account=False):
# start the single push if we elected
if single_push:
script.append(u"""_gaq.push(""")
# according to GA docs, the order to submit via javascript is:
# # _setAccount
# # _setDomainName
# # _setAllowLinker
# #
# # cross domain tracking reference
# # http://code.google.com/apis/analytics/docs/tracking/gaTrackingSite.html
# _setAccount
if single_push:
single_pushes.append(u"""['_setAccount', '%s']""" % account_id)
else:
script.append(u"""_gaq.push(['_setAccount', '%s']);""" % account_id)
# _setDomainName
if self.data_struct['_setDomainName']:
if single_push:
single_pushes.append(u"""['_setDomainName', '%s']""" % self.data_struct['_setDomainName'])
else:
script.append(u"""_gaq.push(['_setDomainName', '%s']);""" % self.data_struct['_setDomainName'])
# _setAllowLinker
if self.data_struct['_setAllowLinker']:
if single_push:
single_pushes.append(u"""['_setAllowLinker', %s]""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
else:
script.append(u"""_gaq.push(['_setAllowLinker', %s]);""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
# _setCustomVar is next
for index in self.data_struct['_setCustomVar'].keys():
_payload = self.data_struct['_setCustomVar'][index]
if not _payload: continue
_payload = (index, ) + _payload
if _payload[3]:
formatted = u"""['_setCustomVar',%s,'%s','%s',%s]""" % _payload
else:
formatted = u""" | escape_text | identifier_name | |
aio.rs | .complete.send(Ok((retbuf, None))).expect("Could not send AioSession response");
entry.complete.send(Ok((retbuf, None)));
}
Err(e) => panic!("pread error {:?}", e),
}
}
IoOp::Pwrite(retbuf, token) => {
trace!(
" got pwrite response, token {}, is error? {}",
token,
result.is_err()
);
match result {
Ok(_) => {
let entry = self.handles_pwrite.remove(token); //? .unwrap();
entry.complete.send(Ok((retbuf, None)));
}
Err(e) => panic!("pwrite error {:?}", e),
}
}
_ => (),
}
}
}
Err(e) => panic!("ctx.results failed: {:?}", e),
}
};
// Read all available incoming requests, enqueue in AIO batch
loop {
let msg = match Pin::new(&mut self.rx).poll_next(cx) {
Poll::Ready(Some(msg)) => msg,
Poll::Ready(None) => break,
Poll::Pending => break, // AioThread.poll is automatically scheduled
};
match msg {
Message::PRead(file, offset, len, buf, complete) => {
self.stats.curr_preads += 1;
// The self is a Pin<&mut Self>. Obtaining mutable references to the fields
// will require going through DerefMut, which requires unique borrow.
// You can avoid the issue by dereferencing self once on entry to the method
// let this = &mut *self, and then continue accessing it
// through this.
// The basic idea is that each access to self.deref_mut()
// basically will create a new mutable reference to self, if
// you do it multiple times you get the error, so by
// effectively calling deref_mut by hand I can save the
// reference once and use it when needed.
let this = &mut *self;
let entry = this.handles_pread.vacant_entry();
let key = entry.key();
match this.ctx.pread(&file, buf, offset as i64, len, key) {
Ok(()) => {
entry.insert(HandleEntry { complete: complete });
}
Err((buf, _token)) => {
complete
.send(Ok((
buf,
Some(io::Error::new(io::ErrorKind::Other, "pread failed")),
)))
.expect("Could not send AioThread error response");
}
};
}
Message::PWrite(file, offset, buf, complete) => {
self.stats.curr_pwrites += 1;
let this = &mut *self;
let entry = this.handles_pwrite.vacant_entry();
let key = entry.key();
match this.ctx.pwrite(&file, buf, offset as i64, key) {
Ok(()) => {
entry.insert(HandleEntry { complete: complete });
}
Err((buf, _token)) => {
complete
.send(Ok((
buf,
Some(io::Error::new(io::ErrorKind::Other, "pread failed")),
)))
.expect("Could not send AioThread error response");
}
}
}
}
// TODO: If max queue depth is reached, do not receive any
// more messages, will cause clients to block
}
// TODO: Need busywait for submit timeout
trace!(" batch size {}", self.ctx.batched());
while self.ctx.batched() > 0 {
if let Err(e) = self.ctx.submit() {
panic!("batch submit failed {:?}", e);
}
}
let need_read = self.handles_pread.len() > 0 || self.handles_pwrite.len() > 0;
if need_read {
// Not sure I totally understand how the old need_read works vs the
// new clear_read_ready call.
trace!(" calling stream.clear_read_ready()");
Pin::new(&mut self.stream).clear_read_ready(cx, ready);
}
// Print some useful stats
if self.stats.curr_polls % 10000 == 0 {
let elapsed = self.last_report_ts.elapsed().expect("Time drift!");
let elapsed_ms = ((elapsed.as_secs() * 1_000_000_000) as f64
+ elapsed.subsec_nanos() as f64)
/ 1000000.0;
let polls = self.stats.curr_polls - self.stats.prev_polls;
let preads = self.stats.curr_preads - self.stats.prev_preads;
let pwrites = self.stats.curr_pwrites - self.stats.prev_pwrites;
let preads_inflight = self.handles_pread.len();
let pwrites_inflight = self.handles_pwrite.len();
let thread_id = unsafe { libc::pthread_self() };
info!("threadid:{} polls:{:.0}/sec preads:{:.0}/sec pwrites:{:.0}/sec, inflight:({},{}) reqs/poll:{:.2}",
thread_id,
polls as f64 / elapsed_ms * 1000.0,
preads as f64 / elapsed_ms * 1000.0,
pwrites as f64 / elapsed_ms * 1000.0,
preads_inflight,
pwrites_inflight,
(preads as f64 + pwrites as f64) / polls as f64);
self.stats.prev_polls = self.stats.curr_polls;
self.stats.prev_preads = self.stats.curr_preads;
self.stats.prev_pwrites = self.stats.curr_pwrites;
self.last_report_ts = SystemTime::now();
}
// Run forever
Poll::Pending
}
}
// Register the eventfd with mio
struct AioEventFd {
inner: EventFD,
}
impl mio::Evented for AioEventFd {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
trace!("AioEventFd.register");
mio::unix::EventedFd(&self.inner.as_raw_fd()).register(poll, token, interest, opts)
}
fn reregister(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
trace!("AioEventFd.reregister");
mio::unix::EventedFd(&self.inner.as_raw_fd()).reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
trace!("AioEventFd.deregister");
mio::unix::EventedFd(&self.inner.as_raw_fd()).deregister(poll)
}
}
#[cfg(test)]
mod tests {
extern crate env_logger;
extern crate tempdir;
extern crate uuid;
use self::tempdir::TempDir;
use byteorder::{BigEndian, ByteOrder};
use std::fs::File;
use std::io;
use std::io::Write;
use std::path::Path;
use aio::{Message, Session};
use bytes::{Buf, BufMut, BytesMut, IntoBuf};
use libaio::directio::{DirectFile, FileAccess, Mode};
use futures::channel::oneshot;
use futures::{stream, Future, Sink, Stream};
#[test]
fn test_init() {
let session = Session::new(512);
assert!(session.is_ok());
}
// TODO: Test max queue depth
#[test]
fn test_pread() {
env_logger::init().unwrap();
let path = new_file_with_sequential_u64("pread", 1024);
let file = DirectFile::open(path, Mode::Open, FileAccess::Read, 4096).unwrap();
let session = Session::new(2).unwrap();
let mut buf = BytesMut::with_capacity(512);
unsafe { buf.set_len(512) };
let (tx, rx) = oneshot::channel();
let fut = session.inner.send(Message::PRead(file, 0, 512, buf, tx));
fut.wait();
let res = rx.wait();
assert!(res.is_ok());
let res = res.unwrap();
assert!(res.is_ok());
let (mut buf, err) = res.unwrap();
assert!(err.is_none());
for i in 0..(512 / 8) {
assert_eq!(i, buf.split_to(8).into_buf().get_u64::<BigEndian>());
}
assert_eq!(0, buf.len());
}
#[test]
fn test_pread_many() {
//env_logger::init().unwrap();
let path = new_file_with_sequential_u64("pread", 10240);
let session = Session::new(4).unwrap();
//let handle1 = session.handle();
//let handle2 = session.handle(); |
// let reads = (0..5).map(|_| {
// println!("foo"); | random_line_split | |
aio.rs | Ok(_) => (),
Err(e) => panic!("get_evfd_stream failed: {}", e),
};
let evfd = ctx.evfd.as_ref().unwrap().clone();
// Add the eventfd to the file descriptors we are
// interested in. This will use epoll under the hood.
let source = AioEventFd { inner: evfd };
let stream = PollEvented::new(source);
let fut = AioThread {
rx: rx,
ctx: ctx,
stream: stream,
handles_pread: Slab::with_capacity(max_queue_depth),
handles_pwrite: Slab::with_capacity(max_queue_depth),
last_report_ts: SystemTime::now(),
stats: AioStats {
..Default::default()
},
};
core.spawn(fut);
core.run().unwrap();
});
let tid = executor::block_on(tid_rx).unwrap();
Ok(Session {
inner: tx,
thread: t,
pthread: tid,
})
}
pub fn thread_id(&self) -> libc::pthread_t {
self.pthread
}
}
struct AioThread {
rx: mpsc::Receiver<Message>,
ctx: Iocontext<usize, BytesMut, BytesMut>,
stream: PollEvented<AioEventFd>,
// Handles to outstanding requests
handles_pread: Slab<HandleEntry>,
handles_pwrite: Slab<HandleEntry>,
last_report_ts: SystemTime,
stats: AioStats,
}
struct HandleEntry {
complete: oneshot::Sender<io::Result<(BytesMut, Option<io::Error>)>>,
}
#[derive(Default)]
struct | {
curr_polls: u64,
curr_preads: u64,
curr_pwrites: u64,
prev_polls: u64,
prev_preads: u64,
prev_pwrites: u64,
}
impl Future for AioThread {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
trace!(
"============ AioThread.poll (inflight_preads:{} inflight_pwrites:{})",
self.handles_pread.len(),
self.handles_pwrite.len()
);
self.stats.curr_polls += 1;
// If there are any responses from the kernel available, read
// as many as we can without blocking.
let ready = mio::Ready::readable();
if Pin::new(&mut self.stream)
.poll_read_ready(cx, ready)
.is_ready()
{
match self.ctx.results(0, 100, None) {
Ok(res) => {
trace!(" got {} AIO responses", res.len());
for (op, result) in res.into_iter() {
match op {
IoOp::Pread(retbuf, token) => {
trace!(
" got pread response, token {}, is error? {}",
token,
result.is_err()
);
match result {
Ok(_) => {
let entry = self.handles_pread.remove(token); //? .unwrap();
//let elapsed = entry.timestamp.elapsed().expect("Time drift!");
//trace!("pread returned in {} us", ((elapsed.as_secs() * 1_000_000_000) + elapsed.subsec_nanos() as u64) / 1000);
//entry.complete.send(Ok((retbuf, None))).expect("Could not send AioSession response");
entry.complete.send(Ok((retbuf, None)));
}
Err(e) => panic!("pread error {:?}", e),
}
}
IoOp::Pwrite(retbuf, token) => {
trace!(
" got pwrite response, token {}, is error? {}",
token,
result.is_err()
);
match result {
Ok(_) => {
let entry = self.handles_pwrite.remove(token); //? .unwrap();
entry.complete.send(Ok((retbuf, None)));
}
Err(e) => panic!("pwrite error {:?}", e),
}
}
_ => (),
}
}
}
Err(e) => panic!("ctx.results failed: {:?}", e),
}
};
// Read all available incoming requests, enqueue in AIO batch
loop {
let msg = match Pin::new(&mut self.rx).poll_next(cx) {
Poll::Ready(Some(msg)) => msg,
Poll::Ready(None) => break,
Poll::Pending => break, // AioThread.poll is automatically scheduled
};
match msg {
Message::PRead(file, offset, len, buf, complete) => {
self.stats.curr_preads += 1;
// The self is a Pin<&mut Self>. Obtaining mutable references to the fields
// will require going through DerefMut, which requires unique borrow.
// You can avoid the issue by dereferencing self once on entry to the method
// let this = &mut *self, and then continue accessing it
// through this.
// The basic idea is that each access to self.deref_mut()
// basically will create a new mutable reference to self, if
// you do it multiple times you get the error, so by
// effectively calling deref_mut by hand I can save the
// reference once and use it when needed.
let this = &mut *self;
let entry = this.handles_pread.vacant_entry();
let key = entry.key();
match this.ctx.pread(&file, buf, offset as i64, len, key) {
Ok(()) => {
entry.insert(HandleEntry { complete: complete });
}
Err((buf, _token)) => {
complete
.send(Ok((
buf,
Some(io::Error::new(io::ErrorKind::Other, "pread failed")),
)))
.expect("Could not send AioThread error response");
}
};
}
Message::PWrite(file, offset, buf, complete) => {
self.stats.curr_pwrites += 1;
let this = &mut *self;
let entry = this.handles_pwrite.vacant_entry();
let key = entry.key();
match this.ctx.pwrite(&file, buf, offset as i64, key) {
Ok(()) => {
entry.insert(HandleEntry { complete: complete });
}
Err((buf, _token)) => {
complete
.send(Ok((
buf,
Some(io::Error::new(io::ErrorKind::Other, "pread failed")),
)))
.expect("Could not send AioThread error response");
}
}
}
}
// TODO: If max queue depth is reached, do not receive any
// more messages, will cause clients to block
}
// TODO: Need busywait for submit timeout
trace!(" batch size {}", self.ctx.batched());
while self.ctx.batched() > 0 {
if let Err(e) = self.ctx.submit() {
panic!("batch submit failed {:?}", e);
}
}
let need_read = self.handles_pread.len() > 0 || self.handles_pwrite.len() > 0;
if need_read {
// Not sure I totally understand how the old need_read works vs the
// new clear_read_ready call.
trace!(" calling stream.clear_read_ready()");
Pin::new(&mut self.stream).clear_read_ready(cx, ready);
}
// Print some useful stats
if self.stats.curr_polls % 10000 == 0 {
let elapsed = self.last_report_ts.elapsed().expect("Time drift!");
let elapsed_ms = ((elapsed.as_secs() * 1_000_000_000) as f64
+ elapsed.subsec_nanos() as f64)
/ 1000000.0;
let polls = self.stats.curr_polls - self.stats.prev_polls;
let preads = self.stats.curr_preads - self.stats.prev_preads;
let pwrites = self.stats.curr_pwrites - self.stats.prev_pwrites;
let preads_inflight = self.handles_pread.len();
let pwrites_inflight = self.handles_pwrite.len();
let thread_id = unsafe { libc::pthread_self() };
info!("threadid:{} polls:{:.0}/sec preads:{:.0}/sec pwrites:{:.0}/sec, inflight:({},{}) reqs/poll:{:.2}",
thread_id,
polls as f64 / elapsed_ms * 1000.0,
preads as f64 / elapsed_ms * 1000.0,
pwrites as f64 / elapsed_ms * 1000.0,
preads_inflight,
pwrites_inflight,
(preads as f64 + pwrites as f64) / polls as f64);
self.stats.prev_polls = self.stats.curr_polls;
self.stats.prev_preads = self.stats.curr_preads;
self.stats.prev_pwrites = self.stats.curr_pwrites;
self.last_report_ts = SystemTime::now();
}
// Run forever
Poll::Pending
}
}
// Register the eventfd with mio
struct AioEventFd {
inner: EventFD,
}
impl mio::Evented | AioStats | identifier_name |
workflow.py | gold standard for crossvalidation, and then remove the new gold standard from the priors
"""
utils.Debug.vprint("Resampling prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
_, self.gold_standard = split_for_cv(self.gold_standard,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
self.priors_data, self.gold_standard = remove_prior_circularity(self.priors_data, self.gold_standard,
split_axis=self.cv_split_axis)
utils.Debug.vprint("Selected prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
def shuffle_priors(self):
"""
Shuffle prior labels if shuffle_prior_axis is set
"""
if self.shuffle_prior_axis is None:
return None
elif self.shuffle_prior_axis == 0:
# Shuffle index (genes) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] gene data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.index.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=0, random_state=self.random_seed)
self.priors_data.index = prior_index
elif self.shuffle_prior_axis == 1:
# Shuffle columns (TFs) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] TF data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.columns.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=1, random_state=self.random_seed)
self.priors_data.columns = prior_index
else:
raise ValueError("shuffle_prior_axis must be 0 or 1")
def input_path(self, filename):
"""
Join filename to input_dir
"""
return os.path.abspath(os.path.expanduser(os.path.join(self.input_dir, filename)))
def input_dataframe(self, filename, **kwargs):
"""
Read a file in as a pandas dataframe
"""
# Set defaults for index_col and header
kwargs['index_col'] = kwargs.pop('index_col', 0)
kwargs['header'] = kwargs.pop('header', 0)
# Use any kwargs for this function and any file settings from default
file_settings = self.file_format_settings.copy()
file_settings.update(kwargs)
# Update the file settings with anything that's in file-specific overrides
if filename in self.file_format_overrides:
file_settings.update(self.file_format_overrides[filename])
# Load a dataframe
return pd.read_csv(self.input_path(filename), **file_settings)
def append_to_path(self, var_name, to_append):
"""
Add a string to an existing path variable in class
"""
path = getattr(self, var_name, None)
if path is None:
raise ValueError("Cannot append {to_append} to {var_name} (Which is None)".format(to_append=to_append,
var_name=var_name))
setattr(self, var_name, os.path.join(path, to_append))
@staticmethod
def create_default_meta_data(expression_matrix):
"""
Create a meta_data dataframe from basic defaults
"""
metadata_rows = expression_matrix.columns.tolist()
metadata_defaults = {"isTs": "FALSE", "is1stLast": "e", "prevCol": "NA", "del.t": "NA", "condName": None}
data = {}
for key in metadata_defaults.keys():
data[key] = pd.Series(data=[metadata_defaults[key] if metadata_defaults[key] else i for i in metadata_rows])
return pd.DataFrame(data)
def filter_expression_and_priors(self):
"""
Guarantee that each row of the prior is in the expression and vice versa.
Also filter the priors to only includes columns, transcription factors, that are in the tf_names list
"""
expressed_targets = self.expression_matrix.index
expressed_or_prior = expressed_targets.union(self.priors_data.columns)
keeper_regulators = expressed_or_prior.intersection(self.tf_names)
if len(keeper_regulators) == 0 or len(expressed_targets) == 0:
raise ValueError("Filtering will result in a priors with at least one axis of 0 length")
self.priors_data = self.priors_data.reindex(expressed_targets, axis=0)
self.priors_data = self.priors_data.reindex(keeper_regulators, axis=1)
self.priors_data = pd.DataFrame.fillna(self.priors_data, 0)
self.shuffle_priors()
def get_bootstraps(self):
"""
Generate sequence of bootstrap parameter objects for run.
"""
col_range = range(self.response.shape[1])
random_state = np.random.RandomState(seed=self.random_seed)
return random_state.choice(col_range, size=(self.num_bootstraps, self.response.shape[1])).tolist()
def emit_results(self, betas, rescaled_betas, gold_standard, priors):
"""
Output result report(s) for workflow run.
"""
raise NotImplementedError # implement in subclass
def is_master(self):
"""
Return True if this is the master thread
"""
return MPControl.is_master
def create_output_dir(self):
"""
Set a default output_dir if nothing is set. Create the path if it doesn't exist.
"""
if self.output_dir is None:
new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))
try:
os.makedirs(self.output_dir)
except OSError:
pass
def create_inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):
"""
This is the factory method to create workflow ckasses that combine preprocessing and postprocessing (from workflow)
with a regression method (from regression)
:param regression: RegressionWorkflow subclass
A class object which implements the run_regression and run_bootstrap methods for a specific regression strategy
:param workflow: WorkflowBase subclass
A class object which implements the necessary data loading and preprocessing to create design & response data
for the regression strategy, and then the postprocessing to turn regression betas into a network
:return RegressWorkflow:
This returns an uninstantiated class which is the multi-inheritance result of both the regression workflow and
the preprocessing/postprocessing workflow
"""
# Decide which preprocessing/postprocessing workflow to use
# String arguments are parsed for convenience in the run script
if isinstance(workflow, basestring):
if workflow == "base":
workflow_class = WorkflowBase
elif workflow == "tfa":
from inferelator.tfa_workflow import TFAWorkFlow
workflow_class = TFAWorkFlow
elif workflow == "amusr":
from inferelator.amusr_workflow import SingleCellMultiTask
workflow_class = SingleCellMultiTask
elif workflow == "single-cell":
from inferelator.single_cell_workflow import SingleCellWorkflow
workflow_class = SingleCellWorkflow
else:
raise ValueError("{val} is not a string that can be mapped to a workflow class".format(val=workflow))
# Or just use a workflow class directly
elif inspect.isclass(workflow) and issubclass(workflow, WorkflowBase):
workflow_class = workflow
else:
raise ValueError("Workflow must be a string that maps to a workflow class or an actual workflow class")
# Decide which regression workflow to use
# Return just the workflow if regression is set to None
if regression is None:
return workflow_class
# String arguments are parsed for convenience in the run script
elif isinstance(regression, basestring):
if regression == "bbsr":
from inferelator.regression.bbsr_python import BBSRRegressionWorkflow
regression_class = BBSRRegressionWorkflow
elif regression == "elasticnet":
from inferelator.regression.elasticnet_python import ElasticNetWorkflow
regression_class = ElasticNetWorkflow
elif regression == "amusr":
from inferelator.regression.amusr_regression import AMUSRRegressionWorkflow
regression_class = AMUSRRegressionWorkflow
else:
raise ValueError("{val} is not a string that can be mapped to a regression class".format(val=regression))
# Or just use a regression class directly
elif inspect.isclass(regression) and issubclass(regression, RegressionWorkflow):
regression_class = regression
else:
raise ValueError("Regression must be a string that maps to a regression class or an actual regression class")
class RegressWorkflow(regression_class, workflow_class):
regression_type = regression_class
return RegressWorkflow
def inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):
"""
Create and instantiate a workflow
:param regression: RegressionWorkflow subclass
A class object which implements the run_regression and run_bootstrap methods for a specific regression strategy
:param workflow: WorkflowBase subclass
A class object which implements the necessary data loading and preprocessing to create design & response data
for the regression strategy, and then the postprocessing to turn regression betas into a network
:return RegressWorkflow:
This returns an initialized object which is the multi-inheritance result of both the regression workflow and | the preprocessing/postprocessing workflow
"""
return create_inferelator_workflow(regression=regression, workflow=workflow)() | random_line_split | |
workflow.py | 1] == 1
self.tf_names = tfs.values.flatten().tolist()
def read_metadata(self, file=None):
"""
Read metadata file into meta_data or make fake metadata
"""
if file is None:
file = self.meta_data_file
try:
self.meta_data = self.input_dataframe(file, index_col=None)
except IOError:
self.meta_data = self.create_default_meta_data(self.expression_matrix)
def set_gold_standard_and_priors(self):
"""
Read priors file into priors_data and gold standard file into gold_standard
"""
self.priors_data = self.input_dataframe(self.priors_file)
if self.split_priors_for_gold_standard:
self.split_priors_into_gold_standard()
else:
self.gold_standard = self.input_dataframe(self.gold_standard_file)
if self.split_gold_standard_for_crossvalidation:
self.cross_validate_gold_standard()
try:
check.index_values_unique(self.priors_data.index)
except ValueError as v_err:
utils.Debug.vprint("Duplicate gene(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
try:
check.index_values_unique(self.priors_data.columns)
except ValueError as v_err:
utils.Debug.vprint("Duplicate tf(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
def split_priors_into_gold_standard(self):
"""
Break priors_data in half and give half to the gold standard
"""
if self.gold_standard is not None:
utils.Debug.vprint("Existing gold standard is being replaced by a split from the prior", level=0)
self.priors_data, self.gold_standard = split_for_cv(self.priors_data,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
utils.Debug.vprint("Prior split into a prior {pr} and a gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape),
level=0)
def cross_validate_gold_standard(self):
"""
Sample the gold standard for crossvalidation, and then remove the new gold standard from the priors
"""
utils.Debug.vprint("Resampling prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
_, self.gold_standard = split_for_cv(self.gold_standard,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
self.priors_data, self.gold_standard = remove_prior_circularity(self.priors_data, self.gold_standard,
split_axis=self.cv_split_axis)
utils.Debug.vprint("Selected prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
def shuffle_priors(self):
"""
Shuffle prior labels if shuffle_prior_axis is set
"""
if self.shuffle_prior_axis is None:
return None
elif self.shuffle_prior_axis == 0:
# Shuffle index (genes) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] gene data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.index.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=0, random_state=self.random_seed)
self.priors_data.index = prior_index
elif self.shuffle_prior_axis == 1:
# Shuffle columns (TFs) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] TF data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.columns.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=1, random_state=self.random_seed)
self.priors_data.columns = prior_index
else:
raise ValueError("shuffle_prior_axis must be 0 or 1")
def input_path(self, filename):
"""
Join filename to input_dir
"""
return os.path.abspath(os.path.expanduser(os.path.join(self.input_dir, filename)))
def input_dataframe(self, filename, **kwargs):
"""
Read a file in as a pandas dataframe
"""
# Set defaults for index_col and header
kwargs['index_col'] = kwargs.pop('index_col', 0)
kwargs['header'] = kwargs.pop('header', 0)
# Use any kwargs for this function and any file settings from default
file_settings = self.file_format_settings.copy()
file_settings.update(kwargs)
# Update the file settings with anything that's in file-specific overrides
if filename in self.file_format_overrides:
file_settings.update(self.file_format_overrides[filename])
# Load a dataframe
return pd.read_csv(self.input_path(filename), **file_settings)
def append_to_path(self, var_name, to_append):
"""
Add a string to an existing path variable in class
"""
path = getattr(self, var_name, None)
if path is None:
raise ValueError("Cannot append {to_append} to {var_name} (Which is None)".format(to_append=to_append,
var_name=var_name))
setattr(self, var_name, os.path.join(path, to_append))
@staticmethod
def create_default_meta_data(expression_matrix):
"""
Create a meta_data dataframe from basic defaults
"""
metadata_rows = expression_matrix.columns.tolist()
metadata_defaults = {"isTs": "FALSE", "is1stLast": "e", "prevCol": "NA", "del.t": "NA", "condName": None}
data = {}
for key in metadata_defaults.keys():
data[key] = pd.Series(data=[metadata_defaults[key] if metadata_defaults[key] else i for i in metadata_rows])
return pd.DataFrame(data)
def filter_expression_and_priors(self):
"""
Guarantee that each row of the prior is in the expression and vice versa.
Also filter the priors to only includes columns, transcription factors, that are in the tf_names list
"""
expressed_targets = self.expression_matrix.index
expressed_or_prior = expressed_targets.union(self.priors_data.columns)
keeper_regulators = expressed_or_prior.intersection(self.tf_names)
if len(keeper_regulators) == 0 or len(expressed_targets) == 0:
raise ValueError("Filtering will result in a priors with at least one axis of 0 length")
self.priors_data = self.priors_data.reindex(expressed_targets, axis=0)
self.priors_data = self.priors_data.reindex(keeper_regulators, axis=1)
self.priors_data = pd.DataFrame.fillna(self.priors_data, 0)
self.shuffle_priors()
def get_bootstraps(self):
"""
Generate sequence of bootstrap parameter objects for run.
"""
col_range = range(self.response.shape[1])
random_state = np.random.RandomState(seed=self.random_seed)
return random_state.choice(col_range, size=(self.num_bootstraps, self.response.shape[1])).tolist()
def emit_results(self, betas, rescaled_betas, gold_standard, priors):
"""
Output result report(s) for workflow run.
"""
raise NotImplementedError # implement in subclass
def is_master(self):
"""
Return True if this is the master thread
"""
return MPControl.is_master
def create_output_dir(self):
"""
Set a default output_dir if nothing is set. Create the path if it doesn't exist.
"""
if self.output_dir is None:
new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))
try:
os.makedirs(self.output_dir)
except OSError:
pass
def create_inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):
"""
This is the factory method to create workflow ckasses that combine preprocessing and postprocessing (from workflow)
with a regression method (from regression)
:param regression: RegressionWorkflow subclass
A class object which implements the run_regression and run_bootstrap methods for a specific regression strategy
:param workflow: WorkflowBase subclass
A class object which implements the necessary data loading and preprocessing to create design & response data
for the regression strategy, and then the postprocessing to turn regression betas into a network
:return RegressWorkflow:
This returns an uninstantiated class which is the multi-inheritance result of both the regression workflow and
the preprocessing/postprocessing workflow
"""
# Decide which preprocessing/postprocessing workflow to use
# String arguments are parsed for convenience in the run script
if isinstance(workflow, basestring):
if workflow == "base":
workflow_class = WorkflowBase
elif workflow == "tfa":
from inferelator.tfa_workflow import TFAWorkFlow
workflow_class = TFAWorkFlow
elif workflow == "amusr":
from inferelator.amusr_workflow import SingleCellMultiTask
workflow_class = SingleCellMultiTask
elif workflow == "single-cell":
from inferelator.single_cell_workflow import SingleCellWorkflow
workflow_class = SingleCellWorkflow
else:
raise ValueError("{val} is not a string that can be mapped to a workflow class".format(val=workflow))
# Or just use a workflow class directly
elif inspect.isclass(workflow) and issubclass(workflow, WorkflowBase):
| workflow_class = workflow | conditional_block | |
workflow.py | _AXIS
shuffle_prior_axis = None
# Computed data structures [G: Genes, K: Predictors, N: Conditions
expression_matrix = None # expression_matrix dataframe [G x N]
tf_names = None # tf_names list [k,]
meta_data = None # meta data dataframe [G x ?]
priors_data = None # priors data dataframe [G x K]
gold_standard = None # gold standard dataframe [G x K]
# Multiprocessing controller
initialize_mp = True
multiprocessing_controller = None
def __init__(self):
# Get environment variables
self.get_environmentals()
def initialize_multiprocessing(self):
"""
Register the multiprocessing controller if set and run .connect()
"""
if self.multiprocessing_controller is not None:
MPControl.set_multiprocess_engine(self.multiprocessing_controller)
MPControl.connect()
def get_environmentals(self):
"""
Load environmental variables into class variables
"""
for k, v in utils.slurm_envs(default.SBATCH_VARS_FOR_WORKFLOW).items():
setattr(self, k, v)
def startup(self):
"""
Startup by preprocessing all data into a ready format for regression.
"""
if self.initialize_mp:
self.initialize_multiprocessing()
self.startup_run()
self.startup_finish()
def startup_run(self):
"""
Execute any data preprocessing necessary before regression. Startup_run is mostly for reading in data
"""
raise NotImplementedError # implement in subclass
def startup_finish(self):
"""
Execute any data preprocessing necessary before regression. Startup_finish is mostly for preprocessing data
prior to regression
"""
raise NotImplementedError # implement in subclass
def run(self):
"""
Execute workflow, after all configuration.
"""
raise NotImplementedError # implement in subclass
def get_data(self):
"""
Read data files in to data structures.
"""
self.read_expression()
self.read_tfs()
self.read_metadata()
self.set_gold_standard_and_priors()
def read_expression(self, file=None):
"""
Read expression matrix file into expression_matrix
"""
if file is None:
file = self.expression_matrix_file
self.expression_matrix = self.input_dataframe(file)
def read_tfs(self, file=None):
"""
Read tf names file into tf_names
"""
# Load the class variable if no file is passed
file = self.tf_names_file if file is None else file
# Read in a dataframe with no header or index
tfs = self.input_dataframe(file, header=None, index_col=None)
# Cast the dataframe into a list
assert tfs.shape[1] == 1
self.tf_names = tfs.values.flatten().tolist()
def read_metadata(self, file=None):
"""
Read metadata file into meta_data or make fake metadata
"""
if file is None:
file = self.meta_data_file
try:
self.meta_data = self.input_dataframe(file, index_col=None)
except IOError:
self.meta_data = self.create_default_meta_data(self.expression_matrix)
def set_gold_standard_and_priors(self):
"""
Read priors file into priors_data and gold standard file into gold_standard
"""
self.priors_data = self.input_dataframe(self.priors_file)
if self.split_priors_for_gold_standard:
self.split_priors_into_gold_standard()
else:
self.gold_standard = self.input_dataframe(self.gold_standard_file)
if self.split_gold_standard_for_crossvalidation:
self.cross_validate_gold_standard()
try:
check.index_values_unique(self.priors_data.index)
except ValueError as v_err:
utils.Debug.vprint("Duplicate gene(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
try:
check.index_values_unique(self.priors_data.columns)
except ValueError as v_err:
utils.Debug.vprint("Duplicate tf(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
def split_priors_into_gold_standard(self):
"""
Break priors_data in half and give half to the gold standard
"""
if self.gold_standard is not None:
utils.Debug.vprint("Existing gold standard is being replaced by a split from the prior", level=0)
self.priors_data, self.gold_standard = split_for_cv(self.priors_data,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
utils.Debug.vprint("Prior split into a prior {pr} and a gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape),
level=0)
def cross_validate_gold_standard(self):
"""
Sample the gold standard for crossvalidation, and then remove the new gold standard from the priors
"""
utils.Debug.vprint("Resampling prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
_, self.gold_standard = split_for_cv(self.gold_standard,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
self.priors_data, self.gold_standard = remove_prior_circularity(self.priors_data, self.gold_standard,
split_axis=self.cv_split_axis)
utils.Debug.vprint("Selected prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
def shuffle_priors(self):
"""
Shuffle prior labels if shuffle_prior_axis is set
"""
if self.shuffle_prior_axis is None:
return None
elif self.shuffle_prior_axis == 0:
# Shuffle index (genes) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] gene data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.index.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=0, random_state=self.random_seed)
self.priors_data.index = prior_index
elif self.shuffle_prior_axis == 1:
# Shuffle columns (TFs) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] TF data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.columns.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=1, random_state=self.random_seed)
self.priors_data.columns = prior_index
else:
raise ValueError("shuffle_prior_axis must be 0 or 1")
def input_path(self, filename):
"""
Join filename to input_dir
"""
return os.path.abspath(os.path.expanduser(os.path.join(self.input_dir, filename)))
def input_dataframe(self, filename, **kwargs):
"""
Read a file in as a pandas dataframe
"""
# Set defaults for index_col and header
kwargs['index_col'] = kwargs.pop('index_col', 0)
kwargs['header'] = kwargs.pop('header', 0)
# Use any kwargs for this function and any file settings from default
file_settings = self.file_format_settings.copy()
file_settings.update(kwargs)
# Update the file settings with anything that's in file-specific overrides
if filename in self.file_format_overrides:
file_settings.update(self.file_format_overrides[filename])
# Load a dataframe
return pd.read_csv(self.input_path(filename), **file_settings)
def append_to_path(self, var_name, to_append):
"""
Add a string to an existing path variable in class
"""
path = getattr(self, var_name, None)
if path is None:
raise ValueError("Cannot append {to_append} to {var_name} (Which is None)".format(to_append=to_append,
var_name=var_name))
setattr(self, var_name, os.path.join(path, to_append))
@staticmethod
def | (expression_matrix):
"""
Create a meta_data dataframe from basic defaults
"""
metadata_rows = expression_matrix.columns.tolist()
metadata_defaults = {"isTs": "FALSE", "is1stLast": "e", "prevCol": "NA", "del.t": "NA", "condName": None}
data = {}
for key in metadata_defaults.keys():
data[key] = pd.Series(data=[metadata_defaults[key] if metadata_defaults[key] else i for i in metadata_rows])
return pd.DataFrame(data)
def filter_expression_and_priors(self):
"""
Guarantee that each row of the prior is in the expression and vice versa.
Also filter the priors to only includes columns, transcription factors, that are in the tf_names list
"""
expressed_targets = self.expression_matrix.index
expressed_or_prior = expressed_targets.union(self.priors_data.columns)
keeper_regulators = expressed_or_prior.intersection(self.tf_names)
if len(keeper_regulators) == 0 or len(expressed_targets) == 0:
raise ValueError("Filtering will result in a priors with at least one axis of 0 length")
self.priors_data = self.priors_data.reindex(expressed_targets, axis=0)
self.priors_data = self.priors_data.reindex(keeper_regulators, axis=1)
self.priors_data = pd.DataFrame.fillna(self.priors_data, 0)
self.shuffle_priors()
def get_bootstraps(self):
"""
Generate sequence of bootstrap parameter objects for run.
| create_default_meta_data | identifier_name |
workflow.py | _and_priors()
def read_expression(self, file=None):
"""
Read expression matrix file into expression_matrix
"""
if file is None:
file = self.expression_matrix_file
self.expression_matrix = self.input_dataframe(file)
def read_tfs(self, file=None):
"""
Read tf names file into tf_names
"""
# Load the class variable if no file is passed
file = self.tf_names_file if file is None else file
# Read in a dataframe with no header or index
tfs = self.input_dataframe(file, header=None, index_col=None)
# Cast the dataframe into a list
assert tfs.shape[1] == 1
self.tf_names = tfs.values.flatten().tolist()
def read_metadata(self, file=None):
"""
Read metadata file into meta_data or make fake metadata
"""
if file is None:
file = self.meta_data_file
try:
self.meta_data = self.input_dataframe(file, index_col=None)
except IOError:
self.meta_data = self.create_default_meta_data(self.expression_matrix)
def set_gold_standard_and_priors(self):
"""
Read priors file into priors_data and gold standard file into gold_standard
"""
self.priors_data = self.input_dataframe(self.priors_file)
if self.split_priors_for_gold_standard:
self.split_priors_into_gold_standard()
else:
self.gold_standard = self.input_dataframe(self.gold_standard_file)
if self.split_gold_standard_for_crossvalidation:
self.cross_validate_gold_standard()
try:
check.index_values_unique(self.priors_data.index)
except ValueError as v_err:
utils.Debug.vprint("Duplicate gene(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
try:
check.index_values_unique(self.priors_data.columns)
except ValueError as v_err:
utils.Debug.vprint("Duplicate tf(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
def split_priors_into_gold_standard(self):
"""
Break priors_data in half and give half to the gold standard
"""
if self.gold_standard is not None:
utils.Debug.vprint("Existing gold standard is being replaced by a split from the prior", level=0)
self.priors_data, self.gold_standard = split_for_cv(self.priors_data,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
utils.Debug.vprint("Prior split into a prior {pr} and a gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape),
level=0)
def cross_validate_gold_standard(self):
"""
Sample the gold standard for crossvalidation, and then remove the new gold standard from the priors
"""
utils.Debug.vprint("Resampling prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
_, self.gold_standard = split_for_cv(self.gold_standard,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
self.priors_data, self.gold_standard = remove_prior_circularity(self.priors_data, self.gold_standard,
split_axis=self.cv_split_axis)
utils.Debug.vprint("Selected prior {pr} and gold standard {gs}".format(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
def shuffle_priors(self):
"""
Shuffle prior labels if shuffle_prior_axis is set
"""
if self.shuffle_prior_axis is None:
return None
elif self.shuffle_prior_axis == 0:
# Shuffle index (genes) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] gene data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.index.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=0, random_state=self.random_seed)
self.priors_data.index = prior_index
elif self.shuffle_prior_axis == 1:
# Shuffle columns (TFs) in the priors_data
utils.Debug.vprint("Randomly shuffling prior [{sh}] TF data".format(sh=self.priors_data.shape))
prior_index = self.priors_data.columns.tolist()
self.priors_data = self.priors_data.sample(frac=1, axis=1, random_state=self.random_seed)
self.priors_data.columns = prior_index
else:
raise ValueError("shuffle_prior_axis must be 0 or 1")
def input_path(self, filename):
"""
Join filename to input_dir
"""
return os.path.abspath(os.path.expanduser(os.path.join(self.input_dir, filename)))
def input_dataframe(self, filename, **kwargs):
"""
Read a file in as a pandas dataframe
"""
# Set defaults for index_col and header
kwargs['index_col'] = kwargs.pop('index_col', 0)
kwargs['header'] = kwargs.pop('header', 0)
# Use any kwargs for this function and any file settings from default
file_settings = self.file_format_settings.copy()
file_settings.update(kwargs)
# Update the file settings with anything that's in file-specific overrides
if filename in self.file_format_overrides:
file_settings.update(self.file_format_overrides[filename])
# Load a dataframe
return pd.read_csv(self.input_path(filename), **file_settings)
def append_to_path(self, var_name, to_append):
"""
Add a string to an existing path variable in class
"""
path = getattr(self, var_name, None)
if path is None:
raise ValueError("Cannot append {to_append} to {var_name} (Which is None)".format(to_append=to_append,
var_name=var_name))
setattr(self, var_name, os.path.join(path, to_append))
@staticmethod
def create_default_meta_data(expression_matrix):
"""
Create a meta_data dataframe from basic defaults
"""
metadata_rows = expression_matrix.columns.tolist()
metadata_defaults = {"isTs": "FALSE", "is1stLast": "e", "prevCol": "NA", "del.t": "NA", "condName": None}
data = {}
for key in metadata_defaults.keys():
data[key] = pd.Series(data=[metadata_defaults[key] if metadata_defaults[key] else i for i in metadata_rows])
return pd.DataFrame(data)
def filter_expression_and_priors(self):
"""
Guarantee that each row of the prior is in the expression and vice versa.
Also filter the priors to only includes columns, transcription factors, that are in the tf_names list
"""
expressed_targets = self.expression_matrix.index
expressed_or_prior = expressed_targets.union(self.priors_data.columns)
keeper_regulators = expressed_or_prior.intersection(self.tf_names)
if len(keeper_regulators) == 0 or len(expressed_targets) == 0:
raise ValueError("Filtering will result in a priors with at least one axis of 0 length")
self.priors_data = self.priors_data.reindex(expressed_targets, axis=0)
self.priors_data = self.priors_data.reindex(keeper_regulators, axis=1)
self.priors_data = pd.DataFrame.fillna(self.priors_data, 0)
self.shuffle_priors()
def get_bootstraps(self):
"""
Generate sequence of bootstrap parameter objects for run.
"""
col_range = range(self.response.shape[1])
random_state = np.random.RandomState(seed=self.random_seed)
return random_state.choice(col_range, size=(self.num_bootstraps, self.response.shape[1])).tolist()
def emit_results(self, betas, rescaled_betas, gold_standard, priors):
"""
Output result report(s) for workflow run.
"""
raise NotImplementedError # implement in subclass
def is_master(self):
"""
Return True if this is the master thread
"""
return MPControl.is_master
def create_output_dir(self):
"""
Set a default output_dir if nothing is set. Create the path if it doesn't exist.
"""
if self.output_dir is None:
new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))
try:
os.makedirs(self.output_dir)
except OSError:
pass
def create_inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):
| """
This is the factory method to create workflow ckasses that combine preprocessing and postprocessing (from workflow)
with a regression method (from regression)
:param regression: RegressionWorkflow subclass
A class object which implements the run_regression and run_bootstrap methods for a specific regression strategy
:param workflow: WorkflowBase subclass
A class object which implements the necessary data loading and preprocessing to create design & response data
for the regression strategy, and then the postprocessing to turn regression betas into a network
:return RegressWorkflow:
This returns an uninstantiated class which is the multi-inheritance result of both the regression workflow and
the preprocessing/postprocessing workflow
"""
# Decide which preprocessing/postprocessing workflow to use
# String arguments are parsed for convenience in the run script
if isinstance(workflow, basestring):
if workflow == "base":
workflow_class = WorkflowBase
elif workflow == "tfa": | identifier_body | |
handshake.rs | /// If the client timestamp has been seen before, or is not strictly increasing,
/// we can abort the handshake early and avoid heavy Diffie-Hellman computations.
/// If the client timestamp is valid, we store it.
#[derive(Default)]
pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>);
impl AntiReplayTimestamps {
/// Returns true if the timestamp has already been observed for this peer
/// or if it's an old timestamp
pub fn is_replay(&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool {
if let Some(last_timestamp) = self.0.get(&pubkey) {
×tamp <= last_timestamp
} else {
false
}
}
/// Stores the timestamp
pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) {
self.0
.entry(pubkey)
.and_modify(|last_timestamp| *last_timestamp = timestamp)
.or_insert(timestamp);
}
}
/// The timestamp is sent as a payload, so that it is encrypted.
/// Note that a millisecond value is a 16-byte value in rust,
/// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes.
const PAYLOAD_SIZE: usize = 8;
// Noise Wrapper
// -------------
// Noise by default is not aware of the above or lower protocol layers,
// We thus need to build this wrapper around Noise to both:
//
// - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages)
// - understand how long noise messages we send and receive are,
// in order to pass them to the noise implementaiton
//
/// The Noise configuration to be used to perform a protocol upgrade on an underlying socket.
pub struct NoiseWrapper(noise::NoiseConfig);
impl NoiseWrapper {
/// Create a new NoiseConfig with the provided keypair
pub fn new(key: x25519::PrivateKey) -> Self {
Self(noise::NoiseConfig::new(key))
}
/// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX
/// handshake to establish a noise stream and exchange static public keys. Upon success,
/// returns the static public key of the remote as well as a NoiseStream.
// TODO(mimoo, philp9): this code could be inlined in transport.rs once the monolithic network is done
pub async fn upgrade_connection<TSocket>(
&self,
socket: TSocket,
origin: ConnectionOrigin,
anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>,
remote_public_key: Option<x25519::PublicKey>,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// perform the noise handshake
let socket = match origin {
ConnectionOrigin::Outbound => {
let remote_public_key = match remote_public_key {
Some(key) => key,
None if cfg!(any(test, feature = "fuzzing")) => unreachable!(),
None => {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"noise: SHOULD NOT HAPPEN: missing server's key when dialing",
));
}
};
self.dial(socket, anti_replay_timestamps.is_some(), remote_public_key)
.await?
}
ConnectionOrigin::Inbound => {
self.accept(socket, anti_replay_timestamps, trusted_peers)
.await?
}
};
// return remote public key with a socket including the noise stream
let remote_public_key = socket.get_remote_static();
Ok((remote_public_key, socket))
}
pub async fn dial<TSocket>(
&self,
mut socket: TSocket,
mutual_authentication: bool,
remote_public_key: x25519::PublicKey,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// in mutual authenticated networks, send a payload of the current timestamp (in milliseconds)
let payload = if mutual_authentication {
let now: u64 = time::SystemTime::now()
.duration_since(time::UNIX_EPOCH)
.expect("system clock should work")
.as_millis() as u64;
// e.g. [157, 126, 253, 97, 114, 1, 0, 0]
let now = now.to_le_bytes().to_vec();
Some(now)
} else {
None
};
// create first handshake message (-> e, es, s, ss)
let mut rng = rand::rngs::OsRng;
let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
let initiator_state = self
.0
.initiate_connection(
&mut rng,
&[],
remote_public_key,
payload.as_ref().map(|x| &x[..]),
&mut first_message,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// write the first handshake message
socket.write_all(&first_message).await?;
// flush
socket.flush().await?;
// receive the server's response (<- e, ee, se)
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
socket.read_exact(&mut server_response).await?;
// parse the server's response
// TODO: security logging here? (mimoo)
let (_, session) = self
.0
.finalize_connection(initiator_state, &server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
pub async fn accept<TSocket>(
&self,
mut socket: TSocket,
anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// receive the initiation message
let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
socket.read_exact(&mut client_init_message).await?;
// parse it
let (their_public_key, handshake_state, payload) = self
.0
.parse_client_init_message(&[], &client_init_message)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// make sure the public key is a validator before continuing (if we're in the validator network)
if let Some(trusted_peers) = trusted_peers {
let found = trusted_peers
.read()
.map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read trusted_peers lock",
)
})?
.iter()
.any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key);
if !found { | their_public_key
),
));
}
}
// if on a mutually authenticated network
if let Some(anti_replay_timestamps) = &anti_replay_timestamps {
// check that the payload received as the client timestamp (in seconds)
if payload.len() != PAYLOAD_SIZE {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"noise: client initiated connection without an 8-byte timestamp",
));
}
let mut client_timestamp = [0u8; PAYLOAD_SIZE];
client_timestamp.copy_from_slice(&payload);
let client_timestamp = u64::from_le_bytes(client_timestamp);
// check the timestamp is not a replay
let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read anti_replay_timestamps lock",
)
})?;
if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) {
// TODO: security logging the ip + blocking the ip? (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client initiated connection with a timestamp already seen before: {}",
client_timestamp
),
));
}
// store the timestamp
anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp);
}
// construct the response
let mut rng = | // TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client connecting to us with an unknown public key: {}", | random_line_split |
handshake.rs | If the client timestamp has been seen before, or is not strictly increasing,
/// we can abort the handshake early and avoid heavy Diffie-Hellman computations.
/// If the client timestamp is valid, we store it.
#[derive(Default)]
pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>);
impl AntiReplayTimestamps {
/// Returns true if the timestamp has already been observed for this peer
/// or if it's an old timestamp
pub fn is_replay(&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool {
if let Some(last_timestamp) = self.0.get(&pubkey) {
×tamp <= last_timestamp
} else {
false
}
}
/// Stores the timestamp
pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) {
self.0
.entry(pubkey)
.and_modify(|last_timestamp| *last_timestamp = timestamp)
.or_insert(timestamp);
}
}
/// The timestamp is sent as a payload, so that it is encrypted.
/// Note that a millisecond value is a 16-byte value in rust,
/// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes.
const PAYLOAD_SIZE: usize = 8;
// Noise Wrapper
// -------------
// Noise by default is not aware of the above or lower protocol layers,
// We thus need to build this wrapper around Noise to both:
//
// - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages)
// - understand how long noise messages we send and receive are,
// in order to pass them to the noise implementaiton
//
/// The Noise configuration to be used to perform a protocol upgrade on an underlying socket.
pub struct NoiseWrapper(noise::NoiseConfig);
impl NoiseWrapper {
/// Create a new NoiseConfig with the provided keypair
pub fn new(key: x25519::PrivateKey) -> Self {
Self(noise::NoiseConfig::new(key))
}
/// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX
/// handshake to establish a noise stream and exchange static public keys. Upon success,
/// returns the static public key of the remote as well as a NoiseStream.
// TODO(mimoo, philp9): this code could be inlined in transport.rs once the monolithic network is done
pub async fn upgrade_connection<TSocket>(
&self,
socket: TSocket,
origin: ConnectionOrigin,
anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>,
remote_public_key: Option<x25519::PublicKey>,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// perform the noise handshake
let socket = match origin {
ConnectionOrigin::Outbound => {
let remote_public_key = match remote_public_key {
Some(key) => key,
None if cfg!(any(test, feature = "fuzzing")) => unreachable!(),
None => |
};
self.dial(socket, anti_replay_timestamps.is_some(), remote_public_key)
.await?
}
ConnectionOrigin::Inbound => {
self.accept(socket, anti_replay_timestamps, trusted_peers)
.await?
}
};
// return remote public key with a socket including the noise stream
let remote_public_key = socket.get_remote_static();
Ok((remote_public_key, socket))
}
pub async fn dial<TSocket>(
&self,
mut socket: TSocket,
mutual_authentication: bool,
remote_public_key: x25519::PublicKey,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// in mutual authenticated networks, send a payload of the current timestamp (in milliseconds)
let payload = if mutual_authentication {
let now: u64 = time::SystemTime::now()
.duration_since(time::UNIX_EPOCH)
.expect("system clock should work")
.as_millis() as u64;
// e.g. [157, 126, 253, 97, 114, 1, 0, 0]
let now = now.to_le_bytes().to_vec();
Some(now)
} else {
None
};
// create first handshake message (-> e, es, s, ss)
let mut rng = rand::rngs::OsRng;
let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
let initiator_state = self
.0
.initiate_connection(
&mut rng,
&[],
remote_public_key,
payload.as_ref().map(|x| &x[..]),
&mut first_message,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// write the first handshake message
socket.write_all(&first_message).await?;
// flush
socket.flush().await?;
// receive the server's response (<- e, ee, se)
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
socket.read_exact(&mut server_response).await?;
// parse the server's response
// TODO: security logging here? (mimoo)
let (_, session) = self
.0
.finalize_connection(initiator_state, &server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
pub async fn accept<TSocket>(
&self,
mut socket: TSocket,
anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// receive the initiation message
let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
socket.read_exact(&mut client_init_message).await?;
// parse it
let (their_public_key, handshake_state, payload) = self
.0
.parse_client_init_message(&[], &client_init_message)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// make sure the public key is a validator before continuing (if we're in the validator network)
if let Some(trusted_peers) = trusted_peers {
let found = trusted_peers
.read()
.map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read trusted_peers lock",
)
})?
.iter()
.any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key);
if !found {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client connecting to us with an unknown public key: {}",
their_public_key
),
));
}
}
// if on a mutually authenticated network
if let Some(anti_replay_timestamps) = &anti_replay_timestamps {
// check that the payload received as the client timestamp (in seconds)
if payload.len() != PAYLOAD_SIZE {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"noise: client initiated connection without an 8-byte timestamp",
));
}
let mut client_timestamp = [0u8; PAYLOAD_SIZE];
client_timestamp.copy_from_slice(&payload);
let client_timestamp = u64::from_le_bytes(client_timestamp);
// check the timestamp is not a replay
let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read anti_replay_timestamps lock",
)
})?;
if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) {
// TODO: security logging the ip + blocking the ip? (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client initiated connection with a timestamp already seen before: {}",
client_timestamp
),
));
}
// store the timestamp
anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp);
}
// construct the response
let mut | {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"noise: SHOULD NOT HAPPEN: missing server's key when dialing",
));
} | conditional_block |
handshake.rs | /// If the client timestamp has been seen before, or is not strictly increasing,
/// we can abort the handshake early and avoid heavy Diffie-Hellman computations.
/// If the client timestamp is valid, we store it.
#[derive(Default)]
pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>);
impl AntiReplayTimestamps {
/// Returns true if the timestamp has already been observed for this peer
/// or if it's an old timestamp
pub fn | (&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool {
if let Some(last_timestamp) = self.0.get(&pubkey) {
×tamp <= last_timestamp
} else {
false
}
}
/// Stores the timestamp
pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) {
self.0
.entry(pubkey)
.and_modify(|last_timestamp| *last_timestamp = timestamp)
.or_insert(timestamp);
}
}
/// The timestamp is sent as a payload, so that it is encrypted.
/// Note that a millisecond value is a 16-byte value in rust,
/// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes.
const PAYLOAD_SIZE: usize = 8;
// Noise Wrapper
// -------------
// Noise by default is not aware of the above or lower protocol layers,
// We thus need to build this wrapper around Noise to both:
//
// - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages)
// - understand how long noise messages we send and receive are,
// in order to pass them to the noise implementaiton
//
/// The Noise configuration to be used to perform a protocol upgrade on an underlying socket.
pub struct NoiseWrapper(noise::NoiseConfig);
impl NoiseWrapper {
/// Create a new NoiseConfig with the provided keypair
pub fn new(key: x25519::PrivateKey) -> Self {
Self(noise::NoiseConfig::new(key))
}
/// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX
/// handshake to establish a noise stream and exchange static public keys. Upon success,
/// returns the static public key of the remote as well as a NoiseStream.
// TODO(mimoo, philp9): this code could be inlined in transport.rs once the monolithic network is done
pub async fn upgrade_connection<TSocket>(
&self,
socket: TSocket,
origin: ConnectionOrigin,
anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>,
remote_public_key: Option<x25519::PublicKey>,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// perform the noise handshake
let socket = match origin {
ConnectionOrigin::Outbound => {
let remote_public_key = match remote_public_key {
Some(key) => key,
None if cfg!(any(test, feature = "fuzzing")) => unreachable!(),
None => {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"noise: SHOULD NOT HAPPEN: missing server's key when dialing",
));
}
};
self.dial(socket, anti_replay_timestamps.is_some(), remote_public_key)
.await?
}
ConnectionOrigin::Inbound => {
self.accept(socket, anti_replay_timestamps, trusted_peers)
.await?
}
};
// return remote public key with a socket including the noise stream
let remote_public_key = socket.get_remote_static();
Ok((remote_public_key, socket))
}
pub async fn dial<TSocket>(
&self,
mut socket: TSocket,
mutual_authentication: bool,
remote_public_key: x25519::PublicKey,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// in mutual authenticated networks, send a payload of the current timestamp (in milliseconds)
let payload = if mutual_authentication {
let now: u64 = time::SystemTime::now()
.duration_since(time::UNIX_EPOCH)
.expect("system clock should work")
.as_millis() as u64;
// e.g. [157, 126, 253, 97, 114, 1, 0, 0]
let now = now.to_le_bytes().to_vec();
Some(now)
} else {
None
};
// create first handshake message (-> e, es, s, ss)
let mut rng = rand::rngs::OsRng;
let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
let initiator_state = self
.0
.initiate_connection(
&mut rng,
&[],
remote_public_key,
payload.as_ref().map(|x| &x[..]),
&mut first_message,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// write the first handshake message
socket.write_all(&first_message).await?;
// flush
socket.flush().await?;
// receive the server's response (<- e, ee, se)
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
socket.read_exact(&mut server_response).await?;
// parse the server's response
// TODO: security logging here? (mimoo)
let (_, session) = self
.0
.finalize_connection(initiator_state, &server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
pub async fn accept<TSocket>(
&self,
mut socket: TSocket,
anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>,
trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// receive the initiation message
let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
socket.read_exact(&mut client_init_message).await?;
// parse it
let (their_public_key, handshake_state, payload) = self
.0
.parse_client_init_message(&[], &client_init_message)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// make sure the public key is a validator before continuing (if we're in the validator network)
if let Some(trusted_peers) = trusted_peers {
let found = trusted_peers
.read()
.map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read trusted_peers lock",
)
})?
.iter()
.any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key);
if !found {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client connecting to us with an unknown public key: {}",
their_public_key
),
));
}
}
// if on a mutually authenticated network
if let Some(anti_replay_timestamps) = &anti_replay_timestamps {
// check that the payload received as the client timestamp (in seconds)
if payload.len() != PAYLOAD_SIZE {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"noise: client initiated connection without an 8-byte timestamp",
));
}
let mut client_timestamp = [0u8; PAYLOAD_SIZE];
client_timestamp.copy_from_slice(&payload);
let client_timestamp = u64::from_le_bytes(client_timestamp);
// check the timestamp is not a replay
let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read anti_replay_timestamps lock",
)
})?;
if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) {
// TODO: security logging the ip + blocking the ip? (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client initiated connection with a timestamp already seen before: {}",
client_timestamp
),
));
}
// store the timestamp
anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp);
}
// construct the response
let mut | is_replay | identifier_name |
acpi.rs | dp20.xsdt_addr.try_into()?) }
}
}
/// System Description Table types.
enum SdtType {
Xsdt,
Madt,
}
impl SdtType {
/// Returns the signature of the SDT.
fn signature(&self) -> &[u8] {
match self {
SdtType::Xsdt => b"XSDT",
SdtType::Madt => b"APIC",
}
}
}
/// System Description Table header of the ACPI specification. It is common to
/// all System Description Tables.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiSdtHeader {
signature: [u8; 4],
length: u32,
revision: u8,
checksum: u8,
oem_id: [u8; 6],
oem_table_id: [u8; 8],
oem_revision: u32,
creator_id: u32,
creator_revision: u32,
}
impl AcpiSdtHeader {
/// Creates a new `AcpiSdtHeader` from a given pointer.
///
/// # Errors
///
/// This function returns error if the signature of the table does not
/// match the provided `SdtType` or the checksum is invalid.
unsafe fn new(sdt_ptr: Ptr, sdt_type: SdtType) -> Result<Self, Error> {
// Parse SDT header.
let sdt_ptr = sdt_ptr.0 as *const AcpiSdtHeader;
let hdr = core::ptr::read_unaligned(sdt_ptr);
// Check SDT header's signature.
if hdr.signature != sdt_type.signature() {
return Err(Error::InvalidSignature);
}
// Check SDT header's checksum.
let checksum =
utils::add_bytes(sdt_ptr as *const u8, hdr.length as usize);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(hdr)
}
}
/// Maximum number of entries in the XSDT.
const ACPI_XSDT_ENTRIES_LEN: usize = 32;
/// Represents the Extended System Description Table (XSDT).
#[derive(Debug)]
pub struct Xsdt {
entries: [u64; ACPI_XSDT_ENTRIES_LEN],
num_entries: usize,
}
impl Xsdt {
/// Creates a new `Xsdt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// XSDT.
///
/// # Safety
///
/// The `Xsdt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(xsdt_ptr: Ptr) -> Result<Self, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(xsdt_ptr, SdtType::Xsdt)?;
// Calculate number of entries.
let entries_length = hdr.length as usize - ACPI_SDT_SIZE;
if entries_length % 8 != 0 {
return Err(Error::InvalidAcpiData);
}
let num_entries = entries_length / 8;
// Check that there is enough room for the entries in the fixed size
// array.
if num_entries > ACPI_XSDT_ENTRIES_LEN {
return Err(Error::BufferTooSmall);
}
// Parse entries.
let mut entries = [0u64; ACPI_XSDT_ENTRIES_LEN];
for (i, it) in entries.iter_mut().take(num_entries).enumerate() {
let ptr = (xsdt_ptr.0 as *const u8).add(ACPI_SDT_SIZE + i * 8)
as *const u64;
*it = core::ptr::read_unaligned(ptr);
}
Ok(Xsdt {
entries,
num_entries,
})
}
/// Returns the Multiple APIC Description Table (MADT).
pub fn madt(&self) -> Result<Madt, Error> {
// An `Xsdt` is only created after checking its signature and checksum
// Thus, we assume that the pointer to the MADT will be valid.
for &entry in self.entries.iter().take(self.num_entries) {
// Look for a table with the correct signature.
let ptr = entry as *const [u8; 4];
let signature = unsafe { core::ptr::read_unaligned(ptr) };
if signature == SdtType::Madt.signature() {
return unsafe { Madt::new(entry.try_into()?) };
}
}
// If we reach this point, the table could not be found.
Err(Error::NotFound)
}
}
/// Size of the SDT header.
const ACPI_MADT_FIELDS_SIZE: usize = core::mem::size_of::<AcpiMadtFields>();
/// Maximum number of entries in the MADT.
const ACPI_MADT_ENTRIES_LEN: usize = 256;
/// Extra fields of the Multiple APIC Description Table (MADT) in the ACPI
/// specification.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiMadtFields {
lapic_addr: u32,
flags: u32,
}
/// Processor Local APIC Structure in the ACPI specification.
#[repr(C, packed)]
struct AcpiMadtLapic {
ty: u8,
length: u8,
proc_uid: u8,
apic_id: u8,
flags: u32,
}
/// Represents a Processor Local APIC Structure.
#[derive(Debug, Default, Clone, Copy)]
pub struct MadtLapic {
proc_uid: u8,
apic_id: u8,
flags: u32,
}
impl MadtLapic {
/// Processor's UID.
pub fn proc_uid(&self) -> u8 {
self.proc_uid
}
/// Processor's local APIC ID.
pub fn acpi_id(&self) -> u8 {
self.apic_id
}
/// Local APIC flags.
///
/// Bit offset | Bit length | Flag
/// ---------- | ---------- | ---------------
/// 0 | 1 | Enabled
/// 1 | 1 | Online Capable
/// 2 | 30 | Reserved (zero)
pub fn flags(&self) -> u32 {
self.flags
}
}
/// Represents the Multiple APIC Description Table (MADT).
#[derive(Debug)]
pub struct Madt {
fields: AcpiMadtFields,
lapic_entries: [MadtLapic; ACPI_MADT_ENTRIES_LEN],
num_lapic_entries: usize,
}
impl Madt {
/// Creates a new `Madt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// MADT.
///
/// # Safety
///
/// The `Madt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(madt_ptr: Ptr) -> Result<Madt, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(madt_ptr, SdtType::Madt)?;
// Parse fields.
let fields = core::ptr::read_unaligned(
(madt_ptr.0 as *const u8).add(ACPI_SDT_SIZE)
as *const AcpiMadtFields,
);
// Parse entries.
let mut num_lapic_entries = 0;
let mut lapic_entries = [MadtLapic::default(); ACPI_MADT_ENTRIES_LEN];
let mut ptr = (madt_ptr.0 as *const u8)
.add(ACPI_SDT_SIZE + ACPI_MADT_FIELDS_SIZE);
let end = (madt_ptr.0 as *const u8).add(hdr.length as usize);
while ptr < end {
let ty = core::ptr::read_unaligned(ptr);
let length = core::ptr::read_unaligned(ptr.add(1));
// LAPIC.
if ty == 0 {
if num_lapic_entries >= ACPI_MADT_ENTRIES_LEN {
return Err(Error::BufferTooSmall);
}
let lapic =
core::ptr::read_unaligned(ptr as *const AcpiMadtLapic);
lapic_entries[num_lapic_entries] = MadtLapic {
proc_uid: lapic.proc_uid,
apic_id: lapic.apic_id,
flags: lapic.flags,
};
num_lapic_entries += 1;
}
ptr = ptr.add(length as usize);
}
Ok(Madt {
fields,
lapic_entries,
num_lapic_entries,
})
}
/// Local Interrupt Controller Address. In other words, the 32-bit physical
/// address at which each processor can access its local interrupt
/// controller.
pub fn lapic_addr(&self) -> u32 {
self.fields.lapic_addr
}
/// Multiple ACPI flags.
///
/// Bit offset | Bit length | Flag
/// ---------- | ---------- | ---------------
/// 0 | 1 | PCAT_COMPAT
/// 1 | 31 | Reserved (zero)
pub fn | flags | identifier_name | |
acpi.rs | the Root System Description Pointer (RSDP) of ACPI 2.0+.
#[derive(Debug)]
pub struct Rsdp20 {
rsdp20: AcpiRsdp20,
}
impl Rsdp20 {
/// Creates a new `Rsdp20` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// RSDP 2.0+ structure.
///
/// # Safety
///
/// The `Rsdp20` structure is created using a pointer. Thus, this function
/// is considered unsafe.
pub unsafe fn new(rsdp20_ptr: Ptr) -> Result<Self, Error> {
let rsdp20_ptr = rsdp20_ptr.0 as *const AcpiRsdp20;
let rsdp20 = core::ptr::read_unaligned(rsdp20_ptr);
// Check table's signature.
if rsdp20.signature != ACPI_RSDP_SIGNATURE {
return Err(Error::InvalidSignature);
}
// Check table's revision.
if rsdp20.revision < 2 {
return Err(Error::InvalidRevision);
}
// Check table's checksum.
let checksum = utils::add_bytes(
&rsdp20 as *const AcpiRsdp20 as *const u8,
rsdp20.length as usize,
);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(Rsdp20 { rsdp20 })
}
/// Returns the Extended System Description Table (XSDT).
pub fn xsdt(&self) -> Result<Xsdt, Error> {
// An `Rsdp20` is only created after checking its signature, checksum
// and revision. Thus, we assume that the pointer to the XSDT
// will be valid.
unsafe { Xsdt::new(self.rsdp20.xsdt_addr.try_into()?) }
}
}
/// System Description Table types.
enum SdtType {
Xsdt,
Madt,
}
impl SdtType {
/// Returns the signature of the SDT.
fn signature(&self) -> &[u8] {
match self {
SdtType::Xsdt => b"XSDT",
SdtType::Madt => b"APIC",
}
}
}
/// System Description Table header of the ACPI specification. It is common to
/// all System Description Tables.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiSdtHeader {
signature: [u8; 4],
length: u32,
revision: u8,
checksum: u8,
oem_id: [u8; 6],
oem_table_id: [u8; 8],
oem_revision: u32,
creator_id: u32,
creator_revision: u32,
}
impl AcpiSdtHeader {
/// Creates a new `AcpiSdtHeader` from a given pointer.
///
/// # Errors
///
/// This function returns error if the signature of the table does not
/// match the provided `SdtType` or the checksum is invalid.
unsafe fn new(sdt_ptr: Ptr, sdt_type: SdtType) -> Result<Self, Error> {
// Parse SDT header.
let sdt_ptr = sdt_ptr.0 as *const AcpiSdtHeader;
let hdr = core::ptr::read_unaligned(sdt_ptr);
// Check SDT header's signature.
if hdr.signature != sdt_type.signature() {
return Err(Error::InvalidSignature);
}
// Check SDT header's checksum.
let checksum =
utils::add_bytes(sdt_ptr as *const u8, hdr.length as usize);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(hdr)
}
}
/// Maximum number of entries in the XSDT.
const ACPI_XSDT_ENTRIES_LEN: usize = 32;
/// Represents the Extended System Description Table (XSDT).
#[derive(Debug)]
pub struct Xsdt {
entries: [u64; ACPI_XSDT_ENTRIES_LEN],
num_entries: usize,
}
impl Xsdt {
/// Creates a new `Xsdt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// XSDT.
///
/// # Safety
///
/// The `Xsdt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(xsdt_ptr: Ptr) -> Result<Self, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(xsdt_ptr, SdtType::Xsdt)?;
// Calculate number of entries.
let entries_length = hdr.length as usize - ACPI_SDT_SIZE;
if entries_length % 8 != 0 {
return Err(Error::InvalidAcpiData);
}
let num_entries = entries_length / 8;
// Check that there is enough room for the entries in the fixed size
// array.
if num_entries > ACPI_XSDT_ENTRIES_LEN {
return Err(Error::BufferTooSmall);
}
// Parse entries.
let mut entries = [0u64; ACPI_XSDT_ENTRIES_LEN];
for (i, it) in entries.iter_mut().take(num_entries).enumerate() {
let ptr = (xsdt_ptr.0 as *const u8).add(ACPI_SDT_SIZE + i * 8)
as *const u64;
*it = core::ptr::read_unaligned(ptr);
}
Ok(Xsdt {
entries,
num_entries,
})
}
/// Returns the Multiple APIC Description Table (MADT).
pub fn madt(&self) -> Result<Madt, Error> {
// An `Xsdt` is only created after checking its signature and checksum
// Thus, we assume that the pointer to the MADT will be valid.
for &entry in self.entries.iter().take(self.num_entries) {
// Look for a table with the correct signature.
let ptr = entry as *const [u8; 4];
let signature = unsafe { core::ptr::read_unaligned(ptr) };
if signature == SdtType::Madt.signature() |
}
// If we reach this point, the table could not be found.
Err(Error::NotFound)
}
}
/// Size of the SDT header.
const ACPI_MADT_FIELDS_SIZE: usize = core::mem::size_of::<AcpiMadtFields>();
/// Maximum number of entries in the MADT.
const ACPI_MADT_ENTRIES_LEN: usize = 256;
/// Extra fields of the Multiple APIC Description Table (MADT) in the ACPI
/// specification.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiMadtFields {
lapic_addr: u32,
flags: u32,
}
/// Processor Local APIC Structure in the ACPI specification.
#[repr(C, packed)]
struct AcpiMadtLapic {
ty: u8,
length: u8,
proc_uid: u8,
apic_id: u8,
flags: u32,
}
/// Represents a Processor Local APIC Structure.
#[derive(Debug, Default, Clone, Copy)]
pub struct MadtLapic {
proc_uid: u8,
apic_id: u8,
flags: u32,
}
impl MadtLapic {
/// Processor's UID.
pub fn proc_uid(&self) -> u8 {
self.proc_uid
}
/// Processor's local APIC ID.
pub fn acpi_id(&self) -> u8 {
self.apic_id
}
/// Local APIC flags.
///
/// Bit offset | Bit length | Flag
/// ---------- | ---------- | ---------------
/// 0 | 1 | Enabled
/// 1 | 1 | Online Capable
/// 2 | 30 | Reserved (zero)
pub fn flags(&self) -> u32 {
self.flags
}
}
/// Represents the Multiple APIC Description Table (MADT).
#[derive(Debug)]
pub struct Madt {
fields: AcpiMadtFields,
lapic_entries: [MadtLapic; ACPI_MADT_ENTRIES_LEN],
num_lapic_entries: usize,
}
impl Madt {
/// Creates a new `Madt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// MADT.
///
/// # Safety
///
/// The `Madt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(madt_ptr: Ptr) -> Result<Madt, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(madt_ptr, SdtType::Madt)?;
// Parse fields.
let fields = core::ptr::read_unaligned(
(madt_ptr.0 as *const | {
return unsafe { Madt::new(entry.try_into()?) };
} | conditional_block |
acpi.rs | .
pub unsafe fn new(rsdp20_ptr: Ptr) -> Result<Self, Error> {
let rsdp20_ptr = rsdp20_ptr.0 as *const AcpiRsdp20;
let rsdp20 = core::ptr::read_unaligned(rsdp20_ptr);
// Check table's signature.
if rsdp20.signature != ACPI_RSDP_SIGNATURE {
return Err(Error::InvalidSignature);
}
// Check table's revision.
if rsdp20.revision < 2 {
return Err(Error::InvalidRevision);
}
// Check table's checksum.
let checksum = utils::add_bytes(
&rsdp20 as *const AcpiRsdp20 as *const u8,
rsdp20.length as usize,
);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(Rsdp20 { rsdp20 })
}
/// Returns the Extended System Description Table (XSDT).
pub fn xsdt(&self) -> Result<Xsdt, Error> {
// An `Rsdp20` is only created after checking its signature, checksum
// and revision. Thus, we assume that the pointer to the XSDT
// will be valid.
unsafe { Xsdt::new(self.rsdp20.xsdt_addr.try_into()?) }
}
}
/// System Description Table types.
enum SdtType {
Xsdt,
Madt,
}
impl SdtType {
/// Returns the signature of the SDT.
fn signature(&self) -> &[u8] {
match self {
SdtType::Xsdt => b"XSDT",
SdtType::Madt => b"APIC",
}
}
}
/// System Description Table header of the ACPI specification. It is common to
/// all System Description Tables.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiSdtHeader {
signature: [u8; 4],
length: u32,
revision: u8,
checksum: u8,
oem_id: [u8; 6],
oem_table_id: [u8; 8],
oem_revision: u32,
creator_id: u32,
creator_revision: u32,
}
impl AcpiSdtHeader {
/// Creates a new `AcpiSdtHeader` from a given pointer.
///
/// # Errors
///
/// This function returns error if the signature of the table does not
/// match the provided `SdtType` or the checksum is invalid.
unsafe fn new(sdt_ptr: Ptr, sdt_type: SdtType) -> Result<Self, Error> {
// Parse SDT header.
let sdt_ptr = sdt_ptr.0 as *const AcpiSdtHeader;
let hdr = core::ptr::read_unaligned(sdt_ptr);
// Check SDT header's signature.
if hdr.signature != sdt_type.signature() {
return Err(Error::InvalidSignature);
}
// Check SDT header's checksum.
let checksum =
utils::add_bytes(sdt_ptr as *const u8, hdr.length as usize);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(hdr)
}
}
/// Maximum number of entries in the XSDT.
const ACPI_XSDT_ENTRIES_LEN: usize = 32;
/// Represents the Extended System Description Table (XSDT).
#[derive(Debug)]
pub struct Xsdt {
entries: [u64; ACPI_XSDT_ENTRIES_LEN],
num_entries: usize,
}
impl Xsdt {
/// Creates a new `Xsdt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// XSDT.
///
/// # Safety
///
/// The `Xsdt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(xsdt_ptr: Ptr) -> Result<Self, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(xsdt_ptr, SdtType::Xsdt)?;
// Calculate number of entries.
let entries_length = hdr.length as usize - ACPI_SDT_SIZE;
if entries_length % 8 != 0 {
return Err(Error::InvalidAcpiData);
}
let num_entries = entries_length / 8;
// Check that there is enough room for the entries in the fixed size
// array.
if num_entries > ACPI_XSDT_ENTRIES_LEN {
return Err(Error::BufferTooSmall);
}
// Parse entries.
let mut entries = [0u64; ACPI_XSDT_ENTRIES_LEN];
for (i, it) in entries.iter_mut().take(num_entries).enumerate() {
let ptr = (xsdt_ptr.0 as *const u8).add(ACPI_SDT_SIZE + i * 8)
as *const u64;
*it = core::ptr::read_unaligned(ptr);
}
Ok(Xsdt {
entries,
num_entries,
})
}
/// Returns the Multiple APIC Description Table (MADT).
pub fn madt(&self) -> Result<Madt, Error> {
// An `Xsdt` is only created after checking its signature and checksum
// Thus, we assume that the pointer to the MADT will be valid.
for &entry in self.entries.iter().take(self.num_entries) {
// Look for a table with the correct signature.
let ptr = entry as *const [u8; 4];
let signature = unsafe { core::ptr::read_unaligned(ptr) };
if signature == SdtType::Madt.signature() {
return unsafe { Madt::new(entry.try_into()?) };
}
}
// If we reach this point, the table could not be found.
Err(Error::NotFound)
}
}
/// Size of the SDT header.
const ACPI_MADT_FIELDS_SIZE: usize = core::mem::size_of::<AcpiMadtFields>();
/// Maximum number of entries in the MADT.
const ACPI_MADT_ENTRIES_LEN: usize = 256;
/// Extra fields of the Multiple APIC Description Table (MADT) in the ACPI
/// specification.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiMadtFields {
lapic_addr: u32,
flags: u32,
}
/// Processor Local APIC Structure in the ACPI specification.
#[repr(C, packed)]
struct AcpiMadtLapic {
ty: u8,
length: u8,
proc_uid: u8,
apic_id: u8,
flags: u32,
}
/// Represents a Processor Local APIC Structure.
#[derive(Debug, Default, Clone, Copy)]
pub struct MadtLapic {
proc_uid: u8,
apic_id: u8,
flags: u32,
}
impl MadtLapic {
/// Processor's UID.
pub fn proc_uid(&self) -> u8 {
self.proc_uid
}
/// Processor's local APIC ID.
pub fn acpi_id(&self) -> u8 {
self.apic_id
}
/// Local APIC flags.
///
/// Bit offset | Bit length | Flag
/// ---------- | ---------- | ---------------
/// 0 | 1 | Enabled
/// 1 | 1 | Online Capable
/// 2 | 30 | Reserved (zero)
pub fn flags(&self) -> u32 {
self.flags
}
}
/// Represents the Multiple APIC Description Table (MADT).
#[derive(Debug)]
pub struct Madt {
fields: AcpiMadtFields,
lapic_entries: [MadtLapic; ACPI_MADT_ENTRIES_LEN],
num_lapic_entries: usize,
}
impl Madt {
/// Creates a new `Madt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// MADT.
///
/// # Safety
///
/// The `Madt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(madt_ptr: Ptr) -> Result<Madt, Error> | {
// Parse header.
let hdr = AcpiSdtHeader::new(madt_ptr, SdtType::Madt)?;
// Parse fields.
let fields = core::ptr::read_unaligned(
(madt_ptr.0 as *const u8).add(ACPI_SDT_SIZE)
as *const AcpiMadtFields,
);
// Parse entries.
let mut num_lapic_entries = 0;
let mut lapic_entries = [MadtLapic::default(); ACPI_MADT_ENTRIES_LEN];
let mut ptr = (madt_ptr.0 as *const u8)
.add(ACPI_SDT_SIZE + ACPI_MADT_FIELDS_SIZE);
let end = (madt_ptr.0 as *const u8).add(hdr.length as usize);
while ptr < end {
let ty = core::ptr::read_unaligned(ptr); | identifier_body | |
acpi.rs | Represents the Root System Description Pointer (RSDP) of ACPI 2.0+.
#[derive(Debug)]
pub struct Rsdp20 {
rsdp20: AcpiRsdp20,
}
impl Rsdp20 {
/// Creates a new `Rsdp20` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// RSDP 2.0+ structure.
///
/// # Safety
///
/// The `Rsdp20` structure is created using a pointer. Thus, this function
/// is considered unsafe.
pub unsafe fn new(rsdp20_ptr: Ptr) -> Result<Self, Error> {
let rsdp20_ptr = rsdp20_ptr.0 as *const AcpiRsdp20;
let rsdp20 = core::ptr::read_unaligned(rsdp20_ptr);
// Check table's signature.
if rsdp20.signature != ACPI_RSDP_SIGNATURE {
return Err(Error::InvalidSignature);
}
// Check table's revision.
if rsdp20.revision < 2 {
return Err(Error::InvalidRevision);
}
// Check table's checksum.
let checksum = utils::add_bytes(
&rsdp20 as *const AcpiRsdp20 as *const u8,
rsdp20.length as usize,
);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(Rsdp20 { rsdp20 })
}
/// Returns the Extended System Description Table (XSDT).
pub fn xsdt(&self) -> Result<Xsdt, Error> {
// An `Rsdp20` is only created after checking its signature, checksum
// and revision. Thus, we assume that the pointer to the XSDT
// will be valid.
unsafe { Xsdt::new(self.rsdp20.xsdt_addr.try_into()?) }
}
}
/// System Description Table types.
enum SdtType { | Xsdt,
Madt,
}
impl SdtType {
/// Returns the signature of the SDT.
fn signature(&self) -> &[u8] {
match self {
SdtType::Xsdt => b"XSDT",
SdtType::Madt => b"APIC",
}
}
}
/// System Description Table header of the ACPI specification. It is common to
/// all System Description Tables.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiSdtHeader {
signature: [u8; 4],
length: u32,
revision: u8,
checksum: u8,
oem_id: [u8; 6],
oem_table_id: [u8; 8],
oem_revision: u32,
creator_id: u32,
creator_revision: u32,
}
impl AcpiSdtHeader {
/// Creates a new `AcpiSdtHeader` from a given pointer.
///
/// # Errors
///
/// This function returns error if the signature of the table does not
/// match the provided `SdtType` or the checksum is invalid.
unsafe fn new(sdt_ptr: Ptr, sdt_type: SdtType) -> Result<Self, Error> {
// Parse SDT header.
let sdt_ptr = sdt_ptr.0 as *const AcpiSdtHeader;
let hdr = core::ptr::read_unaligned(sdt_ptr);
// Check SDT header's signature.
if hdr.signature != sdt_type.signature() {
return Err(Error::InvalidSignature);
}
// Check SDT header's checksum.
let checksum =
utils::add_bytes(sdt_ptr as *const u8, hdr.length as usize);
if checksum != 0 {
return Err(Error::InvalidCheckSum);
}
Ok(hdr)
}
}
/// Maximum number of entries in the XSDT.
const ACPI_XSDT_ENTRIES_LEN: usize = 32;
/// Represents the Extended System Description Table (XSDT).
#[derive(Debug)]
pub struct Xsdt {
entries: [u64; ACPI_XSDT_ENTRIES_LEN],
num_entries: usize,
}
impl Xsdt {
/// Creates a new `Xsdt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// XSDT.
///
/// # Safety
///
/// The `Xsdt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(xsdt_ptr: Ptr) -> Result<Self, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(xsdt_ptr, SdtType::Xsdt)?;
// Calculate number of entries.
let entries_length = hdr.length as usize - ACPI_SDT_SIZE;
if entries_length % 8 != 0 {
return Err(Error::InvalidAcpiData);
}
let num_entries = entries_length / 8;
// Check that there is enough room for the entries in the fixed size
// array.
if num_entries > ACPI_XSDT_ENTRIES_LEN {
return Err(Error::BufferTooSmall);
}
// Parse entries.
let mut entries = [0u64; ACPI_XSDT_ENTRIES_LEN];
for (i, it) in entries.iter_mut().take(num_entries).enumerate() {
let ptr = (xsdt_ptr.0 as *const u8).add(ACPI_SDT_SIZE + i * 8)
as *const u64;
*it = core::ptr::read_unaligned(ptr);
}
Ok(Xsdt {
entries,
num_entries,
})
}
/// Returns the Multiple APIC Description Table (MADT).
pub fn madt(&self) -> Result<Madt, Error> {
// An `Xsdt` is only created after checking its signature and checksum
// Thus, we assume that the pointer to the MADT will be valid.
for &entry in self.entries.iter().take(self.num_entries) {
// Look for a table with the correct signature.
let ptr = entry as *const [u8; 4];
let signature = unsafe { core::ptr::read_unaligned(ptr) };
if signature == SdtType::Madt.signature() {
return unsafe { Madt::new(entry.try_into()?) };
}
}
// If we reach this point, the table could not be found.
Err(Error::NotFound)
}
}
/// Size of the SDT header.
const ACPI_MADT_FIELDS_SIZE: usize = core::mem::size_of::<AcpiMadtFields>();
/// Maximum number of entries in the MADT.
const ACPI_MADT_ENTRIES_LEN: usize = 256;
/// Extra fields of the Multiple APIC Description Table (MADT) in the ACPI
/// specification.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct AcpiMadtFields {
lapic_addr: u32,
flags: u32,
}
/// Processor Local APIC Structure in the ACPI specification.
#[repr(C, packed)]
struct AcpiMadtLapic {
ty: u8,
length: u8,
proc_uid: u8,
apic_id: u8,
flags: u32,
}
/// Represents a Processor Local APIC Structure.
#[derive(Debug, Default, Clone, Copy)]
pub struct MadtLapic {
proc_uid: u8,
apic_id: u8,
flags: u32,
}
impl MadtLapic {
/// Processor's UID.
pub fn proc_uid(&self) -> u8 {
self.proc_uid
}
/// Processor's local APIC ID.
pub fn acpi_id(&self) -> u8 {
self.apic_id
}
/// Local APIC flags.
///
/// Bit offset | Bit length | Flag
/// ---------- | ---------- | ---------------
/// 0 | 1 | Enabled
/// 1 | 1 | Online Capable
/// 2 | 30 | Reserved (zero)
pub fn flags(&self) -> u32 {
self.flags
}
}
/// Represents the Multiple APIC Description Table (MADT).
#[derive(Debug)]
pub struct Madt {
fields: AcpiMadtFields,
lapic_entries: [MadtLapic; ACPI_MADT_ENTRIES_LEN],
num_lapic_entries: usize,
}
impl Madt {
/// Creates a new `Madt` from a given pointer.
///
/// # Errors
///
/// This function returns error if the pointer does not point to a valid
/// MADT.
///
/// # Safety
///
/// The `Madt` structure is created using a pointer. Thus, this function is
/// considered unsafe.
pub unsafe fn new(madt_ptr: Ptr) -> Result<Madt, Error> {
// Parse header.
let hdr = AcpiSdtHeader::new(madt_ptr, SdtType::Madt)?;
// Parse fields.
let fields = core::ptr::read_unaligned(
(madt_ptr.0 as *const u | random_line_split | |
snapshot.rs | are devices, rather than virtual
// files. As such, we don't check those for size.
#[must_use]
fn is_kcore_ok() -> bool {
metadata(Path::new("/proc/kcore"))
.map(|x| x.len() > 0x2000)
.unwrap_or(false)
&& can_open(Path::new("/proc/kcore"))
}
// try to perform an action, either returning on success, or having the result
// of the error in an indented string.
//
// This special cases `DiskUsageEstimateExceeded` errors, as we want this to
// fail fast and bail out of the `try_method` caller.
macro_rules! try_method {
($func:expr) => {{
match $func {
Ok(x) => return Ok(x),
Err(err) => {
if matches!(
err,
Error::UnableToCreateSnapshotFromSource(ref x, _) if matches!(x.as_ref(), Error::DiskUsageEstimateExceeded{..}),
) {
return Err(err);
}
crate::indent(format!("{:?}", err), 4)
}
}
}};
}
pub struct Snapshot<'a, 'b> {
source: Option<&'b Source>,
destination: &'a Path,
memory_ranges: Vec<Range<u64>>,
version: u32,
max_disk_usage: Option<NonZeroU64>,
max_disk_usage_percentage: Option<f64>,
}
impl<'a, 'b> Snapshot<'a, 'b> {
/// Create a new memory snapshot.
///
/// The default version implements the `LiME` format.
#[must_use]
pub fn new(destination: &'a Path, memory_ranges: Vec<Range<u64>>) -> Self {
Self {
source: None,
destination,
memory_ranges,
version: 1,
max_disk_usage: None,
max_disk_usage_percentage: None,
}
}
/// Specify the maximum disk usage to stay under as a percentage
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage_percentage(self, max_disk_usage_percentage: Option<f64>) -> Self {
Self {
max_disk_usage_percentage,
..self
}
}
/// Specify the maximum disk space in MB to use
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage(self, max_disk_usage: Option<NonZeroU64>) -> Self {
Self {
max_disk_usage,
..self
}
}
/// Specify the source for creating the snapshot
#[must_use]
pub fn source(self, source: Option<&'b Source>) -> Self {
Self { source, ..self }
}
/// Specify the version of the snapshot format
#[must_use]
pub fn version(self, version: u32) -> Self {
Self { version, ..self }
}
fn create_source(&self, src: &Source) -> Result<()> {
match src {
Source::ProcKcore => self.kcore(),
Source::DevCrash => self.phys(Path::new("/dev/crash")),
Source::DevMem => self.phys(Path::new("/dev/mem")),
Source::Raw(s) => self.phys(s),
}
.map_err(|e| Error::UnableToCreateSnapshotFromSource(Box::new(e), src.clone()))
}
/// Create a memory snapshot
pub fn create(&self) -> Result<()> {
if let Some(src) = self.source {
self.create_source(src)?;
} else if self.destination == Path::new("/dev/stdout") {
// If we're writing to stdout, we can't start over if reading from a
// source fails. As such, we need to do more work to pick a source
// rather than just trying all available options.
if is_kcore_ok() {
self.create_source(&Source::ProcKcore)?;
} else if can_open(Path::new("/dev/crash")) {
self.create_source(&Source::DevCrash)?;
} else if can_open(Path::new("/dev/mem")) {
self.create_source(&Source::DevMem)?;
} else {
return Err(Error::UnableToCreateSnapshot(
"no source available".to_string(),
));
}
} else {
let crash_err = try_method!(self.create_source(&Source::DevCrash));
let kcore_err = try_method!(self.create_source(&Source::ProcKcore));
let devmem_err = try_method!(self.create_source(&Source::DevMem));
let reason = [String::new(), crash_err, kcore_err, devmem_err].join("\n");
return Err(Error::UnableToCreateSnapshot(crate::indent(reason, 4)));
}
Ok(())
}
// given a set of ranges from iomem and a set of Blocks derived from the
// pseudo-elf phys section headers, derive a set of ranges that can be used
// to create a snapshot.
fn find_kcore_blocks(ranges: &[Range<u64>], headers: &[Block]) -> Vec<Block> {
let mut result = vec![];
'outer: for range in ranges {
let mut range = range.clone();
'inner: for header in headers {
match (
header.range.contains(&range.start),
// TODO: ranges is currently inclusive, but not a
// RangeInclusive. this should be adjusted.
header.range.contains(&(range.end - 1)),
) {
(true, true) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.clone(),
};
result.push(block);
continue 'outer;
}
(true, false) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.start..header.range.end,
};
result.push(block);
range.start = header.range.end;
}
_ => {
continue 'inner;
}
};
}
}
result
}
/// Check disk usage of the destination
///
/// NOTE: This requires `Image` because we want to ensure this is called
/// after the file is created.
#[cfg(target_family = "unix")]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
disk_usage::check(
self.destination,
&self.memory_ranges,
self.max_disk_usage,
self.max_disk_usage_percentage,
)
}
/// Check disk usage of the destination
///
/// On non-Unix platforms, this operation is a no-op.
#[cfg(not(target_family = "unix"))]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
if self.max_disk_usage.is_some() || self.max_disk_usage_percentage.is_some() {
return Err(Error::Other(
"unable to check disk usage on this platform",
format!("os:{OS}"),
));
}
Ok(())
}
fn kcore(&self) -> Result<()> {
if !is_kcore_ok() {
return Err(Error::LockedDownKcore);
}
let mut image = Image::new(self.version, Path::new("/proc/kcore"), self.destination)?;
self.check_disk_usage(&image)?;
let file =
elf::ElfStream::<NativeEndian, _>::open_stream(&mut image.src).map_err(Error::Elf)?;
let mut segments: Vec<&ProgramHeader> = file
.segments()
.iter()
.filter(|x| x.p_type == PT_LOAD)
.collect();
segments.sort_by(|a, b| a.p_vaddr.cmp(&b.p_vaddr));
let first_vaddr = segments
.get(0)
.ok_or_else(|| Error::UnableToCreateSnapshot("no initial addresses".to_string()))?
.p_vaddr;
let first_start = self
.memory_ranges
.get(0)
.ok_or_else(|| Error::UnableToCreateSnapshot("no initial memory range".to_string()))?
.start;
let start = first_vaddr - first_start;
let mut physical_ranges = vec![];
for phdr in segments {
let entry_start = phdr.p_vaddr - start;
let entry_end = entry_start + phdr.p_memsz;
physical_ranges.push(Block {
range: entry_start..entry_end,
offset: phdr.p_offset,
});
}
let blocks = Self::find_kcore_blocks(&self.memory_ranges, &physical_ranges);
image.write_blocks(&blocks)?;
Ok(())
}
fn phys(&self, mem: &Path) -> Result<()> {
let is_crash = mem == Path::new("/dev/crash");
let blocks = self
.memory_ranges
.iter()
.map(|x| Block {
offset: x.start,
range: if is_crash {
x.start..((x.end >> 12) << 12)
} else {
x.start..x.end
},
})
.collect::<Vec<_>>();
let mut image = Image::new(self.version, mem, self.destination)?;
self.check_disk_usage(&image)?;
image.write_blocks(&blocks)?;
Ok(())
}
}
#[cfg(test)] | mod tests {
use super::*;
#[test]
fn translate_ranges() { | random_line_split | |
snapshot.rs | _>) -> std::fmt::Result {
format_error(self, f)
}
}
pub(crate) type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Clone, ValueEnum)]
pub enum Source {
/// Provides a read-only view of physical memory. Access to memory using
/// this device must be paged aligned and read one page at a time.
///
/// On RHEL based distributions, this device is frequently provided by
/// default. A loadable kernel module version is available as part of
/// the Linux utility `crash`:
/// <https://github.com/crash-utility/crash/tree/master/memory_driver>
#[value(name = "/dev/crash")]
DevCrash,
/// Provides a read-write view of physical memory, though AVML opens it in a
/// read-only fashion. Access to to memory using this device can be
/// disabled using the kernel configuration options `CONFIG_STRICT_DEVMEM`
/// or `CONFIG_IO_STRICT_DEVMEM`.
///
/// With `CONFIG_STRICT_DEVMEM`, only the first 1MB of memory can be
/// accessed.
#[value(name = "/dev/mem")]
DevMem,
/// Provides a virtual ELF coredump of kernel memory. This can be used to
/// access physical memory.
///
/// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but
/// is either inaccessible or doesn't allow access to all of the kernel
/// memory.
#[value(name = "/proc/kcore")]
ProcKcore,
/// User-specified path to a raw memory file
#[value(skip)]
Raw(PathBuf),
}
impl std::fmt::Display for Source {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::DevCrash => write!(f, "/dev/crash"),
Self::DevMem => write!(f, "/dev/mem"),
Self::ProcKcore => write!(f, "/proc/kcore"),
Self::Raw(path) => write!(f, "{}", path.display()),
}
}
}
#[must_use]
fn can_open(src: &Path) -> bool {
OpenOptions::new().read(true).open(src).is_ok()
}
// The file /proc/kcore is a pseudo-file in ELF core format that is 4KB+physical
// memory in size.
//
// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but is
// either inaccessible or doesn't allow access to all of the kernel memory.
//
// /dev/mem and /dev/crash, if available, are devices, rather than virtual
// files. As such, we don't check those for size.
#[must_use]
fn is_kcore_ok() -> bool |
// try to perform an action, either returning on success, or having the result
// of the error in an indented string.
//
// This special cases `DiskUsageEstimateExceeded` errors, as we want this to
// fail fast and bail out of the `try_method` caller.
macro_rules! try_method {
($func:expr) => {{
match $func {
Ok(x) => return Ok(x),
Err(err) => {
if matches!(
err,
Error::UnableToCreateSnapshotFromSource(ref x, _) if matches!(x.as_ref(), Error::DiskUsageEstimateExceeded{..}),
) {
return Err(err);
}
crate::indent(format!("{:?}", err), 4)
}
}
}};
}
pub struct Snapshot<'a, 'b> {
source: Option<&'b Source>,
destination: &'a Path,
memory_ranges: Vec<Range<u64>>,
version: u32,
max_disk_usage: Option<NonZeroU64>,
max_disk_usage_percentage: Option<f64>,
}
impl<'a, 'b> Snapshot<'a, 'b> {
/// Create a new memory snapshot.
///
/// The default version implements the `LiME` format.
#[must_use]
pub fn new(destination: &'a Path, memory_ranges: Vec<Range<u64>>) -> Self {
Self {
source: None,
destination,
memory_ranges,
version: 1,
max_disk_usage: None,
max_disk_usage_percentage: None,
}
}
/// Specify the maximum disk usage to stay under as a percentage
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage_percentage(self, max_disk_usage_percentage: Option<f64>) -> Self {
Self {
max_disk_usage_percentage,
..self
}
}
/// Specify the maximum disk space in MB to use
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage(self, max_disk_usage: Option<NonZeroU64>) -> Self {
Self {
max_disk_usage,
..self
}
}
/// Specify the source for creating the snapshot
#[must_use]
pub fn source(self, source: Option<&'b Source>) -> Self {
Self { source, ..self }
}
/// Specify the version of the snapshot format
#[must_use]
pub fn version(self, version: u32) -> Self {
Self { version, ..self }
}
fn create_source(&self, src: &Source) -> Result<()> {
match src {
Source::ProcKcore => self.kcore(),
Source::DevCrash => self.phys(Path::new("/dev/crash")),
Source::DevMem => self.phys(Path::new("/dev/mem")),
Source::Raw(s) => self.phys(s),
}
.map_err(|e| Error::UnableToCreateSnapshotFromSource(Box::new(e), src.clone()))
}
/// Create a memory snapshot
pub fn create(&self) -> Result<()> {
if let Some(src) = self.source {
self.create_source(src)?;
} else if self.destination == Path::new("/dev/stdout") {
// If we're writing to stdout, we can't start over if reading from a
// source fails. As such, we need to do more work to pick a source
// rather than just trying all available options.
if is_kcore_ok() {
self.create_source(&Source::ProcKcore)?;
} else if can_open(Path::new("/dev/crash")) {
self.create_source(&Source::DevCrash)?;
} else if can_open(Path::new("/dev/mem")) {
self.create_source(&Source::DevMem)?;
} else {
return Err(Error::UnableToCreateSnapshot(
"no source available".to_string(),
));
}
} else {
let crash_err = try_method!(self.create_source(&Source::DevCrash));
let kcore_err = try_method!(self.create_source(&Source::ProcKcore));
let devmem_err = try_method!(self.create_source(&Source::DevMem));
let reason = [String::new(), crash_err, kcore_err, devmem_err].join("\n");
return Err(Error::UnableToCreateSnapshot(crate::indent(reason, 4)));
}
Ok(())
}
// given a set of ranges from iomem and a set of Blocks derived from the
// pseudo-elf phys section headers, derive a set of ranges that can be used
// to create a snapshot.
fn find_kcore_blocks(ranges: &[Range<u64>], headers: &[Block]) -> Vec<Block> {
let mut result = vec![];
'outer: for range in ranges {
let mut range = range.clone();
'inner: for header in headers {
match (
header.range.contains(&range.start),
// TODO: ranges is currently inclusive, but not a
// RangeInclusive. this should be adjusted.
header.range.contains(&(range.end - 1)),
) {
(true, true) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.clone(),
};
result.push(block);
continue 'outer;
}
(true, false) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.start..header.range.end,
};
result.push(block);
range.start = header.range.end;
}
_ => {
continue 'inner;
}
};
}
}
result
}
/// Check disk usage of the destination
///
/// NOTE: This requires `Image` because we want to ensure this is called
/// after the file is created.
#[cfg(target_family = "unix")]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
disk_usage::check(
self.destination,
&self.memory_ranges,
self.max_disk_usage,
self.max_disk_usage_percentage,
)
}
/// Check disk usage of the destination
///
/// On non-Unix platforms, this operation | {
metadata(Path::new("/proc/kcore"))
.map(|x| x.len() > 0x2000)
.unwrap_or(false)
&& can_open(Path::new("/proc/kcore"))
} | identifier_body |
snapshot.rs | , are devices, rather than virtual
// files. As such, we don't check those for size.
#[must_use]
fn is_kcore_ok() -> bool {
metadata(Path::new("/proc/kcore"))
.map(|x| x.len() > 0x2000)
.unwrap_or(false)
&& can_open(Path::new("/proc/kcore"))
}
// try to perform an action, either returning on success, or having the result
// of the error in an indented string.
//
// This special cases `DiskUsageEstimateExceeded` errors, as we want this to
// fail fast and bail out of the `try_method` caller.
macro_rules! try_method {
($func:expr) => {{
match $func {
Ok(x) => return Ok(x),
Err(err) => {
if matches!(
err,
Error::UnableToCreateSnapshotFromSource(ref x, _) if matches!(x.as_ref(), Error::DiskUsageEstimateExceeded{..}),
) {
return Err(err);
}
crate::indent(format!("{:?}", err), 4)
}
}
}};
}
pub struct Snapshot<'a, 'b> {
source: Option<&'b Source>,
destination: &'a Path,
memory_ranges: Vec<Range<u64>>,
version: u32,
max_disk_usage: Option<NonZeroU64>,
max_disk_usage_percentage: Option<f64>,
}
impl<'a, 'b> Snapshot<'a, 'b> {
/// Create a new memory snapshot.
///
/// The default version implements the `LiME` format.
#[must_use]
pub fn new(destination: &'a Path, memory_ranges: Vec<Range<u64>>) -> Self {
Self {
source: None,
destination,
memory_ranges,
version: 1,
max_disk_usage: None,
max_disk_usage_percentage: None,
}
}
/// Specify the maximum disk usage to stay under as a percentage
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage_percentage(self, max_disk_usage_percentage: Option<f64>) -> Self {
Self {
max_disk_usage_percentage,
..self
}
}
/// Specify the maximum disk space in MB to use
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage(self, max_disk_usage: Option<NonZeroU64>) -> Self {
Self {
max_disk_usage,
..self
}
}
/// Specify the source for creating the snapshot
#[must_use]
pub fn source(self, source: Option<&'b Source>) -> Self {
Self { source, ..self }
}
/// Specify the version of the snapshot format
#[must_use]
pub fn version(self, version: u32) -> Self {
Self { version, ..self }
}
fn create_source(&self, src: &Source) -> Result<()> {
match src {
Source::ProcKcore => self.kcore(),
Source::DevCrash => self.phys(Path::new("/dev/crash")),
Source::DevMem => self.phys(Path::new("/dev/mem")),
Source::Raw(s) => self.phys(s),
}
.map_err(|e| Error::UnableToCreateSnapshotFromSource(Box::new(e), src.clone()))
}
/// Create a memory snapshot
pub fn create(&self) -> Result<()> {
if let Some(src) = self.source {
self.create_source(src)?;
} else if self.destination == Path::new("/dev/stdout") {
// If we're writing to stdout, we can't start over if reading from a
// source fails. As such, we need to do more work to pick a source
// rather than just trying all available options.
if is_kcore_ok() {
self.create_source(&Source::ProcKcore)?;
} else if can_open(Path::new("/dev/crash")) {
self.create_source(&Source::DevCrash)?;
} else if can_open(Path::new("/dev/mem")) {
self.create_source(&Source::DevMem)?;
} else {
return Err(Error::UnableToCreateSnapshot(
"no source available".to_string(),
));
}
} else {
let crash_err = try_method!(self.create_source(&Source::DevCrash));
let kcore_err = try_method!(self.create_source(&Source::ProcKcore));
let devmem_err = try_method!(self.create_source(&Source::DevMem));
let reason = [String::new(), crash_err, kcore_err, devmem_err].join("\n");
return Err(Error::UnableToCreateSnapshot(crate::indent(reason, 4)));
}
Ok(())
}
// given a set of ranges from iomem and a set of Blocks derived from the
// pseudo-elf phys section headers, derive a set of ranges that can be used
// to create a snapshot.
fn find_kcore_blocks(ranges: &[Range<u64>], headers: &[Block]) -> Vec<Block> {
let mut result = vec![];
'outer: for range in ranges {
let mut range = range.clone();
'inner: for header in headers {
match (
header.range.contains(&range.start),
// TODO: ranges is currently inclusive, but not a
// RangeInclusive. this should be adjusted.
header.range.contains(&(range.end - 1)),
) {
(true, true) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.clone(),
};
result.push(block);
continue 'outer;
}
(true, false) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.start..header.range.end,
};
result.push(block);
range.start = header.range.end;
}
_ => {
continue 'inner;
}
};
}
}
result
}
/// Check disk usage of the destination
///
/// NOTE: This requires `Image` because we want to ensure this is called
/// after the file is created.
#[cfg(target_family = "unix")]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
disk_usage::check(
self.destination,
&self.memory_ranges,
self.max_disk_usage,
self.max_disk_usage_percentage,
)
}
/// Check disk usage of the destination
///
/// On non-Unix platforms, this operation is a no-op.
#[cfg(not(target_family = "unix"))]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
if self.max_disk_usage.is_some() || self.max_disk_usage_percentage.is_some() {
return Err(Error::Other(
"unable to check disk usage on this platform",
format!("os:{OS}"),
));
}
Ok(())
}
fn kcore(&self) -> Result<()> {
if !is_kcore_ok() {
return Err(Error::LockedDownKcore);
}
let mut image = Image::new(self.version, Path::new("/proc/kcore"), self.destination)?;
self.check_disk_usage(&image)?;
let file =
elf::ElfStream::<NativeEndian, _>::open_stream(&mut image.src).map_err(Error::Elf)?;
let mut segments: Vec<&ProgramHeader> = file
.segments()
.iter()
.filter(|x| x.p_type == PT_LOAD)
.collect();
segments.sort_by(|a, b| a.p_vaddr.cmp(&b.p_vaddr));
let first_vaddr = segments
.get(0)
.ok_or_else(|| Error::UnableToCreateSnapshot("no initial addresses".to_string()))?
.p_vaddr;
let first_start = self
.memory_ranges
.get(0)
.ok_or_else(|| Error::UnableToCreateSnapshot("no initial memory range".to_string()))?
.start;
let start = first_vaddr - first_start;
let mut physical_ranges = vec![];
for phdr in segments {
let entry_start = phdr.p_vaddr - start;
let entry_end = entry_start + phdr.p_memsz;
physical_ranges.push(Block {
range: entry_start..entry_end,
offset: phdr.p_offset,
});
}
let blocks = Self::find_kcore_blocks(&self.memory_ranges, &physical_ranges);
image.write_blocks(&blocks)?;
Ok(())
}
fn phys(&self, mem: &Path) -> Result<()> {
let is_crash = mem == Path::new("/dev/crash");
let blocks = self
.memory_ranges
.iter()
.map(|x| Block {
offset: x.start,
range: if is_crash {
x.start..((x.end >> 12) << 12)
} else {
x.start..x.end
},
})
.collect::<Vec<_>>();
let mut image = Image::new(self.version, mem, self.destination)?;
self.check_disk_usage(&image)?;
image.write_blocks(&blocks)?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn | translate_ranges | identifier_name | |
snapshot.rs | <'_>) -> std::fmt::Result {
format_error(self, f)
}
}
pub(crate) type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Clone, ValueEnum)]
pub enum Source {
/// Provides a read-only view of physical memory. Access to memory using
/// this device must be paged aligned and read one page at a time.
///
/// On RHEL based distributions, this device is frequently provided by
/// default. A loadable kernel module version is available as part of
/// the Linux utility `crash`:
/// <https://github.com/crash-utility/crash/tree/master/memory_driver>
#[value(name = "/dev/crash")]
DevCrash,
/// Provides a read-write view of physical memory, though AVML opens it in a
/// read-only fashion. Access to to memory using this device can be
/// disabled using the kernel configuration options `CONFIG_STRICT_DEVMEM`
/// or `CONFIG_IO_STRICT_DEVMEM`.
///
/// With `CONFIG_STRICT_DEVMEM`, only the first 1MB of memory can be
/// accessed.
#[value(name = "/dev/mem")]
DevMem,
/// Provides a virtual ELF coredump of kernel memory. This can be used to
/// access physical memory.
///
/// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but
/// is either inaccessible or doesn't allow access to all of the kernel
/// memory.
#[value(name = "/proc/kcore")]
ProcKcore,
/// User-specified path to a raw memory file
#[value(skip)]
Raw(PathBuf),
}
impl std::fmt::Display for Source {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::DevCrash => write!(f, "/dev/crash"),
Self::DevMem => write!(f, "/dev/mem"),
Self::ProcKcore => write!(f, "/proc/kcore"),
Self::Raw(path) => write!(f, "{}", path.display()),
}
}
}
#[must_use]
fn can_open(src: &Path) -> bool {
OpenOptions::new().read(true).open(src).is_ok()
}
// The file /proc/kcore is a pseudo-file in ELF core format that is 4KB+physical
// memory in size.
//
// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but is
// either inaccessible or doesn't allow access to all of the kernel memory.
//
// /dev/mem and /dev/crash, if available, are devices, rather than virtual
// files. As such, we don't check those for size.
#[must_use]
fn is_kcore_ok() -> bool {
metadata(Path::new("/proc/kcore"))
.map(|x| x.len() > 0x2000)
.unwrap_or(false)
&& can_open(Path::new("/proc/kcore"))
}
// try to perform an action, either returning on success, or having the result
// of the error in an indented string.
//
// This special cases `DiskUsageEstimateExceeded` errors, as we want this to
// fail fast and bail out of the `try_method` caller.
macro_rules! try_method {
($func:expr) => {{
match $func {
Ok(x) => return Ok(x),
Err(err) => {
if matches!(
err,
Error::UnableToCreateSnapshotFromSource(ref x, _) if matches!(x.as_ref(), Error::DiskUsageEstimateExceeded{..}),
) {
return Err(err);
}
crate::indent(format!("{:?}", err), 4)
}
}
}};
}
pub struct Snapshot<'a, 'b> {
source: Option<&'b Source>,
destination: &'a Path,
memory_ranges: Vec<Range<u64>>,
version: u32,
max_disk_usage: Option<NonZeroU64>,
max_disk_usage_percentage: Option<f64>,
}
impl<'a, 'b> Snapshot<'a, 'b> {
/// Create a new memory snapshot.
///
/// The default version implements the `LiME` format.
#[must_use]
pub fn new(destination: &'a Path, memory_ranges: Vec<Range<u64>>) -> Self {
Self {
source: None,
destination,
memory_ranges,
version: 1,
max_disk_usage: None,
max_disk_usage_percentage: None,
}
}
/// Specify the maximum disk usage to stay under as a percentage
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage_percentage(self, max_disk_usage_percentage: Option<f64>) -> Self {
Self {
max_disk_usage_percentage,
..self
}
}
/// Specify the maximum disk space in MB to use
///
/// This is an estimation, calculated at start time
#[must_use]
pub fn max_disk_usage(self, max_disk_usage: Option<NonZeroU64>) -> Self {
Self {
max_disk_usage,
..self
}
}
/// Specify the source for creating the snapshot
#[must_use]
pub fn source(self, source: Option<&'b Source>) -> Self {
Self { source, ..self }
}
/// Specify the version of the snapshot format
#[must_use]
pub fn version(self, version: u32) -> Self {
Self { version, ..self }
}
fn create_source(&self, src: &Source) -> Result<()> {
match src {
Source::ProcKcore => self.kcore(),
Source::DevCrash => self.phys(Path::new("/dev/crash")),
Source::DevMem => self.phys(Path::new("/dev/mem")),
Source::Raw(s) => self.phys(s),
}
.map_err(|e| Error::UnableToCreateSnapshotFromSource(Box::new(e), src.clone()))
}
/// Create a memory snapshot
pub fn create(&self) -> Result<()> {
if let Some(src) = self.source {
self.create_source(src)?;
} else if self.destination == Path::new("/dev/stdout") {
// If we're writing to stdout, we can't start over if reading from a
// source fails. As such, we need to do more work to pick a source
// rather than just trying all available options.
if is_kcore_ok() | else if can_open(Path::new("/dev/crash")) {
self.create_source(&Source::DevCrash)?;
} else if can_open(Path::new("/dev/mem")) {
self.create_source(&Source::DevMem)?;
} else {
return Err(Error::UnableToCreateSnapshot(
"no source available".to_string(),
));
}
} else {
let crash_err = try_method!(self.create_source(&Source::DevCrash));
let kcore_err = try_method!(self.create_source(&Source::ProcKcore));
let devmem_err = try_method!(self.create_source(&Source::DevMem));
let reason = [String::new(), crash_err, kcore_err, devmem_err].join("\n");
return Err(Error::UnableToCreateSnapshot(crate::indent(reason, 4)));
}
Ok(())
}
// given a set of ranges from iomem and a set of Blocks derived from the
// pseudo-elf phys section headers, derive a set of ranges that can be used
// to create a snapshot.
fn find_kcore_blocks(ranges: &[Range<u64>], headers: &[Block]) -> Vec<Block> {
let mut result = vec![];
'outer: for range in ranges {
let mut range = range.clone();
'inner: for header in headers {
match (
header.range.contains(&range.start),
// TODO: ranges is currently inclusive, but not a
// RangeInclusive. this should be adjusted.
header.range.contains(&(range.end - 1)),
) {
(true, true) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.clone(),
};
result.push(block);
continue 'outer;
}
(true, false) => {
let block = Block {
offset: header.offset + range.start - header.range.start,
range: range.start..header.range.end,
};
result.push(block);
range.start = header.range.end;
}
_ => {
continue 'inner;
}
};
}
}
result
}
/// Check disk usage of the destination
///
/// NOTE: This requires `Image` because we want to ensure this is called
/// after the file is created.
#[cfg(target_family = "unix")]
fn check_disk_usage(&self, _: &Image) -> Result<()> {
disk_usage::check(
self.destination,
&self.memory_ranges,
self.max_disk_usage,
self.max_disk_usage_percentage,
)
}
/// Check disk usage of the destination
///
/// On non-Unix platforms, this operation | {
self.create_source(&Source::ProcKcore)?;
} | conditional_block |
real_comparison.py |
_last_rain=None
_last_rain_pos=-1
_lr_pos_in_file=-1
_lr_curfile=0
_rainvar="RAINNC"
_testvar=None
# _var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],"RAINNC",[op.add,"T2",300],"U","V","W"]
_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],[op.add,"T2",300],"U","V"]
_short_names=dict(QVAPOR="qv",QCLOUD="qc",QICE="qc",RAINNC="rain",T="t",T2="t",U="u",V="v",W="w",QRAIN="rain",
qv="qv",qc="qc",rain="rain",qr="rain",th="t",u="u",v="v",w="w")
_collapse_functions=dict(QVAPOR=np.mean,QCLOUD=np.sum,T=np.mean,U=np.mean,V=np.mean,W=np.mean,
QICE=np.sum,QRAIN=np.sum,QSNOW=np.sum,
qv=np.mean,qc=np.sum,th=np.mean,u=np.mean,v=np.mean,w=np.mean,
qi=np.sum,qr=np.sum,qs=np.sum,p=np.mean)
_wrf_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],"T2","U","V"]#[op.add,"T",290],"U","V"]
_icar_var_names=["qv",[op.add,"qc","qi"],[op.add,"qr","qs"],[exner,"th","p"],"u","v"]
x=slice(0,None) #by default take all data in the file in x,y, and z
y=slice(0,None)
# z=slice(0,None)
z=slice(0,10)
# zslices=dict(qv=slice(0,10),qc=slice(0,10),t=slice(1),)
# yslices=dict()
# yslices.setdefault(y)
llh
def __init__(self, filenames,start_pos=0,datatype="WRF"):
super(DataReader,self).__init__()
self.files=filenames
self._datamodel=datatype
if datatype=="WRF":
self._var_names=self._wrf_var_names
test_var=mygis.read_nc(self.files[0],self._var_names[0],returnNCvar=True)
self.times_per_file=test_var.data.shape[0]
test_var.ncfile.close()
self.zaxis=0
self.DIM_2D_SHAPE=3
self.DIM_3D_SHAPE=4
if datatype=="ICAR":
self._var_names=self._icar_var_names
self.times_per_file=1
self._rainvar="rain"
tmp=self.y
self.y=self.z
self.z=tmp
self.zaxis=1
self.DIM_2D_SHAPE=2
self.DIM_3D_SHAPE=3
#note this calls the setter which will set pos_in_file and cur_file
self.curpos=start_pos
def _get_collapsing_func(self,varname):
"""docstring for get_collapsing_func"""
try:
myfunc=self._collapse_functions[varname]
except:
myfunc=np.mean
return myfunc
def collapse_z(self,data,varname):
if len(data.shape)==3:
myfunc=self._get_collapsing_func(varname)
return myfunc(data,axis=self.zaxis)
else:
return data
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def curpos(self):
return self._curpos
@curpos.setter
def curpos(self,pos):
self._curpos=pos
self._pos_in_file= int(self._curpos) % int(self.times_per_file)
self._curfile = int(self._curpos) / int(self.times_per_file)
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def last_rain_pos(self):
return self._last_rain_pos
@curpos.setter
def last_rain_pos(self,pos):
self._last_rain_pos=pos
self._lr_pos_in_file= int(self._last_rain_pos) % int(self.times_per_file)
self._lr_curfile = int(self._last_rain_pos) / int(self.times_per_file)
# Get/Set the last_rain variable
@property
def last_rain(self):
if self._last_rain==None:
self.last_rain_pos=self.curpos-1
if (self._pos_in_file>0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[self._last_rain_pos,self.y,self.x]
nc_data.ncfile.close()
elif (self._curfile==0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
nx=nc_data.data.shape[1]
ny=nc_data.data.shape[2]
self._last_rain=np.zeros((nx,ny))[self.x,self.y]
nc_data.ncfile.close()
else:
nc_data=mygis.read_nc(self.files[self._curfile-1],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[-1,self.x,self.y]
nc_data.ncfile.close()
# else: we already have a valid _last_rain, just return it this should be the case most of the time
return self._last_rain
@last_rain.setter
def last_rain(self,value):
if hasattr(value,__iter__):
self.last_rain_pos=value[0]
self._last_rain=value[1]
else:
self.last_rain_pos=value
self._last_rain=None # the getter will automagically generate last_rain
def load_data(self,varname, filename=None, curtime=None):
if type(varname)!=str:
return varname
if filename==None:
filename=self.files[self._curfile]
if curtime==None:
curtime=self._pos_in_file
data=mygis.read_nc(filename,varname,returnNCvar=True)
dimlen=len(data.data.shape)
# 2D vars e.g. RAINNC, rain
if dimlen==self.DIM_2D_SHAPE:
if dimlen==2:
outputdata=data.data[self.y,self.x]
else:
outputdata=data.data[curtime,self.y,self.x]
# 3D vars e.g. QVAPOR, qv
elif dimlen==self.DIM_3D_SHAPE:
if dimlen==3:
outputdata=self.collapse_z(data.data[self.z,self.y,self.x],varname)
else:
outputdata=self.collapse_z(data.data[curtime,self.z,self.y,self.x],varname)
else:
raise IndexError("Do not know how to process {} dimensions".format(len(data.data.shape)))
if varname==self._rainvar:
curent_rain=outputdata[:]
outputdata-=self.last_rain
self.last_rain=(self.curpos,curent_rain)
return outputdata
def get_current_date(self):
"""Assumes a hard coded filename (e.g. WRF output filenames wrfout_d01_2007-01-01_00:00:00)"""
if self._datamodel=="WRF":
datestring=self.files[self._curfile].split("_")[2]+"-"+str(self._pos_in_file)
return datetime.datetime.strptime(datestring,"%Y-%m-%d-%H")
else:
return datetime.datetime(2007,01,01,00)+datetime.timedelta(self.curpos/24.0)
def __len__(self):
return len(self.files)*self.times_per_file
def __iter__(self):
|
def __next__(self):
self.curpos+=1
output_data=Bunch()
filename=self.files[self._curfile]
for v in self._var_names:
if type(v)==str:
curdata=self.load_data(v)
curvarname=v
elif type(v)==list:
cur_operator=v[0]
for varname in v[1:]:
if type(varname)==str:
curvarname=v[1]
break
curdata=self.load_data(v[1])
for curv in v[2:]:
next_data=self.load_data(curv)
cur_operator(curdata,next_data)
output_data[self._short_names[curvarname]]=curdata
output_data.date=self.get_current_date()
return output_data
next=__next__
clims=dict( qv=(0,0.004),
qc=(0,0.0003),
t=( | return self | identifier_body |
real_comparison.py |
_last_rain=None
_last_rain_pos=-1
_lr_pos_in_file=-1
_lr_curfile=0
_rainvar="RAINNC"
_testvar=None
# _var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],"RAINNC",[op.add,"T2",300],"U","V","W"]
_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],[op.add,"T2",300],"U","V"]
_short_names=dict(QVAPOR="qv",QCLOUD="qc",QICE="qc",RAINNC="rain",T="t",T2="t",U="u",V="v",W="w",QRAIN="rain",
qv="qv",qc="qc",rain="rain",qr="rain",th="t",u="u",v="v",w="w")
_collapse_functions=dict(QVAPOR=np.mean,QCLOUD=np.sum,T=np.mean,U=np.mean,V=np.mean,W=np.mean,
QICE=np.sum,QRAIN=np.sum,QSNOW=np.sum,
qv=np.mean,qc=np.sum,th=np.mean,u=np.mean,v=np.mean,w=np.mean,
qi=np.sum,qr=np.sum,qs=np.sum,p=np.mean)
_wrf_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],"T2","U","V"]#[op.add,"T",290],"U","V"]
_icar_var_names=["qv",[op.add,"qc","qi"],[op.add,"qr","qs"],[exner,"th","p"],"u","v"]
x=slice(0,None) #by default take all data in the file in x,y, and z
y=slice(0,None)
# z=slice(0,None)
z=slice(0,10)
# zslices=dict(qv=slice(0,10),qc=slice(0,10),t=slice(1),)
# yslices=dict()
# yslices.setdefault(y)
llh
def __init__(self, filenames,start_pos=0,datatype="WRF"):
super(DataReader,self).__init__()
self.files=filenames
self._datamodel=datatype
if datatype=="WRF":
self._var_names=self._wrf_var_names
test_var=mygis.read_nc(self.files[0],self._var_names[0],returnNCvar=True)
self.times_per_file=test_var.data.shape[0]
test_var.ncfile.close()
self.zaxis=0
self.DIM_2D_SHAPE=3
self.DIM_3D_SHAPE=4
if datatype=="ICAR":
self._var_names=self._icar_var_names
self.times_per_file=1
self._rainvar="rain"
tmp=self.y
self.y=self.z
self.z=tmp
self.zaxis=1
self.DIM_2D_SHAPE=2
self.DIM_3D_SHAPE=3
#note this calls the setter which will set pos_in_file and cur_file
self.curpos=start_pos
def _get_collapsing_func(self,varname):
"""docstring for get_collapsing_func"""
try:
myfunc=self._collapse_functions[varname]
except:
myfunc=np.mean
return myfunc
def collapse_z(self,data,varname):
if len(data.shape)==3:
myfunc=self._get_collapsing_func(varname)
return myfunc(data,axis=self.zaxis)
else:
return data
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def curpos(self):
return self._curpos
@curpos.setter
def curpos(self,pos):
self._curpos=pos
self._pos_in_file= int(self._curpos) % int(self.times_per_file)
self._curfile = int(self._curpos) / int(self.times_per_file)
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def last_rain_pos(self):
return self._last_rain_pos
@curpos.setter
def last_rain_pos(self,pos):
self._last_rain_pos=pos
self._lr_pos_in_file= int(self._last_rain_pos) % int(self.times_per_file)
self._lr_curfile = int(self._last_rain_pos) / int(self.times_per_file)
# Get/Set the last_rain variable
@property
def last_rain(self):
if self._last_rain==None:
self.last_rain_pos=self.curpos-1
if (self._pos_in_file>0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[self._last_rain_pos,self.y,self.x]
nc_data.ncfile.close()
elif (self._curfile==0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
nx=nc_data.data.shape[1]
ny=nc_data.data.shape[2]
self._last_rain=np.zeros((nx,ny))[self.x,self.y]
nc_data.ncfile.close()
else:
nc_data=mygis.read_nc(self.files[self._curfile-1],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[-1,self.x,self.y]
nc_data.ncfile.close()
# else: we already have a valid _last_rain, just return it this should be the case most of the time
return self._last_rain
@last_rain.setter
def last_rain(self,value):
if hasattr(value,__iter__):
self.last_rain_pos=value[0]
self._last_rain=value[1]
else:
self.last_rain_pos=value
self._last_rain=None # the getter will automagically generate last_rain
def load_data(self,varname, filename=None, curtime=None):
if type(varname)!=str:
return varname
if filename==None:
filename=self.files[self._curfile]
if curtime==None:
curtime=self._pos_in_file
data=mygis.read_nc(filename,varname,returnNCvar=True)
dimlen=len(data.data.shape)
# 2D vars e.g. RAINNC, rain
if dimlen==self.DIM_2D_SHAPE:
if dimlen==2:
outputdata=data.data[self.y,self.x]
else:
outputdata=data.data[curtime,self.y,self.x]
# 3D vars e.g. QVAPOR, qv
elif dimlen==self.DIM_3D_SHAPE:
if dimlen==3:
outputdata=self.collapse_z(data.data[self.z,self.y,self.x],varname)
else:
outputdata=self.collapse_z(data.data[curtime,self.z,self.y,self.x],varname)
else:
raise IndexError("Do not know how to process {} dimensions".format(len(data.data.shape)))
if varname==self._rainvar:
curent_rain=outputdata[:]
outputdata-=self.last_rain
self.last_rain=(self.curpos,curent_rain)
return outputdata
def get_current_date(self):
"""Assumes a hard coded filename (e.g. WRF output filenames wrfout_d01_2007-01-01_00:00:00)"""
if self._datamodel=="WRF":
datestring=self.files[self._curfile].split("_")[2]+"-"+str(self._pos_in_file)
return datetime.datetime.strptime(datestring,"%Y-%m-%d-%H")
else:
return datetime.datetime(2007,01,01,00)+datetime.timedelta(self.curpos/24.0)
def __len__(self):
return len(self.files)*self.times_per_file
def | (self):
return self
def __next__(self):
self.curpos+=1
output_data=Bunch()
filename=self.files[self._curfile]
for v in self._var_names:
if type(v)==str:
curdata=self.load_data(v)
curvarname=v
elif type(v)==list:
cur_operator=v[0]
for varname in v[1:]:
if type(varname)==str:
curvarname=v[1]
break
curdata=self.load_data(v[1])
for curv in v[2:]:
next_data=self.load_data(curv)
cur_operator(curdata,next_data)
output_data[self._short_names[curvarname]]=curdata
output_data.date=self.get_current_date()
return output_data
next=__next__
clims=dict( qv=(0,0.004),
qc=(0,0.0003),
t | __iter__ | identifier_name |
real_comparison.py | "]
x=slice(0,None) #by default take all data in the file in x,y, and z
y=slice(0,None)
# z=slice(0,None)
z=slice(0,10)
# zslices=dict(qv=slice(0,10),qc=slice(0,10),t=slice(1),)
# yslices=dict()
# yslices.setdefault(y)
llh
def __init__(self, filenames,start_pos=0,datatype="WRF"):
super(DataReader,self).__init__()
self.files=filenames
self._datamodel=datatype
if datatype=="WRF":
self._var_names=self._wrf_var_names
test_var=mygis.read_nc(self.files[0],self._var_names[0],returnNCvar=True)
self.times_per_file=test_var.data.shape[0]
test_var.ncfile.close()
self.zaxis=0
self.DIM_2D_SHAPE=3
self.DIM_3D_SHAPE=4
if datatype=="ICAR":
self._var_names=self._icar_var_names
self.times_per_file=1
self._rainvar="rain"
tmp=self.y
self.y=self.z
self.z=tmp
self.zaxis=1
self.DIM_2D_SHAPE=2
self.DIM_3D_SHAPE=3
#note this calls the setter which will set pos_in_file and cur_file
self.curpos=start_pos
def _get_collapsing_func(self,varname):
"""docstring for get_collapsing_func"""
try:
myfunc=self._collapse_functions[varname]
except:
myfunc=np.mean
return myfunc
def collapse_z(self,data,varname):
if len(data.shape)==3:
myfunc=self._get_collapsing_func(varname)
return myfunc(data,axis=self.zaxis)
else:
return data
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def curpos(self):
return self._curpos
@curpos.setter
def curpos(self,pos):
self._curpos=pos
self._pos_in_file= int(self._curpos) % int(self.times_per_file)
self._curfile = int(self._curpos) / int(self.times_per_file)
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def last_rain_pos(self):
return self._last_rain_pos
@curpos.setter
def last_rain_pos(self,pos):
self._last_rain_pos=pos
self._lr_pos_in_file= int(self._last_rain_pos) % int(self.times_per_file)
self._lr_curfile = int(self._last_rain_pos) / int(self.times_per_file)
# Get/Set the last_rain variable
@property
def last_rain(self):
if self._last_rain==None:
self.last_rain_pos=self.curpos-1
if (self._pos_in_file>0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[self._last_rain_pos,self.y,self.x]
nc_data.ncfile.close()
elif (self._curfile==0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
nx=nc_data.data.shape[1]
ny=nc_data.data.shape[2]
self._last_rain=np.zeros((nx,ny))[self.x,self.y]
nc_data.ncfile.close()
else:
nc_data=mygis.read_nc(self.files[self._curfile-1],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[-1,self.x,self.y]
nc_data.ncfile.close()
# else: we already have a valid _last_rain, just return it this should be the case most of the time
return self._last_rain
@last_rain.setter
def last_rain(self,value):
if hasattr(value,__iter__):
self.last_rain_pos=value[0]
self._last_rain=value[1]
else:
self.last_rain_pos=value
self._last_rain=None # the getter will automagically generate last_rain
def load_data(self,varname, filename=None, curtime=None):
if type(varname)!=str:
return varname
if filename==None:
filename=self.files[self._curfile]
if curtime==None:
curtime=self._pos_in_file
data=mygis.read_nc(filename,varname,returnNCvar=True)
dimlen=len(data.data.shape)
# 2D vars e.g. RAINNC, rain
if dimlen==self.DIM_2D_SHAPE:
if dimlen==2:
outputdata=data.data[self.y,self.x]
else:
outputdata=data.data[curtime,self.y,self.x]
# 3D vars e.g. QVAPOR, qv
elif dimlen==self.DIM_3D_SHAPE:
if dimlen==3:
outputdata=self.collapse_z(data.data[self.z,self.y,self.x],varname)
else:
outputdata=self.collapse_z(data.data[curtime,self.z,self.y,self.x],varname)
else:
raise IndexError("Do not know how to process {} dimensions".format(len(data.data.shape)))
if varname==self._rainvar:
curent_rain=outputdata[:]
outputdata-=self.last_rain
self.last_rain=(self.curpos,curent_rain)
return outputdata
def get_current_date(self):
"""Assumes a hard coded filename (e.g. WRF output filenames wrfout_d01_2007-01-01_00:00:00)"""
if self._datamodel=="WRF":
datestring=self.files[self._curfile].split("_")[2]+"-"+str(self._pos_in_file)
return datetime.datetime.strptime(datestring,"%Y-%m-%d-%H")
else:
return datetime.datetime(2007,01,01,00)+datetime.timedelta(self.curpos/24.0)
def __len__(self):
return len(self.files)*self.times_per_file
def __iter__(self):
return self
def __next__(self):
self.curpos+=1
output_data=Bunch()
filename=self.files[self._curfile]
for v in self._var_names:
if type(v)==str:
curdata=self.load_data(v)
curvarname=v
elif type(v)==list:
cur_operator=v[0]
for varname in v[1:]:
if type(varname)==str:
curvarname=v[1]
break
curdata=self.load_data(v[1])
for curv in v[2:]:
next_data=self.load_data(curv)
cur_operator(curdata,next_data)
output_data[self._short_names[curvarname]]=curdata
output_data.date=self.get_current_date()
return output_data
next=__next__
clims=dict( qv=(0,0.004),
qc=(0,0.0003),
t=(260,310),
u=(-15,15),
v=(-15,15),
rain=(0,0.000005))
def make_subplot(data,ny,nx,curplot,v,extra_title):
plt.subplot(ny,nx,curplot)
plt.imshow(data)
plt.clim(clims[v])
plt.colorbar()
plt.title(v+extra_title)
def make_plots(data1,data2,date,fig=None):
plt.close("all")
if fig==None:
fig=plt.figure(figsize=(24,14));
else:
fig.clear()
ny=3
nx=4
curplot=0
varnames=["qv","qc","u","v","t","rain"]
for v in varnames:
curplot+=1
make_subplot(data1[v],ny,nx,curplot,v," "+str(date)[:14])
curplot+=1
make_subplot(data2[v],ny,nx,curplot,v," "+str(date)[:14])
return fig
def main(icar_dir="output/",output_dir="./"):
output_filename=output_dir+"vis_{}.png"
wrf_files=glob.glob(wrf_dir+"wrfout*")
wrf_files.sort()
icar_files=glob.glob(icar_dir+"swim_out*")
icar_files.sort()
wrf_data=DataReader(wrf_files,datatype="WRF")
icar_data=DataReader(icar_files,datatype="ICAR")
fig=plt.figure(figsize=(24,14));
for i in range(len(wrf_data)):
wrf=wrf_data.next()
icar=icar_data.next()
print(str(wrf.date),str(icar.date))
sys.stdout.flush() | random_line_split | ||
real_comparison.py |
_last_rain=None
_last_rain_pos=-1
_lr_pos_in_file=-1
_lr_curfile=0
_rainvar="RAINNC"
_testvar=None
# _var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],"RAINNC",[op.add,"T2",300],"U","V","W"]
_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],[op.add,"T2",300],"U","V"]
_short_names=dict(QVAPOR="qv",QCLOUD="qc",QICE="qc",RAINNC="rain",T="t",T2="t",U="u",V="v",W="w",QRAIN="rain",
qv="qv",qc="qc",rain="rain",qr="rain",th="t",u="u",v="v",w="w")
_collapse_functions=dict(QVAPOR=np.mean,QCLOUD=np.sum,T=np.mean,U=np.mean,V=np.mean,W=np.mean,
QICE=np.sum,QRAIN=np.sum,QSNOW=np.sum,
qv=np.mean,qc=np.sum,th=np.mean,u=np.mean,v=np.mean,w=np.mean,
qi=np.sum,qr=np.sum,qs=np.sum,p=np.mean)
_wrf_var_names=["QVAPOR",[op.add,"QCLOUD","QICE"],[op.add,"QRAIN","QSNOW"],"T2","U","V"]#[op.add,"T",290],"U","V"]
_icar_var_names=["qv",[op.add,"qc","qi"],[op.add,"qr","qs"],[exner,"th","p"],"u","v"]
x=slice(0,None) #by default take all data in the file in x,y, and z
y=slice(0,None)
# z=slice(0,None)
z=slice(0,10)
# zslices=dict(qv=slice(0,10),qc=slice(0,10),t=slice(1),)
# yslices=dict()
# yslices.setdefault(y)
llh
def __init__(self, filenames,start_pos=0,datatype="WRF"):
super(DataReader,self).__init__()
self.files=filenames
self._datamodel=datatype
if datatype=="WRF":
self._var_names=self._wrf_var_names
test_var=mygis.read_nc(self.files[0],self._var_names[0],returnNCvar=True)
self.times_per_file=test_var.data.shape[0]
test_var.ncfile.close()
self.zaxis=0
self.DIM_2D_SHAPE=3
self.DIM_3D_SHAPE=4
if datatype=="ICAR":
self._var_names=self._icar_var_names
self.times_per_file=1
self._rainvar="rain"
tmp=self.y
self.y=self.z
self.z=tmp
self.zaxis=1
self.DIM_2D_SHAPE=2
self.DIM_3D_SHAPE=3
#note this calls the setter which will set pos_in_file and cur_file
self.curpos=start_pos
def _get_collapsing_func(self,varname):
"""docstring for get_collapsing_func"""
try:
myfunc=self._collapse_functions[varname]
except:
myfunc=np.mean
return myfunc
def collapse_z(self,data,varname):
if len(data.shape)==3:
myfunc=self._get_collapsing_func(varname)
return myfunc(data,axis=self.zaxis)
else:
|
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def curpos(self):
return self._curpos
@curpos.setter
def curpos(self,pos):
self._curpos=pos
self._pos_in_file= int(self._curpos) % int(self.times_per_file)
self._curfile = int(self._curpos) / int(self.times_per_file)
# Get/Set the position in the timeseries, while properly updating the filenumber and position in file
@property
def last_rain_pos(self):
return self._last_rain_pos
@curpos.setter
def last_rain_pos(self,pos):
self._last_rain_pos=pos
self._lr_pos_in_file= int(self._last_rain_pos) % int(self.times_per_file)
self._lr_curfile = int(self._last_rain_pos) / int(self.times_per_file)
# Get/Set the last_rain variable
@property
def last_rain(self):
if self._last_rain==None:
self.last_rain_pos=self.curpos-1
if (self._pos_in_file>0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[self._last_rain_pos,self.y,self.x]
nc_data.ncfile.close()
elif (self._curfile==0):
nc_data=mygis.read_nc(self.files[self._curfile],self._rainvar,returnNCvar=True)
nx=nc_data.data.shape[1]
ny=nc_data.data.shape[2]
self._last_rain=np.zeros((nx,ny))[self.x,self.y]
nc_data.ncfile.close()
else:
nc_data=mygis.read_nc(self.files[self._curfile-1],self._rainvar,returnNCvar=True)
self._last_rain=nc_data.data[-1,self.x,self.y]
nc_data.ncfile.close()
# else: we already have a valid _last_rain, just return it this should be the case most of the time
return self._last_rain
@last_rain.setter
def last_rain(self,value):
if hasattr(value,__iter__):
self.last_rain_pos=value[0]
self._last_rain=value[1]
else:
self.last_rain_pos=value
self._last_rain=None # the getter will automagically generate last_rain
def load_data(self,varname, filename=None, curtime=None):
if type(varname)!=str:
return varname
if filename==None:
filename=self.files[self._curfile]
if curtime==None:
curtime=self._pos_in_file
data=mygis.read_nc(filename,varname,returnNCvar=True)
dimlen=len(data.data.shape)
# 2D vars e.g. RAINNC, rain
if dimlen==self.DIM_2D_SHAPE:
if dimlen==2:
outputdata=data.data[self.y,self.x]
else:
outputdata=data.data[curtime,self.y,self.x]
# 3D vars e.g. QVAPOR, qv
elif dimlen==self.DIM_3D_SHAPE:
if dimlen==3:
outputdata=self.collapse_z(data.data[self.z,self.y,self.x],varname)
else:
outputdata=self.collapse_z(data.data[curtime,self.z,self.y,self.x],varname)
else:
raise IndexError("Do not know how to process {} dimensions".format(len(data.data.shape)))
if varname==self._rainvar:
curent_rain=outputdata[:]
outputdata-=self.last_rain
self.last_rain=(self.curpos,curent_rain)
return outputdata
def get_current_date(self):
"""Assumes a hard coded filename (e.g. WRF output filenames wrfout_d01_2007-01-01_00:00:00)"""
if self._datamodel=="WRF":
datestring=self.files[self._curfile].split("_")[2]+"-"+str(self._pos_in_file)
return datetime.datetime.strptime(datestring,"%Y-%m-%d-%H")
else:
return datetime.datetime(2007,01,01,00)+datetime.timedelta(self.curpos/24.0)
def __len__(self):
return len(self.files)*self.times_per_file
def __iter__(self):
return self
def __next__(self):
self.curpos+=1
output_data=Bunch()
filename=self.files[self._curfile]
for v in self._var_names:
if type(v)==str:
curdata=self.load_data(v)
curvarname=v
elif type(v)==list:
cur_operator=v[0]
for varname in v[1:]:
if type(varname)==str:
curvarname=v[1]
break
curdata=self.load_data(v[1])
for curv in v[2:]:
next_data=self.load_data(curv)
cur_operator(curdata,next_data)
output_data[self._short_names[curvarname]]=curdata
output_data.date=self.get_current_date()
return output_data
next=__next__
clims=dict( qv=(0,0.004),
qc=(0,0.0003),
t=( | return data | conditional_block |
PanoImageRenderer.js | this._image && this._imageIsReady &&
(!this._isVideo || this._image.readyState >= 2 /* HAVE_CURRENT_DATA */);
}
bindTexture() {
return new Promise((res, rej) => {
if (!this._contentLoader) {
rej("ImageLoader is not initialized");
return;
}
this._contentLoader.get()
.then(() => {
this._bindTexture();
}, rej)
.then(res);
});
}
// 부모 엘리먼트에 canvas 를 붙임
attachTo(parentElement) {
this.detach();
parentElement.appendChild(this.canvas);
this._wrapper = parentElement;
}
forceContextLoss() {
if (this.hasRenderingContext()) {
const loseContextExtension = this.context.getExtension("WEBGL_lose_context");
if (loseContextExtension) {
loseContextExtension.loseContext();
}
}
}
// 부모 엘리먼트에서 canvas 를 제거
detach() {
if (this.canvas.parentElement) {
this.canvas.parentElement.removeChild(this.canvas);
}
}
destroy() {
if (this._contentLoader) {
this._contentLoader.destroy();
}
this._animator.stop();
this.detach();
this.forceContextLoss();
this.off();
this.canvas.removeEventListener("webglcontextlost", this._onWebglcontextlost);
this.canvas.removeEventListener("webglcontextrestored", this._onWebglcontextrestored);
}
hasRenderingContext() {
if (!(this.context && !this.context.isContextLost())) {
return false;
} else if (
this.context &&
!this.context.getProgramParameter(this.shaderProgram, this.context.LINK_STATUS)) {
return false;
}
return true;
}
_initShaderProgram() {
const gl = this.context;
if (this.shaderProgram) {
gl.deleteProgram(this.shaderProgram);
this.shaderProgram = null;
}
const renderer = this._renderer;
const vsSource = renderer.getVertexShaderSource();
const fsSource = renderer.getFragmentShaderSource();
const vertexShader = WebGLUtils.createShader(gl, gl.VERTEX_SHADER, vsSource);
const fragmentShader = WebGLUtils.createShader(gl, gl.FRAGMENT_SHADER, fsSource);
const shaderProgram = WebGLUtils.createProgram(gl, vertexShader, fragmentShader);
if (!shaderProgram) {
throw new Error(`Failed to intialize shaders: ${WebGLUtils.getErrorNameFromWebGLErrorCode(gl.getError())}`);
}
gl.useProgram(shaderProgram);
shaderProgram.vertexPositionAttribute = gl.getAttribLocation(shaderProgram, "aVertexPosition");
gl.enableVertexAttribArray(shaderProgram.vertexPositionAttribute);
shaderProgram.pMatrixUniform = gl.getUniformLocation(shaderProgram, "uPMatrix");
shaderProgram.mvMatrixUniform = gl.getUniformLocation(shaderProgram, "uMVMatrix");
shaderProgram.samplerUniform = gl.getUniformLocation(shaderProgram, "uSampler");
shaderProgram.textureCoordAttribute = gl.getAttribLocation(shaderProgram, "aTextureCoord");
shaderProgram.uEye = gl.getUniformLocation(shaderProgram, "uEye");
gl.enableVertexAttribArray(shaderProgram.textureCoordAttribute);
// clear buffer
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT | gl.STENCIL_BUFFER_BIT);
// Use TEXTURE0
gl.uniform1i(shaderProgram.samplerUniform, 0);
this.shaderProgram = shaderProgram;
}
_onWebglcontextlost(e) {
e.preventDefault();
this.trigger(EVENTS.RENDERING_CONTEXT_LOST);
}
_onWebglcontextrestored(e) {
this._initWebGL();
this.trigger(EVENTS.RENDERING_CONTEXT_RESTORE);
}
updateFieldOfView(fieldOfView) {
this.fieldOfView = fieldOfView;
this._updateViewport();
}
updateViewportDimensions(width, height) {
let viewPortChanged = false;
this.width = width;
this.height = height;
const w = width * DEVICE_PIXEL_RATIO;
const h = height * DEVICE_PIXEL_RATIO;
if (w !== this.canvas.width) {
this.canvas.width = w;
viewPortChanged = true;
}
if (h !== this.canvas.height) {
this.canvas.height = h;
viewPortChanged = true;
}
if (!viewPortChanged) {
return;
}
this._updateViewport();
this._shouldForceDraw = true;
}
_updateViewport() {
mat4.perspective(
this.pMatrix,
glMatrix.toRadian(this.fieldOfView),
this.canvas.width / this.canvas.height,
0.1,
100);
this.context.viewport(0, 0, this.context.drawingBufferWidth, this.context.drawingBufferHeight);
}
_initWebGL() {
let gl;
// TODO: Following code does need to be executed only if width/height, cubicStrip property is changed.
try {
this._initRenderingContext();
gl = this.context;
this.updateViewportDimensions(this.width, this.height);
this._initShaderProgram();
} catch (e) {
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.NO_WEBGL,
message: "no webgl support"
});
this.destroy();
console.error(e); // eslint-disable-line no-console
return;
}
// 캔버스를 투명으로 채운다.
gl.clearColor(0, 0, 0, 0);
const textureTarget = this._isCubeMap ? gl.TEXTURE_CUBE_MAP : gl.TEXTURE_2D;
if (this.texture) {
gl.deleteTexture(this.texture);
}
this.texture = WebGLUtils.createTexture(gl, textureTarget);
if (this._imageType === ImageType.CUBESTRIP) {
// TODO: Apply following options on other projection type.
gl.enable(gl.CULL_FACE);
// gl.enable(gl.DEPTH_TEST);
}
}
_initRenderingContext() {
if (this.hasRenderingContext()) {
return;
}
if (!window.WebGLRenderingContext) {
throw new Error("WebGLRenderingContext not available.");
}
this.context = WebGLUtils.getWebglContext(this.canvas, this._renderingContextAttributes);
if (!this.context) {
throw new Error("Failed to acquire 3D rendering context");
}
}
_initBuffers() {
const vertexPositionData = this._renderer.getVertexPositionData();
const indexData = this._renderer.getIndexData();
const textureCoordData = this._renderer.getTextureCoordData(this._imageConfig);
const gl = this.context;
this.vertexBuffer = WebGLUtils.initBuffer(
gl, gl.ARRAY_BUFFER, new Float32Array(vertexPositionData), 3,
this.shaderProgram.vertexPositionAttribute);
this.indexBuffer = WebGLUtils.initBuffer(
gl, gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(indexData), 1);
this.textureCoordBuffer = WebGLUtils.initBuffer(
gl, gl.ARRAY_BUFFER, new Float32Array(textureCoordData), this._isCubeMap ? 3 : 2,
this.shaderProgram.textureCoordAttribute);
this._bindBuffers();
}
_bindTexture() {
// Detect if it is EAC Format while CUBESTRIP mode.
// We assume it is EAC if image is not 3/2 ratio.
if (this._imageType === ImageType.CUBESTRIP) {
const {width, height} = this._renderer.getDimension(this._image);
const isEAC = width && height && width / height !== 1.5;
this.context.uniform1f(this.context.getUniformLocation(this.shaderProgram, "uIsEAC"), isEAC);
} else if (this._imageType === ImageType.PANORAMA) {
const {width, height} = this._renderer.getDimension(this._image);
const imageAspectRatio = width && height && width / height;
this._renderer.updateShaderData({imageAspectRatio});
}
// intialize shader buffers after image is loaded.(by updateShaderData)
// because buffer may be differ by image size.(eg. CylinderRenderer)
this._initBuffers();
this._renderer.bindTexture(
this.context,
this.texture,
this._image,
this._imageConfig,
);
this._shouldForceDraw = true;
this.trigger(EVENTS.BIND_TEXTURE);
}
_updateTexture() {
this._renderer.updateTexture(
this.context,
this._image,
this._imageConfig,
);
}
keepUpdate(doUpdate) {
if (doUpdate && this.isImageLoaded() === false) {
// Force to draw a frame after image is loaded on render()
this._shouldForceDraw = true;
}
this._keepUpdate = doUpdate;
}
startRender() {
this._animator.setCallback(this._render.bind(this));
this._animator.start();
}
stopRender() {
this._animator.stop();
}
renderWithQuaternion(quaternion, fieldOfView) {
if (!this.isImageLoaded()) {
return;
}
if (this._keepUpdate === false &&
this._lastQuaternion && quat.exactEquals(this._lastQuaternion, quaternion | ) &&
this.fie | conditional_block | |
PanoImageRenderer.js | ,
glMatrix.toRadian(this.fieldOfView),
this.canvas.width / this.canvas.height,
0.1,
100);
this.context.viewport(0, 0, this.context.drawingBufferWidth, this.context.drawingBufferHeight);
}
_initWebGL() {
let gl;
// TODO: Following code does need to be executed only if width/height, cubicStrip property is changed.
try {
this._initRenderingContext();
gl = this.context;
this.updateViewportDimensions(this.width, this.height);
this._initShaderProgram();
} catch (e) {
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.NO_WEBGL,
message: "no webgl support"
});
this.destroy();
console.error(e); // eslint-disable-line no-console
return;
}
// 캔버스를 투명으로 채운다.
gl.clearColor(0, 0, 0, 0);
const textureTarget = this._isCubeMap ? gl.TEXTURE_CUBE_MAP : gl.TEXTURE_2D;
if (this.texture) {
gl.deleteTexture(this.texture);
}
this.texture = WebGLUtils.createTexture(gl, textureTarget);
if (this._imageType === ImageType.CUBESTRIP) {
// TODO: Apply following options on other projection type.
gl.enable(gl.CULL_FACE);
// gl.enable(gl.DEPTH_TEST);
}
}
_initRenderingContext() {
if (this.hasRenderingContext()) {
return;
}
if (!window.WebGLRenderingContext) {
throw new Error("WebGLRenderingContext not available.");
}
this.context = WebGLUtils.getWebglContext(this.canvas, this._renderingContextAttributes);
if (!this.context) {
throw new Error("Failed to acquire 3D rendering context");
}
}
_initBuffers() {
const vertexPositionData = this._renderer.getVertexPositionData();
const indexData = this._renderer.getIndexData();
const textureCoordData = this._renderer.getTextureCoordData(this._imageConfig);
const gl = this.context;
this.vertexBuffer = WebGLUtils.initBuffer(
gl, gl.ARRAY_BUFFER, new Float32Array(vertexPositionData), 3,
this.shaderProgram.vertexPositionAttribute);
this.indexBuffer = WebGLUtils.initBuffer(
gl, gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(indexData), 1);
this.textureCoordBuffer = WebGLUtils.initBuffer(
gl, gl.ARRAY_BUFFER, new Float32Array(textureCoordData), this._isCubeMap ? 3 : 2,
this.shaderProgram.textureCoordAttribute);
this._bindBuffers();
}
_bindTexture() {
// Detect if it is EAC Format while CUBESTRIP mode.
// We assume it is EAC if image is not 3/2 ratio.
if (this._imageType === ImageType.CUBESTRIP) {
const {width, height} = this._renderer.getDimension(this._image);
const isEAC = width && height && width / height !== 1.5;
this.context.uniform1f(this.context.getUniformLocation(this.shaderProgram, "uIsEAC"), isEAC);
} else if (this._imageType === ImageType.PANORAMA) {
const {width, height} = this._renderer.getDimension(this._image);
const imageAspectRatio = width && height && width / height;
this._renderer.updateShaderData({imageAspectRatio});
}
// intialize shader buffers after image is loaded.(by updateShaderData)
// because buffer may be differ by image size.(eg. CylinderRenderer)
this._initBuffers();
this._renderer.bindTexture(
this.context,
this.texture,
this._image,
this._imageConfig,
);
this._shouldForceDraw = true;
this.trigger(EVENTS.BIND_TEXTURE);
}
_updateTexture() {
this._renderer.updateTexture(
this.context,
this._image,
this._imageConfig,
);
}
keepUpdate(doUpdate) {
if (doUpdate && this.isImageLoaded() === false) {
// Force to draw a frame after image is loaded on render()
this._shouldForceDraw = true;
}
this._keepUpdate = doUpdate;
}
startRender() {
this._animator.setCallback(this._render.bind(this));
this._animator.start();
}
stopRender() {
this._animator.stop();
}
renderWithQuaternion(quaternion, fieldOfView) {
if (!this.isImageLoaded()) {
return;
}
if (this._keepUpdate === false &&
this._lastQuaternion && quat.exactEquals(this._lastQuaternion, quaternion) &&
this.fieldOfView && this.fieldOfView === fieldOfView &&
this._shouldForceDraw === false) {
return;
}
// updatefieldOfView only if fieldOfView is changed.
if (fieldOfView !== undefined && fieldOfView !== this.fieldOfView) {
this.updateFieldOfView(fieldOfView);
}
this.mvMatrix = mat4.fromQuat(mat4.create(), quaternion);
this._draw();
this._lastQuaternion = quat.clone(quaternion);
if (this._shouldForceDraw) {
this._shouldForceDraw = false;
}
}
renderWithYawPitch(yaw, pitch, fieldOfView) {
if (!this.isImageLoaded()) {
return;
}
if (this._keepUpdate === false &&
this._lastYaw !== null && this._lastYaw === yaw &&
this._lastPitch !== null && this._lastPitch === pitch &&
this.fieldOfView && this.fieldOfView === fieldOfView &&
this._shouldForceDraw === false) {
return;
}
// fieldOfView 가 존재하면서 기존의 값과 다를 경우에만 업데이트 호출
if (fieldOfView !== undefined && fieldOfView !== this.fieldOfView) {
this.updateFieldOfView(fieldOfView);
}
mat4.identity(this.mvMatrix);
mat4.rotateX(this.mvMatrix, this.mvMatrix, -glMatrix.toRadian(pitch));
mat4.rotateY(this.mvMatrix, this.mvMatrix, -glMatrix.toRadian(yaw));
this._draw();
this._lastYaw = yaw;
this._lastPitch = pitch;
if (this._shouldForceDraw) {
this._shouldForceDraw = false;
}
}
_render() {
const yawPitchControl = this._yawPitchControl;
const fov = yawPitchControl.getFov();
if (yawPitchControl.shouldRenderWithQuaternion()) {
const quaternion = yawPitchControl.getQuaternion();
this.renderWithQuaternion(quaternion, fov);
} else {
const yawPitch = yawPitchControl.getYawPitch();
this.renderWithYawPitch(yawPitch.yaw, yawPitch.pitch, fov);
}
}
_renderStereo = (time, frame) => {
const vr = this._vr;
const gl = this.context;
const eyeParams = vr.getEyeParams(gl, frame);
if (!eyeParams) return;
vr.beforeRender(gl, frame);
// Render both eyes
for (const eyeIndex of [0, 1]) {
const eyeParam = eyeParams[eyeIndex];
this.mvMatrix = eyeParam.mvMatrix;
this.pMatrix = eyeParam.pMatrix;
gl.viewport(...eyeParam.viewport);
gl.uniform1f(this.shaderProgram.uEye, eyeIndex);
this._bindBuffers();
this._draw();
}
vr.afterRender();
}
_bindBuffers() {
const gl = this.context;
const program = this.shaderProgram;
const vertexBuffer = this.vertexBuffer;
const textureCoordBuffer = this.textureCoordBuffer;
gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
gl.enableVertexAttribArray(program.vertexPositionAttribute);
gl.vertexAttribPointer(
program.vertexPositionAttribute, vertexBuffer.itemSize, gl.FLOAT, false, 0, 0
);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.indexBuffer);
gl.bindBuffer(gl.ARRAY_BUFFER, textureCoordBuffer);
gl.enableVertexAttribArray(program.textureCoordAttribute);
gl.vertexAttribPointer(
program.textureCoordAttribute, textureCoordBuffer.itemSize, gl.FLOAT, false, 0, 0
);
}
_draw() {
if (this._isVideo && this._keepUpdate) {
this._updateTexture();
}
this._renderer.render({
gl: this.context,
shaderProgram: this.shaderProgram,
indexBuffer: this.indexBuffer,
mvMatrix: this.mvMatrix,
pMatrix: this.pMatrix,
});
}
/**
* Returns projection renderer by each type
*/
getProjectionRenderer() {
return this._renderer;
}
/**
* @return Promise
*/
enterVR() {
const vr = this._vr;
if (!WEBXR_SUPPORTED && !navigator.getVRDisplays) {
return Promise.reject("VR is not available on this browser.");
}
if (vr && vr.isPresenting()) {
return Promise.resolve("VR already enabled.");
}
return this._requestPresent();
} | random_line_split | ||
PanoImageRenderer.js | alConfig, renderingContextAttributes
) {
// Super constructor
super();
this.sphericalConfig = sphericalConfig;
this.fieldOfView = sphericalConfig.fieldOfView;
this.width = width;
this.height = height;
this._lastQuaternion = null;
this._lastYaw = null;
this._lastPitch = null;
this._lastFieldOfView = null;
this.pMatrix = mat4.create();
this.mvMatrix = mat4.create();
// initialzie pMatrix
mat4.perspective(this.pMatrix, glMatrix.toRadian(this.fieldOfView), width / height, 0.1, 100);
this.textureCoordBuffer = null;
this.vertexBuffer = null;
this.indexBuffer = null;
this.canvas = this._initCanvas(width, height);
this._setDefaultCanvasStyle();
this._wrapper = null; // canvas wrapper
this._wrapperOrigStyle = null;
this._renderingContextAttributes = renderingContextAttributes;
this._image = null;
this._imageConfig = null;
this._imageIsReady = false;
this._shouldForceDraw = false;
this._keepUpdate = false; // Flag to specify 'continuous update' on video even when still.
this._onContentLoad = this._onContentLoad.bind(this);
this._onContentError = this._onContentError.bind(this);
this._animator = new WebGLAnimator();
// VR/XR manager
this._vr = null;
if (image) {
this.setImage({
image,
imageType: sphericalConfig.imageType,
isVideo,
cubemapConfig: sphericalConfig.cubemapConfig
});
}
}
// FIXME: Please refactor me to have more loose connection to yawpitchcontrol
setYawPitchControl(yawPitchControl) {
this._yawPitchControl = yawPitchControl;
}
getContent() {
return this._image;
}
setImage({image, imageType, isVideo = false, cubemapConfig}) {
this._imageIsReady = false;
this._isVideo = isVideo;
this._imageConfig = Object.assign(
{
/* RLUDBF is abnormal, we use it on CUBEMAP only */
order: (imageType === ImageType.CUBEMAP) ? "RLUDBF" : "RLUDFB",
tileConfig: {
flipHorizontal: false,
rotation: 0
}
},
cubemapConfig
);
this._setImageType(imageType);
if (this._contentLoader) {
this._contentLoader.destroy();
}
if (isVideo) {
this._contentLoader = new VideoLoader();
this._keepUpdate = true;
} else {
this._contentLoader = new ImageLoader();
this._keepUpdate = false;
}
// img element or img url
this._contentLoader.set(image);
// 이미지의 사이즈를 캐시한다.
// image is reference for content in contentLoader, so it may be not valid if contentLoader is destroyed.
this._image = this._contentLoader.getElement();
return this._contentLoader.get()
.then(this._onContentLoad, this._onContentError)
.catch(e => setTimeout(() => { throw e; }));// Prevent exceptions from being isolated in promise chain.
}
_setImageType(imageType) {
if (!imageType || this._imageType === imageType) {
return;
}
this._imageType = imageType;
this._isCubeMap = imageType === ImageType.CUBEMAP;
if (this._renderer) {
this._renderer.off();
}
switch (imageType) {
case ImageType.CUBEMAP:
this._renderer = new CubeRenderer();
break;
case ImageType.CUBESTRIP:
this._renderer = new CubeStripRenderer();
break;
case ImageType.PANORAMA:
this._renderer = new CylinderRenderer();
break;
case ImageType.STEREOSCOPIC_EQUI:
this._renderer = new SphereRenderer(this.sphericalConfig.stereoFormat);
break;
default:
this._renderer = new SphereRenderer(STEREO_FORMAT.NONE);
break;
}
this._renderer.on(Renderer.EVENTS.ERROR, e => {
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.RENDERER_ERROR,
message: e.message
});
});
this._initWebGL();
}
_initCanvas(width, height) {
const canvas = document.createElement("canvas");
canvas.width = width;
canvas.height = height;
this._onWebglcontextlost = this._onWebglcontextlost.bind(this);
this._onWebglcontextrestored = this._onWebglcontextrestored.bind(this);
canvas.addEventListener("webglcontextlost", this._onWebglcontextlost);
canvas.addEventListener("webglcontextrestored", this._onWebglcontextrestored);
return canvas;
}
_setDefaultCanvasStyle() {
const canvas = this.canvas;
canvas.style.bottom = 0;
canvas.style.left = 0;
canvas.style.right = 0;
canvas.style.top = 0;
canvas.style.margin = "auto";
canvas.style.maxHeight = "100%";
canvas.style.maxWidth = "100%";
canvas.style.outline = "none";
canvas.style.position = "absolute";
}
_onContentError(error) {
this._imageIsReady = false;
this._image = null;
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.FAIL_IMAGE_LOAD,
message: "failed to load image"
});
return false;
}
_triggerContentLoad() {
this.trigger(EVENTS.IMAGE_LOADED, {
content: this._image,
isVideo: this._isVideo,
projectionType: this._imageType
});
}
_onContentLoad(image) {
this._imageIsReady = true;
this._triggerContentLoad();
return true;
}
isImageLoaded() {
return !!this._image && this._imageIsReady &&
(!this._isVideo || this._image.readyState >= 2 /* HAVE_CURRENT_DATA */);
}
bindTexture() {
return new Promise((res, rej) => {
if (!this._contentLoader) {
rej("ImageLoader is not initialized");
return;
}
this._contentLoader.get()
.then(() => {
this._bindTexture();
}, rej)
.then(res);
});
}
// 부모 엘리먼트에 canvas 를 붙임
attachTo(parentElement) {
this.detach();
parentElement.appendChild(this.canvas);
this._wrapper = parentElement;
}
forceContextLoss() {
if (this.hasRenderingContext()) {
const loseContextExtension = this.context.getExtension("WEBGL_lose_context");
if (loseContextExtension) {
loseContextExtension.loseContext();
}
}
}
// 부모 엘리먼트에서 canvas 를 제거
detach() {
if (this.canvas.parentElement) {
this.canvas.parentElement.removeChild(this.canvas);
}
}
destroy() {
if (this._contentLoader) {
this._contentLoader.destroy();
}
this._animator.stop();
this.detach();
this.forceContextLoss();
this.off();
this.canvas.removeEventListener("webglcontextlost", this._onWebglcontextlost);
this.canvas.removeEventListener("webglcontextrestored", this._onWebglcontextrestored);
}
hasRenderingContext() {
if (!(this.context && !this.context.isContextLost())) {
return false;
} else if (
this.context &&
!this.context.getProgramParameter(this.shaderProgram, this.context.LINK_STATUS)) {
return false;
}
return true;
}
_initShaderProgram() {
const gl = this.context;
if (this.shaderProgram) {
gl.deleteProgram(this.shaderProgram);
this.shaderProgram = null;
}
const renderer = this._renderer;
const vsSource = renderer.getVertexShaderSource();
const fsSource = renderer.getFragmentShaderSource();
const vertexShader = WebGLUtils.createShader(gl, gl.VERTEX_SHADER, vsSource);
const fragmentShader = WebGLUtils.createShader(gl, gl.FRAGMENT_SHADER, fsSource);
const shaderProgram = WebGLUtils.createProgram(gl, vertexShader, fragmentShader);
if (!shaderProgram) {
throw new Error(`Failed to intialize shaders: ${WebGLUtils.getErrorNameFromWebGLErrorCode(gl.getError())}`);
}
gl.useProgram(shaderProgram);
shaderProgram.vertexPositionAttribute = gl.getAttribLocation(shaderProgram, "aVertexPosition");
gl.enableVertexAttribArray(shaderProgram.vertexPositionAttribute);
shaderProgram.pMatrixUniform = gl.getUniformLocation(shaderProgram, "uPMatrix");
shaderProgram.mvMatrixUniform = gl.getUniformLocation(shaderProgram, "uMVMatrix");
shaderProgram.samplerUniform = gl.getUniformLocation(shaderProgram, "uSampler");
shaderProgram.textureCoordAttribute = gl.getAttribLocation(shaderProgram, "aTextureCoord");
shaderProgram.uEye = gl.getUniformLocation(shaderProgram, "uEye");
gl | eo, spheric | identifier_name | |
PanoImageRenderer.js | 한다.
// image is reference for content in contentLoader, so it may be not valid if contentLoader is destroyed.
this._image = this._contentLoader.getElement();
return this._contentLoader.get()
.then(this._onContentLoad, this._onContentError)
.catch(e => setTimeout(() => { throw e; }));// Prevent exceptions from being isolated in promise chain.
}
_setImageType(imageType) {
if (!imageType || this._imageType === imageType) {
return;
| this._renderer = new SphereRenderer(this.sphericalConfig.stereoFormat);
break;
default:
this._renderer = new SphereRenderer(STEREO_FORMAT.NONE);
break;
}
this._renderer.on(Renderer.EVENTS.ERROR, e => {
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.RENDERER_ERROR,
message: e.message
});
});
this._initWebGL();
}
_initCanvas(width, height) {
const canvas = document.createEl
ement("canvas");
canvas.width = width;
canvas.height = height;
this._onWebglcontextlost = this._onWebglcontextlost.bind(this);
this._onWebglcontextrestored = this._onWebglcontextrestored.bind(this);
canvas.addEventListener("webglcontextlost", this._onWebglcontextlost);
canvas.addEventListener("webglcontextrestored", this._onWebglcontextrestored);
return canvas;
}
_setDefaultCanvasStyle() {
const canvas = this.canvas;
canvas.style.bottom = 0;
canvas.style.left = 0;
canvas.style.right = 0;
canvas.style.top = 0;
canvas.style.margin = "auto";
canvas.style.maxHeight = "100%";
canvas.style.maxWidth = "100%";
canvas.style.outline = "none";
canvas.style.position = "absolute";
}
_onContentError(error) {
this._imageIsReady = false;
this._image = null;
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.FAIL_IMAGE_LOAD,
message: "failed to load image"
});
return false;
}
_triggerContentLoad() {
this.trigger(EVENTS.IMAGE_LOADED, {
content: this._image,
isVideo: this._isVideo,
projectionType: this._imageType
});
}
_onContentLoad(image) {
this._imageIsReady = true;
this._triggerContentLoad();
return true;
}
isImageLoaded() {
return !!this._image && this._imageIsReady &&
(!this._isVideo || this._image.readyState >= 2 /* HAVE_CURRENT_DATA */);
}
bindTexture() {
return new Promise((res, rej) => {
if (!this._contentLoader) {
rej("ImageLoader is not initialized");
return;
}
this._contentLoader.get()
.then(() => {
this._bindTexture();
}, rej)
.then(res);
});
}
// 부모 엘리먼트에 canvas 를 붙임
attachTo(parentElement) {
this.detach();
parentElement.appendChild(this.canvas);
this._wrapper = parentElement;
}
forceContextLoss() {
if (this.hasRenderingContext()) {
const loseContextExtension = this.context.getExtension("WEBGL_lose_context");
if (loseContextExtension) {
loseContextExtension.loseContext();
}
}
}
// 부모 엘리먼트에서 canvas 를 제거
detach() {
if (this.canvas.parentElement) {
this.canvas.parentElement.removeChild(this.canvas);
}
}
destroy() {
if (this._contentLoader) {
this._contentLoader.destroy();
}
this._animator.stop();
this.detach();
this.forceContextLoss();
this.off();
this.canvas.removeEventListener("webglcontextlost", this._onWebglcontextlost);
this.canvas.removeEventListener("webglcontextrestored", this._onWebglcontextrestored);
}
hasRenderingContext() {
if (!(this.context && !this.context.isContextLost())) {
return false;
} else if (
this.context &&
!this.context.getProgramParameter(this.shaderProgram, this.context.LINK_STATUS)) {
return false;
}
return true;
}
_initShaderProgram() {
const gl = this.context;
if (this.shaderProgram) {
gl.deleteProgram(this.shaderProgram);
this.shaderProgram = null;
}
const renderer = this._renderer;
const vsSource = renderer.getVertexShaderSource();
const fsSource = renderer.getFragmentShaderSource();
const vertexShader = WebGLUtils.createShader(gl, gl.VERTEX_SHADER, vsSource);
const fragmentShader = WebGLUtils.createShader(gl, gl.FRAGMENT_SHADER, fsSource);
const shaderProgram = WebGLUtils.createProgram(gl, vertexShader, fragmentShader);
if (!shaderProgram) {
throw new Error(`Failed to intialize shaders: ${WebGLUtils.getErrorNameFromWebGLErrorCode(gl.getError())}`);
}
gl.useProgram(shaderProgram);
shaderProgram.vertexPositionAttribute = gl.getAttribLocation(shaderProgram, "aVertexPosition");
gl.enableVertexAttribArray(shaderProgram.vertexPositionAttribute);
shaderProgram.pMatrixUniform = gl.getUniformLocation(shaderProgram, "uPMatrix");
shaderProgram.mvMatrixUniform = gl.getUniformLocation(shaderProgram, "uMVMatrix");
shaderProgram.samplerUniform = gl.getUniformLocation(shaderProgram, "uSampler");
shaderProgram.textureCoordAttribute = gl.getAttribLocation(shaderProgram, "aTextureCoord");
shaderProgram.uEye = gl.getUniformLocation(shaderProgram, "uEye");
gl.enableVertexAttribArray(shaderProgram.textureCoordAttribute);
// clear buffer
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT | gl.STENCIL_BUFFER_BIT);
// Use TEXTURE0
gl.uniform1i(shaderProgram.samplerUniform, 0);
this.shaderProgram = shaderProgram;
}
_onWebglcontextlost(e) {
e.preventDefault();
this.trigger(EVENTS.RENDERING_CONTEXT_LOST);
}
_onWebglcontextrestored(e) {
this._initWebGL();
this.trigger(EVENTS.RENDERING_CONTEXT_RESTORE);
}
updateFieldOfView(fieldOfView) {
this.fieldOfView = fieldOfView;
this._updateViewport();
}
updateViewportDimensions(width, height) {
let viewPortChanged = false;
this.width = width;
this.height = height;
const w = width * DEVICE_PIXEL_RATIO;
const h = height * DEVICE_PIXEL_RATIO;
if (w !== this.canvas.width) {
this.canvas.width = w;
viewPortChanged = true;
}
if (h !== this.canvas.height) {
this.canvas.height = h;
viewPortChanged = true;
}
if (!viewPortChanged) {
return;
}
this._updateViewport();
this._shouldForceDraw = true;
}
_updateViewport() {
mat4.perspective(
this.pMatrix,
glMatrix.toRadian(this.fieldOfView),
this.canvas.width / this.canvas.height,
0.1,
100);
this.context.viewport(0, 0, this.context.drawingBufferWidth, this.context.drawingBufferHeight);
}
_initWebGL() {
let gl;
// TODO: Following code does need to be executed only if width/height, cubicStrip property is changed.
try {
this._initRenderingContext();
gl = this.context;
this.updateViewportDimensions(this.width, this.height);
this._initShaderProgram();
} catch (e) {
this.trigger(EVENTS.ERROR, {
type: ERROR_TYPE.NO_WEBGL,
message: "no webgl support"
});
this.destroy();
console.error(e); // eslint-disable-line no-console
return;
}
// 캔버스를 투명으로 채운다.
gl.clearColor(0, 0, 0, 0);
const textureTarget = this._isCubeMap ? gl.TEXTURE_CUBE_MAP : gl.TEXTURE_2D;
if (this.texture) {
gl.deleteTexture(this.texture);
}
this.texture = WebGLUtils.createTexture(gl, textureTarget);
if (this._imageType === ImageType.CUBESTRIP) {
// TODO: Apply following options on other projection type.
gl.enable(gl.CULL_FACE);
// gl.enable(gl.DEPTH_TEST);
}
}
_initRenderingContext() {
if (this.hasRenderingContext()) {
return;
}
if (!window.WebGLRenderingContext) {
throw new Error("WebGLRenderingContext not available.");
}
| }
this._imageType = imageType;
this._isCubeMap = imageType === ImageType.CUBEMAP;
if (this._renderer) {
this._renderer.off();
}
switch (imageType) {
case ImageType.CUBEMAP:
this._renderer = new CubeRenderer();
break;
case ImageType.CUBESTRIP:
this._renderer = new CubeStripRenderer();
break;
case ImageType.PANORAMA:
this._renderer = new CylinderRenderer();
break;
case ImageType.STEREOSCOPIC_EQUI: | identifier_body |
row.rs | }
pub fn read(io: &mut dyn Read) -> Result<BoxRow> {
let mut header = [0; ROW_LAYOUT.size()];
io.read_exact(&mut header).context("reading header")?;
let header_crc32c = (&header[0..4]).read_u32::<LittleEndian>().unwrap();
let header_crc32c_calculated = crc32c(&header[4..]);
if header_crc32c_calculated != header_crc32c {
bail!("header crc32c mismatch: expected 0x{:08x}, calculated 0x{:08x}", | header_crc32c, header_crc32c_calculated);
}
let len = (&header[ROW_LAYOUT.size() - 8..]).read_u32::<LittleEndian>().unwrap();
let mut row = BoxRow { ptr: Self::alloc(len) };
row.as_bytes_mut().copy_from_slice(&header);
debug_assert!(row.len == len);
io.read_exact(row.data_mut()).context("reading body")?;
if crc32c(row.data()) != row.data_crc32c {
bail!("data crc32c mismatch: expected 0x{:08x}, calculated 0x{:08x}",
{row.data_crc32c}, crc32c(row.data()));
}
log::debug!("read row LSN:{}", {row.lsn});
Ok(row)
}
pub fn write(&self, io: &mut dyn io::Write) -> io::Result<usize> {
io.write_all(self.as_bytes())?; // FIXME: nasty and unportable
io.write_all(self.data())?;
Ok(ROW_LAYOUT.size() + self.data().len())
}
fn as_bytes(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self as *const _ as *const u8, ROW_LAYOUT.size())
}
}
fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe {
slice::from_raw_parts_mut(self as *mut _ as *mut u8, ROW_LAYOUT.size())
}
}
pub fn update_crc(&mut self) {
self.data_crc32c = crc32c(self.data());
self.header_crc32c = crc32c(&self.as_bytes()[4..])
}
fn tag(&self) -> Tag {
Tag::new(self.tag & TAG_MASK)
}
fn tag_type(&self) -> TagType {
TagType::new(self.tag & !TAG_MASK)
}
}
#[derive(Debug)]
#[derive(PartialEq, Eq)]
pub enum Tag {
SnapInitial,
SnapData,
WalData,
SnapFinal,
WalFinal,
RunCrc,
Nop,
RaftAppend,
RaftCommit,
RaftVote,
ShardCreate,
ShardAlter,
ShardFinal,
Tlv,
SysTag(u8),
UserTag(u8),
}
impl Tag {
fn new(repr: u16) -> Self {
match repr & TAG_MASK {
1 => Tag::SnapInitial,
2 => Tag::SnapData,
3 => Tag::WalData,
4 => Tag::SnapFinal,
5 => Tag::WalFinal,
6 => Tag::RunCrc,
7 => Tag::Nop,
8 => Tag::RaftAppend,
9 => Tag::RaftCommit,
10 => Tag::RaftVote,
11 => Tag::ShardCreate,
12 => Tag::ShardAlter,
13 => Tag::ShardFinal,
14 => Tag::Tlv,
t if t < 32 => Tag::SysTag(t as u8),
t => Tag::UserTag((t >> 5) as u8),
}
}
fn as_u16(&self) -> u16 {
match self {
Tag::SnapInitial => 1,
Tag::SnapData => 2,
Tag::WalData => 3,
Tag::SnapFinal => 4,
Tag::WalFinal => 5,
Tag::RunCrc => 6,
Tag::Nop => 7,
Tag::RaftAppend => 8,
Tag::RaftCommit => 9,
Tag::RaftVote => 10,
Tag::ShardCreate => 11,
Tag::ShardAlter => 12,
Tag::ShardFinal => 13,
Tag::Tlv => 14,
Tag::SysTag(t) => *t as u16,
Tag::UserTag(t) => *t as u16,
}
}
}
impl fmt::Display for Tag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Tag::SnapInitial => write!(f, "snap_initial"),
Tag::SnapData => write!(f, "snap_data"),
Tag::SnapFinal => write!(f, "snap_final"),
Tag::WalData => write!(f, "wal_data"),
Tag::WalFinal => write!(f, "wal_final"),
Tag::ShardCreate => write!(f, "shard_create"),
Tag::ShardAlter => write!(f, "shard_alter"),
Tag::ShardFinal => write!(f, "shard_final"),
Tag::RunCrc => write!(f, "run_crc"),
Tag::Nop => write!(f, "nop"),
Tag::RaftAppend => write!(f, "raft_append"),
Tag::RaftCommit => write!(f, "raft_commit"),
Tag::RaftVote => write!(f, "raft_vote"),
Tag::Tlv => write!(f, "tlv"),
Tag::SysTag(n) => write!(f, "sys{}", n),
Tag::UserTag(n) => write!(f, "usr{}", n)
}
}
}
/* two highest bit in tag encode tag type:
00 - invalid
01 - snap
10 - wal
11 - system wal */
pub const TAG_MASK: u16 = 0x3fff;
const TAG_SIZE: usize = 14;
enum TagType {
SNAP = 0x4000,
WAL = 0x8000,
SYS = 0xc000,
INVALID = 0,
}
impl TagType {
fn new(repr: u16) -> TagType {
match repr & !TAG_MASK {
0x4000 => TagType::SNAP,
0x8000 => TagType::WAL,
0xc000 => TagType::SYS,
_ => TagType::INVALID,
}
}
}
impl fmt::Display for TagType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TagType::SNAP => write!(f, "snap"),
TagType::WAL => write!(f, "wal"),
TagType::SYS => write!(f, "sys"),
TagType::INVALID => write!(f, "invalid"),
}
}
}
#[derive(PartialEq, Eq)]
enum ShardType {
POR,
RAFT,
PART
}
impl ShardType {
fn new(repr: u8) -> Result<Self> {
match repr {
0 => Ok(ShardType::POR),
1 => Ok(ShardType::RAFT),
2 => Ok(ShardType::PART),
_ => bail!("invalid shard type {}", repr)
}
}
}
// TODO: switch to byteordered?
struct LittleEndianReader<'a> (std::io::Cursor<&'a[u8]>);
impl<'a> LittleEndianReader<'a> {
fn new(buf: &'a[u8]) -> Self { Self(std::io::Cursor::new(buf)) }
fn read_u8(&mut self) -> u8 { self.0.read_u8().unwrap() }
fn read_u16(&mut self) -> u16 { self.0.read_u16::<LittleEndian>().unwrap() }
fn read_u32(&mut self) -> u32 { self.0.read_u32::<LittleEndian>().unwrap() }
fn read_i64(&mut self) -> i64 { self.0.read_i64::<LittleEndian>().unwrap() }
fn read_u64(&mut self) -> u64 { self.0.read_u64::<LittleEndian>().unwrap() }
fn read_str(&mut self, len: usize) -> &str {
let pos = self.0.position() as usize;
let raw = &self.0.get_ref()[pos..pos+len];
let (raw, _) = raw.split_at(raw.iter().position(|&x| x == 0).unwrap_or(len));
let str = std::str::from_utf8(raw).unwrap();
self.0.set_position((pos+len) as u64);
str
}
fn into_cursor(self) -> std::io::Cursor<&'a[u8]> { self.0 }
fn unparsed(&self) -> &[u8] {
&self.0.get | random_line_split | |
row.rs | }
pub fn read(io: &mut dyn Read) -> Result<BoxRow> {
let mut header = [0; ROW_LAYOUT.size()];
io.read_exact(&mut header).context("reading header")?;
let header_crc32c = (&header[0..4]).read_u32::<LittleEndian>().unwrap();
let header_crc32c_calculated = crc32c(&header[4..]);
if header_crc32c_calculated != header_crc32c {
bail!("header crc32c mismatch: expected 0x{:08x}, calculated 0x{:08x}",
header_crc32c, header_crc32c_calculated);
}
let len = (&header[ROW_LAYOUT.size() - 8..]).read_u32::<LittleEndian>().unwrap();
let mut row = BoxRow { ptr: Self::alloc(len) };
row.as_bytes_mut().copy_from_slice(&header);
debug_assert!(row.len == len);
io.read_exact(row.data_mut()).context("reading body")?;
if crc32c(row.data()) != row.data_crc32c {
bail!("data crc32c mismatch: expected 0x{:08x}, calculated 0x{:08x}",
{row.data_crc32c}, crc32c(row.data()));
}
log::debug!("read row LSN:{}", {row.lsn});
Ok(row)
}
pub fn write(&self, io: &mut dyn io::Write) -> io::Result<usize> {
io.write_all(self.as_bytes())?; // FIXME: nasty and unportable
io.write_all(self.data())?;
Ok(ROW_LAYOUT.size() + self.data().len())
}
fn as_bytes(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self as *const _ as *const u8, ROW_LAYOUT.size())
}
}
fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe {
slice::from_raw_parts_mut(self as *mut _ as *mut u8, ROW_LAYOUT.size())
}
}
pub fn update_crc(&mut self) {
self.data_crc32c = crc32c(self.data());
self.header_crc32c = crc32c(&self.as_bytes()[4..])
}
fn tag(&self) -> Tag {
Tag::new(self.tag & TAG_MASK)
}
fn tag_type(&self) -> TagType {
TagType::new(self.tag & !TAG_MASK)
}
}
#[derive(Debug)]
#[derive(PartialEq, Eq)]
pub enum Tag {
SnapInitial,
SnapData,
WalData,
SnapFinal,
WalFinal,
RunCrc,
Nop,
RaftAppend,
RaftCommit,
RaftVote,
ShardCreate,
ShardAlter,
ShardFinal,
Tlv,
SysTag(u8),
UserTag(u8),
}
impl Tag {
fn new(repr: u16) -> Self {
match repr & TAG_MASK {
1 => Tag::SnapInitial,
2 => Tag::SnapData,
3 => Tag::WalData,
4 => Tag::SnapFinal,
5 => Tag::WalFinal,
6 => Tag::RunCrc,
7 => Tag::Nop,
8 => Tag::RaftAppend,
9 => Tag::RaftCommit,
10 => Tag::RaftVote,
11 => Tag::ShardCreate,
12 => Tag::ShardAlter,
13 => Tag::ShardFinal,
14 => Tag::Tlv,
t if t < 32 => Tag::SysTag(t as u8),
t => Tag::UserTag((t >> 5) as u8),
}
}
fn as_u16(&self) -> u16 {
match self {
Tag::SnapInitial => 1,
Tag::SnapData => 2,
Tag::WalData => 3,
Tag::SnapFinal => 4,
Tag::WalFinal => 5,
Tag::RunCrc => 6,
Tag::Nop => 7,
Tag::RaftAppend => 8,
Tag::RaftCommit => 9,
Tag::RaftVote => 10,
Tag::ShardCreate => 11,
Tag::ShardAlter => 12,
Tag::ShardFinal => 13,
Tag::Tlv => 14,
Tag::SysTag(t) => *t as u16,
Tag::UserTag(t) => *t as u16,
}
}
}
impl fmt::Display for Tag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Tag::SnapInitial => write!(f, "snap_initial"),
Tag::SnapData => write!(f, "snap_data"),
Tag::SnapFinal => write!(f, "snap_final"),
Tag::WalData => write!(f, "wal_data"),
Tag::WalFinal => write!(f, "wal_final"),
Tag::ShardCreate => write!(f, "shard_create"),
Tag::ShardAlter => write!(f, "shard_alter"),
Tag::ShardFinal => write!(f, "shard_final"),
Tag::RunCrc => write!(f, "run_crc"),
Tag::Nop => write!(f, "nop"),
Tag::RaftAppend => write!(f, "raft_append"),
Tag::RaftCommit => write!(f, "raft_commit"),
Tag::RaftVote => write!(f, "raft_vote"),
Tag::Tlv => write!(f, "tlv"),
Tag::SysTag(n) => write!(f, "sys{}", n),
Tag::UserTag(n) => write!(f, "usr{}", n)
}
}
}
/* two highest bit in tag encode tag type:
00 - invalid
01 - snap
10 - wal
11 - system wal */
pub const TAG_MASK: u16 = 0x3fff;
const TAG_SIZE: usize = 14;
enum TagType {
SNAP = 0x4000,
WAL = 0x8000,
SYS = 0xc000,
INVALID = 0,
}
impl TagType {
fn new(repr: u16) -> TagType {
match repr & !TAG_MASK {
0x4000 => TagType::SNAP,
0x8000 => TagType::WAL,
0xc000 => TagType::SYS,
_ => TagType::INVALID,
}
}
}
impl fmt::Display for TagType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TagType::SNAP => write!(f, "snap"),
TagType::WAL => write!(f, "wal"),
TagType::SYS => write!(f, "sys"),
TagType::INVALID => write!(f, "invalid"),
}
}
}
#[derive(PartialEq, Eq)]
enum ShardType {
POR,
RAFT,
PART
}
impl ShardType {
fn new(repr: u8) -> Result<Self> {
match repr {
0 => Ok(ShardType::POR),
1 => Ok(ShardType::RAFT),
2 => Ok(ShardType::PART),
_ => bail!("invalid shard type {}", repr)
}
}
}
// TODO: switch to byteordered?
struct LittleEndianReader<'a> (std::io::Cursor<&'a[u8]>);
impl<'a> LittleEndianReader<'a> {
fn new(buf: &'a[u8]) -> Self { Self(std::io::Cursor::new(buf)) }
fn read_u8(&mut self) -> u8 { self.0.read_u8().unwrap() }
fn read_u16(&mut self) -> u16 { self.0.read_u16::<LittleEndian>().unwrap() }
fn read_u32(&mut self) -> u32 { self.0.read_u32::<LittleEndian>().unwrap() }
fn read_i64(&mut self) -> i64 { self.0.read_i64::<LittleEndian>().unwrap() }
fn read_u64(&mut self) -> u64 { self.0.read_u64::<LittleEndian>().unwrap() }
fn read_str(&mut self, len: usize) -> &str {
let pos = self.0.position() as usize;
let raw = &self.0.get_ref()[pos..pos+len];
let (raw, _) = raw.split_at(raw.iter().position(|&x| x == 0).unwrap_or(len));
let str = std::str::from_utf8(raw).unwrap();
self.0.set_position((pos+len) as u64);
str
}
fn into_cursor(self) -> std::io::Cursor<&'a[u8]> { self.0 }
fn | (&self) -> &[u8] {
&self.0 | unparsed | identifier_name |
main.go | .upstream)
if err != nil {
glog.Fatalf("Failed to build parse upstream URL: %v", err)
}
spdyMetrics := monitoring.NewSPDYMetrics()
spdyProxy := spdy.New(kcfg, upstreamURL, spdyMetrics)
kubeClient, err := kubernetes.NewForConfig(kcfg)
if err != nil {
glog.Fatalf("Failed to instantiate Kubernetes client: %v", err)
}
var oidcAuthenticator authenticator.Request
fileWatcherCtx, fileWatcherCtxCancel := context.WithCancel(context.Background())
// If OIDC configuration provided, use oidc authenticator
if cfg.auth.Authentication.OIDC.IssuerURL != "" {
oidcAuthenticator, err = setupOIDCAuthReloader(fileWatcherCtx, cfg.auth.Authentication.OIDC)
if err != nil {
glog.Fatalf("Failed to instantiate OIDC authenticator: %v", err)
}
} else {
//Use Delegating authenticator
tokenClient := kubeClient.AuthenticationV1().TokenReviews()
oidcAuthenticator, err = authn.NewDelegatingAuthenticator(tokenClient, cfg.auth.Authentication)
if err != nil {
glog.Fatalf("Failed to instantiate delegating authenticator: %v", err)
}
}
metrics, err := monitoring.NewProxyMetrics()
if err != nil {
glog.Fatalf("Failed to create metrics: %v", err)
}
authProxy := proxy.New(cfg.auth, nil, oidcAuthenticator, metrics)
if err != nil {
glog.Fatalf("Failed to create rbac-proxy: %v", err)
}
proxyForApiserver := strings.Contains(cfg.upstream, proxy.KUBERNETES_SERVICE)
rp, err := newReverseProxy(upstreamURL, kcfg, proxyForApiserver)
if err != nil {
glog.Fatalf("Unable to create reverse proxy, %s", err)
}
//Prometheus
prometheusRegistry := prometheus.NewRegistry()
err = prometheusRegistry.Register(prometheus.NewGoCollector())
if err != nil {
glog.Fatalf("failed to register Go runtime metrics: %v", err)
}
err = prometheusRegistry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
if err != nil {
glog.Fatalf("failed to register process metrics: %v", err)
}
mux := http.NewServeMux()
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
ok := authProxy.Handle(w, req)
if !ok {
return
}
if spdyProxy.IsSpdyRequest(req) {
spdyProxy.ServeHTTP(w, req)
} else {
rp.ServeHTTP(w, req)
}
}))
if cfg.secureListenAddress != "" {
srv := &http.Server{Handler: getCORSHandler(mux, cfg.cors)}
if cfg.tls.certFile == "" && cfg.tls.keyFile == "" {
glog.Info("Generating self signed cert as no cert is provided")
certBytes, keyBytes, err := certutil.GenerateSelfSignedCertKey("", nil, nil)
if err != nil {
glog.Fatalf("Failed to generate self signed cert and key: %v", err)
}
cert, err := tls.X509KeyPair(certBytes, keyBytes)
if err != nil {
glog.Fatalf("Failed to load generated self signed cert and key: %v", err)
}
version, err := tlsVersion(cfg.tls.minVersion)
if err != nil {
glog.Fatalf("TLS version invalid: %v", err)
}
cipherSuiteIDs, err := cliflag.TLSCipherSuites(cfg.tls.cipherSuites)
if err != nil {
glog.Fatalf("Failed to convert TLS cipher suite name to ID: %v", err)
}
srv.TLSConfig = &tls.Config{
CipherSuites: cipherSuiteIDs,
Certificates: []tls.Certificate{cert},
MinVersion: version,
// To enable http/2
// See net/http.Server.shouldConfigureHTTP2ForServe for more context
NextProtos: []string{"h2"},
}
} else {
certReloader, err := setupTLSCertReloader(fileWatcherCtx, cfg.tls.certFile, cfg.tls.keyFile)
if err != nil {
glog.Fatalf("Failed to create ReloadableTLSCertProvider: %v", err)
}
//Configure srv with GetCertificate function
srv.TLSConfig = &tls.Config{
GetCertificate: certReloader.GetCertificateFunc,
}
}
l, err := net.Listen("tcp", cfg.secureListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on secure address: %v", err)
}
glog.Infof("Listening securely on %v", cfg.secureListenAddress)
go srv.ServeTLS(l, "", "")
}
if cfg.metricsListenAddress != "" {
srv := &http.Server{Handler: promhttp.Handler()}
l, err := net.Listen("tcp", cfg.metricsListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on insecure address: %v", err)
}
glog.Infof("Listening for metrics on %v", cfg.metricsListenAddress)
go srv.Serve(l)
}
if cfg.insecureListenAddress != "" {
if cfg.upstreamForceH2C && !proxyForApiserver {
// Force http/2 for connections to the upstream i.e. do not start with HTTP1.1 UPGRADE req to
// initialize http/2 session.
// See https://github.com/golang/go/issues/14141#issuecomment-219212895 for more context
rp.Transport = &http2.Transport{
// Allow http schema. This doesn't automatically disable TLS
AllowHTTP: true,
// Do disable TLS.
// In combination with the schema check above. We could enforce h2c against the upstream server
DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) {
return net.Dial(netw, addr)
},
}
}
// Background:
//
// golang's http2 server doesn't support h2c
// https://github.com/golang/go/issues/16696
//
//
// Action:
//
// Use hkwi/h2c so that you can properly handle HTTP Upgrade requests over plain TCP,
// which is one of consequences for a h2c support.
//
// See https://github.com/golang/go/issues/14141 for more context.
//
// Possible alternative:
//
// We could potentially use grpc-go server's HTTP handler support
// which would handle HTTP UPGRADE from http1.1 to http/2, especially in case
// what you wanted kube-rbac-proxy to authn/authz was gRPC over h2c calls.
//
// Note that golang's http server requires a client(including gRPC) to send HTTP Upgrade req to
// property start http/2.
//
// but it isn't straight-forward to understand.
// Also note that at time of writing this, grpc-go's server implementation still lacks
// a h2c support for communication against the upstream.
//
// See belows for more information:
// - https://github.com/grpc/grpc-go/pull/1406/files
// - https://github.com/grpc/grpc-go/issues/549#issuecomment-191458335
// - https://github.com/golang/go/issues/14141#issuecomment-176465220
h2cHandler := &h2c.Server{Handler: mux}
srv := &http.Server{Handler: h2cHandler}
l, err := net.Listen("tcp", cfg.insecureListenAddress)
if err != nil {
glog.Fatalf("Failed to listen on insecure address: %v", err)
}
glog.Infof("Listening insecurely on %v", cfg.insecureListenAddress)
go srv.Serve(l)
}
term := make(chan os.Signal)
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
select {
case <-term:
glog.Info("Received SIGTERM, exiting gracefully...")
fileWatcherCtxCancel()
}
//Allow for file watchers to close gracefully
time.Sleep(1 * time.Second)
}
// Returns intiliazed config, allows local usage (outside cluster) based on provided kubeconfig or in-cluter
func initKubeConfig(kcLocation string) *rest.Config {
if kcLocation != "" {
kubeConfig, err := clientcmd.BuildConfigFromFlags("", kcLocation)
if err != nil {
glog.Fatalf("unable to build rest config based on provided path to kubeconfig file %s", err.Error())
}
return kubeConfig
}
kubeConfig, err := rest.InClusterConfig()
if err != nil {
glog.Fatal("cannot find Service Account in pod to build in-cluster rest config")
}
return kubeConfig
}
func | newReverseProxy | identifier_name | |
main.go |
func main() {
cfg := config{
auth: proxy.Config{
Authentication: &authn.AuthnConfig{
X509: &authn.X509Config{},
Header: &authn.AuthnHeaderConfig{},
OIDC: &authn.OIDCConfig{},
},
Authorization: &authz.Config{},
},
cors: corsConfig{},
}
flagset := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
// Add glog flags
flagset.AddGoFlagSet(flag.CommandLine)
// kube-rbac-proxy flags
flagset.StringVar(&cfg.insecureListenAddress, "insecure-listen-address", "", "The address the kube-rbac-proxy HTTP server should listen on.")
flagset.StringVar(&cfg.secureListenAddress, "secure-listen-address", "", "The address the kube-rbac-proxy HTTPs server should listen on.")
flagset.StringVar(&cfg.upstream, "upstream", "", "The upstream URL to proxy to once requests have successfully been authenticated and authorized.")
flagset.BoolVar(&cfg.upstreamForceH2C, "upstream-force-h2c", false, "Force h2c to communiate with the upstream. This is required when the upstream speaks h2c(http/2 cleartext - insecure variant of http/2) only. For example, go-grpc server in the insecure mode, such as helm's tiller w/o TLS, speaks h2c only")
flagset.StringVar(&cfg.auth.Authorization.ResourceAttributesFile, "resource-attributes-file", "", "File spec of attributes-record to use for SubjectAccessReview. If unspecified, requests will attempted to be verified through non-resource-url attributes in the SubjectAccessReview.")
// TLS flags
flagset.StringVar(&cfg.tls.certFile, "tls-cert-file", "", "File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert)")
flagset.StringVar(&cfg.tls.keyFile, "tls-private-key-file", "", "File containing the default x509 private key matching --tls-cert-file.")
flagset.StringVar(&cfg.tls.minVersion, "tls-min-version", "VersionTLS12", "Minimum TLS version supported. Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants.")
flagset.StringSliceVar(&cfg.tls.cipherSuites, "tls-cipher-suites", nil, "Comma-separated list of cipher suites for the server. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). If omitted, the default Go cipher suites will be used")
// Auth flags
flagset.StringVar(&cfg.auth.Authentication.X509.ClientCAFile, "client-ca-file", "", "If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.")
flagset.BoolVar(&cfg.auth.Authentication.Header.Enabled, "auth-header-fields-enabled", false, "When set to true, kube-rbac-proxy adds auth-related fields to the headers of http requests sent to the upstream")
flagset.StringVar(&cfg.auth.Authentication.Header.UserFieldName, "auth-header-user-field-name", "x-remote-user", "The name of the field inside a http(2) request header to tell the upstream server about the user's name")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupsFieldName, "auth-header-groups-field-name", "x-remote-groups", "The name of the field inside a http(2) request header to tell the upstream server about the user's groups")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupSeparator, "auth-header-groups-field-separator", "|", "The separator string used for concatenating multiple group names in a groups header field's value")
//Authn OIDC flags
flagset.StringVar(&cfg.auth.Authentication.OIDC.IssuerURL, "oidc-issuer", "", "The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).")
flagset.StringVar(&cfg.auth.Authentication.OIDC.ClientID, "oidc-clientID", "", "The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsClaim, "oidc-groups-claim", "groups", "Identifier of groups in JWT claim, by default set to 'groups'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.UsernameClaim, "oidc-username-claim", "email", "Identifier of the user in JWT claim, by default set to 'email'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsPrefix, "oidc-groups-prefix", "", "If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.")
flagset.StringArrayVar(&cfg.auth.Authentication.OIDC.SupportedSigningAlgs, "oidc-sign-alg", []string{"RS256"}, "Supported signing algorithms, default RS256")
flagset.StringVar(&cfg.auth.Authentication.OIDC.CAFile, "oidc-ca-file", "", "If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.")
//Kubeconfig flag
flagset.StringVar(&cfg.kubeconfigLocation, "kubeconfig", "", "Path to a kubeconfig file, specifying how to connect to the API server. If unset, in-cluster configuration will be used")
// CORS flags
flagset.StringSliceVar(&cfg.cors.allowOrigin, "cors-allow-origin", []string{"*"}, "List of CORS allowed origins")
flagset.StringSliceVar(&cfg.cors.allowMethods, "cors-allow-methods", []string{"GET", "POST", "PUT", "DELETE"}, "List of CORS allowed methods")
flagset.StringSliceVar(&cfg.cors.allowHeaders, "cors-allow-headers", []string{"Authorization", "Content-Type"}, "List of CORS allowed headers")
// Prometheus
flagset.StringVar(&cfg.metricsListenAddress, "metrics-listen-address", "", "The address the metric endpoint binds to.")
flagset.Parse(os.Args[1:])
kcfg := initKubeConfig(cfg.kubeconfigLocation)
upstreamURL, err := url.Parse(cfg.upstream)
if err != nil {
glog.Fatalf("Failed to build parse upstream URL: %v", err)
}
spdyMetrics := monitoring.NewSPDYMetrics()
spdyProxy := spdy.New(kcfg, upstreamURL, spdyMetrics)
kubeClient, err := kubernetes.NewForConfig(kcfg)
if err != nil {
glog.Fatalf("Failed to instantiate Kubernetes client: %v", err)
}
var oidcAuthenticator authenticator.Request
fileWatcherCtx, fileWatcherCtxCancel := context.WithCancel(context.Background())
// If OIDC configuration provided, use oidc authenticator
if cfg.auth.Authentication.OIDC.IssuerURL != "" {
oidcAuthenticator, err = setupOIDCAuthReloader(fileWatcherCtx, cfg.auth.Authentication.OIDC)
if err != nil {
glog.Fatalf("Failed to instantiate OIDC authenticator: %v", err)
}
} else {
//Use Delegating authenticator
tokenClient := kubeClient.AuthenticationV1().TokenReviews()
oidcAuthenticator, err = authn.NewDelegatingAuthenticator(tokenClient, cfg.auth.Authentication)
if err != nil {
glog.Fatalf("Failed to instantiate delegating authenticator: %v", err)
}
}
metrics, err := monitoring.NewProxyMetrics()
if err != nil {
glog.Fatalf("Failed to create metrics: %v", err)
}
authProxy := proxy.New(cfg.auth, nil, oidcAuthenticator, metrics)
if err != nil {
glog.Fatalf("Failed to create rbac-proxy: %v", err)
}
proxyForApiserver := strings.Contains(cfg.upstream, proxy.KUBERNETES_SERVICE)
rp, err := newReverseProxy(upstreamURL, kcfg, proxyForApiserver)
if err != nil {
glog.Fatalf("Unable to create reverse proxy, %s", err)
}
//Prometheus
prometheusRegistry := prometheus.NewRegistry()
err = prometheusRegistry.Register(prometheus.NewGoCollector())
if err != nil {
glog.Fatalf("failed to register Go runtime metrics: %v", err)
}
err = prometheusRegistry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
if err != nil {
glog.Fatalf("failed to register process metrics: %v", err)
}
mux := http.NewServeMux()
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
ok := authProxy.Handle(w, req)
if !ok {
return
}
if spdyProxy.IsSpdyRequest(req) {
spdyProxy.ServeHTTP(w, req)
} else {
rp.ServeHTTP(w, req)
}
}))
if cfg.secureListenAddress != "" {
srv := &http.Server{Handler: getCORSHandler(mux, cfg.cors)}
if cfg.tls.certFile == "" && cfg.tls.keyFile == "" {
glog.Info("Generating self signed cert as no cert is provided")
certBytes, keyBytes, err := certutil.Generate | {
if version, ok := versions[versionName]; ok {
return version, nil
}
return 0, fmt.Errorf("unknown tls version %q", versionName)
} | identifier_body | |
main.go | should listen on.")
flagset.StringVar(&cfg.secureListenAddress, "secure-listen-address", "", "The address the kube-rbac-proxy HTTPs server should listen on.")
flagset.StringVar(&cfg.upstream, "upstream", "", "The upstream URL to proxy to once requests have successfully been authenticated and authorized.")
flagset.BoolVar(&cfg.upstreamForceH2C, "upstream-force-h2c", false, "Force h2c to communiate with the upstream. This is required when the upstream speaks h2c(http/2 cleartext - insecure variant of http/2) only. For example, go-grpc server in the insecure mode, such as helm's tiller w/o TLS, speaks h2c only")
flagset.StringVar(&cfg.auth.Authorization.ResourceAttributesFile, "resource-attributes-file", "", "File spec of attributes-record to use for SubjectAccessReview. If unspecified, requests will attempted to be verified through non-resource-url attributes in the SubjectAccessReview.")
// TLS flags
flagset.StringVar(&cfg.tls.certFile, "tls-cert-file", "", "File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert)")
flagset.StringVar(&cfg.tls.keyFile, "tls-private-key-file", "", "File containing the default x509 private key matching --tls-cert-file.")
flagset.StringVar(&cfg.tls.minVersion, "tls-min-version", "VersionTLS12", "Minimum TLS version supported. Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants.")
flagset.StringSliceVar(&cfg.tls.cipherSuites, "tls-cipher-suites", nil, "Comma-separated list of cipher suites for the server. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). If omitted, the default Go cipher suites will be used")
// Auth flags
flagset.StringVar(&cfg.auth.Authentication.X509.ClientCAFile, "client-ca-file", "", "If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.")
flagset.BoolVar(&cfg.auth.Authentication.Header.Enabled, "auth-header-fields-enabled", false, "When set to true, kube-rbac-proxy adds auth-related fields to the headers of http requests sent to the upstream")
flagset.StringVar(&cfg.auth.Authentication.Header.UserFieldName, "auth-header-user-field-name", "x-remote-user", "The name of the field inside a http(2) request header to tell the upstream server about the user's name")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupsFieldName, "auth-header-groups-field-name", "x-remote-groups", "The name of the field inside a http(2) request header to tell the upstream server about the user's groups")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupSeparator, "auth-header-groups-field-separator", "|", "The separator string used for concatenating multiple group names in a groups header field's value")
//Authn OIDC flags
flagset.StringVar(&cfg.auth.Authentication.OIDC.IssuerURL, "oidc-issuer", "", "The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).")
flagset.StringVar(&cfg.auth.Authentication.OIDC.ClientID, "oidc-clientID", "", "The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsClaim, "oidc-groups-claim", "groups", "Identifier of groups in JWT claim, by default set to 'groups'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.UsernameClaim, "oidc-username-claim", "email", "Identifier of the user in JWT claim, by default set to 'email'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsPrefix, "oidc-groups-prefix", "", "If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.")
flagset.StringArrayVar(&cfg.auth.Authentication.OIDC.SupportedSigningAlgs, "oidc-sign-alg", []string{"RS256"}, "Supported signing algorithms, default RS256")
flagset.StringVar(&cfg.auth.Authentication.OIDC.CAFile, "oidc-ca-file", "", "If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.")
//Kubeconfig flag
flagset.StringVar(&cfg.kubeconfigLocation, "kubeconfig", "", "Path to a kubeconfig file, specifying how to connect to the API server. If unset, in-cluster configuration will be used")
// CORS flags
flagset.StringSliceVar(&cfg.cors.allowOrigin, "cors-allow-origin", []string{"*"}, "List of CORS allowed origins")
flagset.StringSliceVar(&cfg.cors.allowMethods, "cors-allow-methods", []string{"GET", "POST", "PUT", "DELETE"}, "List of CORS allowed methods")
flagset.StringSliceVar(&cfg.cors.allowHeaders, "cors-allow-headers", []string{"Authorization", "Content-Type"}, "List of CORS allowed headers")
// Prometheus
flagset.StringVar(&cfg.metricsListenAddress, "metrics-listen-address", "", "The address the metric endpoint binds to.")
flagset.Parse(os.Args[1:])
kcfg := initKubeConfig(cfg.kubeconfigLocation)
upstreamURL, err := url.Parse(cfg.upstream)
if err != nil {
glog.Fatalf("Failed to build parse upstream URL: %v", err)
}
spdyMetrics := monitoring.NewSPDYMetrics()
spdyProxy := spdy.New(kcfg, upstreamURL, spdyMetrics)
kubeClient, err := kubernetes.NewForConfig(kcfg)
if err != nil {
glog.Fatalf("Failed to instantiate Kubernetes client: %v", err)
}
var oidcAuthenticator authenticator.Request
fileWatcherCtx, fileWatcherCtxCancel := context.WithCancel(context.Background())
// If OIDC configuration provided, use oidc authenticator
if cfg.auth.Authentication.OIDC.IssuerURL != "" {
oidcAuthenticator, err = setupOIDCAuthReloader(fileWatcherCtx, cfg.auth.Authentication.OIDC)
if err != nil {
glog.Fatalf("Failed to instantiate OIDC authenticator: %v", err)
}
} else {
//Use Delegating authenticator
tokenClient := kubeClient.AuthenticationV1().TokenReviews()
oidcAuthenticator, err = authn.NewDelegatingAuthenticator(tokenClient, cfg.auth.Authentication)
if err != nil {
glog.Fatalf("Failed to instantiate delegating authenticator: %v", err)
}
}
metrics, err := monitoring.NewProxyMetrics()
if err != nil {
glog.Fatalf("Failed to create metrics: %v", err)
}
authProxy := proxy.New(cfg.auth, nil, oidcAuthenticator, metrics)
if err != nil {
glog.Fatalf("Failed to create rbac-proxy: %v", err)
}
proxyForApiserver := strings.Contains(cfg.upstream, proxy.KUBERNETES_SERVICE)
rp, err := newReverseProxy(upstreamURL, kcfg, proxyForApiserver)
if err != nil {
glog.Fatalf("Unable to create reverse proxy, %s", err)
}
//Prometheus
prometheusRegistry := prometheus.NewRegistry()
err = prometheusRegistry.Register(prometheus.NewGoCollector())
if err != nil {
glog.Fatalf("failed to register Go runtime metrics: %v", err)
}
err = prometheusRegistry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
if err != nil {
glog.Fatalf("failed to register process metrics: %v", err)
}
mux := http.NewServeMux()
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
ok := authProxy.Handle(w, req)
if !ok {
return
}
if spdyProxy.IsSpdyRequest(req) {
spdyProxy.ServeHTTP(w, req)
} else {
rp.ServeHTTP(w, req)
}
}))
if cfg.secureListenAddress != "" {
srv := &http.Server{Handler: getCORSHandler(mux, cfg.cors)}
if cfg.tls.certFile == "" && cfg.tls.keyFile == "" {
glog.Info("Generating self signed cert as no cert is provided")
certBytes, keyBytes, err := certutil.GenerateSelfSignedCertKey("", nil, nil)
if err != nil |
cert, err := tls.X509KeyPair(certBytes, keyBytes)
if err != nil {
glog.Fatalf("Failed to load generated self signed cert and key: %v", err)
}
version, err := tlsVersion(cfg.tls.minVersion)
if err != nil {
glog.Fatalf("TLS version invalid: %v", err)
}
cipherSuiteIDs, err := cliflag.TLSCipherSuites(cfg.tls.cipherSuites)
if err != nil {
glog.Fatalf("Failed to convert TLS cipher suite name to ID: %v", err)
}
srv.TLSConfig = &tls.Config{
CipherSuites: cipherSuiteIDs,
Certificates: []tls | {
glog.Fatalf("Failed to generate self signed cert and key: %v", err)
} | conditional_block |
main.go | server should listen on.")
flagset.StringVar(&cfg.secureListenAddress, "secure-listen-address", "", "The address the kube-rbac-proxy HTTPs server should listen on.")
flagset.StringVar(&cfg.upstream, "upstream", "", "The upstream URL to proxy to once requests have successfully been authenticated and authorized.")
flagset.BoolVar(&cfg.upstreamForceH2C, "upstream-force-h2c", false, "Force h2c to communiate with the upstream. This is required when the upstream speaks h2c(http/2 cleartext - insecure variant of http/2) only. For example, go-grpc server in the insecure mode, such as helm's tiller w/o TLS, speaks h2c only")
flagset.StringVar(&cfg.auth.Authorization.ResourceAttributesFile, "resource-attributes-file", "", "File spec of attributes-record to use for SubjectAccessReview. If unspecified, requests will attempted to be verified through non-resource-url attributes in the SubjectAccessReview.")
// TLS flags
flagset.StringVar(&cfg.tls.certFile, "tls-cert-file", "", "File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert)")
flagset.StringVar(&cfg.tls.keyFile, "tls-private-key-file", "", "File containing the default x509 private key matching --tls-cert-file.")
flagset.StringVar(&cfg.tls.minVersion, "tls-min-version", "VersionTLS12", "Minimum TLS version supported. Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants.")
flagset.StringSliceVar(&cfg.tls.cipherSuites, "tls-cipher-suites", nil, "Comma-separated list of cipher suites for the server. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). If omitted, the default Go cipher suites will be used")
// Auth flags
flagset.StringVar(&cfg.auth.Authentication.X509.ClientCAFile, "client-ca-file", "", "If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.")
flagset.BoolVar(&cfg.auth.Authentication.Header.Enabled, "auth-header-fields-enabled", false, "When set to true, kube-rbac-proxy adds auth-related fields to the headers of http requests sent to the upstream")
flagset.StringVar(&cfg.auth.Authentication.Header.UserFieldName, "auth-header-user-field-name", "x-remote-user", "The name of the field inside a http(2) request header to tell the upstream server about the user's name")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupsFieldName, "auth-header-groups-field-name", "x-remote-groups", "The name of the field inside a http(2) request header to tell the upstream server about the user's groups")
flagset.StringVar(&cfg.auth.Authentication.Header.GroupSeparator, "auth-header-groups-field-separator", "|", "The separator string used for concatenating multiple group names in a groups header field's value")
//Authn OIDC flags
flagset.StringVar(&cfg.auth.Authentication.OIDC.IssuerURL, "oidc-issuer", "", "The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).")
flagset.StringVar(&cfg.auth.Authentication.OIDC.ClientID, "oidc-clientID", "", "The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsClaim, "oidc-groups-claim", "groups", "Identifier of groups in JWT claim, by default set to 'groups'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.UsernameClaim, "oidc-username-claim", "email", "Identifier of the user in JWT claim, by default set to 'email'")
flagset.StringVar(&cfg.auth.Authentication.OIDC.GroupsPrefix, "oidc-groups-prefix", "", "If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.")
flagset.StringArrayVar(&cfg.auth.Authentication.OIDC.SupportedSigningAlgs, "oidc-sign-alg", []string{"RS256"}, "Supported signing algorithms, default RS256")
flagset.StringVar(&cfg.auth.Authentication.OIDC.CAFile, "oidc-ca-file", "", "If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.")
//Kubeconfig flag
flagset.StringVar(&cfg.kubeconfigLocation, "kubeconfig", "", "Path to a kubeconfig file, specifying how to connect to the API server. If unset, in-cluster configuration will be used")
// CORS flags
flagset.StringSliceVar(&cfg.cors.allowOrigin, "cors-allow-origin", []string{"*"}, "List of CORS allowed origins")
flagset.StringSliceVar(&cfg.cors.allowMethods, "cors-allow-methods", []string{"GET", "POST", "PUT", "DELETE"}, "List of CORS allowed methods")
flagset.StringSliceVar(&cfg.cors.allowHeaders, "cors-allow-headers", []string{"Authorization", "Content-Type"}, "List of CORS allowed headers")
// Prometheus
flagset.StringVar(&cfg.metricsListenAddress, "metrics-listen-address", "", "The address the metric endpoint binds to.")
flagset.Parse(os.Args[1:])
kcfg := initKubeConfig(cfg.kubeconfigLocation)
upstreamURL, err := url.Parse(cfg.upstream) | glog.Fatalf("Failed to build parse upstream URL: %v", err)
}
spdyMetrics := monitoring.NewSPDYMetrics()
spdyProxy := spdy.New(kcfg, upstreamURL, spdyMetrics)
kubeClient, err := kubernetes.NewForConfig(kcfg)
if err != nil {
glog.Fatalf("Failed to instantiate Kubernetes client: %v", err)
}
var oidcAuthenticator authenticator.Request
fileWatcherCtx, fileWatcherCtxCancel := context.WithCancel(context.Background())
// If OIDC configuration provided, use oidc authenticator
if cfg.auth.Authentication.OIDC.IssuerURL != "" {
oidcAuthenticator, err = setupOIDCAuthReloader(fileWatcherCtx, cfg.auth.Authentication.OIDC)
if err != nil {
glog.Fatalf("Failed to instantiate OIDC authenticator: %v", err)
}
} else {
//Use Delegating authenticator
tokenClient := kubeClient.AuthenticationV1().TokenReviews()
oidcAuthenticator, err = authn.NewDelegatingAuthenticator(tokenClient, cfg.auth.Authentication)
if err != nil {
glog.Fatalf("Failed to instantiate delegating authenticator: %v", err)
}
}
metrics, err := monitoring.NewProxyMetrics()
if err != nil {
glog.Fatalf("Failed to create metrics: %v", err)
}
authProxy := proxy.New(cfg.auth, nil, oidcAuthenticator, metrics)
if err != nil {
glog.Fatalf("Failed to create rbac-proxy: %v", err)
}
proxyForApiserver := strings.Contains(cfg.upstream, proxy.KUBERNETES_SERVICE)
rp, err := newReverseProxy(upstreamURL, kcfg, proxyForApiserver)
if err != nil {
glog.Fatalf("Unable to create reverse proxy, %s", err)
}
//Prometheus
prometheusRegistry := prometheus.NewRegistry()
err = prometheusRegistry.Register(prometheus.NewGoCollector())
if err != nil {
glog.Fatalf("failed to register Go runtime metrics: %v", err)
}
err = prometheusRegistry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
if err != nil {
glog.Fatalf("failed to register process metrics: %v", err)
}
mux := http.NewServeMux()
mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
ok := authProxy.Handle(w, req)
if !ok {
return
}
if spdyProxy.IsSpdyRequest(req) {
spdyProxy.ServeHTTP(w, req)
} else {
rp.ServeHTTP(w, req)
}
}))
if cfg.secureListenAddress != "" {
srv := &http.Server{Handler: getCORSHandler(mux, cfg.cors)}
if cfg.tls.certFile == "" && cfg.tls.keyFile == "" {
glog.Info("Generating self signed cert as no cert is provided")
certBytes, keyBytes, err := certutil.GenerateSelfSignedCertKey("", nil, nil)
if err != nil {
glog.Fatalf("Failed to generate self signed cert and key: %v", err)
}
cert, err := tls.X509KeyPair(certBytes, keyBytes)
if err != nil {
glog.Fatalf("Failed to load generated self signed cert and key: %v", err)
}
version, err := tlsVersion(cfg.tls.minVersion)
if err != nil {
glog.Fatalf("TLS version invalid: %v", err)
}
cipherSuiteIDs, err := cliflag.TLSCipherSuites(cfg.tls.cipherSuites)
if err != nil {
glog.Fatalf("Failed to convert TLS cipher suite name to ID: %v", err)
}
srv.TLSConfig = &tls.Config{
CipherSuites: cipherSuiteIDs,
Certificates: []tls.C | if err != nil { | random_line_split |
main.go | (w, r, url, http.StatusFound)
}
// oauth2callback is the handler to which Google's OAuth service redirects the
// user after they have granted the appropriate permissions.
func oauth2callbackHandler(w http.ResponseWriter, r *http.Request) {
// Create an oauth transport with a urlfetch.Transport embedded inside.
t := &oauth.Transport{Config: config(r.Host)}
// Exchange the code for access and refresh tokens.
tok, err := t.Exchange(r.FormValue("code"))
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: exchange")
return
}
o, err := oauth2.New(t.Client())
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: oauth get")
return
}
u, err := o.Userinfo.Get().Do()
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: userinfo get")
return
}
userId := fmt.Sprintf("%s_%s", strings.Split(clientId, ".")[0], u.Id)
if err = storeUserID(w, r, userId); err != nil {
w.WriteHeader(500)
LogPrintf("oauth: store userid")
return
}
userSer, err := json.Marshal(u)
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: json marshal")
return
}
storeCredential(userId, tok, string(userSer))
http.Redirect(w, r, fullUrl, http.StatusFound)
}
func SetupHandler(w http.ResponseWriter, r *http.Request) {
userId, err := userID(r)
if err != nil || userId == "" {
w.WriteHeader(400)
LogPrintf("setup: userid")
return
}
t := authTransport(userId)
if t == nil {
w.WriteHeader(401)
LogPrintf("setup: auth")
return
}
setupUser(r, t.Client(), userId)
}
// signout Revokes access for the user and removes the associated credentials from the datastore.
func signoutHandler(w http.ResponseWriter, r *http.Request) {
userId, err := userID(r)
if err != nil || userId == "" {
w.WriteHeader(400)
LogPrintf("signout: userid")
return
}
t := authTransport(userId)
if t == nil {
w.WriteHeader(500)
LogPrintf("signout: auth")
return
}
req, err := http.NewRequest("GET", fmt.Sprintf(revokeEndpointFmt, t.Token.RefreshToken), nil)
response, err := http.DefaultClient.Do(req)
if err != nil {
w.WriteHeader(500)
LogPrintf("signout: revoke")
return
}
defer response.Body.Close()
storeUserID(w, r, "")
deleteCredential(userId)
http.Redirect(w, r, fullUrl, http.StatusFound)
}
func sendImageCard(image string, text string, svc *mirror.Service) {
nt := &mirror.TimelineItem{
SpeakableText: text,
MenuItems: []*mirror.MenuItem{&mirror.MenuItem{Action: "READ_ALOUD"}, &mirror.MenuItem{Action: "DELETE"}},
Html: "<img src=\"attachment:0\" width=\"100%\" height=\"100%\">",
Notification: &mirror.NotificationConfig{Level: "DEFAULT"},
}
req := svc.Timeline.Insert(nt)
req.Media(strings.NewReader(image))
_, err := req.Do()
if err != nil {
LogPrintf("sendimage: insert")
return
}
}
func getImageAttachment(conn *picarus.Conn, svc *mirror.Service, trans *oauth.Transport, t *mirror.TimelineItem) ([]byte, error) | return nil, err
}
return imageData, nil
}
func notifyOpenGlass(conn *picarus.Conn, svc *mirror.Service, trans *oauth.Transport, t *mirror.TimelineItem, userId string) {
if !hasFlagSingle(userId, "flags", "user_openglass") {
LogPrintf("openglass: flag user_openglass")
return
}
var err error
flags, err := getUserFlags(userId, "uflags")
if err != nil {
LogPrintf("openglass: uflags")
return
}
if t.Attachments != nil && len(t.Attachments) > 0 {
imageData, err := getImageAttachment(conn, svc, trans, t)
if err != nil {
LogPrintf("openglass: attachment")
return
}
imageRow, err := PicarusApiImageUpload(conn, imageData)
if err != nil {
LogPrintf("openglass: picarus upload")
return
}
pushUserListTrim(userId, "images", imageRow, maxImages)
PicarusApiRowThumb(conn, imageRow)
if hasFlag(flags, "match_memento") {
mementoMatches, _, err := matchMementoImage(conn, imageRow, userId)
if err != nil {
LogPrintf("openglass: memento match")
} else {
for row, note := range mementoMatches {
m, err := conn.GetRow("images", row, []string{picarus.B64Dec(glassImageModel)})
if err != nil {
LogPrintf("openglass: memento get thumb")
continue
}
sendImageCard(m[picarus.B64Dec(glassImageModel)], note, svc)
}
}
}
if hasFlag(flags, "location") && hasFlag(flags, "location:streetview") {
//searchData, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(locationModel))
}
if err != nil {
LogPrintf("openglass: image search")
}
// Warped image example
var imageWarped string
if hasFlag(flags, "warp") {
imageWarped, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(homographyModel))
if err != nil {
LogPrintf("openglass: image warp")
imageWarped = ""
} else {
sendImageCard(imageWarped, "", svc)
}
}
// If there is a caption, send it to the annotation task
if len(t.Text) > 0 {
if hasFlag(flags, "crowdqa") {
imageType := "full"
if strings.HasPrefix(t.Text, "augmented ") {
if len(imageWarped) > 0 {
imageWarpedData := []byte(imageWarped)
imageRowWarped, err := PicarusApiImageUpload(conn, imageWarpedData)
PicarusApiRowThumb(conn, imageRowWarped)
if err != nil {
LogPrintf("openglass: warp image upload")
} else {
imageRow = imageRowWarped
imageData = imageWarpedData
imageType = "augmented"
}
}
t.Text = t.Text[10:] // Remove "augmented "
}
_, err = conn.PatchRow("images", imageRow, map[string]string{"meta:question": t.Text, "meta:openglass_user": userId,
"meta:openglass_image_type": imageType}, map[string][]byte{})
if err != nil {
LogPrintf("openglass: patch image")
return
}
// TODO: Here is where we would resize the image, we can do that later
_, err = conn.PostRow("jobs", annotationTask, map[string]string{"action": "io/annotation/sync"})
if err != nil {
LogPrintf("openglass: sync annotations")
return
}
}
} else {
if hasFlag(flags, "predict") {
confHTML := "<article><section><ul class=\"text-x-small\">"
menuItems := []*mirror.MenuItem{}
for modelName, modelRow := range predictionModels {
confMsgpack, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(modelRow))
if err != nil {
LogPrintf("openglass: predict")
return
}
var value float64
err = msgpack.Unmarshal([]byte(confMsgpack), &value, nil)
if err != nil {
LogPrintf("openglass: predict msgpack")
return
}
confHTML = confHTML + fmt.Sprintf("<li>%s: %f</li>", modelName, value)
menuItems = append(menuItems, &mirror.MenuItem{Action: "CUSTOM", Id: modelName + " 1", Values: []*mirror.MenuValue{&mirror.MenuValue{DisplayName: modelName, IconUrl: fullUrl + "/static/icon_plus.png | {
a, err := svc.Timeline.Attachments.Get(t.Id, t.Attachments[0].Id).Do()
if err != nil {
LogPrintf("getattachment: metadata")
return nil, err
}
req, err := http.NewRequest("GET", a.ContentUrl, nil)
if err != nil {
LogPrintf("getattachment: http")
return nil, err
}
resp, err := trans.RoundTrip(req)
if err != nil {
LogPrintf("getattachment: content")
return nil, err
}
defer resp.Body.Close()
imageData, err := ioutil.ReadAll(resp.Body)
if err != nil {
LogPrintf("getattachment: body") | identifier_body |
main.go | (w, r, url, http.StatusFound)
}
// oauth2callback is the handler to which Google's OAuth service redirects the
// user after they have granted the appropriate permissions.
func oauth2callbackHandler(w http.ResponseWriter, r *http.Request) {
// Create an oauth transport with a urlfetch.Transport embedded inside.
t := &oauth.Transport{Config: config(r.Host)}
// Exchange the code for access and refresh tokens.
tok, err := t.Exchange(r.FormValue("code"))
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: exchange")
return
}
o, err := oauth2.New(t.Client())
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: oauth get")
return
}
u, err := o.Userinfo.Get().Do()
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: userinfo get")
return
}
userId := fmt.Sprintf("%s_%s", strings.Split(clientId, ".")[0], u.Id)
if err = storeUserID(w, r, userId); err != nil {
w.WriteHeader(500)
LogPrintf("oauth: store userid")
return
}
userSer, err := json.Marshal(u)
if err != nil {
w.WriteHeader(500)
LogPrintf("oauth: json marshal")
return
}
storeCredential(userId, tok, string(userSer))
http.Redirect(w, r, fullUrl, http.StatusFound)
}
func SetupHandler(w http.ResponseWriter, r *http.Request) {
userId, err := userID(r)
if err != nil || userId == "" {
w.WriteHeader(400)
LogPrintf("setup: userid")
return
}
t := authTransport(userId)
if t == nil {
w.WriteHeader(401)
LogPrintf("setup: auth")
return
}
setupUser(r, t.Client(), userId)
}
// signout Revokes access for the user and removes the associated credentials from the datastore.
func signoutHandler(w http.ResponseWriter, r *http.Request) {
userId, err := userID(r)
if err != nil || userId == "" {
w.WriteHeader(400)
LogPrintf("signout: userid")
return
}
t := authTransport(userId)
if t == nil {
w.WriteHeader(500)
LogPrintf("signout: auth")
return
}
req, err := http.NewRequest("GET", fmt.Sprintf(revokeEndpointFmt, t.Token.RefreshToken), nil)
response, err := http.DefaultClient.Do(req)
if err != nil {
w.WriteHeader(500)
LogPrintf("signout: revoke")
return
}
defer response.Body.Close()
storeUserID(w, r, "")
deleteCredential(userId)
http.Redirect(w, r, fullUrl, http.StatusFound)
}
func sendImageCard(image string, text string, svc *mirror.Service) {
nt := &mirror.TimelineItem{
SpeakableText: text,
MenuItems: []*mirror.MenuItem{&mirror.MenuItem{Action: "READ_ALOUD"}, &mirror.MenuItem{Action: "DELETE"}},
Html: "<img src=\"attachment:0\" width=\"100%\" height=\"100%\">",
Notification: &mirror.NotificationConfig{Level: "DEFAULT"},
}
req := svc.Timeline.Insert(nt)
req.Media(strings.NewReader(image))
_, err := req.Do()
if err != nil {
LogPrintf("sendimage: insert")
return
}
}
func getImageAttachment(conn *picarus.Conn, svc *mirror.Service, trans *oauth.Transport, t *mirror.TimelineItem) ([]byte, error) {
a, err := svc.Timeline.Attachments.Get(t.Id, t.Attachments[0].Id).Do()
if err != nil {
LogPrintf("getattachment: metadata")
return nil, err
}
req, err := http.NewRequest("GET", a.ContentUrl, nil)
if err != nil |
resp, err := trans.RoundTrip(req)
if err != nil {
LogPrintf("getattachment: content")
return nil, err
}
defer resp.Body.Close()
imageData, err := ioutil.ReadAll(resp.Body)
if err != nil {
LogPrintf("getattachment: body")
return nil, err
}
return imageData, nil
}
func notifyOpenGlass(conn *picarus.Conn, svc *mirror.Service, trans *oauth.Transport, t *mirror.TimelineItem, userId string) {
if !hasFlagSingle(userId, "flags", "user_openglass") {
LogPrintf("openglass: flag user_openglass")
return
}
var err error
flags, err := getUserFlags(userId, "uflags")
if err != nil {
LogPrintf("openglass: uflags")
return
}
if t.Attachments != nil && len(t.Attachments) > 0 {
imageData, err := getImageAttachment(conn, svc, trans, t)
if err != nil {
LogPrintf("openglass: attachment")
return
}
imageRow, err := PicarusApiImageUpload(conn, imageData)
if err != nil {
LogPrintf("openglass: picarus upload")
return
}
pushUserListTrim(userId, "images", imageRow, maxImages)
PicarusApiRowThumb(conn, imageRow)
if hasFlag(flags, "match_memento") {
mementoMatches, _, err := matchMementoImage(conn, imageRow, userId)
if err != nil {
LogPrintf("openglass: memento match")
} else {
for row, note := range mementoMatches {
m, err := conn.GetRow("images", row, []string{picarus.B64Dec(glassImageModel)})
if err != nil {
LogPrintf("openglass: memento get thumb")
continue
}
sendImageCard(m[picarus.B64Dec(glassImageModel)], note, svc)
}
}
}
if hasFlag(flags, "location") && hasFlag(flags, "location:streetview") {
//searchData, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(locationModel))
}
if err != nil {
LogPrintf("openglass: image search")
}
// Warped image example
var imageWarped string
if hasFlag(flags, "warp") {
imageWarped, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(homographyModel))
if err != nil {
LogPrintf("openglass: image warp")
imageWarped = ""
} else {
sendImageCard(imageWarped, "", svc)
}
}
// If there is a caption, send it to the annotation task
if len(t.Text) > 0 {
if hasFlag(flags, "crowdqa") {
imageType := "full"
if strings.HasPrefix(t.Text, "augmented ") {
if len(imageWarped) > 0 {
imageWarpedData := []byte(imageWarped)
imageRowWarped, err := PicarusApiImageUpload(conn, imageWarpedData)
PicarusApiRowThumb(conn, imageRowWarped)
if err != nil {
LogPrintf("openglass: warp image upload")
} else {
imageRow = imageRowWarped
imageData = imageWarpedData
imageType = "augmented"
}
}
t.Text = t.Text[10:] // Remove "augmented "
}
_, err = conn.PatchRow("images", imageRow, map[string]string{"meta:question": t.Text, "meta:openglass_user": userId,
"meta:openglass_image_type": imageType}, map[string][]byte{})
if err != nil {
LogPrintf("openglass: patch image")
return
}
// TODO: Here is where we would resize the image, we can do that later
_, err = conn.PostRow("jobs", annotationTask, map[string]string{"action": "io/annotation/sync"})
if err != nil {
LogPrintf("openglass: sync annotations")
return
}
}
} else {
if hasFlag(flags, "predict") {
confHTML := "<article><section><ul class=\"text-x-small\">"
menuItems := []*mirror.MenuItem{}
for modelName, modelRow := range predictionModels {
confMsgpack, err := PicarusApiModel(conn, imageRow, picarus.B64Dec(modelRow))
if err != nil {
LogPrintf("openglass: predict")
return
}
var value float64
err = msgpack.Unmarshal([]byte(confMsgpack), &value, nil)
if err != nil {
LogPrintf("openglass: predict msgpack")
return
}
confHTML = confHTML + fmt.Sprintf("<li>%s: %f</li>", modelName, value)
menuItems = append(menuItems, &mirror.MenuItem{Action: "CUSTOM", Id: modelName + " 1", Values: []*mirror.MenuValue{&mirror.MenuValue{DisplayName: modelName, IconUrl: fullUrl + "/static/icon_plus | {
LogPrintf("getattachment: http")
return nil, err
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.